diff --git a/.github/components.yml b/.github/components.yml
index 04465f78a788e9..f929f8cdf00b07 100644
--- a/.github/components.yml
+++ b/.github/components.yml
@@ -82,7 +82,6 @@ TEMPLATE:
     - C_API
     - Python_API
     - NVIDIA
-    - TOKENIZERS
   build:
     - IR_FE
 
@@ -130,6 +129,7 @@ TF_FE:
   build:
     - CPU
     - Python_API
+    - TOKENIZERS
 
 TFL_FE:
   revalidate:
@@ -144,6 +144,7 @@ PyTorch_FE:
   build:
     - CPU
     - Python_API
+    - TOKENIZERS
 
 C_API:
   build:
@@ -196,7 +197,6 @@ IE_Tests:
     - TEMPLATE
     - AUTO
     - NVIDIA
-    - TOKENIZERS
   build:
     - IR_FE
 
diff --git a/.github/workflows/android_arm64.yml b/.github/workflows/android_arm64.yml
index 3b52ba7e72cc11..6e60051cd0c0ab 100644
--- a/.github/workflows/android_arm64.yml
+++ b/.github/workflows/android_arm64.yml
@@ -5,7 +5,7 @@ on:
   merge_group:
   push:
     branches:
-      - master
+      # - master
       - 'releases/**'
 
 concurrency:
diff --git a/.github/workflows/fedora.yml b/.github/workflows/fedora.yml
index 0a8298e5a17497..04e7e36fd63b86 100644
--- a/.github/workflows/fedora.yml
+++ b/.github/workflows/fedora.yml
@@ -5,7 +5,7 @@ on:
   merge_group:
   push:
     branches:
-      - master
+      # - master
       - 'releases/**'
 
 concurrency:
diff --git a/.github/workflows/job_cpu_functional_tests.yml b/.github/workflows/job_cpu_functional_tests.yml
index fa45d642813b05..03708246ddc4bc 100644
--- a/.github/workflows/job_cpu_functional_tests.yml
+++ b/.github/workflows/job_cpu_functional_tests.yml
@@ -29,7 +29,6 @@ jobs:
       INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
       PARALLEL_TEST_SCRIPT: ${{ github.workspace }}/install/tests/functional_test_utils/layer_tests_summary/run_parallel.py
       PARALLEL_TEST_CACHE: ${{ github.workspace }}/install/tests/test_cache.lst
-    if: ${{ github.event_name != 'merge_group' }}
     steps:
       - name: Set apt retries
         run: echo 'Acquire::Retries "10";' > /etc/apt/apt.conf.d/80-retries
diff --git a/.github/workflows/job_cxx_unit_tests.yml b/.github/workflows/job_cxx_unit_tests.yml
index a2e42d1a0aed30..d131674bffad1e 100644
--- a/.github/workflows/job_cxx_unit_tests.yml
+++ b/.github/workflows/job_cxx_unit_tests.yml
@@ -31,7 +31,6 @@ jobs:
       DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
       INSTALL_DIR: ${{ github.workspace }}/install
       INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
-    if: ${{ github.event_name != 'merge_group' }}
     steps:
       - name: Set apt retries
         run: echo 'Acquire::Retries "10";' > /etc/apt/apt.conf.d/80-retries
diff --git a/.github/workflows/job_pytorch_models_tests.yml b/.github/workflows/job_pytorch_models_tests.yml
index f2a651e6727533..0e9bf740fc4159 100644
--- a/.github/workflows/job_pytorch_models_tests.yml
+++ b/.github/workflows/job_pytorch_models_tests.yml
@@ -51,6 +51,12 @@ jobs:
           name: openvino_package
           path: ${{ env.INSTALL_DIR }}
 
+      - name: Download OpenVINO tokenizers extension
+        uses: actions/download-artifact@v3
+        with:
+          name: openvino_tokenizers_wheel
+          path: ${{ env.INSTALL_DIR }}
+
       - name: Download OpenVINO tests package
         uses: actions/download-artifact@v3
         with:
@@ -98,7 +104,9 @@ jobs:
           self-hosted-runner: ${{ contains(inputs.runner, 'aks') }}
 
       - name: Install OpenVINO Python wheels
-        run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-*
+        run: |
+          python3 -m pip install ${INSTALL_DIR}/tools/openvino-*
+          python3 -m pip install ${INSTALL_DIR}/openvino_tokenizers-*
 
       - name: Install PyTorch tests requirements
         run: |
diff --git a/.github/workflows/job_tensorflow_hub_models_tests.yml b/.github/workflows/job_tensorflow_hub_models_tests.yml
index 54302b7cfbef58..c0cad932fbb631 100644
--- a/.github/workflows/job_tensorflow_hub_models_tests.yml
+++ b/.github/workflows/job_tensorflow_hub_models_tests.yml
@@ -51,6 +51,12 @@ jobs:
           name: openvino_package
           path: ${{ env.INSTALL_DIR }}
 
+      - name: Download OpenVINO tokenizers extension
+        uses: actions/download-artifact@v3
+        with:
+          name: openvino_tokenizers_wheel
+          path: ${{ env.INSTALL_DIR }}
+
       - name: Download OpenVINO tests package
         uses: actions/download-artifact@v3
         with:
@@ -97,7 +103,9 @@ jobs:
           self-hosted-runner: ${{ contains(inputs.runner, 'aks') }}
 
       - name: Install OpenVINO Python wheels
-        run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-*
+        run: |
+          python3 -m pip install ${INSTALL_DIR}/tools/openvino-*
+          python3 -m pip install ${INSTALL_DIR}/openvino_tokenizers-*
 
       - name: Install TF Hub tests requirements
         run: python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/requirements.txt
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index 84ea27f5635393..02559bb20292fc 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -480,7 +480,7 @@ jobs:
     name: TensorFlow Hub Models tests
     if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test ||
         fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test
-    needs: [ Build, Smart_CI ]
+    needs: [ Build, Smart_CI, Openvino_tokenizers ]
    uses: ./.github/workflows/job_tensorflow_hub_models_tests.yml
    with:
      runner: ${{ github.event_name == 'schedule' && 'ubuntu-20.04-16-cores' || 'ubuntu-20.04-8-cores' }}
@@ -494,7 +494,7 @@ jobs:
   PyTorch_Models_Tests:
     name: PyTorch Models tests
     if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test
-    needs: [ Build, Smart_CI ]
+    needs: [ Build, Smart_CI, Openvino_tokenizers ]
     uses: ./.github/workflows/job_pytorch_models_tests.yml
     with:
       runner: ${{ github.event_name == 'schedule' && 'ubuntu-20.04-16-cores' || 'ubuntu-20.04-8-cores' }}
diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml
index 362c36a50504e3..7f6ec8a70f7590 100644
--- a/.github/workflows/linux_arm64.yml
+++ b/.github/workflows/linux_arm64.yml
@@ -5,7 +5,7 @@ on:
   merge_group:
   push:
     branches:
-      - master
+      # - master
       - 'releases/**'
 
 concurrency:
diff --git a/.github/workflows/linux_riscv.yml b/.github/workflows/linux_riscv.yml
index 179020a218f0c6..2f1ca29890fc47 100644
--- a/.github/workflows/linux_riscv.yml
+++ b/.github/workflows/linux_riscv.yml
@@ -60,8 +60,8 @@ jobs:
       OPENVINO_REPO: /__w/openvino/openvino/openvino
       OPENVINO_BUILD_DIR: /__w/openvino/openvino/openvino_build
       INSTALL_DIR: /__w/openvino/openvino/openvino_install
-      CONAN_USER_HOME: /mount/caches/ccache/ubuntu22_riscv64_master_release/.conan
-      CCACHE_DIR: /mount/caches/ccache/ubuntu22_riscv64_master_release
+      CONAN_USER_HOME: /mount/caches/ccache/ubuntu22_riscv64_master/.conan
+      CCACHE_DIR: /mount/caches/ccache/ubuntu22_riscv64_master
       CCACHE_TEMPDIR: /__w/openvino/openvino/ccache_temp
       CCACHE_MAXSIZE: 50G
     if: ${{ !needs.smart_ci.outputs.skip_workflow && github.event_name != 'merge_group' }}
@@ -220,7 +220,7 @@ jobs:
       - name: Check status of all jobs
         if: >-
           ${{
-            contains(needs.*.result, 'failure') || 
+            contains(needs.*.result, 'failure') ||
             contains(needs.*.result, 'cancelled')
           }}
         run: exit 1
diff --git a/.github/workflows/webassembly.yml b/.github/workflows/webassembly.yml
index f7ba021b2472d8..250b9f549a6ec2 100644
--- a/.github/workflows/webassembly.yml
+++ b/.github/workflows/webassembly.yml
@@ -5,7 +5,7 @@ on:
   merge_group:
   push:
     branches:
-      - master
+      # - master
       - 'releases/**'
 
 concurrency:
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index caf1d2ee7edcce..871bb09c02c68c 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -252,8 +252,7 @@ jobs:
           if-no-files-found: 'error'
 
       - name: Upload openvino js package
-        if: ${{ 'false' }} # 128689
-        # if: fromJSON(needs.smart_ci.outputs.affected_components).JS_API
+        if: fromJSON(needs.smart_ci.outputs.affected_components).JS_API
         uses: actions/upload-artifact@v3
         with:
           name: openvino_js_package
@@ -364,8 +363,7 @@ jobs:
     env:
       OPENVINO_JS_DIR: "${{ github.workspace }}\\openvino\\src\\bindings\\js"
       OPENVINO_JS_LIBS_DIR: "${{ github.workspace }}\\openvino\\src\\bindings\\js\\node\\bin"
-    if: ${{ 'false' }} # 128689
-    # if: fromJSON(needs.smart_ci.outputs.affected_components).JS_API
+    if: fromJSON(needs.smart_ci.outputs.affected_components).JS_API
 
     steps:
       - name: Fetch OpenVINO JS sources
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Swish_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Swish_4.rst
index bcbf32234356ac..51c7537399c0bd 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Swish_4.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Swish_4.rst
@@ -5,7 +5,7 @@ Swish
 
 .. meta::
-  :description: Learn about Swish-4 - an element-wise, activation operation, which 
+  :description: Learn about Swish-4 - an element-wise, activation operation, which
                 can be performed on a single tensor in OpenVINO.
 
 **Versioned name**: *Swish-4*
@@ -55,7 +55,7 @@ Example: Second input ``beta`` provided
            <dim>256</dim>
            <dim>56</dim>
-       < !-- beta value: 2.0 -->
+       <!-- beta value: 2.0 -->
 
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/CumSum_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/CumSum_3.rst
index a6270e77ad8f49..5a1626d8db60c6 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/CumSum_3.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/CumSum_3.rst
@@ -5,7 +5,7 @@ CumSum
 
 .. meta::
-  :description: Learn about CumSum-3 - an element-wise, arithmetic operation, which 
+  :description: Learn about CumSum-3 - an element-wise, arithmetic operation, which
                 can be performed on a single tensor in OpenVINO.
 
 **Versioned name**: *CumSum-3*
@@ -24,7 +24,7 @@ To perform the summation in the opposite direction of the axis, set reverse attr
 
 * **Description**: If the attribute is set to ``true``, then exclusive sums are returned, the ``j-th`` element is not included in the ``j-th`` sum. Otherwise, the inclusive sum of the first ``j`` elements for the ``j-th`` element is calculated.
 * **Range of values**:
-  
+
   * ``false`` - include the top element
   * ``true`` - do not include the top element
 * **Type**: ``boolean``
@@ -35,7 +35,7 @@ To perform the summation in the opposite direction of the axis, set reverse attr
 
 * **Description**: If set to ``true`` will perform the sums in reverse direction.
 * **Range of values**:
-  
+
   * ``false`` - do not perform sums in reverse direction
   * ``true`` - perform sums in reverse direction
 * **Type**: ``boolean``
@@ -63,16 +63,16 @@ To perform the summation in the opposite direction of the axis, set reverse attr
 .. code-block:: xml
    :force:
-   
+
-       < !-- input value is: [1., 2., 3., 4., 5.] -->
+       <!-- input value is: [1., 2., 3., 4., 5.] -->
            <dim>5</dim>
-       < !-- axis value is: 0 -->
+       <!-- axis value is: 0 -->
-       < !-- output value is: [1., 3., 6., 10., 15.] -->
+       <!-- output value is: [1., 3., 6., 10., 15.] -->
            <dim>5</dim>
@@ -82,16 +82,16 @@ To perform the summation in the opposite direction of the axis, set reverse attr
 .. code-block:: xml
    :force:
-   
+
-       < !-- input value is: [1., 2., 3., 4., 5.] -->
+       <!-- input value is: [1., 2., 3., 4., 5.] -->
            <dim>5</dim>
-       < !-- axis value is: 0 -->
+       <!-- axis value is: 0 -->
-       < !-- output value is: [0., 1., 3., 6., 10.] -->
+       <!-- output value is: [0., 1., 3., 6., 10.] -->
            <dim>5</dim>
@@ -101,16 +101,16 @@ To perform the summation in the opposite direction of the axis, set reverse attr
 .. code-block:: xml
    :force:
-   
+
-       < !-- input value is: [1., 2., 3., 4., 5.] -->
+       <!-- input value is: [1., 2., 3., 4., 5.] -->
            <dim>5</dim>
-       < !-- axis value is: 0 -->
+       <!-- axis value is: 0 -->
-       < !-- output value is: [15., 14., 12., 9., 5.] -->
+       <!-- output value is: [15., 14., 12., 9., 5.] -->
            <dim>5</dim>
@@ -120,7 +120,7 @@ To perform the summation in the opposite direction of the axis, set reverse attr
 .. code-block:: xml
    :force:
-   
+
        < -- input value is: [1., 2., 3., 4., 5.] -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sqrt_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sqrt_1.rst
index c90bacf4443d2a..b6d380638db5c2 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sqrt_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sqrt_1.rst
@@ -5,7 +5,7 @@ Sqrt
 
 .. meta::
-  :description: Learn about Sqrt-1 - an element-wise, arithmetic operation, which 
+  :description: Learn about Sqrt-1 - an element-wise, arithmetic operation, which
                 can be performed on a single tensor in OpenVINO.
 
 **Versioned name**: *Sqrt-1*
@@ -48,12 +48,12 @@ Sqrt
-           <dim>4</dim> < !-- float input values: [4.0, 7.0, 9.0, 10.0] -->
+           <dim>4</dim> <!-- float input values: [4.0, 7.0, 9.0, 10.0] -->
-           <dim>4</dim> < !-- float output values: [2.0, 2.6457512, 3.0, 3.1622777] -->
+           <dim>4</dim> <!-- float output values: [2.0, 2.6457512, 3.0, 3.1622777] -->
@@ -66,12 +66,12 @@ Sqrt
-           <dim>4</dim> < !-- int input values: [4, 7, 9, 10] -->
+           <dim>4</dim> <!-- int input values: [4, 7, 9, 10] -->
-           <dim>4</dim> < !-- int output values: [2, 3, 3, 3] -->
+           <dim>4</dim> <!-- int output values: [2, 3, 3, 3] -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsFinite_10.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsFinite_10.rst
index a72896382765e2..d38e91d1328d2c 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsFinite_10.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsFinite_10.rst
@@ -5,7 +5,7 @@ IsFinite
 
 .. meta::
-  :description: Learn about IsFinite-10 - an element-wise, comparison operation, which 
+  :description: Learn about IsFinite-10 - an element-wise, comparison operation, which
                 can be performed on a single tensor in OpenVINO.
 
 **Versioned name**: *IsFinite-10*
@@ -64,12 +64,12 @@ IsFinite
-           <dim>4</dim> < !-- Input value is: [NaN, 2.1, 3.7, Inf] -->
+           <dim>4</dim> <!-- Input value is: [NaN, 2.1, 3.7, Inf] -->
-           <dim>4</dim> < !-- Output value is: [False, True, True, False] -->
+           <dim>4</dim> <!-- Output value is: [False, True, True, False] -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Select_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Select_1.rst
index 272b29acfd7f47..2d093126a11c1d 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Select_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Select_1.rst
@@ -5,7 +5,7 @@ Select
 
 .. meta::
-  :description: Learn about Select-1 - an element-wise, condition operation, which 
+  :description: Learn about Select-1 - an element-wise, condition operation, which
                 can be performed on three given tensors in OpenVINO.
 
 **Versioned name**: *Select-1*
@@ -58,21 +58,21 @@ Select
-       < !-- cond value is: [[false, false], [true, false], [true, true]] -->
+       <!-- cond value is: [[false, false], [true, false], [true, true]] -->
            <dim>3</dim>
            <dim>2</dim>
-       < !-- then value is: [[-1, 0], [1, 2], [3, 4]] -->
+       <!-- then value is: [[-1, 0], [1, 2], [3, 4]] -->
            <dim>3</dim>
            <dim>2</dim>
-       < !-- else value is: [[11, 10], [9, 8], [7, 6]] -->
+       <!-- else value is: [[11, 10], [9, 8], [7, 6]] -->
            <dim>3</dim>
            <dim>2</dim>
-       < !-- output value is: [[11, 10], [1, 8], [3, 4]] -->
+       <!-- output value is: [[11, 10], [1, 8], [3, 4]] -->
            <dim>3</dim>
            <dim>2</dim>
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/ConvolutionBackpropData_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/ConvolutionBackpropData_1.rst
index cc0a265754a6af..b39df77e94b5de 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/ConvolutionBackpropData_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/ConvolutionBackpropData_1.rst
@@ -5,7 +5,7 @@ ConvolutionBackpropData
 
 .. meta::
-  :description: Learn about ConvolutionBackpropData-1 - a 1D, 2D or 3D convolution operation, which 
+  :description: Learn about ConvolutionBackpropData-1 - a 1D, 2D or 3D convolution operation, which
                 can be performed on input and kernel tensors in OpenVINO.
 
 **Versioned name**: *ConvolutionBackpropData-1*
@@ -24,11 +24,11 @@ When output shape is specified as an input tensor ``output_shape`` then it speci
 .. code-block:: xml
    :force:
-   
+
    if auto_pads != None:
        pads_begin[i] = 0
        pads_end[i] = 0
-   
+
    Y_i = stride[i] * (X_i - 1) + ((K_i - 1) * dilations[i] + 1) - pads_begin[i] - pads_end[i] + output_padding[i]
 
 where ``K_i`` filter kernel dimension along spatial axis ``i``.
@@ -37,7 +37,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a
 .. code-block:: xml
    :force:
-   
+
    total_padding[i] = stride[i] * (X_i - 1) + ((K_i - 1) * dilations[i] + 1) - output_shape[i] + output_padding[i]
    if auto_pads != SAME_UPPER:
        pads_begin[i] = total_padding[i] // 2
@@ -81,7 +81,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a
 * *auto_pad*
 
   * **Description**: *auto_pad* has the same definition as *auto_pad* for a regular Convolution but applied in the backward way, for the output tensor.
-    
+
     * *explicit*: use explicit padding values from ``pads_begin`` and ``pads_end``.
     * *same_upper* the input is padded to match the output size. In case of odd padding value an extra padding is added at the end.
     * *same_lower* the input is padded to match the output size. In case of odd padding value an extra padding is added at the beginning.
@@ -105,7 +105,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a
 * **2**: Convolution kernel tensor of type *T1* and rank 3, 4 or 5. Layout is ``[C_INPUT, C_OUTPUT, Z, Y, X]`` (number of input channels, number of output channels, spatial axes Z, Y, X). Spatial size of the kernel is derived from the shape of this input and aren't specified by any attribute. **Required.**
 * **3**: ``output_shape`` is 1D tensor of type *T2* that specifies spatial shape of the output. If specified, *padding amount* is deduced from relation of input and output spatial shapes according to formulas in the description. If not specified, *output shape* is calculated based on the ``pads_begin`` and ``pads_end`` or completely according to ``auto_pad``. **Optional.**
 * **Note**: Type of the convolution (1D, 2D or 3D) is derived from the rank of the input tensors and not specified by any attribute:
-  
+
   * 1D convolution (input tensors rank 3) means that there is only one spatial axis X,
   * 2D convolution (input tensors rank 4) means that there are two spatial axes Y, X,
   * 3D convolution (input tensors rank 5) means that there are three spatial axes Z, Y, X.
@@ -125,7 +125,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a
 .. code-block:: xml
    :force:
-   
+
@@ -156,7 +156,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a
 .. code-block:: xml
    :force:
-   
+
@@ -187,7 +187,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a
 .. code-block:: xml
    :force:
-   
+
@@ -204,7 +204,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a
            <dim>3</dim>
-           <dim>2</dim> < !-- output_shape value is: [450, 450]-->
+           <dim>2</dim> <!-- output_shape value is: [450, 450]-->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBoxClustered_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBoxClustered_1.rst
index 9c44b148909d46..ef7ba693c1e8d0 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBoxClustered_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBoxClustered_1.rst
@@ -5,7 +5,7 @@ PriorBoxClustered
 
 .. meta::
-  :description: Learn about PriorBoxClustered-1 - an object detection operation, 
+  :description: Learn about PriorBoxClustered-1 - an object detection operation,
                 which can be performed on two 1D input tensors.
 
 **Versioned name**: *PriorBoxClustered-1*
@@ -94,7 +94,7 @@ If *clip* is defined, the coordinates of prior boxes are recalculated with the f
 * *step (step_w, step_h)*
 
-  * **Description**: *step (step_w, step_h)* is a distance between box centers. For example, *step* equal 85 means that the distance between neighborhood prior boxes centers is 85. If both *step_h* and *step_w* are 0 then they are updated with value of *step*. If after that they are still 0 then they are calculated as input image width(height) divided with first input width(height). 
+  * **Description**: *step (step_w, step_h)* is a distance between box centers. For example, *step* equal 85 means that the distance between neighborhood prior boxes centers is 85. If both *step_h* and *step_w* are 0 then they are updated with value of *step*. If after that they are still 0 then they are calculated as input image width(height) divided with first input width(height).
  * **Range of values**: floating-point positive number
  * **Type**: ``float``
  * **Default value**: 0.0
@@ -139,10 +139,10 @@ If *clip* is defined, the coordinates of prior boxes are recalculated with the f
-           <dim>2</dim> < !-- [10, 19] -->
+           <dim>2</dim> <!-- [10, 19] -->
-           <dim>2</dim> < !-- [180, 320] -->
+           <dim>2</dim> <!-- [180, 320] -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_1.rst
index 6c5bb401ee1039..05955f73889aee 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_1.rst
@@ -6,7 +6,7 @@ PriorBox
 
 .. meta::
-  :description: Learn about PriorBox-1 - an object detection operation, 
+  :description: Learn about PriorBox-1 - an object detection operation,
                 which can be performed on two required input tensors.
 
 **Versioned name**: *PriorBox-1*
@@ -22,44 +22,44 @@ PriorBox
 1. First calculates *center_x* and *center_y* of prior box:
 
    .. math::
-      
+
      W \equiv Width \quad Of \quad Image \\
     H \equiv Height \quad Of \quad Image
-   
-   
+
+
    * If step equals 0:
-     
+
     .. math::
-        
+
        center_x=(w+0.5) \\
        center_y=(h+0.5)
-     
+
    * else:
-     
+
     .. math::
-        
+
        center_x=(w+offset)*step \\
        center_y=(h+offset)*step \\
        w \subset \left( 0, W \right ) \\
        h \subset \left( 0, H \right )
 
 2. Then, for each :math:`s \subset \left( 0, min\_sizes \right )` calculates coordinates of prior boxes:
 
   .. math::
-     
     xmin = \frac{\frac{center_x - s}{2}}{W}
-  
-  
-  
+
+
+
   .. math::
-     
     ymin = \frac{\frac{center_y - s}{2}}{H}
-  
-  
-  
+
+
+
   .. math::
-     
     xmax = \frac{\frac{center_x + s}{2}}{W}
-  
-  
-  
+
+
+
   .. math::
-     
     ymin = \frac{\frac{center_y + s}{2}}{H}
 
 3. If *clip* attribute is set to true, each output value is clipped between :math:`\left< 0, 1 \right>`.
@@ -186,10 +186,10 @@ PriorBox
-           <dim>2</dim> < !-- values: [24, 42] -->
+           <dim>2</dim> <!-- values: [24, 42] -->
-           <dim>2</dim> < !-- values: [384, 672] -->
+           <dim>2</dim> <!-- values: [384, 672] -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_8.rst
index 4535ad02ca962c..e1b9e1e71ac084 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_8.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_8.rst
@@ -5,7 +5,7 @@ PriorBox
 
 .. meta::
-  :description: Learn about PriorBox-8 - an object detection operation, 
+  :description: Learn about PriorBox-8 - an object detection operation,
                 which can be performed on two required input tensors.
 
 **Versioned name**: *PriorBox-8*
@@ -21,41 +21,41 @@ PriorBox
 1. First, it calculates *center_x* and *center_y* of a prior box:
 
   .. math::
-     
     W \equiv Width \quad Of \quad Image \\
     H \equiv Height \quad Of \quad Image
 
   * If step equals 0:
 
     .. math::
-       
       center_x=(w+0.5) \\
       center_y=(h+0.5)
 
   * else:
 
     .. math::
-       
       center_x=(w+offset)*step \\
       center_y=(h+offset)*step \\
       w \subset \left( 0, W \right ) \\
       h \subset \left( 0, H \right )
 
 2. Then, it calculates coordinates of prior boxes for each :math:`s \subset \left( 0, min\_sizes \right )` :
 
   .. math::
-     
     xmin = \frac{\frac{center_x - s}{2}}{W}
-  
-  
+
+
   .. math::
-     
     ymin = \frac{\frac{center_y - s}{2}}{H}
-  
-  
+
+
   .. math::
-     
     xmax = \frac{\frac{center_x + s}{2}}{W}
-  
-  
+
+
   .. math::
-     
     ymin = \frac{\frac{center_y + s}{2}}{H}
 
 3. If *clip* attribute is set to true, each output value is clipped between :math:`\left< 0, 1 \right>`.
@@ -82,7 +82,7 @@ PriorBox
 
 * **Description**: *flip* is a flag that denotes that each *aspect_ratio* is duplicated and flipped. For example, *flip* equals 1 and *aspect_ratio* equals ``[4.0,2.0]``, meaning that the aspect_ratio is equal to ``[4.0,2.0,0.25,0.5]``.
 * **Range of values**:
-  
+
   * false or 0 - each *aspect_ratio* is flipped
   * true or 1 - each *aspect_ratio* is not flipped
 * **Type**: ``boolean``
@@ -193,10 +193,10 @@ PriorBox
-           <dim>2</dim> < !-- values: [24, 42] -->
+           <dim>2</dim> <!-- values: [24, 42] -->
-           <dim>2</dim> < !-- values: [384, 672] -->
+           <dim>2</dim> <!-- values: [384, 672] -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/RegionYolo_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/RegionYolo_1.rst
index 0466aca91977d9..f1c235ab3ef8c1 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/RegionYolo_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/RegionYolo_1.rst
@@ -5,7 +5,7 @@ RegionYolo
 
 .. meta::
-  :description: Learn about RegionYolo-1 - an object detection operation, 
+  :description: Learn about RegionYolo-1 - an object detection operation,
                 which can be performed on a 4D input tensor.
 
 **Versioned name**: *RegionYolo-1*
@@ -65,7 +65,7 @@ RegionYolo
 
 * **Description**: *do_softmax* is a flag that specifies the inference method and affects how the number of regions is determined. It also affects output shape. If it is 0, then output shape is 4D, and 2D otherwise.
 * **Range of values**:
-  
+
   * *false* - do not perform softmax
   * *true* - perform softmax
 * **Type**: ``boolean``
@@ -100,7 +100,7 @@ RegionYolo
 .. code-block:: xml
    :force:
 
-   < !-- YOLO V3 example -->
+   <!-- YOLO V3 example -->
@@ -120,8 +120,8 @@ RegionYolo
-   
-   < !-- YOLO V2 Example -->
+
+   <!-- YOLO V2 Example -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Eye_9.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Eye_9.rst
index aabb8ab101c212..411c54364258f8 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Eye_9.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Eye_9.rst
@@ -5,7 +5,7 @@ Eye
 
 .. meta::
-  :description: Learn about Eye-9 - a generation operation, which can be 
+  :description: Learn about Eye-9 - a generation operation, which can be
                 performed on three required and one optional input tensors.
 
 **Versioned name**: *Eye-9*
@@ -23,13 +23,13 @@ Example 1. *Eye* output with ``output_type`` = ``i32``:
 .. code-block:: xml
    :force:
-   
+
    num_rows = 3
-   
+
    num_columns = 4
-   
+
    diagonal_index = 2
-   
+
    output = [[0 0 1 0]
              [0 0 0 1]
              [0 0 0 0]]
@@ -38,13 +38,13 @@ Example 2. *Eye* output with ``output_type`` = ``i32``:
 .. code-block:: xml
    :force:
-   
+
    num_rows = 3
-   
+
    num_columns = 4
-   
+
    diagonal_index = -1
-   
+
    output = [[0 0 0 0]
              [1 0 0 0]
              [0 1 0 0]]
@@ -53,13 +53,13 @@ Example 3. *Eye* output with ``output_type`` = ``f16``:
 .. code-block:: xml
    :force:
-   
+
    num_rows = 2
-   
+
    diagonal_index = 5
-   
+
    batch_shape = [1, 2]
-   
+
    output = [[[[0. 0.]
                [0. 0.]]
               [[0. 0.]
@@ -97,13 +97,13 @@ Example 3. *Eye* output with ``output_type`` = ``f16``:
 .. code-block:: xml
    :force:
-   
+
-       < !-- num rows: 5 -->
-       < !-- num columns: 5 -->
-       < !-- diagonal index -->
+       <!-- num rows: 5 -->
+       <!-- num columns: 5 -->
+       <!-- diagonal index -->
@@ -117,14 +117,14 @@ Example 3. *Eye* output with ``output_type`` = ``f16``:
 
 .. code-block:: xml
    :force:
-   
+
-       < !-- num rows -->
-       < !-- num columns -->
-       < !-- diagonal index -->
-       < !-- batch_shape : [2, 3] -->
+       <!-- num rows -->
+       <!-- num columns -->
+       <!-- diagonal index -->
+       <!-- batch_shape : [2, 3] -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Multinomial_13.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Multinomial_13.rst
index 34f355612232f1..46d2d66213594f 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Multinomial_13.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Multinomial_13.rst
@@ -91,10 +91,10 @@ Example 3 - 2D tensor, without replacement
 
 * **Description**: controls whether to sample with replacement (classes can be sampled multiple times).
 * **Range of values**: `true`, `false`
-  
+
   * ``true`` - class indices can be sampled multiple times.
   * ``false`` - class indices will not repeat in the output and the size of ``probs``' ``class_size`` dimension is required to be larger or equal to *num_samples* value. Might affect performance.
-  
+
 * **Type**: `bool`
 * **Required**: *Yes*
@@ -149,16 +149,16 @@ Example 3 - 2D tensor, without replacement
-       < !-- probs value: [[0.1, 0.5, 0.4]] -->
-           <dim>1</dim> < !-- batch size of 2 -->
+       <!-- probs value: [[0.1, 0.5, 0.4]] -->
+           <dim>1</dim> <!-- batch size of 2 -->
            <dim>3</dim>
-       < !-- num_samples value: 5 -->
+       <!-- num_samples value: 5 -->
-           <dim>1</dim> < !--dimension depends on input batch size -->
-           <dim>5</dim> < !--dimension depends on num_samples -->
+           <dim>1</dim> <!--dimension depends on input batch size -->
+           <dim>5</dim> <!--dimension depends on num_samples -->
@@ -171,16 +171,16 @@ Example 3 - 2D tensor, without replacement
-       < !-- probs value: [[-1, 1, 2], [50, 1, 21]] -->
-           <dim>2</dim> < !-- batch size of 2 -->
+       <!-- probs value: [[-1, 1, 2], [50, 1, 21]] -->
+           <dim>2</dim> <!-- batch size of 2 -->
            <dim>3</dim>
-       < !-- num_samples value: 10 -->
+       <!-- num_samples value: 10 -->
-           <dim>2</dim> < !--dimension depends on input batch size -->
-           <dim>10</dim> < !--dimension depends on num_samples -->
+           <dim>2</dim> <!--dimension depends on input batch size -->
+           <dim>10</dim> <!--dimension depends on num_samples -->
@@ -193,16 +193,16 @@ Example 3 - 2D tensor, without replacement
-       < !-- probs value: [[0.1, 0.5, 0.4]] -->
-           <dim>2</dim> < !-- batch size of 2 -->
+       <!-- probs value: [[0.1, 0.5, 0.4]] -->
+           <dim>2</dim> <!-- batch size of 2 -->
            <dim>3</dim>
-       < !-- num_samples value: 2 -->
+       <!-- num_samples value: 2 -->
-           <dim>2</dim> < !-- batch size of 2 -->
-           <dim>2</dim> < !-- 2 unique samples of classes -->
+           <dim>2</dim> <!-- batch size of 2 -->
+           <dim>2</dim> <!-- 2 unique samples of classes -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/RandomUniform_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/RandomUniform_8.rst
index d7b63d49b83d54..526d13d594afdb 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/RandomUniform_8.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/RandomUniform_8.rst
@@ -5,7 +5,7 @@ RandomUniform
 
 .. meta::
-  :description: Learn about RandomUniform-8 - a generation operation, which can be 
+  :description: Learn about RandomUniform-8 - a generation operation, which can be
                 performed on three required input tensors.
 
 **Versioned name**: *RandomUniform-8*
@@ -16,10 +16,10 @@ RandomUniform
 
 **Detailed description**:
 
-*RandomUniform* operation generates random numbers from a uniform distribution in the range ``[minval, maxval)``. 
-The generation algorithm is based on underlying random integer generator that uses Philox algorithm. Philox algorithm 
-is a counter-based pseudo-random generator, which produces uint32 values. Single invocation of Philox algorithm returns 
-four result random values, depending on the given *key* and *counter* values. *Key* and *counter* are initialized 
+*RandomUniform* operation generates random numbers from a uniform distribution in the range ``[minval, maxval)``.
+The generation algorithm is based on underlying random integer generator that uses Philox algorithm. Philox algorithm
+is a counter-based pseudo-random generator, which produces uint32 values. Single invocation of Philox algorithm returns
+four result random values, depending on the given *key* and *counter* values. *Key* and *counter* are initialized
 with *global_seed* and *op_seed* attributes respectively.
 If both seed values equal to zero, RandomUniform generates non-deterministic sequence.
@@ -32,7 +32,7 @@ If both seed values equal to zero, RandomUniform generates non-deterministic seq
 
 Link to the original paper `Parallel Random Numbers: As Easy as 1, 2, 3 `__.
 
-The result of Philox is calculated by applying a fixed number of *key* and *counter* updating so-called "rounds". 
+The result of Philox is calculated by applying a fixed number of *key* and *counter* updating so-called "rounds".
 This implementation uses 4x32_10 version of Philox algorithm, where number of rounds = 10.
 
 Suppose we have *n* which determines *n*-th 4 elements of random sequence.
@@ -43,7 +43,7 @@ In each round *key*, *counter* and *n* are splitted to pairs of uint32 values:
 
    R = cast\_to\_uint32(value)\\
    L = cast\_to\_uint32(value >> 32),
 
-where *cast\_to\_uint32* - static cast to uint32, *value* - uint64 input value, *L*, *R* - uint32 
+where *cast\_to\_uint32* - static cast to uint32, *value* - uint64 input value, *L*, *R* - uint32
 result values, >> - bitwise right shift.
 
 Then *n* and *counter* are updated with the following formula:
@@ -68,7 +68,7 @@ Values :math:`L'_{n}, R'_{n}, L'_{counter}, R'_{counter}` are resulting four ran
 
 Float values between [0..1) are obtained from 32-bit integers by the following rules.
 
-Float16 is formatted as follows: *sign* (1 bit) *exponent* (5 bits) *mantissa* (10 bits). The value is interpreted 
+Float16 is formatted as follows: *sign* (1 bit) *exponent* (5 bits) *mantissa* (10 bits). The value is interpreted
 using following formula:
 
 .. math::
@@ -99,7 +99,7 @@ where x is uint32 generated random value.
 
 Float32 is formatted as follows: *sign* (1 bit) *exponent* (8 bits) *mantissa* (23 bits). The value is interpreted using following formula:
 
 .. math::
-   
+
    (-1)^{sign} * 1, mantissa * 2 ^{exponent - 127}
 
@@ -117,7 +117,7 @@ So the resulting float value is:
 
 .. code-block:: xml
    :force:
-   
+
    val = ((exponent << 23) | x & 0x7fffffu) - 1.0,
 
 where x is uint32 generated random value.
@@ -125,7 +125,7 @@ where x is uint32 generated random value.
 
 Double is formatted as follows: *sign* (1 bit) *exponent* (11 bits) *mantissa* (52 bits). The value is interpreted using following formula:
 
 .. math::
-   
+
    (-1)^{sign} * 1, mantissa * 2 ^{exponent - 1023}
 
@@ -133,7 +133,7 @@ so to obtain double values *sign*, *exponent* and *mantissa* are set as follows:
 
 .. code-block:: xml
    :force:
-   
+
    sign = 0
    exponent = 1023 - representation of a zero exponent.
    mantissa = 52 right bits from two concatinated uint32 values from random integer generator.
@@ -143,7 +143,7 @@ So the resulting double is obtained as follows:
 
 .. code-block:: xml
    :force:
-   
+
    mantissa_h = x0 & 0xfffffu;  // upper 20 bits of mantissa
    mantissa_l = x1;             // lower 32 bits of mantissa
    mantissa = (mantissa_h << 32) | mantissa_l;
@@ -156,7 +156,7 @@ To obtain a value in a specified range each value is processed with the followin
 
 For float values:
 
 .. math::
-   
+
    result = x * (maxval - minval) + minval,
 
 where *x* is random float or double value between [0..1).
@@ -174,7 +174,7 @@ Example 1. *RandomUniform* output with ``global_seed`` = 150, ``op_seed`` = 10,
 .. code-block:: xml
    :force:
-   
+
    input_shape = [ 3, 3 ]
    output  = [[0.7011236  0.30539632 0.93931055]
               [0.9456035  0.11694777 0.50770056]
@@ -185,7 +185,7 @@ Example 2. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100,
 .. code-block:: xml
    :force:
-   
+
    input_shape = [ 2, 2 ]
 
    minval = 2
@@ -200,7 +200,7 @@ Example 3. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100,
 .. code-block:: xml
    :force:
-   
+
    input_shape = [ 2, 3 ]
 
    minval = 50
@@ -261,11 +261,11 @@ Example 3. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100,
-       < !-- shape value: [2, 3, 10] -->
+       <!-- shape value: [2, 3, 10] -->
            <dim>3</dim>
-       < !-- min value -->
-       < !-- max value -->
+       <!-- min value -->
+       <!-- max value -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_1.rst
index fa0b5fe6bb1dee..689f8ae0e617e1 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_1.rst
@@ -5,7 +5,7 @@ Range
 
 .. meta::
-  :description: Learn about Range-1 - a generation operation, which can be 
+  :description: Learn about Range-1 - a generation operation, which can be
                 performed on three required input tensors.
 
 **Versioned name**: *Range-1*
@@ -46,7 +46,7 @@ For a positive ``step``:
 
 for a negative ``step``:
 
 .. math::
-   
+
    start>=val[i]>stop,
 
@@ -66,16 +66,16 @@ where
-       < !-- start value: 2 -->
+       <!-- start value: 2 -->
-       < !-- stop value: 23 -->
+       <!-- stop value: 23 -->
-       < !-- step value: 3 -->
+       <!-- step value: 3 -->
-           <dim>7</dim> < !-- [ 2, 5, 8, 11, 14, 17, 20] -->
+           <dim>7</dim> <!-- [ 2, 5, 8, 11, 14, 17, 20] -->
@@ -88,16 +88,16 @@ where
-       < !-- start value: 23 -->
+       <!-- start value: 23 -->
-       < !-- stop value: 2 -->
+       <!-- stop value: 2 -->
-       < !-- step value: -3 -->
+       <!-- step value: -3 -->
-           <dim>7</dim> < !-- [23, 20, 17, 14, 11, 8, 5] -->
+           <dim>7</dim> <!-- [23, 20, 17, 14, 11, 8, 5] -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_4.rst
index 0a0418124d75f1..471eeef22482ab 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_4.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_4.rst
@@ -5,7 +5,7 @@ Range
 
 .. meta::
-  :description: Learn about Range-4 - a generation operation, which can be 
+  :description: Learn about Range-4 - a generation operation, which can be
                 performed on three required input tensors.
 
 **Versioned name**: *Range-4*
@@ -81,16 +81,16 @@ This is aligned with PyTorch's operation ``torch.arange``, to align with tensorf
-       < !-- start value: 2 -->
+       <!-- start value: 2 -->
-       < !-- stop value: 23 -->
+       <!-- stop value: 23 -->
-       < !-- step value: 3 -->
+       <!-- step value: 3 -->
-           <dim>7</dim> < !-- [ 2, 5, 8, 11, 14, 17, 20] -->
+           <dim>7</dim> <!-- [ 2, 5, 8, 11, 14, 17, 20] -->
@@ -104,16 +104,16 @@ This is aligned with PyTorch's operation ``torch.arange``, to align with tensorf
-       < !-- start value: 23 -->
+       <!-- start value: 23 -->
-       < !-- stop value: 2 -->
+       <!-- stop value: 2 -->
-       < !-- step value: -3 -->
+       <!-- step value: -3 -->
-           <dim>7</dim> < !-- [23, 20, 17, 14, 11, 8, 5] -->
+           <dim>7</dim> <!-- [23, 20, 17, 14, 11, 8, 5] -->
@@ -127,16 +127,16 @@ This is aligned with PyTorch's operation ``torch.arange``, to align with tensorf
-       < !-- start value: 1 -->
+       <!-- start value: 1 -->
-       < !-- stop value: 2.5 -->
+       <!-- stop value: 2.5 -->
-       < !-- step value: 0.5 -->
+       <!-- step value: 0.5 -->
-           <dim>3</dim> < !-- [ 1.0, 1.5, 2.0] -->
+           <dim>3</dim> <!-- [ 1.0, 1.5, 2.0] -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toBGR_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toBGR_8.rst
index 8b5e46eeb6ca98..a3df192c305726 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toBGR_8.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toBGR_8.rst
@@ -5,7 +5,7 @@ I420toBGR
 
 .. meta::
-  :description: Learn about I420toBGR-8 - an image processing operation, which 
+  :description: Learn about I420toBGR-8 - an image processing operation, which
                 can be performed to convert image from I420 to BGR format.
 
 **Versioned name**: *I420toBGR-8*
@@ -70,19 +70,19 @@ Same as specified for :doc:`I420toRGB ` ope
-       < !-- Y plane -->
+       <!-- Y plane -->
            <dim>1</dim>
            <dim>480</dim>
            <dim>640</dim>
            <dim>1</dim>
-       < !-- U plane -->
+       <!-- U plane -->
            <dim>1</dim>
            <dim>240</dim>
            <dim>320</dim>
            <dim>1</dim>
-       < !-- V plane -->
+       <!-- V plane -->
            <dim>1</dim>
            <dim>240</dim>
            <dim>320</dim>
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toRGB_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toRGB_8.rst
index a0d4a3d0532e28..8d37f583503ec1 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toRGB_8.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toRGB_8.rst
@@ -5,7 +5,7 @@ I420toRGB
 
 .. meta::
-  :description: Learn about I420toRGB-8 - an image processing operation, which 
+  :description: Learn about I420toRGB-8 - an image processing operation, which
                 can be performed to convert image from I420 to RGB format.
 
 **Versioned name**: *I420toRGB-8*
@@ -113,19 +113,19 @@ Input I420 image tensor shall have ``NHWC (also known as NYXC)`` layout and can
-       < !-- Y plane -->
+       <!-- Y plane -->
            <dim>1</dim>
            <dim>480</dim>
            <dim>640</dim>
            <dim>1</dim>
-       < !-- U plane -->
+       <!-- U plane -->
            <dim>1</dim>
            <dim>240</dim>
            <dim>320</dim>
            <dim>1</dim>
-       < !-- V plane -->
+       <!-- V plane -->
            <dim>1</dim>
            <dim>240</dim>
            <dim>320</dim>
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_1.rst
index 0bed435759eb60..470fa5b7099006 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_1.rst
@@ -5,7 +5,7 @@ Interpolate
 
 .. meta::
-  :description: Learn about I420toRGB-8 - an image processing operation, which 
+  :description: Learn about I420toRGB-8 - an image processing operation, which
                 can be performed on two required tensors.
 
 **Versioned name**: *Interpolate-1*
@@ -91,7 +91,7 @@ This is a scalar that specifies padding for each spatial dimension.
            <dim>80</dim>
-           <dim>2</dim> < !--The values in this input are [50, 60] -->
+           <dim>2</dim> <!--The values in this input are [50, 60] -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_11.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_11.rst
index b497cd42d297f9..281607f2504e62 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_11.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_11.rst
@@ -5,7 +5,7 @@ Interpolate
 
 .. meta::
-  :description: Learn about Interpolate-11 - an image processing operation, which 
+  :description: Learn about Interpolate-11 - an image processing operation, which
                 can be performed on two required and one optional tensor.
 
 **Versioned name**: *Interpolate-11*
@@ -129,13 +129,13 @@ Interpolate
            <dim>80</dim>
-           <dim>2</dim> < !--The values in this input are [24, 160] -->
+           <dim>2</dim> <!--The values in this input are [24, 160] -->
-           <dim>2</dim> < !--The values in this input are [0.5, 2.0] -->
+           <dim>2</dim> <!--The values in this input are [0.5, 2.0] -->
-           <dim>2</dim> < !--The values in this input are [2, 3] (axes). -->
+           <dim>2</dim> <!--The values in this input are [2, 3] (axes). -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_4.rst
index c81ccff8eac943..7572f7c1bc97ac 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_4.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_4.rst
@@ -5,7 +5,7 @@ Interpolate
 
 .. meta::
-  :description: Learn about Interpolate-4 - an image processing operation, which 
+  :description: Learn about Interpolate-4 - an image processing operation, which
                 can be performed on three required and one optional tensor.
 
 **Versioned name**: *Interpolate-4*
@@ -128,7 +128,7 @@ Calculations are performed according to the following rules.
    import math
    import numpy as np
    from enum import Enum, unique
-   
+
    class GetNearestPixel:
       def __init__(self, mode: str):
           self.func = {
@@ -138,37 +138,37 @@ Calculations are performed according to the following rules.
              'round_prefer_floor': GetNearestPixel.prefer_floor_func,
              'round_prefer_ceil': GetNearestPixel.prefer_ceil_func,
              'floor': GetNearestPixel.floor_func,
              'ceil': GetNearestPixel.ceil_func,
              'simple': GetNearestPixel.simple_func
          }[mode]
-      
+
       def __call__(self, x_original, is_downsample):
           return self.func(x_original, is_downsample)
-      
+
       @staticmethod
       def prefer_floor_func(x_original, is_downsample):
           if x_original == int(x_original) + 0.5:
               return int(math.floor(x_original))
           else:
               return int(round(x_original))
-      
+
       @staticmethod
       def prefer_ceil_func(x_original, is_downsample):
           return int(round(x_original))
-      
+
       @staticmethod
       def floor_func(x_original, is_downsample):
           return int(math.floor(x_original))
-      
+
       @staticmethod
       def ceil_func(x_original, is_downsample):
           return int(math.ceil(x_original))
-      
+
       @staticmethod
       def simple_func(x_original, is_downsample):
           if is_downsample:
               return int(math.ceil(x_original))
           else:
               return int(x_original)
-   
-   
+
+
    class GetOriginalCoordinate:
       def __init__(self, mode: str):
           self.func = {
              'half_pixel': GetOriginalCoordinate.half_pixel_func,
              'pytorch_half_pixel': GetOriginalCoordinate.pytorch_half_pixel_func,
              'asymmetric': GetOriginalCoordinate.asymmetric_func,
@@ -178,31 +178,31 @@ Calculations are performed according to the following rules.
              'tf_half_pixel_for_nn': GetOriginalCoordinate.tf_half_pixel_for_nn_func,
              'align_corners': GetOriginalCoordinate.align_corners_func
          }[mode]
-      
+
       def __call__(self, x_resized, x_scale, length_resized, length_original):
           return self.func(x_resized, x_scale, length_resized, length_original)
-      
+
       @staticmethod
       def half_pixel_func(x_resized, x_scale, length_resized, length_original):
           return ((x_resized + 0.5) / x_scale) - 0.5
-      
+
       @staticmethod
       def pytorch_half_pixel_func(x_resized, x_scale, length_resized, length_original):
           return (x_resized + 0.5) / x_scale - 0.5 if length_resized > 1 else 0.0
-      
+
       @staticmethod
       def asymmetric_func(x_resized, x_scale, length_resized, length_original):
           return x_resized / x_scale
-      
+
       @staticmethod
       def tf_half_pixel_for_nn_func(x_resized, x_scale, length_resized, length_original):
           return (x_resized + 0.5) / x_scale
-      
+
       @staticmethod
       def align_corners_func(x_resized, x_scale, length_resized, length_original):
           return 0 if length_resized == 1 else x_resized * (length_original - 1) / (length_resized - 1)
-   
-   
+
+
    def get_cubic_coeff(s, a):
       abs_s = abs(s)
       coeff = np.zeros(4)
@@ -211,18 +211,18 @@ Calculations are performed according to the following rules.
       coeff[2] = (((-a -2.0) * abs_s+ (2.0 * a + 3.0)) * abs_s - a) * abs_s
       coeff[3] = - a * abs_s * abs_s * (abs_s - 1.0)
       return coeff
-   
-   
+
+
    def triangle_coeffs(dz):
       return np.maximum(0.0, 1.0 - np.abs(dz))
-   
-   
+
+
    @unique
    class ShapeCalculationMode(Enum):
       SIZES = 0
       SCALES = 1
-   
-   
+
+
    class InterpolateCalculation:
       def __init__(self, attrs: dict):
           self.mode = attrs['mode']
@@ -233,38 +233,38 @@ Calculations are performed according to the following rules.
              'linear_onnx': self.onnx_linear_interpolation
          }[self.mode]
          self.attrs = attrs
-         
+
          self.pads_begin = attrs.get('pads_begin', [0])
          self.pads_end = attrs.get('pads_end', [0])
          self.coordinate_transformation_mode = attrs.get('coordinate_transformation_mode', 'half_pixel')
          self.nearest_mode = attrs.get('nearest_mode', 'round_prefer_floor')
          self.cube_coeff = attrs.get('cube_coeff', -0.75)
          self.antialias = attrs.get('antialias', False)
-         
+
          self.shape_calculation_mode = {
             'sizes': ShapeCalculationMode.SIZES,
            'scales': ShapeCalculationMode.SCALES
          }[attrs['shape_calculation_mode']]
-         
+
          self.get_original_coordinate = self.get_coordinate_transformation_mode()
          self.get_nearest_pixel = GetNearestPixel(self.nearest_mode)
-      
-      
+
+
       def get_coordinate_transformation_mode(self):
           return GetOriginalCoordinate(self.coordinate_transformation_mode)
-      
+
       def shape_infer(self, input_data, sizes, scales):
           result = input_data.shape + self.pads_begin + self.pads_end
-          
+
          if self.shape_calculation_mode == ShapeCalculationMode.SIZES:
              for i, axis in enumerate(self.axes):
                  result[axis] = sizes[i]
          else:
              for i, axis in enumerate(self.axes):
                  result[axis] = math.floor(scales[i] * result[axis])
-          
+
          return result
-      
+
       @staticmethod
       def correct_pad(pad, rank):
           pad_len = len(pad)
@@ -274,17 +274,17 @@ Calculations are performed according to the following rules.
              return np.array(pad[: rank - 1]).astype(np.int64)
          else:
              return np.array(pad, dtype=np.int64)
-      
+
       def __call__(self, input_data, sizes, scales, axes):
           rank = input_data.ndim
           self.pads_begin = InterpolateCalculation.correct_pad(self.pads_begin, rank)
           self.pads_end = InterpolateCalculation.correct_pad(self.pads_end, rank)
           self.pads = list(zip(self.pads_begin, self.pads_end))
           self.axes = np.array(axes).astype(np.int64)
-          
+
          self.output_shape = self.shape_infer(input_data, sizes, scales)
          padded_data = np.pad(input_data, self.pads, 'constant')
-          
+
          if self.shape_calculation_mode == ShapeCalculationMode.SIZES:
              num_of_axes = len(self.axes)
              self.scales = np.zeros(num_of_axes)
@@ -292,18 +292,18 @@ Calculations are performed according to the following rules.
                  self.scales[i] = self.output_shape[axis] / padded_data.shape[axis]
          else:
              self.scales = scales
-          
+
          if self.mode == 'nearest':
              self.all_scales = np.ones(rank).astype(np.float)
              for i, axis in enumerate(self.axes):
                  self.all_scales[axis] = self.scales[i]
-          
+
          self.input_shape = padded_data.shape
          return self.func(padded_data)
-      
+
       def clip_coord(self, coord, axis):
           return max(0, min(coord, self.input_shape[axis] - 1))
-      
+
       def cubic_interpolation(self, input_data):
           rank = len(self.input_shape)
           result = np.zeros(self.output_shape)
@@ -328,28 +328,28 @@ Calculations are performed according to the following rules.
                      summa += coeffs_prod * input_data[tuple(coords_for_sum)]
                  result[coordinates] = summa
          return result
-      
+
       def linear_interpolation(self, input_data):
           result = np.zeros(self.output_shape)
           num_of_axes = len(self.axes)
           is_downsample = False
-          
+
          for scale in self.scales:
              is_downsample = is_downsample or (scale < 1)
-          
+
          antialias = is_downsample and self.antialias
-          
+
          a = np.zeros(num_of_axes)
          for i, _ in enumerate(self.axes):
              a[i] = self.scales[i] if antialias else 1.0
-          
+
          prod_of_a = np.prod(a)
          r = np.zeros(num_of_axes).astype(np.int64)
          for i, _ in enumerate(self.axes):
              r[i] = 2 if self.scales[i] > 1.0 else int(math.ceil(2.0/a[i]))
-          
+
          indices = [tuple(np.array(ind).astype(np.int64) - r) for ind in np.ndindex(tuple(2 * r + 1))]
-          
+
          for coordinates in np.ndindex(tuple(self.output_shape)):
              icoords = np.array(coordinates).astype(np.float64)
              icoords_r = np.array(coordinates).astype(np.float64)
@@ -357,51 +357,51 @@ Calculations are performed according to the following rules.
                  in_coord = self.get_original_coordinate(coordinates[axis], self.scales[i], self.output_shape[axis], self.input_shape[axis])
                  icoords[axis] = in_coord
                  icoords_r[axis] = round(in_coord)
-              
+
              summa = 0.0
              wsum = 0.0
-              
+
              for index in indices:
                  inner_coords = np.array(coordinates)
                  for i, axis in enumerate(self.axes):
                      inner_coords[axis] = index[i] + icoords_r[axis]
-                  
+
                  conditions = [inner_coords[axis] >= 0 and inner_coords[axis] < self.input_shape[axis] for axis in self.axes]
                  if not all(conditions):
                      continue
-                  
+
                  dz = np.zeros(num_of_axes)
                  for i, axis in enumerate(self.axes):
                      dz[i] = icoords[axis] - inner_coords[axis]
-                  
+
                  w = prod_of_a * np.prod(triangle_coeffs(a * dz))
                  wsum += w
                  summa += w * input_data[tuple(inner_coords)]
-              
+
              if wsum == 0:
                  result[coordinates] = 0.0
              else:
                  result[coordinates] = summa / wsum
-          
+
          return result
-      
+
       def onnx_linear_interpolation5D(self, input_data):
           rank = len(self.input_shape)
           assert rank in [3, 5], "mode 'linear_onnx' supports only 3D or 5D tensors"
           assert set(self.axes) == {2, 3, 4} or set(self.axes) == {0, 1, 2}, \
               "mode 'linear_onnx' supports only case when axes = {2, 3, 4} or axes = {0, 1, 2}"
-          
+
          result = np.zeros(self.output_shape)
-          
+
          if rank == 3:
              reshaped_data = np.reshape(input_data, (1, 1, self.input_shape[0], self.input_shape[1], self.input_shape[2]))
              result = np.reshape(result, (1, 1, self.output_shape[0], self.output_shape[1], self.output_shape[2]))
          else:
              reshaped_data = input_data
-          
+
          input_shape = np.array(reshaped_data.shape).astype(np.int64)
          output_shape = np.array(result.shape).astype(np.int64)
-          
+
          batch_size = input_shape[0];
          num_channels = input_shape[1];
          input_depth = input_shape[2];
@@ -410,31 +410,31 @@ Calculations are performed according to the following rules.
          output_depth = output_shape[2];
          output_height = output_shape[3];
          output_width = output_shape[4];
-          
+
          depth_scale = self.scales[0];
          height_scale = self.scales[1];
          width_scale = self.scales[2];
-          
+
          z_original = np.zeros(output_depth).astype(np.float)
          y_original = np.zeros(output_height).astype(np.float)
          x_original = np.zeros(output_width).astype(np.float)
-          
+
          in_z1 = np.zeros(output_depth).astype(np.int64)
          in_z2 = np.zeros(output_depth).astype(np.int64)
          in_y1 = np.zeros(output_height).astype(np.int64)
          in_y2 = np.zeros(output_height).astype(np.int64)
          in_x1 = np.zeros(output_width).astype(np.int64)
          in_x2 = np.zeros(output_width).astype(np.int64)
-          
+
          dz1 = np.zeros(output_depth).astype(np.float)
          dz2 = np.zeros(output_depth).astype(np.float)
-          
+
          dy1 = np.zeros(output_height).astype(np.float)
          dy2 = np.zeros(output_height).astype(np.float)
-          
+
          dx1 = np.zeros(output_width).astype(np.float)
          dx2 = np.zeros(output_width).astype(np.float)
-          
+
          for z in range(0, output_depth):
              in_z = self.get_original_coordinate(z, depth_scale, output_depth, input_depth)
              z_original[z] = in_z
@@ -443,11 +443,11 @@ Calculations are performed according to the following rules.
              in_z2[z] = min(in_z1[z] + 1, input_depth - 1)
              dz1[z] = abs(in_z - in_z1[z])
              dz2[z] = abs(in_z - in_z2[z])
-              
+
              if in_z1[z] == in_z2[z]:
                  dz1[z] = 0.5
                  dz2[z] = 0.5
-              
+
          for y in range(0, output_height):
              in_y = self.get_original_coordinate(y, height_scale, output_height, input_height)
              y_original[y] = in_y
@@ -456,19 +456,19 @@ Calculations are performed according to the following rules.
              in_y2[y] = min(in_y1[y] + 1, input_height - 1)
              dy1[y] = abs(in_y - in_y1[y])
              dy2[y] = abs(in_y - in_y2[y])
-              
+
              if in_y1[y] == in_y2[y]:
                  dy1[y] = 0.5
                  dy2[y] = 0.5
-              
+
          for x in range(0, output_width):
              in_x = self.get_original_coordinate(x, width_scale, output_width, input_width);
              x_original[x] = in_x
              in_x = max(0.0, min(in_x, input_width - 1));
-              
+
              in_x1[x] = min(in_x, input_width - 1);
              in_x2[x] = min(in_x1[x] + 1, input_width - 1);
-              
+
              dx1[x] = abs(in_x - in_x1[x]);
              dx2[x] = abs(in_x - in_x2[x]);
              if in_x1[x] == in_x2[x]:
@@ -487,33 +487,33 @@ Calculations are performed according to the following rules.
                          x212 = reshaped_data[n, c, in_z2[z], in_y1[y], in_x2[x]]
                          x122 = reshaped_data[n, c, in_z2[z], in_y2[y], in_x1[x]]
                          x222 = reshaped_data[n, c, in_z2[z], in_y2[y], in_x2[x]]
-                          
+
                          temp = dx2[x] * dy2[y] * dz2[z] * x111 + dx1[x] * dy2[y] * dz2[z] * x211
                          temp += dx2[x] * dy1[y] * dz2[z] * x121 + dx1[x] * dy1[y] * dz2[z] * x221
                          temp += dx2[x] * dy2[y] * dz1[z] * x112 + dx1[x] * dy2[y] * dz1[z] * x212
                          temp += dx2[x] * dy1[y] * dz1[z] * x122 + dx1[x] * dy1[y] * dz1[z] * x222
-                          
+
                          result[n, c, z, y, x] = temp
-          
+
          return np.reshape(result, self.output_shape)
-      
+
       def onnx_linear_interpolation4D(self, input_data):
           rank = len(self.input_shape)
           assert rank in [2, 4], "mode 'linear_onnx' supports only 2D or 4D tensors"
           assert set(self.axes) == {2, 3} or set(self.axes) == {0, 1}, \
               "mode 'linear_onnx' supports only case when axes = {2, 3} or axes = {0, 1}"
-          
+
          result = np.zeros(self.output_shape)
-          
+
          if rank == 2:
              reshaped_data = np.reshape(input_data, (1, 1, self.input_shape[0], self.input_shape[1]))
              result = np.reshape(result, (1, 1, self.output_shape[0], self.output_shape[1]))
          else:
              reshaped_data = input_data
-          
+
          input_shape = np.array(reshaped_data.shape).astype(np.int64)
          output_shape = np.array(result.shape).astype(np.int64)
-          
+
          output_height = output_shape[2]
          output_width = output_shape[3]
          input_height = input_shape[2]
@@ -522,21 +522,21 @@ Calculations are performed according to the following rules.
          width_scale = self.scales[1]
          batch_size = input_shape[0]
          num_channels = input_shape[1]
-          
+
          y_original = np.zeros(output_height).astype(np.float)
          x_original = np.zeros(output_width).astype(np.float)
-          
+
          in_y1 = np.zeros(output_height).astype(np.int64)
          in_y2 = np.zeros(output_height).astype(np.int64)
          in_x1 = np.zeros(output_width).astype(np.int64)
          in_x2 = np.zeros(output_width).astype(np.int64)
-          
+
          dy1 = np.zeros(output_height).astype(np.float)
          dy2 = np.zeros(output_height).astype(np.float)
-          
+
          dx1 = np.zeros(output_width).astype(np.float)
          dx2 = np.zeros(output_width).astype(np.float)
-          
+
          for y in range(0, output_height):
              in_y = self.get_original_coordinate(y, height_scale, output_height, input_height)
              y_original[y] = in_y
@@ -545,25 +545,25 @@ Calculations are performed according to the following rules.
              in_y2[y] = min(in_y1[y] + 1, input_height - 1)
              dy1[y] = abs(in_y - in_y1[y])
              dy2[y] = abs(in_y - in_y2[y])
-              
+
              if in_y1[y] == in_y2[y]:
                  dy1[y] = 0.5
                  dy2[y] = 0.5
-              
+
          for x in range(0, output_width):
              in_x = self.get_original_coordinate(x, width_scale, output_width, input_width);
              x_original[x] = in_x
              in_x = max(0.0, min(in_x, input_width - 1));
-              
+
              in_x1[x] = min(in_x, input_width - 1);
              in_x2[x] = min(in_x1[x] + 1, input_width - 1);
-              
+
              dx1[x] = abs(in_x - in_x1[x]);
              dx2[x] = abs(in_x - in_x2[x]);
              if in_x1[x] == in_x2[x]:
                  dx1[x] = 0.5
                  dx2[x] = 0.5
-          
+
          for n in range(0, batch_size):
              for c in range(0, num_channels):
                  for y in range(0, output_height):
@@ -574,21 +574,21 @@ Calculations are performed according to the following rules.
                          x22 = reshaped_data[n, c, in_y2[y], in_x2[x]]
                          temp = dx2[x] * dy2[y] * x11 + dx1[x] * dy2[y] * x21 + dx2[x] * dy1[y] * x12 + dx1[x] * dy1[y] * x22
                          result[n, c, y, x] = temp
-          
+
          return np.reshape(result, self.output_shape)
-      
+
       def onnx_linear_interpolation(self, input_data):
           rank = len(self.input_shape)
           assert rank in [2, 3, 4, 5], "mode 'linear_onnx' supports only 2D, 3D, 4D, or 5D tensors"
-          
+
          if rank in [2, 4]:
              self.onnx_linear_interpolation4D(input_data)
          else:
              self.onnx_linear_interpolation5D(input_data)
-      
+
       def nearest_interpolation(self, input_data):
           result = np.zeros(self.output_shape)
-          
+
          num_of_axes = len(self.axes)
          for coordinates in np.ndindex(tuple(self.output_shape)):
              input_coords = np.array(coordinates, dtype=np.int64)
@@ -597,7 +597,7 @@ Calculations are performed according to the following rules.
                  nearest_pixel = self.get_nearest_pixel(in_coord, scale < 1)
                  input_coords[axis] = max(0, min(nearest_pixel, self.input_shape[axis] - 1))
              result[coordinates] = input_data[tuple(input_coords)]
-          
+
          return result
@@ -617,13 +617,13 @@ Calculations are performed according to the following rules.
            <dim>80</dim>
-           <dim>2</dim> < !--The values in this input are [24, 160] -->
+           <dim>2</dim> <!--The values in this input are [24, 160] -->
-           <dim>2</dim> < !--The values in this input are [0.5, 2.0] -->
+           <dim>2</dim> <!--The values in this input are [0.5, 2.0] -->
-           <dim>2</dim> < !--The values in this input are [2, 3] (axes). -->
+           <dim>2</dim> <!--The values in this input are [2, 3] (axes). -->
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toBGR_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toBGR_8.rst
index 777d132f7a9e7e..5320ecd4fe4317 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toBGR_8.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toBGR_8.rst
@@ -5,7 +5,7 @@ NV12toBGR
 
 .. meta::
-  :description: Learn about NV12toBGR-8 - an image processing operation, which 
+  :description: Learn about NV12toBGR-8 - an image processing operation, which
                 can be performed to convert an image from NV12 to BGR format.
 
 **Versioned name**: *NV12toBGR-8*
@@ -70,13 +70,13 @@ Same as specified for :doc:`NV12toRGB ` ope
-       < !-- Y plane -->
+       <!-- Y plane -->
            <dim>1</dim>
            <dim>480</dim>
            <dim>640</dim>
            <dim>1</dim>
-       < !-- UV plane -->
+       <!-- UV plane -->
            <dim>1</dim>
            <dim>240</dim>
            <dim>320</dim>
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toRGB_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toRGB_8.rst
index 1044e6b18916c5..2012d9f3d0c642 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toRGB_8.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toRGB_8.rst
@@ -5,7 +5,7 @@ NV12toRGB
 
meta:: - :description: Learn about NV12toRGB-8 - an image processing operation, which + :description: Learn about NV12toRGB-8 - an image processing operation, which can be performed to convert an image from NV12 to RGB format. **Versioned name**: *NV12toRGB-8* @@ -102,13 +102,13 @@ Input NV12 image tensor shall have ``NHWC (also known as NYXC)`` layout and can - < !-- Y plane --> + 1 480 640 1 - < !-- UV plane --> + 1 240 320 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/BatchToSpace_2.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/BatchToSpace_2.rst index e7a52a05faf540..051cababe93065 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/BatchToSpace_2.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/BatchToSpace_2.rst @@ -5,7 +5,7 @@ BatchToSpace .. meta:: - :description: Learn about BatchToSpace-2 - a data movement operation, + :description: Learn about BatchToSpace-2 - a data movement operation, which can be performed on four required input tensors. **Versioned name**: *BatchToSpace-2* @@ -21,25 +21,25 @@ BatchToSpace 1. Reshape ``data`` input to produce a tensor of shape :math:`[B_1, \dots, B_{N - 1}, \frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1, D_2, \dots, D_{N - 1}]` .. math:: - + x^{\prime} = reshape(data, [B_1, \dots, B_{N - 1}, \frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1, D_2, \dots, D_{N - 1}]) 2. Permute dimensions of :math:`x^{\prime}` to produce a tensor of shape :math:`[\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1, B_1, D_2, B_2, \dots, D_{N-1}, B_{N - 1}]` .. math:: - + x^{\prime\prime} = transpose(x', [N, N + 1, 0, N + 2, 1, \dots, N + N - 1, N - 1]) 3. Reshape :math:`x^{\prime\prime}` to produce a tensor of shape :math:`[\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1 \times B_1, D_2 \times B_2, \dots, D_{N - 1} \times B_{N - 1}]` .. math:: - + x^{\prime\prime\prime} = reshape(x^{\prime\prime}, [\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1 \times B_1, D_2 \times B_2, \dots, D_{N - 1} \times B_{N - 1}]) 4. Crop the start and end of spatial dimensions of :math:`x^{\prime\prime\prime}` according to ``crops_begin`` and ``crops_end`` inputs to produce the output :math:`y` of shape: .. math:: - + \left[\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, crop(D_1 \times B_1, CB_1, CE_1), crop(D_2 \times B_2, CB_2, CE_2), \dots , crop(D_{N - 1} \times B_{N - 1}, CB_{N - 1}, CE_{N - 1})\right] Where @@ -80,27 +80,27 @@ Example: 2D input tensor ``data`` .. code-block:: xml :force: - + - < !-- data --> - 10 < !-- batch --> - 2 < !-- spatial dimension 1 --> + + 10 + 2 - < !-- block_shape value: [1, 5] --> + 2 - < !-- crops_begin value: [0, 2] --> + 2 - < !-- crops_end value: [0, 0] --> + 2 - 2 < !-- data.shape[0] / (block_shape.shape[0] * block_shape.shape[1]) --> - 8 < !-- data.shape[1] * block_shape.shape[1] - crops_begin[1] - crops_end[1]--> + 2 + 8 @@ -109,33 +109,33 @@ Example: 5D input tensor ``data`` .. 
code-block:: xml :force: - + - < !-- data --> - 48 < !-- batch --> - 3 < !-- spatial dimension 1 --> - 3 < !-- spatial dimension 2 --> - 1 < !-- spatial dimension 3 --> - 3 < !-- spatial dimension 4 --> + + 48 + 3 + 3 + 1 + 3 - < !-- block_shape value: [1, 2, 4, 3, 1] --> + 5 - < !-- crops_begin value: [0, 0, 1, 0, 0] --> + 5 - < !-- crops_end value: [0, 0, 1, 0, 0] --> + 5 - 2 < !-- data.shape[0] / (block_shape.shape[0] * block_shape.shape[1] * ... * block_shape.shape[4]) --> - 6 < !-- data.shape[1] * block_shape.shape[1] - crops_begin[1] - crops_end[1]--> - 10 < !-- data.shape[2] * block_shape.shape[2] - crops_begin[2] - crops_end[2] --> - 3 < !-- data.shape[3] * block_shape.shape[3] - crops_begin[3] - crops_end[3] --> - 3 < !-- data.shape[4] * block_shape.shape[4] - crops_begin[4] - crops_end[4] --> + 2 + 6 + 10 + 3 + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_1.rst index 37f7c4e3f101ff..583a182609403e 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_1.rst @@ -5,7 +5,7 @@ Broadcast .. meta:: - :description: Learn about Broadcast-1 - a data movement operation, + :description: Learn about Broadcast-1 - a data movement operation, which can be performed on two required and one optional input tensor. **Versioned name**: *Broadcast-1* @@ -53,7 +53,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape .. code-block:: xml :force: - + @@ -63,9 +63,9 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 1 - 4 < !--The tensor contains 4 elements: [1, 16, 50, 50] --> + 4 - < !-- the 3rd input shouldn't be provided with mode="numpy" --> + @@ -76,7 +76,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape - + @@ -84,10 +84,10 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 16 - 4 < !--The tensor contains 4 elements: [1, 16, 50, 50] --> + 4 - 1 < !--The tensor contains 1 elements: [1] --> + 1 @@ -99,7 +99,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape - + @@ -108,10 +108,10 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 50 - 4 < !--The tensor contains 4 elements: [1, 50, 50, 16] --> + 4 - 2 < !--The tensor contains 2 elements: [1, 2] --> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_3.rst index e7b3f3a0d3d1fd..e13946f4780518 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_3.rst @@ -5,7 +5,7 @@ Broadcast .. meta:: - :description: Learn about Broadcast-3 - a data movement operation, + :description: Learn about Broadcast-3 - a data movement operation, which can be performed on two required and one optional input tensor. **Versioned name**: *Broadcast-3* @@ -61,7 +61,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape .. 
code-block:: xml :force: - + @@ -71,9 +71,9 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 1 - 4 < !--The tensor contains 4 elements: [1, 16, 50, 50] --> + 4 - < !-- the 3rd input shouldn't be provided with mode="numpy" --> + @@ -84,7 +84,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape - + @@ -92,10 +92,10 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 16 - 4 < !--The tensor contains 4 elements: [1, 16, 50, 50] --> + 4 - 1 < !--The tensor contains 1 elements: [1] --> + 1 @@ -107,7 +107,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape - + @@ -116,10 +116,10 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 50 - 4 < !--The tensor contains 4 elements: [1, 50, 50, 16] --> + 4 - 2 < !--The tensor contains 2 elements: [1, 2] --> + 2 @@ -131,7 +131,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape - + @@ -141,9 +141,9 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 1 - 4 < !--The tensor contains 4 elements: [1, 1, 50, 50] --> + 4 - < !-- the 3rd input shouldn't be provided with mode="bidirectional" --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Concat_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Concat_1.rst index 6c07321e08be99..7c3c11131f49e4 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Concat_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Concat_1.rst @@ -5,7 +5,7 @@ Concat .. meta:: - :description: Learn about Concat-1 - a data movement operation, + :description: Learn about Concat-1 - a data movement operation, which can be performed on arbitrary number of input tensors. **Versioned name**: *Concat-1* @@ -39,25 +39,25 @@ Concat .. code-block:: xml :force: - + 1 - 8 < !-- axis for concatenation --> + 8 50 50 1 - 16 < !-- axis for concatenation --> + 16 50 50 1 - 32 < !-- axis for concatenation --> + 32 50 50 @@ -65,7 +65,7 @@ Concat 1 - 56 < !-- concatenated axis: 8 + 16 + 32 = 48 --> + 56 50 50 @@ -75,25 +75,25 @@ Concat .. code-block:: xml :force: - + 1 - 8 < !-- axis for concatenation --> + 8 50 50 1 - 16 < !-- axis for concatenation --> + 16 50 50 1 - 32 < !-- axis for concatenation --> + 32 50 50 @@ -101,7 +101,7 @@ Concat 1 - 56 < !-- concatenated axis: 8 + 16 + 32 = 48 --> + 56 50 50 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/DepthToSpace_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/DepthToSpace_1.rst index de7af7a597b276..1df751ac0c5f68 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/DepthToSpace_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/DepthToSpace_1.rst @@ -5,7 +5,7 @@ DepthToSpace .. meta:: - :description: Learn about DepthToSpace-1 - a data movement operation, + :description: Learn about DepthToSpace-1 - a data movement operation, which can be performed on a single input tensor. 
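To make the *Broadcast* examples above concrete, here is a minimal NumPy sketch of ``mode="explicit"``, where ``axes_mapping`` pins each source dimension to a target axis. ``broadcast_explicit`` is a hypothetical helper written for this illustration, not an OpenVINO API; ``mode="numpy"`` needs no helper at all, since it is ordinary right-aligned broadcasting.

.. code-block:: py

   import numpy as np

   def broadcast_explicit(data, target_shape, axes_mapping):
       # place each source dimension at its mapped target axis, 1 elsewhere
       expanded = [1] * len(target_shape)
       for src_axis, dst_axis in enumerate(axes_mapping):
           expanded[dst_axis] = data.shape[src_axis]
       return np.broadcast_to(data.reshape(expanded), target_shape)

   gamma = np.arange(16, dtype=np.float32)                  # shape [16]
   out = broadcast_explicit(gamma, (1, 16, 50, 50), axes_mapping=[1])
   print(out.shape)                                         # (1, 16, 50, 50)

   # mode="numpy": plain right-aligned NumPy broadcasting, no mapping needed
   print(np.broadcast_to(np.ones((16, 1, 1)), (1, 16, 50, 50)).shape)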
**Versioned name**: *DepthToSpace-1* @@ -21,7 +21,7 @@ DepthToSpace The operation is equivalent to the following transformation of the input tensor ``data`` with ``K`` spatial dimensions of shape ``[N, C, D1, D2, ..., DK]`` to *Y* output tensor. If ``mode = blocks_first``: .. code-block:: cpp - + x' = reshape(data, [N, block_size, block_size, ..., block_size, C / (block_size ^ K), D1, D2, ..., DK]) x'' = transpose(x', [0, K + 1, K + 2, 1, K + 3, 2, K + 4, 3, ..., K + (K + 1), K]) y = reshape(x'', [N, C / (block_size ^ K), D1 * block_size, D2 * block_size, D3 * block_size, ..., DK * block_size]) @@ -29,7 +29,7 @@ The operation is equivalent to the following transformation of the input tensor If ``mode = depth_first``: .. code-block:: cpp - + x' = reshape(data, [N, C / (block_size ^ K), block_size, block_size, ..., block_size, D1, D2, ..., DK]) x'' = transpose(x', [0, 1, K + 2, 2, K + 3, 3, K + 4, 4, ..., K + (K + 1), K + 1]) y = reshape(x'', [N, C / (block_size ^ K), D1 * block_size, D2 * block_size, D3 * block_size, ..., DK * block_size]) @@ -70,7 +70,7 @@ If ``mode = depth_first``: .. code-block:: xml :force: - + @@ -83,10 +83,10 @@ If ``mode = depth_first``: - 5 < !-- data.shape[0] --> - 7 < !-- data.shape[1] / (block_size ^ 2) --> - 4 < !-- data.shape[2] * block_size --> - 6 < !-- data.shape[3] * block_size --> + 5 + 7 + 4 + 6 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_1.rst index a2d18a3c4c65a1..35036ddf555950 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_1.rst @@ -5,14 +5,14 @@ Gather .. meta:: - :description: Learn about Gather-1 - a data movement operation, + :description: Learn about Gather-1 - a data movement operation, which can be performed on three required input tensors. **Versioned name:** *Gather-1* **Category:** *Data movement* -**Short description:** *Gather* operation takes slices of data in the first input tensor according +**Short description:** *Gather* operation takes slices of data in the first input tensor according to the indices specified in the second input tensor and axis from the third input. **Detailed description** @@ -30,13 +30,13 @@ Where ``axis`` is the value from the third input. * **1**: Tensor with arbitrary data. **Required.** * **2**: Tensor with indices to gather. The values for indices are in the range ``[0, input1[axis] - 1]``. **Required.** -* **3**: Scalar or 1D tensor *axis* is a dimension index to gather data from. For example, *axis* equal - to 1 means that gathering is performed over the first dimension. Negative value means reverse indexing. +* **3**: Scalar or 1D tensor *axis* is a dimension index to gather data from. For example, *axis* equal + to 1 means that gathering is performed over the first dimension. Negative value means reverse indexing. Allowed values are from ``[-len(input1.shape), len(input1.shape) - 1]``. **Required.** **Outputs** -* **1**: The resulting tensor that consists of elements from the first input tensor gathered by indices +* **1**: The resulting tensor that consists of elements from the first input tensor gathered by indices from the second input tensor. 
Shape of the tensor is ``[input1.shape[:axis], input2.shape, input1.shape[axis + 1:]]`` **Example** @@ -58,17 +58,17 @@ Where ``axis`` is the value from the third input. 20 28 - < !-- axis = 1 --> + - 6 < !-- embedded dimension from the 1st input --> - 15 < !-- embedded dimension from the 2nd input --> - 4 < !-- embedded dimension from the 2nd input --> - 20 < !-- embedded dimension from the 2nd input --> - 28 < !-- embedded dimension from the 2nd input --> - 10 < !-- embedded dimension from the 1st input --> - 24 < !-- embedded dimension from the 1st input --> + 6 + 15 + 4 + 20 + 28 + 10 + 24 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_7.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_7.rst index d2cee5ffd926c4..ebe248309a122d 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_7.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_7.rst @@ -5,7 +5,7 @@ Gather .. meta:: - :description: Learn about Gather-7 - a data movement operation, + :description: Learn about Gather-7 - a data movement operation, which can be performed on three required input tensors. **Versioned name**: *Gather-7* @@ -29,12 +29,12 @@ the number of batch dimensions. ``N`` and ``M`` are numbers of dimensions of ``d **Attributes**: * *batch_dims* - - * **Description**: *batch_dims* (also denoted as ``b``) is a leading number of dimensions of ``data`` - tensor and ``indices`` representing the batches, and *Gather* starts to gather from the ``b`` - dimension. It requires the first ``b`` dimensions in `data` and `indices` tensors to be equal. + + * **Description**: *batch_dims* (also denoted as ``b``) is a leading number of dimensions of ``data`` + tensor and ``indices`` representing the batches, and *Gather* starts to gather from the ``b`` + dimension. It requires the first ``b`` dimensions in `data` and `indices` tensors to be equal. If ``batch_dims`` is less than zero, the normalized value is used ``batch_dims = indices.rank + batch_dims``. - * **Range of values**: ``[-min(data.rank, indices.rank); min(data.rank, indices.rank)]`` and + * **Range of values**: ``[-min(data.rank, indices.rank); min(data.rank, indices.rank)]`` and ``batch_dims' <= axis'``. Where ``batch_dims'`` and ``axis'`` stand for normalized ``batch_dims`` and ``axis`` values. 
* **Type**: *T_AXIS* * **Default value**: 0 @@ -46,7 +46,7 @@ Example 1 with default *batch_dims* value: batch_dims = 0 axis = 0 - + indices = [0, 0, 4] data = [1, 2, 3, 4, 5] output = [1, 1, 5] @@ -58,15 +58,15 @@ Example 2 with non-default *batch_dims* value: batch_dims = 1 axis = 1 - + indices = [[0, 0, 4], <-- this is applied to the first batch [4, 0, 0]] <-- this is applied to the second batch indices_shape = (2, 3) - + data = [[1, 2, 3, 4, 5], <-- the first batch [6, 7, 8, 9, 10]] <-- the second batch data_shape = (2, 5) - + output = [[ 1, 1, 5], [10, 6, 6]] output_shape = (2, 3) @@ -78,24 +78,24 @@ Example 3 with non-default *batch_dims* value: batch_dims = 2 axis = 2 - + indices = [[[0, 0, 4], <-- this is applied to the first batch, index = (0, 0) [4, 0, 0]], <-- this is applied to the second batch, index = (0, 1) - + [[1, 2, 4], <-- this is applied to the third batch, index = (1, 0) [4, 3, 2]]] <-- this is applied to the fourth batch, index = (1, 1) indices_shape = (2, 2, 3) - + data = [[[1, 2, 3, 4, 5], <-- the first batch, index = (0, 0) [6, 7, 8, 9, 10]], <-- the second batch, index = (0, 1) - + [[11, 12, 13, 14, 15], <-- the third batch, index = (1, 0) [16, 17, 18, 19, 20]]] <-- the fourth batch, index = (1, 1) data_shape = (2, 2, 5) - + output = [[[ 1, 1, 5], [10, 6, 6]], - + [[12, 13, 15], [20, 19, 18]]] output_shape = (2, 2, 3) @@ -106,28 +106,28 @@ Example 4 with *axis* > *batch_dims*: batch_dims = 1 axis = 2 - + indices = [[1, 2, 4], <-- this is applied to the first batch [4, 3, 2]] <-- this is applied to the second batch indices_shape = (2, 3) - + data = [[[[ 1, 2, 3, 4], <-- first batch [ 5, 6, 7, 8], [ 9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]]], - + [[[21, 22, 23, 24], <-- second batch [25, 26, 27, 28], [29, 30, 31, 32], [33, 34, 35, 36], [37, 38, 39, 40]]]] data_shape = (2, 1, 5, 4) - + output = [[[[ 5, 6, 7, 8], [ 9, 10, 11, 12], [17, 18, 19, 20]]], - + [[[37, 38, 39, 40], [33, 34, 35, 36], [29, 30, 31, 32]]]] @@ -140,15 +140,15 @@ Example 5 with negative *batch_dims* value: batch_dims = -1 <-- normalized value will be indices.rank + batch_dims = 2 - 1 = 1 axis = 1 - + indices = [[0, 0, 4], <-- this is applied to the first batch [4, 0, 0]] <-- this is applied to the second batch indices_shape = (2, 3) - + data = [[1, 2, 3, 4, 5], <-- the first batch [6, 7, 8, 9, 10]] <-- the second batch data_shape = (2, 5) - + output = [[ 1, 1, 5], [10, 6, 6]] output_shape = (2, 3) @@ -167,7 +167,7 @@ Example 5 with negative *batch_dims* value: **Outputs** -* **1**: The resulting tensor of type *T* that consists of elements from ``data`` tensor gathered by ``indices``. +* **1**: The resulting tensor of type *T* that consists of elements from ``data`` tensor gathered by ``indices``. The shape of the output tensor is ``data.shape[:axis] + indices.shape[batch_dims:] + data.shape[axis + 1:]`` **Types** @@ -193,7 +193,7 @@ Example 5 with negative *batch_dims* value: 32 21 - < !-- axis = 1 --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_8.rst index c4df65f49e1be8..b2bb5bf0235c60 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_8.rst @@ -6,7 +6,7 @@ Gather .. 
meta:: - :description: Learn about Gather-8 - a data movement operation, + :description: Learn about Gather-8 - a data movement operation, which can be performed on three required input tensors. **Versioned name**: *Gather-8* @@ -33,10 +33,10 @@ range output data for corresponding index will be filled with zeros (Example 7). **Attributes**: * *batch_dims* - - * **Description**: *batch_dims* (also denoted as ``b``) is a leading number of dimensions of ``data`` tensor - and ``indices`` representing the batches, and *Gather* starts to gather from the ``b`` dimension. - It requires the first ``b`` dimensions in ``data`` and ``indices`` tensors to be equal. + + * **Description**: *batch_dims* (also denoted as ``b``) is a leading number of dimensions of ``data`` tensor + and ``indices`` representing the batches, and *Gather* starts to gather from the ``b`` dimension. + It requires the first ``b`` dimensions in ``data`` and ``indices`` tensors to be equal. If ``batch_dims`` is less than zero, normalized value is used ``batch_dims = indices.rank + batch_dims``. * **Range of values**: ``[-min(data.rank, indices.rank); min(data.rank, indices.rank)]`` and ``batch_dims' <= axis'``. Where ``batch_dims'`` and ``axis'`` stand for normalized ``batch_dims`` and ``axis`` values. @@ -50,7 +50,7 @@ Example 1 with default *batch_dims* value: batch_dims = 0 axis = 0 - + indices = [0, 0, 4] data = [1, 2, 3, 4, 5] output = [1, 1, 5] @@ -61,15 +61,15 @@ Example 2 with non-default *batch_dims* value: batch_dims = 1 axis = 1 - + indices = [[0, 0, 4], <-- this is applied to the first batch [4, 0, 0]] <-- this is applied to the second batch indices_shape = (2, 3) - + data = [[1, 2, 3, 4, 5], <-- the first batch [6, 7, 8, 9, 10]] <-- the second batch data_shape = (2, 5) - + output = [[ 1, 1, 5], [10, 6, 6]] output_shape = (2, 3) @@ -81,24 +81,24 @@ Example 3 with non-default *batch_dims* value: batch_dims = 2 axis = 2 - + indices = [[[0, 0, 4], <-- this is applied to the first batch, index = (0, 0) [4, 0, 0]], <-- this is applied to the second batch, index = (0, 1) - + [[1, 2, 4], <-- this is applied to the third batch, index = (1, 0) [4, 3, 2]]] <-- this is applied to the fourth batch, index = (1, 1) indices_shape = (2, 2, 3) - + data = [[[1, 2, 3, 4, 5], <-- the first batch, index = (0, 0) [6, 7, 8, 9, 10]], <-- the second batch, index = (0, 1) - + [[11, 12, 13, 14, 15], <-- the third batch, index = (1, 0) [16, 17, 18, 19, 20]]] <-- the fourth batch, index = (1, 1) data_shape = (2, 2, 5) - + output = [[[ 1, 1, 5], [10, 6, 6]], - + [[12, 13, 15], [20, 19, 18]]] output_shape = (2, 2, 3) @@ -109,28 +109,28 @@ Example 4 with *axis* > *batch_dims*: batch_dims = 1 axis = 2 - + indices = [[1, 2, 4], <-- this is applied to the first batch [4, 3, 2]] <-- this is applied to the second batch indices_shape = (2, 3) - + data = [[[[ 1, 2, 3, 4], <-- first batch [ 5, 6, 7, 8], [ 9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]]], - + [[[21, 22, 23, 24], <-- second batch [25, 26, 27, 28], [29, 30, 31, 32], [33, 34, 35, 36], [37, 38, 39, 40]]]] data_shape = (2, 1, 5, 4) - + output = [[[[ 5, 6, 7, 8], [ 9, 10, 11, 12], [17, 18, 19, 20]]], - + [[[37, 38, 39, 40], [33, 34, 35, 36], [29, 30, 31, 32]]]] @@ -143,15 +143,15 @@ Example 5 with negative *batch_dims* value: batch_dims = -1 <-- normalized value will be indices.rank + batch_dims = 2 - 1 = 1 axis = 1 - + indices = [[0, 0, 4], <-- this is applied to the first batch [4, 0, 0]] <-- this is applied to the second batch indices_shape = (2, 3) - + data = [[1, 2, 3, 4, 5], <-- the first 
batch [6, 7, 8, 9, 10]] <-- the second batch data_shape = (2, 5) - + output = [[ 1, 1, 5], [10, 6, 6]] output_shape = (2, 3) @@ -163,7 +163,7 @@ Example 6 with negative indices: batch_dims = 0 axis = 0 - + indices = [0, -2, -1] data = [1, 2, 3, 4, 5] output = [1, 4, 5] @@ -175,8 +175,8 @@ Example 7 with indices out of the range: batch_dims = 0 axis = 0 - - indices = [3, 10, -20] + + indices = [3, 10, -20] data = [1, 2, 3, 4, 5] output = [4, 0, 0] @@ -221,7 +221,7 @@ of the output tensor is ``data.shape[:axis] + indices.shape[batch_dims:] + data. 32 21 - < !-- axis = 1 --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_1.rst index 3c44d8b8188c76..41ef471065e158 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_1.rst @@ -5,7 +5,7 @@ Pad .. meta:: - :description: Learn about Pad-1 - a data movement operation, + :description: Learn about Pad-1 - a data movement operation, which can be performed on three required and one optional input tensor. **Versioned name**: *Pad-1* @@ -26,7 +26,7 @@ The following examples illustrate how output tensor is generated for the *Pad* l [ 9 10 11 12 ]] -with the following attributes: +with the following attributes: .. code-block:: cpp @@ -36,7 +36,7 @@ with the following attributes: depending on the *pad_mode*. -* ``pad_mode = "constant"``: +* ``pad_mode = "constant"``: .. code-block:: cpp @@ -48,7 +48,7 @@ depending on the *pad_mode*. [ 0 0 0 0 0 0 0 0 ]] -* ``pad_mode = "edge"``: +* ``pad_mode = "edge"``: .. code-block:: cpp @@ -121,7 +121,7 @@ depending on the *pad_mode*. **Example**: constant mode .. code-block:: xml - :force: + :force: @@ -133,22 +133,22 @@ depending on the *pad_mode*. 40 - 4 < !-- pads_begin = [0, 5, 2, 1] --> + 4 - 4 < !-- pads_end = [1, 0, 3, 7] --> + 4 - < !-- pad_value = 15.0 --> + - 2 < !-- 2 = 0 + 1 + 1 = pads_begin[0] + input.shape[0] + pads_end[0] --> - 8 < !-- 8 = 5 + 3 + 0 = pads_begin[1] + input.shape[1] + pads_end[1] --> - 37 < !-- 37 = 2 + 32 + 3 = pads_begin[2] + input.shape[2] + pads_end[2] --> - 48 < !-- 48 = 1 + 40 + 7 = pads_begin[3] + input.shape[3] + pads_end[3] --> - < !-- all new elements are filled with 15.0 value --> + 2 + 8 + 37 + 48 + @@ -169,18 +169,18 @@ depending on the *pad_mode*. 40 - 4 < !-- pads_begin = [0, 5, 2, 1] --> + 4 - 4 < !-- pads_end = [1, 0, 3, 7] --> + 4 - 2 < !-- 2 = 0 + 1 + 1 = pads_begin[0] + input.shape[0] + pads_end[0] --> - 8 < !-- 8 = 5 + 3 + 0 = pads_begin[1] + input.shape[1] + pads_end[1] --> - 37 < !-- 37 = 2 + 32 + 3 = pads_begin[2] + input.shape[2] + pads_end[2] --> - 48 < !-- 48 = 1 + 40 + 7 = pads_begin[3] + input.shape[3] + pads_end[3] --> + 2 + 8 + 37 + 48 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_12.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_12.rst index eea353f0934bbb..1c214393d6cc4e 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_12.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_12.rst @@ -5,7 +5,7 @@ Pad .. 
meta:: - :description: Learn about Pad-12 - a data movement operation, + :description: Learn about Pad-12 - a data movement operation, which can be performed on three required and one optional input tensor. **Versioned name**: *Pad-12* @@ -19,7 +19,7 @@ Pad The following examples illustrate how output tensor is generated for the *Pad* layer for a given inputs: Positive pads example: -######################## +######################## .. code-block:: cpp @@ -82,7 +82,7 @@ depending on the *pad_mode* attribute: Negative pads example: -######################### +######################### .. code-block:: cpp @@ -111,7 +111,7 @@ for all of the *pad_mode* attribute options: Mixed pads example: -######################## +######################## .. code-block:: cpp @@ -178,7 +178,7 @@ Mixed pads example: * **Description**: *pad_mode* specifies the method used to generate the padding values. * **Range of values**: Name of the method in string format: - + * ``constant`` - padded values are taken from the *pad_value* input. If the input is not provided, the padding elements are equal to zero. * ``edge`` - padded values are copied from the respective edge of the input ``data`` tensor. * ``reflect`` - padded values are a reflection of the input `data` tensor. Values on the edges are not duplicated, ``pads_begin[D]`` and ``pads_end[D]`` must be not greater than ``data.shape[D] – 1`` for any valid ``D``. @@ -223,22 +223,22 @@ Mixed pads example: 40 - 4 < !-- pads_begin = [0, 5, 2, 1] --> + 4 - 4 < !-- pads_end = [1, 0, 3, 7] --> + 4 - < !-- pad_value = 15.0 --> + - 2 < !-- 2 = 0 + 1 + 1 = pads_begin[0] + input.shape[0] + pads_end[0] --> - 8 < !-- 8 = 5 + 3 + 0 = pads_begin[1] + input.shape[1] + pads_end[1] --> - 37 < !-- 37 = 2 + 32 + 3 = pads_begin[2] + input.shape[2] + pads_end[2] --> - 48 < !-- 48 = 1 + 40 + 7 = pads_begin[3] + input.shape[3] + pads_end[3] --> - < !-- all new elements are filled with 15.0 value --> + 2 + 8 + 37 + 48 + @@ -247,7 +247,7 @@ Mixed pads example: **Example**: constant mode (positive and negative pads) .. code-block:: xml - :force: + :force: @@ -259,22 +259,22 @@ Mixed pads example: 40 - 4 < !-- pads_begin = [0, -2, -8, 1] --> + 4 - 4 < !-- pads_end = [-1, 4, -6, 7] --> + 4 - < !-- pad_value = 15.0 --> + - 1 < !-- 2 = 0 + 2 + (-1) = pads_begin[0] + input.shape[0] + pads_end[0] --> - 5 < !-- 5 = (-2) + 3 + 4 = pads_begin[1] + input.shape[1] + pads_end[1] --> - 18 < !-- 18 = (-8) + 32 (-6) = pads_begin[2] + input.shape[2] + pads_end[2] --> - 48 < !-- 48 = 1 + 40 + 7 = pads_begin[3] + input.shape[3] + pads_end[3] --> - < !-- all new elements are filled with 15.0 value --> + 1 + 5 + 18 + 48 + @@ -283,7 +283,7 @@ Mixed pads example: **Example**: edge mode .. 
code-block:: xml - :force: + :force: @@ -295,18 +295,18 @@ Mixed pads example: 40 - 4 < !-- pads_begin = [0, 5, 2, 1] --> + 4 - 4 < !-- pads_end = [1, 0, 3, 7] --> + 4 - 2 < !-- 2 = 0 + 1 + 1 = pads_begin[0] + input.shape[0] + pads_end[0] --> - 8 < !-- 8 = 5 + 3 + 0 = pads_begin[1] + input.shape[1] + pads_end[1] --> - 37 < !-- 37 = 2 + 32 + 3 = pads_begin[2] + input.shape[2] + pads_end[2] --> - 48 < !-- 48 = 1 + 40 + 7 = pads_begin[3] + input.shape[3] + pads_end[3] --> + 2 + 8 + 37 + 48 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ReverseSequence_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ReverseSequence_1.rst index 5b6e7909f67b10..9497c6a1a1fc94 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ReverseSequence_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ReverseSequence_1.rst @@ -5,7 +5,7 @@ ReverseSequence .. meta:: - :description: Learn about ReverseSequence-1 - a data movement operation, + :description: Learn about ReverseSequence-1 - a data movement operation, which can be performed on two required input tensors. **Versioned name**: *ReverseSequence-1* @@ -58,14 +58,14 @@ ReverseSequence - < !-- data --> - 4 < !-- batch_axis --> - 10 < !-- seq_axis --> + + 4 + 10 100 200 - 4 < !-- seq_lengths value: [2, 4, 8, 10] --> + 4 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Reverse_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Reverse_1.rst index e9325266d00294..fc22ffc74dd410 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Reverse_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Reverse_1.rst @@ -5,7 +5,7 @@ Reverse .. meta:: - :description: Learn about Reverse-1 - a data movement operation, + :description: Learn about Reverse-1 - a data movement operation, which can be performed on one required and one optional input tensor. **Versioned name**: *Reverse-1* @@ -62,7 +62,7 @@ If no axis specified, that means either the second input is empty if ``index`` m 200 - 1 < !-- reverting along single axis --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Roll_7.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Roll_7.rst index a8c9df7c993a74..20b086f93e225d 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Roll_7.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Roll_7.rst @@ -5,7 +5,7 @@ Roll .. meta:: - :description: Learn about Roll-7 - a data movement operation, which can be + :description: Learn about Roll-7 - a data movement operation, which can be performed on three required input tensors. **Versioned name**: *Roll-7* @@ -100,7 +100,7 @@ No attributes available. 2 - 2 < !-- shifting along specified axes with the corresponding shift values --> + 2 @@ -131,7 +131,7 @@ No attributes available. 
1 - 2 < !-- shifting along specified axes with the same shift value --> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_12.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_12.rst index d0e810326634f6..aa5b2809e71219 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_12.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_12.rst @@ -130,22 +130,22 @@ Accordingly for 3D tensor case, the update of the element corresponding to the ` - > < !-- data --> - 4 < !-- values: [2, 3, 4, 6] --> + > + 4 - < !-- indices (negative values allowed) --> - 6 < !-- values: [1, 0, 0, -2, -1, 2] --> + + 6 - > < !-- updates --> - 6 < !-- values: [10, 20, 30, 40, 70, 60] --> + > + 6 - < !-- values: [0] --> + 1 - 4 < !-- values: [52, 13, 104, 76] --> + 4 @@ -157,22 +157,22 @@ Accordingly for 3D tensor case, the update of the element corresponding to the ` - > < !-- data --> - 4 < !-- values: [2, 3, 4, 6] --> + > + 4 - < !-- indices --> - 6 < !-- values: [1, 0, 0, 2, 3, 2] --> + + 6 - > < !-- updates --> - 6 < !-- values: [10, 20, 30, 40, 70, 60] --> + > + 6 - < !-- values: [0] --> + 1 - 4 < !-- values: [50, 10, 100, 70] --> + 4 @@ -184,30 +184,30 @@ Accordingly for 3D tensor case, the update of the element corresponding to the ` - > < !-- data --> + > 3 - 4 < !-- values: [[0, 0, 0, 0], + 4 - < !-- indices --> + 2 - 2 < !-- values: [[1, 2], + 2 - > < !-- updates --> + > 2 - 2 < !-- values: [[11, 12], + 2 - < !-- values: [1] --> + 1 3 - 4 < !-- values: [[ 0, 11, 12, 0], + 4 @@ -221,30 +221,30 @@ Accordingly for 3D tensor case, the update of the element corresponding to the ` - > < !-- data --> + > 3 - 4 < !-- values: [[1, 1, 1, 1], + 4 - < !-- indices --> + 2 - 2 < !-- values: [[1, 1], + 2 - > < !-- updates --> + > 2 - 2 < !-- values: [[11, 12], + 2 - < !-- values: [1] --> + 1 3 - 4 < !-- values: [[ 1, 24, 1, 1], + 4 @@ -258,30 +258,30 @@ Accordingly for 3D tensor case, the update of the element corresponding to the ` - > < !-- data --> + > 3 - 4 < !-- values: [[2, 2, 2, 2], + 4 - < !-- indices --> + 2 - 2 < !-- values: [[1, 1], + 2 - > < !-- updates --> + > 2 - 2 < !-- values: [[11, 12], + 2 - < !-- values: [1] --> + 1 3 - 4 < !-- values: [[ 2, 264, 2, 2], + 4 @@ -313,7 +313,7 @@ Accordingly for 3D tensor case, the update of the element corresponding to the ` 7 6 - < !-- values: [0] --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_3.rst index fe927abc9b9e95..69eafbb10f7c26 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_3.rst @@ -5,7 +5,7 @@ ScatterElementsUpdate .. meta:: - :description: Learn about ScatterElementsUpdate-3 - a data movement operation, which can be + :description: Learn about ScatterElementsUpdate-3 - a data movement operation, which can be performed on four required input tensors. 
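The 1-D *ScatterElementsUpdate* examples above (``reduction="sum"``) can be reproduced with a short NumPy sketch; ``use_init_val`` controls whether the original ``data`` values contribute at positions that receive updates. The helper name is illustrative only:

.. code-block:: py

   import numpy as np

   def scatter_elements_sum_1d(data, indices, updates, use_init_val=True):
       out = data.copy()
       if not use_init_val:
           # updated positions start from 0; untouched ones keep their data
           out[np.unique(np.asarray(indices) % len(data))] = 0
       for idx, upd in zip(indices, updates):
           out[idx % len(data)] += upd   # negative indices wrap around
       return out

   data = np.array([2, 3, 4, 6])
   updates = [10, 20, 30, 40, 70, 60]
   print(scatter_elements_sum_1d(data, [1, 0, 0, -2, -1, 2], updates))
   # -> [ 52  13 104  76]
   print(scatter_elements_sum_1d(data, [1, 0, 0, 2, 3, 2], updates, False))
   # -> [ 50  10 100  70]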
**Versioned name**: *ScatterElementsUpdate-3* @@ -81,7 +81,7 @@ The value can be in range ``[-r, r - 1]`` where ``r`` is the rank of ``data``. * 7 6 - < !-- value [0] --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterUpdate_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterUpdate_3.rst index f6fba7c4427115..86bbacc8b1a7cc 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterUpdate_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterUpdate_3.rst @@ -5,7 +5,7 @@ ScatterUpdate .. meta:: - :description: Learn about ScatterUpdate-3 - a data movement operation, which can be + :description: Learn about ScatterUpdate-3 - a data movement operation, which can be performed on four required input tensors. **Versioned name**: *ScatterUpdate-3* @@ -35,14 +35,14 @@ Several examples for case when `axis = 0`: * **1**: ``data`` tensor of arbitrary rank ``r`` and type *T_NUMERIC*. **Required.** -* **2**: ``indices`` tensor with indices of type *T_IND*. All index values are expected to be within bounds ``[0, s - 1]`` along the axis of size ``s``. If multiple indices point to the -same output location, the order of updating the values is undefined. If an index points to a non-existing output -tensor element or is negative, then an exception is raised. **Required.** +* **2**: ``indices`` tensor with indices of type *T_IND*. All index values are expected to be within bounds ``[0, s - 1]`` along the axis + of size ``s``. If multiple indices point to the same output location, the order of updating the values is undefined. + If an index points to a non-existing output tensor element or is negative, then an exception is raised. **Required.** * **3**: ``updates`` tensor of type *T_NUMERIC* and rank equal to ``rank(indices) + rank(data) - 1`` **Required.** * **4**: ``axis`` tensor with scalar or 1D tensor with one element of type *T_AXIS* specifying axis for scatter. -The value can be in the range ``[ -r, r - 1]``, where ``r`` is the rank of ``data``. **Required.** + The value can be in the range ``[ -r, r - 1]``, where ``r`` is the rank of ``data``. 
**Required.** **Outputs**: @@ -65,29 +65,29 @@ The value can be in the range ``[ -r, r - 1]``, where ``r`` is the rank of ``dat - < !-- data --> + 1000 256 10 15 - < !-- indices --> + 125 20 - < !-- updates --> + 1000 125 20 10 15 - < !-- axis --> - 1 < !-- value [1] --> + + 1 - < !-- output --> + 1000 256 10 @@ -103,26 +103,26 @@ The value can be in the range ``[ -r, r - 1]``, where ``r`` is the rank of ``dat - < !-- data --> - 3 < !-- {{-1.0f, 1.0f, -1.0f, 3.0f, 4.0f}, --> - 5 < !-- {-1.0f, 6.0f, -1.0f, 8.0f, 9.0f}, --> - < !-- {-1.0f, 11.0f, 1.0f, 13.0f, 14.0f}} --> - < !-- indices --> - 2 < !-- {0, 2} --> + + 3 + 5 + + + 2 - < !-- updates --> - 3 < !-- {1.0f, 1.0f} --> - 2 < !-- {1.0f, 1.0f} --> - < !-- {1.0f, 2.0f} --> - < !-- axis --> - 1 < !-- {1} --> + + 3 + 2 + + + 1 - < !-- output --> - 3 < !-- {{1.0f, 1.0f, 1.0f, 3.0f, 4.0f}, --> - 5 < !-- {1.0f, 6.0f, 1.0f, 8.0f, 9.0f}, --> - < !-- {1.0f, 11.0f, 2.0f, 13.0f, 14.0f}} --> + + 3 + 5 + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Slice_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Slice_8.rst index 22b0a7bfbd72af..207ddda0721436 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Slice_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Slice_8.rst @@ -5,7 +5,7 @@ Slice .. meta:: - :description: Learn about Slice-8 - a data movement operation, + :description: Learn about Slice-8 - a data movement operation, which can be performed on four required and one optional input tensor. **Versioned name**: *Slice-8* @@ -82,24 +82,24 @@ Example 1: basic slicing - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [1] --> + 1 - < !-- stop: [8] --> + 1 - < !-- step: [1] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [1, 2, 3, 4, 5, 6, 7] --> + 7 @@ -113,21 +113,21 @@ Example 2: basic slicing, ``axes`` default - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [1] --> + 1 - < !-- stop: [8] --> + 1 - < !-- step: [1] --> + 1 - < !-- output: [1, 2, 3, 4, 5, 6, 7] --> + 7 @@ -141,24 +141,24 @@ Example 3: basic slicing, ``step: [2]`` - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [1] --> + 1 - < !-- stop: [8] --> + 1 - < !-- step: [2] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [1, 3, 5, 7] --> + 4 @@ -171,24 +171,24 @@ Example 4: ``start`` and ``stop`` out of the dimension size, ``step: [1]`` - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [-100] --> + 1 - < !-- stop: [100] --> + 1 - < !-- step: [1] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 @@ -202,24 +202,24 @@ Example 5: slicing backward all elements, ``step: [-1]``, ``stop: [-11]`` - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [9] --> + 1 - < !-- stop: [-11] --> + 1 - < !-- step: [-1] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] --> + 10 @@ -233,29 +233,29 @@ Example 6: slicing backward, ``step: [-1]``, ``stop: [0]`` - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [9] --> + 1 - < !-- stop: [0] --> + 1 - < !-- step: [-1] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [9, 8, 7, 6, 5, 4, 3, 2, 1] --> + 9 - + Example 7: slicing backward, ``step: [-1]``, ``stop: [-10]`` @@ -264,24 +264,24 @@ Example 7: slicing backward, ``step: [-1]``, ``stop: [-10]`` - < !-- data: [0, 
1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [9] --> + 1 - < !-- stop: [-10] --> + 1 - < !-- step: [-1] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [9, 8, 7, 6, 5, 4, 3, 2, 1] --> + 9 @@ -295,24 +295,24 @@ Example 8: slicing backward, ``step: [-2]`` - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [9] --> + 1 - < !-- stop: [-11] --> + 1 - < !-- step: [-2] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [9, 7, 5, 3, 1] --> + 5 @@ -326,24 +326,24 @@ Example 9: ``start`` and ``stop`` out of the dimension size, slicing backward - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [100] --> + 1 - < !-- stop: [-100] --> + 1 - < !-- step: [-1] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] --> + 10 @@ -357,31 +357,31 @@ Example 10: slicing 2D tensor, all axes specified - < !-- data: data: [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] --> + 2 5 - < !-- start: [0, 1] --> + 2 - < !-- stop: [2, 4] --> + 2 - < !-- step: [1, 2] --> + 2 - < !-- axes: [0, 1] --> + 2 - < !-- output: [1, 3, 6, 8] --> + 2 2 - + Example 11: slicing 3D tensor, all axes specified @@ -390,26 +390,26 @@ Example 11: slicing 3D tensor, all axes specified - < !-- data --> + 20 10 5 - < !-- start: [0, 0, 0] --> + 2 - < !-- stop: [4, 10, 5] --> + 2 - < !-- step: [1, 1, 1] --> + 2 - < !-- axes: [0, 1, 2] --> + 2 - < !-- output --> + 4 10 5 @@ -424,26 +424,26 @@ Example 12: slicing 3D tensor, last axes default - < !-- data --> + 20 10 5 - < !-- start: [0, 0] --> + 2 - < !-- stop: [4, 10] --> + 2 - < !-- step: [1, 1] --> + 2 - < !-- axes: [0, 1] --> + 2 - < !-- output --> + 4 10 5 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToBatch_2.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToBatch_2.rst index 5b1f060e7bc7cb..6b7ddb69c2ef53 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToBatch_2.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToBatch_2.rst @@ -5,7 +5,7 @@ SpaceToBatch .. meta:: - :description: Learn about SpaceToBatch-2 - a data movement operation, + :description: Learn about SpaceToBatch-2 - a data movement operation, which can be performed on four required input tensors. **Versioned name**: *SpaceToBatch-2* @@ -79,30 +79,30 @@ No attributes available. - < !-- data --> - 2 < !-- batch --> - 6 < !-- spatial dimension 1 --> - 10 < !-- spatial dimension 2 --> - 3 < !-- spatial dimension 3 --> - 3 < !-- spatial dimension 4 --> + + 2 + 6 + 10 + 3 + 3 - < !-- block_shape value: [1, 2, 4, 3, 1] --> + 5 - < !-- pads_begin value: [0, 0, 1, 0, 0] --> + 5 - < !-- pads_end value: [0, 0, 1, 0, 0] --> + 5 - 48 < !-- data.shape[0] * block_shape.shape[0] * block_shape.shape[1] *... 
* block_shape.shape[4] --> - 3 < !-- (data.shape[1] + pads_begin[1] + pads_end[1]) / block_shape.shape[1] --> - 3 < !-- (data.shape[2] + pads_begin[2] + pads_end[2]) / block_shape.shape[2] --> - 1 < !-- (data.shape[3] + pads_begin[3] + pads_end[3]) / block_shape.shape[3] --> - 3 < !-- (data.shape[4] + pads_begin[4] + pads_end[4]) / block_shape.shape[4] --> + 48 + 3 + 3 + 1 + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Split_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Split_1.rst index bf955d0c8b6d58..0731920bd6db48 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Split_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Split_1.rst @@ -5,7 +5,7 @@ Split .. meta:: - :description: Learn about Split-1 - a data movement operation, + :description: Learn about Split-1 - a data movement operation, which can be performed on two required input tensors. **Versioned name**: *Split-1* @@ -58,13 +58,13 @@ Where D is the rank of input tensor ``data``. The axis being split must be evenl - < !-- some data --> + 6 12 10 24 - < !-- axis: 1 --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/StridedSlice_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/StridedSlice_1.rst index b282848e4af0e1..a4025de9a9f924 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/StridedSlice_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/StridedSlice_1.rst @@ -5,7 +5,7 @@ StridedSlice .. meta:: - :description: Learn about StridedSlice-1 - a data movement operation, + :description: Learn about StridedSlice-1 - a data movement operation, which can be performed on three required and one optional input tensor. **Versioned name**: *StridedSlice-1* @@ -88,13 +88,13 @@ Example of ``begin_mask`` & ``end_mask`` usage. 4 - 2 < !-- begin: [1, 0, 0] --> + 2 - 2 < !-- end: [0, 0, 2] --> + 2 - 2 < !-- stride: [1, 1, 1] --> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Tile_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Tile_1.rst index 6c734cc3bba114..f788e136c8fa62 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Tile_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Tile_1.rst @@ -5,7 +5,7 @@ Tile .. meta:: - :description: Learn about Tile-1 - a data movement operation, which can be + :description: Learn about Tile-1 - a data movement operation, which can be performed on two required input tensors. **Versioned name**: *Tile-1* @@ -39,10 +39,10 @@ No attributes available. *Tile* operation extends input tensor and filling in output tensor by the following rules: -.. math:: +.. math:: out_i=input_i[inner_dim*t] - + .. math:: t \in \left ( 0, \quad tiles \right ) @@ -62,7 +62,7 @@ No attributes available. 4 - 3 < !-- [1, 2, 3] --> + 3 @@ -81,13 +81,13 @@ No attributes available. - < !-- will be promoted to shape (1, 2, 3, 4) --> + 2 3 4 - 4 < !-- [5, 1, 2, 3] --> + 4 @@ -114,7 +114,7 @@ No attributes available. 
4 - 3 < !-- [1, 2, 3] will be promoted to [1, 1, 2, 3] --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Transpose_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Transpose_1.rst index 28cd04c2767e18..54dbdb1d13d1f3 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Transpose_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Transpose_1.rst @@ -5,7 +5,7 @@ Transpose .. meta:: - :description: Learn about Transpose-1 - a data movement operation, which can be + :description: Learn about Transpose-1 - a data movement operation, which can be performed on two required input tensors. **Versioned name**: *Transpose-1* @@ -53,7 +53,7 @@ Transpose 4 - 3 < !-- [2, 0, 1] --> + 3 @@ -79,7 +79,7 @@ Transpose 4 - 0 < !-- input_order is an empty 1D tensor --> + 0 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/VariadicSplit_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/VariadicSplit_1.rst index d87c037d44a9f0..19b64c2711d347 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/VariadicSplit_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/VariadicSplit_1.rst @@ -5,7 +5,7 @@ VariadicSplit .. meta:: - :description: Learn about VariadicSplit-1 - a data movement operation, which can be + :description: Learn about VariadicSplit-1 - a data movement operation, which can be performed on three required input tensors. **Versioned name**: *VariadicSplit-1* @@ -20,7 +20,7 @@ VariadicSplit The i-th output tensor shape is equal to the input tensor `data` shape, except for dimension along `axis` which is ``split_lengths[i]``. .. math:: - + shape\_output\_tensor = [data.shape[0], data.shape[1], \dotsc , split\_lengths[i], \dotsc , data.shape[D-1]] Where D is the rank of input tensor `data`. The sum of elements in ``split_lengths`` must match ``data.shape[axis]``. @@ -49,16 +49,16 @@ Where D is the rank of input tensor `data`. The sum of elements in ``split_lengt - < !-- some data --> + 6 12 10 24 - < !-- axis: 0 --> + - 3 < !-- split_lengths: [1, 2, 3] --> + 3 @@ -89,21 +89,21 @@ Where D is the rank of input tensor `data`. The sum of elements in ``split_lengt - < !-- some data --> + 6 12 10 24 - < !-- axis: 0 --> + - 2 < !-- split_lengths: [-1, 2] --> + 2 - 4 < !-- 4 = 6 - 2 --> + 4 12 10 24 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_1.rst index e5233fb3d3eb7a..91b803f757d519 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_1.rst @@ -5,7 +5,7 @@ BatchNormInference .. meta:: - :description: Learn about BatchNormInference-5 - a normalization operation, which can be + :description: Learn about BatchNormInference-5 - a normalization operation, which can be performed on five required input tensors. 
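The *VariadicSplit* example above, with its ``-1`` split length, takes only a few lines of NumPy to reproduce (``variadic_split`` is an illustrative helper, not an OpenVINO API):

.. code-block:: py

   import numpy as np

   def variadic_split(data, axis, split_lengths):
       lengths = list(split_lengths)
       if -1 in lengths:
           # a single -1 absorbs whatever remains of data.shape[axis]
           i = lengths.index(-1)
           lengths[i] = data.shape[axis] - (sum(lengths) + 1)
       assert sum(lengths) == data.shape[axis]
       return np.split(data, np.cumsum(lengths)[:-1], axis=axis)

   x = np.zeros((6, 12, 10, 24))
   parts = variadic_split(x, axis=0, split_lengths=[-1, 2])
   print([p.shape for p in parts])   # [(4, 12, 10, 24), (2, 12, 10, 24)]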
**Versioned name**: *BatchNormInference-5* @@ -19,19 +19,19 @@ BatchNormInference *BatchNormInference* performs the following operations on a given data batch input tensor ``data``: * Normalizes each activation :math:`x^{(k)}` by the mean and variance. - + .. math:: - + \hat{x}^{(k)}=\frac{x^{(k)} - E[x^{(k)}]}{\sqrt{Var(x^{(k)}) + \epsilon}} where :math:`E[x^{(k)}]` and :math:`Var(x^{(k)})` are the mean and variance, calculated per channel axis of ``data`` input, and correspond to ``mean`` and ``variance`` inputs, respectively. Additionally, :math:`\epsilon` is a value added to the variance for numerical stability and corresponds to ``epsilon`` attribute. * Performs linear transformation of each normalized activation based on ``gamma`` and ``beta`` input, representing the scaling factor and shift, respectively. - + .. math:: - + \hat{y}^{(k)}=\gamma^{(k)}\hat{x}^{(k)} + \beta^{(k)} - + where :math:`\gamma^{(k)}` and :math:`\beta^{(k)}` are learnable parameters, calculated per channel axis, and correspond to ``gamma`` and ``beta`` inputs. **Mathematical Formulation** @@ -41,46 +41,46 @@ Let ``x`` be a *d*-dimensional input, :math:`x=(x_{1}\dotsc x_{d})`. Since norma For a particular activation, consider a mini-batch :math:`\mathcal{B}` of m values. *BatchNormInference* performs Batch Normalization algorithm as follows: * **Input**: Values of :math:`x` over a mini-batch: - + .. math:: - + \mathcal{B} = {x_{1...m}} * **Parameters to learn**: :math:`\gamma, \beta` * **Output**: - + .. math:: - + {o_{i} = BN_{\gamma, \beta} ( b_{i} )} * **Mini-batch mean**: - + .. math:: - + \mu_{\mathcal{B}} \leftarrow \frac{1}{m}\sum_{i=1}^{m}b_{i} * **Mini-batch variance**: - + .. math:: - + \sigma_{\mathcal{B}}^{2}\leftarrow \frac{1}{m}\sum_{i=1}^{m} ( b_{i} - \mu_{\mathcal{B}})^{2} * **Normalize**: - + .. math:: - + \hat{b_{i}} \leftarrow \frac{b_{i} - \mu_{\mathcal{B}}}{\sqrt{\sigma_{\mathcal{B}}^{2} + \epsilon }} * **Scale and shift**: - + .. math:: - + o_{i} \leftarrow \gamma\hat{b_{i}} + \beta = BN_{\gamma ,\beta } ( b_{i} ) **Attributes**: * *epsilon* - + * **Description**: *epsilon* is a constant added to the variance for numerical stability. * **Range of values**: a floating-point number greater than or equal to zero * **Type**: ``float`` @@ -104,28 +104,28 @@ For a particular activation, consider a mini-batch :math:`\mathcal{B}` of m valu **Examples** -Example: 2D input tensor ``data`` +Example: 2D input tensor ``data`` .. code-block:: xml :force: - + - < !-- input --> + 10 128 - < !-- gamma --> + 128 - < !-- beta --> + 128 - < !-- mean --> + 128 - < !-- variance --> + 128 @@ -141,26 +141,26 @@ Example: 4D input tensor ``data`` .. code-block:: xml :force: - + - < !-- input --> + 1 3 224 224 - < !-- gamma --> + 3 - < !-- beta --> + 3 - < !-- mean --> + 3 - < !-- variance --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_5.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_5.rst index 5c8bb387c4a116..d5a11a0db718c8 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_5.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_5.rst @@ -5,7 +5,7 @@ BatchNormInference .. 
meta:: - :description: Learn about BatchNormInference-5 - a normalization operation, which can be + :description: Learn about BatchNormInference-5 - a normalization operation, which can be performed on five required input tensors. **Versioned name**: *BatchNormInference-5* @@ -21,17 +21,17 @@ BatchNormInference * Normalizes each activation :math:`x^{(k)}` by the mean and variance. .. math:: - + \hat{x}^{(k)}=\frac{x^{(k)} - E[x^{(k)}]}{\sqrt{Var(x^{(k)}) + \epsilon}} - + where :math:`E[x^{(k)}]` and :math:`Var(x^{(k)})` are the mean and variance, calculated per channel axis of ``data`` input, and correspond to ``mean`` and ``variance`` inputs, respectively. Additionally, :math:`\epsilon` is a value added to the variance for numerical stability and corresponds to ``epsilon`` attribute. * Performs linear transformation of each normalized activation based on ``gamma`` and ``beta`` input, representing the scaling factor and shift, respectively. .. math:: - + \hat{y}^{(k)}=\gamma^{(k)}\hat{x}^{(k)} + \beta^{(k)} - + where :math:`\gamma^{(k)}` and :math:`\beta^{(k)}` are learnable parameters, calculated per channel axis, and correspond to ``gamma`` and ``beta`` inputs. **Mathematical Formulation** @@ -41,47 +41,47 @@ Let ``x`` be a *d*-dimensional input, :math:`x=(x_{1}\dotsc x_{d})`. Since norma For a particular activation, consider a mini-batch :math:`\mathcal{B}` of m values. *BatchNormInference* performs Batch Normalization algorithm as follows: * **Input**: Values of :math:`x` over a mini-batch: - + .. math:: - + \mathcal{B} = {x_{1...m}} - + * **Parameters to learn**: :math:`\gamma, \beta` * **Output**: - + .. math:: - + {o_{i} = BN_{\gamma, \beta} ( b_{i} )} - + * **Mini-batch mean**: - + .. math:: - + \mu_{\mathcal{B}} \leftarrow \frac{1}{m}\sum_{i=1}^{m}b_{i} * **Mini-batch variance**: - + .. math:: - + \sigma_{\mathcal{B}}^{2}\leftarrow \frac{1}{m}\sum_{i=1}^{m} ( b_{i} - \mu_{\mathcal{B}})^{2} * **Normalize**: - + .. math:: - + \hat{b_{i}} \leftarrow \frac{b_{i} - \mu_{\mathcal{B}}}{\sqrt{\sigma_{\mathcal{B}}^{2} + \epsilon }} * **Scale and shift**: - + .. math:: - + o_{i} \leftarrow \gamma\hat{b_{i}} + \beta = BN_{\gamma ,\beta } ( b_{i} ) **Attributes**: * *epsilon* - + * **Description**: *epsilon* is a constant added to the variance for numerical stability. * **Range of values**: a floating-point number greater than or equal to zero * **Type**: ``float`` @@ -109,24 +109,24 @@ Example: 2D input tensor ``data`` .. code-block:: xml :force: - + - < !-- input --> + 10 128 - < !-- gamma --> + 128 - < !-- beta --> + 128 - < !-- mean --> + 128 - < !-- variance --> + 128 @@ -142,26 +142,26 @@ Example: 4D input tensor ``data`` .. code-block:: xml :force: - + - < !-- input --> + 1 3 224 224 - < !-- gamma --> + 3 - < !-- beta --> + 3 - < !-- mean --> + 3 - < !-- variance --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/LRN_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/LRN_1.rst index c1fb7927f3ddf5..2231e3bc7fa7ed 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/LRN_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/LRN_1.rst @@ -5,7 +5,7 @@ LRN .. meta:: - :description: Learn about LRN-1 - a normalization operation, which can be + :description: Learn about LRN-1 - a normalization operation, which can be performed on two required input tensors. 
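The *BatchNormInference* formula above collapses to one broadcasted NumPy expression once the per-channel inputs are reshaped; a minimal sketch, assuming channels on axis 1 as in the NCHW example:

.. code-block:: py

   import numpy as np

   def batch_norm_inference(data, gamma, beta, mean, variance, eps=1e-5):
       # reshape per-channel parameters so they broadcast over N, H, W, ...
       shape = [1, -1] + [1] * (data.ndim - 2)
       g, b, m, v = (p.reshape(shape) for p in (gamma, beta, mean, variance))
       return g * (data - m) / np.sqrt(v + eps) + b

   x = np.random.rand(1, 3, 224, 224).astype(np.float32)
   y = batch_norm_inference(x, np.ones(3), np.zeros(3), np.zeros(3), np.ones(3))
   print(y.shape)   # (1, 3, 224, 224)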
**Versioned name**: *LRN-1*

@@ -105,7 +105,7 @@ Example for 4D ``data`` input tensor and ``axes = [2, 3]``:
             <dim>24</dim>
-            <dim>1</dim> <!-- value is [1] that means independent normalization for each pixel along channels -->
+            <dim>1</dim>
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_6.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_6.rst
index ba04fb3b8cec33..444762f7ef0b57 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_6.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_6.rst
@@ -5,7 +5,7 @@ MVN
 .. meta::
-   :description: Learn about MVN-6 - a normalization operation, which can be
+   :description: Learn about MVN-6 - a normalization operation, which can be
                  performed on two required input tensors.

 **Versioned name**: *MVN-6*
@@ -100,7 +100,7 @@ If *normalize_variance* is set to ``true``, the output blob is divided by variance
             <dim>24</dim>
-            <dim>3</dim> <!-- value of [0,2,3] means independent normalization per channels -->
+            <dim>3</dim>
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/NormalizeL2_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/NormalizeL2_1.rst
index 8bd1da903fdcc9..61b3d439a2ef7f 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/NormalizeL2_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/NormalizeL2_1.rst
@@ -5,7 +5,7 @@ NormalizeL2
 .. meta::
-   :description: Learn about MVN-1 - a normalization operation, which can be
+   :description: Learn about NormalizeL2-1 - a normalization operation, which can be
                  performed on two required input tensors.

 **Versioned name**: *NormalizeL2-1*
@@ -79,7 +79,7 @@ Example: Normalization over channel dimension for ``NCHW`` layout
             <dim>24</dim>
-            <dim>1</dim> <!-- axes list [1] means normalization over channel dimension -->
+            <dim>1</dim>
@@ -108,7 +108,7 @@ Example: Normalization over channel and spatial dimensions for ``NCHW`` layout
             <dim>24</dim>
-            <dim>3</dim> <!-- axes list [1, 2, 3] means normalization over channel and spatial dimensions -->
+            <dim>3</dim>
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/quantization/FakeConvert_13.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/quantization/FakeConvert_13.rst
index d58eefc16f3983..93e56c4b4ce2a2 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/quantization/FakeConvert_13.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/quantization/FakeConvert_13.rst
@@ -29,9 +29,9 @@
 Each element of the output is defined as the result of the following expression:

.. 
code-block:: py :force: - data = (data + shift) / scale - ConvertLike(Convert(data, destination_type), data) data = data * scale - shift + ConvertLike(Convert(data, destination_type), data) + data = (data + shift) / scale **Attributes** diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL1_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL1_4.rst index e2cd7c83c2feb3..6acdb9e5786943 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL1_4.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL1_4.rst @@ -5,7 +5,7 @@ ReduceL1 .. meta:: - :description: Learn about ReduceL1-4 - a reduction operation, which can be + :description: Learn about ReduceL1-4 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceL1-4* @@ -68,7 +68,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -96,7 +96,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -122,7 +122,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -149,7 +149,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 @@ -160,4 +160,4 @@ Particular cases: - + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL2_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL2_4.rst index 0556516d5bfbd3..aa908c97f6c0c4 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL2_4.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL2_4.rst @@ -5,7 +5,7 @@ ReduceL2 .. meta:: - :description: Learn about ReduceL2-4 - a reduction operation, which can be + :description: Learn about ReduceL2-4 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceL2-4* @@ -68,7 +68,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -95,7 +95,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -120,7 +120,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -146,7 +146,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalAnd_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalAnd_1.rst index e92b9153a12d12..01dbfc47902f05 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalAnd_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalAnd_1.rst @@ -5,7 +5,7 @@ ReduceLogicalAnd .. 
meta:: - :description: Learn about ReduceLogicalAnd-1 - a reduction operation, which can be + :description: Learn about ReduceLogicalAnd-1 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceLogicalAnd-1* @@ -70,7 +70,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -97,7 +97,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -122,7 +122,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -148,7 +148,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalOr_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalOr_1.rst index 7cfdbc95eea5a0..e033e136f5b0e2 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalOr_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalOr_1.rst @@ -5,7 +5,7 @@ ReduceLogicalOr .. meta:: - :description: Learn about ReduceLogicalOr-1 - a reduction operation, which can be + :description: Learn about ReduceLogicalOr-1 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceLogicalOr-1* @@ -70,7 +70,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -97,7 +97,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -121,7 +121,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -147,7 +147,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst index 5037372de4cbce..4e22be42d2636d 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst @@ -5,7 +5,7 @@ ReduceMax .. meta:: - :description: Learn about ReduceMax-1 - a reduction operation, which can be + :description: Learn about ReduceMax-1 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceMax-1* @@ -72,7 +72,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -99,7 +99,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -124,7 +124,7 @@ Reducing empty tensor results in an undefined behavior. 
24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -150,7 +150,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMean_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMean_1.rst index c3b71fa89c95e1..9aef4e981cf46f 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMean_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMean_1.rst @@ -5,7 +5,7 @@ ReduceMean .. meta:: - :description: Learn about ReduceMean-1 - a reduction operation, which can be + :description: Learn about ReduceMean-1 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceMean-1* @@ -70,7 +70,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -97,7 +97,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -122,7 +122,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -147,7 +147,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst index 4986ddc474606f..f20a0cfda064d4 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst @@ -5,7 +5,7 @@ ReduceMin .. meta:: - :description: Learn about ReduceMin-1 - a reduction operation, which can be + :description: Learn about ReduceMin-1 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceMin-1* @@ -72,7 +72,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -99,7 +99,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -124,7 +124,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -150,7 +150,7 @@ Reducing empty tensor results in an undefined behavior. 
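The reduction operations in this group share the same shape semantics: reduce over the axes given by the second input, normalize negative axes by adding the input rank, and either keep the reduced dimensions as ``1`` (``keep_dims = true``) or drop them. A NumPy sketch of just these semantics, with illustrative names and defaults rather than anything from the specification:

.. code-block:: py

   import numpy as np

   def reduce_op(data, axes, op=np.max, keep_dims=True):
       # Negative axes count from the back, e.g. -2 on a 4D input maps to axis 2.
       axes = tuple(a % data.ndim for a in axes)
       return op(data, axis=axes, keepdims=keep_dims)

   x = np.random.rand(6, 12, 10, 24)
   print(reduce_op(x, [2, 3]).shape)                 # (6, 12, 1, 1)
   print(reduce_op(x, [1], keep_dims=False).shape)   # (6, 10, 24)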
24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceProd_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceProd_1.rst index 0c75cb833c6a43..04af9115fb93c5 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceProd_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceProd_1.rst @@ -5,7 +5,7 @@ ReduceProd .. meta:: - :description: Learn about ReduceProd-1 - a reduction operation, which can be + :description: Learn about ReduceProd-1 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceProd-1* @@ -70,7 +70,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -97,7 +97,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -122,7 +122,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -148,7 +148,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceSum_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceSum_1.rst index d2a4858eae201e..b42536dc0baba7 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceSum_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceSum_1.rst @@ -5,7 +5,7 @@ ReduceSum .. meta:: - :description: Learn about ReduceSum-1 - a reduction operation, which can be + :description: Learn about ReduceSum-1 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceSum-1* @@ -70,7 +70,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -97,7 +97,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -122,7 +122,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -148,7 +148,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoderSeqLen_6.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoderSeqLen_6.rst index e64cd14263bebe..6ff19be0776a80 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoderSeqLen_6.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoderSeqLen_6.rst @@ -5,7 +5,7 @@ CTCGreedyDecoderSeqLen .. 
meta:: - :description: Learn about CTCGreedyDecoderSeqLen-6 - a sequence processing + :description: Learn about CTCGreedyDecoderSeqLen-6 - a sequence processing operation, which can be performed on two required input tensors. **Versioned name**: *CTCGreedyDecoderSeqLen-6* @@ -77,7 +77,7 @@ The main difference between :doc:`CTCGreedyDecoder @@ -89,7 +89,7 @@ The main difference between :doc:`CTCGreedyDecoder 8 - < !-- blank_index = 120 --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCLoss_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCLoss_4.rst index 8f43fc62309fda..49191ec889b5fe 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCLoss_4.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCLoss_4.rst @@ -5,7 +5,7 @@ CTCLoss .. meta:: - :description: Learn about CTCLoss-4 - a sequence processing operation, which + :description: Learn about CTCLoss-4 - a sequence processing operation, which can be performed on four required and one optional input tensor. **Versioned name**: *CTCLoss-4* @@ -29,19 +29,19 @@ Otherwise, the operation behaviour is undefined. 1. Compute probability of ``j``-th character at time step ``t`` for ``i``-th input sequence from ``logits`` using softmax formula: .. math:: - + p_{i,t,j} = \frac{\exp(logits[i,t,j])}{\sum^{K}_{k=0}{\exp(logits[i,t,k])}} 2. For a given ``i``-th target from ``labels[i,:]`` find all aligned paths. A path ``S = (c1,c2,...,cT)`` is aligned with a target ``G=(g1,g2,...,gT)`` if both chains are equal after decoding. The decoding extracts substring of length ``label_length[i]`` from a target ``G``, merges repeated characters in ``G`` in case *preprocess_collapse_repeated* equal to true and finds unique elements in the order of character occurrence in case *unique* equal to true. The decoding merges repeated characters in ``S`` in case *ctc_merge_repeated* equal to true and removes blank characters represented by ``blank_index``. By default, ``blank_index`` is equal to ``C-1``, where ``C`` is a number of classes including the blank. For example, in case default *ctc_merge_repeated*, *preprocess_collapse_repeated*, *unique* and ``blank_index`` a target sequence ``G=(0,3,2,2,2,2,2,4,3)`` of a length ``label_length[i]=4`` is processed to ``(0,3,2,2)`` and a path ``S=(0,0,4,3,2,2,4,2,4)`` of a length ``logit_length[i]=9`` is also processed to ``(0,3,2,2)``, where ``C=5``. There exist other paths that are also aligned with ``G``, for instance, ``0,4,3,3,2,4,2,2,2``. Paths checked for alignment with a target ``label[:,i]`` must be of length ``logit_length[i] = L_i``. Compute probabilities of these aligned paths (alignments) as follows: .. math:: - + p(S) = \prod_{t=1}^{L_i} p_{i,t,ct} 3. Finally, compute negative log of summed up probabilities of all found alignments: .. math:: - + CTCLoss = - \ln \sum_{S} p(S) **Note 1**: This calculation scheme does not provide steps for optimal implementation and primarily serves for better explanation. @@ -50,7 +50,7 @@ Otherwise, the operation behaviour is undefined. Having log-probabilities for aligned paths, log of summed up probabilities for these paths can be computed as follows: .. math:: - + \ln(a + b) = \ln(a) + \ln(1 + \exp(\ln(b) - \ln(a))) **Attributes** @@ -100,7 +100,7 @@ Having log-probabilities for aligned paths, log of summed up probabilities for t .. 
code-block:: xml :force: - + @@ -118,7 +118,7 @@ Having log-probabilities for aligned paths, log of summed up probabilities for t 8 - < !-- blank_index value is: 120 --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/OneHot_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/OneHot_1.rst index 77631b7d1fa3f5..8e085896d9b05f 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/OneHot_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/OneHot_1.rst @@ -5,7 +5,7 @@ OneHot .. meta:: - :description: Learn about OneHot-1 - a sequence processing operation, which + :description: Learn about OneHot-1 - a sequence processing operation, which can be performed on four required input tensors. **Versioned name**: *OneHot-1* @@ -64,18 +64,18 @@ The types of input scalars ``on_value`` and ``off_value`` should match and be eq - < !-- indices value: [0, 3, 1, 2] --> + 4 - < !-- depth value: 3 --> + - < !-- on_value 1 --> + - < !-- off_value 2 --> + - < !-- output value # [[1, 2, 2], [2, 2, 2], [2, 1, 2], [2, 2, 1]] --> + 4 3 @@ -90,20 +90,20 @@ The types of input scalars ``on_value`` and ``off_value`` should match and be eq - < !-- indices value: [[0, 3, 1], [1, 2, 4]] --> + 2 3 - < !-- depth value: 3 --> + - < !-- on_value 1 --> + - < !-- off_value 0 --> + - < !-- output value: [[[1, 0, 0], [0, 0, 1], [0, 0, 0]], --> - 2 < !-- [[0, 0, 0], [1, 0, 0], [0, 1, 0]]] --> + + 2 3 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Reshape_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Reshape_1.rst index 6700be5536c1d6..064fcb92a9b95e 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Reshape_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Reshape_1.rst @@ -5,7 +5,7 @@ Reshape .. meta:: - :description: Learn about Reshape-1 - a shape manipulation operation, which + :description: Learn about Reshape-1 - a shape manipulation operation, which can be performed on two required input tensors. 
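The *OneHot-1* example values above can be checked with a small NumPy sketch. An out-of-range index (such as ``3`` when ``depth = 3``) selects no position, so the corresponding row is filled entirely with ``off_value``, which is why the second output row is ``[2, 2, 2]``. The helper below is illustrative only:

.. code-block:: py

   import numpy as np

   def one_hot(indices, depth, on_value, off_value):
       # Build the one-hot axis as the innermost dimension.
       indices = np.asarray(indices)
       out = np.full(indices.shape + (depth,), off_value)
       for idx in np.ndindex(indices.shape):
           if 0 <= indices[idx] < depth:
               out[idx + (int(indices[idx]),)] = on_value
       return out

   print(one_hot([0, 3, 1, 2], depth=3, on_value=1, off_value=2))
   # [[1 2 2]
   #  [2 2 2]
   #  [2 1 2]
   #  [2 2 1]]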
**Versioned name**: *Reshape-1* @@ -65,7 +65,7 @@ If ``special_zero`` is set to ``true`` index of ``0`` cannot be larger than the 0 - 2 < !--The tensor contains 2 elements: 0, 4 --> + 2 @@ -92,7 +92,7 @@ If ``special_zero`` is set to ``true`` index of ``0`` cannot be larger than the 24 - 3 < !--The tensor contains 3 elements: 0, -1, 4 --> + 3 @@ -119,7 +119,7 @@ If ``special_zero`` is set to ``true`` index of ``0`` cannot be larger than the 3 - 4 < !--The tensor contains 4 elements: 0, 0, 1, -1 --> + 4 @@ -147,7 +147,7 @@ If ``special_zero`` is set to ``true`` index of ``0`` cannot be larger than the 1 - 2 < !--The tensor contains 2 elements: -1, 0 --> + 2 @@ -173,7 +173,7 @@ If ``special_zero`` is set to ``true`` index of ``0`` cannot be larger than the 1 - 2 < !--The tensor contains 2 elements: 0, -1 --> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_1.rst index 3b98d8362d181f..176e79a927c0bd 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_1.rst @@ -5,7 +5,7 @@ ShapeOf .. meta:: - :description: Learn about ShapeOf-1 - a shape manipulation operation, which + :description: Learn about ShapeOf-1 - a shape manipulation operation, which can be performed on an arbitrary input tensor. **Versioned name**: *ShapeOf-1* @@ -39,7 +39,7 @@ ShapeOf - < !-- output value is: [2,3,224,224]--> + 4 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_3.rst index 7e68b4447beeaf..bb64e7517f60de 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_3.rst @@ -5,7 +5,7 @@ ShapeOf .. meta:: - :description: Learn about ShapeOf-3 - a shape manipulation operation, which + :description: Learn about ShapeOf-3 - a shape manipulation operation, which can be performed on an arbitrary input tensor. **Versioned name**: *ShapeOf-3* @@ -54,7 +54,7 @@ ShapeOf - < !-- output value is: [2,3,224,224]--> + 4 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Squeeze_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Squeeze_1.rst index e8933f2aceb603..9d426f88e5ff83 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Squeeze_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Squeeze_1.rst @@ -5,7 +5,7 @@ Squeeze .. meta:: - :description: Learn about Squeeze-1 - a shape manipulation operation, which + :description: Learn about Squeeze-1 - a shape manipulation operation, which can be performed on one required and one optional input tensor. 
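The ``0`` and ``-1`` entries used throughout the *Reshape-1* examples resolve mechanically: with ``special_zero`` set to ``true``, ``0`` copies the input dimension at the same index, and a single ``-1`` is inferred from the remaining element count. A sketch of that resolution (not the reference implementation):

.. code-block:: py

   import numpy as np

   def reshape_pattern(data, pattern, special_zero):
       out = list(pattern)
       for i, p in enumerate(out):
           if p == 0 and special_zero:
               out[i] = data.shape[i]                     # copy dimension i from the input
       if -1 in out:
           known = int(np.prod([d for d in out if d != -1]))
           out[out.index(-1)] = data.size // known        # infer the remaining extent
       return data.reshape(out)

   x = np.zeros((2, 5, 5, 24))
   print(reshape_pattern(x, [0, -1, 4], special_zero=True).shape)   # (2, 150, 4)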
**Versioned name**: *Squeeze-1* @@ -55,7 +55,7 @@ Squeeze - 2 < !-- value [0, 2] --> + 2 @@ -79,7 +79,7 @@ Squeeze - 1 < !-- value is [0] --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Unsqueeze_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Unsqueeze_1.rst index 62a908b5296dac..5bab816f35f0bc 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Unsqueeze_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Unsqueeze_1.rst @@ -5,7 +5,7 @@ Unsqueeze .. meta:: - :description: Learn about Unsqueeze-1 - a shape manipulation operation, which + :description: Learn about Unsqueeze-1 - a shape manipulation operation, which can be performed on two required input tensors. **Versioned name**: *Unsqueeze-1* @@ -48,7 +48,7 @@ Unsqueeze - 2 < !-- value is [0, 3] --> + 2 @@ -74,7 +74,7 @@ Unsqueeze - 1 < !-- value is [0] --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/DFT_7.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/DFT_7.rst index 90a42a0ac1fdbb..b32b1eb4c23729 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/DFT_7.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/DFT_7.rst @@ -5,7 +5,7 @@ DFT .. meta:: - :description: Learn about DFT-7 - a signal processing operation, which can be + :description: Learn about DFT-7 - a signal processing operation, which can be performed on two required and one optional input tensor. **Versioned name**: *DFT-7* @@ -23,14 +23,14 @@ No attributes available. * **1**: ``data`` - Input tensor of type *T* with data for the DFT transformation. Type of elements is any supported floating-point type. The last dimension of the input tensor must be equal to 2, that is the input tensor shape must have the form ``[D_0, D_1, ..., D_{N-1}, 2]``, representing the real and imaginary components of complex numbers in ``[:, ..., :, 0]`` and in ``[:, ..., :, 1]`` correspondingly. **Required.** * **2**: ``axes`` - 1D tensor of type *T_IND* specifying dimension indices where DFT is applied, and ``axes`` is any unordered list of indices of different dimensions of input tensor, for example, ``[0, 4]``, ``[4, 0]``, ``[4, 2, 1]``, ``[1, 2, 3]``, ``[-3, 0, -2]``. These indices should be integers from ``-(r - 1)`` to ``(r - 2)`` inclusively, where ``r = rank(data)``. A negative axis ``a`` is interpreted as an axis ``r - 1 + a``. Other dimensions do not change. The order of elements in ``axes`` attribute matters, and is mapped directly to elements in the third input ``signal_size``. **Required.** - .. note:: - + .. note:: + The following constraint must be satisfied: ``rank(data) >= len(axes) + 1 and input_shape[-1] == 2 and (rank(data) - 1) not in axes and (-1) not in axes``. * **3**: ``signal_size`` - 1D tensor of type *T_SIZE* describing signal size with respect to axes from the input ``axes``. If ``signal_size[i] == -1``, then DFT is calculated for full size of the axis ``axes[i]``. If ``signal_size[i] > input_shape[: r - 1][axes[i]]``, then input data are zero-padded with respect to the axis ``axes[i]`` at the end. Finally, ``signal_size[i] < input_shape[: r - 1][axes[i]]``, then input data are trimmed with respect to the axis ``axes[i]``. 
More precisely, if ``signal_size[i] < input_shape[: r - 1][axes[i]]``, the slice ``0: signal_size[i]`` of the axis ``axes[i]`` is considered. Optional, with default value ```[input_shape[: r - 1][a] for a in axes]```. - .. note:: - + .. note:: + If the input ``signal_size`` is specified, the size of ``signal_size`` must be the same as the size of ``axes``. **Outputs** @@ -52,7 +52,7 @@ Let ``D`` be an input tensor ``A``, taking into account the ``signal_size``, and Next, put .. math:: - + X[j_0,\dots,j_{k-1},j_k,\dots,j_{k+r-1}]=D[j_0,\dots,j_{k-1},j_k,\dots,j_{k+r-1},0]+iD[j_0,\dots,j_{k-1},j_k,\dots,j_{k+r-1},1] for all indices ``j_0,...,j_{k+r-1}``, where ``i`` is an imaginary unit, that is ``X`` is a complex tensor. @@ -60,17 +60,17 @@ for all indices ``j_0,...,j_{k+r-1}``, where ``i`` is an imaginary unit, that is Then the discrete Fourier transform is the tensor :math:`Y` of the same shape as the tensor :math:`X`, such that .. math:: - + Y[n_0,\dots,n_{k-1},m_0,\dots,m_{r-1}]=\sum\limits_{j_0=0}^{S_0-1}\cdots\sum\limits_{j_{r-1}=0}^{S_{r-1}-1}X[n_0,\dots,n_{k-1},j_0,\dots,j_{r-1}]\exp\left(-2\pi i\sum\limits_{q=0}^{r-1}\frac{m_qj_q}{S_q}\right) for all indices ``n_0,...,n_{k-1}``, ``m_0,...,m_{r-1}``, and the result of the operation is the real tensor ``Z`` with the shape ``[B_0, ..., B_{k-1}, S_0, ..., S_{r-1}, 2]`` and such that .. math:: - + Z[n_0,\dots,n_{k-1},m_0,\dots,m_{r-1}, 0]=Re Y[n_0,\dots,n_{k-1},m_0,\dots,m_{r-1}], .. math:: - + Z[n_0,\dots,n_{k-1},m_0,\dots,m_{r-1}, 1]=Im Y[n_0,\dots,n_{k-1},m_0,\dots,m_{r-1}]. Calculations for the generic case of axes and signal sizes are similar. @@ -81,7 +81,7 @@ There is no ``signal_size`` input (4D input tensor): .. code-block:: xml :force: - + @@ -91,7 +91,7 @@ There is no ``signal_size`` input (4D input tensor): 2 - 2 < !-- axes input contains [1, 2] --> + 2 @@ -107,7 +107,7 @@ There is no ``signal_size`` input (3D input tensor): .. code-block:: xml :force: - + @@ -116,7 +116,7 @@ There is no ``signal_size`` input (3D input tensor): 2 - 2 < !-- axes input contains [0, 1] --> + 2 @@ -131,7 +131,7 @@ There is ``signal_size`` input (4D input tensor): .. code-block:: xml :force: - + @@ -141,10 +141,10 @@ There is ``signal_size`` input (4D input tensor): 2 - 2 < !-- axes input contains [1, 2] --> + 2 - 2 < !-- signal_size input contains [512, 100] --> + 2 @@ -160,7 +160,7 @@ There is ``signal_size`` input (3D input tensor): .. code-block:: xml :force: - + @@ -169,10 +169,10 @@ There is ``signal_size`` input (3D input tensor): 2 - 2 < !-- axes input contains [0, 1] --> + 2 - 2 < !-- signal_size input contains [512, 100] --> + 2 @@ -187,7 +187,7 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso .. code-block:: xml :force: - + @@ -198,10 +198,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 1, 2] --> + 3 - 3 < !-- signal_size input contains [170, -1, 1024] --> + 3 @@ -218,7 +218,7 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso .. 
code-block:: xml :force: - + @@ -229,10 +229,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 0, 2] --> + 3 - 3 < !-- signal_size input contains [258, -1, 2056] --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IDFT_7.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IDFT_7.rst index e3651b2c44dc58..0621a323428543 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IDFT_7.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IDFT_7.rst @@ -5,7 +5,7 @@ Inverse Discrete Fourier Transformation (IDFT) .. meta:: - :description: Learn about IDFT-7 - a signal processing operation, which can be + :description: Learn about IDFT-7 - a signal processing operation, which can be performed on two required and one optional input tensor. **Versioned name**: *IDFT-7* @@ -22,16 +22,16 @@ No attributes available. * **1**: ``data`` - Input tensor of type *T* with data for the IDFT transformation. Type of elements is any supported floating-point type. The last dimension of the input tensor must be equal to 2, that is the input tensor shape must have the form ``[D_0, D_1, ..., D_{N-1}, 2]``, representing the real and imaginary components of complex numbers in ``[:, ..., :, 0]`` and in ``[:, ..., :, 1]`` correspondingly. **Required.** * **2**: ``axes`` - 1D tensor of type *T_IND* specifying dimension indices where IDFT is applied, and ``axes`` is any unordered list of indices of different dimensions of input tensor, for example, ``[0, 4]``, ``[4, 0]``, ``[4, 2, 1]``, ``[1, 2, 3]``, ``[-3, 0, -2]``. These indices should be integers from ``-(r - 1)`` to ``(r - 2)`` inclusively, where ``r = rank(data)``. A negative axis ``a`` is interpreted as an axis ``r - 1 + a``. Other dimensions do not change. The order of elements in ``axes`` attribute matters, and is mapped directly to elements in the third input ``signal_size``. **Required.** -* +* .. note:: - + The following constraint must be satisfied: ``rank(data) >= len(axes) + 1 and input_shape[-1] == 2 and (rank(data) - 1) not in axes and (-1) not in axes``. * **3**: ``signal_size`` - 1D tensor of type *T_SIZE* describing signal size with respect to axes from the input ``axes``. If ``signal_size[i] == -1``, then IDFT is calculated for full size of the axis ``axes[i]``. If ``signal_size[i] > input_shape[: r - 1][axes[i]]``, then input data are zero-padded with respect to the axis ``axes[i]`` at the end. Finally, if ``signal_size[i] < input_shape[: r - 1][axes[i]]``, then input data are trimmed with respect to the axis ``axes[i]``. More precisely, if ``signal_size[i] < input_shape[: r - 1][axes[i]]``, the slice ``0: signal_size[i]`` of the axis ``axes[i]`` is considered. Optional, with default value ``[input_shape[: r - 1][a] for a in axes]``. -* +* .. note:: - + If the input ``signal_size`` is specified, then the size of ``signal_size`` must be the same as the size of ``axes``. **Outputs** @@ -52,7 +52,7 @@ For simplicity, assume that an input tensor ``A`` has the shape ``[B_0, ..., B_{ Let ``D`` be an input tensor ``A``, taking into account the ``signal_size``, and, hence, ``D`` has the shape ``[B_0, ..., B_{k-1}, S_0, ..., S_{r-1}, 2]``. -Next, put +Next, put .. 
math:: @@ -94,7 +94,7 @@ There is no ``signal_size`` input (4D input tensor): 2 - 2 < !-- [1, 2] --> + 2  @@ -120,7 +120,7 @@ There is no ``signal_size`` input (3D input tensor): 2 - 2 < !-- [0, 1] --> + 2  @@ -147,10 +147,10 @@ There is ``signal_size`` input (4D input tensor): 2 - 2 < !-- [1, 2] --> + 2  - 2 < !-- [512, 100] --> + 2  @@ -177,10 +177,10 @@ There is ``signal_size`` input (3D input tensor): 2 - 2 < !-- [0, 1] --> + 2  - 2 < !-- [512, 100] --> + 2  @@ -208,10 +208,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 1, 2] --> + 3  - 3 < !-- signal_size input contains [170, -1, 1024] --> + 3  @@ -241,10 +241,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 0, 2] --> + 3  - 3 < !-- signal_size input contains [258, -1, 2056] --> + 3  diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IRDFT_9.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IRDFT_9.rst index 7b8804cd841871..d067654709da10 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IRDFT_9.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IRDFT_9.rst @@ -5,7 +5,7 @@ Inverse Discrete complex-to-real Fourier Transformation (IRDFT) .. meta:: - :description: Learn about IRDFT-9 - a signal processing operation, which can be + :description: Learn about IRDFT-9 - a signal processing operation, which can be performed on two required and one optional input tensor. **Versioned name**: *IRDFT-9* @@ -22,18 +22,18 @@ No attributes available. * **1**: ``data`` - Input tensor of type *T* with data for the IRDFT transformation. The last dimension of the input tensor must be equal to 2, that is the input tensor shape must have the form ``[D_0, D_1, ..., D_{N-1}, 2]``, representing the real and imaginary components of complex numbers in ``[:, ..., :, 0]`` and in ``[:, ..., :, 1]`` correspondingly. **Required.** * **2**: ``axes`` - 1D tensor of type *T_IND* specifying dimension indices where IRDFT is applied, and ``axes`` is any unordered list of indices of different dimensions of the input tensor, for example, ``[0, 4]``, ``[4, 0]``, ``[4, 2, 1]``, ``[1, 2, 3]``, ``[-3, 0, -2]``. These indices should be integers from ``-(r - 1)`` to ``(r - 2)`` inclusively, where ``r = rank(data)``. A negative axis ``a`` is interpreted as an axis ``r - 1 + a``. Other dimensions do not change. The order of elements in the ``axes`` attribute matters, and is mapped directly to elements in the third input ``signal_size``. **Required.** -* +* .. note:: - + The following constraint must be satisfied: ``rank(data) >= len(axes) + 1 and (rank(data) - 1) not in axes and (-1) not in axes``. * **3**: ``signal_size`` - 1D tensor of type *T_SIZE* describing signal size with respect to axes from the input ``axes``. If ``signal_size[i] == -1``, then IRDFT is calculated for full size of the axis ``axes[i]``. If ``signal_size[i] > data_shape[: r - 1][axes[i]]``, then input data is zero-padded with respect to the axis ``axes[i]`` at the end. Finally, if ``signal_size[i] < data_shape[: r - 1][axes[i]]``, then input data is trimmed with respect to the axis ``axes[i]``. More precisely, if ``signal_size[i] < data_shape[: r - 1][axes[i]]``, the slice ``0: signal_size[i]`` of the axis ``axes[i]`` is considered. 
Optionally, with default value ``[data_shape[: r - 1][a] for a in axes]``. -* +* .. note:: - + If the input ``signal_size`` is specified, then the size of ``signal_size`` must be the same as the size of ``axes``. @@ -110,7 +110,7 @@ There is no ``signal_size`` input (4D input tensor): 2 - 2 < !-- [1, 2] --> + 2  @@ -135,7 +135,7 @@ There is no ``signal_size`` input (3D input tensor): 2 - 2 < !-- [0, 1] --> + 2  @@ -160,10 +160,10 @@ There is ``signal_size`` input (4D input tensor): 2 - 2 < !-- [1, 2] --> + 2  - 2 < !-- [512, 100] --> + 2  @@ -189,10 +189,10 @@ There is ``signal_size`` input (3D input tensor): 2 - 2 < !-- [0, 1] --> + 2  - 2 < !-- [512, 100] --> + 2  @@ -219,10 +219,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 1, 2] --> + 3  - 3 < !-- signal_size input contains [170, -1, 1024] --> + 3  @@ -250,10 +250,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 0, 2] --> + 3  - 3 < !-- signal_size input contains [258, -1, 2056] --> + 3  diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/RDFT_9.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/RDFT_9.rst index 325a34bcdb55b5..14270fa42458ca 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/RDFT_9.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/RDFT_9.rst @@ -5,7 +5,7 @@ Discrete Fourier Transformation for real-valued input (RDFT) .. meta:: - :description: Learn about RDFT-9 - a signal processing operation, which can be + :description: Learn about RDFT-9 - a signal processing operation, which can be performed on two required and one optional input tensor. **Versioned name**: *RDFT-9* @@ -85,7 +85,7 @@ There is no ``signal_size`` input (3D input tensor): 320 - 2 < !-- axes input contains [1, 2] --> + 2 @@ -110,7 +110,7 @@ There is no ``signal_size`` input (2D input tensor): 320 - 2 < !-- axes input contains [0, 1] --> + 2 @@ -136,10 +136,10 @@ There is ``signal_size`` input (3D input tensor): 320 - 2 < !-- axes input contains [1, 2] --> + 2 - 2 < !-- signal_size input contains [512, 100] --> + 2 @@ -163,10 +163,10 @@ There is ``signal_size`` input (2D input tensor): 320 - 2 < !-- axes input contains [0, 1] --> + 2 - 2 < !-- signal_size input contains [512, 100] --> + 2 @@ -192,10 +192,10 @@ There is ``signal_size`` input (4D input tensor, ``-1`` in ``signal_size``, unso 320 - 3 < !-- axes input contains [3, 1, 2] --> + 3 - 3 < !-- signal_size input contains [170, -1, 1024] --> + 3 @@ -222,10 +222,10 @@ There is ``signal_size`` input (4D input tensor, ``-1`` in ``signal_size``, unso 320 - 3 < !-- axes input contains [3, 0, 2] --> + 3 - 3 < !-- signal_size input contains [258, -1, 2056] --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MatrixNMS_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MatrixNMS_8.rst index c4d7cefdd3687c..881003047efe38 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MatrixNMS_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MatrixNMS_8.rst @@ -5,8 +5,8 @@ MatrixNonMaxSuppression .. 
meta::
-   :description: Learn about MatrixNonMaxSuppression-8 - a sorting and
-                 maximization operation, which can be performed on two required
+   :description: Learn about MatrixNonMaxSuppression-8 - a sorting and
+                 maximization operation, which can be performed on two required
                  input tensors.

 **Versioned name**: *MatrixNonMaxSuppression-8*
@@ -176,7 +176,7 @@ When there is no box selected, ``selected_num`` is filled with ``0``. ``selected
-            <dim>-1</dim> <!-- "-1" means a undefined dimension calculated during the model inference -->
+            <dim>-1</dim>
             <dim>6</dim>
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_8.rst
index cd992042f1f534..5d0fe3c3c3a518 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_8.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_8.rst
@@ -5,8 +5,8 @@ MulticlassNonMaxSuppression
 .. meta::
-   :description: Learn about MulticlassNonMaxSuppression-8 - a sorting and
-                 maximization operation, which can be performed on two required
+   :description: Learn about MulticlassNonMaxSuppression-8 - a sorting and
+                 maximization operation, which can be performed on two required
                  input tensors.

 **Versioned name**: *MulticlassNonMaxSuppression-8*
@@ -168,7 +168,7 @@ When there is no box selected, ``selected_num`` is filled with ``0``. ``selected
-            <dim>-1</dim> <!-- "-1" means a undefined dimension calculated during the model inference -->
+            <dim>-1</dim>
             <dim>6</dim>
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_9.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_9.rst
index 7caccb99ac3e6d..ae8187d60598b0 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_9.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_9.rst
@@ -2,8 +2,8 @@
 .. meta::
-   :description: Learn about MulticlassNonMaxSuppression-8 - a sorting and
-                 maximization operation, which can be performed on two or three
+   :description: Learn about MulticlassNonMaxSuppression-9 - a sorting and
+                 maximization operation, which can be performed on two or three
                  required input tensors.

 **Versioned name**: *MulticlassNonMaxSuppression-9*
@@ -174,7 +174,7 @@ When there is no box selected, ``selected_num`` is filled with ``0``. 
``selected - -1 < !-- "-1" means a undefined dimension calculated during the model inference --> + -1 6 @@ -211,7 +211,7 @@ Another possible example with 3 inputs could be like: - -1 < !-- "-1" means a undefined dimension calculated during the model inference --> + -1 6 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NMSRotated_13.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NMSRotated_13.rst index 0400d62c414a6f..256e3ad76f637f 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NMSRotated_13.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NMSRotated_13.rst @@ -131,11 +131,11 @@ Plugins that do not support dynamic output tensors produce ``selected_indices`` - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_4.rst index 34102251ddfefc..161a68b255b1c9 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_4.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_4.rst @@ -5,8 +5,8 @@ NonMaxSuppression .. meta:: - :description: Learn about NonMaxSuppression-4 - a sorting and maximization - operation, which can be performed on two required and three + :description: Learn about NonMaxSuppression-4 - a sorting and maximization + operation, which can be performed on two required and three optional input tensors. **Versioned name**: *NonMaxSuppression-4* @@ -108,7 +108,7 @@ The output tensor is filled with -1s for output tensor elements if the total num - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_5.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_5.rst index 21089de7445b14..f0756ca40d0b2a 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_5.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_5.rst @@ -5,8 +5,8 @@ NonMaxSuppression .. meta:: - :description: Learn about NonMaxSuppression-5 - a sorting and maximization - operation, which can be performed on two required and four + :description: Learn about NonMaxSuppression-5 - a sorting and maximization + operation, which can be performed on two required and four optional input tensors. 
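The *NonMaxSuppression* variants in this group all build on the same greedy loop: visit boxes in descending score order and keep a candidate only if its IoU with every previously kept box does not exceed ``iou_threshold``. A single-class sketch for corner-format boxes, with illustrative names and no score threshold:

.. code-block:: py

   import numpy as np

   def nms(boxes, scores, iou_threshold=0.5):
       # boxes: (N, 4) as [y1, x1, y2, x2]; returns indices of kept boxes.
       def iou(a, b):
           y1, x1 = np.maximum(a[:2], b[:2])
           y2, x2 = np.minimum(a[2:], b[2:])
           inter = max(0.0, y2 - y1) * max(0.0, x2 - x1)
           area = lambda box: (box[2] - box[0]) * (box[3] - box[1])
           return inter / (area(a) + area(b) - inter)

       keep = []
       for i in np.argsort(-np.asarray(scores)):
           if all(iou(boxes[j], boxes[i]) <= iou_threshold for j in keep):
               keep.append(int(i))
       return keep

   boxes = np.array([[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, 10, 1, 11]], dtype=np.float32)
   print(nms(boxes, [0.9, 0.8, 0.7]))   # [0, 2]; box 1 overlaps box 0 too heavily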
**Versioned name**: *NonMaxSuppression-5* @@ -120,11 +120,11 @@ Plugins which do not support dynamic output tensors produce ``selected_indices`` - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_9.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_9.rst index 6dece225f34b95..54386e7fb41529 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_9.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_9.rst @@ -5,8 +5,8 @@ NonMaxSuppression .. meta:: - :description: Learn about NonMaxSuppression-9 - a sorting and maximization - operation, which can be performed on two required and four + :description: Learn about NonMaxSuppression-9 - a sorting and maximization + operation, which can be performed on two required and four optional input tensors. **Versioned name**: *NonMaxSuppression-9* @@ -120,11 +120,11 @@ Plugins which do not support dynamic output tensors produce ``selected_indices`` - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagOffsetsSum_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagOffsetsSum_3.rst index 722da5f3f8c4cc..0a0cb67afb0f06 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagOffsetsSum_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagOffsetsSum_3.rst @@ -5,7 +5,7 @@ EmbeddingBagOffsetsSum .. meta:: - :description: Learn about EmbeddingBagOffsetsSum-3 - a sparse operation, which + :description: Learn about EmbeddingBagOffsetsSum-3 - a sparse operation, which can be performed on three required and two optional input tensors. **Versioned name**: *EmbeddingBagOffsetsSum-3* @@ -38,26 +38,26 @@ EmbeddingBagOffsetsSum **Example** .. code-block:: cpp - + - < !-- emb_table value is: [[-0.2, -0.6], [-0.1, -0.4], [-1.9, -1.8], [-1., 1.5], [ 0.8, -0.7]] --> + 5 2 - < !-- indices value is: [0, 2, 3, 4] --> + 4 - < !-- offsets value is: [0, 2, 2] - 3 "bags" containing [2,0,4-2] elements, second "bag" is empty --> + 3 - < !-- default_index value is: 0 --> - < !-- per_sample_weigths value is: [0.5, 0.5, 0.5, 0.5] --> + + 4 - < !-- output value is: [[-1.05, -1.2], [-0.2, -0.6], [-0.1, 0.4]] --> + 3 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagPackedSum_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagPackedSum_3.rst index d389446505409d..9ef623ca7755eb 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagPackedSum_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagPackedSum_3.rst @@ -5,7 +5,7 @@ EmbeddingBagPackedSum .. 
meta:: - :description: Learn about EmbeddingBagPackedSum-3 - a sparse operation, which + :description: Learn about EmbeddingBagPackedSum-3 - a sparse operation, which can be performed on two required and one optional input tensor. **Versioned name**: *EmbeddingBagPackedSum-3* @@ -36,24 +36,24 @@ EmbeddingBagPackedSum **Example** .. code-block:: cpp - + - < !-- emb_table value is: [[-0.2, -0.6], [-0.1, -0.4], [-1.9, -1.8], [-1., 1.5], [ 0.8, -0.7]] --> + 5 2 - < !-- indices value is: [[0, 2], [1, 2], [3, 4]] --> + 3 2 - < !-- per_sample_weigths value is: [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]] --> + 3 2 - < !-- output value is: [[-1.05, -1.2], [-1., -1.1], [-0.1, 0.4]] --> + 3 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingSegmentsSum_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingSegmentsSum_3.rst index 583477506df52c..20ae7b30675361 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingSegmentsSum_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingSegmentsSum_3.rst @@ -5,7 +5,7 @@ EmbeddingSegmentsSum .. meta:: - :description: Learn about EmbeddingSegmentsSum-3 - a sparse operation, which + :description: Learn about EmbeddingSegmentsSum-3 - a sparse operation, which can be performed on four required and two optional input tensors. **Versioned name**: *EmbeddingSegmentsSum-3* @@ -39,27 +39,27 @@ EmbeddingSegmentsSum **Example** .. code-block:: cpp - + - < !-- emb_table value is: [[-0.2, -0.6], [-0.1, -0.4], [-1.9, -1.8], [-1., 1.5], [ 0.8, -0.7]] --> + 5 2 - < !-- indices value is: [0, 2, 3, 4] --> + 4 - < !-- segment_ids value is: [0, 0, 2, 2] - second segment is empty --> + 4 - < !-- num_segments value is: 3 --> - < !-- default_index value is: 0 --> - < !-- per_sample_weigths value is: [0.5, 0.5, 0.5, 0.5] --> + + + 4 - < !-- output value is: [[-1.05, -1.2], [-0.2, -0.6], [-0.1, 0.4]] --> + 3 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/ConvertLike_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/ConvertLike_1.rst index e93fe6cd59878a..3f2cf1d356de5a 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/ConvertLike_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/ConvertLike_1.rst @@ -5,7 +5,7 @@ ConvertLike .. meta:: - :description: Learn about ConvertLike-1 - an element-wise, type conversion + :description: Learn about ConvertLike-1 - an element-wise, type conversion operation, which can be performed two required input tensors. **Versioned name**: *ConvertLike-1* @@ -45,19 +45,19 @@ where ``a`` and ``b`` correspond to ``data`` and ``like`` input tensors, respect **Example** .. 
code-block:: cpp - + - < !-- type: int32 --> + 256 56 - < !-- type: float32 --> - 3 < !-- any data --> + + 3 - < !-- result type: float32 --> + 256 56 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/Convert_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/Convert_1.rst index 3f209cc5168377..50c99c14d0b878 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/Convert_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/Convert_1.rst @@ -5,7 +5,7 @@ Convert .. meta:: - :description: Learn about Convert-1 - an element-wise, type conversion + :description: Learn about Convert-1 - an element-wise, type conversion operation, which can be performed on a single input tensor. **Versioned name**: *Convert-1* @@ -23,7 +23,7 @@ Conversion of negative signed integer to unsigned integer value happens in accor The result of unsupported conversions is undefined. Output elements are represented as follows: .. math:: - + o_{i} = Convert(a_{i}) where ``a`` corresponds to the input tensor. @@ -52,17 +52,17 @@ where ``a`` corresponds to the input tensor. **Example** .. code-block:: cpp - + - < !-- type: i32 --> + 256 56 - < !-- result type: f32 --> + 256 56 diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst index c92e716f888d58..92683e866d6448 100644 --- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst +++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst @@ -148,16 +148,32 @@ throughout the OpenVINO documentation. Step 2: Configure the Environment +++++++++++++++++++++++++++++++++ -You must update several environment variables before you can compile and run OpenVINO™ applications. Open the Command Prompt, and run the ``setupvars.bat`` batch file to temporarily set your environment variables. If your ```` is not ``C:\Program Files (x86)\Intel\openvino_2023``, use the correct directory instead. +You must update several environment variables before you can compile and run OpenVINO™ applications. -.. code-block:: sh +.. tab-set:: + + .. tab-item:: PowerShell + :sync: powershell + + Open the PowerShell, and run the ``setupvars.ps1`` file to temporarily set your environment variables. + + .. code-block:: sh - "C:\Program Files (x86)\Intel\openvino_2023\setupvars.bat" + . /setupvars.ps1 + .. tab-item:: Command Prompt + :sync: cmd -.. important:: + Open the Command Prompt, and run the ``setupvars.bat`` batch file to temporarily set your environment variables. + If your ```` is not ``C:\Program Files (x86)\Intel\openvino_2023``, use the correct directory instead. + + .. code-block:: sh + + "C:\Program Files (x86)\Intel\openvino_2023\setupvars.bat" + + .. important:: - The above command must be re-run every time a new Command Prompt window is opened. + You need to run the command for each new Command Prompt window. .. 
diff --git a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst
index c92e716f888d58..92683e866d6448 100644
--- a/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst
+++ b/docs/articles_en/get_started/installing-openvino-overview/installing-openvino-windows-header/installing-openvino-from-archive-windows.rst
@@ -148,16 +148,32 @@ throughout the OpenVINO documentation.
 Step 2: Configure the Environment
 +++++++++++++++++++++++++++++++++
 
-You must update several environment variables before you can compile and run OpenVINO™ applications. Open the Command Prompt, and run the ``setupvars.bat`` batch file to temporarily set your environment variables. If your ``<INSTALL_DIR>`` is not ``C:\Program Files (x86)\Intel\openvino_2023``, use the correct directory instead.
+You must update several environment variables before you can compile and run OpenVINO™ applications.
 
-.. code-block:: sh
+.. tab-set::
+
+   .. tab-item:: PowerShell
+      :sync: powershell
+
+      Open the PowerShell, and run the ``setupvars.ps1`` file to temporarily set your environment variables.
+
+      .. code-block:: sh
 
-   "C:\Program Files (x86)\Intel\openvino_2023\setupvars.bat"
+         . <INSTALL_DIR>/setupvars.ps1
 
+   .. tab-item:: Command Prompt
+      :sync: cmd
 
-.. important::
+      Open the Command Prompt, and run the ``setupvars.bat`` batch file to temporarily set your environment variables.
+      If your ``<INSTALL_DIR>`` is not ``C:\Program Files (x86)\Intel\openvino_2023``, use the correct directory instead.
+
+      .. code-block:: sh
+
+         "C:\Program Files (x86)\Intel\openvino_2023\setupvars.bat"
+
+   .. important::
 
-   The above command must be re-run every time a new Command Prompt window is opened.
+      You need to run the command for each new Command Prompt window.
 
 .. note::
 
diff --git a/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst b/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst
index 4d05da628df4fb..0725be596117d4 100644
--- a/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst
+++ b/docs/articles_en/learn_openvino/openvino_samples/get_started_demos.rst
@@ -139,13 +139,29 @@ Instructions below show how to build sample applications with CMake. If you are
 
    If you want to use Microsoft Visual Studio 2019, you are required to install CMake 3.14 or higher.
 
-  To build the C or C++ sample applications on Windows, go to the ``<INSTALL_DIR>\samples\c`` or ``<INSTALL_DIR>\samples\cpp`` directory, respectively, and run the ``build_samples_msvc.bat`` batch file:
+  You can build the C or C++ sample applications on Windows with either PowerShell or Command Prompt.
 
-  .. code-block:: sh
+  .. tab-set::
+
+     .. tab-item:: PowerShell
+        :sync: powershell
+
+        To build Samples with PowerShell, run the following command:
+
+        .. code-block:: sh
+
+           & <INSTALL_DIR>/samples/cpp/build_samples.ps1
+
+     .. tab-item:: Command Prompt
+        :sync: cmd
+
+        To build Samples with CMD, go to the ``<INSTALL_DIR>\samples\c`` or ``<INSTALL_DIR>\samples\cpp`` directory, respectively, and run the ``build_samples_msvc.bat`` batch file:
 
-     build_samples_msvc.bat
+        .. code-block:: sh
 
-  By default, the script automatically detects the highest Microsoft Visual Studio version installed on the machine and uses it to create and build a solution for a sample code
+           build_samples_msvc.bat
+
+  By default, the script automatically detects the highest Microsoft Visual Studio version installed on the system and uses it to create and build a solution for a sample code
 
 Once the build is completed, you can find sample binaries in the following folders:
 
@@ -295,6 +311,14 @@ To run the code sample with an input image using the IR model:
 
       source <INSTALL_DIR>/setupvars.sh
 
+.. note::
+
+   OpenVINO environment variables can be set up by running the following command in PowerShell:
+
+   .. code-block:: sh
+
+      . <INSTALL_DIR>/setupvars.ps1
+
 2. Go to the code samples release directory created when you built the samples earlier:
 
    .. tab-set::
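The sample flows above, and the integration guide updated in the next diff below, all end in the same runtime calls. For reference, a minimal Python pipeline sketch (the model path, device name, and input shape are placeholders, not part of this change):

```python
import numpy as np
import openvino as ov

core = ov.Core()
model = core.read_model("model.xml")         # hypothetical IR file
compiled = core.compile_model(model, "CPU")  # device name is just an example

data = np.zeros((1, 3, 224, 224), dtype=np.float32)  # assumed input shape
results = compiled([data])                   # synchronous inference
print(results[compiled.output(0)].shape)
```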
diff --git a/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst
index 3d60fa22e0c512..7859f12d75d330 100644
--- a/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst
@@ -21,7 +21,7 @@ Integrate OpenVINO™ with Your Application
 
 Following these steps, you can implement a typical OpenVINO™ Runtime inference pipeline in your application. Before proceeding, make sure you have
-:doc:`installed OpenVINO Runtime ` and set environment variables (run ``<INSTALL_DIR>/setupvars.sh`` for Linux or ``setupvars.bat`` for Windows, otherwise, the ``OpenVINO_DIR`` variable won't be configured properly to pass ``find_package`` calls).
+:doc:`installed OpenVINO Runtime ` and set environment variables (run ``<INSTALL_DIR>/setupvars.sh`` for Linux, ``setupvars.ps1`` for Windows PowerShell, or ``setupvars.bat`` for Windows CMD), otherwise, the ``OpenVINO_DIR`` variable won't be configured properly to pass ``find_package`` calls).
 
 .. image:: _static/images/IMPLEMENT_PIPELINE_with_API_C.svg
 
diff --git a/docs/dev/installing.md b/docs/dev/installing.md
index b1be0f2345ca82..a85fc68d44fa5d 100644
--- a/docs/dev/installing.md
+++ b/docs/dev/installing.md
@@ -1,7 +1,7 @@
 # Installing
 
 Once the project is built you can install OpenVINO™ Runtime into custom location:
- 
+
 ```
 cmake --install <BUILDDIR> --prefix <INSTALLDIR>
 ```
 
@@ -93,12 +93,17 @@ cd <INSTALL_DIR>/samples/cpp
 ./build_samples.sh
 ```
 
-Windows:
+Windows Command Prompt:
 ```sh
 cd <INSTALL_DIR>\samples\cpp
 build_samples_msvc.bat
 ```
 
+Windows PowerShell:
+```sh
+& <INSTALL_DIR>/samples/cpp/build_samples.ps1
+```
+
 2. Install OpenVINO Development Tools
 
 > **NOTE**: To build OpenVINO Development Tools (Model Optimizer, Post-Training Optimization Tool, Model Downloader, and Open Model Zoo tools) wheel package locally you are required to use the CMake option: `-DENABLE_WHEEL=ON`.
 
@@ -165,11 +170,16 @@ Linux and macOS:
 ```
 source <INSTALL_DIR>/setupvars.sh
 ```
 
-Windows:
+Windows Command Prompt:
 ```bat
 <INSTALL_DIR>\setupvars.bat
 ```
 
+Windows PowerShell:
+```bat
+. <INSTALL_DIR>\setupvars.ps1
+```
+
 The following commands run the Image Classification Code Sample using the [`dog.bmp`](https://storage.openvinotoolkit.org/data/test_data/images/224x224/dog.bmp) file as an input image, the model in IR format from the `ir` directory, and on different hardware devices:
 
 Linux and macOS:
 
@@ -265,4 +275,3 @@ target_link_libraries(ov_c_app PRIVATE openvino::runtime::c)
 
 * [OpenVINO Developer Documentation](index.md)
 * [OpenVINO How to Build](build.md)
-
\ No newline at end of file
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 3d7e164d1f9516..67ed291ff62913 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -23,6 +23,4 @@ add_subdirectory(bindings)
 if(ENABLE_TESTS)
     add_subdirectory(core/tests)
     add_subdirectory(tests)
-else()
-    add_subdirectory(tests/ov_helpers/ov_models/ov_builders)
 endif()
 
diff --git a/src/bindings/c/docs/api_overview.md b/src/bindings/c/docs/api_overview.md
index 88f04b455a79ee..e0bbc5108cf769 100644
--- a/src/bindings/c/docs/api_overview.md
+++ b/src/bindings/c/docs/api_overview.md
@@ -27,7 +27,10 @@ Supported Python* versions:
 
 To configure the environment for the OpenVINO C* API, run:
 
 - On Ubuntu 20.04/22.04: `source <INSTALL_DIR>/setupvars.sh .`
-- On Windows 10/11: `<INSTALL_DIR>\setupvars.bat`
+- On Windows 10/11:
+
+  * `. <INSTALL_DIR>/setupvars.ps1` in PowerShell
+  * `<INSTALL_DIR>\setupvars.bat` in Command Prompt
 
 The script automatically detects latest installed C* version and configures required environment if the version is supported.
 
@@ -78,7 +81,7 @@ typedef struct {
 
 typedef struct ov_dimension {
     int64_t min;
-    
+
     int64_t max;
 } ov_dimension_t;
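The ``min``/``max`` pair in ``ov_dimension_t`` above is the C view of a dynamic dimension's bounds; the same concept in the Python binding, as a quick illustration (the values are arbitrary):

```python
import openvino as ov

dim = ov.Dimension(1, 8)               # dynamic dimension with bounds [1, 8]
print(dim.min_length, dim.max_length)  # 1 8
print(ov.Dimension())                  # fully dynamic dimension, prints "?"
```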
diff --git a/src/bindings/js/node/include/infer_request.hpp b/src/bindings/js/node/include/infer_request.hpp
index f39489b23ef4ad..483ac422e69426 100644
--- a/src/bindings/js/node/include/infer_request.hpp
+++ b/src/bindings/js/node/include/infer_request.hpp
@@ -102,7 +102,7 @@ class InferRequestWrap : public Napi::ObjectWrap<InferRequestWrap> {
     /** @brief Checks incoming Napi::Value and calls overloaded infer() method */
     Napi::Value infer_dispatch(const Napi::CallbackInfo& info);
 
-// 128760
+// 131123
 #ifndef _WIN32
     /** @brief Checks incoming Napi::Value and asynchronously returns the result of inference. */
     Napi::Value infer_async(const Napi::CallbackInfo& info);
 
diff --git a/src/bindings/js/node/src/infer_request.cpp b/src/bindings/js/node/src/infer_request.cpp
index 713c9b9e969113..b8e1f809af2e2a 100644
--- a/src/bindings/js/node/src/infer_request.cpp
+++ b/src/bindings/js/node/src/infer_request.cpp
@@ -198,7 +198,7 @@ void InferRequestWrap::infer(const Napi::Object& inputs) {
 Napi::Value InferRequestWrap::get_compiled_model(const Napi::CallbackInfo& info) {
     return CompiledModelWrap::wrap(info.Env(), _infer_request.get_compiled_model());
 }
-// 128760
+// 131123
 #ifndef _WIN32
 void FinalizerCallback(Napi::Env env, void* finalizeData, TsfnContext* context) {
     context->native_thread.join();
 
diff --git a/src/bindings/js/node/tests/infer_request.test.js b/src/bindings/js/node/tests/infer_request.test.js
index 159b34f938f6e2..bd05da9400c998 100644
--- a/src/bindings/js/node/tests/infer_request.test.js
+++ b/src/bindings/js/node/tests/infer_request.test.js
@@ -79,7 +79,7 @@ describe('InferRequest', () => {
     });
   });
 
-  // 128760
+  // 131123
   if (os.platform() !== 'win32') {
     it('Test inferAsync(inputData: { [inputName: string]: Tensor })', () => {
       inferRequestAsync.inferAsync({ data: tensor }).then(result => {
 
diff --git a/src/bindings/python/tests/test_graph/test_gather.py b/src/bindings/python/tests/test_graph/test_gather.py
index 3431bcb540e7a7..c1da1c36fff15a 100644
--- a/src/bindings/python/tests/test_graph/test_gather.py
+++ b/src/bindings/python/tests/test_graph/test_gather.py
@@ -2,7 +2,7 @@
 # Copyright (C) 2018-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-from openvino import Type
+from openvino import Tensor, Type
 import openvino.runtime.opset8 as ov
 import numpy as np
 import pytest
@@ -24,3 +24,26 @@ def test_gather(input_shape, indices, axis, expected_shape, batch_dims):
     assert node.get_output_size() == 1
     assert list(node.get_output_shape(0)) == expected_shape
     assert node.get_output_element_type(0) == Type.f32
+
+
+@pytest.mark.parametrize(("data_str", "input_shape", "indices", "axis", "expected_shape", "batch_dims"), [
+    (["Abc", " C de, Fghi.."], [2], [0], [0], [1], 0),
+    (["Abc", " C de, Fghi.."], [1, 2], [1], [1], [1], 1),
+])
+def test_gather_string(data_str, input_shape, indices, axis, expected_shape, batch_dims):
+    input_data = np.array(data_str).reshape(input_shape)
+    input_param = ov.parameter(input_shape, name="input_data", dtype=Type.string)
+
+    input_indices = np.array(indices, np.int32)
+    input_axis = np.array(axis, np.int32)
+
+    node = ov.gather(input_param, input_indices, input_axis, batch_dims)
+    out_tensor = Tensor(Type.string, input_shape)
+
+    assert node.get_type_name() == "Gather"
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.string
+
+    node.evaluate([out_tensor], [Tensor(input_data), Tensor(input_indices), Tensor(input_axis)])
+    assert np.array(data_str[indices[0]]) == out_tensor.str_data
diff --git a/src/common/low_precision_transformations/tests/eltwise_transformation_is_broadcasted_test.cpp b/src/common/low_precision_transformations/tests/eltwise_transformation_is_broadcasted_test.cpp
index b953ea7b14c9e5..0a3c75649376b0 100644
--- a/src/common/low_precision_transformations/tests/eltwise_transformation_is_broadcasted_test.cpp
+++ b/src/common/low_precision_transformations/tests/eltwise_transformation_is_broadcasted_test.cpp
@@ -5,8 +5,6 @@
 #include 
 #include "low_precision/eltwise_base_transformation.hpp"
 
-#include 
-
 using namespace ::testing;
 using
namespace std; diff --git a/src/common/low_precision_transformations/tests/precision_details_test.cpp b/src/common/low_precision_transformations/tests/precision_details_test.cpp index f0c158f4d00c64..e8a81f200cd1fa 100644 --- a/src/common/low_precision_transformations/tests/precision_details_test.cpp +++ b/src/common/low_precision_transformations/tests/precision_details_test.cpp @@ -7,8 +7,6 @@ #include "low_precision/layer_transformation.hpp" #include "low_precision/fake_quantize.hpp" -#include - using namespace ::testing; using namespace std; using namespace ov::pass::low_precision; diff --git a/src/common/low_precision_transformations/tests/unit/calclulate_levels_test.cpp b/src/common/low_precision_transformations/tests/unit/calclulate_levels_test.cpp index 6344f22f359796..35917c62491015 100644 --- a/src/common/low_precision_transformations/tests/unit/calclulate_levels_test.cpp +++ b/src/common/low_precision_transformations/tests/unit/calclulate_levels_test.cpp @@ -3,7 +3,6 @@ // #include -#include #include "low_precision/network_helper.hpp" diff --git a/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp b/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp index f9f447a6222e24..4d6b83c9908046 100644 --- a/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp +++ b/src/common/low_precision_transformations/tests/unit/data_precision_check.cpp @@ -4,7 +4,6 @@ #include #include -#include #include "low_precision/layer_transformation.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp b/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp index a9d353c7440610..2002a5fa5327f3 100644 --- a/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp +++ b/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp @@ -4,7 +4,6 @@ #include #include -#include #include "low_precision/layer_transformation.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/common/low_precision_transformations/tests/unit/reshape_test.cpp b/src/common/low_precision_transformations/tests/unit/reshape_test.cpp index 74e0e8224d75b8..045e9714166226 100644 --- a/src/common/low_precision_transformations/tests/unit/reshape_test.cpp +++ b/src/common/low_precision_transformations/tests/unit/reshape_test.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "low_precision/reshape.hpp" diff --git a/src/common/low_precision_transformations/tests/unit/update_reshape_values.cpp b/src/common/low_precision_transformations/tests/unit/update_reshape_values.cpp index 1b29f8f18a8c6f..56c605fda53ed4 100644 --- a/src/common/low_precision_transformations/tests/unit/update_reshape_values.cpp +++ b/src/common/low_precision_transformations/tests/unit/update_reshape_values.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 // -#include #include #include "low_precision/network_helper.hpp" diff --git a/src/core/dev_api/legacy_op_extension.hpp b/src/core/dev_api/legacy_op_extension.hpp deleted file mode 100644 index 514e31da604b4c..00000000000000 --- a/src/core/dev_api/legacy_op_extension.hpp +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/core/op_extension.hpp" - -namespace ov { - -/** 
@brief Class to distinguish legacy extension. */ -class OPENVINO_API LegacyOpExtension : public BaseOpExtension { -public: - ~LegacyOpExtension() override; -}; -} // namespace ov diff --git a/src/core/docs/api_details.md b/src/core/docs/api_details.md index 99b1397525bfb5..87383f1a15b7e3 100644 --- a/src/core/docs/api_details.md +++ b/src/core/docs/api_details.md @@ -1,7 +1,6 @@ # OpenVINO Core API OpenVINO Core API contains two folders: - * [ngraph](../include/ngraph/) - is a legacy API, this API is no longer being developed. Only aliases to new operations and operation sets extend this API. * [openvino](../include/openvino/) - current public API, this part is described below. ## Structure of Core API diff --git a/src/core/include/ngraph/partial_shape.hpp b/src/core/include/ngraph/partial_shape.hpp deleted file mode 100644 index 4e47d5551442fb..00000000000000 --- a/src/core/include/ngraph/partial_shape.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "ngraph/rank.hpp" -#include "ngraph/shape.hpp" -#include "openvino/core/partial_shape.hpp" -#include "openvino/op/util/attr_types.hpp" - -namespace ngraph { -using ov::PartialShape; -} // namespace ngraph diff --git a/src/core/include/ngraph/rank.hpp b/src/core/include/ngraph/rank.hpp deleted file mode 100644 index f33600e8e3c074..00000000000000 --- a/src/core/include/ngraph/rank.hpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include "openvino/core/rank.hpp" - -namespace ngraph { -using ov::Rank; -} // namespace ngraph diff --git a/src/core/include/ngraph/shape.hpp b/src/core/include/ngraph/shape.hpp deleted file mode 100644 index cb802b7467a3ba..00000000000000 --- a/src/core/include/ngraph/shape.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "ngraph/strides.hpp" -#include "openvino/core/shape.hpp" - -namespace ngraph { -using ov::is_scalar; -using ov::is_vector; -using ov::row_major_stride; -using ov::row_major_strides; -using ov::Shape; -using ov::shape_size; -} // namespace ngraph diff --git a/src/core/include/ngraph/strides.hpp b/src/core/include/ngraph/strides.hpp deleted file mode 100644 index 9247549050bd67..00000000000000 --- a/src/core/include/ngraph/strides.hpp +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include -#include -#include - -#include "openvino/core/strides.hpp" - -namespace ngraph { -using ov::Strides; -} // namespace ngraph diff --git a/src/core/include/ngraph/validation_util.hpp b/src/core/include/ngraph/validation_util.hpp deleted file mode 100644 index 672b433ff1adca..00000000000000 --- a/src/core/include/ngraph/validation_util.hpp +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "openvino/core/coordinate_diff.hpp" -#include "openvino/op/constant.hpp" -#include "openvino/op/util/attr_types.hpp" -#include "openvino/op/util/variable_context.hpp" -#include "openvino/runtime/tensor.hpp" - -namespace ngraph { -using ov::CoordinateDiff; -using ov::Shape; -using ov::Strides; -using ov::op::v0::Constant; - -namespace element { -using ov::element::Type; -using ov::element::Type_t; -} // namespace element - -OPENVINO_DEPRECATED("The nGraph API is deprecated and will be removed in the 2024.0 release. 
" - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -OPENVINO_API -Strides conv_default_strides(const ov::Node* node, - const ov::PartialShape& data_batch_shape, - const ov::PartialShape& filters_shape); - -OPENVINO_DEPRECATED("The nGraph API is deprecated and will be removed in the 2024.0 release. " - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -OPENVINO_API -CoordinateDiff conv_default_padding(const ov::Node* node, - const ov::PartialShape& data_batch_shape, - const ov::PartialShape& filters_shape); - -OPENVINO_DEPRECATED("The nGraph API is deprecated and will be removed in the 2024.0 release. " - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -OPENVINO_API -ov::PartialShape infer_windowed_reduction_output_shape(const ov::Node* node, - const ov::PartialShape& data_shape, - const Strides& data_dilation, - const CoordinateDiff& data_padding_below, - const CoordinateDiff& data_padding_above, - const ov::PartialShape& window_shape, - const Strides& window_strides, - const Strides& window_dilation, - bool is_window_all_in_padding_allowed, - bool ceil_mode = false); - -OPENVINO_DEPRECATED("The nGraph API is deprecated and will be removed in the 2024.0 release. " - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -void validate_conv_params_spatial_dimensions(const ov::Node* node, - const size_t num_spatial_dims, - const ov::op::PadType auto_pad, - Strides& strides, - Strides& dilations, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end); - -OPENVINO_DEPRECATED("The nGraph API is deprecated and will be removed in the 2024.0 release. " - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -OPENVINO_API -ov::PartialShape infer_batched_pooling_forward(const ov::Node* node, - const ov::PartialShape& data_batch_shape, - const CoordinateDiff& data_padding_below, - const CoordinateDiff& data_padding_above, - const ov::PartialShape& window_shape, - const Strides& window_strides, - bool is_window_all_in_padding_allowed, - bool ceil_mode = false, - const Strides& window_dilation = Strides{}); - -OPENVINO_DEPRECATED("The nGraph API is deprecated and will be removed in the 2024.0 release. " - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -OPENVINO_API -ov::PartialShape infer_slice_shape(const ov::Node* node, - const ov::PartialShape& input_shape, - const std::vector& begin, - const std::vector& end, - const std::vector& strides, - const ov::AxisSet& begin_mask, - const ov::AxisSet& end_mask, - const ov::AxisSet& new_axis_mask, - const ov::AxisSet& shrink_axis_mask, - const ov::AxisSet& ellipsis_mask); - -/// \brief Returns a Constant storing scalar value equal to std::numeric_limits::max() -OPENVINO_DEPRECATED("The nGraph API is deprecated and will be removed in the 2024.0 release. 
" - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -OPENVINO_API std::shared_ptr get_constant_max_of_type(element::Type_t t); - -/// \brief Returns a Constant storing scalar value equal to std::numeric_limits::min() -OPENVINO_DEPRECATED("The nGraph API is deprecated and will be removed in the 2024.0 release. " - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -OPENVINO_API std::shared_ptr get_constant_min_of_type(element::Type_t t); - -/// \brief Returns a Constant storing scalar value equal to std::numeric_limits::lowest() -OPENVINO_DEPRECATED("The nGraph API is deprecated and will be removed in the 2024.0 release. " - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -OPENVINO_API std::shared_ptr get_constant_lowest_of_type(element::Type_t t); - -namespace opset1 { -/// -/// \brief Calculates padding values for ConvolutionBackpropData operator. -/// -/// \param[in] input_data_shape The input data shape. -/// \param[in] filters_shape The filters shape. -/// \param[in] output_shape The output shape defined only for spatial dimentions. -/// \param[in] strides The strides values. -/// \param[in] dilations The dilations values. -/// \param[in] auto_pad_type The automatic padding mode. -/// \param[in] output_padding The output padding values. -/// \param pads_begin The placeholder for paddings at the beginning of axis. -/// \param pads_end The placeholder for paddings at the end of axis. -/// -OPENVINO_DEPRECATED("The nGraph API is deprecated and will be removed in the 2024.0 release. " - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -OPENVINO_API -void infer_conv_backprop_auto_padding(const Shape& input_data_shape, - const Shape& filters_shape, - const Shape& output_shape, - const Strides& strides, - const Strides& dilations, - const ov::op::PadType auto_pad_type, - const CoordinateDiff& output_padding, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end); -} // namespace opset1 -} // namespace ngraph diff --git a/src/core/include/openvino/core/core.hpp b/src/core/include/openvino/core/core.hpp index 0239a7970de6ab..823218666f2ffa 100644 --- a/src/core/include/openvino/core/core.hpp +++ b/src/core/include/openvino/core/core.hpp @@ -13,7 +13,6 @@ #include "openvino/core/coordinate.hpp" #include "openvino/core/coordinate_diff.hpp" #include "openvino/core/core_visibility.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/dimension.hpp" #include "openvino/core/enum_mask.hpp" #include "openvino/core/enum_names.hpp" diff --git a/src/core/include/openvino/core/descriptor/tensor.hpp b/src/core/include/openvino/core/descriptor/tensor.hpp index d7be44f6e025b3..8a37af24710e79 100644 --- a/src/core/include/openvino/core/descriptor/tensor.hpp +++ b/src/core/include/openvino/core/descriptor/tensor.hpp @@ -46,8 +46,6 @@ class OPENVINO_API Tensor { Tensor(const element::Type& element_type, const PartialShape& pshape, const std::unordered_set& names = {}); - OPENVINO_DEPRECATED("This constructor is deprecated. 
Please use constructor with set of names") - Tensor(const element::Type& element_type, const PartialShape& pshape, const std::string& name); Tensor(const element::Type& element_type, const PartialShape& pshape, Node* node, size_t node_output_number); Tensor(const Tensor&) = delete; diff --git a/src/core/include/openvino/core/node.hpp b/src/core/include/openvino/core/node.hpp index 393dc2be32d226..9006ab57a3a2a8 100644 --- a/src/core/include/openvino/core/node.hpp +++ b/src/core/include/openvino/core/node.hpp @@ -21,7 +21,6 @@ #include "openvino/core/attribute_visitor.hpp" #include "openvino/core/core_visibility.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/descriptor/input.hpp" #include "openvino/core/descriptor/output.hpp" #include "openvino/core/descriptor/tensor.hpp" diff --git a/src/core/include/openvino/core/type.hpp b/src/core/include/openvino/core/type.hpp index 6ceaa39cbe08e0..ca4435c47c32ea 100644 --- a/src/core/include/openvino/core/type.hpp +++ b/src/core/include/openvino/core/type.hpp @@ -14,7 +14,6 @@ #include #include "openvino/core/core_visibility.hpp" -#include "openvino/core/deprecated.hpp" namespace ov { @@ -115,9 +114,7 @@ struct AsTypePtr> { /// Type, nullptr otherwise template auto as_type_ptr(const U& value) -> decltype(::ov::util::AsTypePtr::template call(value)) { - OPENVINO_SUPPRESS_DEPRECATED_START return ::ov::util::AsTypePtr::template call(value); - OPENVINO_SUPPRESS_DEPRECATED_END } } // namespace ov diff --git a/src/core/include/openvino/op/gather.hpp b/src/core/include/openvino/op/gather.hpp index dcc53a465bf718..56cb602c453ed9 100644 --- a/src/core/include/openvino/op/gather.hpp +++ b/src/core/include/openvino/op/gather.hpp @@ -74,7 +74,7 @@ class OPENVINO_API Gather : public op::util::GatherBase { bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; int64_t get_batch_dims() const; - + bool has_evaluate() const override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; }; } // namespace v8 diff --git a/src/core/include/openvino/op/interpolate.hpp b/src/core/include/openvino/op/interpolate.hpp index ac6876e1286cc6..f48297e7257c8c 100644 --- a/src/core/include/openvino/op/interpolate.hpp +++ b/src/core/include/openvino/op/interpolate.hpp @@ -44,16 +44,7 @@ class OPENVINO_API Interpolate : public Op { std::vector pads_end; }; - enum class InterpolateMode { - NEAREST, - LINEAR, - CUBIC, - AREA, - nearest OPENVINO_ENUM_DEPRECATED("Please use NEAREST instead") = NEAREST, - linear OPENVINO_ENUM_DEPRECATED("Please use LINEAR instead") = LINEAR, - cubic OPENVINO_ENUM_DEPRECATED("Please use CUBIC instead") = CUBIC, - area OPENVINO_ENUM_DEPRECATED("Please use AREA instead") = AREA - }; + enum class InterpolateMode { NEAREST, LINEAR, CUBIC, AREA }; Interpolate() = default; /// \brief Constructs a Interpolate operation diff --git a/src/core/include/openvino/op/util/convolution_base.hpp b/src/core/include/openvino/op/util/convolution_base.hpp index 0681d1a2e8e2b0..920fa17d74b13a 100644 --- a/src/core/include/openvino/op/util/convolution_base.hpp +++ b/src/core/include/openvino/op/util/convolution_base.hpp @@ -65,10 +65,6 @@ class OPENVINO_API ConvolutionBase : public Op { const CoordinateDiff& get_pads_end() const { return m_pads_end; } - OPENVINO_DEPRECATED("This method is deprecated and will be removed soon. 
Please use set_pads_end instead.") - void set_adding_above(const CoordinateDiff& pads_end) { - set_pads_end(pads_end); - } void set_pads_end(const CoordinateDiff& pads_end) { m_pads_end = pads_end; } diff --git a/src/core/include/openvino/op/util/max_pool_base.hpp b/src/core/include/openvino/op/util/max_pool_base.hpp index 4e8d9937ea57ac..e633c364a0bc3c 100644 --- a/src/core/include/openvino/op/util/max_pool_base.hpp +++ b/src/core/include/openvino/op/util/max_pool_base.hpp @@ -58,10 +58,6 @@ class OPENVINO_API MaxPoolBase : public Op { const Shape& get_pads_end() const { return m_pads_end; } - OPENVINO_DEPRECATED("This method is deprecated and will be removed soon. Please use set_pads_end instead.") - void set_adding_above(const Shape& pads_end) { - m_pads_end = pads_end; - } void set_pads_end(Shape pads_end); /// \return The pad type for pooling. diff --git a/src/core/include/openvino/op/util/variable_value.hpp b/src/core/include/openvino/op/util/variable_value.hpp index 574a180882949c..57e25e893e2174 100644 --- a/src/core/include/openvino/op/util/variable_value.hpp +++ b/src/core/include/openvino/op/util/variable_value.hpp @@ -8,7 +8,6 @@ #include #include "openvino/core/core_visibility.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/runtime/tensor.hpp" namespace ov { @@ -32,20 +31,14 @@ class OPENVINO_API VariableValue { explicit VariableValue(const ov::Tensor& value); /// \brief Constructor for VariableValue. - /// \deprecated This method is deprecated and will be removed in 2024.0 release. Please use method with ov::Tensor - /// instead /// \param value Data for Variable. /// \param reset The current state of the reset flag. VariableValue(const ov::Tensor& value, bool reset); /// \brief Returns the current stored data. - /// \deprecated This method is deprecated and will be removed in 2024.0 release. Please use method with ov::Tensor - /// instead const ov::Tensor& get_state() const; /// \brief Sets new values for Variable. - /// \deprecated This method is deprecated and will be removed in 2024.0 release. Please use method with ov::Tensor - /// instead /// \param value New data for Variable. 
void set_state(const ov::Tensor& value); diff --git a/src/core/include/openvino/opsets/opset.hpp b/src/core/include/openvino/opsets/opset.hpp index c393eeeb581d2d..f14fc6feb85d7f 100644 --- a/src/core/include/openvino/opsets/opset.hpp +++ b/src/core/include/openvino/opsets/opset.hpp @@ -10,7 +10,6 @@ #include #include -#include "openvino/core/deprecated.hpp" #include "openvino/core/node.hpp" namespace ov { diff --git a/src/core/include/openvino/pass/pass.hpp b/src/core/include/openvino/pass/pass.hpp index 9cbbce52ba8b50..7f5ee620bfeb41 100644 --- a/src/core/include/openvino/pass/pass.hpp +++ b/src/core/include/openvino/pass/pass.hpp @@ -9,7 +9,6 @@ #include #include "openvino/core/core_visibility.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/enum_mask.hpp" #include "openvino/core/model.hpp" #include "openvino/core/node.hpp" diff --git a/src/core/include/openvino/pass/pass_config.hpp b/src/core/include/openvino/pass/pass_config.hpp index 04f020d3a2bdd6..67821abd8e1ec9 100644 --- a/src/core/include/openvino/pass/pass_config.hpp +++ b/src/core/include/openvino/pass/pass_config.hpp @@ -9,7 +9,6 @@ #include #include "openvino/core/core_visibility.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/model.hpp" #include "openvino/core/node.hpp" diff --git a/src/core/include/openvino/pass/serialize.hpp b/src/core/include/openvino/pass/serialize.hpp index 759bc3e7973adb..332b8d76b90e6d 100644 --- a/src/core/include/openvino/pass/serialize.hpp +++ b/src/core/include/openvino/pass/serialize.hpp @@ -14,7 +14,6 @@ namespace ov { namespace pass { -OPENVINO_SUPPRESS_DEPRECATED_START /** * @brief Serialize transformation converts ov::Model into IR files * @attention @@ -75,7 +74,5 @@ class OPENVINO_API StreamSerialize : public ov::pass::ModelPass { std::function m_custom_data_serializer; const Serialize::Version m_version; }; -OPENVINO_SUPPRESS_DEPRECATED_END - } // namespace pass } // namespace ov diff --git a/src/core/shape_inference/include/pooling_shape_inference_util.hpp b/src/core/shape_inference/include/pooling_shape_inference_util.hpp index 36c3eef787fe89..f53ef153580b4c 100644 --- a/src/core/shape_inference/include/pooling_shape_inference_util.hpp +++ b/src/core/shape_inference/include/pooling_shape_inference_util.hpp @@ -248,12 +248,10 @@ TRShape out_shape_infer(const TOp* op, const std::vector& input_shapes, const auto& data_rank = data_shape.rank(); - OPENVINO_SUPPRESS_DEPRECATED_START NODE_VALIDATION_CHECK(op, ov::util::is_rank_compatible_any_of(data_rank, {3, 4, 5}), "Expected a 3D, 4D or 5D tensor for the input. 
Got: ", data_shape); - OPENVINO_SUPPRESS_DEPRECATED_END TRShape output_shape; if (data_rank.is_static()) { diff --git a/src/core/shape_inference/include/utils.hpp b/src/core/shape_inference/include/utils.hpp index b0a7c508619d83..18d6158b41675e 100644 --- a/src/core/shape_inference/include/utils.hpp +++ b/src/core/shape_inference/include/utils.hpp @@ -8,7 +8,6 @@ #include "element_visitor.hpp" #include "openvino/core/bound_evaluation_util.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/opsets/opset1.hpp" #include "ov_optional.hpp" #include "shape_infer_type_utils.hpp" diff --git a/src/core/src/descriptor/tensor.cpp b/src/core/src/descriptor/tensor.cpp index 18c9fd3ff18f96..062a740bbd80b6 100644 --- a/src/core/src/descriptor/tensor.cpp +++ b/src/core/src/descriptor/tensor.cpp @@ -17,13 +17,6 @@ ov::descriptor::Tensor::Tensor(const element::Type& element_type, set_names(names); } -ov::descriptor::Tensor::Tensor(const element::Type& element_type, const PartialShape& pshape, const std::string& name) - : m_element_type(element_type), - m_partial_shape(pshape), - m_shape_changed(true) { - m_name_it = m_names.cend(); -} - ov::descriptor::Tensor::Tensor(const element::Type& element_type, const PartialShape& pshape, ov::Node* node, diff --git a/src/core/src/evaluator.hpp b/src/core/src/evaluator.hpp index 8705b44c3eb5f5..d784077a030222 100644 --- a/src/core/src/evaluator.hpp +++ b/src/core/src/evaluator.hpp @@ -9,6 +9,7 @@ #include #include "openvino/core/node.hpp" +#include "openvino/core/shape.hpp" #include "openvino/core/type/element_type_traits.hpp" namespace ov { diff --git a/src/core/src/legacy_op_extension.cpp b/src/core/src/legacy_op_extension.cpp deleted file mode 100644 index c943d20ef9d88a..00000000000000 --- a/src/core/src/legacy_op_extension.cpp +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "legacy_op_extension.hpp" - -ov::LegacyOpExtension::~LegacyOpExtension() = default; diff --git a/src/core/src/op/experimental_detectron_topkrois.cpp b/src/core/src/op/experimental_detectron_topkrois.cpp index 5aa1d1cf5b2cc2..3ef507c29542f5 100644 --- a/src/core/src/op/experimental_detectron_topkrois.cpp +++ b/src/core/src/op/experimental_detectron_topkrois.cpp @@ -38,9 +38,7 @@ void op::v6::ExperimentalDetectronTopKROIs::validate_and_infer_types() { (out_et.is_dynamic() || out_et.is_real()), "ROIs and probabilities of ROIs must same floating-point type."); - OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = ov::util::get_node_input_partial_shapes(*this); - OPENVINO_SUPPRESS_DEPRECATED_START const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, out_et, output_shapes[0]); diff --git a/src/core/src/op/gather.cpp b/src/core/src/op/gather.cpp index e53f290f1633c1..941637781b0ba6 100644 --- a/src/core/src/op/gather.cpp +++ b/src/core/src/op/gather.cpp @@ -108,6 +108,16 @@ std::shared_ptr Gather::clone_with_new_inputs(const OutputVector& new_args check_new_args_count(this, new_args); return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims); } + +bool Gather::has_evaluate() const { + OV_OP_SCOPE(v8_Gather_has_evaluate); + switch (get_output_element_type(0)) { + case element::string: + return true; + default: + return false; + } +} } // namespace v8 } // namespace op } // namespace ov diff --git a/src/core/src/op/max_pool.cpp b/src/core/src/op/max_pool.cpp index f07980ce75af22..204e72a6cbf962 100644 --- a/src/core/src/op/max_pool.cpp +++ 
b/src/core/src/op/max_pool.cpp @@ -39,10 +39,8 @@ bool MaxPool::visit_attributes(AttributeVisitor& visitor) { void MaxPool::validate_and_infer_types() { OV_OP_SCOPE(v1_MaxPool_validate_and_infer_types); - OPENVINO_SUPPRESS_DEPRECATED_START const auto output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this), m_pads_begin, m_pads_end); - OPENVINO_SUPPRESS_DEPRECATED_END set_output_type(0, get_input_element_type(0), output_shapes.front()); } @@ -169,10 +167,8 @@ void MaxPool::validate_and_infer_types() { m_axis = ov::util::normalize_axis(this, m_axis, input_shape.rank()); } - OPENVINO_SUPPRESS_DEPRECATED_START const auto output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this), m_pads_begin, m_pads_end); - OPENVINO_SUPPRESS_DEPRECATED_END set_output_type(0, get_input_element_type(0), output_shapes[0]); set_output_type(1, m_index_element_type, output_shapes[1]); } diff --git a/src/core/src/op/non_max_suppression.cpp b/src/core/src/op/non_max_suppression.cpp index 09f4cd2174194f..ead5205593b17d 100644 --- a/src/core/src/op/non_max_suppression.cpp +++ b/src/core/src/op/non_max_suppression.cpp @@ -473,9 +473,7 @@ float op::v5::NonMaxSuppression::score_threshold_from_input() const { return score_threshold; } - OPENVINO_SUPPRESS_DEPRECATED_START const auto score_threshold_input = ov::util::get_constant_from_source(input_value(score_threshold_port)); - OPENVINO_SUPPRESS_DEPRECATED_END score_threshold = score_threshold_input->cast_vector().at(0); return score_threshold; @@ -488,9 +486,7 @@ float op::v5::NonMaxSuppression::soft_nms_sigma_from_input() const { return soft_nms_sigma; } - OPENVINO_SUPPRESS_DEPRECATED_START const auto soft_nms_sigma_input = ov::util::get_constant_from_source(input_value(soft_nms_sigma_port)); - OPENVINO_SUPPRESS_DEPRECATED_END soft_nms_sigma = soft_nms_sigma_input->cast_vector().at(0); return soft_nms_sigma; @@ -678,9 +674,7 @@ int64_t op::v9::NonMaxSuppression::max_boxes_output_from_input() const { return 0; } - OPENVINO_SUPPRESS_DEPRECATED_START const auto max_output_boxes_input = ov::util::get_constant_from_source(input_value(max_output_boxes_port)); - OPENVINO_SUPPRESS_DEPRECATED_END max_output_boxes = max_output_boxes_input->cast_vector().at(0); return max_output_boxes; @@ -693,9 +687,7 @@ float op::v9::NonMaxSuppression::iou_threshold_from_input() const { return iou_threshold; } - OPENVINO_SUPPRESS_DEPRECATED_START const auto iou_threshold_input = ov::util::get_constant_from_source(input_value(iou_threshold_port)); - OPENVINO_SUPPRESS_DEPRECATED_END iou_threshold = iou_threshold_input->cast_vector().at(0); return iou_threshold; @@ -708,9 +700,7 @@ float op::v9::NonMaxSuppression::score_threshold_from_input() const { return score_threshold; } - OPENVINO_SUPPRESS_DEPRECATED_START const auto score_threshold_input = ov::util::get_constant_from_source(input_value(score_threshold_port)); - OPENVINO_SUPPRESS_DEPRECATED_END score_threshold = score_threshold_input->cast_vector().at(0); return score_threshold; @@ -723,9 +713,7 @@ float op::v9::NonMaxSuppression::soft_nms_sigma_from_input() const { return soft_nms_sigma; } - OPENVINO_SUPPRESS_DEPRECATED_START const auto soft_nms_sigma_input = ov::util::get_constant_from_source(input_value(soft_nms_sigma_port)); - OPENVINO_SUPPRESS_DEPRECATED_END soft_nms_sigma = soft_nms_sigma_input->cast_vector().at(0); return soft_nms_sigma; diff --git a/src/core/src/op/proposal.cpp b/src/core/src/op/proposal.cpp index 1a6354680221f5..14a996e23739bf 100644 --- a/src/core/src/op/proposal.cpp +++ 
b/src/core/src/op/proposal.cpp @@ -91,9 +91,7 @@ void op::v4::Proposal::validate_and_infer_types() { OV_OP_SCOPE(v4_Proposal_validate_and_infer_types); validate_element_types(); - OPENVINO_SUPPRESS_DEPRECATED_START const auto intput_shapes = ov::util::get_node_input_partial_shapes(*this); - OPENVINO_SUPPRESS_DEPRECATED_END const auto output_shapes = shape_infer(this, intput_shapes); const auto& out_et = get_input_element_type(0); diff --git a/src/core/src/op/reduce_logical_and.cpp b/src/core/src/op/reduce_logical_and.cpp index 0178917aae3665..1d2cce23211bf5 100644 --- a/src/core/src/op/reduce_logical_and.cpp +++ b/src/core/src/op/reduce_logical_and.cpp @@ -39,7 +39,6 @@ std::shared_ptr ReduceLogicalAnd::clone_with_new_inputs(const OutputVector bool ReduceLogicalAnd::evaluate(TensorVector& outputs, const TensorVector& inputs) const { OV_OP_SCOPE(v1_ReduceLogicalAnd_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START OPENVINO_ASSERT(inputs.size() == 2); OPENVINO_ASSERT(outputs.size() == 1); diff --git a/src/core/src/op/util/gather_base.cpp b/src/core/src/op/util/gather_base.cpp index a05bcdf1c55087..dcfac60c659eb7 100644 --- a/src/core/src/op/util/gather_base.cpp +++ b/src/core/src/op/util/gather_base.cpp @@ -204,7 +204,7 @@ bool GatherBase::evaluate(TensorVector& outputs, const TensorVector& inputs) con using namespace ov::element; return IF_TYPE_OF(util_GatherBase_evaluate, - OV_PP_ET_LIST(boolean, f16, f32, i8, i32, i64, u8, u32, u64), + OV_PP_ET_LIST(boolean, f16, f32, i8, i32, i64, u8, u32, u64, string), gather::Evaluate, data.get_element_type(), data, @@ -226,9 +226,7 @@ bool GatherBase::evaluate_upper(TensorVector& output_values) const { } bool GatherBase::evaluate_label(TensorLabelVector& output_labels) const { - OPENVINO_SUPPRESS_DEPRECATED_START return gather::have_indices_and_axis_bound_set(this) && ov::util::default_label_evaluator(this, output_labels); - OPENVINO_SUPPRESS_DEPRECATED_END } bool GatherBase::constant_fold(OutputVector& output_values, const OutputVector& input_values) { diff --git a/src/core/src/op/util/variable_value.cpp b/src/core/src/op/util/variable_value.cpp index 86ea10a87e9021..4d3662ecfda994 100644 --- a/src/core/src/op/util/variable_value.cpp +++ b/src/core/src/op/util/variable_value.cpp @@ -6,7 +6,6 @@ #include -#include "openvino/core/deprecated.hpp" #include "openvino/core/shape.hpp" #include "openvino/core/shape_util.hpp" #include "openvino/runtime/allocator.hpp" diff --git a/src/core/src/pass/manager.cpp b/src/core/src/pass/manager.cpp index 69363fe72f7c18..a6bee008ca99d4 100644 --- a/src/core/src/pass/manager.cpp +++ b/src/core/src/pass/manager.cpp @@ -37,8 +37,7 @@ PerfCounters& perf_counters() { namespace { bool getenv_visualize_tracing() { - return ov::util::getenv_bool("NGRAPH_ENABLE_VISUALIZE_TRACING") || - ov::util::getenv_bool("OV_ENABLE_VISUALIZE_TRACING"); + return ov::util::getenv_bool("OV_ENABLE_VISUALIZE_TRACING"); } } // namespace @@ -93,11 +92,9 @@ class stopwatch { } // namespace bool ov::pass::Manager::run_passes(shared_ptr func) { - OPENVINO_SUPPRESS_DEPRECATED_START OV_ITT_SCOPED_TASK(ov::itt::domains::core, "pass::Manager::run_passes"); - static bool profile_enabled = - ov::util::getenv_bool("NGRAPH_PROFILE_PASS_ENABLE") || ov::util::getenv_bool("OV_PROFILE_PASS_ENABLE"); + static bool profile_enabled = ov::util::getenv_bool("OV_PROFILE_PASS_ENABLE"); size_t index = 0; stopwatch pass_timer; @@ -171,7 +168,6 @@ bool ov::pass::Manager::run_passes(shared_ptr func) { if (profile_enabled) { cout << "passes done in " << 
overall_timer.get_milliseconds() << "ms\n"; } - OPENVINO_SUPPRESS_DEPRECATED_END return function_changed; } diff --git a/src/core/src/pass/serialize.cpp b/src/core/src/pass/serialize.cpp index 0da8c27ea83db6..e3d133ee545d05 100644 --- a/src/core/src/pass/serialize.cpp +++ b/src/core/src/pass/serialize.cpp @@ -28,7 +28,6 @@ #include "transformations/rt_info/disable_fp16_compression.hpp" #include "transformations/rt_info/primitives_priority_attribute.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START namespace { // helpers template std::string join(const Container& c, const char* glue = ", ") { diff --git a/src/core/src/pass/visualize_tree.cpp b/src/core/src/pass/visualize_tree.cpp index bcd3bcd1713390..5c433671f06472 100644 --- a/src/core/src/pass/visualize_tree.cpp +++ b/src/core/src/pass/visualize_tree.cpp @@ -461,10 +461,8 @@ std::string ov::pass::VisualizeTree::get_attributes(std::shared_ptr node) std::stringstream label; label << "label=\"" << get_node_name(node); - static const bool nvtos = ov::util::getenv_bool("NGRAPH_VISUALIZE_TREE_OUTPUT_SHAPES") || - ov::util::getenv_bool("OV_VISUALIZE_TREE_OUTPUT_SHAPES"); - static const bool nvtot = ov::util::getenv_bool("NGRAPH_VISUALIZE_TREE_OUTPUT_TYPES") || - ov::util::getenv_bool("OV_VISUALIZE_TREE_OUTPUT_TYPES"); + static const bool nvtos = ov::util::getenv_bool("OV_VISUALIZE_TREE_OUTPUT_SHAPES"); + static const bool nvtot = ov::util::getenv_bool("OV_VISUALIZE_TREE_OUTPUT_TYPES"); static const bool nvtio = ov::util::getenv_bool("OV_VISUALIZE_TREE_IO"); static const bool nvtrti = ov::util::getenv_bool("OV_VISUALIZE_TREE_RUNTIME_INFO"); static const bool ovpvl = ov::util::getenv_bool("OV_VISUALIZE_PARTIAL_VALUES_AND_LABELS"); diff --git a/src/core/src/pattern/matcher.cpp b/src/core/src/pattern/matcher.cpp index ab14a875a37168..063da2bdbe7c45 100644 --- a/src/core/src/pattern/matcher.cpp +++ b/src/core/src/pattern/matcher.cpp @@ -140,7 +140,6 @@ bool Matcher::match_permutation(const OutputVector& pattern_args, const OutputVe } bool Matcher::match_arguments(Node* pattern_node, const std::shared_ptr& graph_node) { - OPENVINO_SUPPRESS_DEPRECATED_START OPENVINO_DEBUG << "[MATCHER] Match arguments at " << *graph_node << " for pattern " << *pattern_node; auto args = graph_node->input_values(); @@ -174,7 +173,6 @@ bool Matcher::match_arguments(Node* pattern_node, const std::shared_ptr& g } OPENVINO_DEBUG << "[MATCHER] Aborting at " << *graph_node << " for pattern " << *pattern_node; - OPENVINO_SUPPRESS_DEPRECATED_END return false; } diff --git a/src/core/src/specialize_function.cpp b/src/core/src/specialize_function.cpp deleted file mode 100644 index d352b7635cb4ca..00000000000000 --- a/src/core/src/specialize_function.cpp +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/specialize_function.hpp" - -#include "itt.hpp" -#include "openvino/op/constant.hpp" -#include "openvino/op/parameter.hpp" -#include "openvino/op/util/op_types.hpp" - -using namespace ngraph; -OPENVINO_SUPPRESS_DEPRECATED_START; - -using ov::op::v0::Constant; - -std::shared_ptr ngraph::specialize_function(std::shared_ptr f, - const std::vector& parameter_element_types, - const std::vector& parameter_shapes, - const std::vector& parameter_values) - -{ - OV_ITT_SCOPED_TASK(ov::itt::domains::core, "specialize_function"); - - OPENVINO_ASSERT(f->get_parameters().size() == parameter_shapes.size()); - OPENVINO_ASSERT(f->get_parameters().size() == parameter_element_types.size()); - 
OPENVINO_ASSERT(f->get_parameters().size() == parameter_values.size()); - - std::unordered_map> m; - - for (size_t i = 0; i < parameter_shapes.size(); i++) { - OPENVINO_ASSERT(f->get_parameters()[i]->get_element_type().is_dynamic() || - parameter_element_types[i] == f->get_parameters()[i]->get_element_type()); - - if (parameter_values[i] != nullptr && parameter_shapes[i].is_static() && - parameter_element_types[i].is_static()) { - m[f->get_parameters()[i].get()] = std::make_shared(parameter_element_types[i], - parameter_shapes[i].to_shape(), - parameter_values[i]); - } else { - m[f->get_parameters()[i].get()] = - std::make_shared(parameter_element_types[i], parameter_shapes[i]); - } - auto rt_info = f->get_parameters()[i]->get_rt_info(); - m[f->get_parameters()[i].get()]->get_rt_info() = rt_info; - } - - for (auto old_node : f->get_ordered_ops()) { - if (ov::op::util::is_parameter(old_node)) { - continue; - } - - ov::OutputVector new_args; - for (auto input : old_node->inputs()) { - auto output = input.get_source_output(); - new_args.push_back(output.for_node(m[output.get_node()])); - } - - ov::NodeVector cloned_dependencies; - for (auto& dependency : old_node->get_control_dependencies()) { - std::shared_ptr dependent = m.at(dependency.get()); - if (find(cloned_dependencies.begin(), cloned_dependencies.end(), dependent) == cloned_dependencies.end()) { - cloned_dependencies.push_back(dependent); - } - } - m[old_node.get()] = old_node->copy_with_new_inputs(new_args, cloned_dependencies); - - auto rt_info = old_node->get_rt_info(); - m[old_node.get()]->get_rt_info() = rt_info; - - m[old_node.get()]->set_friendly_name(old_node->get_friendly_name()); - } - - ov::ParameterVector new_parameters = f->get_parameters(); - for (size_t i = 0; i < new_parameters.size(); i++) { - auto name = new_parameters[i]->get_friendly_name(); - new_parameters[i] = ov::as_type_ptr(m[new_parameters[i].get()]); - - // If the replacement for a Parameter is not itself a Parameter, we must have replaced it - // with a constant. We will insert a dead Parameter into the clone's parameters, in order - // to maintain the arity of the original function. 
- if (new_parameters[i] == nullptr) { - new_parameters[i] = - std::make_shared(parameter_element_types[i], parameter_shapes[i]); - } - new_parameters[i]->set_friendly_name(name); - } - - ov::ResultVector new_results = f->get_results(); - for (size_t i = 0; i < new_results.size(); i++) { - auto name = new_results[i]->get_friendly_name(); - new_results[i] = std::static_pointer_cast(m[new_results[i].get()]); - new_results[i]->set_friendly_name(name); - } - auto new_sinks = f->get_sinks(); - for (size_t i = 0; i < new_sinks.size(); i++) { - new_sinks[i] = std::static_pointer_cast(m[new_sinks[i].get()]); - } - - return std::make_shared(new_results, new_sinks, new_parameters); -} diff --git a/src/core/src/validation_util.cpp b/src/core/src/validation_util.cpp index 13cccab1d3e0a3..963a229799106b 100644 --- a/src/core/src/validation_util.cpp +++ b/src/core/src/validation_util.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/validation_util.hpp" +#include "validation_util.hpp" #include #include @@ -16,560 +16,6 @@ #include "openvino/op/ops.hpp" #include "openvino/util/common_util.hpp" #include "sequnce_generator.hpp" -#include "validation_util.hpp" - -OPENVINO_SUPPRESS_DEPRECATED_START - -namespace ngraph { -using ov::Dimension; -namespace op { -namespace v0 { -using ov::op::v0::Constant; -using ov::op::v0::Negative; -} // namespace v0 -} // namespace op - -Strides conv_default_strides(const ov::Node* /* node */, - const ov::PartialShape& data_batch_shape, - const ov::PartialShape& filters_shape) { - size_t rank; - - if (data_batch_shape.rank().is_static() && data_batch_shape.rank().get_length() >= 2) { - rank = data_batch_shape.rank().get_length() - 2; - } else if (filters_shape.rank().is_static() && filters_shape.rank().get_length() >= 2) { - rank = filters_shape.rank().get_length() - 2; - } else { - rank = 0; - } - - return Strides(rank, 1); -} - -CoordinateDiff conv_default_padding(const ov::Node* /* node */, - const ov::PartialShape& data_batch_shape, - const ov::PartialShape& filters_shape) { - size_t rank; - - if (data_batch_shape.rank().is_static() && data_batch_shape.rank().get_length() >= 2) { - rank = data_batch_shape.rank().get_length() - 2; - } else if (filters_shape.rank().is_static() && filters_shape.rank().get_length() >= 2) { - rank = filters_shape.rank().get_length() - 2; - } else { - rank = 0; - } - - return CoordinateDiff(rank, 0); -} - -// -// Infers the output shape of a windowed reduction operation, where the data may be dilated and/or -// padded, and the reduction window may be strided and/or dilated. -// -// TODO(amprocte): The messages here would be a bit friendlier if we didn't say "after -// padding/after dilation" for cases where there is actually no padding/dilation. 
-// -ov::PartialShape infer_windowed_reduction_output_shape(const ov::Node* node, - const ov::PartialShape& data_shape, - const Strides& data_dilation, - const CoordinateDiff& data_padding_below, - const CoordinateDiff& data_padding_above, - const ov::PartialShape& window_shape, - const Strides& window_strides, - const Strides& window_dilation, - bool is_window_all_in_padding_allowed, - bool ceil_mode) { - ov::PartialShape data_shape_merged{ov::PartialShape::dynamic()}; - - NODE_VALIDATION_CHECK( - node, - data_shape_merged.merge_rank(data_shape.rank()) && data_shape_merged.merge_rank(data_dilation.size()) && - data_shape_merged.merge_rank(data_padding_below.size()) && - data_shape_merged.merge_rank(data_padding_above.size()) && - data_shape_merged.merge_rank(window_shape.rank()) && data_shape_merged.merge_rank(window_strides.size()) && - data_shape_merged.merge_rank(window_dilation.size()), - "Ranks for data shape (", - data_shape, - "), data dilation (", - data_dilation, - "), padding below (", - data_padding_below, - "), padding above (", - data_padding_above, - "), window shape (", - window_shape, - "), window strides (", - window_strides, - "), and window dilation (", - window_dilation, - ") do not match."); - - ov::PartialShape output_shape = ov::PartialShape::dynamic(data_shape_merged.rank()); - if (output_shape.rank().is_static()) { - for (int64_t i = 0; i < output_shape.rank().get_length(); i++) { - NODE_VALIDATION_CHECK(node, - data_dilation[i] > 0, - "Data dilation (", - data_dilation, - ") has zero dimension at axis ", - i, - "."); - NODE_VALIDATION_CHECK(node, - window_strides[i] > 0, - "Window strides (", - window_strides, - ") has zero dimension at axis ", - i, - "."); - NODE_VALIDATION_CHECK(node, - window_dilation[i] > 0, - "Window dilation (", - window_dilation, - ") has zero dimension at axis ", - i, - "."); - - bool data_dim_static = data_shape.rank().is_static() && data_shape[i].is_static(); - bool window_dim_static = window_shape.rank().is_static() && window_shape[i].is_static(); - - ptrdiff_t data_padded_dilated_dim = -1; - if (data_dim_static) { - data_padded_dilated_dim = (static_cast(data_dilation[i]) * (data_shape[i].get_length() - 1)) + - 1 + data_padding_below[i] + data_padding_above[i]; - NODE_VALIDATION_CHECK(node, - data_padded_dilated_dim > 0, - "Data shape after padding and dilation has dimension less than 1 (dim: ", - data_padded_dilated_dim, - ") at axis ", - i, - "."); - } - - ptrdiff_t window_dilated_dim = -1; - if (window_dim_static) { - window_dilated_dim = static_cast(window_dilation[i]) * (window_shape[i].get_length() - 1) + 1; - - NODE_VALIDATION_CHECK(node, - window_dilated_dim > 0, - "Window after dilation has dimension less than 1 (dim: ", - window_dilated_dim, - ") at axis ", - i, - "."); - - NODE_VALIDATION_CHECK(node, - is_window_all_in_padding_allowed || (window_dilated_dim > data_padding_below[i] && - window_dilated_dim > data_padding_above[i]), - "Window after dilation is sometimes entirely in the padding area for axis ", - i, - " (dilated window dimension: ", - window_dilated_dim, - ", padding below dimension: ", - data_padding_below[i], - ", padding above dimension: ", - data_padding_above[i], - ") and this is not ", - "allowed."); - } - - if (data_dim_static && window_dim_static) { - NODE_VALIDATION_CHECK(node, - window_dilated_dim <= data_padded_dilated_dim, - "Window after dilation has dimension (dim: ", - window_dilated_dim, - ") larger than the data shape after padding (dim: ", - data_padded_dilated_dim, - ") at axis ", - i, - 
"."); - - if (ceil_mode) { - output_shape[i] = ov::util::ceil_div(static_cast(data_padded_dilated_dim) - - static_cast(window_dilated_dim), - window_strides[i]) + - 1; - } else { - output_shape[i] = - ((static_cast(data_padded_dilated_dim) - static_cast(window_dilated_dim)) / - window_strides[i]) + - 1; - } - } - } - } - - return output_shape; -} - -void validate_conv_params_spatial_dimensions(const ov::Node* node, - const size_t num_spatial_dims, - const ov::op::PadType auto_pad, - Strides& strides, - Strides& dilations, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end) { - if (strides.size() == 0) { - strides = Strides(num_spatial_dims, 1); - } - if (dilations.size() == 0) { - dilations = Strides(num_spatial_dims, 1); - } - if (pads_begin.size() == 0 || auto_pad == ov::op::PadType::VALID) { - pads_begin = CoordinateDiff(num_spatial_dims, 0); - } - if (pads_end.size() == 0 || auto_pad == ov::op::PadType::VALID) { - pads_end = CoordinateDiff(num_spatial_dims, 0); - } - NODE_VALIDATION_CHECK(node, - strides.size() == num_spatial_dims, - "Strides should be defined for all and only spatial features."); - NODE_VALIDATION_CHECK(node, - dilations.size() == num_spatial_dims, - "Dilations should be defined for all and only spatial features."); - NODE_VALIDATION_CHECK(node, - pads_begin.size() == num_spatial_dims && pads_end.size() == num_spatial_dims, - "Pads should be defined for all and only spatial features."); -} - -// -// Infers the output batch shape and element type for batched pooling fprop. -// -ov::PartialShape infer_batched_pooling_forward(const ov::Node* node, - const ov::PartialShape& data_batch_shape, - const CoordinateDiff& data_padding_below, - const CoordinateDiff& data_padding_above, - const ov::PartialShape& window_shape, - const Strides& window_strides, - bool is_window_all_in_padding_allowed, - bool ceil_mode, - const Strides& window_dilation) { - NODE_VALIDATION_CHECK(node, - data_batch_shape.rank().is_dynamic() || - (data_batch_shape.rank().get_length() >= 3 && data_batch_shape.rank().get_length() <= 5), - "Data batch must have rank of at least 4 or 5 (one batch axis, ", - "one input-channel axis, and two or three spatial dimension) ", - "(data batch shape: ", - data_batch_shape, - ")."); - - ov::PartialShape data_spatial_shape{ov::PartialShape::dynamic()}; - - NODE_VALIDATION_CHECK(node, - data_spatial_shape.merge_rank(data_batch_shape.rank() - 2) && - data_spatial_shape.merge_rank(data_padding_below.size()) && - data_spatial_shape.merge_rank(data_padding_above.size()) && - data_spatial_shape.merge_rank(window_shape.rank()) && - data_spatial_shape.merge_rank(window_strides.size()), - "Ranks for data item shape (data batch has shape ", - data_batch_shape, - ", so data item rank is ", - (data_batch_shape.rank() - 2), - "), padding below (", - data_padding_below, - "), padding above (", - data_padding_above, - "), window shape (", - window_shape, - "), and window strides (", - window_strides, - ") do not match."); - - Dimension batch_size{Dimension::dynamic()}; - Dimension channel_count{Dimension::dynamic()}; - ov::PartialShape data_output_spatial_shape{ov::PartialShape::dynamic(data_spatial_shape.rank())}; - - if (data_batch_shape.rank().is_static()) { - batch_size = data_batch_shape[0]; - channel_count = data_batch_shape[1]; - - for (int64_t i = 0; i < data_spatial_shape.rank().get_length(); i++) { - data_spatial_shape[i] = data_batch_shape[i + 2]; - } - - NODE_VALIDATION_CHECK(node, batch_size.is_dynamic() || batch_size.get_length() > 0, "Batch size is zero."); - - 
NODE_VALIDATION_CHECK(node, - channel_count.is_dynamic() || channel_count.get_length() > 0, - "Channel count is zero."); - - // For pooling ops we don't need dilation, so we fill in the identity value (all 1). - Strides data_dilation(data_spatial_shape.rank().get_length(), 1); - Strides dilations = window_dilation; - // if the window_dilation was not specified, generate the default value (no dilations) - if (window_dilation.empty()) { - // dilations equal to 1 for each spatial axis mean that the window is not dilated - dilations = Strides(data_spatial_shape.rank().get_length(), 1); - } - - data_output_spatial_shape = infer_windowed_reduction_output_shape(node, - data_spatial_shape, - data_dilation, - data_padding_below, - data_padding_above, - window_shape, - window_strides, - dilations, - is_window_all_in_padding_allowed, - ceil_mode); - } - - ov::PartialShape data_batch_output_shape{ov::PartialShape::dynamic(data_output_spatial_shape.rank() + 2)}; - data_batch_output_shape[0] = batch_size; - data_batch_output_shape[1] = channel_count; - - for (int64_t i = 0; i < data_spatial_shape.rank().get_length(); i++) { - data_batch_output_shape[i + 2] = data_output_spatial_shape[i]; - } - - return data_batch_output_shape; -} - -ov::PartialShape infer_slice_shape(const ov::Node* node, - const ov::PartialShape& input_shape, - const std::vector& begin, - const std::vector& end, - const std::vector& strides, - const ov::AxisSet& begin_mask, - const ov::AxisSet& end_mask, - const ov::AxisSet& new_axis_mask, - const ov::AxisSet& shrink_axis_mask, - const ov::AxisSet& ellipsis_mask) { - if (begin.size() && end.size()) { - NODE_VALIDATION_CHECK(node, - begin.size() == end.size(), - "Lower bounds and Upper bounds needs to have same number of values"); - } - if (begin.size() && strides.size()) { - NODE_VALIDATION_CHECK(node, - begin.size() == strides.size(), - "Lower bounds and strides needs to have same number of values"); - } - if (end.size() && strides.size()) { - NODE_VALIDATION_CHECK(node, - end.size() == strides.size(), - "Upper bounds and strides needs to have same number of values"); - } - - NODE_VALIDATION_CHECK(node, ellipsis_mask.size() <= 1, "At most one ellipsis is allowed."); - - if (input_shape.rank().is_dynamic()) { - return ov::PartialShape::dynamic(); - } - - NODE_VALIDATION_CHECK(node, - input_shape.rank().get_length() + new_axis_mask.size() >= begin.size(), - "Input rank plus number of new axis has to be at least the size of Lower " - "and Upper bounds vector."); - - std::vector dim; - - int64_t input_shape_idx = 0; - for (size_t axis = 0; axis < begin.size(); ++axis) { - // add all dimensions hidden under the ellipsis mask if ellipsis mask is set - if (ellipsis_mask.count(axis)) { - // only one bit in ellipsis mask is allowed - int num_new_axis_after_ellipses = 0; - int num_input_axis_before_ellipses = 0; - for (size_t i = 0; i < axis; ++i) { - if (!new_axis_mask.count(i)) { - num_input_axis_before_ellipses++; - } - } - for (size_t i = axis + 1; i < begin.size(); ++i) { - if (new_axis_mask.count(i)) { - num_new_axis_after_ellipses++; - } - } - - int64_t num_input_axis_after_ellipses = - (begin.size() - axis - num_new_axis_after_ellipses - 1); // -1 because it's a position of ellipses - int64_t num_of_hidden_dims = - input_shape.rank().get_length() - num_input_axis_after_ellipses - num_input_axis_before_ellipses; - for (int64_t i = 0; i < num_of_hidden_dims; ++i) { - dim.emplace_back(input_shape[input_shape_idx]); - input_shape_idx++; - } - } else { - // add new single dimension if 
new_axis_mask is set - if (new_axis_mask.count(axis)) { - dim.emplace_back(1); - } - // skip this dimension if shrink_axis_mask is set - else if (shrink_axis_mask.count(axis)) { - input_shape_idx++; - } - // calculating dimension (begin, end, begin_mask, end_mask, stride) - else { - // check dynamic dimension - if (input_shape[input_shape_idx].is_dynamic()) { - input_shape_idx++; - dim.emplace_back(Dimension::dynamic()); - continue; - } - - int64_t lb = begin[axis]; - int64_t ub = end[axis]; - - // set default value for stride or use given value - int64_t stride = 1; - if (strides.size() > axis) { - stride = strides[axis]; - } - NODE_VALIDATION_CHECK(node, stride != 0, "Stride must be non-zero"); - - // convert negative indexes to positive - // take max for this case: if abs(lb) > input_shape[input_shape_idx],then after - // conversion lb < 0 - // so according to tensorflow and numpy we just get 0 - if (lb < 0) { - lb = std::max(input_shape[input_shape_idx].get_length() + lb, int64_t(0)); - } - - if (ub < 0) { - ub = - std::max(input_shape[input_shape_idx].get_length() + ub, stride > 0 ? int64_t(0) : int64_t(-1)); - } - - // apply restrictions when begin or end values more than max possible values. - lb = std::min(input_shape[input_shape_idx].get_length(), lb); - ub = std::min(input_shape[input_shape_idx].get_length(), ub); - - int64_t dimension = 0; - if (stride < 0) { - // apply masks - if (begin_mask.count(axis)) { - lb = input_shape[input_shape_idx].get_length() - 1; - } - if (end_mask.count(axis)) { - ub = -1; - } - - lb = std::min(lb, input_shape[input_shape_idx].get_length() - 1); - lb -= 1; // we always get 1st element, so we need decrease range - if (ub <= lb) { - dimension = (ub - lb) / stride + 1; - } - } else { - // apply masks - if (begin_mask.count(axis)) { - lb = 0; - } - if (end_mask.count(axis)) { - ub = input_shape[input_shape_idx].get_length(); - } - - lb += 1; // we always get 1st element, so we need decrease range - if (ub >= lb) { - dimension = (ub - lb) / stride + 1; - } - } - - dim.emplace_back(dimension); - input_shape_idx++; - } - } - } - // get remaining values - for (; input_shape_idx < input_shape.rank().get_length(); ++input_shape_idx) { - dim.emplace_back(input_shape[input_shape_idx]); - } - - return dim; -} - -void opset1::infer_conv_backprop_auto_padding(const Shape& input_data_shape, - const Shape& filters_shape, - const Shape& output_shape, - const Strides& strides, - const Strides& dilations, - const ov::op::PadType auto_pad_type, - const CoordinateDiff& output_padding, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end) { - OPENVINO_ASSERT(auto_pad_type == ov::op::PadType::SAME_UPPER || auto_pad_type == ov::op::PadType::SAME_LOWER); - - size_t num_spatial_dims = input_data_shape.size(); - OPENVINO_ASSERT(filters_shape.size() == num_spatial_dims && strides.size() == num_spatial_dims && - dilations.size() == num_spatial_dims && pads_begin.size() == num_spatial_dims && - pads_end.size() == num_spatial_dims && output_padding.size() == num_spatial_dims); - - pads_begin = CoordinateDiff(num_spatial_dims); - pads_end = CoordinateDiff(num_spatial_dims); - - for (uint64_t i = 0; i < num_spatial_dims; ++i) { - int total_padding = std::max( - static_cast(strides[i] * (input_data_shape[i] - 1) + dilations[i] * (filters_shape[i] - 1) + 1 - - output_shape[i] + output_padding[i]), - 0); - if (auto_pad_type != ov::op::PadType::SAME_UPPER) { - pads_begin[i] = total_padding / 2; - pads_end[i] = total_padding - pads_begin[i]; - } else { - pads_end[i] = total_padding 
/ 2; - pads_begin[i] = total_padding - pads_end[i]; - } - } -} - -namespace { -/// \brief Scalar variant describes value of an Output, for use in max shape determination -/// -/// For tensor values, we use the maximum value in the tensor -struct MaxValue { - /// \brief No information known about the output - MaxValue() = default; - /// \brief uint64_t assoiated with the output - MaxValue(uint64_t value) : m_value(value) {} - MaxValue(const std::vector& slices, int64_t slice_axis) : m_slices(slices), m_slice_axis(slice_axis) { - m_value = *max_element(m_slices.begin(), m_slices.end()); - } - uint64_t m_value{std::numeric_limits::max()}; - std::vector m_slices; - int64_t m_slice_axis{-1}; -}; -} // namespace - -std::shared_ptr get_constant_max_of_type(element::Type_t t) { - auto tensor = ov::util::make_tensor_of_max_value(t); - return tensor ? std::make_shared(tensor) : nullptr; -} - -std::shared_ptr get_constant_min_of_type(element::Type_t t) { - auto tensor = ov::util::make_tensor_of_min_value(t); - return tensor ? std::make_shared(tensor) : nullptr; -} - -std::shared_ptr get_constant_lowest_of_type(element::Type_t t) { -#define OPENVINO_TYPE_TO_LOWEST_CONST(t) \ - case t: \ - return op::v0::Constant::create( \ - t, \ - {}, \ - {std::numeric_limits::value_type>::lowest()}); \ - break - - switch (t) { - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::boolean); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::bf16); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::f16); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::f32); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::f64); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::i8); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::i16); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::i32); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::i64); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::u1); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::u8); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::u16); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::u32); - OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::u64); - - case ov::element::undefined: - case ov::element::dynamic: - default: - return nullptr; - } -} -} // namespace ngraph namespace { const auto normalize_axis_to = [](const int64_t& tensor_rank) { diff --git a/src/core/tests/CMakeLists.txt b/src/core/tests/CMakeLists.txt index c3f2120e98e2f8..73a0fe75c591b9 100644 --- a/src/core/tests/CMakeLists.txt +++ b/src/core/tests/CMakeLists.txt @@ -37,7 +37,6 @@ ov_add_test_target( LINK_LIBRARIES common_test_utils openvino::reference - openvino::builders openvino::util openvino::shape_inference ${CMAKE_DL_LIBS} diff --git a/src/core/tests/build_graph.cpp b/src/core/tests/build_graph.cpp index 60d19fdb1f9fc7..a56fa20bf2fb90 100644 --- a/src/core/tests/build_graph.cpp +++ b/src/core/tests/build_graph.cpp @@ -7,6 +7,7 @@ #include #include "common_test_utils/graph_comparator.hpp" +#include "common_test_utils/node_builders/broadcast.hpp" #include "common_test_utils/test_tools.hpp" #include "common_test_utils/type_prop.hpp" #include "openvino/core/except.hpp" @@ -24,7 +25,6 @@ #include "openvino/op/split.hpp" #include "openvino/op/squeeze.hpp" #include "openvino/op/util/variable.hpp" -#include "ov_models/ov_builders/broadcast.hpp" using namespace std; using namespace ov; @@ -35,8 +35,8 @@ TEST(build_graph, build_simple) { auto arg1 = make_shared(element::f32, Shape{3}); auto arg2 = make_shared(element::f32, Shape{32, 7}); auto arg3 = make_shared(element::f32, Shape{32, 7}); - auto broadcast_1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, 
AxisSet{0}); - auto b1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto broadcast_1 = ov::test::utils::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto b1 = ov::test::utils::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto dot = make_shared(arg2, arg0); ASSERT_EQ(dot->input_value(0).get_node_shared_ptr(), arg2); ASSERT_EQ(dot->input_value(1).get_node_shared_ptr(), arg0); @@ -91,8 +91,8 @@ TEST(build_graph, function_undeclared_parameters) { auto arg1 = make_shared(element::f32, Shape{3}); auto arg2 = make_shared(element::f32, Shape{32, 7}); auto arg3 = make_shared(element::f32, Shape{32, 7}); - auto broadcast_1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); - auto b1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto broadcast_1 = ov::test::utils::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto b1 = ov::test::utils::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto dot = make_shared(arg2, arg0); ASSERT_EQ(dot->input_values()[0].get_node_shared_ptr(), arg2); ASSERT_EQ(dot->input_values()[1].get_node_shared_ptr(), arg0); @@ -436,8 +436,8 @@ TEST(build_graph, build_graph_parameters_autodetection) { auto arg1 = make_shared(element::f32, Shape{3}); auto arg2 = make_shared(element::f32, Shape{32, 7}); auto arg3 = make_shared(element::f32, Shape{32, 7}); - auto broadcast_1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); - auto b1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto broadcast_1 = ov::test::utils::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto b1 = ov::test::utils::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto dot = make_shared(arg2, arg0); auto f = make_shared(OutputVector{dot}); diff --git a/src/core/tests/eval.cpp b/src/core/tests/eval.cpp index 4962e5db4ef7c7..b74859ae0cd04f 100644 --- a/src/core/tests/eval.cpp +++ b/src/core/tests/eval.cpp @@ -4111,3 +4111,29 @@ TEST(eval, invalid_shape) { make_tensor({1, 3}, {7.0f, 6.0f, 1.0f})}; ASSERT_THROW(model->evaluate(out_vector, in_vector), ov::Exception); } + +TEST(eval, evaluate_gather_string_basic) { + std::vector input_values = {"Abc", "x", "1234", "...."}; + std::vector out_expected{"x", "...."}; + std::vector indices_val{1, 3}; + + const auto data_shape = Shape{input_values.size()}; + const auto exp_out_shape = Shape{out_expected.size()}; + auto data = make_shared(element::string, data_shape); + auto indices = ov::op::v0::Constant::create(element::i32, Shape{indices_val.size()}, indices_val); + auto axis = ov::op::v0::Constant::create(element::i32, Shape{1}, {0}); + auto op = make_shared(data, indices, axis, 0); + auto model = make_shared(OutputVector{op}, ParameterVector{data}); + + auto result = ov::Tensor(element::string, exp_out_shape); + auto out_vector = ov::TensorVector{result}; + auto in_tensor = ov::Tensor(element::string, data_shape, input_values.data()); + auto in_vector = ov::TensorVector{in_tensor}; + + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + EXPECT_EQ(result.get_element_type(), element::string); + EXPECT_EQ(result.get_shape(), exp_out_shape); + + const auto result_const = ov::op::v0::Constant(out_vector.at(0)); + EXPECT_EQ(out_expected, result_const.get_value_strings()); +} diff --git a/src/core/tests/type_prop/batch_to_space.cpp b/src/core/tests/type_prop/batch_to_space.cpp index 443831579968c0..66d992e08ec68a 100644 --- a/src/core/tests/type_prop/batch_to_space.cpp +++ b/src/core/tests/type_prop/batch_to_space.cpp @@ -363,7 +363,6 
@@ TEST(type_prop, batch_to_space_output_dynamic_shape_5D_when_batch_is_static) { {100, 150}, {10 * 16, 20 * 16}})); } -OPENVINO_SUPPRESS_DEPRECATED_START TEST(type_prop, batch_to_space_output_dynamic_shape_5D_when_batch_is_dynamic) { auto data_shape = ov::PartialShape{{959, 962}, {2, 34}, {9, 21}, {100, 162}, {1, 1999}}; diff --git a/src/core/tests/type_prop/depth_to_space.cpp b/src/core/tests/type_prop/depth_to_space.cpp index e656bbe0a9be89..138d384aa643f7 100644 --- a/src/core/tests/type_prop/depth_to_space.cpp +++ b/src/core/tests/type_prop/depth_to_space.cpp @@ -39,8 +39,6 @@ TEST(type_prop, depth_to_space_input_interval_shape_default_block_size) { EXPECT_THAT(get_shape_labels(depth_to_space->get_output_partial_shape(0)), ElementsAre(10, 11, 12, 13, 14)); } -OPENVINO_SUPPRESS_DEPRECATED_START - TEST(type_prop, depth_to_space_output_dynamicshape_block_first_5D_when_depth_is_dynamic) { auto A = make_shared(element::f32, PartialShape{{2, 10}, {81, 82}, {3, 7}, {423, 3000}, {235, 1345}}); diff --git a/src/core/tests/type_prop/tensor_iterator.cpp b/src/core/tests/type_prop/tensor_iterator.cpp index 64bd9df242860f..9c78450716d4b0 100644 --- a/src/core/tests/type_prop/tensor_iterator.cpp +++ b/src/core/tests/type_prop/tensor_iterator.cpp @@ -6,10 +6,10 @@ #include +#include "common_test_utils/node_builders/reshape.hpp" #include "common_test_utils/type_prop.hpp" #include "openvino/core/model.hpp" #include "openvino/opsets/opset5.hpp" -#include "ov_models/ov_builders/reshape.hpp" using namespace std; using namespace ov; @@ -34,14 +34,14 @@ TEST(type_prop, tensor_iterator_lstm) { auto X = make_shared(element::f32, Shape{N, 1, I}); auto W_body = make_shared(element::f32, Shape{4 * H, I}); auto R_body = make_shared(element::f32, Shape{4 * H, H}); - auto LSTM_cell = make_shared(ov::op::util::reshape(X, Shape{N, I}), - ov::op::util::reshape(H_t, Shape{N, H}), - ov::op::util::reshape(C_t, Shape{N, H}), + auto LSTM_cell = make_shared(ov::test::utils::reshape(X, Shape{N, I}), + ov::test::utils::reshape(H_t, Shape{N, H}), + ov::test::utils::reshape(C_t, Shape{N, H}), W_body, R_body, H); - auto H_o = ov::op::util::reshape(LSTM_cell->output(0), Shape{N, 1, H}); - auto C_o = ov::op::util::reshape(LSTM_cell->output(1), Shape{N, 1, H}); + auto H_o = ov::test::utils::reshape(LSTM_cell->output(0), Shape{N, 1, H}); + auto C_o = ov::test::utils::reshape(LSTM_cell->output(1), Shape{N, 1, H}); auto body = make_shared(OutputVector{H_o, C_o}, ParameterVector{X, H_t, C_t, W_body, R_body}); auto tensor_iterator = make_shared(); @@ -197,14 +197,14 @@ TEST(type_prop, tensor_iterator_with_dynamic_reshape) { auto X = make_shared(element::f32, Shape{N, 1, I}); auto W_body = make_shared(element::f32, Shape{4 * H, I}); auto R_body = make_shared(element::f32, Shape{4 * H, H}); - auto LSTM_cell = make_shared(ov::op::util::reshape(X, Shape{N, I}), - ov::op::util::reshape(H_t, Shape{N, H}), - ov::op::util::reshape(C_t, Shape{N, H}), + auto LSTM_cell = make_shared(ov::test::utils::reshape(X, Shape{N, I}), + ov::test::utils::reshape(H_t, Shape{N, H}), + ov::test::utils::reshape(C_t, Shape{N, H}), W_body, R_body, H); - auto H_o = ov::op::util::reshape(LSTM_cell->output(0), Shape{N, 1, H}); - auto C_o = ov::op::util::reshape(LSTM_cell->output(1), Shape{N, 1, H}); + auto H_o = ov::test::utils::reshape(LSTM_cell->output(0), Shape{N, 1, H}); + auto C_o = ov::test::utils::reshape(LSTM_cell->output(1), Shape{N, 1, H}); auto body = make_shared(OutputVector{H_o, C_o}, ParameterVector{X, H_t, C_t, W_body, R_body}); auto tensor_iterator = 
make_shared(); diff --git a/src/core/tests/type_prop/variadic_split.cpp b/src/core/tests/type_prop/variadic_split.cpp index c053d272fc0c68..84d3463ef78f53 100644 --- a/src/core/tests/type_prop/variadic_split.cpp +++ b/src/core/tests/type_prop/variadic_split.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/shape_of.hpp" diff --git a/src/core/tests/type_relaxed_copy.cpp b/src/core/tests/type_relaxed_copy.cpp index bbda062e23271c..a6796ff5a466c2 100644 --- a/src/core/tests/type_relaxed_copy.cpp +++ b/src/core/tests/type_relaxed_copy.cpp @@ -7,7 +7,6 @@ #include #include -#include "ie_common.h" #include "openvino/op/matmul.hpp" #include "ov_ops/type_relaxed.hpp" diff --git a/src/core/tests/visitors/op/tensor_iterator.cpp b/src/core/tests/visitors/op/tensor_iterator.cpp index d4253d7969e6fd..a3f5e5efa53672 100644 --- a/src/core/tests/visitors/op/tensor_iterator.cpp +++ b/src/core/tests/visitors/op/tensor_iterator.cpp @@ -6,10 +6,10 @@ #include +#include "common_test_utils/node_builders/reshape.hpp" #include "openvino/op/add.hpp" #include "openvino/op/lstm_cell.hpp" #include "openvino/op/multiply.hpp" -#include "ov_models/ov_builders/reshape.hpp" #include "visitors/visitors.hpp" using namespace std; @@ -39,14 +39,14 @@ TEST(attributes, tensor_iterator_lstm) { auto X = make_shared(element::f32, Shape{N, 1, I}); auto W_body = make_shared(element::f32, Shape{4 * H, I}); auto R_body = make_shared(element::f32, Shape{4 * H, H}); - auto LSTM_cell = make_shared(ov::op::util::reshape(X, Shape{N, I}), - ov::op::util::reshape(H_t, Shape{N, H}), - ov::op::util::reshape(C_t, Shape{N, H}), + auto LSTM_cell = make_shared(ov::test::utils::reshape(X, Shape{N, I}), + ov::test::utils::reshape(H_t, Shape{N, H}), + ov::test::utils::reshape(C_t, Shape{N, H}), W_body, R_body, H); - auto H_o = ov::op::util::reshape(LSTM_cell->output(0), Shape{N, 1, H}); - auto C_o = ov::op::util::reshape(LSTM_cell->output(1), Shape{N, 1, H}); + auto H_o = ov::test::utils::reshape(LSTM_cell->output(0), Shape{N, 1, H}); + auto C_o = ov::test::utils::reshape(LSTM_cell->output(1), Shape{N, 1, H}); auto body = make_shared(OutputVector{H_o, C_o}, ParameterVector{X, H_t, C_t, W_body, R_body}); auto tensor_iterator = make_shared(); diff --git a/src/core/tests/visitors/visitors.hpp b/src/core/tests/visitors/visitors.hpp index ce5884e2aff9fd..7fb6d1999f66d3 100644 --- a/src/core/tests/visitors/visitors.hpp +++ b/src/core/tests/visitors/visitors.hpp @@ -10,7 +10,6 @@ #include #include "openvino/core/attribute_visitor.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/op/util/framework_node.hpp" #include "openvino/op/util/sub_graph_base.hpp" #include "openvino/op/util/variable.hpp" diff --git a/src/frontends/onnx/docs/tests.md b/src/frontends/onnx/docs/tests.md index 6dc6068b2fc2b2..b1a8b7100e2883 100644 --- a/src/frontends/onnx/docs/tests.md +++ b/src/frontends/onnx/docs/tests.md @@ -25,7 +25,7 @@ For example: ## Pre-steps for all Python tests 1. Build OpenVINO with `-DENABLE_PYTHON=ON`, preferably in a `Python` virtual environment. To avoid problems with too many Python interpreters installed on the host, you can also set the `-DPython3_EXECUTABLE=` build option (requires cmake 3.16 and higher). 
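
For example, a minimal configure-and-build sketch for this step (the build directory and interpreter path are illustrative assumptions, not taken from the documentation):
```
cmake -S openvino -B openvino_build -DENABLE_PYTHON=ON -DPython3_EXECUTABLE=/usr/bin/python3.10
cmake --build openvino_build --parallel
```
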
> **NOTE**: If you want to run the tests from the installation directory (like in the CI), add the `-P cmake_install.cmake` and `-DCOMPONENT=tests` CMake build options, and install OpenVINO via `cmake --build . --target install` as additional steps. -2. Set up Python paths via `source /setupvars.sh` for Linux, or `sh \setupvars.bat` for Windows. +2. Set up Python paths via `source /setupvars.sh` for Linux, `. /setupvars.ps1` for Windows PowerShell, or `sh \setupvars.bat` for Windows Command Prompt. 3. Install Python dependencies: ``` pip install -r /src/bindings/python/requirements.txt diff --git a/src/frontends/onnx/frontend/CMakeLists.txt b/src/frontends/onnx/frontend/CMakeLists.txt index d5f52a115e8940..19e65e3603ac38 100644 --- a/src/frontends/onnx/frontend/CMakeLists.txt +++ b/src/frontends/onnx/frontend/CMakeLists.txt @@ -8,7 +8,7 @@ ov_add_frontend(NAME onnx PROTOBUF_LITE SKIP_NCC_STYLE FILEDESCRIPTION "FrontEnd to load and convert ONNX file format" - LINK_LIBRARIES openvino::builders openvino_onnx_common openvino::core::dev) + LINK_LIBRARIES openvino_onnx_common openvino::core::dev) set(ONNX_OPSET_VERSION 18 CACHE INTERNAL "Supported version of ONNX operator set") target_compile_definitions(${TARGET_NAME} PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION}) diff --git a/src/frontends/onnx/frontend/include/openvino/frontend/onnx/node_context.hpp b/src/frontends/onnx/frontend/include/openvino/frontend/onnx/node_context.hpp index 0aa11b50d4a237..b6dac7caf9a10b 100644 --- a/src/frontends/onnx/frontend/include/openvino/frontend/onnx/node_context.hpp +++ b/src/frontends/onnx/frontend/include/openvino/frontend/onnx/node_context.hpp @@ -4,26 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" #include "openvino/frontend/extension/conversion.hpp" #include "openvino/frontend/node_context.hpp" #include "openvino/frontend/onnx/visibility.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { -class Node; -} -} // namespace ngraph - namespace ov { namespace frontend { namespace onnx { +class Node; class ONNX_FRONTEND_API NodeContext : public ov::frontend::NodeContext { public: using Ptr = std::shared_ptr; - explicit NodeContext(const ngraph::onnx_import::Node& context); + explicit NodeContext(const ov::frontend::onnx::Node& context); size_t get_input_size() const override; Output get_input(int port_idx) const override; @@ -31,14 +24,13 @@ class ONNX_FRONTEND_API NodeContext : public ov::frontend::NodeContext { ov::Any get_attribute_as_any(const std::string& name) const override; protected: - const ngraph::onnx_import::Node& m_context; + const ov::frontend::onnx::Node& m_context; ov::OutputVector m_inputs; private: ov::Any apply_additional_conversion_rules(const ov::Any& data, const std::type_info& type_info) const override; }; -using CreatorFunction = std::function; +using CreatorFunction = std::function; } // namespace onnx } // namespace frontend } // namespace ov -OPENVINO_SUPPRESS_DEPRECATED_END diff --git a/src/frontends/onnx/frontend/src/core/attribute.cpp b/src/frontends/onnx/frontend/src/core/attribute.cpp index 0f3ff1736e9a0e..5f4667acdba4b7 100644 --- a/src/frontends/onnx/frontend/src/core/attribute.cpp +++ b/src/frontends/onnx/frontend/src/core/attribute.cpp @@ -7,14 +7,15 @@ #include "core/graph.hpp" #include "core/model.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { Subgraph Attribute::get_subgraph(Graph* parent_graph) const { - if (m_attribute_proto->type() != 
ONNX_NAMESPACE::AttributeProto_AttributeType_GRAPH) { + if (m_attribute_proto->type() != AttributeProto_AttributeType::AttributeProto_AttributeType_GRAPH) { ONNX_INVALID_ATTR(m_attribute_proto->type(), "GRAPH"); } - auto model_proto = std::make_shared(); + auto model_proto = std::make_shared(); const auto& graph = m_attribute_proto->g(); model_proto->mutable_graph()->CopyFrom(graph); @@ -69,6 +70,6 @@ ov::Any Attribute::get_any() const { } } -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/attribute.hpp b/src/frontends/onnx/frontend/src/core/attribute.hpp index ea8bedb5a28d0f..92708e27a8863e 100644 --- a/src/frontends/onnx/frontend/src/core/attribute.hpp +++ b/src/frontends/onnx/frontend/src/core/attribute.hpp @@ -10,39 +10,34 @@ #include "core/tensor.hpp" #include "openvino/core/except.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { // forward declarations class Graph; class Subgraph; class Model; -// Detecting automatically the underlying type used to store the information -// for data type of values an attribute is holding. A bug was discovered in -// protobuf which forced ONNX team to switch from `enum AttributeProto_AttributeType` -// to `int32` in order to workaround the bug. This line allows using both versions -// of ONNX generated wrappers. -using AttributeProto_AttributeType = decltype(ONNX_NAMESPACE::AttributeProto{}.type()); +using ::ONNX_NAMESPACE::AttributeProto; +using ::ONNX_NAMESPACE::AttributeProto_AttributeType; +using ::ONNX_NAMESPACE::AttributeProto_AttributeType_Name; namespace detail { namespace attribute { template -inline T get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { +inline T get_value(const AttributeProto& attribute) { OPENVINO_THROW("Unsupported attribute type"); } -#define ONNX_INVALID_ATTR(attr, expected) \ - OPENVINO_THROW("Invalid attribute type ", \ - ONNX_NAMESPACE::AttributeProto_AttributeType_Name(attr), \ - " expected: ", \ - expected) +#define ONNX_INVALID_ATTR(attr, expected) \ + OPENVINO_THROW("Invalid attribute type ", AttributeProto_AttributeType_Name(attr), " expected: ", expected) template <> -inline float get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { +inline float get_value(const AttributeProto& attribute) { switch (attribute.type()) { - case ONNX_NAMESPACE::AttributeProto_AttributeType_INT: + case AttributeProto_AttributeType::AttributeProto_AttributeType_INT: return static_cast(attribute.i()); - case ONNX_NAMESPACE::AttributeProto_AttributeType_FLOAT: + case AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT: return attribute.f(); default: ONNX_INVALID_ATTR(attribute.type(), "INT, FLOAT"); @@ -50,15 +45,15 @@ inline float get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { } template <> -inline std::vector get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { +inline std::vector get_value(const AttributeProto& attribute) { switch (attribute.type()) { - case ONNX_NAMESPACE::AttributeProto_AttributeType_INT: + case AttributeProto_AttributeType::AttributeProto_AttributeType_INT: return {static_cast(attribute.i())}; - case ONNX_NAMESPACE::AttributeProto_AttributeType_INTS: + case AttributeProto_AttributeType::AttributeProto_AttributeType_INTS: return {std::begin(attribute.floats()), std::end(attribute.floats())}; - case ONNX_NAMESPACE::AttributeProto_AttributeType_FLOAT: + case 
AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT: return {attribute.f()}; - case ONNX_NAMESPACE::AttributeProto_AttributeType_FLOATS: + case AttributeProto_AttributeType::AttributeProto_AttributeType_FLOATS: return {std::begin(attribute.floats()), std::end(attribute.floats())}; default: ONNX_INVALID_ATTR(attribute.type(), "INT, INTS, FLOAT, FLOATS"); @@ -66,11 +61,11 @@ inline std::vector get_value(const ONNX_NAMESPACE::AttributeProto& attrib } template <> -inline double get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { +inline double get_value(const AttributeProto& attribute) { switch (attribute.type()) { - case ONNX_NAMESPACE::AttributeProto_AttributeType_FLOAT: + case AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT: return static_cast(attribute.f()); - case ONNX_NAMESPACE::AttributeProto_AttributeType_INT: + case AttributeProto_AttributeType::AttributeProto_AttributeType_INT: return static_cast(attribute.i()); default: ONNX_INVALID_ATTR(attribute.type(), "INT, FLOAT"); @@ -78,19 +73,19 @@ inline double get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { } template <> -inline std::vector get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { +inline std::vector get_value(const AttributeProto& attribute) { #if defined(_MSC_VER) # pragma warning(push) # pragma warning(disable : 4244) #endif switch (attribute.type()) { - case ONNX_NAMESPACE::AttributeProto_AttributeType_INT: + case AttributeProto_AttributeType::AttributeProto_AttributeType_INT: return {static_cast(attribute.i())}; - case ONNX_NAMESPACE::AttributeProto_AttributeType_INTS: + case AttributeProto_AttributeType::AttributeProto_AttributeType_INTS: return {std::begin(attribute.ints()), std::end(attribute.ints())}; - case ONNX_NAMESPACE::AttributeProto_AttributeType_FLOAT: + case AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT: return {static_cast(attribute.f())}; - case ONNX_NAMESPACE::AttributeProto_AttributeType_FLOATS: + case AttributeProto_AttributeType::AttributeProto_AttributeType_FLOATS: return {std::begin(attribute.floats()), std::end(attribute.floats())}; default: ONNX_INVALID_ATTR(attribute.type(), "INT, INTS, FLOAT, FLOATS"); @@ -101,19 +96,19 @@ inline std::vector get_value(const ONNX_NAMESPACE::AttributeProto& attri } template <> -inline std::size_t get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { - if (attribute.type() != ONNX_NAMESPACE::AttributeProto_AttributeType_INT) { +inline std::size_t get_value(const AttributeProto& attribute) { + if (attribute.type() != AttributeProto_AttributeType::AttributeProto_AttributeType_INT) { ONNX_INVALID_ATTR(attribute.type(), "INT"); } return static_cast(attribute.i()); } template <> -inline std::vector get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { +inline std::vector get_value(const AttributeProto& attribute) { switch (attribute.type()) { - case ONNX_NAMESPACE::AttributeProto_AttributeType_INT: + case AttributeProto_AttributeType::AttributeProto_AttributeType_INT: return {static_cast(attribute.i())}; - case ONNX_NAMESPACE::AttributeProto_AttributeType_INTS: + case AttributeProto_AttributeType::AttributeProto_AttributeType_INTS: return {std::begin(attribute.ints()), std::end(attribute.ints())}; default: ONNX_INVALID_ATTR(attribute.type(), "INT, INTS"); @@ -121,19 +116,19 @@ inline std::vector get_value(const ONNX_NAMESPACE::AttributeProto& } template <> -inline int64_t get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { - if (attribute.type() != 
ONNX_NAMESPACE::AttributeProto_AttributeType_INT) { +inline int64_t get_value(const AttributeProto& attribute) { + if (attribute.type() != AttributeProto_AttributeType::AttributeProto_AttributeType_INT) { ONNX_INVALID_ATTR(attribute.type(), "INT"); } return attribute.i(); } template <> -inline std::vector get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { +inline std::vector get_value(const AttributeProto& attribute) { switch (attribute.type()) { - case ONNX_NAMESPACE::AttributeProto_AttributeType_INT: + case AttributeProto_AttributeType::AttributeProto_AttributeType_INT: return {attribute.i()}; - case ONNX_NAMESPACE::AttributeProto_AttributeType_INTS: + case AttributeProto_AttributeType::AttributeProto_AttributeType_INTS: return {std::begin(attribute.ints()), std::end(attribute.ints())}; default: ONNX_INVALID_ATTR(attribute.type(), "INT, INTS"); @@ -141,19 +136,19 @@ inline std::vector get_value(const ONNX_NAMESPACE::AttributeProto& attr } template <> -inline std::string get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { - if (attribute.type() != ONNX_NAMESPACE::AttributeProto_AttributeType_STRING) { +inline std::string get_value(const AttributeProto& attribute) { + if (attribute.type() != AttributeProto_AttributeType::AttributeProto_AttributeType_STRING) { ONNX_INVALID_ATTR(attribute.type(), "STRING"); } return attribute.s(); } template <> -inline std::vector get_value(const ONNX_NAMESPACE::AttributeProto& attribute) { +inline std::vector get_value(const AttributeProto& attribute) { switch (attribute.type()) { - case ONNX_NAMESPACE::AttributeProto_AttributeType_STRING: + case AttributeProto_AttributeType::AttributeProto_AttributeType_STRING: return {attribute.s()}; - case ONNX_NAMESPACE::AttributeProto_AttributeType_STRINGS: + case AttributeProto_AttributeType::AttributeProto_AttributeType_STRINGS: return {std::begin(attribute.strings()), std::end(attribute.strings())}; default: ONNX_INVALID_ATTR(attribute.type(), "STRING, STRINGS"); @@ -161,29 +156,28 @@ inline std::vector get_value(const ONNX_NAMESPACE::AttributeProto& } } // namespace attribute - } // namespace detail class Attribute { public: enum class Type { - undefined = ONNX_NAMESPACE::AttributeProto_AttributeType_UNDEFINED, - float_point = ONNX_NAMESPACE::AttributeProto_AttributeType_FLOAT, - integer = ONNX_NAMESPACE::AttributeProto_AttributeType_INT, - string = ONNX_NAMESPACE::AttributeProto_AttributeType_STRING, - tensor = ONNX_NAMESPACE::AttributeProto_AttributeType_TENSOR, - graph = ONNX_NAMESPACE::AttributeProto_AttributeType_GRAPH, - sparse_tensor = ONNX_NAMESPACE::AttributeProto_AttributeType_SPARSE_TENSOR, - float_point_array = ONNX_NAMESPACE::AttributeProto_AttributeType_FLOATS, - integer_array = ONNX_NAMESPACE::AttributeProto_AttributeType_INTS, - string_array = ONNX_NAMESPACE::AttributeProto_AttributeType_STRINGS, - tensor_array = ONNX_NAMESPACE::AttributeProto_AttributeType_TENSORS, - sparse_tensor_array = ONNX_NAMESPACE::AttributeProto_AttributeType_SPARSE_TENSORS, - graph_array = ONNX_NAMESPACE::AttributeProto_AttributeType_GRAPHS + undefined = AttributeProto_AttributeType::AttributeProto_AttributeType_UNDEFINED, + float_point = AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT, + integer = AttributeProto_AttributeType::AttributeProto_AttributeType_INT, + string = AttributeProto_AttributeType::AttributeProto_AttributeType_STRING, + tensor = AttributeProto_AttributeType::AttributeProto_AttributeType_TENSOR, + graph = AttributeProto_AttributeType::AttributeProto_AttributeType_GRAPH, 
+ sparse_tensor = AttributeProto_AttributeType::AttributeProto_AttributeType_SPARSE_TENSOR, + float_point_array = AttributeProto_AttributeType::AttributeProto_AttributeType_FLOATS, + integer_array = AttributeProto_AttributeType::AttributeProto_AttributeType_INTS, + string_array = AttributeProto_AttributeType::AttributeProto_AttributeType_STRINGS, + tensor_array = AttributeProto_AttributeType::AttributeProto_AttributeType_TENSORS, + sparse_tensor_array = AttributeProto_AttributeType::AttributeProto_AttributeType_SPARSE_TENSORS, + graph_array = AttributeProto_AttributeType::AttributeProto_AttributeType_GRAPHS }; Attribute() = delete; - Attribute(const ONNX_NAMESPACE::AttributeProto& attribute_proto, + Attribute(const AttributeProto& attribute_proto, const std::string& model_dir, detail::MappedMemoryHandles mmap_cache) : m_attribute_proto{&attribute_proto}, @@ -285,7 +279,7 @@ class Attribute { return {std::begin(m_attribute_proto->strings()), std::end(m_attribute_proto->strings())}; } - /* explicit */ operator ONNX_NAMESPACE::AttributeProto_AttributeType() const { + /* explicit */ operator AttributeProto_AttributeType() const { return m_attribute_proto->type(); } @@ -337,11 +331,11 @@ class Attribute { ov::Any get_any() const; private: - const ONNX_NAMESPACE::AttributeProto* m_attribute_proto; + const AttributeProto* m_attribute_proto; std::string m_model_dir; detail::MappedMemoryHandles m_mmap_cache; }; -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/graph.cpp b/src/frontends/onnx/frontend/src/core/graph.cpp index 328289a8fb0cc2..4807ca248b34bc 100644 --- a/src/frontends/onnx/frontend/src/core/graph.cpp +++ b/src/frontends/onnx/frontend/src/core/graph.cpp @@ -21,12 +21,13 @@ #include "openvino/frontend/onnx/node_context.hpp" #include "openvino/op/util/op_types.hpp" #include "utils/common.hpp" -#include "utils/legacy_conversion_extension.hpp" using namespace ov; +using namespace ::ONNX_NAMESPACE; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace detail { bool common_node_for_all_outputs(const ov::OutputVector& outputs) { const auto first_out_node = outputs.at(0).get_node(); @@ -38,7 +39,6 @@ bool common_node_for_all_outputs(const ov::OutputVector& outputs) { return ret; }; -OPENVINO_SUPPRESS_DEPRECATED_START OperatorsBridge register_extensions(OperatorsBridge& bridge, const std::vector& conversions) { for (const auto& extension : conversions) { @@ -46,45 +46,33 @@ OperatorsBridge register_extensions(OperatorsBridge& bridge, bridge.overwrite_operator( common_conv_ext->get_op_type(), "", - [common_conv_ext](const ngraph::onnx_import::Node& node) -> ov::OutputVector { + [common_conv_ext](const ov::frontend::onnx::Node& node) -> ov::OutputVector { return common_conv_ext->get_converter()(ov::frontend::onnx::NodeContext(node)); }); } else if (const auto onnx_conv_ext = std::dynamic_pointer_cast(extension)) { bridge.overwrite_operator(onnx_conv_ext->get_op_type(), onnx_conv_ext->get_domain(), - [onnx_conv_ext](const ngraph::onnx_import::Node& node) -> ov::OutputVector { + [onnx_conv_ext](const ov::frontend::onnx::Node& node) -> ov::OutputVector { return onnx_conv_ext->get_converter()(ov::frontend::onnx::NodeContext(node)); }); } } return bridge; } -OPENVINO_SUPPRESS_DEPRECATED_END OperatorsBridge init_ops_bridge(const std::vector& conversions) { - const auto legacy_conv_ext = std::find_if(std::begin(conversions), - 
std::end(conversions), - [](const ov::frontend::ConversionExtensionBase::Ptr& conv) { - return std::dynamic_pointer_cast(conv); - }); - if (legacy_conv_ext == std::end(conversions)) { // no legacy extensions used - OperatorsBridge bridge; - return register_extensions(bridge, conversions); - } else { // legacy extensions can be mixed with the new one - return register_extensions(std::dynamic_pointer_cast(*legacy_conv_ext)->ops_bridge(), - conversions); - } + OperatorsBridge bridge; + return register_extensions(bridge, conversions); } -Model::ModelOpSet build_model_opset(const ONNX_NAMESPACE::ModelProto& model_proto, const OperatorsBridge& ops_bridge) { +Model::ModelOpSet build_model_opset(const ModelProto& model_proto, const OperatorsBridge& ops_bridge) { // copy the opset imports from the ONNX model and sort them by their version in ascending order // this will make sure that multiple opset imports for the same domain will cause the largest // version to be used for this model, for example: // [{domain:"", version:11}, {domain:"", version:1} {domain:"", version:13}] ==> {domain:"", version:13} auto opset_imports = model_proto.opset_import(); - const auto sort_by_version_ascending = [](const ONNX_NAMESPACE::OperatorSetIdProto& lhs, - const ONNX_NAMESPACE::OperatorSetIdProto& rhs) { + const auto sort_by_version_ascending = [](const OperatorSetIdProto& lhs, const OperatorSetIdProto& rhs) { return lhs.version() < rhs.version(); }; std::sort(std::begin(opset_imports), std::end(opset_imports), sort_by_version_ascending); @@ -92,7 +80,7 @@ Model::ModelOpSet build_model_opset(const ONNX_NAMESPACE::ModelProto& model_prot Model::ModelOpSet opset; std::for_each(opset_imports.rbegin(), opset_imports.rend(), - [&opset, &ops_bridge](const ONNX_NAMESPACE::OperatorSetIdProto& onnx_opset) { + [&opset, &ops_bridge](const OperatorSetIdProto& onnx_opset) { const auto domain = onnx_opset.has_domain() ? onnx_opset.domain() == "ai.onnx" ? 
"" : onnx_opset.domain() : ""; if (opset.find(domain) == std::end(opset)) { @@ -122,13 +110,13 @@ ov::frontend::ExtensionHolder subgraph_required_extensions( } // namespace detail Graph::Graph(const std::string& model_dir, - const std::shared_ptr& model_proto, + const std::shared_ptr& model_proto, detail::MappedMemoryHandles mmap_cache, ov::frontend::ExtensionHolder extensions) : Graph(model_dir, model_proto, common::make_unique(), mmap_cache, std::move(extensions)) {} Graph::Graph(const std::string& model_dir, - const std::shared_ptr& model_proto, + const std::shared_ptr& model_proto, std::unique_ptr&& cache, detail::MappedMemoryHandles mmap_cache, ov::frontend::ExtensionHolder extensions) @@ -155,7 +143,7 @@ Graph::Graph(const std::string& model_dir, // invalid external data makes initializers creation impossible throw; } catch (const ov::Exception&) { - ov_constant = ngraph::onnx_import::common::make_failsafe_constant(tensor.get_ov_type()); + ov_constant = ov::frontend::onnx::common::make_failsafe_constant(tensor.get_ov_type()); } initializers.emplace(initializer_tensor.name(), tensor); @@ -177,7 +165,6 @@ Graph::Graph(const std::string& model_dir, } } -OPENVINO_SUPPRESS_DEPRECATED_START void Graph::convert_to_ov_nodes() { const float total = static_cast(m_model->get_graph().node().size()); unsigned int completed = 0u; @@ -209,16 +196,14 @@ void Graph::convert_to_ov_nodes() { } } } -OPENVINO_SUPPRESS_DEPRECATED_END void Graph::remove_dangling_parameters() { - const auto any_tensor_name_matches_onnx_output = [](const Output& param_output, - const ONNX_NAMESPACE::GraphProto& graph) { + const auto any_tensor_name_matches_onnx_output = [](const Output& param_output, const GraphProto& graph) { const auto found_in_outputs = [&graph](const std::string& tensor_name) { const auto& graph_outputs = graph.output(); return std::any_of(std::begin(graph_outputs), std::end(graph_outputs), - [&tensor_name](const ONNX_NAMESPACE::ValueInfoProto& output) { + [&tensor_name](const ValueInfoProto& output) { return tensor_name == output.name(); }); }; @@ -256,12 +241,11 @@ std::shared_ptr Graph::convert() { return function; } -OPENVINO_SUPPRESS_DEPRECATED_START ov::OutputVector Graph::make_framework_nodes(const Node& onnx_node) { - std::shared_ptr framework_node; + std::shared_ptr framework_node; if (onnx_node.has_subgraphs()) { const auto& subgraphs = onnx_node.get_subgraphs(); - auto inputs = onnx_node.get_ng_inputs(); + auto inputs = onnx_node.get_ov_inputs(); std::vector> models; for (const auto& kv : subgraphs) { auto& subgraph = kv.second; @@ -275,9 +259,9 @@ ov::OutputVector Graph::make_framework_nodes(const Node& onnx_node) { } } } - framework_node = std::make_shared(onnx_node, models, inputs); + framework_node = std::make_shared(onnx_node, models, inputs); } else { - framework_node = std::make_shared(onnx_node); + framework_node = std::make_shared(onnx_node); } return framework_node->outputs(); } @@ -309,7 +293,6 @@ void Graph::decode_to_framework_nodes() { } } } -OPENVINO_SUPPRESS_DEPRECATED_END std::shared_ptr Graph::create_model() { auto model = std::make_shared(get_ov_outputs(), m_parameters, get_name()); @@ -340,7 +323,6 @@ Output Graph::get_ov_node_from_cache(const std::string& name) { return m_cache->get_node(name); } -OPENVINO_SUPPRESS_DEPRECATED_START ov::OutputVector Graph::get_ov_outputs() { ov::OutputVector results; for (const auto& output : m_model->get_graph().output()) { @@ -360,7 +342,7 @@ ov::OutputVector Graph::make_ov_nodes(const Node& onnx_node) { const auto ng_node_factory = 
m_model->get_operator(onnx_node.op_type(), onnx_node.domain()); try { ov_subgraph_outputs = ng_node_factory(onnx_node); - } catch (const ::ngraph::onnx_import::error::OnnxNodeValidationFailure& e) { + } catch (const ::ov::frontend::onnx::error::OnnxNodeValidationFailure& e) { error_message = e.what(); } catch (const std::exception& exc) { error_message = error::detail::get_error_msg_prefix(onnx_node); @@ -373,11 +355,12 @@ ov::OutputVector Graph::make_ov_nodes(const Node& onnx_node) { } } if (ov_subgraph_outputs.empty()) { // translation not possible (not supported op or exception during processing) - const auto not_supported_node = std::make_shared(onnx_node.get_ng_inputs(), - onnx_node.get_outputs_size(), - onnx_node.domain(), - onnx_node.op_type(), - error_message); + const auto not_supported_node = + std::make_shared(onnx_node.get_ov_inputs(), + onnx_node.get_outputs_size(), + onnx_node.domain(), + onnx_node.op_type(), + error_message); ov_subgraph_outputs = not_supported_node->outputs(); } @@ -443,18 +426,19 @@ void Graph::set_friendly_names(const Node& onnx_node, const ov::OutputVector& ov // null node does not have tensor if (!ov::op::util::is_null(ov_subgraph_outputs[i])) { ov_subgraph_outputs[i].get_tensor().set_names({onnx_node.output(static_cast(i))}); + OPENVINO_SUPPRESS_DEPRECATED_START ov::descriptor::set_ov_tensor_legacy_name(ov_subgraph_outputs[i].get_tensor(), onnx_node.output(static_cast(i))); + OPENVINO_SUPPRESS_DEPRECATED_END } } } -OPENVINO_SUPPRESS_DEPRECATED_END const OpsetImports& Graph::get_opset_imports() const { return m_model->get_opset_imports(); } -Subgraph::Subgraph(const std::shared_ptr& model_proto, Graph* parent_graph) +Subgraph::Subgraph(const std::shared_ptr& model_proto, Graph* parent_graph) : Graph(parent_graph->model_dir(), model_proto, common::make_unique(), @@ -507,6 +491,6 @@ void Subgraph::infer_inputs_from_parent() { } } -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/graph.hpp b/src/frontends/onnx/frontend/src/core/graph.hpp index b87258d75afff0..4ee94f564c7e8d 100644 --- a/src/frontends/onnx/frontend/src/core/graph.hpp +++ b/src/frontends/onnx/frontend/src/core/graph.hpp @@ -13,18 +13,18 @@ #include "core/graph_cache.hpp" #include "core/model.hpp" #include "core/operator_set.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/frontend/extension/holder.hpp" #include "openvino/op/parameter.hpp" #include "ops_bridge.hpp" #include "utils/tensor_external_data.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { class Graph : public std::enable_shared_from_this { public: Graph(const std::string& model_dir, - const std::shared_ptr& model_proto, + const std::shared_ptr& model_proto, detail::MappedMemoryHandles mmap_cache, ov::frontend::ExtensionHolder extensions = {}); Graph() = delete; @@ -51,9 +51,9 @@ class Graph : public std::enable_shared_from_this { } virtual bool is_ov_node_in_cache(const std::string& name) const; virtual ov::Output get_ov_node_from_cache(const std::string& name); - OPENVINO_SUPPRESS_DEPRECATED_START - ov::OutputVector make_ov_nodes(const Node& onnx_node); - OPENVINO_SUPPRESS_DEPRECATED_END + + ov::OutputVector make_ov_nodes(const ov::frontend::onnx::Node& onnx_node); + const OpsetImports& get_opset_imports() const; virtual ~Graph() = default; @@ -63,19 +63,16 @@ class Graph : public std::enable_shared_from_this { protected: Graph(const 
std::string& model_dir, - const std::shared_ptr& model, + const std::shared_ptr& model, std::unique_ptr&& cache, detail::MappedMemoryHandles mmap_cache, ov::frontend::ExtensionHolder extensions = {}); - OPENVINO_SUPPRESS_DEPRECATED_START void set_friendly_names(const Node& onnx_node, const ov::OutputVector& ng_subgraph_outputs) const; - OPENVINO_SUPPRESS_DEPRECATED_END protected: - OPENVINO_SUPPRESS_DEPRECATED_START - ov::OutputVector make_framework_nodes(const Node& onnx_node); - OPENVINO_SUPPRESS_DEPRECATED_END + ov::OutputVector make_framework_nodes(const ov::frontend::onnx::Node& onnx_node); + void decode_to_framework_nodes(); void convert_to_ov_nodes(); void remove_dangling_parameters(); @@ -88,9 +85,8 @@ class Graph : public std::enable_shared_from_this { ov::frontend::ExtensionHolder m_extensions = {}; private: - OPENVINO_SUPPRESS_DEPRECATED_START std::vector m_nodes; - OPENVINO_SUPPRESS_DEPRECATED_END + std::string m_model_dir; detail::MappedMemoryHandles m_mmap_cache; OperatorsBridge m_ops_bridge; @@ -105,7 +101,7 @@ class Subgraph : public Graph { /// /// \param[in] model The ONNX model object. /// \param[in] parent_graph The reference to the parent graph. - Subgraph(const std::shared_ptr& model, Graph* parent_graph); + Subgraph(const std::shared_ptr& model, Graph* parent_graph); /// \brief Return nodes which are on the edge the subgraph and the parent graph. /// \return Vector of edge nodes from parent scope. @@ -137,6 +133,6 @@ inline std::ostream& operator<<(std::ostream& outs, const Graph& graph) { static const char* const ONNX_GRAPH_RT_ATTRIBUTE = "onnx_graph"; -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/graph_cache.cpp b/src/frontends/onnx/frontend/src/core/graph_cache.cpp index 8ef624c76da4fa..ec686da8ae07d5 100644 --- a/src/frontends/onnx/frontend/src/core/graph_cache.cpp +++ b/src/frontends/onnx/frontend/src/core/graph_cache.cpp @@ -6,8 +6,9 @@ #include "openvino/core/except.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { void GraphCache::emplace_node(const std::string& name, ov::Output&& node) { m_graph_cache_map[name] = std::move(node); } @@ -30,5 +31,6 @@ ov::Output GraphCache::get_node(const std::string& name) const { bool GraphCache::contains(const std::string& name) const { return (m_graph_cache_map.count(name) > 0); } -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/graph_cache.hpp b/src/frontends/onnx/frontend/src/core/graph_cache.hpp index fe021e64289dfc..c706fed7a3674a 100644 --- a/src/frontends/onnx/frontend/src/core/graph_cache.hpp +++ b/src/frontends/onnx/frontend/src/core/graph_cache.hpp @@ -10,8 +10,9 @@ #include "openvino/core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { /// \brief GraphCache stores and provides access to ONNX graph initializers. 
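
The cache contract is unchanged by the namespace move above; a minimal usage sketch (the include path, and the assumption that `get_node` throws for a name that was never cached, are inferred from this diff rather than stated by it):

```cpp
#include "core/graph_cache.hpp"  // internal frontend header; path assumed from this diff
#include "openvino/op/constant.hpp"

using ov::frontend::onnx::GraphCache;

void graph_cache_sketch() {
    GraphCache cache;
    auto value = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {42.0f});
    // Initializers and node outputs are stored under their ONNX tensor names:
    cache.emplace_node("onnx_tensor_name", value->output(0));
    // contains() guards lookups; get_node() is assumed to throw for unknown names.
    if (cache.contains("onnx_tensor_name")) {
        ov::Output<ov::Node> out = cache.get_node("onnx_tensor_name");
        (void)out;
    }
}
```
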
class GraphCache { public: @@ -49,5 +50,6 @@ class GraphCache { private: std::map> m_graph_cache_map; }; -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/model.cpp b/src/frontends/onnx/frontend/src/core/model.cpp index 3e59a7edaf599f..32f5d21991259e 100644 --- a/src/frontends/onnx/frontend/src/core/model.cpp +++ b/src/frontends/onnx/frontend/src/core/model.cpp @@ -10,19 +10,22 @@ #include "openvino/util/log.hpp" #include "ops_bridge.hpp" -namespace ngraph { -namespace onnx_import { -std::string get_node_domain(const ONNX_NAMESPACE::NodeProto& node_proto) { +using namespace ::ONNX_NAMESPACE; + +namespace ov { +namespace frontend { +namespace onnx { +std::string get_node_domain(const NodeProto& node_proto) { return node_proto.has_domain() ? node_proto.domain() : ""; } -std::int64_t get_opset_version(const ONNX_NAMESPACE::ModelProto& model_proto, const std::string& domain) { +std::int64_t get_opset_version(const ModelProto& model_proto, const std::string& domain) { // copy the opsets and sort them (descending order) // then return the version from the first occurrence of a given domain auto opset_imports = model_proto.opset_import(); std::sort(std::begin(opset_imports), std::end(opset_imports), - [](const ONNX_NAMESPACE::OperatorSetIdProto& lhs, const ONNX_NAMESPACE::OperatorSetIdProto& rhs) { + [](const OperatorSetIdProto& lhs, const OperatorSetIdProto& rhs) { return lhs.version() > rhs.version(); }); @@ -35,7 +38,7 @@ std::int64_t get_opset_version(const ONNX_NAMESPACE::ModelProto& model_proto, co OPENVINO_THROW("Couldn't find operator set's version for domain: ", domain, "."); } -Model::Model(std::shared_ptr model_proto, ModelOpSet&& model_opset) +Model::Model(std::shared_ptr model_proto, ModelOpSet&& model_opset) : m_model_proto{std::move(model_proto)}, m_opset{std::move(model_opset)} {} @@ -76,6 +79,6 @@ void Model::enable_opset_domain(const std::string& domain, const OperatorsBridge } } -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/model.hpp b/src/frontends/onnx/frontend/src/core/model.hpp index 6c17f17fbf4a59..189fc3fbc2b15b 100644 --- a/src/frontends/onnx/frontend/src/core/model.hpp +++ b/src/frontends/onnx/frontend/src/core/model.hpp @@ -12,14 +12,20 @@ #include "core/operator_set.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { +using ::ONNX_NAMESPACE::GraphProto; +using ::ONNX_NAMESPACE::ModelProto; +using ::ONNX_NAMESPACE::NodeProto; +using ::ONNX_NAMESPACE::OperatorSetIdProto; + /// \brief Type of container which stores opset version and domain in ONNX format -using OpsetImports = ::google::protobuf::RepeatedPtrField; +using OpsetImports = ::google::protobuf::RepeatedPtrField; -std::string get_node_domain(const ONNX_NAMESPACE::NodeProto& node_proto); +std::string get_node_domain(const NodeProto& node_proto); -std::int64_t get_opset_version(const ONNX_NAMESPACE::ModelProto& model_proto, const std::string& domain); +std::int64_t get_opset_version(const ModelProto& model_proto, const std::string& domain); class OperatorsBridge; @@ -29,7 +35,7 @@ class Model { // built based on the opset imports in the ModelProto object using ModelOpSet = std::unordered_map; - explicit Model(std::shared_ptr model_proto, ModelOpSet&& model_opset); + explicit Model(std::shared_ptr model_proto, ModelOpSet&& 
model_opset); Model(const Model&) = delete; Model(Model&&) = delete; @@ -40,7 +46,7 @@ class Model { const std::string& get_producer_name() const { return m_model_proto->producer_name(); } - const ONNX_NAMESPACE::GraphProto& get_graph() const { + const GraphProto& get_graph() const { return m_model_proto->graph(); } std::int64_t get_model_version() const { @@ -92,7 +98,7 @@ class Model { void enable_opset_domain(const std::string& domain, const OperatorsBridge& ops_bridge); private: - const std::shared_ptr m_model_proto; + const std::shared_ptr m_model_proto; ModelOpSet m_opset; }; @@ -100,6 +106,6 @@ inline std::ostream& operator<<(std::ostream& outs, const Model& model) { return (outs << ""); } -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/node.cpp b/src/frontends/onnx/frontend/src/core/node.cpp index b8d5a6bdfc866b..ed2195e64d9415 100644 --- a/src/frontends/onnx/frontend/src/core/node.cpp +++ b/src/frontends/onnx/frontend/src/core/node.cpp @@ -11,13 +11,14 @@ #include "core/null_node.hpp" #include "core/tensor.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { class Node::Impl { public: Impl() = delete; - Impl(const ONNX_NAMESPACE::NodeProto& node_proto, Graph* graph) + Impl(const NodeProto& node_proto, Graph* graph) : m_node_proto{&node_proto}, m_name{node_proto.has_name() ? node_proto.name() : ""}, m_domain{get_node_domain(node_proto)}, @@ -33,7 +34,7 @@ class Node::Impl { } } - Impl(const ONNX_NAMESPACE::NodeProto& node_proto, + Impl(const NodeProto& node_proto, Graph* graph, const std::unordered_map>& subgraphs) : m_node_proto{&node_proto}, @@ -48,7 +49,7 @@ class Node::Impl { } const std::vector& attributes() const; - ov::OutputVector get_ng_inputs() const; + ov::OutputVector get_ov_inputs() const; const std::string& domain() const; const std::string& op_type() const; @@ -87,13 +88,13 @@ class Node::Impl { T default_value, ov::element::Type type) const; - const ONNX_NAMESPACE::NodeProto& node_proto() const; + const NodeProto& node_proto() const; Graph* graph() const; private: Subgraph get_subgraph_from_attribute(const std::string& name) const; - const ONNX_NAMESPACE::NodeProto* m_node_proto; + const NodeProto* m_node_proto; std::string m_name; std::string m_domain; Graph* m_graph; @@ -104,8 +105,7 @@ class Node::Impl { std::unordered_map> m_subgraphs; }; -OPENVINO_SUPPRESS_DEPRECATED_START -const ONNX_NAMESPACE::NodeProto& Node::Impl::node_proto() const { +const NodeProto& Node::Impl::node_proto() const { return *m_node_proto; } Graph* Node::Impl::graph() const { @@ -200,15 +200,13 @@ ov::Any Node::get_attribute_value(const std::string& name) const { return get_attribute(name).get_any(); } -ov::OutputVector Node::Impl::get_ng_inputs() const { +ov::OutputVector Node::Impl::get_ov_inputs() const { ov::OutputVector result; for (const auto& name : m_node_proto->input()) { if (!name.empty()) { result.push_back(m_graph->get_ov_node_from_cache(name)); } else { - OPENVINO_SUPPRESS_DEPRECATED_START result.push_back(std::make_shared()->output(0)); - OPENVINO_SUPPRESS_DEPRECATED_END } } return result; @@ -295,7 +293,7 @@ std::shared_ptr Node::Impl::get_attribute_as_constant(cons value); } -Node::Node(const ONNX_NAMESPACE::NodeProto& node_proto, Graph* graph) +Node::Node(const NodeProto& node_proto, Graph* graph) : m_pimpl{new Impl{node_proto, graph}, [](Impl* impl) { delete impl; }} {} @@ -307,8 +305,8 @@ 
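// The rename above keeps the old behaviour: every empty input name in the
// NodeProto still maps to a NullNode placeholder output. A sketch of the usual
// consumer-side pattern (the `node` variable is hypothetical):
//
//     for (const auto& input : node.get_ov_inputs()) {
//         if (ov::op::util::is_null(input)) {
//             continue;  // optional ONNX input that the model left unset
//         }
//         // ... consume the real input ...
//     }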
Node::Node(const Node& other) delete impl; }} {} -ov::OutputVector Node::get_ng_inputs() const { - return m_pimpl->get_ng_inputs(); +ov::OutputVector Node::get_ov_inputs() const { + return m_pimpl->get_ov_inputs(); } const std::string& Node::domain() const { return m_pimpl->domain(); @@ -377,7 +375,6 @@ const Attribute& Node::get_attribute(const std::string& name) const { } return *found_attr; } -OPENVINO_SUPPRESS_DEPRECATED_END template <> float Node::get_attribute_value(const std::string& name, float default_value) const { @@ -638,6 +635,6 @@ std::shared_ptr Node::get_attribute_as_constant(const std: std::move(type)); } -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/node.hpp b/src/frontends/onnx/frontend/src/core/node.hpp index 31c898b8a75b24..c6083900337f23 100644 --- a/src/frontends/onnx/frontend/src/core/node.hpp +++ b/src/frontends/onnx/frontend/src/core/node.hpp @@ -17,11 +17,9 @@ namespace ONNX_NAMESPACE { class NodeProto; } // namespace ONNX_NAMESPACE -namespace ngraph { -namespace element { -using ov::element::Type; -} -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace error { namespace node { struct UnknownAttribute : ov::Exception { @@ -32,7 +30,6 @@ struct UnknownAttribute : ov::Exception { }; } // namespace node - } // namespace error // forward declaration @@ -42,11 +39,13 @@ class Tensor; class SparseTensor; class Attribute; +using ::ONNX_NAMESPACE::NodeProto; + class Node { public: Node() = delete; // TODO: hide this ctor since it uses protobufs generated structures - Node(const ONNX_NAMESPACE::NodeProto& node_proto, Graph* graph); + Node(const NodeProto& node_proto, Graph* graph); Node(Node&&) noexcept; Node(const Node&); @@ -54,7 +53,7 @@ class Node { Node& operator=(Node&&) noexcept = delete; Node& operator=(const Node&) = delete; - ov::OutputVector get_ng_inputs() const; + ov::OutputVector get_ov_inputs() const; const std::string& domain() const; const std::string& op_type() const; const std::string& get_name() const; @@ -281,6 +280,6 @@ inline std::ostream& operator<<(std::ostream& outs, const Node& node) { } OPENVINO_SUPPRESS_DEPRECATED_END -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/null_node.cpp b/src/frontends/onnx/frontend/src/core/null_node.cpp index f96e78d5ab43de..e95a2255c782bf 100644 --- a/src/frontends/onnx/frontend/src/core/null_node.cpp +++ b/src/frontends/onnx/frontend/src/core/null_node.cpp @@ -6,20 +6,20 @@ #include -#include "openvino/core/deprecated.hpp" #include "openvino/core/node.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { std::shared_ptr NullNode::clone_with_new_inputs(const ov::OutputVector& /* new_args */) const { return std::make_shared(); } -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov bool ov::op::util::is_null(const ov::Node* node) { - return dynamic_cast(node) != nullptr; + return dynamic_cast(node) != nullptr; } bool ov::op::util::is_null(const std::shared_ptr& node) { @@ -29,4 +29,3 @@ bool ov::op::util::is_null(const std::shared_ptr& node) { bool ov::op::util::is_null(const Output& output) { return is_null(output.get_node()); } -OPENVINO_SUPPRESS_DEPRECATED_END diff --git 
a/src/frontends/onnx/frontend/src/core/null_node.hpp b/src/frontends/onnx/frontend/src/core/null_node.hpp index 484540b3d67360..c1e59b476cecba 100644 --- a/src/frontends/onnx/frontend/src/core/null_node.hpp +++ b/src/frontends/onnx/frontend/src/core/null_node.hpp @@ -4,6 +4,7 @@ #pragma once +#include "openvino/core/node.hpp" #include "openvino/op/op.hpp" namespace ov { @@ -14,9 +15,9 @@ bool is_null(const std::shared_ptr& node); bool is_null(const Output& output); } // namespace util } // namespace op -} // namespace ov -namespace ngraph { -namespace onnx_import { + +namespace frontend { +namespace onnx { /// \brief Represents a missing optional input or output of an ONNX node /// /// Some ONNX operators have inputs or outputs that are marked as optional, /// @@ -35,5 +36,6 @@ class NullNode : public ov::op::Op { virtual std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; }; -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/operator_set.hpp b/src/frontends/onnx/frontend/src/core/operator_set.hpp index 2773066adbe32b..60032edd133833 100644 --- a/src/frontends/onnx/frontend/src/core/operator_set.hpp +++ b/src/frontends/onnx/frontend/src/core/operator_set.hpp @@ -10,16 +10,16 @@ #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { /// \brief Function which transforms a single ONNX operator to an OpenVINO sub-graph. -OPENVINO_SUPPRESS_DEPRECATED_START -using Operator = std::function; -OPENVINO_SUPPRESS_DEPRECATED_END + +using Operator = std::function; /// \brief Map which contains ONNX operators accessible by std::string value as a key. using OperatorSet = std::unordered_map; -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp b/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp index 7044d63ae5fd2d..dc6845249a847f 100644 --- a/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp +++ b/src/frontends/onnx/frontend/src/core/sparse_tensor.hpp @@ -12,12 +12,15 @@ #include "openvino/core/type/element_type.hpp" #include "tensor.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { +using ::ONNX_NAMESPACE::SparseTensorProto; + class SparseTensor { public: SparseTensor() = delete; - SparseTensor(const ONNX_NAMESPACE::SparseTensorProto& sparse_tensor, + SparseTensor(const SparseTensorProto& sparse_tensor, const std::string& model_dir, detail::MappedMemoryHandles mmap_cache) : m_values{sparse_tensor.values(), model_dir, mmap_cache}, @@ -25,8 +28,8 @@ class SparseTensor { m_shape{std::begin(sparse_tensor.dims()), std::end(sparse_tensor.dims())} { if (m_shape == ov::Shape{0}) { // It's possible to construct a sparse tensor in ONNX with "dims: 0" property - // Such tensor contains a scalar. This results in a Shape{0} stored in m_shape. - // In OpenVINO a scalar is represented with Shape{} and thus this replacement. + // Such tensor contains a scalar. This results in an ov::Shape{0} stored in m_shape. + // In OpenVINO a scalar is represented with ov::Shape{} and thus this replacement.
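// Concretely (illustration only): an initializer serialized with `dims: 0`
// denotes a scalar, but copying its dims verbatim would yield a rank-1 shape
// holding zero elements rather than the rank-0 shape OpenVINO uses for scalars:
//
//     ov::Shape{0}.size();  // 1, a rank-1 shape with zero elements
//     ov::Shape{}.size();   // 0, a rank-0 shape, i.e. a scalar
//
// hence the assignment below.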
m_shape = ov::Shape{}; } } @@ -66,5 +69,6 @@ class SparseTensor { inline std::ostream& operator<<(std::ostream& outs, const SparseTensor& tensor) { return (outs << ""); } -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/tensor.cpp b/src/frontends/onnx/frontend/src/core/tensor.cpp index e5caa078716963..75d95f0dd56db6 100644 --- a/src/frontends/onnx/frontend/src/core/tensor.cpp +++ b/src/frontends/onnx/frontend/src/core/tensor.cpp @@ -4,8 +4,9 @@ #include "core/tensor.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { template <> std::vector Tensor::get_data() const { @@ -15,7 +16,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_DOUBLE) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_DOUBLE) { return detail::__get_data(m_tensor_proto->double_data()); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "DOUBLE, raw data"); @@ -29,7 +30,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_FLOAT) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_FLOAT) { return detail::__get_data(m_tensor_proto->float_data()); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "FLOAT, raw data"); @@ -43,7 +44,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_FLOAT16) { using std::begin; using std::end; @@ -67,7 +68,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_BFLOAT16) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_BFLOAT16) { return detail::__get_data(m_tensor_proto->int32_data()); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT32, raw data"); @@ -81,7 +82,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_INT8) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_INT8) { return detail::__get_data(m_tensor_proto->int32_data()); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT8, raw data"); @@ -95,7 +96,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_INT16) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_INT16) { return detail::__get_data(m_tensor_proto->int32_data()); } 
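// A note on the pattern repeated by every specialization in this file: a tensor
// carries its payload either in `raw_data` (a packed byte blob decoded by
// __get_raw_data) or in one of the typed repeated fields. The ONNX spec widens
// narrow types, so INT8/INT16/UINT8/UINT16/BOOL values all land in int32_data,
// which is why the typed branch above reads int32_data for a 16-bit tensor.
// A hypothetical round-trip:
//
//     TensorProto proto;
//     proto.set_data_type(TensorProto_DataType::TensorProto_DataType_INT16);
//     proto.add_dims(1);
//     proto.add_int32_data(42);                  // widened storage
//     Tensor tensor{proto, "", nullptr};
//     auto values = tensor.get_data<int16_t>();  // {42}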
ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT16, raw data"); @@ -109,7 +110,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_INT32) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_INT32) { return detail::__get_data(m_tensor_proto->int32_data()); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT32, raw data"); @@ -123,7 +124,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_INT64) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_INT64) { return detail::__get_data(m_tensor_proto->int64_data()); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT64, raw data"); @@ -137,7 +138,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_UINT8) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_UINT8) { return detail::__get_data(m_tensor_proto->int32_data()); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT8, raw data"); @@ -151,7 +152,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_UINT16) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_UINT16) { return detail::__get_data(m_tensor_proto->int32_data()); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT16, raw data"); @@ -165,7 +166,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_UINT32) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_UINT32) { return detail::__get_data(m_tensor_proto->uint64_data()); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT32, raw data"); @@ -179,7 +180,7 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_UINT64) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_UINT64) { return detail::__get_data(m_tensor_proto->uint64_data()); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT64, raw data"); @@ -195,11 +196,12 @@ std::vector Tensor::get_data() const { if (m_tensor_proto->has_raw_data()) { return detail::__get_raw_data(m_tensor_proto->raw_data(), m_tensor_proto->data_type()); } - if (m_tensor_proto->data_type() == ONNX_NAMESPACE::TensorProto_DataType_BOOL) { + if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_BOOL) { return detail::__get_data(m_tensor_proto->int32_data()); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "BOOL, raw data"); } -} // namespace onnx_import -} //
namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/tensor.hpp b/src/frontends/onnx/frontend/src/core/tensor.hpp index e780b8b10246e3..67f5fbbb1a08f0 100644 --- a/src/frontends/onnx/frontend/src/core/tensor.hpp +++ b/src/frontends/onnx/frontend/src/core/tensor.hpp @@ -21,22 +21,19 @@ using namespace ov::frontend::onnx::common; -namespace ngraph { -namespace onnx_import { -// Detecting automatically the underlying type used to store the information -// for data type of values a tensor is holding. A bug was discovered in protobuf -// which forced ONNX team to switch from `enum TensorProto_DataType` to `int32` -// in order to workaround the bug. This line allows using both versions of ONNX -// generated wrappers. -using TensorProto_DataType = decltype(ONNX_NAMESPACE::TensorProto{}.data_type()); +namespace ov { +namespace frontend { +namespace onnx { + +using ::ONNX_NAMESPACE::TensorProto; +using ::ONNX_NAMESPACE::TensorProto_DataLocation; +using ::ONNX_NAMESPACE::TensorProto_DataType; +using ::ONNX_NAMESPACE::TensorProto_DataType_Name; #define ONNX_INVALID_DATA_TYPE(data_type, expected) \ - OPENVINO_THROW("Invalid data type ", ONNX_NAMESPACE::TensorProto_DataType_Name(data_type), " expected: ", expected) -#define ONNX_UNSUPPORTED_DATA_TYPE(data_type, expected) \ - OPENVINO_THROW("Unsupported data type ", \ - ONNX_NAMESPACE::TensorProto_DataType_Name(data_type), \ - " expected: ", \ - expected) + OPENVINO_THROW("Invalid data type ", TensorProto_DataType_Name(data_type), " expected: ", expected) +#define ONNX_UNSUPPORTED_DATA_TYPE(data_type, expected) \ + OPENVINO_THROW("Unsupported data type ", TensorProto_DataType_Name(data_type), " expected: ", expected) namespace detail { namespace { @@ -65,37 +62,35 @@ inline std::vector __get_raw_data(const std::string& raw_data, int onnx_data_ class Tensor { public: enum class Type { - undefined = ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED, - float32 = ONNX_NAMESPACE::TensorProto_DataType_FLOAT, - uint8 = ONNX_NAMESPACE::TensorProto_DataType_UINT8, - int8 = ONNX_NAMESPACE::TensorProto_DataType_INT8, - uint16 = ONNX_NAMESPACE::TensorProto_DataType_UINT16, - int16 = ONNX_NAMESPACE::TensorProto_DataType_INT16, - int32 = ONNX_NAMESPACE::TensorProto_DataType_INT32, - int64 = ONNX_NAMESPACE::TensorProto_DataType_INT64, - string = ONNX_NAMESPACE::TensorProto_DataType_STRING, - boolean = ONNX_NAMESPACE::TensorProto_DataType_BOOL, - float16 = ONNX_NAMESPACE::TensorProto_DataType_FLOAT16, - float64 = ONNX_NAMESPACE::TensorProto_DataType_DOUBLE, - uint32 = ONNX_NAMESPACE::TensorProto_DataType_UINT32, - uint64 = ONNX_NAMESPACE::TensorProto_DataType_UINT64, - bfloat16 = ONNX_NAMESPACE::TensorProto_DataType_BFLOAT16, - complex64 = ONNX_NAMESPACE::TensorProto_DataType_COMPLEX64, - complex128 = ONNX_NAMESPACE::TensorProto_DataType_COMPLEX128 + undefined = TensorProto_DataType::TensorProto_DataType_UNDEFINED, + float32 = TensorProto_DataType::TensorProto_DataType_FLOAT, + uint8 = TensorProto_DataType::TensorProto_DataType_UINT8, + int8 = TensorProto_DataType::TensorProto_DataType_INT8, + uint16 = TensorProto_DataType::TensorProto_DataType_UINT16, + int16 = TensorProto_DataType::TensorProto_DataType_INT16, + int32 = TensorProto_DataType::TensorProto_DataType_INT32, + int64 = TensorProto_DataType::TensorProto_DataType_INT64, + string = TensorProto_DataType::TensorProto_DataType_STRING, + boolean = TensorProto_DataType::TensorProto_DataType_BOOL, + float16 = 
TensorProto_DataType::TensorProto_DataType_FLOAT16, + float64 = TensorProto_DataType::TensorProto_DataType_DOUBLE, + uint32 = TensorProto_DataType::TensorProto_DataType_UINT32, + uint64 = TensorProto_DataType::TensorProto_DataType_UINT64, + bfloat16 = TensorProto_DataType::TensorProto_DataType_BFLOAT16, + complex64 = TensorProto_DataType::TensorProto_DataType_COMPLEX64, + complex128 = TensorProto_DataType::TensorProto_DataType_COMPLEX128 }; Tensor() = delete; - Tensor(const ONNX_NAMESPACE::TensorProto& tensor, - const std::string& model_dir, - detail::MappedMemoryHandles mmap_cache) + Tensor(const TensorProto& tensor, const std::string& model_dir, detail::MappedMemoryHandles mmap_cache) : m_tensor_proto{&tensor}, m_shape{std::begin(tensor.dims()), std::end(tensor.dims())}, m_model_dir{model_dir}, m_mmap_cache{mmap_cache} { if (m_shape == ov::Shape{0}) { // It's possible to construct a tensor in ONNX with "dims: 0" property - // Such tensor contains a scalar. This results in a Shape{0} stored in m_shape. - // In OpenVINO a scalar is represented with Shape{} and thus this replacement. + // Such tensor contains a scalar. This results in a ov::Shape{0} stored in m_shape. + // In OpenVINO a scalar is represented with ov::Shape{} and thus this replacement. m_shape = ov::Shape{}; } } @@ -136,33 +131,33 @@ class Tensor { FRONT_END_THROW("Tensor has no specified data type"); } switch (m_tensor_proto->data_type()) { - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL: + case TensorProto_DataType::TensorProto_DataType_BOOL: return ov::element::boolean; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT: + case TensorProto_DataType::TensorProto_DataType_FLOAT: return ov::element::f32; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16: + case TensorProto_DataType::TensorProto_DataType_FLOAT16: return ov::element::f16; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_DOUBLE: + case TensorProto_DataType::TensorProto_DataType_DOUBLE: return ov::element::f64; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8: + case TensorProto_DataType::TensorProto_DataType_INT8: return ov::element::i8; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16: + case TensorProto_DataType::TensorProto_DataType_INT16: return ov::element::i16; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32: + case TensorProto_DataType::TensorProto_DataType_INT32: return ov::element::i32; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64: + case TensorProto_DataType::TensorProto_DataType_INT64: return ov::element::i64; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8: + case TensorProto_DataType::TensorProto_DataType_UINT8: return ov::element::u8; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT16: + case TensorProto_DataType::TensorProto_DataType_UINT16: return ov::element::u16; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32: + case TensorProto_DataType::TensorProto_DataType_UINT32: return ov::element::u32; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT64: + case TensorProto_DataType::TensorProto_DataType_UINT64: return ov::element::u64; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BFLOAT16: + case TensorProto_DataType::TensorProto_DataType_BFLOAT16: return ov::element::bf16; - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UNDEFINED: + case 
TensorProto_DataType::TensorProto_DataType_UNDEFINED: FRONT_END_THROW("Data type is Undefined"); default: ONNX_UNSUPPORTED_DATA_TYPE( @@ -172,7 +167,7 @@ class Tensor { } operator TensorProto_DataType() const { - return m_tensor_proto->data_type(); + return static_cast(m_tensor_proto->data_type()); } std::shared_ptr get_ov_constant() const { @@ -180,31 +175,31 @@ class Tensor { FRONT_END_THROW("Loading segments isn't supported"); } switch (m_tensor_proto->data_type()) { - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL: + case TensorProto_DataType::TensorProto_DataType_BOOL: return make_ov_constant(ov::element::boolean); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT: + case TensorProto_DataType::TensorProto_DataType_FLOAT: return make_ov_constant(ov::element::f32); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16: + case TensorProto_DataType::TensorProto_DataType_FLOAT16: return make_ov_constant(ov::element::f16); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_DOUBLE: + case TensorProto_DataType::TensorProto_DataType_DOUBLE: return make_ov_constant(ov::element::f64); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8: + case TensorProto_DataType::TensorProto_DataType_INT8: return make_ov_constant(ov::element::i8); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16: + case TensorProto_DataType::TensorProto_DataType_INT16: return make_ov_constant(ov::element::i16); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32: + case TensorProto_DataType::TensorProto_DataType_INT32: return make_ov_constant(ov::element::i32); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64: + case TensorProto_DataType::TensorProto_DataType_INT64: return make_ov_constant(ov::element::i64); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8: + case TensorProto_DataType::TensorProto_DataType_UINT8: return make_ov_constant(ov::element::u8); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT16: + case TensorProto_DataType::TensorProto_DataType_UINT16: return make_ov_constant(ov::element::u16); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32: + case TensorProto_DataType::TensorProto_DataType_UINT32: return make_ov_constant(ov::element::u32); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT64: + case TensorProto_DataType::TensorProto_DataType_UINT64: return make_ov_constant(ov::element::u64); - case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BFLOAT16: + case TensorProto_DataType::TensorProto_DataType_BFLOAT16: return make_ov_constant(ov::element::bf16); default: ONNX_UNSUPPORTED_DATA_TYPE( @@ -276,8 +271,7 @@ class Tensor { bool has_external_data() const { return m_tensor_proto->has_data_location() && - m_tensor_proto->data_location() == - ONNX_NAMESPACE::TensorProto_DataLocation::TensorProto_DataLocation_EXTERNAL; + m_tensor_proto->data_location() == TensorProto_DataLocation::TensorProto_DataLocation_EXTERNAL; } template @@ -297,15 +291,15 @@ class Tensor { return m_tensor_proto->raw_data().data(); } switch (m_tensor_proto->data_type()) { - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: + case TensorProto_DataType::TensorProto_DataType_FLOAT: return m_tensor_proto->float_data().data(); - case ONNX_NAMESPACE::TensorProto_DataType_INT32: + case TensorProto_DataType::TensorProto_DataType_INT32: return m_tensor_proto->int32_data().data(); - case 
ONNX_NAMESPACE::TensorProto_DataType_INT64: + case TensorProto_DataType::TensorProto_DataType_INT64: return m_tensor_proto->int64_data().data(); - case ONNX_NAMESPACE::TensorProto_DataType_UINT64: + case TensorProto_DataType::TensorProto_DataType_UINT64: return m_tensor_proto->uint64_data().data(); - case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: + case TensorProto_DataType::TensorProto_DataType_DOUBLE: return m_tensor_proto->double_data().data(); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "FLOAT, INT32, INT64, UINT64, DOUBLE"); @@ -316,21 +310,21 @@ class Tensor { return m_tensor_proto->raw_data().size() / get_onnx_data_size(m_tensor_proto->data_type()); } switch (m_tensor_proto->data_type()) { - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: + case TensorProto_DataType::TensorProto_DataType_FLOAT: return m_tensor_proto->float_data_size(); - case ONNX_NAMESPACE::TensorProto_DataType_INT32: + case TensorProto_DataType::TensorProto_DataType_INT32: return m_tensor_proto->int32_data_size(); - case ONNX_NAMESPACE::TensorProto_DataType_INT64: + case TensorProto_DataType::TensorProto_DataType_INT64: return m_tensor_proto->int64_data_size(); - case ONNX_NAMESPACE::TensorProto_DataType_UINT64: + case TensorProto_DataType::TensorProto_DataType_UINT64: return m_tensor_proto->uint64_data_size(); - case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: + case TensorProto_DataType::TensorProto_DataType_DOUBLE: return m_tensor_proto->double_data_size(); } ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "FLOAT, INT32, INT64, UINT64, DOUBLE"); } - const ONNX_NAMESPACE::TensorProto* m_tensor_proto; + const TensorProto* m_tensor_proto; ov::Shape m_shape; std::string m_model_dir; detail::MappedMemoryHandles m_mmap_cache; @@ -379,5 +373,6 @@ std::vector Tensor::get_data() const; template <> std::vector Tensor::get_data() const; -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/transform.cpp b/src/frontends/onnx/frontend/src/core/transform.cpp index b39665aa9b1da4..39e828edeae104 100644 --- a/src/frontends/onnx/frontend/src/core/transform.cpp +++ b/src/frontends/onnx/frontend/src/core/transform.cpp @@ -20,11 +20,14 @@ #include "openvino/util/log.hpp" #include "ops_bridge.hpp" -namespace ngraph { -namespace onnx_import { +using namespace ::ONNX_NAMESPACE; + +namespace ov { +namespace frontend { +namespace onnx { namespace transform { namespace { -ONNX_NAMESPACE::TypeProto get_input_type(std::string const& name, ONNX_NAMESPACE::GraphProto& graph) { +TypeProto get_input_type(std::string const& name, GraphProto& graph) { for (const auto& input : graph.input()) { if (input.name() == name) { return input.type(); @@ -32,7 +35,7 @@ ONNX_NAMESPACE::TypeProto get_input_type(std::string const& name, ONNX_NAMESPACE } for (const auto& initializer : graph.initializer()) { if (initializer.name() == name) { - ONNX_NAMESPACE::TypeProto ret; + TypeProto ret; auto* tensor_type = ret.mutable_tensor_type(); tensor_type->set_elem_type(initializer.data_type()); @@ -51,15 +54,15 @@ ONNX_NAMESPACE::TypeProto get_input_type(std::string const& name, ONNX_NAMESPACE return value_info.type(); } } - return ONNX_NAMESPACE::TypeProto(); + return TypeProto(); } -void function_expand_and_remove_original_node(const ONNX_NAMESPACE::NodeProto& node, - const ONNX_NAMESPACE::FunctionProto& func_proto, - ONNX_NAMESPACE::GraphProto* graph, +void function_expand_and_remove_original_node(const NodeProto& node, + const 
FunctionProto& func_proto, + GraphProto* graph, int current_node_idx) { const auto before_expand_size = graph->node().size(); - ONNX_NAMESPACE::FunctionExpandHelper(node, func_proto, *graph); + FunctionExpandHelper(node, func_proto, *graph); const auto added_nodes = graph->node().size() - before_expand_size; // Remove the original node which contained the function @@ -73,14 +76,15 @@ } // namespace } // namespace transform -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov -void ngraph::onnx_import::transform::expand_onnx_functions(ONNX_NAMESPACE::ModelProto& model_proto) { +void ov::frontend::onnx::transform::expand_onnx_functions(ModelProto& model_proto) { auto graph_proto = model_proto.mutable_graph(); for (int i = 0; i < graph_proto->node().size(); ++i) { - ONNX_NAMESPACE::NodeProto node = graph_proto->node().Get(i); + NodeProto node = graph_proto->node().Get(i); // Check if node operation is one of the functions we want to expand if (std::find(onnx_functions_to_expand.begin(), onnx_functions_to_expand.end(), node.op_type()) == @@ -90,7 +94,7 @@ // Retrieve the operation schema from ONNX library int opset_version = static_cast(get_opset_version(model_proto, node.domain())); - const auto* schema_registry = ONNX_NAMESPACE::OpSchemaRegistry::Instance(); + const auto* schema_registry = OpSchemaRegistry::Instance(); const auto node_op_schema = schema_registry->GetSchema(node.op_type(), opset_version, node.domain()); // Check if operation schema found @@ -108,18 +112,18 @@ else if (node_op_schema->HasContextDependentFunction()) { // In order to expand a context-dependent function, we need to infer types try { - ONNX_NAMESPACE::shape_inference::InferShapes(model_proto); + shape_inference::InferShapes(model_proto); } catch (const std::exception& e) { - OPENVINO_WARN << "ONNX Shape inference failed: " << e.what(); + OPENVINO_WARN << "ONNX Shape inference failed: " << e.what(); } - std::vector input_types; + std::vector input_types; for (const auto& input : node.input()) { input_types.push_back(get_input_type(input, *graph_proto)); } - ONNX_NAMESPACE::FunctionBodyBuildContextImpl ctx(node, input_types); - ONNX_NAMESPACE::FunctionProto func_proto; + FunctionBodyBuildContextImpl ctx(node, input_types); + FunctionProto func_proto; node_op_schema->BuildContextDependentFunction(ctx, func_proto); // Move the index back one position because the first node of an expanded function can itself be a function function_expand_and_remove_original_node(node, func_proto, graph_proto, i--); @@ -127,7 +131,7 @@ } } -void ngraph::onnx_import::transform::fixup_legacy_operators(ONNX_NAMESPACE::ModelProto& model_proto) { +void ov::frontend::onnx::transform::fixup_legacy_operators(ModelProto& model_proto) { auto graph_proto = model_proto.mutable_graph(); for (auto& node : *graph_proto->mutable_node()) { auto it = std::find(legacy_ops_to_fixup.begin(), legacy_ops_to_fixup.end(), node.op_type()); diff --git a/src/frontends/onnx/frontend/src/core/transform.hpp b/src/frontends/onnx/frontend/src/core/transform.hpp index 56d751baf0fb96..fd33ac5a7bcfc0 100644 --- a/src/frontends/onnx/frontend/src/core/transform.hpp +++
b/src/frontends/onnx/frontend/src/core/transform.hpp @@ -6,10 +6,13 @@ #include -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace transform { +using ::ONNX_NAMESPACE::ModelProto; + static const std::vector onnx_functions_to_expand = {"AffineGrid", "Bernoulli", "Celu", "CenterCropPad", "NegativeLogLikelihoodLoss", "SoftmaxCrossEntropyLoss"}; @@ -21,7 +24,7 @@ static const std::vector onnx_functions_to_expand = /// with their expanded subgraphs. /// /// \param model_proto Protobuf message with ONNX model to transform. -void expand_onnx_functions(ONNX_NAMESPACE::ModelProto& model_proto); +void expand_onnx_functions(ModelProto& model_proto); static const std::vector legacy_ops_to_fixup = {"DeformableConv2D", "DetectionOutput", @@ -48,8 +51,9 @@ static const std::vector legacy_ops_to_fixup = {"DeformableConv2D", /// in the OpenVINO ONNX Frontend. /// /// \param model_proto Protobuf message with ONNX model to transform. -void fixup_legacy_operators(ONNX_NAMESPACE::ModelProto& model_proto); +void fixup_legacy_operators(ModelProto& model_proto); } // namespace transform -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/value_info.hpp b/src/frontends/onnx/frontend/src/core/value_info.hpp index e162cc6bab8c10..2c47b7711c6f6f 100644 --- a/src/frontends/onnx/frontend/src/core/value_info.hpp +++ b/src/frontends/onnx/frontend/src/core/value_info.hpp @@ -13,19 +13,19 @@ #include "openvino/op/parameter.hpp" #include "utils/common.hpp" -using namespace ov::frontend::onnx::common; +namespace ov { +namespace frontend { +namespace onnx { -using namespace ov::op; +using ::ONNX_NAMESPACE::ValueInfoProto; -namespace ngraph { -namespace onnx_import { class ValueInfo { public: ValueInfo(ValueInfo&&) = default; ValueInfo(const ValueInfo&) = default; ValueInfo() = delete; - explicit ValueInfo(const ONNX_NAMESPACE::ValueInfoProto& value_info_proto) : m_value_info_proto{&value_info_proto} { + explicit ValueInfo(const ValueInfoProto& value_info_proto) : m_value_info_proto{&value_info_proto} { if (value_info_proto.type().has_tensor_type()) { const auto& onnx_tensor = value_info_proto.type().tensor_type(); @@ -62,19 +62,19 @@ class ValueInfo { } protected: - std::shared_ptr get_ov_parameter() const { - auto parameter = std::make_shared(get_element_type(), get_shape()); + std::shared_ptr get_ov_parameter() const { + auto parameter = std::make_shared(get_element_type(), get_shape()); parameter->set_friendly_name(get_name()); parameter->get_output_tensor(0).set_names({get_name()}); return parameter; } - std::shared_ptr get_ov_constant(const Tensor& tensor) const { + std::shared_ptr get_ov_constant(const Tensor& tensor) const { return tensor.get_ov_constant(); } private: - const ONNX_NAMESPACE::ValueInfoProto* m_value_info_proto; + const ValueInfoProto* m_value_info_proto; ov::PartialShape m_partial_shape = ov::PartialShape::dynamic(); }; @@ -82,6 +82,6 @@ inline std::ostream& operator<<(std::ostream& outs, const ValueInfo& info) { return (outs << ""); } -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/detail/subgraph_extraction.cpp b/src/frontends/onnx/frontend/src/detail/subgraph_extraction.cpp index f2341a7fccc711..2c4a1bc9c94fe3 100644 --- a/src/frontends/onnx/frontend/src/detail/subgraph_extraction.cpp +++ 
b/src/frontends/onnx/frontend/src/detail/subgraph_extraction.cpp @@ -15,12 +15,13 @@ #include "openvino/frontend/exception.hpp" -using namespace ov::onnx_editor; +using namespace ::ONNX_NAMESPACE; +using namespace ov::frontend::onnx; enum class PortType { InputPort, OutputPort }; namespace { -void validate_node_index(const ONNX_NAMESPACE::GraphProto& graph, const int node_idx) { +void validate_node_index(const GraphProto& graph, const int node_idx) { FRONT_END_GENERAL_CHECK(node_idx >= 0 && node_idx < graph.node_size(), "The specified node index is out of range of nodes in the original model(idx: ", std::to_string(node_idx), @@ -29,10 +30,7 @@ void validate_node_index(const ONNX_NAMESPACE::GraphProto& graph, const int node ")"); } -void validate_port_index(const ONNX_NAMESPACE::GraphProto& graph, - const int node_idx, - const int port_idx, - const PortType& port_type) { +void validate_port_index(const GraphProto& graph, const int node_idx, const int port_idx, const PortType& port_type) { const int ports_number = (port_type == PortType::InputPort) ? graph.node(node_idx).input().size() : graph.node(node_idx).output().size(); FRONT_END_GENERAL_CHECK(port_idx >= 0 && port_idx < ports_number, @@ -67,21 +65,19 @@ bool already_exists(const Container& items, const std::string& name) { } /// \brief Checks if a tensor with name "name" is produced by an input of the graph -bool is_graph_input(const ONNX_NAMESPACE::GraphProto& graph, const std::string& name) { +bool is_graph_input(const GraphProto& graph, const std::string& name) { return already_exists(graph.input(), name); } /// \brief Checks if a tensor with name "name" is produced by an initializer of the graph -bool is_graph_initializer(const ONNX_NAMESPACE::GraphProto& graph, const std::string& name) { +bool is_graph_initializer(const GraphProto& graph, const std::string& name) { return already_exists(graph.initializer(), name); } /// \brief Looks up the index of a node that produces a tensor "input_name". Used to traverse /// the graph bottom-up. Starts from a node index "current_node_idx" because it operates /// on a topologically sorted graph. -int find_source_node_idx(const ONNX_NAMESPACE::GraphProto& graph, - const int current_node_idx, - const std::string& input_name) { +int find_source_node_idx(const GraphProto& graph, const int current_node_idx, const std::string& input_name) { // Some operators (e.g. Clip) have optional inputs if (input_name.empty()) return -1; @@ -101,11 +97,10 @@ int find_source_node_idx(const ONNX_NAMESPACE::GraphProto& graph, /// \brief Looks up a descriptor for a given tensor name. This descriptor contains inferred /// shape information which is required to create new inputs and outputs in the graph. 
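// All of the helpers in this file address tensors through (node index, port
// index) pairs. For a toy, topologically sorted graph (hypothetical):
//
//     node 0: Relu(X) -> Y
//     node 1: Add(Y, B) -> Z
//
// InputEdge{1, 0} names the first input of Add (the tensor "Y"), while
// OutputEdge{0, 0} names the first output of Relu (the same tensor), which is
// exactly the convention validate_node_index/validate_port_index enforce above.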
-const ONNX_NAMESPACE::ValueInfoProto find_tensor_descriptor(const ONNX_NAMESPACE::GraphProto& graph, - const std::string& tensor_name) { +const ValueInfoProto find_tensor_descriptor(const GraphProto& graph, const std::string& tensor_name) { const auto it = std::find_if(std::begin(graph.value_info()), std::end(graph.value_info()), - name_equals(tensor_name)); + name_equals(tensor_name)); if (it != std::end(graph.value_info())) { return *it; @@ -114,23 +109,23 @@ const ONNX_NAMESPACE::ValueInfoProto find_tensor_descriptor(const ONNX_NAMESPACE // as fully dynamic: // - Fully dynamic shape // - Unknown data type - auto dynamic_value_info = ONNX_NAMESPACE::ValueInfoProto(); + auto dynamic_value_info = ValueInfoProto(); dynamic_value_info.set_name(tensor_name); auto type = dynamic_value_info.mutable_type(); auto tensor_type = type->mutable_tensor_type(); - tensor_type->set_elem_type(ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UNDEFINED); + tensor_type->set_elem_type(TensorProto_DataType::TensorProto_DataType_UNDEFINED); return dynamic_value_info; } } -std::string get_input_tensor_name(const ONNX_NAMESPACE::GraphProto& graph, const InputEdge& edge) { +std::string get_input_tensor_name(const GraphProto& graph, const InputEdge& edge) { validate_node_index(graph, edge.m_node_idx); validate_port_index(graph, edge.m_node_idx, edge.m_port_idx, PortType::InputPort); return graph.node(edge.m_node_idx).input(edge.m_port_idx); } -std::string get_output_tensor_name(const ONNX_NAMESPACE::GraphProto& graph, const OutputEdge& edge) { +std::string get_output_tensor_name(const GraphProto& graph, const OutputEdge& edge) { validate_node_index(graph, edge.m_node_idx); validate_port_index(graph, edge.m_node_idx, edge.m_port_idx, PortType::OutputPort); @@ -139,11 +134,11 @@ std::string get_output_tensor_name(const ONNX_NAMESPACE::GraphProto& graph, cons /// \brief Inserts a new input to the graph and removes an initializer that produced a tensor /// specified by an input edge passed to this function. -void replace_initializer_with_new_input(ONNX_NAMESPACE::GraphProto& graph, const InputEdge& edge) { +void replace_initializer_with_new_input(GraphProto& graph, const InputEdge& edge) { const auto tensor_name = get_input_tensor_name(graph, edge); const auto it = std::find_if(std::begin(graph.initializer()), std::end(graph.initializer()), - name_equals(tensor_name)); + name_equals(tensor_name)); FRONT_END_GENERAL_CHECK(it != std::end(graph.initializer()), "Could not find an initializer in the graph: '", @@ -173,9 +168,7 @@ void replace_initializer_with_new_input(ONNX_NAMESPACE::GraphProto& graph, const /// \note input_consumers is number of nodes which consume a new input /// \return A new input edge (along with "true") if a new input was added to the graph, /// false + the original edge otherwise. 
-std::pair append_new_graph_input(ONNX_NAMESPACE::GraphProto& graph, - const InputEdge& edge, - int input_consumers) { +std::pair append_new_graph_input(GraphProto& graph, const InputEdge& edge, int input_consumers) { const auto tensor_name = get_input_tensor_name(graph, edge); if (already_exists(graph.input(), tensor_name) && !is_graph_initializer(graph, tensor_name)) { // no need to append a new input if an edge points to an existing one in the model @@ -222,7 +215,7 @@ std::pair append_new_graph_input(ONNX_NAMESPACE::GraphProto& /// \brief Adds new outputs to the ONNX graph for an edge specified by a user /// The shape for this output is taken from a previously executed shape inference of the /// original model. -void append_new_graph_output(ONNX_NAMESPACE::GraphProto& graph, const OutputEdge& edge) { +void append_new_graph_output(GraphProto& graph, const OutputEdge& edge) { const auto tensor_name = get_output_tensor_name(graph, edge); auto& new_output = *(graph.add_output()); // copy the intermediate tensor's properties to the newly created @@ -234,8 +227,8 @@ void append_new_graph_output(ONNX_NAMESPACE::GraphProto& graph, const OutputEdge /// It's intended to work with ONNX graph inputs, outputs and initializers only. template void discard_by_name(Container& all_items, const std::set& items_to_keep) { - static_assert(std::is_same::value || - std::is_same::value, + static_assert(std::is_same::value || + std::is_same::value, "Unsupported value type of the container"); // The tested item can be discarded if its name is not found in the items_to_keep set @@ -255,7 +248,7 @@ void discard_by_name(Container& all_items, const std::set& items_to /// \brief Removes all nodes from a container keeping the ones whose index is in nodes_to_keep template void discard_nodes(Container& all_nodes, const std::set& nodes_to_keep) { - static_assert(std::is_same::value, + static_assert(std::is_same::value, "Unsupported value type of the container"); int idx = 0; @@ -274,9 +267,7 @@ void discard_nodes(Container& all_nodes, const std::set& nodes_to_keep) { /* -----------------------------------------------------------------------------------------------*/ -SubgraphExtractor::SubgraphExtractor(ONNX_NAMESPACE::GraphProto& graph) - : m_onnx_graph(graph), - m_node_inputs(graph.node_size()) { +SubgraphExtractor::SubgraphExtractor(GraphProto& graph) : m_onnx_graph(graph), m_node_inputs(graph.node_size()) { // gathers information about the graph - input edges of every node and number of "consumers" // of all tensors in the graph for (int i = 0; i < graph.node_size(); ++i) { diff --git a/src/frontends/onnx/frontend/src/detail/subgraph_extraction.hpp b/src/frontends/onnx/frontend/src/detail/subgraph_extraction.hpp index d16db361b95333..b520ac80b80b63 100644 --- a/src/frontends/onnx/frontend/src/detail/subgraph_extraction.hpp +++ b/src/frontends/onnx/frontend/src/detail/subgraph_extraction.hpp @@ -19,10 +19,14 @@ class ValueInfoProto; } // namespace ONNX_NAMESPACE namespace ov { -namespace onnx_editor { +namespace frontend { +namespace onnx { + +using ::ONNX_NAMESPACE::GraphProto; + /// \brief Subgraph extraction helper structure struct SubgraphExtractor { - SubgraphExtractor(ONNX_NAMESPACE::GraphProto& graph); + SubgraphExtractor(GraphProto& graph); /// \brief Adds new inputs to the graph and connects them to the nodes indicated by /// the provided input edges. 
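// At the user-facing level this machinery is driven through ONNXModelEditor,
// which appears later in this change. A sketch, assuming the editor is built
// with its default extension holder:
//
//     ONNXModelEditor editor{"model.onnx", /*enable_mmap=*/true};
//     editor.extract_subgraph({InputEdge{1, 0}}, {OutputEdge{0, 0}}, /*merge_inputs=*/false);
//     editor.serialize("subgraph.onnx");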
@@ -70,7 +74,7 @@ struct SubgraphExtractor { }; private: - ONNX_NAMESPACE::GraphProto& m_onnx_graph; + GraphProto& m_onnx_graph; // Graph traversal helper: the input names of each node std::vector> m_node_inputs; @@ -96,5 +100,6 @@ struct SubgraphExtractor { /// \param subgraph An object describing the subgraph to be extracted (elems to be kept) void extract_subgraph_from_onnx_model(const SubgraphComponents& subgraph); }; -} // namespace onnx_editor +} // namespace onnx +} // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/edge_mapper.cpp b/src/frontends/onnx/frontend/src/edge_mapper.cpp index 22e1fb4b8bc009..aad4b09e32bf89 100644 --- a/src/frontends/onnx/frontend/src/edge_mapper.cpp +++ b/src/frontends/onnx/frontend/src/edge_mapper.cpp @@ -12,9 +12,9 @@ #include "openvino/frontend/exception.hpp" using namespace ov; -using namespace ov::onnx_editor; +using namespace ov::frontend::onnx; -onnx_editor::EdgeMapper::EdgeMapper(const ONNX_NAMESPACE::GraphProto& graph_proto) +EdgeMapper::EdgeMapper(const GraphProto& graph_proto) : m_node_inputs(graph_proto.node().size()), m_node_outputs(graph_proto.node().size()) { int topological_index = 0; @@ -36,8 +36,7 @@ onnx_editor::EdgeMapper::EdgeMapper(const ONNX_NAMESPACE::GraphProto& graph_prot } } -std::vector onnx_editor::EdgeMapper::find_node_indexes(const std::string& node_name, - const std::string& output_name) const { +std::vector EdgeMapper::find_node_indexes(const std::string& node_name, const std::string& output_name) const { if (!output_name.empty()) { const auto& index_iter = m_node_output_name_to_index.find(output_name); if (index_iter != std::end(m_node_output_name_to_index)) { @@ -57,7 +56,7 @@ std::vector onnx_editor::EdgeMapper::find_node_indexes(const std::string& n return result; }; -int onnx_editor::EdgeMapper::get_node_output_idx(int node_index, const std::string& output_name) const { +int EdgeMapper::get_node_output_idx(int node_index, const std::string& output_name) const { FRONT_END_GENERAL_CHECK(node_index >= 0 && node_index < static_cast(m_node_outputs.size()), "Node with index: ", std::to_string(node_index), @@ -73,7 +72,7 @@ int onnx_editor::EdgeMapper::get_node_output_idx(int node_index, const std::stri return static_cast(out_port_idx - std::begin(node_outputs)); } -std::vector onnx_editor::EdgeMapper::get_node_input_indexes(int node_index, const std::string& input_name) const { +std::vector EdgeMapper::get_node_input_indexes(int node_index, const std::string& input_name) const { FRONT_END_GENERAL_CHECK(node_index >= 0 && node_index < static_cast(m_node_inputs.size()), "Node with index: ", std::to_string(node_index), @@ -96,7 +95,7 @@ std::vector onnx_editor::EdgeMapper::get_node_input_indexes(int node_index, return node_inputs_indexes; } -InputEdge onnx_editor::EdgeMapper::find_input_edge(const EditorNode& node, const EditorInput& in) const { +InputEdge EdgeMapper::find_input_edge(const EditorNode& node, const EditorInput& in) const { int node_index = node.m_node_index; if (node_index == -1) { // the node index is not provided // identification can be both based on node name and output name (if the node index is not provided) @@ -160,7 +159,7 @@ InputEdge onnx_editor::EdgeMapper::find_input_edge(const EditorNode& node, const } } -OutputEdge onnx_editor::EdgeMapper::find_output_edge(const EditorNode& node, const EditorOutput& out) const { +OutputEdge EdgeMapper::find_output_edge(const EditorNode& node, const EditorOutput& out) const { int node_index = node.m_node_index; if (node_index == -1) 
{ // the node index is not provided // identification can be both based on node name and output name (if the node index is not provided) @@ -210,11 +209,11 @@ OutputEdge onnx_editor::EdgeMapper::find_output_edge(const EditorNode& node, con return OutputEdge{node_index, output_idx}; } -OutputEdge onnx_editor::EdgeMapper::find_output_edge(const std::string& output_name) const { +OutputEdge EdgeMapper::find_output_edge(const std::string& output_name) const { return find_output_edge(EditorNode{EditorOutput{output_name}}, EditorOutput{output_name}); } -std::vector onnx_editor::EdgeMapper::find_output_consumers(const std::string& output_name) const { +std::vector EdgeMapper::find_output_consumers(const std::string& output_name) const { const auto matched_nodes_range = m_output_consumers_index.equal_range(output_name); std::vector input_edges; for (auto it = matched_nodes_range.first; it != matched_nodes_range.second; ++it) { @@ -233,7 +232,7 @@ std::vector onnx_editor::EdgeMapper::find_output_consumers(const std: return input_edges; } -bool onnx_editor::EdgeMapper::is_correct_and_unambiguous_node(const EditorNode& node) const { +bool EdgeMapper::is_correct_and_unambiguous_node(const EditorNode& node) const { if (node.m_node_index >= 0 && node.m_node_index < static_cast(m_node_inputs.size())) { return true; } @@ -250,7 +249,7 @@ void check_node(bool condition, const EditorNode& node) { } } // namespace -int onnx_editor::EdgeMapper::get_node_index(const EditorNode& node) const { +int EdgeMapper::get_node_index(const EditorNode& node) const { if (node.m_node_index != -1) { // the node index provided check_node_index(node.m_node_index); return node.m_node_index; @@ -260,7 +259,7 @@ int onnx_editor::EdgeMapper::get_node_index(const EditorNode& node) const { return indexes[0]; } -bool onnx_editor::EdgeMapper::is_correct_tensor_name(const std::string& name) const { +bool EdgeMapper::is_correct_tensor_name(const std::string& name) const { if (m_node_output_name_to_index.find(name) != std::end(m_node_output_name_to_index)) { return true; } @@ -270,7 +269,7 @@ bool onnx_editor::EdgeMapper::is_correct_tensor_name(const std::string& name) co return false; } -std::vector onnx_editor::EdgeMapper::get_input_ports(const EditorNode& node) const { +std::vector EdgeMapper::get_input_ports(const EditorNode& node) const { check_node(is_correct_and_unambiguous_node(node), node); auto node_index = node.m_node_index; if (node_index == -1) { // the node index is provided @@ -281,7 +280,7 @@ std::vector onnx_editor::EdgeMapper::get_input_ports(const EditorNo return m_node_inputs[node_index]; } -std::vector onnx_editor::EdgeMapper::get_output_ports(const EditorNode& node) const { +std::vector EdgeMapper::get_output_ports(const EditorNode& node) const { check_node(is_correct_and_unambiguous_node(node), node); auto node_index = node.m_node_index; if (node_index == -1) // the node index is provided @@ -293,7 +292,7 @@ std::vector onnx_editor::EdgeMapper::get_output_ports(const EditorN return m_node_outputs[node_index]; } -std::string onnx_editor::EdgeMapper::get_source_tensor_name(const InputEdge& edge) const { +std::string EdgeMapper::get_source_tensor_name(const InputEdge& edge) const { if (edge.m_node_idx >= 0 && edge.m_node_idx < static_cast(m_node_inputs.size()) && edge.m_port_idx >= 0 && edge.m_port_idx < static_cast(m_node_inputs[edge.m_node_idx].size())) { return m_node_inputs[edge.m_node_idx][edge.m_port_idx]; @@ -301,7 +300,7 @@ std::string onnx_editor::EdgeMapper::get_source_tensor_name(const InputEdge& edg return 
""; } -std::string onnx_editor::EdgeMapper::get_target_tensor_name(const OutputEdge& edge) const { +std::string EdgeMapper::get_target_tensor_name(const OutputEdge& edge) const { if (edge.m_node_idx >= 0 && edge.m_node_idx < static_cast(m_node_outputs.size()) && edge.m_port_idx >= 0 && edge.m_port_idx < static_cast(m_node_outputs[edge.m_node_idx].size())) { return m_node_outputs[edge.m_node_idx][edge.m_port_idx]; @@ -309,7 +308,7 @@ std::string onnx_editor::EdgeMapper::get_target_tensor_name(const OutputEdge& ed return ""; } -void onnx_editor::EdgeMapper::check_node_index(int node_index) const { +void EdgeMapper::check_node_index(int node_index) const { FRONT_END_GENERAL_CHECK(node_index >= 0 && node_index < static_cast(m_node_inputs.size()), "Provided node index: " + std::to_string(node_index) + " is out of scope"); } diff --git a/src/frontends/onnx/frontend/src/edge_mapper.hpp b/src/frontends/onnx/frontend/src/edge_mapper.hpp index b4b4d846337bbb..2c4ae25259efb5 100644 --- a/src/frontends/onnx/frontend/src/edge_mapper.hpp +++ b/src/frontends/onnx/frontend/src/edge_mapper.hpp @@ -17,7 +17,10 @@ class GraphProto; } // namespace ONNX_NAMESPACE namespace ov { -namespace onnx_editor { +namespace frontend { +namespace onnx { +using ::ONNX_NAMESPACE::GraphProto; + /// \brief A class which allows specifying InputEdge and OutputEdge by user-friendly ONNX /// names. class EdgeMapper { @@ -30,7 +33,7 @@ class EdgeMapper { /// is outdated. In such a case the update method should be called. /// /// \param graph_proto Reference to a GraphProto object. - EdgeMapper(const ONNX_NAMESPACE::GraphProto& graph_proto); + EdgeMapper(const GraphProto& graph_proto); /// \brief Returns the InputEdge based on a node (node name or output name) /// and an input (input name or input index). 
@@ -148,5 +151,6 @@ class EdgeMapper { std::map m_node_output_name_to_index; std::multimap m_output_consumers_index; }; -} // namespace onnx_editor +} // namespace onnx +} // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/editor.cpp b/src/frontends/onnx/frontend/src/editor.cpp index b3b0d28f99c24d..b141ef9bffc91b 100644 --- a/src/frontends/onnx/frontend/src/editor.cpp +++ b/src/frontends/onnx/frontend/src/editor.cpp @@ -19,14 +19,11 @@ #include "utils/onnx_internal.hpp" using namespace ov; -using namespace ov::onnx_editor; +using namespace ov::frontend::onnx; using namespace ov::frontend::onnx::common; - -OPENVINO_SUPPRESS_DEPRECATED_START +using namespace ::ONNX_NAMESPACE; namespace { -using namespace ONNX_NAMESPACE; - ValueInfoProto* find_graph_input(GraphProto& graph, const std::string& name) { for (int i = 0; i < graph.input_size(); ++i) { auto* input_desc = graph.mutable_input(i); @@ -92,7 +89,7 @@ void modify_input_type(ValueInfoProto& onnx_input, const ov::element::Type_t ele tensor_type->set_elem_type(ov_to_onnx_data_type(elem_type)); } -void add_dim_to_onnx_shape(const Dimension& dim, ONNX_NAMESPACE::TensorShapeProto& onnx_shape) { +void add_dim_to_onnx_shape(const Dimension& dim, TensorShapeProto& onnx_shape) { auto* new_dim = onnx_shape.add_dim(); if (dim.is_static()) { new_dim->set_dim_value(dim.get_length()); @@ -179,15 +176,15 @@ bool is_topologically_sorted(const GraphProto& graph) { std::transform(std::begin(graph.input()), std::end(graph.input()), std::inserter(known_tensors, std::end(known_tensors)), - extract_name); + extract_name); std::transform(std::begin(graph.output()), std::end(graph.output()), std::inserter(known_tensors, std::end(known_tensors)), - extract_name); + extract_name); std::transform(std::begin(graph.initializer()), std::end(graph.initializer()), std::inserter(known_tensors, std::end(known_tensors)), - extract_name); + extract_name); for (const auto& node : graph.node()) { for (const auto& input : node.input()) { @@ -256,13 +253,13 @@ void graph_topological_sort(GraphProto* graph) { class InferShapesAutoRelease { public: - InferShapesAutoRelease(std::shared_ptr model_proto) + InferShapesAutoRelease(std::shared_ptr model_proto) : m_model_proto{model_proto}, m_infer_shapes_was_run{false} {} bool infer_shapes() { try { // unexpected exceptions of external onnx lib - ONNX_NAMESPACE::shape_inference::InferShapes(*m_model_proto); + shape_inference::InferShapes(*m_model_proto); m_infer_shapes_was_run = true; } catch (...) 
{ release(); @@ -284,38 +281,35 @@ class InferShapesAutoRelease { } private: - std::shared_ptr<ONNX_NAMESPACE::ModelProto> m_model_proto; + std::shared_ptr<ModelProto> m_model_proto; bool m_infer_shapes_was_run; }; } // namespace /// \brief A helper class used to hold the ModelProto object as its field -struct onnx_editor::ONNXModelEditor::Impl { - std::shared_ptr<ONNX_NAMESPACE::ModelProto> m_model_proto; +struct ONNXModelEditor::Impl { + std::shared_ptr<ModelProto> m_model_proto; EdgeMapper m_edge_mapper; bool m_is_mapper_updated = false; Impl() = delete; - Impl(const std::shared_ptr<ONNX_NAMESPACE::ModelProto>& model_proto) : m_model_proto{model_proto} { + Impl(const std::shared_ptr<ModelProto>& model_proto) : m_model_proto{model_proto} { graph_topological_sort(m_model_proto->mutable_graph()); } - Impl(const std::string& model_path) - : Impl(std::make_shared<ONNX_NAMESPACE::ModelProto>(parse_from_file(model_path))) {} + Impl(const std::string& model_path) : Impl(std::make_shared<ModelProto>(parse_from_file(model_path))) {} - Impl(std::istream& model_stream) - : Impl(std::make_shared<ONNX_NAMESPACE::ModelProto>(parse_from_istream(model_stream))) {} + Impl(std::istream& model_stream) : Impl(std::make_shared<ModelProto>(parse_from_istream(model_stream))) {} #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) - Impl(const std::wstring& model_path) - : Impl(std::make_shared<ONNX_NAMESPACE::ModelProto>(parse_from_file(model_path))) {} + Impl(const std::wstring& model_path) : Impl(std::make_shared<ModelProto>(parse_from_file(model_path))) {} #endif }; -onnx_editor::ONNXModelEditor::ONNXModelEditor(const std::string& model_path, - const bool enable_mmap, - frontend::ExtensionHolder extensions) +ONNXModelEditor::ONNXModelEditor(const std::string& model_path, + const bool enable_mmap, + frontend::ExtensionHolder extensions) : m_model_path{model_path}, m_mmap_cache{enable_mmap ? std::make_shared<std::map<std::string, std::shared_ptr<ov::MappedMemory>>>() : nullptr}, @@ -325,9 +319,9 @@ onnx_editor::ONNXModelEditor::ONNXModelEditor(const std::string& model_path, }} {} #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) -onnx_editor::ONNXModelEditor::ONNXModelEditor(const std::wstring& model_path, - const bool enable_mmap, - frontend::ExtensionHolder extensions) +ONNXModelEditor::ONNXModelEditor(const std::wstring& model_path, + const bool enable_mmap, + frontend::ExtensionHolder extensions) : m_extensions{std::move(extensions)}, m_model_path{ov::util::wstring_to_string(model_path)}, m_mmap_cache{enable_mmap ? std::make_shared<std::map<std::string, std::shared_ptr<ov::MappedMemory>>>() @@ -337,10 +331,10 @@ onnx_editor::ONNXModelEditor::ONNXModelEditor(const std::wstring& model_path, }} {} #endif -onnx_editor::ONNXModelEditor::ONNXModelEditor(std::istream& model_stream, - const std::string& model_path, - const bool enable_mmap, - frontend::ExtensionHolder extensions) +ONNXModelEditor::ONNXModelEditor(std::istream& model_stream, + const std::string& model_path, + const bool enable_mmap, + frontend::ExtensionHolder extensions) : m_model_path{model_path}, m_mmap_cache{enable_mmap ? std::make_shared<std::map<std::string, std::shared_ptr<ov::MappedMemory>>>() : nullptr}, @@ -349,11 +343,11 @@ onnx_editor::ONNXModelEditor::ONNXModelEditor(std::istream& model_stream, delete impl; }} {} -const std::string& onnx_editor::ONNXModelEditor::model_path() const { +const std::string& ONNXModelEditor::model_path() const { return m_model_path; } -void onnx_editor::ONNXModelEditor::serialize(const std::string& out_file_path) const { +void ONNXModelEditor::serialize(const std::string& out_file_path) const { std::ofstream out_file{out_file_path, std::ios::out | std::ios::binary}; OPENVINO_ASSERT(out_file.is_open(), "Could not open the file: ", out_file_path); @@ -364,7 +358,7 @@ void onnx_editor::ONNXModelEditor::serialize(const std::string& out_file_path) c out_file.close(); } -void onnx_editor::ONNXModelEditor::set_input_types(const std::map<std::string, ov::element::Type_t>& input_types) { +void ONNXModelEditor::set_input_types(const std::map<std::string, ov::element::Type_t>& input_types) { auto* onnx_graph = m_pimpl->m_model_proto->mutable_graph(); for (const auto& input_desc : input_types) { @@ -377,7 +371,7 @@ void onnx_editor::ONNXModelEditor::set_input_types(const std::map<std::string, } } -ov::element::Type_t onnx_editor::ONNXModelEditor::get_input_type(const std::string& tensor_name) const { +ov::element::Type_t ONNXModelEditor::get_input_type(const std::string& tensor_name) const { auto* onnx_graph = m_pimpl->m_model_proto->mutable_graph(); auto* onnx_input = find_graph_input(*onnx_graph, tensor_name); @@ -389,10 +383,10 @@ ov::element::Type_t onnx_editor::ONNXModelEditor::get_input_type(const std::stri onnx_input->name()); auto& tensor_type = type_proto.tensor_type(); auto type = tensor_type.elem_type(); - return ngraph::onnx_import::common::get_ov_element_type(type); + return ov::frontend::onnx::common::get_ov_element_type(type); } -void onnx_editor::ONNXModelEditor::set_input_shapes(const std::map<std::string, ov::PartialShape>& input_shapes) { +void ONNXModelEditor::set_input_shapes(const std::map<std::string, ov::PartialShape>& input_shapes) { auto* onnx_graph = m_pimpl->m_model_proto->mutable_graph(); for (const auto& input_desc : input_shapes) { @@ -405,7 +399,7 @@ void onnx_editor::ONNXModelEditor::set_input_shapes(const std::map<std::string, } } -PartialShape onnx_editor::ONNXModelEditor::get_tensor_shape(const std::string& tensor_name) const { +PartialShape ONNXModelEditor::get_tensor_shape(const std::string& tensor_name) const { const auto onnx_graph = m_pimpl->m_model_proto->mutable_graph(); @@ -447,9 +441,9 @@ PartialShape onnx_editor::ONNXModelEditor::get_tensor_shape(const std::string& t } } -void onnx_editor::ONNXModelEditor::extract_subgraph(const std::vector<InputEdge>& inputs, - const std::vector<OutputEdge>& outputs, - const bool merge_inputs) { +void ONNXModelEditor::extract_subgraph(const std::vector<InputEdge>& inputs, + const std::vector<OutputEdge>& outputs, + const bool merge_inputs) { if (inputs.empty() && outputs.empty()) { return; } @@ -469,7 +463,7 @@ void onnx_editor::ONNXModelEditor::extract_subgraph(const std::vector m_pimpl->m_is_mapper_updated = false; } -std::vector<std::string> onnx_editor::ONNXModelEditor::model_inputs() const { +std::vector<std::string> ONNXModelEditor::model_inputs() const { const auto& graph = m_pimpl->m_model_proto->graph(); std::vector<std::string> inputs; for (const auto& in : graph.input()) { @@ -483,7 +477,7 @@ std::vector<std::string> onnx_editor::ONNXModelEditor::model_inputs() const { return inputs; } -std::vector<std::string> onnx_editor::ONNXModelEditor::model_outputs() const { +std::vector<std::string> ONNXModelEditor::model_outputs() const { const auto& graph = m_pimpl->m_model_proto->graph(); std::vector<std::string> outputs; outputs.reserve(graph.output_size()); @@ -491,17 +485,17 @@ std::vector<std::string> onnx_editor::ONNXModelEditor::model_outputs() const { std::transform(graph.output().begin(), graph.output().end(), std::back_inserter(outputs), - extract_name<ONNX_NAMESPACE::ValueInfoProto>); + extract_name<ValueInfoProto>); return outputs; } -std::string onnx_editor::ONNXModelEditor::get_source_tensor_name(const InputEdge& edge) const { +std::string ONNXModelEditor::get_source_tensor_name(const InputEdge& edge) const { update_mapper_if_needed(); return m_pimpl->m_edge_mapper.get_source_tensor_name(edge); } -bool
onnx_editor::ONNXModelEditor::is_input(const InputEdge& edge) const { +bool ONNXModelEditor::is_input(const InputEdge& edge) const { const auto& tensor_name = get_source_tensor_name(edge); if (tensor_name.empty()) { return false; @@ -511,12 +505,12 @@ bool onnx_editor::ONNXModelEditor::is_input(const InputEdge& edge) const { } } -std::string onnx_editor::ONNXModelEditor::get_target_tensor_name(const OutputEdge& edge) const { +std::string ONNXModelEditor::get_target_tensor_name(const OutputEdge& edge) const { update_mapper_if_needed(); return m_pimpl->m_edge_mapper.get_target_tensor_name(edge); } -bool onnx_editor::ONNXModelEditor::is_output(const OutputEdge& edge) const { +bool ONNXModelEditor::is_output(const OutputEdge& edge) const { const auto& tensor_name = get_target_tensor_name(edge); if (tensor_name.empty()) { return false; @@ -526,20 +520,18 @@ bool onnx_editor::ONNXModelEditor::is_output(const OutputEdge& edge) const { } } -std::string onnx_editor::ONNXModelEditor::model_string() const { +std::string ONNXModelEditor::model_string() const { return m_pimpl->m_model_proto->SerializeAsString(); } -std::shared_ptr<ov::Model> onnx_editor::ONNXModelEditor::get_function() const { - OPENVINO_SUPPRESS_DEPRECATED_START - return ngraph::onnx_import::detail::import_onnx_model(m_pimpl->m_model_proto, - m_model_path, - m_mmap_cache, - m_extensions); - OPENVINO_SUPPRESS_DEPRECATED_END +std::shared_ptr<ov::Model> ONNXModelEditor::get_function() const { + return ov::frontend::onnx::detail::import_onnx_model(m_pimpl->m_model_proto, + m_model_path, + m_mmap_cache, + m_extensions); } -void onnx_editor::ONNXModelEditor::set_input_values( +void ONNXModelEditor::set_input_values( const std::map<std::string, std::shared_ptr<ov::op::v0::Constant>>& input_values) { auto onnx_graph = m_pimpl->m_model_proto->mutable_graph(); @@ -563,7 +555,7 @@ void onnx_editor::ONNXModelEditor::set_input_values( } } -void onnx_editor::ONNXModelEditor::set_tensor_name(const std::string& current_name, const std::string& new_name) { +void ONNXModelEditor::set_tensor_name(const std::string& current_name, const std::string& new_name) { OPENVINO_ASSERT(!new_name.empty(), "New name must not be empty."); const auto graph = m_pimpl->m_model_proto->mutable_graph(); @@ -606,7 +598,7 @@ void onnx_editor::ONNXModelEditor::set_tensor_name(const std::string& current_na } } -void onnx_editor::ONNXModelEditor::set_node_name(const EditorNode& node, const std::string& new_name) { +void ONNXModelEditor::set_node_name(const EditorNode& node, const std::string& new_name) { const auto node_idx = m_pimpl->m_edge_mapper.get_node_index(node); const auto graph = m_pimpl->m_model_proto->mutable_graph(); @@ -615,7 +607,7 @@ void onnx_editor::ONNXModelEditor::set_node_name(const EditorNode& node, const s *graph->mutable_node(node_idx)->mutable_name() = new_name; } -std::string onnx_editor::ONNXModelEditor::get_node_name(const EditorNode& node) const { +std::string ONNXModelEditor::get_node_name(const EditorNode& node) const { if (node.m_node_index >= 0) { if (node.m_node_index >= m_pimpl->m_model_proto->graph().node().size()) { return ""; @@ -627,7 +619,7 @@ std::string onnx_editor::ONNXModelEditor::get_node_name(const EditorNode& node) } } -void onnx_editor::ONNXModelEditor::clear_nodes_name(const std::string& name) { +void ONNXModelEditor::clear_nodes_name(const std::string& name) { const auto graph = m_pimpl->m_model_proto->mutable_graph(); m_pimpl->m_is_mapper_updated = false; @@ -639,9 +631,9 @@ void onnx_editor::ONNXModelEditor::clear_nodes_name(const std::string& name) { } } -void
onnx_editor::ONNXModelEditor::set_name_for_dimension(const std::string& node_name, - size_t shape_dim_index, - const std::string& dim_name) { +void ONNXModelEditor::set_name_for_dimension(const std::string& node_name, + size_t shape_dim_index, + const std::string& dim_name) { OPENVINO_ASSERT(!dim_name.empty(), "Dimension name must not be empty."); const auto graph = m_pimpl->m_model_proto->mutable_graph(); @@ -674,68 +666,68 @@ void onnx_editor::ONNXModelEditor::set_name_for_dimension(const std::string& nod set_dim_param(value_info); } -void onnx_editor::ONNXModelEditor::update_mapper_if_needed() const { +void ONNXModelEditor::update_mapper_if_needed() const { if (!m_pimpl->m_is_mapper_updated) { m_pimpl->m_edge_mapper = EdgeMapper(m_pimpl->m_model_proto->graph()); } m_pimpl->m_is_mapper_updated = true; } -InputEdge onnx_editor::ONNXModelEditor::find_input_edge(const EditorNode& node, const EditorInput& input) const { +InputEdge ONNXModelEditor::find_input_edge(const EditorNode& node, const EditorInput& input) const { update_mapper_if_needed(); return m_pimpl->m_edge_mapper.find_input_edge(node, input); } -OutputEdge onnx_editor::ONNXModelEditor::find_output_edge(const EditorNode& node, const EditorOutput& input) const { +OutputEdge ONNXModelEditor::find_output_edge(const EditorNode& node, const EditorOutput& input) const { update_mapper_if_needed(); return m_pimpl->m_edge_mapper.find_output_edge(node, input); } -OutputEdge onnx_editor::ONNXModelEditor::find_output_edge(const std::string& output_name) const { +OutputEdge ONNXModelEditor::find_output_edge(const std::string& output_name) const { update_mapper_if_needed(); return m_pimpl->m_edge_mapper.find_output_edge(output_name); } -std::vector onnx_editor::ONNXModelEditor::find_output_consumers(const std::string& output_name) const { +std::vector ONNXModelEditor::find_output_consumers(const std::string& output_name) const { update_mapper_if_needed(); return m_pimpl->m_edge_mapper.find_output_consumers(output_name); } -bool onnx_editor::ONNXModelEditor::is_correct_and_unambiguous_node(const EditorNode& node) const { +bool ONNXModelEditor::is_correct_and_unambiguous_node(const EditorNode& node) const { update_mapper_if_needed(); return m_pimpl->m_edge_mapper.is_correct_and_unambiguous_node(node); } -int onnx_editor::ONNXModelEditor::get_node_index(const EditorNode& node) const { +int ONNXModelEditor::get_node_index(const EditorNode& node) const { update_mapper_if_needed(); return m_pimpl->m_edge_mapper.get_node_index(node); } -bool onnx_editor::ONNXModelEditor::is_correct_tensor_name(const std::string& name) const { +bool ONNXModelEditor::is_correct_tensor_name(const std::string& name) const { update_mapper_if_needed(); return m_pimpl->m_edge_mapper.is_correct_tensor_name(name); } -std::vector onnx_editor::ONNXModelEditor::get_input_ports(const EditorNode& node) const { +std::vector ONNXModelEditor::get_input_ports(const EditorNode& node) const { update_mapper_if_needed(); return m_pimpl->m_edge_mapper.get_input_ports(node); } -std::vector onnx_editor::ONNXModelEditor::get_output_ports(const EditorNode& node) const { +std::vector ONNXModelEditor::get_output_ports(const EditorNode& node) const { update_mapper_if_needed(); return m_pimpl->m_edge_mapper.get_output_ports(node); } -std::shared_ptr onnx_editor::ONNXModelEditor::decode() { - return ngraph::onnx_import::detail::decode_to_framework_nodes(m_pimpl->m_model_proto, - m_model_path, - m_mmap_cache, - m_extensions); +std::shared_ptr ONNXModelEditor::decode() { + return 
ov::frontend::onnx::detail::decode_to_framework_nodes(m_pimpl->m_model_proto, + m_model_path, + m_mmap_cache, + m_extensions); } -void onnx_editor::ONNXModelEditor::add_output(const OutputEdge& output_edge) const { +void ONNXModelEditor::add_output(const OutputEdge& output_edge) const { auto onnx_graph = m_pimpl->m_model_proto->mutable_graph(); - std::vector onnx_output; + std::vector onnx_output; onnx_output.push_back(output_edge); SubgraphExtractor editor{*onnx_graph}; editor.add_new_outputs(onnx_output); diff --git a/src/frontends/onnx/frontend/src/editor.hpp b/src/frontends/onnx/frontend/src/editor.hpp index b66d2e6cd757fc..ee5a159d8a4cb3 100644 --- a/src/frontends/onnx/frontend/src/editor.hpp +++ b/src/frontends/onnx/frontend/src/editor.hpp @@ -9,7 +9,6 @@ #include #include "editor_types.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/model.hpp" #include "openvino/frontend/extension/holder.hpp" #include "openvino/frontend/extension/progress_reporter.hpp" @@ -18,7 +17,8 @@ #include "utils/tensor_external_data.hpp" namespace ov { -namespace onnx_editor { +namespace frontend { +namespace onnx { /// \brief A class representing a set of utilities allowing modification of an ONNX model /// /// \note This class can be used to modify an ONNX model before it gets translated to @@ -305,11 +305,12 @@ class ONNXModelEditor final { void update_mapper_if_needed() const; const std::string m_model_path; - ngraph::onnx_import::detail::MappedMemoryHandles m_mmap_cache; + ov::frontend::onnx::detail::MappedMemoryHandles m_mmap_cache; frontend::ExtensionHolder m_extensions; struct Impl; std::unique_ptr m_pimpl; }; -} // namespace onnx_editor +} // namespace onnx +} // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/editor_types.hpp b/src/frontends/onnx/frontend/src/editor_types.hpp index 56b341e2c903ec..75d8659c263340 100644 --- a/src/frontends/onnx/frontend/src/editor_types.hpp +++ b/src/frontends/onnx/frontend/src/editor_types.hpp @@ -22,7 +22,8 @@ struct Edge { const int m_port_idx; const std::string m_new_input_name; }; -namespace onnx_editor { +namespace frontend { +namespace onnx { /// \brief Defines an edge connected to an input of any node in the graph. /// It consists of a node index in the processed ONNX model and the port index. 
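The `InputEdge`/`OutputEdge` types described here are plain index pairs into the topologically sorted graph. A minimal sketch of building them directly (indices invented for illustration), the same way `InputModel::convert_place_to_input_edge` later in this diff constructs them before handing them to the editor:

```cpp
#include "editor.hpp"
#include "editor_types.hpp"

using namespace ov::frontend::onnx;

// Cut the subgraph between node 2's first input and node 5's second output.
void cut_subgraph(ONNXModelEditor& editor) {
    const InputEdge in_edge{/*node_idx=*/2, /*port_idx=*/0};
    const OutputEdge out_edge{/*node_idx=*/5, /*port_idx=*/1};
    editor.extract_subgraph({in_edge}, {out_edge});
}
```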
/// The node index should point to a node in the topological sort of the underlying @@ -123,5 +124,6 @@ struct EditorNode { std::string m_output_name = ""; int m_node_index = -1; }; -} // namespace onnx_editor +} // namespace onnx +} // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/exceptions.cpp b/src/frontends/onnx/frontend/src/exceptions.cpp index 95cc74d5009d16..ffbf966b2130c9 100644 --- a/src/frontends/onnx/frontend/src/exceptions.cpp +++ b/src/frontends/onnx/frontend/src/exceptions.cpp @@ -6,15 +6,11 @@ #include -#include "openvino/core/deprecated.hpp" - -OPENVINO_SUPPRESS_DEPRECATED_START - namespace ov { namespace frontend { namespace onnx_error { namespace detail { -std::string get_error_msg_prefix(const ngraph::onnx_import::Node& node) { +std::string get_error_msg_prefix(const ov::frontend::onnx::Node& node) { std::stringstream ss; ss << "While validating ONNX node '" << node << "'"; return ss.str(); @@ -24,7 +20,7 @@ std::string get_error_msg_prefix(const ngraph::onnx_import::Node& node) { void OnnxNodeValidationFailure::create(const char* file, int line, const char* check_string, - const ngraph::onnx_import::Node& node, + const ov::frontend::onnx::Node& node, const std::string& explanation) { throw OnnxNodeValidationFailure( make_what(file, line, check_string, detail::get_error_msg_prefix(node), explanation)); @@ -32,5 +28,3 @@ void OnnxNodeValidationFailure::create(const char* file, } // namespace onnx_error } // namespace frontend } // namespace ov - -OPENVINO_SUPPRESS_DEPRECATED_END diff --git a/src/frontends/onnx/frontend/src/exceptions.hpp b/src/frontends/onnx/frontend/src/exceptions.hpp index 37938f7b0e8598..388c9a4241f55e 100644 --- a/src/frontends/onnx/frontend/src/exceptions.hpp +++ b/src/frontends/onnx/frontend/src/exceptions.hpp @@ -17,7 +17,7 @@ namespace onnx_error { namespace detail { OPENVINO_SUPPRESS_DEPRECATED_START -std::string get_error_msg_prefix(const ngraph::onnx_import::Node& node); +std::string get_error_msg_prefix(const ov::frontend::onnx::Node& node); OPENVINO_SUPPRESS_DEPRECATED_END } // namespace detail @@ -26,7 +26,7 @@ class OnnxNodeValidationFailure : public ov::AssertFailure { OPENVINO_SUPPRESS_DEPRECATED_START [[noreturn]] static void create(const char* file, int line, const char* check_string, - const ngraph::onnx_import::Node& node, + const ov::frontend::onnx::Node& node, const std::string& explanation); OPENVINO_SUPPRESS_DEPRECATED_END @@ -36,7 +36,7 @@ class OnnxNodeValidationFailure : public ov::AssertFailure { OPENVINO_SUPPRESS_DEPRECATED_START struct invalid_external_data : ov::Exception { - invalid_external_data(const ngraph::onnx_import::detail::TensorExternalData& external_data) + invalid_external_data(const ov::frontend::onnx::detail::TensorExternalData& external_data) : ov::Exception{std::string{"invalid external data: "} + external_data.to_string()} {} invalid_external_data(const std::string& what_arg) : ov::Exception{what_arg} {} }; @@ -46,13 +46,15 @@ OPENVINO_SUPPRESS_DEPRECATED_END } // namespace frontend } // namespace ov -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace error { using namespace ov::frontend::onnx_error; } // namespace error -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov #define CHECK_VALID_NODE(node_, cond_, ...) 
\ OPENVINO_ASSERT_HELPER(ov::frontend::onnx_error::OnnxNodeValidationFailure, (node_), (cond_), ##__VA_ARGS__) diff --git a/src/frontends/onnx/frontend/src/frontend.cpp b/src/frontends/onnx/frontend/src/frontend.cpp index 4fda48e197e539..27ccd18a5f757c 100644 --- a/src/frontends/onnx/frontend/src/frontend.cpp +++ b/src/frontends/onnx/frontend/src/frontend.cpp @@ -16,7 +16,6 @@ #include #include "input_model.hpp" -#include "legacy_op_extension.hpp" #include "onnx_common/onnx_model_validator.hpp" #include "openvino/core/so_extension.hpp" #include "openvino/frontend/exception.hpp" @@ -28,7 +27,6 @@ #include "ops_bridge.hpp" #include "transformations/resolve_names_collisions.hpp" #include "utils/common.hpp" -#include "utils/legacy_conversion_extension.hpp" #include "utils/onnx_internal.hpp" using namespace ov; @@ -120,7 +118,7 @@ void FrontEnd::normalize(const std::shared_ptr& model) const { std::shared_ptr FrontEnd::convert(const InputModel::Ptr& model) const { const auto partially_converted = convert_partially(model); - const auto error_message = ngraph::onnx_import::common::collect_translation_exceptions(partially_converted); + const auto error_message = ov::frontend::onnx::common::collect_translation_exceptions(partially_converted); FRONT_END_GENERAL_CHECK(error_message.empty(), error_message); normalize(partially_converted); @@ -129,7 +127,7 @@ std::shared_ptr FrontEnd::convert(const InputModel::Ptr& model) const } void FrontEnd::convert(const std::shared_ptr& partially_converted) const { - ngraph::onnx_import::detail::convert_decoded_model(partially_converted); + ov::frontend::onnx::detail::convert_decoded_model(partially_converted); normalize(partially_converted); } @@ -195,13 +193,6 @@ bool FrontEnd::supported_impl(const std::vector& variants) const { return false; } -namespace { -const auto legacy_conversion_extension = std::make_shared(); -const ngraph::onnx_import::LegacyConversionExtension::Ptr get_legacy_conversion_extension() { - return legacy_conversion_extension; -} -} // namespace - void FrontEnd::add_extension(const std::shared_ptr& extension) { if (auto telemetry = std::dynamic_pointer_cast(extension)) { m_extensions.telemetry = telemetry; @@ -216,10 +207,5 @@ void FrontEnd::add_extension(const std::shared_ptr& extension) { m_extensions.conversions.push_back(onnx_conv_ext); } else if (auto progress_reporter = std::dynamic_pointer_cast(extension)) { m_extensions.progress_reporter = progress_reporter; - } else if (const auto& legacy_ext = std::dynamic_pointer_cast(extension)) { - m_other_extensions.push_back(legacy_ext); - std::call_once(has_legacy_extension, [this] { - m_extensions.conversions.push_back(get_legacy_conversion_extension()); - }); } } diff --git a/src/frontends/onnx/frontend/src/input_model.cpp b/src/frontends/onnx/frontend/src/input_model.cpp index ae24f22b3fc84d..aa367b5c5ecf6f 100644 --- a/src/frontends/onnx/frontend/src/input_model.cpp +++ b/src/frontends/onnx/frontend/src/input_model.cpp @@ -12,25 +12,22 @@ using namespace ov; using namespace ov::frontend::onnx; -OPENVINO_SUPPRESS_DEPRECATED_START - InputModel::InputModel(const std::string& path, const bool enable_mmap, frontend::ExtensionHolder extensions) - : m_editor{std::make_shared(path, enable_mmap, std::move(extensions))} {} + : m_editor{std::make_shared(path, enable_mmap, std::move(extensions))} {} #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) InputModel::InputModel(const std::wstring& path, const bool enable_mmap, frontend::ExtensionHolder extensions) - : 
m_editor{std::make_shared(path, enable_mmap, std::move(extensions))} {} + : m_editor{std::make_shared(path, enable_mmap, std::move(extensions))} {} #endif InputModel::InputModel(std::istream& model_stream, const bool enable_mmap, frontend::ExtensionHolder extensions) - : m_editor{std::make_shared(model_stream, "", enable_mmap, std::move(extensions))} {} + : m_editor{std::make_shared(model_stream, "", enable_mmap, std::move(extensions))} {} InputModel::InputModel(std::istream& model_stream, const std::string& path, const bool enable_mmap, frontend::ExtensionHolder extensions) - : m_editor{std::make_shared(model_stream, path, enable_mmap, std::move(extensions))} { -} + : m_editor{std::make_shared(model_stream, path, enable_mmap, std::move(extensions))} {} #ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT InputModel::InputModel(std::istream& model_stream, @@ -69,8 +66,8 @@ ov::frontend::Place::Ptr InputModel::get_place_by_tensor_name(const std::string& ov::frontend::Place::Ptr InputModel::get_place_by_operation_name(const std::string& operation_name) const { if (m_editor->is_correct_and_unambiguous_node(operation_name)) { - const auto node_index = m_editor->get_node_index(onnx_editor::EditorNode{operation_name}); - onnx_editor::EditorNode node{node_index}; + const auto node_index = m_editor->get_node_index(EditorNode{operation_name}); + EditorNode node{node_index}; node.m_node_name = operation_name; return std::make_shared(node, m_editor); } @@ -340,8 +337,8 @@ void InputModel::override_all_inputs(const std::vector void InputModel::extract_subgraph(const std::vector& inputs, const std::vector& outputs) { - std::vector onnx_inputs = convert_place_to_input_edge(inputs); - std::vector onnx_outputs = convert_place_to_output_edge(outputs); + std::vector onnx_inputs = convert_place_to_input_edge(inputs); + std::vector onnx_outputs = convert_place_to_output_edge(outputs); m_editor->extract_subgraph(onnx_inputs, onnx_outputs); } @@ -442,9 +439,8 @@ void InputModel::set_tensor_value(const ov::frontend::Place::Ptr& place, const v } } -std::vector InputModel::convert_place_to_input_edge( - const std::vector& inputs) { - std::vector onnx_inputs; +std::vector InputModel::convert_place_to_input_edge(const std::vector& inputs) { + std::vector onnx_inputs; onnx_inputs.reserve(inputs.size()); for (const auto& input : inputs) { if (const auto input_port = std::dynamic_pointer_cast(input)) { @@ -456,7 +452,7 @@ std::vector InputModel::convert_place_to_input_edge( std::transform(std::begin(consumers), std::end(consumers), std::back_inserter(onnx_inputs), - [](const onnx_editor::InputEdge& edge) { + [](const InputEdge& edge) { return edge; }); } else if (const auto op = std::dynamic_pointer_cast(input)) { @@ -469,7 +465,7 @@ std::vector InputModel::convert_place_to_input_edge( std::end(op_inputs), std::back_inserter(onnx_inputs), [&node_idx, &port_idx](const std::string&) { - return onnx_editor::InputEdge{node_idx, port_idx++}; + return InputEdge{node_idx, port_idx++}; }); } } @@ -477,9 +473,8 @@ std::vector InputModel::convert_place_to_input_edge( return onnx_inputs; } -std::vector InputModel::convert_place_to_output_edge( - const std::vector& outputs) { - std::vector onnx_outputs; +std::vector InputModel::convert_place_to_output_edge(const std::vector& outputs) { + std::vector onnx_outputs; onnx_outputs.reserve(outputs.size()); for (const auto& output : outputs) { if (const auto output_port = std::dynamic_pointer_cast(output)) { @@ -501,7 +496,7 @@ std::vector InputModel::convert_place_to_output_edge( 
std::end(op_outputs), std::back_inserter(onnx_outputs), [&node_idx, &port_idx](const std::string&) { - return onnx_editor::OutputEdge{node_idx, port_idx++}; + return OutputEdge{node_idx, port_idx++}; }); } } diff --git a/src/frontends/onnx/frontend/src/input_model.hpp b/src/frontends/onnx/frontend/src/input_model.hpp index 803766ae25af8c..eeba81b8cbd7c2 100644 --- a/src/frontends/onnx/frontend/src/input_model.hpp +++ b/src/frontends/onnx/frontend/src/input_model.hpp @@ -78,13 +78,11 @@ class InputModel : public ov::frontend::InputModel { void set_tensor_value(const ov::frontend::Place::Ptr& place, const void* value) override; // internal usage - std::vector convert_place_to_input_edge( - const std::vector& inputs); - std::vector convert_place_to_output_edge( - const std::vector& outputs); + std::vector convert_place_to_input_edge(const std::vector& inputs); + std::vector convert_place_to_output_edge(const std::vector& outputs); private: - std::shared_ptr m_editor; + std::shared_ptr m_editor; bool is_correct_place(const ov::frontend::Place::Ptr& place) const; std::unordered_map> m_additional_tensor_names; diff --git a/src/frontends/onnx/frontend/src/node_context.cpp b/src/frontends/onnx/frontend/src/node_context.cpp index 8ea1e9a0b4ddef..91a0d7bdf49b31 100644 --- a/src/frontends/onnx/frontend/src/node_context.cpp +++ b/src/frontends/onnx/frontend/src/node_context.cpp @@ -6,11 +6,10 @@ #include #include -OPENVINO_SUPPRESS_DEPRECATED_START -ov::frontend::onnx::NodeContext::NodeContext(const ngraph::onnx_import::Node& context) +ov::frontend::onnx::NodeContext::NodeContext(const ov::frontend::onnx::Node& context) : ov::frontend::NodeContext(context.op_type()), m_context(context), - m_inputs(context.get_ng_inputs()) {} + m_inputs(context.get_ov_inputs()) {} ov::Output ov::frontend::onnx::NodeContext::get_input(int port_idx) const { return m_inputs.at(port_idx); @@ -19,7 +18,7 @@ ov::Output ov::frontend::onnx::NodeContext::get_input(int port_idx) co ov::Any ov::frontend::onnx::NodeContext::get_attribute_as_any(const std::string& name) const { try { return m_context.get_attribute_value(name); - } catch (ngraph::onnx_import::error::node::UnknownAttribute&) { + } catch (ov::frontend::onnx::error::node::UnknownAttribute&) { return ov::Any(); } } @@ -31,16 +30,15 @@ size_t ov::frontend::onnx::NodeContext::get_input_size() const { ov::Any ov::frontend::onnx::NodeContext::apply_additional_conversion_rules(const ov::Any& data, const std::type_info& type_info) const { if (data.is() && type_info == typeid(ov::element::Type)) { - return ngraph::onnx_import::common::get_ov_element_type(data.as()); + return ov::frontend::onnx::common::get_ov_element_type(data.as()); } else if (data.is>() && type_info == typeid(std::vector)) { const auto& casted = data.as>(); std::vector types(casted.size()); for (size_t i = 0; i < casted.size(); ++i) { - types[i] = ngraph::onnx_import::common::get_ov_element_type(casted[i]); + types[i] = ov::frontend::onnx::common::get_ov_element_type(casted[i]); } return types; } // no conversion rules found return data; } -OPENVINO_SUPPRESS_DEPRECATED_END diff --git a/src/frontends/onnx/frontend/src/onnx_framework_node.cpp b/src/frontends/onnx/frontend/src/onnx_framework_node.cpp index 5af271e2794be4..0a3f148983cac1 100644 --- a/src/frontends/onnx/frontend/src/onnx_framework_node.cpp +++ b/src/frontends/onnx/frontend/src/onnx_framework_node.cpp @@ -16,8 +16,9 @@ #include "onnx_framework_node.hpp" -namespace ngraph { +namespace ov { namespace frontend { +namespace onnx { std::shared_ptr 
ONNXFrameworkNode::clone_with_new_inputs(const ov::OutputVector& inputs) const { return std::make_shared(m_node, inputs); } @@ -45,5 +46,6 @@ bool NotSupportedONNXNode::visit_attributes(ov::AttributeVisitor& visitor) { return true; } +} // namespace onnx } // namespace frontend -} // namespace ngraph +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/onnx_framework_node.hpp b/src/frontends/onnx/frontend/src/onnx_framework_node.hpp index 2aee7b83b9197b..f1807abc4e9543 100644 --- a/src/frontends/onnx/frontend/src/onnx_framework_node.hpp +++ b/src/frontends/onnx/frontend/src/onnx_framework_node.hpp @@ -26,20 +26,18 @@ namespace ONNX_NAMESPACE { class ModelProto; } // namespace ONNX_NAMESPACE -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { class Model; -} -namespace frontend { -OPENVINO_SUPPRESS_DEPRECATED_START class ONNXFrameworkNode : public ov::op::util::FrameworkNode { public: OPENVINO_OP("ONNXFrameworkNode", "util", ov::op::util::FrameworkNode); - ONNXFrameworkNode(const onnx_import::Node& node) : ONNXFrameworkNode(node, node.get_ng_inputs()) {} + ONNXFrameworkNode(const ov::frontend::onnx::Node& node) : ONNXFrameworkNode(node, node.get_ov_inputs()) {} - ONNXFrameworkNode(const onnx_import::Node& node, const ov::OutputVector& inputs) + ONNXFrameworkNode(const ov::frontend::onnx::Node& node, const ov::OutputVector& inputs) : ov::op::util::FrameworkNode(inputs, node.get_outputs_size()), m_node(node) { ov::op::util::FrameworkNodeAttrs attrs; @@ -48,7 +46,7 @@ class ONNXFrameworkNode : public ov::op::util::FrameworkNode { set_attrs(attrs); } - ov::OutputVector get_ov_nodes(const std::shared_ptr& graph) const { + ov::OutputVector get_ov_nodes(const std::shared_ptr& graph) const { ov::OutputVector ov_nodes{graph->make_ov_nodes(m_node)}; if (ov_nodes.size() > get_output_size()) { ov_nodes.resize(get_output_size()); @@ -68,14 +66,14 @@ class ONNXFrameworkNode : public ov::op::util::FrameworkNode { } protected: - onnx_import::Node m_node; + ov::frontend::onnx::Node m_node; }; class ONNXSubgraphFrameworkNode : public ONNXFrameworkNode { public: OPENVINO_OP("ONNXSubgraphFrameworkNode", "util", ONNXFrameworkNode); - ONNXSubgraphFrameworkNode(const onnx_import::Node& node, + ONNXSubgraphFrameworkNode(const ov::frontend::onnx::Node& node, const std::vector>& models, const ov::OutputVector& inputs) : ONNXFrameworkNode(node, inputs), @@ -95,9 +93,8 @@ class ONNXSubgraphFrameworkNode : public ONNXFrameworkNode { private: std::vector> m_models; }; -OPENVINO_SUPPRESS_DEPRECATED_END -// Be careful with using protobuf references (also onnx_import::Node) inside NotSupportedONNXNode +// Be careful with using protobuf references (also ov::frontend::onnx::Node) inside NotSupportedONNXNode // which are inserted into ov::Model due to different lifetime and problematic sharing between dynamic libs. 
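The lifetime warning above is why conversion is deferred. A hedged sketch of how framework nodes are eventually swapped for real OV ops (simplified; the actual logic lives behind `utils/onnx_internal.hpp`'s `convert_decoded_model`, and the `Graph` parameter type is an assumption here):

```cpp
#include <memory>

#include "onnx_framework_node.hpp"
#include "openvino/core/model.hpp"

// Simplified illustration only: walk a decoded model and replace each
// ONNXFrameworkNode's outputs with the converted OV nodes.
void replace_framework_nodes(const std::shared_ptr<ov::Model>& decoded_model,
                             const std::shared_ptr<ov::frontend::onnx::Graph>& graph) {
    for (const auto& op : decoded_model->get_ordered_ops()) {
        if (const auto fw_node = std::dynamic_pointer_cast<ov::frontend::onnx::ONNXFrameworkNode>(op)) {
            const ov::OutputVector replacements = fw_node->get_ov_nodes(graph);
            for (size_t i = 0; i < replacements.size(); ++i) {
                fw_node->output(i).replace(replacements[i]);  // rewire consumers
            }
        }
    }
}
```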
class NotSupportedONNXNode : public ov::op::util::FrameworkNode { static constexpr const char* failed_conversion_key = "onnx::NotSupportedONNXNode::failed_conversion_key"; @@ -127,5 +124,6 @@ class NotSupportedONNXNode : public ov::op::util::FrameworkNode { virtual bool visit_attributes(ov::AttributeVisitor& visitor) override; }; +} // namespace onnx } // namespace frontend -} // namespace ngraph +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/abs.hpp b/src/frontends/onnx/frontend/src/op/abs.hpp index ac9e543d2645ff..ff6eab7a7fd74d 100644 --- a/src/frontends/onnx/frontend/src/op/abs.hpp +++ b/src/frontends/onnx/frontend/src/op/abs.hpp @@ -4,22 +4,20 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "exceptions.hpp" #include "openvino/op/abs.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector abs(const Node& node) { +inline ov::OutputVector abs(const ov::frontend::onnx::Node& node) { CHECK_VALID_NODE(node, !node.has_attribute("consumed_inputs"), "consumed_inputs legacy attribute of Abs op is not supported"); - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 @@ -30,10 +28,7 @@ using set_1::abs; namespace set_13 { using set_6::abs; } // namespace set_13 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/acos.hpp b/src/frontends/onnx/frontend/src/op/acos.hpp index f51b19fe30d4c8..d26956409233a3 100644 --- a/src/frontends/onnx/frontend/src/op/acos.hpp +++ b/src/frontends/onnx/frontend/src/op/acos.hpp @@ -4,24 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/acos.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_7 { -inline ov::OutputVector acos(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector acos(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_7 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/acosh.hpp b/src/frontends/onnx/frontend/src/op/acosh.hpp index f8b75112f26762..fe7b07d0ab60c9 100644 --- a/src/frontends/onnx/frontend/src/op/acosh.hpp +++ b/src/frontends/onnx/frontend/src/op/acosh.hpp @@ -4,24 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/acosh.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_9 { -inline ov::OutputVector acosh(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector acosh(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_9 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // 
namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp b/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp index ee11de0e02cb56..c7e260e50eafc5 100644 --- a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp +++ b/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.cpp @@ -9,14 +9,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { - -ov::OutputVector adaptive_avg_pooling2d(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector adaptive_avg_pooling2d(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); const auto num_inputs = inputs.size(); CHECK_VALID_NODE(node, num_inputs == 2, "adaptive_avg_pooling2d expects 2 input tensors. Got: ", num_inputs); @@ -26,6 +25,6 @@ ov::OutputVector adaptive_avg_pooling2d(const Node& node) { } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.hpp b/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.hpp index e806f8e657673f..b7c3fc8b9b06c7 100644 --- a/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.hpp +++ b/src/frontends/onnx/frontend/src/op/adaptive_avg_pooling2d.hpp @@ -3,18 +3,16 @@ // #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector adaptive_avg_pooling2d(const Node& node); +ov::OutputVector adaptive_avg_pooling2d(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/add.cpp b/src/frontends/onnx/frontend/src/op/add.cpp index 853c11d21eda78..4b4babda2239b9 100644 --- a/src/frontends/onnx/frontend/src/op/add.cpp +++ b/src/frontends/onnx/frontend/src/op/add.cpp @@ -10,12 +10,12 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector add(const Node& node) { +ov::OutputVector add(const ov::frontend::onnx::Node& node) { CHECK_VALID_NODE(node, !node.has_attribute("consumed_inputs"), "consumed_inputs legacy attribute of Add op is not supported"); @@ -24,20 +24,17 @@ ov::OutputVector add(const Node& node) { } // namespace set_1 namespace set_6 { -ov::OutputVector add(const Node& node) { +ov::OutputVector add(const ov::frontend::onnx::Node& node) { return common::handle_opset6_binary_op(node); } } // namespace set_6 namespace set_7 { -ov::OutputVector add(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; +ov::OutputVector add(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } } // namespace set_7 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // 
namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/add.hpp b/src/frontends/onnx/frontend/src/op/add.hpp index c516542a2c95b5..ab84790178d8da 100644 --- a/src/frontends/onnx/frontend/src/op/add.hpp +++ b/src/frontends/onnx/frontend/src/op/add.hpp @@ -4,26 +4,24 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector add(const Node& node); +ov::OutputVector add(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_6 { -ov::OutputVector add(const Node& node); +ov::OutputVector add(const ov::frontend::onnx::Node& node); } // namespace set_6 namespace set_7 { -ov::OutputVector add(const Node& node); +ov::OutputVector add(const ov::frontend::onnx::Node& node); } // namespace set_7 @@ -34,10 +32,7 @@ using set_7::add; namespace set_14 { using set_13::add; } // namespace set_14 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/affine.cpp b/src/frontends/onnx/frontend/src/op/affine.cpp index c6c2de23c55f08..651edea79ce7af 100644 --- a/src/frontends/onnx/frontend/src/op/affine.cpp +++ b/src/frontends/onnx/frontend/src/op/affine.cpp @@ -10,17 +10,17 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector affine(const Node& node) { +ov::OutputVector affine(const ov::frontend::onnx::Node& node) { // Affine is an obsolete experimental ONNX operation. // It takes one input tensor and produces one output tensor where // the affine function, y = alpha * x + beta, is applied to the input // elementwise. - const auto inputs = node.get_ng_inputs(); + const auto inputs = node.get_ov_inputs(); CHECK_VALID_NODE(node, inputs.size() == 1, "Affine expects 1 input tensor. 
Got: ", inputs.size()); CHECK_VALID_NODE(node, node.has_attribute("alpha"), "\"alpha\" attribute is required."); @@ -34,10 +34,7 @@ ov::OutputVector affine(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/affine.hpp b/src/frontends/onnx/frontend/src/op/affine.hpp index 4535a772fb239f..7b0f15743fe7f4 100644 --- a/src/frontends/onnx/frontend/src/op/affine.hpp +++ b/src/frontends/onnx/frontend/src/op/affine.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector affine(const Node& node); +ov::OutputVector affine(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/and.hpp b/src/frontends/onnx/frontend/src/op/and.hpp index 2e9eae6490f23e..b791a797aeb11f 100644 --- a/src/frontends/onnx/frontend/src/op/and.hpp +++ b/src/frontends/onnx/frontend/src/op/and.hpp @@ -4,28 +4,25 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/logical_and.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector logical_and(const Node& node) { +inline ov::OutputVector logical_and(const ov::frontend::onnx::Node& node) { return common::handle_opset6_binary_op(node); } } // namespace set_1 namespace set_7 { -inline ov::OutputVector logical_and(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; +inline ov::OutputVector logical_and(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } } // namespace set_7 - } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/argmax.cpp b/src/frontends/onnx/frontend/src/op/argmax.cpp index cd04b737c782b7..8a6c414a4cd0e6 100644 --- a/src/frontends/onnx/frontend/src/op/argmax.cpp +++ b/src/frontends/onnx/frontend/src/op/argmax.cpp @@ -7,12 +7,12 @@ #include "exceptions.hpp" #include "utils/arg_min_max_factory.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector argmax(const Node& node) { +ov::OutputVector argmax(const ov::frontend::onnx::Node& node) { const utils::ArgMinMaxFactory arg_factory(node); return {arg_factory.make_arg_max()}; } @@ -20,16 +20,13 @@ ov::OutputVector argmax(const Node& node) { } // namespace set_1 namespace set_12 { -ov::OutputVector argmax(const Node& node) { +ov::OutputVector argmax(const ov::frontend::onnx::Node& node) { const utils::ArgMinMaxFactory arg_factory(node); return {arg_factory.make_arg_max()}; } } // namespace set_12 - } // namespace op - -} // namespace onnx_import - 
-} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/argmax.hpp b/src/frontends/onnx/frontend/src/op/argmax.hpp index 6fd7c27a4f1386..380c7297f7c8e2 100644 --- a/src/frontends/onnx/frontend/src/op/argmax.hpp +++ b/src/frontends/onnx/frontend/src/op/argmax.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Convert ONNX ArgMax operation to an OV node. @@ -19,7 +17,7 @@ namespace set_1 { /// /// \return The vector containing an OV node which produces the output /// of an ONNX ArgMax operation. -ov::OutputVector argmax(const Node& node); +ov::OutputVector argmax(const ov::frontend::onnx::Node& node); } // namespace set_1 @@ -30,13 +28,10 @@ namespace set_12 { /// /// \return The vector containing an OV node which produces the output /// of an ONNX ArgMax operation. -ov::OutputVector argmax(const Node& node); +ov::OutputVector argmax(const ov::frontend::onnx::Node& node); } // namespace set_12 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/argmin.cpp b/src/frontends/onnx/frontend/src/op/argmin.cpp index cf5074d89ba284..a2ed93569c07ff 100644 --- a/src/frontends/onnx/frontend/src/op/argmin.cpp +++ b/src/frontends/onnx/frontend/src/op/argmin.cpp @@ -7,12 +7,12 @@ #include "exceptions.hpp" #include "utils/arg_min_max_factory.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector argmin(const Node& node) { +ov::OutputVector argmin(const ov::frontend::onnx::Node& node) { const utils::ArgMinMaxFactory arg_factory(node); return {arg_factory.make_arg_min()}; } @@ -20,16 +20,13 @@ ov::OutputVector argmin(const Node& node) { } // namespace set_1 namespace set_12 { -ov::OutputVector argmin(const Node& node) { +ov::OutputVector argmin(const ov::frontend::onnx::Node& node) { const utils::ArgMinMaxFactory arg_factory(node); return {arg_factory.make_arg_min()}; } } // namespace set_12 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/argmin.hpp b/src/frontends/onnx/frontend/src/op/argmin.hpp index 62c148aa85fd62..25712fd1d49626 100644 --- a/src/frontends/onnx/frontend/src/op/argmin.hpp +++ b/src/frontends/onnx/frontend/src/op/argmin.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Convert ONNX ArgMin operation to an OV node. @@ -19,7 +17,7 @@ namespace set_1 { /// /// \return The vector containing an OV node which produces the output /// of an ONNX ArgMin operation. 
-ov::OutputVector argmin(const Node& node); +ov::OutputVector argmin(const ov::frontend::onnx::Node& node); } // namespace set_1 @@ -30,13 +28,10 @@ namespace set_12 { /// /// \return The vector containing an OV node which produces the output /// of an ONNX ArgMax operation. -ov::OutputVector argmin(const Node& node); +ov::OutputVector argmin(const ov::frontend::onnx::Node& node); } // namespace set_12 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/asin.hpp b/src/frontends/onnx/frontend/src/op/asin.hpp index bbfcc2715f873b..e9cb09e322efd1 100644 --- a/src/frontends/onnx/frontend/src/op/asin.hpp +++ b/src/frontends/onnx/frontend/src/op/asin.hpp @@ -4,24 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/asin.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector asin(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector asin(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/asinh.hpp b/src/frontends/onnx/frontend/src/op/asinh.hpp index 52a8e44e87ef9e..927d38abb06666 100644 --- a/src/frontends/onnx/frontend/src/op/asinh.hpp +++ b/src/frontends/onnx/frontend/src/op/asinh.hpp @@ -4,24 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/asinh.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector asinh(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector asinh(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/atan.hpp b/src/frontends/onnx/frontend/src/op/atan.hpp index 72ae701b6b992d..42d4b5379bcbb9 100644 --- a/src/frontends/onnx/frontend/src/op/atan.hpp +++ b/src/frontends/onnx/frontend/src/op/atan.hpp @@ -4,24 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/atan.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector atan(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector atan(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git 
a/src/frontends/onnx/frontend/src/op/atanh.hpp b/src/frontends/onnx/frontend/src/op/atanh.hpp index 65a71a8ce60675..3de6abe6599e2a 100644 --- a/src/frontends/onnx/frontend/src/op/atanh.hpp +++ b/src/frontends/onnx/frontend/src/op/atanh.hpp @@ -4,23 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/atanh.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector atanh(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector atanh(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/aten.cpp b/src/frontends/onnx/frontend/src/op/aten.cpp index 6e3de7a83fab1f..b357d43fa116e8 100644 --- a/src/frontends/onnx/frontend/src/op/aten.cpp +++ b/src/frontends/onnx/frontend/src/op/aten.cpp @@ -19,14 +19,14 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector aten(const Node& node) { - ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector aten(const ov::frontend::onnx::Node& node) { + ov::OutputVector inputs{node.get_ov_inputs()}; const auto operator_name = node.get_attribute_value("operator", ""); CHECK_VALID_NODE(node, @@ -98,6 +98,6 @@ ov::OutputVector aten(const Node& node) { } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/aten.hpp b/src/frontends/onnx/frontend/src/op/aten.hpp index b8b90737085b69..2393eb0fc152f7 100644 --- a/src/frontends/onnx/frontend/src/op/aten.hpp +++ b/src/frontends/onnx/frontend/src/op/aten.hpp @@ -4,20 +4,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector aten(const Node& node); +ov::OutputVector aten(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/average_pool.cpp b/src/frontends/onnx/frontend/src/op/average_pool.cpp index bc646c83ddd0b6..5e731ab8580e46 100644 --- a/src/frontends/onnx/frontend/src/op/average_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/average_pool.cpp @@ -6,20 +6,17 @@ #include "utils/pooling_factory.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector average_pool(const Node& node) { +ov::OutputVector average_pool(const ov::frontend::onnx::Node& node) { return pooling::PoolingFactory(node).make_avg_pool(); } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // 
namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/average_pool.hpp b/src/frontends/onnx/frontend/src/op/average_pool.hpp index b08deccadc92b8..c80aa54517b99b 100644 --- a/src/frontends/onnx/frontend/src/op/average_pool.hpp +++ b/src/frontends/onnx/frontend/src/op/average_pool.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Convert ONNX AveragePool operation to an OV node. @@ -19,13 +17,10 @@ namespace set_1 { /// /// \return The vector containing OV nodes producing output of ONNX AveragePool /// operation. -ov::OutputVector average_pool(const Node& node); +ov::OutputVector average_pool(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/batch_norm.cpp b/src/frontends/onnx/frontend/src/op/batch_norm.cpp index 2f8493fc6a2261..67daaddf0539c6 100644 --- a/src/frontends/onnx/frontend/src/op/batch_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/batch_norm.cpp @@ -13,14 +13,14 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { // This version supports ONNX BatchNormalization-1 and BatchNormalization-6 -ov::OutputVector batch_norm(const Node& node) { - ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) { + ov::OutputVector inputs{node.get_ov_inputs()}; auto x = inputs.at(0); auto scale = inputs.at(1); auto bias = inputs.at(2); @@ -55,8 +55,8 @@ ov::OutputVector batch_norm(const Node& node) { namespace set_7 { // This version supports ONNX BatchNormalization-7 and BatchNormalization-9 -ov::OutputVector batch_norm(const Node& node) { - ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) { + ov::OutputVector inputs{node.get_ov_inputs()}; auto x = inputs.at(0); auto scale = inputs.at(1); auto bias = inputs.at(2); @@ -73,10 +73,7 @@ ov::OutputVector batch_norm(const Node& node) { } } // namespace set_7 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/batch_norm.hpp b/src/frontends/onnx/frontend/src/op/batch_norm.hpp index c3263c5f436537..f060b740eb0fad 100644 --- a/src/frontends/onnx/frontend/src/op/batch_norm.hpp +++ b/src/frontends/onnx/frontend/src/op/batch_norm.hpp @@ -4,27 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector batch_norm(const Node& node); +ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_7 { -ov::OutputVector batch_norm(const Node& node); +ov::OutputVector batch_norm(const ov::frontend::onnx::Node& 
node); } // namespace set_7 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitshift.cpp b/src/frontends/onnx/frontend/src/op/bitshift.cpp index afc942242d0fe6..63b5b37304d4af 100644 --- a/src/frontends/onnx/frontend/src/op/bitshift.cpp +++ b/src/frontends/onnx/frontend/src/op/bitshift.cpp @@ -12,14 +12,14 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bitshift(const Node& node) { - const ov::Output input_x = node.get_ng_inputs().at(0); - const ov::Output input_y = node.get_ng_inputs().at(1); +ov::OutputVector bitshift(const ov::frontend::onnx::Node& node) { + const ov::Output input_x = node.get_ov_inputs().at(0); + const ov::Output input_y = node.get_ov_inputs().at(1); std::string direction = node.get_attribute_value("direction", ""); @@ -42,10 +42,7 @@ ov::OutputVector bitshift(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitshift.hpp b/src/frontends/onnx/frontend/src/op/bitshift.hpp index c1b0fcdceb5329..6450a88bf22b30 100644 --- a/src/frontends/onnx/frontend/src/op/bitshift.hpp +++ b/src/frontends/onnx/frontend/src/op/bitshift.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bitshift(const Node& node); +ov::OutputVector bitshift(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_and.cpp b/src/frontends/onnx/frontend/src/op/bitwise_and.cpp index d13ea78c249dde..5d88117815121a 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_and.cpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_and.cpp @@ -3,22 +3,23 @@ // #include "op/bitwise_and.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START #include "openvino/op/bitwise_and.hpp" using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bitwise_and(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector bitwise_and(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); OPENVINO_ASSERT(inputs.size() == 2); return {std::make_shared(inputs[0], inputs[1])}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_and.hpp b/src/frontends/onnx/frontend/src/op/bitwise_and.hpp index 8c86cbd8160d78..5b30fe5adf76a2 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_and.hpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_and.hpp @@ -4,21 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" 
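A note on the recurring shape of these changes: each converter moves out of the deprecated ngraph::onnx_import namespaces into ov::frontend::onnx, swaps get_ng_inputs() for get_ov_inputs(), and drops the OPENVINO_SUPPRESS_DEPRECATED_START/END guards. A minimal sketch of a post-migration binary converter, illustrative only and not part of the patch (it assumes the frontend's "core/node.hpp" is on the include path and that ONNX BitwiseAnd maps to the opset-13 op, as in current OpenVINO):

#include "core/node.hpp"

#include "openvino/core/except.hpp"
#include "openvino/op/bitwise_and.hpp"

namespace ov {
namespace frontend {
namespace onnx {
namespace op {
namespace set_1 {
inline ov::OutputVector bitwise_and_sketch(const ov::frontend::onnx::Node& node) {
    // get_ov_inputs() is the renamed replacement for the old get_ng_inputs().
    const auto inputs = node.get_ov_inputs();
    OPENVINO_ASSERT(inputs.size() == 2);  // ONNX BitwiseAnd is strictly binary
    return {std::make_shared<ov::op::v13::BitwiseAnd>(inputs[0], inputs[1])};
}
}  // namespace set_1
}  // namespace op
}  // namespace onnx
}  // namespace frontend
}  // namespace ov

The same three-line body, modulo the op type (and a size() == 1 check for the unary bitwise_not), covers the bitwise_not, bitwise_or and bitwise_xor converters in this stretch of the patch.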
-OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bitwise_and(const Node& node); +ov::OutputVector bitwise_and(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_not.cpp b/src/frontends/onnx/frontend/src/op/bitwise_not.cpp index d59e4593e1aa09..4e9dd55052c292 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_not.cpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_not.cpp @@ -3,23 +3,23 @@ // #include "op/bitwise_not.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START #include "openvino/op/bitwise_not.hpp" using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bitwise_not(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector bitwise_not(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); OPENVINO_ASSERT(inputs.size() == 1); return {std::make_shared(inputs[0])}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_not.hpp b/src/frontends/onnx/frontend/src/op/bitwise_not.hpp index c70a3658e4df6f..e1ac92a436dc6f 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_not.hpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_not.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bitwise_not(const Node& node); +ov::OutputVector bitwise_not(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_or.cpp b/src/frontends/onnx/frontend/src/op/bitwise_or.cpp index 7a7b6a415825a0..f0278dde8f3ced 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_or.cpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_or.cpp @@ -3,22 +3,23 @@ // #include "op/bitwise_or.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START #include "openvino/op/bitwise_or.hpp" using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bitwise_or(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector bitwise_or(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); OPENVINO_ASSERT(inputs.size() == 2); return {std::make_shared(inputs[0], inputs[1])}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_or.hpp b/src/frontends/onnx/frontend/src/op/bitwise_or.hpp index bcb71fb696daf2..7bee82aacc24d7 
100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_or.hpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_or.hpp @@ -4,21 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bitwise_or(const Node& node); +ov::OutputVector bitwise_or(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp b/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp index 8eed1800cece3e..0e965aebb081cc 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_xor.cpp @@ -3,22 +3,23 @@ // #include "op/bitwise_xor.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START #include "openvino/op/bitwise_xor.hpp" using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bitwise_xor(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector bitwise_xor(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); OPENVINO_ASSERT(inputs.size() == 2); return {std::make_shared(inputs[0], inputs[1])}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp b/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp index cd0ce129391e38..682082c00bc367 100644 --- a/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp +++ b/src/frontends/onnx/frontend/src/op/bitwise_xor.hpp @@ -4,21 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bitwise_xor(const Node& node); +ov::OutputVector bitwise_xor(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp index da32d73f11314e..7636b79e08d62d 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.cpp @@ -19,13 +19,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector blackmanwindow(const Node& node) { - const auto size = node.get_ng_inputs().at(0); +ov::OutputVector blackmanwindow(const ov::frontend::onnx::Node& node) { + const auto size = node.get_ov_inputs().at(0); const auto output_datatype = common::get_ov_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1) == 1; @@ -78,6 +78,6 @@ ov::OutputVector blackmanwindow(const Node& node) { } } // namespace set_1 } 
// namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp index 58d4b6ea1b3055..719c75ef7b8c37 100644 --- a/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/blackmanwindow.hpp @@ -3,20 +3,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector blackmanwindow(const Node& node); +ov::OutputVector blackmanwindow(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cast.cpp b/src/frontends/onnx/frontend/src/op/cast.cpp index f3a4fb1c5cd9e2..18edc116f4f511 100644 --- a/src/frontends/onnx/frontend/src/op/cast.cpp +++ b/src/frontends/onnx/frontend/src/op/cast.cpp @@ -9,14 +9,14 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector cast(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector cast(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); int64_t target_type = node.get_attribute_value("to"); ov::element::Type elem_type = common::get_ov_element_type(target_type); @@ -25,6 +25,6 @@ ov::OutputVector cast(const Node& node) { } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cast.hpp b/src/frontends/onnx/frontend/src/op/cast.hpp index d14ea2536e7d38..313fac756e1a16 100644 --- a/src/frontends/onnx/frontend/src/op/cast.hpp +++ b/src/frontends/onnx/frontend/src/op/cast.hpp @@ -4,20 +4,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector cast(const Node& node); +ov::OutputVector cast(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cast_like.cpp b/src/frontends/onnx/frontend/src/op/cast_like.cpp index 508b678f9fa3df..7eea8c808206d4 100644 --- a/src/frontends/onnx/frontend/src/op/cast_like.cpp +++ b/src/frontends/onnx/frontend/src/op/cast_like.cpp @@ -8,19 +8,19 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector cast_like(const Node& node) { - auto inputs = node.get_ng_inputs(); +ov::OutputVector cast_like(const ov::frontend::onnx::Node& node) { + auto inputs = node.get_ov_inputs(); return 
{std::make_shared(inputs.at(0), inputs.at(1))}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/cast_like.hpp b/src/frontends/onnx/frontend/src/op/cast_like.hpp index 06b129e816876d..aa7f65326b5e30 100644 --- a/src/frontends/onnx/frontend/src/op/cast_like.hpp +++ b/src/frontends/onnx/frontend/src/op/cast_like.hpp @@ -4,20 +4,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector cast_like(const Node& node); +ov::OutputVector cast_like(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/ceil.hpp b/src/frontends/onnx/frontend/src/op/ceil.hpp index 2669996af3f288..f7ba5c8c2ca015 100644 --- a/src/frontends/onnx/frontend/src/op/ceil.hpp +++ b/src/frontends/onnx/frontend/src/op/ceil.hpp @@ -4,25 +4,20 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/ceiling.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector ceil(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector ceil(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/clip.cpp b/src/frontends/onnx/frontend/src/op/clip.cpp index 45e01fc7514918..5442b54465cda8 100644 --- a/src/frontends/onnx/frontend/src/op/clip.cpp +++ b/src/frontends/onnx/frontend/src/op/clip.cpp @@ -14,13 +14,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector clip(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector clip(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); const double max_value = node.get_attribute_value("max", std::numeric_limits::max()); @@ -71,8 +71,8 @@ std::shared_ptr get_constant_max_of_type(ov::element::Type } } // namespace -ov::OutputVector clip(const Node& node) { - const ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector clip(const ov::frontend::onnx::Node& node) { + const ov::OutputVector inputs{node.get_ov_inputs()}; const ov::Output data = inputs.at(0); const ov::element::Type data_type = data.get_element_type(); ov::Output min; @@ -100,10 +100,7 @@ ov::OutputVector clip(const Node& node) { } } // namespace set_11 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/clip.hpp 
b/src/frontends/onnx/frontend/src/op/clip.hpp index 1115e3630b31c7..fb1cb1759e5069 100644 --- a/src/frontends/onnx/frontend/src/op/clip.hpp +++ b/src/frontends/onnx/frontend/src/op/clip.hpp @@ -4,27 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector clip(const Node& node); +ov::OutputVector clip(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_11 { -ov::OutputVector clip(const Node& node); +ov::OutputVector clip(const ov::frontend::onnx::Node& node); } // namespace set_11 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp index dcc663fff128e3..375e90f5084c43 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp @@ -36,13 +36,14 @@ #include "openvino/op/subtract.hpp" #include "openvino/op/transpose.hpp" #include "openvino/op/unsqueeze.hpp" -#include "ov_models/ov_builders/split.hpp" +#include "utils/split.hpp" using namespace ov::op; using ov::Shape; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace detail { namespace { @@ -70,8 +71,8 @@ std::shared_ptr get_present_state(const std::shared_ptr& K, } // namespace detail namespace set_1 { -ov::OutputVector attention(const Node& node) { - auto nodes = node.get_ng_inputs(); +ov::OutputVector attention(const ov::frontend::onnx::Node& node) { + auto nodes = node.get_ov_inputs(); const auto& input = nodes[0]; const auto& weights = nodes[1]; const auto& bias = nodes[2]; @@ -120,8 +121,8 @@ namespace detail { namespace { std::shared_ptr get_dimensions(const std::shared_ptr& shape, const std::vector& dims) { - static const auto zero = v0::Constant::create(ov::element::i32, Shape{}, {0}); - const auto dims_const = v0::Constant::create(ov::element::i32, Shape{dims.size()}, dims); + static const auto zero = v0::Constant::create(ov::element::i32, ov::Shape{}, {0}); + const auto dims_const = v0::Constant::create(ov::element::i32, ov::Shape{dims.size()}, dims); return std::make_shared(shape, dims_const, zero); } @@ -131,9 +132,9 @@ std::shared_ptr get_dimensions(const std::shared_ptr& node, std::shared_ptr get_hidden_size(const std::shared_ptr& node_shape) { // node has shape (batch_size, sequence_length, 3 * hidden_size) - const auto zero = v0::Constant::create(ov::element::i32, Shape{}, {0}); + const auto zero = v0::Constant::create(ov::element::i32, ov::Shape{}, {0}); const auto hidden_size_x3 = get_dimensions(node_shape, {2}); - const auto three = v0::Constant::create(ov::element::i64, Shape{}, {3}); + const auto three = v0::Constant::create(ov::element::i64, ov::Shape{}, {3}); const auto hidden_size = std::make_shared(hidden_size_x3, three); return hidden_size; } @@ -148,13 +149,13 @@ ov::NodeVector split_to_QKV(const std::shared_ptr& node, // node has shape (batch_size, sequence_length, 3 * hidden_size) // fetch the first two dimensions const auto batch_size_seq_len = get_dimensions(node_shape, {0, 1}); - const auto num_heads_node = v0::Constant::create(ov::element::i64, Shape{1}, 
{num_heads}); + const auto num_heads_node = v0::Constant::create(ov::element::i64, ov::Shape{1}, {num_heads}); if (qkv_hidden_sizes.size() == 0) { const auto hidden_size = get_hidden_size(node_shape); // head_size = hidden_size / num_heads head_size = std::make_shared(hidden_size, num_heads_node); // split the node into 3 even parts Q, K, V with shape (batch_size, sequence_len, hidden_size) - split = ov::op::util::split(node, 3, 2); + split = ov::op::util::make_split(node, 3, 2); // and reshape each part to new shape (batch_size, sequence_len, num_heads, head_size) auto new_shape = std::make_shared(ov::NodeVector{batch_size_seq_len, num_heads_node, head_size}, 0); for (size_t i = 0; i < split.size(); i++) { @@ -172,23 +173,23 @@ ov::NodeVector split_to_QKV(const std::shared_ptr& node, // Q: (batch_size, sequence_len, qkv_hidden_sizes[0]) // K: (batch_size, sequence_len, qkv_hidden_sizes[1]) // V: (batch_size, sequence_len, qkv_hidden_sizes[2]) - split = ov::op::util::split(node, qkv_hidden_sizes, 2); + split = ov::op::util::make_split(node, qkv_hidden_sizes, 2); // and reshape each part to new shape (batch_size, sequence_len, num_heads, head_size) for (size_t i = 0; i < split.size(); i++) { auto new_shape = std::make_shared( ov::NodeVector{batch_size_seq_len, num_heads_node, - v0::Constant::create(ov::element::i64, Shape{1}, {qkv_hidden_sizes[i] / num_heads})}, + v0::Constant::create(ov::element::i64, ov::Shape{1}, {qkv_hidden_sizes[i] / num_heads})}, 0); split[i] = std::make_shared(split[i], new_shape, false); } float head_size_val = qkv_hidden_sizes[0] > 0 ? static_cast(qkv_hidden_sizes[0]) / num_heads : static_cast(qkv_hidden_sizes[2]) / num_heads; - head_size = v0::Constant::create(node_type, Shape{1}, {head_size_val}); + head_size = v0::Constant::create(node_type, ov::Shape{1}, {head_size_val}); } // transpose Q, K and V to (batch_size, num_heads, sequence_len, head_size) - auto perm = v0::Constant::create(ov::element::i64, Shape{4}, {0, 2, 1, 3}); + auto perm = v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 1, 3}); auto Q = std::make_shared(split[0], perm); auto K = std::make_shared(split[1], perm); auto V = std::make_shared(split[2], perm); @@ -270,42 +271,43 @@ std::shared_ptr attention_mask_from_indices(const ov::Output const ov::element::Type_t& type, const std::shared_ptr& batch_size, const std::shared_ptr& all_seq_len) { - const auto zero = v0::Constant::create(ov::element::i64, Shape{}, {0}); - const auto one = v0::Constant::create(ov::element::i64, Shape{}, {1}); + const auto zero = v0::Constant::create(ov::element::i64, ov::Shape{}, {0}); + const auto one = v0::Constant::create(ov::element::i64, ov::Shape{}, {1}); const auto stop = std::make_shared(all_seq_len, zero); std::shared_ptr base = std::make_shared(zero, stop, one, mask_index.get_element_type()); const auto target_shape = std::make_shared(ov::NodeVector{batch_size, all_seq_len}, 0); // broadcast 'base' to (batch_size, all_seq_len) base = std::make_shared(base, target_shape); - const auto indices_shape = - std::make_shared(ov::NodeVector{v0::Constant::create(ov::element::i64, Shape{1}, {-1}), batch_size}, - 0); + const auto indices_shape = std::make_shared( + ov::NodeVector{v0::Constant::create(ov::element::i64, ov::Shape{1}, {-1}), batch_size}, + + 0); std::shared_ptr indices = std::make_shared(mask_index, indices_shape, false); // fetch first row from indices std::shared_ptr tail_range_indices = std::make_shared(indices, zero, zero); tail_range_indices = std::make_shared(tail_range_indices, - 
v0::Constant::create(ov::element::i32, Shape{2}, {-1, 1}), + v0::Constant::create(ov::element::i32, ov::Shape{2}, {-1, 1}), false); const auto greater_eq = std::make_shared(base, tail_range_indices); std::shared_ptr tail_range_mask = std::make_shared(std::make_shared(greater_eq, type), - v0::Constant::create(type, Shape{}, {-10000})); + v0::Constant::create(type, ov::Shape{}, {-10000})); tail_range_mask = - std::make_shared(tail_range_mask, v0::Constant::create(ov::element::i64, Shape{2}, {1, 2})); + std::make_shared(tail_range_mask, v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 2})); - const auto gather_index = std::make_shared(v0::Constant::create(ov::element::i64, Shape{}, {1}), + const auto gather_index = std::make_shared(v0::Constant::create(ov::element::i64, ov::Shape{}, {1}), get_dimensions(indices, {0})); // fetch indices from the second row (or first if not available) std::shared_ptr head_range_indices = std::make_shared(indices, gather_index, zero); head_range_indices = std::make_shared(head_range_indices, - v0::Constant::create(ov::element::i32, Shape{2}, {-1, 1}), + v0::Constant::create(ov::element::i32, ov::Shape{2}, {-1, 1}), false); const auto less = std::make_shared(base, head_range_indices); std::shared_ptr mask = std::make_shared(less, greater_eq); mask = std::make_shared(std::make_shared(mask, type), - v0::Constant::create(type, Shape{}, {-10000})); + v0::Constant::create(type, ov::Shape{}, {-10000})); // reshape from (batch_size, all_seq_len) to (batch_size, 1, 1, all_seq_len) - mask = std::make_shared(mask, v0::Constant::create(ov::element::i64, Shape{2}, {1, 2})); + mask = std::make_shared(mask, v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 2})); const auto mask_index_first_dim = get_dimensions(mask_index.get_node_shared_ptr(), {0}); // compare mask_index.shape[0] with batch_size value @@ -346,8 +348,8 @@ NodeTuple unidirectional_mask(const ov::element::Type_t& type, const std::shared_ptr& seq_len, const std::shared_ptr& all_seq_len, const std::shared_ptr& past_seq_len) { - const auto zero = v0::Constant::create(ov::element::i64, Shape{}, {0}); - const auto one = v0::Constant::create(ov::element::i64, Shape{}, {1}); + const auto zero = v0::Constant::create(ov::element::i64, ov::Shape{}, {0}); + const auto one = v0::Constant::create(ov::element::i64, ov::Shape{}, {1}); const auto stop = std::make_shared(all_seq_len, zero); std::shared_ptr bin_mask = std::make_shared(zero, stop, one, ov::element::i32); auto target_shape = std::make_shared(ov::NodeVector{seq_len, all_seq_len}, 0); @@ -355,11 +357,11 @@ NodeTuple unidirectional_mask(const ov::element::Type_t& type, auto start = std::make_shared(std::make_shared(past_seq_len, one), zero); auto end = std::make_shared(std::make_shared(all_seq_len, one), zero); auto indices = std::make_shared(std::make_shared(start, end, one, ov::element::i32), - v0::Constant::create(ov::element::i32, Shape{1}, {1})); + v0::Constant::create(ov::element::i32, ov::Shape{1}, {1})); bin_mask = std::make_shared(bin_mask, indices); std::shared_ptr attention_mask = std::make_shared(std::make_shared(bin_mask, type), - v0::Constant::create(type, Shape{}, {-10000})); + v0::Constant::create(type, ov::Shape{}, {-10000})); bin_mask = std::make_shared(std::make_shared(bin_mask), type); return NodeTuple{attention_mask, bin_mask}; } @@ -373,27 +375,29 @@ NodeTuple unidirectional_mask(const ov::element::Type_t& type, // (batch_size, past_sequence_length + sequence_length) or // (batch_size, sequence_length, past_sequence_length + 
sequence_length) // -// Shape (batch_size, 1, max_sequence_length, max_sequence_length) is not supported in onnxruntime: +// ov::Shape (batch_size, 1, max_sequence_length, max_sequence_length) is not supported in onnxruntime: // https://github.com/microsoft/onnxruntime/blob/851554536ca8185b3413ee57449ea5ac93370193/onnxruntime/contrib_ops/cpu/bert/attention_helper.h#L78 std::shared_ptr raw_mask(const ov::Output& mask_index, ov::Dimension::value_type mask_rank, const ov::element::Type_t& type) { std::shared_ptr mask = std::make_shared(mask_index, type); mask = std::make_shared(mask, type); - mask = std::make_shared(v0::Constant::create(type, Shape{}, {1}), mask); - mask = std::make_shared(mask, v0::Constant::create(type, Shape{}, {-10000})); + mask = std::make_shared(v0::Constant::create(type, ov::Shape{}, {1}), mask); + mask = std::make_shared(mask, v0::Constant::create(type, ov::Shape{}, {-10000})); switch (mask_rank) { // Handle mask_index with (batch_size, past_sequence_length + sequence_length) shape // Reshape it to (batch_size, 1, 1, past_sequence_length + sequence_length) case 2: - mask = - std::make_shared(mask, v0::Constant::create(ov::element::i64, Shape{4}, {0, 1, 1, -1}), true); + mask = std::make_shared(mask, + v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 1, 1, -1}), + true); break; // Handle mask_index with (batch_size, sequence_length, past_sequence_length + sequence_length) shape // Reshape it to (batch_size, 1, sequence_length, past_sequence_length + sequence_length) case 3: - mask = - std::make_shared(mask, v0::Constant::create(ov::element::i64, Shape{4}, {0, 1, 0, -1}), true); + mask = std::make_shared(mask, + v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 1, 0, -1}), + true); break; } return mask; @@ -404,8 +408,8 @@ bool is_past_input_available(const ov::OutputVector& op_inputs) { } NodeTuple get_attention_mask(const ov::OutputVector& op_inputs, bool unidirectional) { - const auto zero = v0::Constant::create(ov::element::i64, Shape{1}, {0}); - const auto one = v0::Constant::create(ov::element::i64, Shape{1}, {1}); + const auto zero = v0::Constant::create(ov::element::i64, ov::Shape{1}, {0}); + const auto one = v0::Constant::create(ov::element::i64, ov::Shape{1}, {1}); std::shared_ptr past_seq_len; // get the value of past_sequence_length @@ -464,7 +468,7 @@ std::shared_ptr attention_softmax(const ov::OutputVector& op_inputs, const std::shared_ptr& bin_mask, const std::shared_ptr& head_size, bool unidirectional) { - auto zero = v0::Constant::create(ov::element::i64, Shape{}, {0}); + auto zero = v0::Constant::create(ov::element::i64, ov::Shape{}, {0}); if (is_past_input_available(op_inputs)) { // concat past K and V with present ones const auto& past = op_inputs[4]; @@ -473,7 +477,7 @@ std::shared_ptr attention_softmax(const ov::OutputVector& op_inputs, // (2, batch_size, num_heads, past_sequence_length + sequence_length, head_size) // so we need to split it into two parts, remove first dimension from each part and concatenate first part // with current K and second part with current V - const auto split = ov::op::util::split(past, 2, 0); + const auto split = ov::op::util::make_split(past, 2, 0); const auto past_K = std::make_shared(split[0], zero); K = std::make_shared(ov::NodeVector{past_K, K}, 2); const auto past_V = std::make_shared(split[1], zero); @@ -508,9 +512,9 @@ std::shared_ptr attention_softmax(const ov::OutputVector& op_inputs, std::shared_ptr output = std::make_shared(softmax, V); // transpose the result from (batch_size, num_heads, 
sequence_length, head_size) // to (batch_size, sequence_length, num_heads, head_size) - const auto perm = v0::Constant::create(ov::element::i64, Shape{4}, {0, 2, 1, 3}); + const auto perm = v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 1, 3}); output = std::make_shared(output, perm); - auto new_shape = v0::Constant::create(ov::element::i32, Shape{3}, {0, 0, -1}); + auto new_shape = v0::Constant::create(ov::element::i32, ov::Shape{3}, {0, 0, -1}); // reshape the result from (batch_size, sequence_length, num_heads, head_size) to (batch_size, sequence_length, // num_heads * head_size) output = std::make_shared(output, new_shape, true); @@ -525,7 +529,7 @@ std::shared_ptr attention_softmax(const ov::OutputVector& op_inputs, std::shared_ptr get_present_state(const std::shared_ptr& K, const std::shared_ptr& V, const ov::OutputVector& op_inputs) { - auto zero = v0::Constant::create(ov::element::i64, Shape{1}, {0}); + auto zero = v0::Constant::create(ov::element::i64, ov::Shape{1}, {0}); // expand K shape (batch_size, num_heads, sequence_length, head_size) to // (1, batch_size, num_heads, sequence_length, head_size) auto K_unsqueezed = std::make_shared(K, zero); @@ -557,5 +561,6 @@ std::shared_ptr get_present_state(const std::shared_ptr& K, } // namespace } // namespace detail } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.hpp index 090a364d5aa0b5..9b851563526a56 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.hpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.hpp @@ -4,17 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector attention(const Node& node); +ov::OutputVector attention(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp index 9011638b1e0ef1..a0eec5fceeaf20 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.cpp @@ -10,16 +10,18 @@ using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bias_gelu(const Node& node) { - auto nodes = node.get_ng_inputs(); +ov::OutputVector bias_gelu(const ov::frontend::onnx::Node& node) { + auto nodes = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(nodes.size() == 2, "BiasGelu takes 2 inputs. 
Provided " + std::to_string(nodes.size())); return {std::make_shared(std::make_shared(nodes.at(0), nodes.at(1)))}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.hpp index bf4c58d0036861..6bf0ad41033a4a 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.hpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/bias_gelu.hpp @@ -4,17 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector bias_gelu(const Node& node); +ov::OutputVector bias_gelu(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp index 22fd04e616ab11..eae772aaeac2f4 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.cpp @@ -19,12 +19,13 @@ using namespace ov::op; using ov::Shape; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector embed_layer_normalization(const Node& node) { - auto nodes = node.get_ng_inputs(); +ov::OutputVector embed_layer_normalization(const ov::frontend::onnx::Node& node) { + auto nodes = node.get_ov_inputs(); auto num_nodes = nodes.size(); FRONT_END_GENERAL_CHECK(num_nodes >= 7 && num_nodes <= 9, @@ -39,7 +40,7 @@ ov::OutputVector embed_layer_normalization(const Node& node) { const auto& gamma = nodes[5]; const auto& beta = nodes[6]; - const auto zero = v0::Constant::create(ov::element::i32, Shape{1}, {0}); + const auto zero = v0::Constant::create(ov::element::i32, ov::Shape{1}, {0}); std::shared_ptr input = std::make_shared(word_embeddings, input_ids, zero, 0); // add position embeddings if (num_nodes > 8 && !ov::op::util::is_null(nodes[8])) { @@ -55,7 +56,7 @@ ov::OutputVector embed_layer_normalization(const Node& node) { // therefore input and position_embeddings cannot be added together // so we need slice the position_embeddings to [sequence_length, hidden_size] first // then add it with input. 
- const auto one = v0::Constant::create(ov::element::i32, Shape{1}, {1}); + const auto one = v0::Constant::create(ov::element::i32, ov::Shape{1}, {1}); const auto input_ids_shape = std::make_shared(input_ids, ov::element::i32); const auto seqlen = std::make_shared(input_ids_shape, one, zero, 0); const auto gathered_position_embeddings = @@ -76,7 +77,7 @@ ov::OutputVector embed_layer_normalization(const Node& node) { // hidden_size dimension is 2 here, because the shape after Gather(word_embedding, input_ids) // is (batch_size, seq_len, hidden_size) int hidden_size_dim = 2; - const auto reduction_axes = v0::Constant::create(ov::element::i32, Shape{1}, {hidden_size_dim}); + const auto reduction_axes = v0::Constant::create(ov::element::i32, ov::Shape{1}, {hidden_size_dim}); std::shared_ptr result = std::make_shared(input, reduction_axes, true, eps, ov::op::MVNEpsMode::INSIDE_SQRT); @@ -88,7 +89,7 @@ ov::OutputVector embed_layer_normalization(const Node& node) { std::shared_ptr mask_index; if (num_nodes > 7 && !ov::op::util::is_null(nodes[7])) { FRONT_END_GENERAL_CHECK(nodes[7].get_element_type() == ov::element::i32, "mask must have int32 type"); - auto axis = v0::Constant::create(ov::element::i32, Shape{}, {1}); + auto axis = v0::Constant::create(ov::element::i32, ov::Shape{}, {1}); mask_index = std::make_shared(nodes[7], axis, false); } else { auto batch_size = std::make_shared(std::make_shared(nodes[0]), @@ -100,5 +101,6 @@ ov::OutputVector embed_layer_normalization(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.hpp index a0bc8531dea074..2106b64aadcd36 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.hpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/embed_layer_normalization.hpp @@ -4,17 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector embed_layer_normalization(const Node& node); +ov::OutputVector embed_layer_normalization(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp index 5f7e07727d5591..79badc0d048bf0 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.cpp @@ -22,15 +22,16 @@ using namespace ov::op; using ov::Shape; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector fused_conv(const Node& node) { +ov::OutputVector fused_conv(const ov::frontend::onnx::Node& node) { auto conv_res = conv(node).at(0); - if (node.get_ng_inputs().size() == 4) { // Z input provided - conv_res = std::make_shared(conv_res, node.get_ng_inputs()[3]); + if (node.get_ov_inputs().size() == 4) { // Z input provided + conv_res = std::make_shared(conv_res, node.get_ov_inputs()[3]); 
} const auto activation_type = node.get_attribute_value("activation"); @@ -51,14 +52,14 @@ ov::OutputVector fused_conv(const Node& node) { CHECK_VALID_NODE(node, activation_params.size() == 1, "activation_alpha attribute of LeakyRelu activation function was not provided"); - const auto activation_alpha_node = v0::Constant::create(ov::element::f32, Shape{}, activation_params); + const auto activation_alpha_node = v0::Constant::create(ov::element::f32, ov::Shape{}, activation_params); return {std::make_shared(conv_res, activation_alpha_node)}; } else if (activation_type == "HardSigmoid") { CHECK_VALID_NODE(node, activation_params.size() == 2, "alpha and beta attributes of HardSigmoid activation function were not provided"); - const auto alpha = v0::Constant::create(ov::element::f32, Shape{}, {activation_params[0]}); - const auto beta = v0::Constant::create(ov::element::f32, Shape{}, {activation_params[1]}); + const auto alpha = v0::Constant::create(ov::element::f32, ov::Shape{}, {activation_params[0]}); + const auto beta = v0::Constant::create(ov::element::f32, ov::Shape{}, {activation_params[1]}); return {std::make_shared(conv_res, alpha, beta)}; } CHECK_VALID_NODE(node, @@ -71,9 +72,7 @@ ov::OutputVector fused_conv(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.hpp index 6083e08913ee3c..2d46fceeded4be 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.hpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/fused_conv.hpp @@ -4,17 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector fused_conv(const Node& node); +ov::OutputVector fused_conv(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp index e08312c4358a3e..8359d76617b79a 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp @@ -18,12 +18,13 @@ using namespace ov::op; using ov::Shape; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector fusedgemm(const Node& node) { - ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector fusedgemm(const ov::frontend::onnx::Node& node) { + ov::OutputVector inputs{node.get_ov_inputs()}; auto num_inputs = inputs.size(); FRONT_END_GENERAL_CHECK(num_inputs == 2 || num_inputs == 3, "FusedGemm takes 2/3 inputs. 
Provided " + std::to_string(num_inputs)); @@ -56,16 +57,14 @@ ov::OutputVector fusedgemm(const Node& node) { if (activation_type == "LeakyRelu") { double activation_alpha = node.get_attribute_value("activation_alpha", 0.01); std::shared_ptr activation_alpha_node = - v0::Constant::create(input_c.get_element_type(), Shape{1}, {activation_alpha}); + v0::Constant::create(input_c.get_element_type(), ov::Shape{1}, {activation_alpha}); return {std::make_shared(gemm_res, activation_alpha_node)}; } return {std::make_shared(gemm_res)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.hpp index 1c70a9beed2ee5..630a03154a2ef7 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.hpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.hpp @@ -4,17 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector fusedgemm(const Node& node); +ov::OutputVector fusedgemm(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp index 8e853aa0818c2a..f003ace40cdfd2 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.cpp @@ -13,12 +13,13 @@ using namespace ov::op; using ov::Shape; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector skip_layer_normalization(const Node& node) { - auto nodes = node.get_ng_inputs(); +ov::OutputVector skip_layer_normalization(const ov::frontend::onnx::Node& node) { + auto nodes = node.get_ov_inputs(); auto num_nodes = nodes.size(); FRONT_END_GENERAL_CHECK(num_nodes >= 3 && num_nodes <= 5, "SkipLayerNormalization takes 3, 4 or 5 inputs. 
Provided " + std::to_string(num_nodes)); @@ -32,7 +33,7 @@ ov::OutputVector skip_layer_normalization(const Node& node) { float eps = node.get_attribute_value("epsilon"); // reduce over hidden_size int hidden_size_dim = 2; - const auto reduction_axes = v0::Constant::create(ov::element::i32, Shape{1}, {hidden_size_dim}); + const auto reduction_axes = v0::Constant::create(ov::element::i32, ov::Shape{1}, {hidden_size_dim}); std::shared_ptr result = std::make_shared(input, reduction_axes, true, eps, ov::op::MVNEpsMode::INSIDE_SQRT); // multiply by gamma @@ -48,5 +49,6 @@ ov::OutputVector skip_layer_normalization(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.hpp b/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.hpp index 11e19bf1bbbda6..7f54e5e9344dcf 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.hpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/skip_layer_normalization.hpp @@ -4,17 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector skip_layer_normalization(const Node& node); +ov::OutputVector skip_layer_normalization(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/compress.cpp b/src/frontends/onnx/frontend/src/op/compress.cpp index 023465f803b65c..84a11c2f17734a 100644 --- a/src/frontends/onnx/frontend/src/op/compress.cpp +++ b/src/frontends/onnx/frontend/src/op/compress.cpp @@ -8,19 +8,19 @@ #include "openvino/op/gather.hpp" #include "openvino/op/non_zero.hpp" #include "openvino/op/squeeze.hpp" -#include "ov_models/ov_builders/reshape.hpp" +#include "utils/reshape.hpp" using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector compress(const Node& node) { - auto data = node.get_ng_inputs().at(0); - auto condition = node.get_ng_inputs().at(1); +ov::OutputVector compress(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); + auto condition = node.get_ov_inputs().at(1); int64_t axis = 0; if (node.has_attribute("axis")) { @@ -30,8 +30,8 @@ ov::OutputVector compress(const Node& node) { data = std::make_shared(ov::op::util::flatten(data, static_cast(axis))); data = std::make_shared(ov::op::util::flatten(data, static_cast(axis))); } - auto axis_node = v0::Constant::create(ov::element::i64, Shape{}, {axis}); - auto zero_node = v0::Constant::create(ov::element::i64, Shape{}, {0}); + auto axis_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {axis}); + auto zero_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {0}); auto result = std::make_shared(data, std::make_shared(std::make_shared(condition), zero_node), @@ -40,10 +40,7 @@ ov::OutputVector compress(const Node& node) { return {result}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph 
-OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/compress.hpp b/src/frontends/onnx/frontend/src/op/compress.hpp index 06b67050fc0ba3..7ea4ed5fc5a999 100644 --- a/src/frontends/onnx/frontend/src/op/compress.hpp +++ b/src/frontends/onnx/frontend/src/op/compress.hpp @@ -4,19 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector compress(const Node& node); +ov::OutputVector compress(const ov::frontend::onnx::Node& node); } } // namespace op -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/concat.cpp b/src/frontends/onnx/frontend/src/op/concat.cpp index 6c9e87daaf8ec3..23bbf970657457 100644 --- a/src/frontends/onnx/frontend/src/op/concat.cpp +++ b/src/frontends/onnx/frontend/src/op/concat.cpp @@ -9,13 +9,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector concat(const Node& node) { - ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector concat(const ov::frontend::onnx::Node& node) { + ov::OutputVector inputs{node.get_ov_inputs()}; std::int64_t axis = node.get_attribute_value("axis"); ov::OutputVector valid_inputs; std::copy_if(inputs.begin(), inputs.end(), std::back_inserter(valid_inputs), [](ov::Output& in) -> bool { @@ -25,10 +25,7 @@ ov::OutputVector concat(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/concat.hpp b/src/frontends/onnx/frontend/src/op/concat.hpp index c0244222e2ab61..026c253c433df7 100644 --- a/src/frontends/onnx/frontend/src/op/concat.hpp +++ b/src/frontends/onnx/frontend/src/op/concat.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector concat(const Node& node); +ov::OutputVector concat(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/constant.cpp b/src/frontends/onnx/frontend/src/op/constant.cpp index 90571df7d6705f..79a001aeefd615 100644 --- a/src/frontends/onnx/frontend/src/op/constant.cpp +++ b/src/frontends/onnx/frontend/src/op/constant.cpp @@ -15,9 +15,9 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace { template @@ -39,7 +39,7 @@ std::vector get_dense_vector(const std::vector& values, const std::vector< template std::shared_ptr make_dense_tensor_as_constant(const std::vector& indices, const Tensor& 
values_tensor, - const Shape& shape) { + const ov::Shape& shape) { auto values = values_tensor.get_data(); auto dense_vector = get_dense_vector(values, indices, shape_size(shape)); return v0::Constant::create(values_tensor.get_ov_type(), shape, dense_vector); @@ -47,7 +47,7 @@ std::shared_ptr make_dense_tensor_as_constant(const std::vector get_dense_tensor_as_constant(const std::vector& absolute_indices, const Tensor& values_tensor, - const Shape& shape) { + const ov::Shape& shape) { switch (values_tensor.get_ov_type()) { case ov::element::boolean: return make_dense_tensor_as_constant(absolute_indices, values_tensor, shape); @@ -80,7 +80,7 @@ std::shared_ptr get_dense_tensor_as_constant(const std::vector get_absolute_indices(const Tensor& indices_tensor, const Shape& shape, const size_t& nnz) { +std::vector get_absolute_indices(const Tensor& indices_tensor, const ov::Shape& shape, const size_t& nnz) { auto rank = shape.size(); auto indices = indices_tensor.get_data(); auto indices_shape = indices_tensor.get_shape(); @@ -107,7 +107,7 @@ std::vector get_absolute_indices(const Tensor& indices_tensor, const Sh } // namespace namespace set_1 { -ov::OutputVector constant(const onnx_import::Node& node) { +ov::OutputVector constant(const ov::frontend::onnx::Node& node) { auto tensor = node.get_attribute_value("value"); return {tensor.get_ov_constant()}; } @@ -115,7 +115,7 @@ ov::OutputVector constant(const onnx_import::Node& node) { } // namespace set_1 namespace set_13 { -ov::OutputVector constant(const onnx_import::Node& node) { +ov::OutputVector constant(const ov::frontend::onnx::Node& node) { auto attributes_names = node.get_attribute_names(); FRONT_END_GENERAL_CHECK(attributes_names.size() == 1, "The Constant op expects exactly one attribute." @@ -138,7 +138,7 @@ ov::OutputVector constant(const onnx_import::Node& node) { auto sparse_tensor = attribute.get_sparse_tensor(); const Tensor& values_tensor = sparse_tensor.get_values(); const Tensor& indices_tensor = sparse_tensor.get_indices(); - const Shape& shape = sparse_tensor.get_shape(); + const ov::Shape& shape = sparse_tensor.get_shape(); auto rank = shape.size(); // NNZ - the number of non-zero values in the sparse-tensor auto nnz = values_tensor.get_shape().at(0); @@ -185,6 +185,6 @@ ov::OutputVector constant(const onnx_import::Node& node) { } } // namespace set_13 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/constant.hpp b/src/frontends/onnx/frontend/src/op/constant.hpp index 4106a2d32814fb..d5f9e0e9ab58c7 100644 --- a/src/frontends/onnx/frontend/src/op/constant.hpp +++ b/src/frontends/onnx/frontend/src/op/constant.hpp @@ -4,27 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector constant(const Node& node); +ov::OutputVector constant(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_13 { -ov::OutputVector constant(const Node& node); +ov::OutputVector constant(const ov::frontend::onnx::Node& node); } // namespace set_13 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git 
diff --git a/src/frontends/onnx/frontend/src/op/constant_fill.cpp b/src/frontends/onnx/frontend/src/op/constant_fill.cpp
index f800d0c876fbb4..619891101ba6b3 100644
--- a/src/frontends/onnx/frontend/src/op/constant_fill.cpp
+++ b/src/frontends/onnx/frontend/src/op/constant_fill.cpp
@@ -6,6 +6,8 @@
 
 #include <onnx/onnx_pb.h>  // onnx types
 
+using namespace ::ONNX_NAMESPACE;
+
 #include "exceptions.hpp"
 #include "onnx_common/utils.hpp"
 #include "openvino/op/broadcast.hpp"
@@ -14,12 +16,12 @@
 using namespace ov::op;
 using namespace ov::frontend::onnx::common;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector constant_fill(const Node& node) {
+ov::OutputVector constant_fill(const ov::frontend::onnx::Node& node) {
     ov::Output<ov::Node> target_shape;
     const auto dtype = node.get_attribute_value<int64_t>("dtype", static_cast<int64_t>(TensorProto_DataType_FLOAT));
     const auto ng_type = onnx_to_ov_data_type(static_cast<TensorProto_DataType>(dtype));
@@ -28,9 +30,9 @@ ov::OutputVector constant_fill(const Node& node) {
     if (input_as_shape == 1)  // use the first input as target shape
     {
         CHECK_VALID_NODE(node,
-                         node.get_ng_inputs().size() > 0,
+                         node.get_ov_inputs().size() > 0,
                          "The input which determines output shape was not provided");
-        target_shape = node.get_ng_inputs().at(0);
+        target_shape = node.get_ov_inputs().at(0);
         if (node.has_attribute("extra_shape")) {
             const auto extra_shape_const =
                 node.get_attribute_as_constant<std::vector<int64_t>>("extra_shape", target_shape.get_element_type());
@@ -46,7 +48,6 @@ ov::OutputVector constant_fill(const Node& node) {
 } // namespace set_1
 } // namespace op
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/constant_fill.hpp b/src/frontends/onnx/frontend/src/op/constant_fill.hpp
index 48c14bd170a67d..ddd1f7a230dc0f 100644
--- a/src/frontends/onnx/frontend/src/op/constant_fill.hpp
+++ b/src/frontends/onnx/frontend/src/op/constant_fill.hpp
@@ -4,22 +4,17 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
 // ConstantFill is a deprecated experimental operator removed in ONNX 1.4
-ov::OutputVector constant_fill(const Node& node);
+ov::OutputVector constant_fill(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp
index 49ef373b9025e4..4e1bf529f6ba0e 100644
--- a/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp
+++ b/src/frontends/onnx/frontend/src/op/constant_of_shape.cpp
@@ -14,12 +14,12 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector constant_of_shape(const onnx_import::Node& node) {
+ov::OutputVector constant_of_shape(const ov::frontend::onnx::Node& node) {
     ov::Output<ov::Node> constant_value;
     if (node.has_attribute("value")) {
         auto value_tensor = node.get_attribute_value<Tensor>("value");
@@ -28,7 +28,7 @@ ov::OutputVector constant_of_shape(const onnx_import::Node& node) {
     } else {
         constant_value = v0::Constant::create(ov::element::f32, {}, {0});
     }
-    const auto& inputs = node.get_ng_inputs();
+    const auto& inputs = node.get_ov_inputs();
     if (inputs.size() == 0 || common::is_failsafe_node(inputs[0].get_node_shared_ptr()) ||
         ov::op::util::is_null(inputs[0])) {
         return {constant_value};
@@ -37,10 +37,7 @@ ov::OutputVector constant_of_shape(const onnx_import::Node& node) {
 }
 
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp b/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp
index 93f61d4825494a..41c82fdee90009 100644
--- a/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp
+++ b/src/frontends/onnx/frontend/src/op/constant_of_shape.hpp
@@ -4,22 +4,17 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector constant_of_shape(const Node& node);
+ov::OutputVector constant_of_shape(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
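The constant_of_shape hunks above reduce to a small subgraph: take the "value" attribute (default f32 zero) and broadcast it to the requested shape, unless the shape input is absent, null, or a failsafe stand-in for an invalid initializer, in which case the scalar is returned unchanged. A sketch of the happy path under those assumptions (not the literal frontend code):

#include <memory>

#include "openvino/op/broadcast.hpp"
#include "openvino/op/constant.hpp"

// value (here the f32 zero default) broadcast to a dynamically supplied shape
ov::Output<ov::Node> constant_of_shape_sketch(const ov::Output<ov::Node>& target_shape) {
    auto value = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0});
    return std::make_shared<ov::op::v3::Broadcast>(value, target_shape);
}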
diff --git a/src/frontends/onnx/frontend/src/op/conv.cpp b/src/frontends/onnx/frontend/src/op/conv.cpp
index 8849bb39ad1859..24a53af88eeff9 100644
--- a/src/frontends/onnx/frontend/src/op/conv.cpp
+++ b/src/frontends/onnx/frontend/src/op/conv.cpp
@@ -9,16 +9,15 @@
 #include "openvino/frontend/exception.hpp"
 #include "openvino/op/add.hpp"
 #include "openvino/op/shape_of.hpp"
-#include "ov_models/ov_builders/reshape.hpp"
 #include "utils/conv_factory.hpp"
 #include "utils/convpool.hpp"
 #include "utils/reshape.hpp"
 
-OPENVINO_SUPPRESS_DEPRECATED_START
 using namespace ov::op;
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
 namespace detail {
@@ -30,7 +29,7 @@ std::shared_ptr<ov::Node> add_bias(const ov::Output<ov::Node>& ng_conv, const ov
     return {std::make_shared<v1::Add>(ng_conv, reshape::reshape_channel_shaped_node_to_nchw(bias, conv_rank))};
 }
 
-ov::OutputVector conv(const Node& node,
+ov::OutputVector conv(const ov::frontend::onnx::Node& node,
                       ov::Output<ov::Node> data,
                       ov::Output<ov::Node> filters,
                       ov::Output<ov::Node> bias) {
@@ -73,15 +72,12 @@ ov::OutputVector conv(const Node& node,
 }
 } // namespace detail
 
-ov::OutputVector conv(const Node& node) {
-    const ov::OutputVector& inputs = node.get_ng_inputs();
+ov::OutputVector conv(const ov::frontend::onnx::Node& node) {
+    const ov::OutputVector& inputs = node.get_ov_inputs();
     return detail::conv(node, inputs[0], inputs[1], inputs.size() < 3 ? std::make_shared<NullNode>() : inputs[2]);
 }
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/conv.hpp b/src/frontends/onnx/frontend/src/op/conv.hpp
index e9ac179a27899a..9882758056ad7b 100644
--- a/src/frontends/onnx/frontend/src/op/conv.hpp
+++ b/src/frontends/onnx/frontend/src/op/conv.hpp
@@ -4,18 +4,16 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 #include "openvino/core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
 namespace detail {
-ov::OutputVector conv(const Node& node,
+ov::OutputVector conv(const ov::frontend::onnx::Node& node,
                       ov::Output<ov::Node> data,
                       ov::Output<ov::Node> filters,
                       ov::Output<ov::Node> bias);
@@ -26,13 +24,10 @@ ov::OutputVector conv(const Node& node,
 ///
 /// \return The vector containing OV nodes producing output of ONNX convolution
 ///         operation.
-ov::OutputVector conv(const Node& node);
+ov::OutputVector conv(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/conv_integer.cpp b/src/frontends/onnx/frontend/src/op/conv_integer.cpp
index b729e00cfd4e1b..e40453655bf01e 100644
--- a/src/frontends/onnx/frontend/src/op/conv_integer.cpp
+++ b/src/frontends/onnx/frontend/src/op/conv_integer.cpp
@@ -16,9 +16,9 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace {
 std::shared_ptr<ov::Node> get_filter_zero_point(const ov::OutputVector& inputs) {
     const auto& original_zero_point =
@@ -45,8 +45,8 @@ std::shared_ptr<ov::Node> get_filter_zero_point(const ov::OutputVector& inputs)
 namespace op {
 namespace set_1 {
-ov::OutputVector conv_integer(const Node& node) {
-    const ov::OutputVector& inputs = node.get_ng_inputs();
+ov::OutputVector conv_integer(const ov::frontend::onnx::Node& node) {
+    const ov::OutputVector& inputs = node.get_ov_inputs();
 
     const auto& input = inputs.at(0);
     const auto& filter = inputs.at(1);
@@ -82,6 +82,6 @@ ov::OutputVector conv_integer(const Node& node) {
 }
 } // namespace set_1
 } // namespace op
-} // namespace onnx_import
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/conv_integer.hpp b/src/frontends/onnx/frontend/src/op/conv_integer.hpp
index bbbab49881e97b..2d86bec3176f99 100644
--- a/src/frontends/onnx/frontend/src/op/conv_integer.hpp
+++ b/src/frontends/onnx/frontend/src/op/conv_integer.hpp
@@ -4,13 +4,11 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
 /// \brief Performs ONNX ConvInteger operation.
@@ -19,13 +17,10 @@ namespace set_1 {
 ///
 /// \return The vector containing OV nodes producing output of quantized ONNX
 ///         convolution operation.
-ov::OutputVector conv_integer(const Node& node);
+ov::OutputVector conv_integer(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
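As a reminder of what the ConvInteger translator above has to express: per the ONNX spec, both zero points are subtracted in a wider integer type before the convolution itself runs. A scalar reference for one output element of a 1-D u8 convolution (illustration only, not the graph-building code):

#include <cstdint>
#include <vector>

// y[i] = sum_k (x[i + k] - x_zp) * (w[k] - w_zp), accumulated in int32
int32_t conv_integer_ref(const std::vector<uint8_t>& x,
                         const std::vector<uint8_t>& w,
                         uint8_t x_zp, uint8_t w_zp, size_t i) {
    int32_t acc = 0;
    for (size_t k = 0; k < w.size(); ++k) {
        acc += (int32_t(x[i + k]) - int32_t(x_zp)) * (int32_t(w[k]) - int32_t(w_zp));
    }
    return acc;
}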
diff --git a/src/frontends/onnx/frontend/src/op/conv_transpose.cpp b/src/frontends/onnx/frontend/src/op/conv_transpose.cpp
index c6bd9c80b919bd..2cdc88ba784d22 100644
--- a/src/frontends/onnx/frontend/src/op/conv_transpose.cpp
+++ b/src/frontends/onnx/frontend/src/op/conv_transpose.cpp
@@ -16,24 +16,24 @@
 #include "openvino/op/shape_of.hpp"
 #include "openvino/op/strided_slice.hpp"
 #include "openvino/op/subtract.hpp"
-#include "ov_models/ov_builders/reshape.hpp"
 #include "utils/convpool.hpp"
+#include "utils/reshape.hpp"
 
 using namespace ov::op;
 using ov::CoordinateDiff;
 using ov::Shape;
 using ov::Strides;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
 namespace {
 ov::Output<ov::Node> make_group_conv_backprop(const ov::Output<ov::Node>& data,
                                               const ov::Output<ov::Node>& filters,
-                                              const Strides& strides,
-                                              const Strides& dilations,
+                                              const ov::Strides& strides,
+                                              const ov::Strides& dilations,
                                               const ov::CoordinateDiff& pads_begin,
                                               const ov::CoordinateDiff& pads_end,
                                               const ov::op::PadType& auto_pad_type,
@@ -53,7 +53,7 @@ ov::Output<ov::Node> make_group_conv_backprop(const ov::Output<ov::Node>& data,
         return std::make_shared<v1::GroupConvolutionBackpropData>(
             data,
             filters,
-            v0::Constant::create(ov::element::i64, Shape{output_shape.size()}, output_shape),
+            v0::Constant::create(ov::element::i64, ov::Shape{output_shape.size()}, output_shape),
             strides,
             dilations,
             auto_pad_type,
@@ -63,8 +63,8 @@ ov::Output<ov::Node> make_group_conv_backprop(const ov::Output<ov::Node>& data,
 
 ov::Output<ov::Node> make_conv_backprop(const ov::Output<ov::Node>& data,
                                         const ov::Output<ov::Node>& filters,
-                                        const Strides& strides,
-                                        const Strides& dilations,
+                                        const ov::Strides& strides,
+                                        const ov::Strides& dilations,
                                         const ov::CoordinateDiff& pads_begin,
                                         const ov::CoordinateDiff& pads_end,
                                         const ov::op::PadType& auto_pad_type,
@@ -84,7 +84,7 @@ ov::Output<ov::Node> make_conv_backprop(const ov::Output<ov::Node>& data,
         return std::make_shared<v1::ConvolutionBackpropData>(
             data,
             filters,
-            v0::Constant::create(ov::element::i64, Shape{output_shape.size()}, output_shape),
+            v0::Constant::create(ov::element::i64, ov::Shape{output_shape.size()}, output_shape),
             strides,
             pads_begin,
             pads_end,
@@ -100,17 +100,17 @@ ov::Output<ov::Node> get_prepared_bias(const ov::Output<ov::Node>& bias, const o
     std::shared_ptr<ov::Node> bias_shape_node;
 
     if (conv_pshape.rank().is_static() && conv_pshape[1].is_static()) {
-        Shape new_bias_shape(conv_pshape.rank().get_length(), 1);
+        ov::Shape new_bias_shape(conv_pshape.rank().get_length(), 1);
         new_bias_shape[1] = conv_pshape[1].get_length();
-        bias_shape_node = v0::Constant::create(ov::element::i64, Shape{new_bias_shape.size()}, new_bias_shape);
+        bias_shape_node = v0::Constant::create(ov::element::i64, ov::Shape{new_bias_shape.size()}, new_bias_shape);
     } else {
         const auto conv_shape = std::make_shared<v3::ShapeOf>(conv);
         const auto conv_rank = std::make_shared<v3::ShapeOf>(conv_shape);
 
         // Prepare new bias shape base: [1, 1, 1, 1, ... ]
-        const auto one_node = v0::Constant::create(ov::element::i64, Shape{1}, {1});
-        const auto two_node = v0::Constant::create(ov::element::i64, Shape{1}, {2});
+        const auto one_node = v0::Constant::create(ov::element::i64, ov::Shape{1}, {1});
+        const auto two_node = v0::Constant::create(ov::element::i64, ov::Shape{1}, {2});
         const auto remaining_shape_length = std::make_shared<v1::Subtract>(conv_rank, two_node);
         const auto remaining_bias_shape_ones = std::make_shared<v3::Broadcast>(one_node, remaining_shape_length);
@@ -128,8 +128,8 @@ ov::Output<ov::Node> get_prepared_bias(const ov::Output<ov::Node>& bias, const o
 }
 } // namespace
 
-ov::OutputVector conv_transpose(const Node& node) {
-    const ov::OutputVector& inputs = node.get_ng_inputs();
+ov::OutputVector conv_transpose(const ov::frontend::onnx::Node& node) {
+    const ov::OutputVector& inputs = node.get_ov_inputs();
 
     CHECK_VALID_NODE(node,
                      inputs.size() == 2 || inputs.size() == 3,
@@ -143,7 +143,7 @@ ov::OutputVector conv_transpose(const Node& node) {
     const auto& filters_pshape = filters.get_partial_shape();
 
     std::size_t num_spatial_dims = 0;
-    Strides strides, dilations;
+    ov::Strides strides, dilations;
     std::pair<ov::CoordinateDiff, ov::CoordinateDiff> paddings;
     ov::op::PadType auto_pad_type = convpool::get_auto_pad(node);
 
@@ -215,10 +215,7 @@ ov::OutputVector conv_transpose(const Node& node) {
 }
 
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/conv_transpose.hpp b/src/frontends/onnx/frontend/src/op/conv_transpose.hpp
index 910aeba3a5f465..40e59d65c8764d 100644
--- a/src/frontends/onnx/frontend/src/op/conv_transpose.hpp
+++ b/src/frontends/onnx/frontend/src/op/conv_transpose.hpp
@@ -4,13 +4,11 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
 /// \brief Performs ONNX Transposed Convolution operation.
@@ -19,13 +17,10 @@ namespace set_1 {
 ///
 /// \return The vector containing OV nodes producing output of ONNX convolution
 ///         operation.
-ov::OutputVector conv_transpose(const Node& node);
+ov::OutputVector conv_transpose(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/cos.cpp b/src/frontends/onnx/frontend/src/op/cos.cpp
index e1aa371b04d94b..ff55e15c5d0bee 100644
--- a/src/frontends/onnx/frontend/src/op/cos.cpp
+++ b/src/frontends/onnx/frontend/src/op/cos.cpp
@@ -8,19 +8,16 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector cos(const Node& node) {
-    return {std::make_shared<v0::Cos>(node.get_ng_inputs().at(0))};
+ov::OutputVector cos(const ov::frontend::onnx::Node& node) {
+    return {std::make_shared<v0::Cos>(node.get_ov_inputs().at(0))};
 }
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/cos.hpp b/src/frontends/onnx/frontend/src/op/cos.hpp
index fca9c03486eea2..039e5129649c73 100644
--- a/src/frontends/onnx/frontend/src/op/cos.hpp
+++ b/src/frontends/onnx/frontend/src/op/cos.hpp
@@ -4,20 +4,16 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector cos(const Node& node);
+ov::OutputVector cos(const ov::frontend::onnx::Node& node);
 }
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/cosh.cpp b/src/frontends/onnx/frontend/src/op/cosh.cpp
index 8f291542ee978f..cc3d81e32b8dd3 100644
--- a/src/frontends/onnx/frontend/src/op/cosh.cpp
+++ b/src/frontends/onnx/frontend/src/op/cosh.cpp
@@ -8,19 +8,16 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector cosh(const Node& node) {
-    return {std::make_shared<v0::Cosh>(node.get_ng_inputs().at(0))};
+ov::OutputVector cosh(const ov::frontend::onnx::Node& node) {
+    return {std::make_shared<v0::Cosh>(node.get_ov_inputs().at(0))};
 }
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/cosh.hpp b/src/frontends/onnx/frontend/src/op/cosh.hpp
index a555640a5d551b..974352d8ba416f 100644
--- a/src/frontends/onnx/frontend/src/op/cosh.hpp
+++ b/src/frontends/onnx/frontend/src/op/cosh.hpp
@@ -4,19 +4,16 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector cosh(const Node& node);
+ov::OutputVector cosh(const ov::frontend::onnx::Node& node);
 }
 } // namespace op
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/crop.cpp b/src/frontends/onnx/frontend/src/op/crop.cpp
index fe959c230b1bdb..3dad7bb587863b 100644
--- a/src/frontends/onnx/frontend/src/op/crop.cpp
+++ b/src/frontends/onnx/frontend/src/op/crop.cpp
@@ -13,16 +13,16 @@
 
 using namespace ov::op;
 using ov::Shape;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector crop(const Node& node) {
+ov::OutputVector crop(const ov::frontend::onnx::Node& node) {
     // Crop is an obsolete experimental ONNX operation.
     // Crops an image's spatial dimensions.
 
-    const auto inputs = node.get_ng_inputs();
+    const auto inputs = node.get_ov_inputs();
     const auto& input_data = inputs.at(0);
 
     // Border values: leftBorder, topBorder, rightBorder, bottomBorder.
@@ -32,7 +32,7 @@ ov::OutputVector crop(const Node& node) {
 
     // Set slice begin values to border values (note order of indexes)
     const auto begin =
-        v0::Constant::create(ov::element::i64, Shape{4}, std::vector<int64_t>{0, 0, border[1], border[0]});
+        v0::Constant::create(ov::element::i64, ov::Shape{4}, std::vector<int64_t>{0, 0, border[1], border[0]});
 
     // If scale is given, then start crop at left/top `border`
     // and end on left/top `border` + `scale`.
@@ -48,7 +48,7 @@ ov::OutputVector crop(const Node& node) {
         // Set slice end values to topBorder+heightScale and leftBorder+widthScale
         // Note that indexes don't match, e.g. border[0] + scale[1]
         end = v0::Constant::create(ov::element::i64,
-                                   Shape{4},
+                                   ov::Shape{4},
                                    std::vector<int64_t>{0, 0, border[1] + scale[0], border[0] + scale[1]});
     }
     // If scale is not provided, crop the image by values provided in `border`.
@@ -60,8 +60,9 @@ ov::OutputVector crop(const Node& node) {
         // Calculate ends as shape(input) - border[2:3]
         const auto input_shape = std::make_shared<v3::ShapeOf>(input_data);
-        const auto end_offset =
-            v0::Constant::create(ov::element::i64, Shape{4}, std::vector<int64_t>{0, 0, -border[3], -border[2]});
+        const auto end_offset = v0::Constant::create(ov::element::i64,
+                                                     ov::Shape{4},
+                                                     std::vector<int64_t>{0, 0, -border[3], -border[2]});
         end = std::make_shared<v1::Add>(input_shape, end_offset);
     }
 
@@ -73,10 +74,7 @@ ov::OutputVector crop(const Node& node) {
 }
 
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/crop.hpp b/src/frontends/onnx/frontend/src/op/crop.hpp
index 62c3c7e5207978..e9bff06481e7ab 100644
--- a/src/frontends/onnx/frontend/src/op/crop.hpp
+++ b/src/frontends/onnx/frontend/src/op/crop.hpp
@@ -4,22 +4,17 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector crop(const Node& node);
+ov::OutputVector crop(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
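The crop translator above builds a StridedSlice from the border attribute; a concrete worked example may help (numbers invented for illustration). With border = {left=2, top=1, right=3, bottom=4} on an NCHW input: begin = {0, 0, border[1], border[0]} = {0, 0, 1, 2}; without scale, end = shape(input) + {0, 0, -border[3], -border[2]} = {N, C, H - 4, W - 3}; with scale = {hs, ws}, end = {0, 0, border[1] + hs, border[0] + ws}. The same arithmetic for a static shape:

#include <array>
#include <cstdint>

using Dims4 = std::array<int64_t, 4>;

// begin/end mirroring the translator; note the swapped H/W index order
Dims4 crop_begin(const Dims4& border) {
    return {0, 0, border[1], border[0]};
}
Dims4 crop_end_no_scale(const Dims4& shape, const Dims4& border) {
    return {0, 0, shape[2] - border[3], shape[3] - border[2]};
}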
diff --git a/src/frontends/onnx/frontend/src/op/cum_sum.cpp b/src/frontends/onnx/frontend/src/op/cum_sum.cpp
index e53913ea084fbd..127551c208460e 100644
--- a/src/frontends/onnx/frontend/src/op/cum_sum.cpp
+++ b/src/frontends/onnx/frontend/src/op/cum_sum.cpp
@@ -10,13 +10,13 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector cum_sum(const Node& node) {
-    auto inputs = node.get_ng_inputs();
+ov::OutputVector cum_sum(const ov::frontend::onnx::Node& node) {
+    auto inputs = node.get_ov_inputs();
     auto data = inputs.at(0);
     bool exclusive = node.get_attribute_value<std::int64_t>("exclusive", 0);
     bool reverse = node.get_attribute_value<std::int64_t>("reverse", 0);
@@ -25,7 +25,7 @@ ov::OutputVector cum_sum(const Node& node) {
     if (inputs.size() > 1) {
         // optional input, 0-D or 1-D tensor
         const auto& axis_shape = inputs.at(1).get_partial_shape();
-        axis = axis_shape.is_dynamic() ? inputs.at(1) : ngraph::onnx_import::reshape::interpret_as_scalar(inputs.at(1));
+        axis = axis_shape.is_dynamic() ? inputs.at(1) : ov::frontend::onnx::reshape::interpret_as_scalar(inputs.at(1));
     } else {
         axis = v0::Constant::create(ov::element::i64, ov::Shape{}, {0});  // default
     }
@@ -33,10 +33,7 @@ ov::OutputVector cum_sum(const Node& node) {
 }
 
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/cum_sum.hpp b/src/frontends/onnx/frontend/src/op/cum_sum.hpp
index 34d8d6ab38687c..94ca7d0bc35110 100644
--- a/src/frontends/onnx/frontend/src/op/cum_sum.hpp
+++ b/src/frontends/onnx/frontend/src/op/cum_sum.hpp
@@ -4,22 +4,17 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector cum_sum(const Node& node);
+ov::OutputVector cum_sum(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/depth_to_space.cpp b/src/frontends/onnx/frontend/src/op/depth_to_space.cpp
index 2fccf37d7245fc..d78624ef0c82cf 100644
--- a/src/frontends/onnx/frontend/src/op/depth_to_space.cpp
+++ b/src/frontends/onnx/frontend/src/op/depth_to_space.cpp
@@ -9,13 +9,13 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector depth_to_space(const Node& node) {
-    auto data = node.get_ng_inputs().at(0);
+ov::OutputVector depth_to_space(const ov::frontend::onnx::Node& node) {
+    auto data = node.get_ov_inputs().at(0);
     const auto& shape = data.get_partial_shape();
     FRONT_END_GENERAL_CHECK(shape.rank().is_static() && shape.rank().get_length() == 4, "Input must be 4-dimensional");
@@ -32,10 +32,7 @@ ov::OutputVector depth_to_space(const Node& node) {
     return ov::OutputVector{std::make_shared<v0::DepthToSpace>(data, ov_mode, block_size)};
 }
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/depth_to_space.hpp b/src/frontends/onnx/frontend/src/op/depth_to_space.hpp
index f0b8169c9cdccf..31e100cb3cca5a 100644
--- a/src/frontends/onnx/frontend/src/op/depth_to_space.hpp
+++ b/src/frontends/onnx/frontend/src/op/depth_to_space.hpp
@@ -4,13 +4,11 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
 /// \brief Permutes input tensor data from depth into blocks of spatial data.
@@ -22,12 +20,9 @@ namespace set_1 {
 ///
 /// \return ov::OutputVector containing Tensor with shape:
 ///         [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]
-ov::OutputVector depth_to_space(const Node& node);
+ov::OutputVector depth_to_space(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp
index 0582235a0c34cd..4082368fd68163 100644
--- a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp
+++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp
@@ -19,9 +19,9 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace detail {
 std::shared_ptr<ov::Node> get_zero_point(const ov::OutputVector& inputs) {
@@ -38,8 +38,8 @@ std::shared_ptr<ov::Node> get_zero_point(const ov::OutputVector& inputs) {
 }
 } // namespace detail
 namespace set_1 {
-ov::OutputVector dequantize_linear(const Node& node) {
-    const ov::OutputVector inputs{node.get_ng_inputs()};
+ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node) {
+    const ov::OutputVector inputs{node.get_ov_inputs()};
 
     FRONT_END_GENERAL_CHECK(2 <= inputs.size() && inputs.size() <= 3,
                             "The DequantizeLinear op expects 2 required and one optional input. Got: ",
@@ -162,8 +162,8 @@ ov::OutputVector dequantize_linear(const ov::Output<ov::Node>& x,
 }
 } // namespace detail
 
-ov::OutputVector dequantize_linear(const Node& node) {
-    const ov::OutputVector inputs{node.get_ng_inputs()};
+ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node) {
+    const ov::OutputVector inputs{node.get_ov_inputs()};
 
     FRONT_END_GENERAL_CHECK(2 <= inputs.size() && inputs.size() <= 3,
                             "The DequantizeLinear op expects 2 required and one optional "
@@ -191,6 +191,6 @@ ov::OutputVector dequantize_linear(const Node& node) {
 }
 } // namespace set_13
 } // namespace op
-} // namespace onnx_import
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp
index 0d2e58e6c682c1..dffd8c7762b18d 100644
--- a/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp
+++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.hpp
@@ -4,18 +4,16 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 #include "openvino/core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector dequantize_linear(const Node& node);
+ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node);
 } // namespace set_1
 
@@ -27,12 +25,9 @@ ov::OutputVector dequantize_linear(const ov::Output<ov::Node>& x,
                                    int64_t axis,
                                    const Node& node);
 }
-ov::OutputVector dequantize_linear(const Node& node);
+ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node);
 } // namespace set_13
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
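For orientation, the per-element math that DequantizeLinear decomposes into (per the ONNX spec; the translator expresses it with graph nodes rather than scalars):

#include <cstdint>

// y = (x - zero_point) * scale, with x and zero_point widened first
float dequantize_linear_ref(uint8_t x, uint8_t zero_point, float scale) {
    return static_cast<float>(int32_t(x) - int32_t(zero_point)) * scale;
}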
diff --git a/src/frontends/onnx/frontend/src/op/dft.cpp b/src/frontends/onnx/frontend/src/op/dft.cpp
index 6462a9ccec5021..5120f0b7e17901 100644
--- a/src/frontends/onnx/frontend/src/op/dft.cpp
+++ b/src/frontends/onnx/frontend/src/op/dft.cpp
@@ -8,13 +8,13 @@
 #include "utils/common.hpp"
 #include "utils/dft.hpp"
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector dft(const Node& node) {
-    const ov::OutputVector ng_inputs{node.get_ng_inputs()};
+ov::OutputVector dft(const ov::frontend::onnx::Node& node) {
+    const ov::OutputVector ng_inputs{node.get_ov_inputs()};
     const ov::Output<ov::Node> data = ng_inputs.at(0);
 
     const auto dft_length_provided = ng_inputs.size() > 1 && !ov::op::util::is_null(ng_inputs[1]);
@@ -30,10 +30,7 @@ ov::OutputVector dft(const Node& node) {
 }
 
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/dft.hpp b/src/frontends/onnx/frontend/src/op/dft.hpp
index 2b808488f80d6a..d3c35133e7d9a7 100644
--- a/src/frontends/onnx/frontend/src/op/dft.hpp
+++ b/src/frontends/onnx/frontend/src/op/dft.hpp
@@ -4,22 +4,17 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector dft(const Node& node);
+ov::OutputVector dft(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/div.hpp b/src/frontends/onnx/frontend/src/op/div.hpp
index 80578c9478b1d5..c133d5908a0d44 100644
--- a/src/frontends/onnx/frontend/src/op/div.hpp
+++ b/src/frontends/onnx/frontend/src/op/div.hpp
@@ -4,32 +4,27 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 #include "openvino/op/divide.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-inline ov::OutputVector div(const Node& node) {
+inline ov::OutputVector div(const ov::frontend::onnx::Node& node) {
     return common::handle_opset6_binary_op<ov::op::v1::Divide>(node);
 }
 } // namespace set_1
 
 namespace set_7 {
-inline ov::OutputVector div(const Node& node) {
-    return {std::make_shared<ov::op::v1::Divide>(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))};
+inline ov::OutputVector div(const ov::frontend::onnx::Node& node) {
+    return {std::make_shared<ov::op::v1::Divide>(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))};
 }
 } // namespace set_7
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/dropout.cpp b/src/frontends/onnx/frontend/src/op/dropout.cpp
index bfc998a8ba4635..636bc9dda486f0 100644
--- a/src/frontends/onnx/frontend/src/op/dropout.cpp
+++ b/src/frontends/onnx/frontend/src/op/dropout.cpp
@@ -13,15 +13,15 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace {
-ov::OutputVector build_dropout(const Node& node, bool training_mode) {
+ov::OutputVector build_dropout(const ov::frontend::onnx::Node& node, bool training_mode) {
     CHECK_VALID_NODE(node, !training_mode, "Training mode is not supported for Dropout op");
 
-    const auto input_data = node.get_ng_inputs().at(0);
+    const auto input_data = node.get_ov_inputs().at(0);
     const bool return_mask = node.get_outputs_size() > 1;
 
     if (return_mask) {
@@ -36,8 +36,8 @@ ov::OutputVector build_dropout(const Node& node, bool training_mode) {
 } // namespace
 
 namespace set_12 {
-ov::OutputVector dropout(const Node& node) {
-    const auto ng_inputs = node.get_ng_inputs();
+ov::OutputVector dropout(const ov::frontend::onnx::Node& node) {
+    const auto ng_inputs = node.get_ov_inputs();
     // seed attribute and ratio input are ignored because traning mode is not
     // supported anyway
     bool training_mode = false;  // default value
@@ -52,7 +52,7 @@ ov::OutputVector dropout(const Node& node) {
 } // namespace set_12
 
 namespace set_7 {
-ov::OutputVector dropout(const Node& node) {
+ov::OutputVector dropout(const ov::frontend::onnx::Node& node) {
     // "is_test" attribute was removed
     // ratio attribute is ignored because traning mode is not supported
     const bool training_mode = false;
@@ -62,7 +62,7 @@ ov::OutputVector dropout(const Node& node) {
 } // namespace set_7
 
 namespace set_1 {
-ov::OutputVector dropout(const Node& node) {
+ov::OutputVector dropout(const ov::frontend::onnx::Node& node) {
     // legacy consumed_inputs attribute ignored
     // ratio attribute is ignored because traning mode is not supported
     const bool training_mode = !node.get_attribute_value<int64_t>("is_test", 0);
@@ -70,10 +70,7 @@ ov::OutputVector dropout(const Node& node) {
     return build_dropout(node, training_mode);
 }
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/dropout.hpp b/src/frontends/onnx/frontend/src/op/dropout.hpp
index 010123f044fc5f..e0826063c2ae94 100644
--- a/src/frontends/onnx/frontend/src/op/dropout.hpp
+++ b/src/frontends/onnx/frontend/src/op/dropout.hpp
@@ -4,29 +4,24 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_12 {
-ov::OutputVector dropout(const Node& node);
+ov::OutputVector dropout(const ov::frontend::onnx::Node& node);
 } // namespace set_12
 
 namespace set_7 {
-ov::OutputVector dropout(const Node& node);
+ov::OutputVector dropout(const ov::frontend::onnx::Node& node);
 } // namespace set_7
 
 namespace set_1 {
-ov::OutputVector dropout(const Node& node);
+ov::OutputVector dropout(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp
index fab204a0bc648b..1009d05d8d9018 100644
--- a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp
+++ b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp
@@ -25,13 +25,13 @@
 
 using namespace ov::op;
 using ov::Shape;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace {
 std::shared_ptr<ov::Node> find_min_value(const ov::Output<ov::Node>& input) {
-    const auto& zero_node = v0::Constant::create(ov::element::i64, Shape{}, {0});
-    const auto& one_node = v0::Constant::create(ov::element::i64, Shape{}, {1});
+    const auto& zero_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
+    const auto& one_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {1});
 
     const auto& input_shape = std::make_shared<v3::ShapeOf>(input);
     const auto& input_rank = std::make_shared<v3::ShapeOf>(input_shape);
@@ -41,13 +41,13 @@ std::shared_ptr<ov::Node> find_min_value(const ov::Output<ov::Node>& input) {
 
     const auto& input_min = std::make_shared<v1::ReduceMin>(input, reduce_axes);
 
-    const auto& zero_node_u8 = v0::Constant::create(ov::element::f32, Shape{}, {0});
+    const auto& zero_node_u8 = v0::Constant::create(ov::element::f32, ov::Shape{}, {0});
     return std::make_shared<v1::Minimum>(zero_node_u8, input_min);
 }
 
 std::shared_ptr<ov::Node> find_max_value(const ov::Output<ov::Node>& input) {
-    const auto& zero_node = v0::Constant::create(ov::element::i64, Shape{}, {0});
-    const auto& one_node = v0::Constant::create(ov::element::i64, Shape{}, {1});
+    const auto& zero_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
+    const auto& one_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {1});
 
     const auto& input_shape = std::make_shared<v3::ShapeOf>(input);
     const auto& input_rank = std::make_shared<v3::ShapeOf>(input_shape);
@@ -57,7 +57,7 @@ std::shared_ptr<ov::Node> find_max_value(const ov::Output<ov::Node>& input) {
 
     const auto& input_max = std::make_shared<v1::ReduceMax>(input, reduce_axes);
 
-    const auto& zero_node_u8 = v0::Constant::create(ov::element::f32, Shape{}, {0});
+    const auto& zero_node_u8 = v0::Constant::create(ov::element::f32, ov::Shape{}, {0});
     return std::make_shared<v1::Maximum>(zero_node_u8, input_max);
 }
 
@@ -79,13 +79,13 @@ std::shared_ptr<ov::Node> quantize_linear(ov::Output<ov::Node> x,
 } // namespace
 namespace op {
 namespace set_1 {
-ov::OutputVector dynamic_quantize_linear(const Node& node) {
-    const ov::OutputVector& inputs = node.get_ng_inputs();
+ov::OutputVector dynamic_quantize_linear(const ov::frontend::onnx::Node& node) {
+    const ov::OutputVector& inputs = node.get_ov_inputs();
     const auto& x = inputs.at(0);
 
     // quantization range in case of uint8 is [0, 255]
-    const auto& quant_range_min = v0::Constant::create(ov::element::f32, Shape{}, {0});
-    const auto& quant_range_max = v0::Constant::create(ov::element::f32, Shape{}, {255});
+    const auto& quant_range_min = v0::Constant::create(ov::element::f32, ov::Shape{}, {0});
+    const auto& quant_range_max = v0::Constant::create(ov::element::f32, ov::Shape{}, {255});
     const auto& quant_range_span = std::make_shared<v1::Subtract>(quant_range_max, quant_range_min);
 
     const auto& x_max = find_max_value(x);
@@ -108,6 +108,6 @@ ov::OutputVector dynamic_quantize_linear(const Node& node) {
 }
 } // namespace set_1
 } // namespace op
-} // namespace onnx_import
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
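The subgraph in dynamic_quantize_linear.cpp above implements the ONNX DynamicQuantizeLinear recipe: the data range is extended to include zero, the scale spreads it over [0, 255], and the zero point is the clamped, rounded projection of 0. A scalar reference, assuming a non-empty input with a non-degenerate range:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

void dynamic_quantize_ref(const std::vector<float>& x, float& y_scale, uint8_t& y_zp) {
    // min/max of the data, extended to include 0 (mirrors find_min_value/find_max_value)
    const float lo = std::min(0.0f, *std::min_element(x.begin(), x.end()));
    const float hi = std::max(0.0f, *std::max_element(x.begin(), x.end()));
    y_scale = (hi - lo) / 255.0f;  // quantization range for u8 is [0, 255]
    const float zp = std::nearbyint((0.0f - lo) / y_scale);
    y_zp = static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, zp)));
}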
diff --git a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp
index 577c43cea4ce0f..2218f74c40a95a 100644
--- a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp
+++ b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.hpp
@@ -4,19 +4,17 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector dynamic_quantize_linear(const Node& node);
+ov::OutputVector dynamic_quantize_linear(const ov::frontend::onnx::Node& node);
 } // namespace set_1
 } // namespace op
-} // namespace onnx_import
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/einsum.cpp b/src/frontends/onnx/frontend/src/op/einsum.cpp
index 204f770bf055a9..2cf832a78e1af2 100644
--- a/src/frontends/onnx/frontend/src/op/einsum.cpp
+++ b/src/frontends/onnx/frontend/src/op/einsum.cpp
@@ -8,22 +8,19 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector einsum(const Node& node) {
+ov::OutputVector einsum(const ov::frontend::onnx::Node& node) {
     const std::string& equation{node.get_attribute_value<std::string>("equation")};
 
-    return ov::OutputVector{std::make_shared<v7::Einsum>(node.get_ng_inputs(), equation)};
+    return {std::make_shared<v7::Einsum>(node.get_ov_inputs(), equation)};
 }
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/einsum.hpp b/src/frontends/onnx/frontend/src/op/einsum.hpp
index d588ca5aca5a1b..22821224537918 100644
--- a/src/frontends/onnx/frontend/src/op/einsum.hpp
+++ b/src/frontends/onnx/frontend/src/op/einsum.hpp
@@ -4,21 +4,17 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector einsum(const Node& node);
+ov::OutputVector einsum(const ov::frontend::onnx::Node& node);
 } // namespace set_1
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/elu.cpp b/src/frontends/onnx/frontend/src/op/elu.cpp
index fc8345df202d67..fb45f05a1c53ec 100644
--- a/src/frontends/onnx/frontend/src/op/elu.cpp
+++ b/src/frontends/onnx/frontend/src/op/elu.cpp
@@ -8,23 +8,20 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector elu(const Node& node) {
-    auto data = node.get_ng_inputs().at(0);
+ov::OutputVector elu(const ov::frontend::onnx::Node& node) {
+    auto data = node.get_ov_inputs().at(0);
     double alpha = node.get_attribute_value<double>("alpha", 1);
 
-    return ov::OutputVector{std::make_shared<v0::Elu>(data, alpha)};
+    return {std::make_shared<v0::Elu>(data, alpha)};
 }
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/elu.hpp b/src/frontends/onnx/frontend/src/op/elu.hpp
index a645bc5e5280f2..87ef5d46639726 100644
--- a/src/frontends/onnx/frontend/src/op/elu.hpp
+++ b/src/frontends/onnx/frontend/src/op/elu.hpp
@@ -4,22 +4,17 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector elu(const Node& node);
+ov::OutputVector elu(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/equal.hpp b/src/frontends/onnx/frontend/src/op/equal.hpp
index 5a8a6da6bc2b3e..4591e7da6da44e 100644
--- a/src/frontends/onnx/frontend/src/op/equal.hpp
+++ b/src/frontends/onnx/frontend/src/op/equal.hpp
@@ -4,25 +4,20 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 #include "openvino/op/equal.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-inline ov::OutputVector equal(const Node& node) {
-    return {std::make_shared<ov::op::v1::Equal>(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))};
+inline ov::OutputVector equal(const ov::frontend::onnx::Node& node) {
+    return {std::make_shared<ov::op::v1::Equal>(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))};
 }
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/erf.hpp b/src/frontends/onnx/frontend/src/op/erf.hpp
index 2a5a75b2516546..83a6b19ffccaa8 100644
--- a/src/frontends/onnx/frontend/src/op/erf.hpp
+++ b/src/frontends/onnx/frontend/src/op/erf.hpp
@@ -4,24 +4,19 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 #include "openvino/op/erf.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-inline ov::OutputVector erf(const Node& node) {
-    return {std::make_shared<ov::op::v0::Erf>(node.get_ng_inputs().at(0))};
+inline ov::OutputVector erf(const ov::frontend::onnx::Node& node) {
+    return {std::make_shared<ov::op::v0::Erf>(node.get_ov_inputs().at(0))};
 }
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/exp.hpp b/src/frontends/onnx/frontend/src/op/exp.hpp
index c133241934a601..44c243670f1594 100644
--- a/src/frontends/onnx/frontend/src/op/exp.hpp
+++ b/src/frontends/onnx/frontend/src/op/exp.hpp
@@ -4,25 +4,20 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 #include "openvino/op/exp.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-inline ov::OutputVector exp(const Node& node) {
-    return {std::make_shared<ov::op::v0::Exp>(node.get_ng_inputs().at(0))};
+inline ov::OutputVector exp(const ov::frontend::onnx::Node& node) {
+    return {std::make_shared<ov::op::v0::Exp>(node.get_ov_inputs().at(0))};
 }
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/expand.cpp b/src/frontends/onnx/frontend/src/op/expand.cpp
index 54d6299cd77f4e..c96331627c43a7 100644
--- a/src/frontends/onnx/frontend/src/op/expand.cpp
+++ b/src/frontends/onnx/frontend/src/op/expand.cpp
@@ -11,20 +11,20 @@
 
 using namespace ov::op;
 using ov::Shape;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector expand(const Node& node) {
-    const ov::Output<ov::Node> data{node.get_ng_inputs().at(0)};
-    const ov::Output<ov::Node> shape{node.get_ng_inputs().at(1)};
+ov::OutputVector expand(const ov::frontend::onnx::Node& node) {
+    const ov::Output<ov::Node> data{node.get_ov_inputs().at(0)};
+    const ov::Output<ov::Node> shape{node.get_ov_inputs().at(1)};
 
     if (common::is_failsafe_node(shape.get_node_shared_ptr())) {
         // in case the "shape" input is connected to a failsafe node created in place of an invalid initializer
         // the target shape should be ignored and this Expand operation should not modify its input tensor
         // the Broadcast created below should be eliminated later on by an appropriate optimization pass
-        const auto identity_broadcast = v0::Constant::create(ov::element::i64, Shape{1}, {1});
+        const auto identity_broadcast = v0::Constant::create(ov::element::i64, ov::Shape{1}, {1});
         return {std::make_shared<v3::Broadcast>(data, identity_broadcast, ov::op::BroadcastType::BIDIRECTIONAL)};
     } else {
         return {std::make_shared<v3::Broadcast>(data, shape, ov::op::BroadcastType::BIDIRECTIONAL)};
@@ -32,10 +32,7 @@ ov::OutputVector expand(const Node& node) {
 }
 
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/expand.hpp b/src/frontends/onnx/frontend/src/op/expand.hpp
index 80cf415bc7dd07..7ffd8478465c14 100644
--- a/src/frontends/onnx/frontend/src/op/expand.hpp
+++ b/src/frontends/onnx/frontend/src/op/expand.hpp
@@ -4,25 +4,20 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1
 // Expand operator has been available since version 8 of the default ONNX operator set.
 // Currently, Expand is assigned to version 1 due to temporary reason.
 {
-ov::OutputVector expand(const Node& node);
+ov::OutputVector expand(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/eye_like.cpp b/src/frontends/onnx/frontend/src/op/eye_like.cpp
index 6b1bd5088eda77..aae1f88edf0b74 100644
--- a/src/frontends/onnx/frontend/src/op/eye_like.cpp
+++ b/src/frontends/onnx/frontend/src/op/eye_like.cpp
@@ -13,9 +13,9 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace detail {
 namespace {
@@ -33,8 +33,8 @@ ov::OutputVector get_shape_width_and_height(const ov::Output<ov::Node>& shape) {
 
 namespace set_1 {
-ov::OutputVector eye_like(const Node& node) {
-    const auto input = node.get_ng_inputs().at(0);
+ov::OutputVector eye_like(const ov::frontend::onnx::Node& node) {
+    const auto input = node.get_ov_inputs().at(0);
 
     const auto& input_rank = input.get_partial_shape().rank();
     CHECK_VALID_NODE(node,
@@ -65,6 +65,6 @@ ov::OutputVector eye_like(const Node& node) {
 } // namespace set_1
 } // namespace op
-} // namespace onnx_import
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/eye_like.hpp b/src/frontends/onnx/frontend/src/op/eye_like.hpp
index f9e5d69b34b280..700a57e61ce032 100644
--- a/src/frontends/onnx/frontend/src/op/eye_like.hpp
+++ b/src/frontends/onnx/frontend/src/op/eye_like.hpp
@@ -4,20 +4,18 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector eye_like(const Node& node);
+ov::OutputVector eye_like(const ov::frontend::onnx::Node& node);
 } // namespace set_1
 } // namespace op
-} // namespace onnx_import
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/flatten.cpp b/src/frontends/onnx/frontend/src/op/flatten.cpp
index 7a8e29a11a816b..2e07903270e051 100644
--- a/src/frontends/onnx/frontend/src/op/flatten.cpp
+++ b/src/frontends/onnx/frontend/src/op/flatten.cpp
@@ -5,18 +5,18 @@
 #include "op/flatten.hpp"
 
 #include "exceptions.hpp"
-#include "ov_models/ov_builders/reshape.hpp"
+#include "utils/reshape.hpp"
 #include "validation_util.hpp"
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector flatten(const Node& node) {
-    ov::OutputVector inputs{node.get_ng_inputs()};
+ov::OutputVector flatten(const ov::frontend::onnx::Node& node) {
+    ov::OutputVector inputs{node.get_ov_inputs()};
     auto data = inputs.at(0);
     auto axis = node.get_attribute_value<std::int64_t>("axis", 1);
     const auto data_rank = data.get_partial_shape().rank();
@@ -31,9 +31,7 @@ ov::OutputVector flatten(const Node& node) {
 }
 
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
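Flatten, just above, reshapes to 2-D around axis: the output shape is [prod(shape[0:axis]), prod(shape[axis:rank])], so e.g. a {2, 3, 4, 5} tensor flattened at axis=2 becomes {6, 20}. The shape arithmetic as a standalone sketch (not the graph-building code itself):

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// assumes 0 <= axis <= rank (the translator validates this via the data rank)
std::vector<int64_t> flatten_shape(const std::vector<int64_t>& shape, int64_t axis) {
    const auto mid = shape.begin() + axis;
    const int64_t d0 = std::accumulate(shape.begin(), mid, int64_t{1}, std::multiplies<int64_t>());
    const int64_t d1 = std::accumulate(mid, shape.end(), int64_t{1}, std::multiplies<int64_t>());
    return {d0, d1};
}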
diff --git a/src/frontends/onnx/frontend/src/op/flatten.hpp b/src/frontends/onnx/frontend/src/op/flatten.hpp
index fb68711ddf79ff..aa3165f12d0798 100644
--- a/src/frontends/onnx/frontend/src/op/flatten.hpp
+++ b/src/frontends/onnx/frontend/src/op/flatten.hpp
@@ -4,21 +4,17 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector flatten(const Node& node);
+ov::OutputVector flatten(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/floor.hpp b/src/frontends/onnx/frontend/src/op/floor.hpp
index 1688df283691e4..ce4640bf730b67 100644
--- a/src/frontends/onnx/frontend/src/op/floor.hpp
+++ b/src/frontends/onnx/frontend/src/op/floor.hpp
@@ -4,25 +4,20 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 #include "openvino/op/floor.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-inline ov::OutputVector floor(const Node& node) {
-    return {std::make_shared<ov::op::v0::Floor>(node.get_ng_inputs().at(0))};
+inline ov::OutputVector floor(const ov::frontend::onnx::Node& node) {
+    return {std::make_shared<ov::op::v0::Floor>(node.get_ov_inputs().at(0))};
 }
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/gather.hpp b/src/frontends/onnx/frontend/src/op/gather.hpp
index 302a449fb2472b..e36f6eecaedd6b 100644
--- a/src/frontends/onnx/frontend/src/op/gather.hpp
+++ b/src/frontends/onnx/frontend/src/op/gather.hpp
@@ -4,19 +4,17 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 #include "openvino/op/constant.hpp"
 #include "openvino/op/gather.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-inline ov::OutputVector gather(const Node& node) {
-    ov::OutputVector ng_inputs{node.get_ng_inputs()};
+inline ov::OutputVector gather(const ov::frontend::onnx::Node& node) {
+    ov::OutputVector ng_inputs{node.get_ov_inputs()};
     auto data = ng_inputs.at(0);
     auto indices = ng_inputs.at(1);
     auto axis = node.get_attribute_value<int64_t>("axis", 0);
@@ -28,6 +26,6 @@ inline ov::OutputVector gather(const Node& node) {
 } // namespace set_1
 } // namespace op
-} // namespace onnx_import
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/gather_elements.hpp b/src/frontends/onnx/frontend/src/op/gather_elements.hpp
index f2f4536392aefd..d225fdd7c2911c 100644
--- a/src/frontends/onnx/frontend/src/op/gather_elements.hpp
+++ b/src/frontends/onnx/frontend/src/op/gather_elements.hpp
@@ -4,17 +4,15 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "openvino/op/gather_elements.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-inline ov::OutputVector gather_elements(const Node& node) {
-    ov::OutputVector ng_inputs{node.get_ng_inputs()};
+inline ov::OutputVector gather_elements(const ov::frontend::onnx::Node& node) {
+    ov::OutputVector ng_inputs{node.get_ov_inputs()};
     auto data = ng_inputs.at(0);
     auto indices = ng_inputs.at(1);
     auto axis = node.get_attribute_value<int64_t>("axis", 0);
@@ -23,6 +21,6 @@ inline ov::OutputVector gather_elements(const Node& node) {
 }
 } // namespace set_1
 } // namespace op
-} // namespace onnx_import
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/gather_nd.cpp b/src/frontends/onnx/frontend/src/op/gather_nd.cpp
index 2f2c21560a5c50..c9f6d241d8b98a 100644
--- a/src/frontends/onnx/frontend/src/op/gather_nd.cpp
+++ b/src/frontends/onnx/frontend/src/op/gather_nd.cpp
@@ -11,13 +11,13 @@
 
 using namespace ov::op;
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector gather_nd(const Node& node) {
-    const ov::OutputVector ng_inputs{node.get_ng_inputs()};
+ov::OutputVector gather_nd(const ov::frontend::onnx::Node& node) {
+    const ov::OutputVector ng_inputs{node.get_ov_inputs()};
     const auto data = ng_inputs.at(0);
     const auto indices = ng_inputs.at(1);
     const auto batch_dims = node.get_attribute_value<int64_t>("batch_dims", 0);
@@ -26,10 +26,7 @@ ov::OutputVector gather_nd(const Node& node) {
 }
 
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
diff --git a/src/frontends/onnx/frontend/src/op/gather_nd.hpp b/src/frontends/onnx/frontend/src/op/gather_nd.hpp
index 38f6e3fbbe6542..5053780c00a215 100644
--- a/src/frontends/onnx/frontend/src/op/gather_nd.hpp
+++ b/src/frontends/onnx/frontend/src/op/gather_nd.hpp
@@ -7,22 +7,17 @@
 
 #pragma once
 
-#include "openvino/core/deprecated.hpp"
-OPENVINO_SUPPRESS_DEPRECATED_START
-
 #include "core/node.hpp"
 
-namespace ngraph {
-namespace onnx_import {
+namespace ov {
+namespace frontend {
+namespace onnx {
 namespace op {
 namespace set_1 {
-ov::OutputVector gather_nd(const Node& node);
+ov::OutputVector gather_nd(const ov::frontend::onnx::Node& node);
 } // namespace set_1
-
 } // namespace op
-
-} // namespace onnx_import
-
-} // namespace ngraph
-OPENVINO_SUPPRESS_DEPRECATED_END
+} // namespace onnx
+} // namespace frontend
+} // namespace ov
namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gelu.hpp b/src/frontends/onnx/frontend/src/op/gelu.hpp index a202ed43b3d283..242da0fb5b7730 100644 --- a/src/frontends/onnx/frontend/src/op/gelu.hpp +++ b/src/frontends/onnx/frontend/src/op/gelu.hpp @@ -3,20 +3,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector gelu(const Node& node); +ov::OutputVector gelu(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gemm.cpp b/src/frontends/onnx/frontend/src/op/gemm.cpp index eb4a04bae754f6..b679a407d51707 100644 --- a/src/frontends/onnx/frontend/src/op/gemm.cpp +++ b/src/frontends/onnx/frontend/src/op/gemm.cpp @@ -8,18 +8,18 @@ #include "openvino/op/constant.hpp" #include "openvino/op/matmul.hpp" #include "openvino/op/multiply.hpp" -#include "ov_models/ov_builders/reshape.hpp" +#include "utils/reshape.hpp" using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector gemm(const Node& node) { - ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector gemm(const ov::frontend::onnx::Node& node) { + ov::OutputVector inputs{node.get_ov_inputs()}; ov::Output input_a = inputs.at(0); ov::Output input_b = inputs.at(1); ov::Output input_c; @@ -50,7 +50,7 @@ ov::OutputVector gemm(const Node& node) { std::shared_ptr matmul_node = std::make_shared(input_a, input_b); if (alpha != 1) { - const auto alpha_node = v0::Constant::create(input_b.get_element_type(), Shape{}, {alpha}); + const auto alpha_node = v0::Constant::create(input_b.get_element_type(), ov::Shape{}, {alpha}); matmul_node = std::make_shared(matmul_node, alpha_node); } @@ -62,8 +62,8 @@ ov::OutputVector gemm(const Node& node) { } // namespace set_1 namespace set_6 { -ov::OutputVector gemm(const Node& node) { - ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector gemm(const ov::frontend::onnx::Node& node) { + ov::OutputVector inputs{node.get_ov_inputs()}; ov::Output input_a = inputs.at(0); ov::Output input_b = inputs.at(1); ov::Output input_c; @@ -90,9 +90,7 @@ ov::OutputVector gemm(const Node& node) { } } // namespace set_6 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gemm.hpp b/src/frontends/onnx/frontend/src/op/gemm.hpp index bc25e2cfac3aa5..71b43dea77cca6 100644 --- a/src/frontends/onnx/frontend/src/op/gemm.hpp +++ b/src/frontends/onnx/frontend/src/op/gemm.hpp @@ -4,26 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector gemm(const Node& node); +ov::OutputVector gemm(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_6 { -ov::OutputVector gemm(const Node& node); +ov::OutputVector 
gemm(const ov::frontend::onnx::Node& node); } // namespace set_6 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/global_average_pool.cpp b/src/frontends/onnx/frontend/src/op/global_average_pool.cpp index 3e3abdad6638c9..a37c346a7a5118 100644 --- a/src/frontends/onnx/frontend/src/op/global_average_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/global_average_pool.cpp @@ -13,12 +13,12 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector global_average_pool(const Node& node) { +ov::OutputVector global_average_pool(const ov::frontend::onnx::Node& node) { // Generate axes for reduce operation which contain all spatial dims indexes. // Examples: // Input shape: [N, C, H, W] @@ -28,11 +28,11 @@ ov::OutputVector global_average_pool(const Node& node) { // Input shape: [N, C, H, W, D] // Input spatial dimensions are H, W and D // Expected spatial dims indexes: [2, 3, 4] - auto data = node.get_ng_inputs()[0]; + auto data = node.get_ov_inputs()[0]; - const auto zero_node = v0::Constant::create(ov::element::i64, Shape{}, {0}); - const auto one_node = v0::Constant::create(ov::element::i64, Shape{}, {1}); - const auto two_node = v0::Constant::create(ov::element::i64, Shape{}, {2}); + const auto zero_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {0}); + const auto one_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {1}); + const auto two_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {2}); const auto data_shape = std::make_shared(data); const auto data_rank = std::make_shared(data_shape); @@ -44,10 +44,7 @@ ov::OutputVector global_average_pool(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/global_average_pool.hpp b/src/frontends/onnx/frontend/src/op/global_average_pool.hpp index 80125b2e42c813..798055ff530afd 100644 --- a/src/frontends/onnx/frontend/src/op/global_average_pool.hpp +++ b/src/frontends/onnx/frontend/src/op/global_average_pool.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Convert ONNX GlobalAveragePool operation to an OV node. @@ -19,13 +17,10 @@ namespace set_1 { /// /// \return The vector containing OV nodes producing output of ONNX /// GlobalAveragePool operation. 
-ov::OutputVector global_average_pool(const Node& node); +ov::OutputVector global_average_pool(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/global_max_pool.cpp b/src/frontends/onnx/frontend/src/op/global_max_pool.cpp index c2b3ff92bef595..e975c4fe423e9b 100644 --- a/src/frontends/onnx/frontend/src/op/global_max_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/global_max_pool.cpp @@ -13,12 +13,12 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector global_max_pool(const Node& node) { +ov::OutputVector global_max_pool(const ov::frontend::onnx::Node& node) { // Generate axes for reduce operation which contain all spatial dims indexes. // Examples: // Input shape: [N, C, H, W] @@ -28,11 +28,11 @@ ov::OutputVector global_max_pool(const Node& node) { // Input shape: [N, C, H, W, D] // Input spatial dimensions are H, W and D // Expected spatial dims indexes: [2, 3, 4] - auto data = node.get_ng_inputs()[0]; + auto data = node.get_ov_inputs()[0]; - const auto zero_node = v0::Constant::create(ov::element::i64, Shape{}, {0}); - const auto one_node = v0::Constant::create(ov::element::i64, Shape{}, {1}); - const auto two_node = v0::Constant::create(ov::element::i64, Shape{}, {2}); + const auto zero_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {0}); + const auto one_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {1}); + const auto two_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {2}); const auto data_shape = std::make_shared(data); const auto data_rank = std::make_shared(data_shape); @@ -44,10 +44,7 @@ ov::OutputVector global_max_pool(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/global_max_pool.hpp b/src/frontends/onnx/frontend/src/op/global_max_pool.hpp index 744c10d0dd8ab3..b282fdd2993e71 100644 --- a/src/frontends/onnx/frontend/src/op/global_max_pool.hpp +++ b/src/frontends/onnx/frontend/src/op/global_max_pool.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Convert ONNX GlobalMaxPool operation to an OV node. @@ -19,13 +17,10 @@ namespace set_1 { /// /// \return The vector containing OV nodes producing output of ONNX /// GlobalMaxPool operation. 
-ov::OutputVector global_max_pool(const Node& node); +ov::OutputVector global_max_pool(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/greater.hpp b/src/frontends/onnx/frontend/src/op/greater.hpp index 5e8aec2fceb6a8..e9a162a6bcec4c 100644 --- a/src/frontends/onnx/frontend/src/op/greater.hpp +++ b/src/frontends/onnx/frontend/src/op/greater.hpp @@ -4,25 +4,20 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/greater.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector greater(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; +inline ov::OutputVector greater(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp b/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp index f21531a4a2e80e..52c8f798858294 100644 --- a/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp +++ b/src/frontends/onnx/frontend/src/op/greater_or_equal.cpp @@ -10,14 +10,14 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector greater_or_equal(const Node& node) { - const auto A = node.get_ng_inputs().at(0); - const auto B = node.get_ng_inputs().at(1); +ov::OutputVector greater_or_equal(const ov::frontend::onnx::Node& node) { + const auto A = node.get_ov_inputs().at(0); + const auto B = node.get_ov_inputs().at(1); FRONT_END_GENERAL_CHECK(A.get_element_type() != ov::element::bf16 && B.get_element_type() != ov::element::bf16, "The input data bfloat16 isn't supported in opset 12"); @@ -29,9 +29,9 @@ ov::OutputVector greater_or_equal(const Node& node) { } // namespace set_1 namespace set_16 { -ov::OutputVector greater_or_equal(const Node& node) { - const auto A = node.get_ng_inputs().at(0); - const auto B = node.get_ng_inputs().at(1); +ov::OutputVector greater_or_equal(const ov::frontend::onnx::Node& node) { + const auto A = node.get_ov_inputs().at(0); + const auto B = node.get_ov_inputs().at(1); const auto C = std::make_shared(A, B); @@ -39,6 +39,6 @@ ov::OutputVector greater_or_equal(const Node& node) { } } // namespace set_16 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp b/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp index a10a44148d966e..ecdaa2fe6020a4 100644 --- a/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp +++ b/src/frontends/onnx/frontend/src/op/greater_or_equal.hpp @@ -4,24 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace 
onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector greater_or_equal(const Node& node); +ov::OutputVector greater_or_equal(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_16 { -ov::OutputVector greater_or_equal(const Node& node); +ov::OutputVector greater_or_equal(const ov::frontend::onnx::Node& node); } // namespace set_16 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/grid_sample.cpp b/src/frontends/onnx/frontend/src/op/grid_sample.cpp index 63f2cb0166fd41..8a7e6785311055 100644 --- a/src/frontends/onnx/frontend/src/op/grid_sample.cpp +++ b/src/frontends/onnx/frontend/src/op/grid_sample.cpp @@ -8,14 +8,14 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector grid_sample(const Node& node) { - const auto data = node.get_ng_inputs().at(0); - const auto grid = node.get_ng_inputs().at(1); +ov::OutputVector grid_sample(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); + const auto grid = node.get_ov_inputs().at(1); v9::GridSample::Attributes attributes{}; attributes.align_corners = node.get_attribute_value("align_corners", 0); @@ -30,6 +30,6 @@ ov::OutputVector grid_sample(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/grid_sample.hpp b/src/frontends/onnx/frontend/src/op/grid_sample.hpp index 875f0d085afb8f..2997f1581c95ec 100644 --- a/src/frontends/onnx/frontend/src/op/grid_sample.hpp +++ b/src/frontends/onnx/frontend/src/op/grid_sample.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector grid_sample(const Node& node); +ov::OutputVector grid_sample(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/group_normalization.cpp b/src/frontends/onnx/frontend/src/op/group_normalization.cpp index 99837fa716dbc9..5c5339748d4ce3 100644 --- a/src/frontends/onnx/frontend/src/op/group_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/group_normalization.cpp @@ -16,13 +16,13 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector group_normalization(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector group_normalization(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); OPENVINO_ASSERT(inputs.size() == 3); const auto& data = inputs[0]; // Shape [N, C, ...] 
@@ -32,10 +32,10 @@ ov::OutputVector group_normalization(const Node& node) { const auto eps = node.get_attribute_value("epsilon", 1e-05f); const auto num_groups = node.get_attribute_value("num_groups"); - const auto zero = v0::Constant::create(ov::element::i64, Shape{1}, {0}); - const auto one = v0::Constant::create(ov::element::i64, Shape{1}, {1}); + const auto zero = v0::Constant::create(ov::element::i64, ov::Shape{1}, {0}); + const auto one = v0::Constant::create(ov::element::i64, ov::Shape{1}, {1}); const auto c_dim = std::make_shared(std::make_shared(data), one, zero); - const auto g_dim = v0::Constant::create(ov::element::i64, Shape{1}, {num_groups}); + const auto g_dim = v0::Constant::create(ov::element::i64, ov::Shape{1}, {num_groups}); const auto c_g_div = std::make_shared(c_dim, g_dim); @@ -54,6 +54,6 @@ ov::OutputVector group_normalization(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/group_normalization.hpp b/src/frontends/onnx/frontend/src/op/group_normalization.hpp index f71164f5e50779..a7d13af83293f3 100644 --- a/src/frontends/onnx/frontend/src/op/group_normalization.hpp +++ b/src/frontends/onnx/frontend/src/op/group_normalization.hpp @@ -4,19 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector group_normalization(const Node& node); +ov::OutputVector group_normalization(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gru.cpp b/src/frontends/onnx/frontend/src/op/gru.cpp index 713963a53e5581..1fa59be80b6d3f 100644 --- a/src/frontends/onnx/frontend/src/op/gru.cpp +++ b/src/frontends/onnx/frontend/src/op/gru.cpp @@ -9,16 +9,16 @@ #include "openvino/op/concat.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/gru_sequence.hpp" -#include "ov_models/ov_builders/reshape.hpp" -#include "ov_models/ov_builders/split.hpp" #include "utils/recurrent.hpp" +#include "utils/reshape.hpp" +#include "utils/split.hpp" using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { namespace { @@ -28,14 +28,14 @@ struct GRUInputMap : public recurrent::OpInputMap { // Override bias, since we need separated W and R biases for `h` gate. 
if (linear_before_reset) { - const auto& ng_inputs = node.get_ng_inputs(); + const auto& ng_inputs = node.get_ov_inputs(); const auto el_type = ng_inputs.at(0).get_element_type(); if (ng_inputs.size() > 3 && !ov::op::util::is_null(ng_inputs.at(3))) { auto bias = ng_inputs.at(3); // gates_count * 2 since B is: [Wb, Rb] const int split_parts = 2 * 3; - const auto split_bias = ov::op::util::split(bias, split_parts, 1); + const auto split_bias = ov::op::util::make_split(bias, split_parts, 1); const auto wr_z_bias = std::make_shared(split_bias.at(0), split_bias.at(3)); const auto wr_r_bias = std::make_shared(split_bias.at(1), split_bias.at(4)); // The result has shape: [num_directions, 4 * hidden_size] @@ -56,7 +56,7 @@ struct GRUInputMap : public recurrent::OpInputMap { m_map[recurrent::OpInput::B] = std::make_shared(el_type, - Shape{num_directions, (gates_count + 1) * hidden_size}, + ov::Shape{num_directions, (gates_count + 1) * hidden_size}, 0.f); } } @@ -78,7 +78,7 @@ struct GRUAttributes : public recurrent::OpAttributes { }; } // namespace -ov::OutputVector gru(const Node& node) { +ov::OutputVector gru(const ov::frontend::onnx::Node& node) { constexpr std::size_t gates_count = 3; GRUInputMap input_map{node, gates_count}; GRUAttributes attributes{node}; @@ -104,10 +104,7 @@ ov::OutputVector gru(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/gru.hpp b/src/frontends/onnx/frontend/src/op/gru.hpp index a128b3753a7216..3d79096b2d69d9 100644 --- a/src/frontends/onnx/frontend/src/op/gru.hpp +++ b/src/frontends/onnx/frontend/src/op/gru.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector gru(const Node& node); +ov::OutputVector gru(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp index 91ed09a1efa94a..e090e30a1ea145 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.cpp @@ -18,13 +18,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector hammingwindow(const Node& node) { - const auto size = node.get_ng_inputs().at(0); +ov::OutputVector hammingwindow(const ov::frontend::onnx::Node& node) { + const auto size = node.get_ov_inputs().at(0); const auto output_datatype = common::get_ov_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1) == 1; @@ -69,6 +69,6 @@ ov::OutputVector hammingwindow(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git 
a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp index 2d0e46cf9e4015..c6feeaa3ade805 100644 --- a/src/frontends/onnx/frontend/src/op/hammingwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hammingwindow.hpp @@ -3,20 +3,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector hammingwindow(const Node& node); +ov::OutputVector hammingwindow(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.cpp b/src/frontends/onnx/frontend/src/op/hannwindow.cpp index c35e0c6c9791ad..bf021025474f6a 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.cpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.cpp @@ -18,13 +18,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector hannwindow(const Node& node) { - const auto size = node.get_ng_inputs().at(0); +ov::OutputVector hannwindow(const ov::frontend::onnx::Node& node) { + const auto size = node.get_ov_inputs().at(0); const auto output_datatype = common::get_ov_element_type(node.get_attribute_value("output_datatype", 1)); const bool periodic = node.get_attribute_value("periodic", 1) == 1; @@ -65,6 +65,6 @@ ov::OutputVector hannwindow(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hannwindow.hpp b/src/frontends/onnx/frontend/src/op/hannwindow.hpp index b2e6f11967ec6d..120a8af9ef3d37 100644 --- a/src/frontends/onnx/frontend/src/op/hannwindow.hpp +++ b/src/frontends/onnx/frontend/src/op/hannwindow.hpp @@ -3,20 +3,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector hannwindow(const Node& node); +ov::OutputVector hannwindow(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp b/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp index 3613aa28183d42..a252ccafc811cd 100644 --- a/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp +++ b/src/frontends/onnx/frontend/src/op/hard_sigmoid.cpp @@ -10,31 +10,28 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector hard_sigmoid(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector hard_sigmoid(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); 
const auto alpha = v0::Constant::create(data.get_element_type(), - Shape{}, + ov::Shape{}, std::vector{node.get_attribute_value("alpha", 0.2)}); const auto beta = v0::Constant::create(data.get_element_type(), - Shape{}, + ov::Shape{}, std::vector{node.get_attribute_value("beta", 0.5)}); return {std::make_shared(data, alpha, beta)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp b/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp index 3cd2a23cc16159..3b6d36c38d8a74 100644 --- a/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp +++ b/src/frontends/onnx/frontend/src/op/hard_sigmoid.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector hard_sigmoid(const Node& node); +ov::OutputVector hard_sigmoid(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hard_swish.hpp b/src/frontends/onnx/frontend/src/op/hard_swish.hpp index d296977b3814e3..fc38bfb6a7d8c2 100644 --- a/src/frontends/onnx/frontend/src/op/hard_swish.hpp +++ b/src/frontends/onnx/frontend/src/op/hard_swish.hpp @@ -4,24 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/hswish.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector hard_swish(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector hard_swish(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hardmax.cpp b/src/frontends/onnx/frontend/src/op/hardmax.cpp index 8a2c28406a72a8..10a5e254abbdce 100644 --- a/src/frontends/onnx/frontend/src/op/hardmax.cpp +++ b/src/frontends/onnx/frontend/src/op/hardmax.cpp @@ -12,7 +12,6 @@ #include "openvino/op/reshape.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/topk.hpp" -#include "ov_models/ov_builders/reshape.hpp" #include "utils/common.hpp" #include "utils/reshape.hpp" #include "validation_util.hpp" @@ -20,13 +19,13 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector hardmax(const Node& node) { - const auto input = node.get_ng_inputs().at(0); +ov::OutputVector hardmax(const ov::frontend::onnx::Node& node) { + const auto input = node.get_ov_inputs().at(0); const auto& input_shape = input.get_partial_shape(); auto axis = node.get_attribute_value("axis", 1); @@ -42,17 +41,17 @@ ov::OutputVector hardmax(const Node& node) { 
std::make_shared(coerced_tensor_shape, ov::op::v0::Constant::create(ov::element::i64, {1}, {1}), ov::op::v0::Constant::create(ov::element::i64, {}, {0})); - row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size); + row_size = ov::frontend::onnx::reshape::interpret_as_scalar(row_size); const auto indices_axis = 1; const auto topk = std::make_shared(coerced_tensor, - ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1}), + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1}), indices_axis, ov::op::v11::TopK::Mode::MAX, ov::op::v11::TopK::SortType::NONE); - const auto on_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1}); - const auto off_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {0}); + const auto on_value = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1}); + const auto off_value = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0}); const auto results = std::make_shared(topk->output(1), row_size, on_value, off_value, indices_axis); const auto converted_results = std::make_shared(results, input.get_element_type()); @@ -63,8 +62,8 @@ ov::OutputVector hardmax(const Node& node) { } // namespace set_1 namespace set_13 { -ov::OutputVector hardmax(const Node& node) { - const auto input = node.get_ng_inputs().at(0); +ov::OutputVector hardmax(const ov::frontend::onnx::Node& node) { + const auto input = node.get_ov_inputs().at(0); const auto& input_shape = input.get_partial_shape(); auto axis = node.get_attribute_value("axis", -1); @@ -75,16 +74,16 @@ ov::OutputVector hardmax(const Node& node) { std::make_shared(input_runtime_shape, ov::op::v0::Constant::create(ov::element::i64, {1}, {axis}), ov::op::v0::Constant::create(ov::element::i64, {}, {0})); - row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size); + row_size = ov::frontend::onnx::reshape::interpret_as_scalar(row_size); const auto topk = std::make_shared(input, - ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1}), + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1}), axis, ov::op::v11::TopK::Mode::MAX, ov::op::v11::TopK::SortType::NONE); - const auto on_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1}); - const auto off_value = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {0}); + const auto on_value = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1}); + const auto off_value = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0}); const auto results = std::make_shared(topk->output(1), row_size, on_value, off_value, axis); const auto converted_results = std::make_shared(results, input.get_element_type()); @@ -95,8 +94,6 @@ ov::OutputVector hardmax(const Node& node) { } // namespace set_13 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/hardmax.hpp b/src/frontends/onnx/frontend/src/op/hardmax.hpp index 1f6404a91ad9ab..a75c38538f421d 100644 --- a/src/frontends/onnx/frontend/src/op/hardmax.hpp +++ b/src/frontends/onnx/frontend/src/op/hardmax.hpp @@ -4,24 +4,20 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector hardmax(const Node& node); +ov::OutputVector 
hardmax(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_13 { -ov::OutputVector hardmax(const Node& node); +ov::OutputVector hardmax(const ov::frontend::onnx::Node& node); } // namespace set_13 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/identity.hpp b/src/frontends/onnx/frontend/src/op/identity.hpp index d9e60db248386c..e70aeb8a04dabe 100644 --- a/src/frontends/onnx/frontend/src/op/identity.hpp +++ b/src/frontends/onnx/frontend/src/op/identity.hpp @@ -4,28 +4,23 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "utils/common.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector identity(const Node& node) { - ov::OutputVector outputs = node.get_ng_inputs(); +inline ov::OutputVector identity(const ov::frontend::onnx::Node& node) { + ov::OutputVector outputs = node.get_ov_inputs(); for (auto& out : outputs) { common::mark_as_optimized_out(out); } return outputs; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/if.cpp b/src/frontends/onnx/frontend/src/op/if.cpp index 9db991311a6ac4..f06bf83315a801 100644 --- a/src/frontends/onnx/frontend/src/op/if.cpp +++ b/src/frontends/onnx/frontend/src/op/if.cpp @@ -5,18 +5,19 @@ #include "op/if.hpp" #include "core/graph.hpp" +#include "openvino/core/model.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/if.hpp" using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector if_op(const Node& node) { - const auto& ng_inputs = node.get_ng_inputs(); +ov::OutputVector if_op(const ov::frontend::onnx::Node& node) { + const auto& ng_inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(ng_inputs.size() == 1, "If operator takes only one input"); const auto& subgraphs = node.get_subgraphs(); @@ -68,6 +69,6 @@ ov::OutputVector if_op(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/if.hpp b/src/frontends/onnx/frontend/src/op/if.hpp index 2f0c6542385301..97ed4b0e5129a3 100644 --- a/src/frontends/onnx/frontend/src/op/if.hpp +++ b/src/frontends/onnx/frontend/src/op/if.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Convert ONNX If operation to an OV node. @@ -19,10 +17,10 @@ namespace set_1 { /// /// \return The vector containing OV nodes producing output of ONNX If /// operation. 
-ov::OutputVector if_op(const Node& node); +ov::OutputVector if_op(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/image_scaler.cpp b/src/frontends/onnx/frontend/src/op/image_scaler.cpp index b0a595ed607491..a17e3d4d9f644f 100644 --- a/src/frontends/onnx/frontend/src/op/image_scaler.cpp +++ b/src/frontends/onnx/frontend/src/op/image_scaler.cpp @@ -11,13 +11,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector image_scaler(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector image_scaler(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 1, "ImageScaler expects 1 input tensor. Got: ", inputs.size()); const auto data = inputs[0]; @@ -44,6 +44,6 @@ ov::OutputVector image_scaler(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/image_scaler.hpp b/src/frontends/onnx/frontend/src/op/image_scaler.hpp index 9b00e165de5dc4..21fb1970fe2d07 100644 --- a/src/frontends/onnx/frontend/src/op/image_scaler.hpp +++ b/src/frontends/onnx/frontend/src/op/image_scaler.hpp @@ -4,18 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector image_scaler(const Node& node); +ov::OutputVector image_scaler(const ov::frontend::onnx::Node& node); } } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/instance_norm.cpp b/src/frontends/onnx/frontend/src/op/instance_norm.cpp index 2cb00787c6e377..f834bd4efc96d1 100644 --- a/src/frontends/onnx/frontend/src/op/instance_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/instance_norm.cpp @@ -14,15 +14,15 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector instance_norm(const Node& node) { - ov::Output<ov::Node> data(node.get_ng_inputs().at(0)); - ov::Output<ov::Node> scale(node.get_ng_inputs().at(1)); - ov::Output<ov::Node> bias(node.get_ng_inputs().at(2)); +ov::OutputVector instance_norm(const ov::frontend::onnx::Node& node) { + ov::Output<ov::Node> data(node.get_ov_inputs().at(0)); + ov::Output<ov::Node> scale(node.get_ov_inputs().at(1)); + ov::Output<ov::Node> bias(node.get_ov_inputs().at(2)); const ov::PartialShape& data_pshape = data.get_partial_shape(); const ov::PartialShape& scale_pshape = scale.get_partial_shape(); const ov::PartialShape& bias_pshape = bias.get_partial_shape(); @@ -78,10 +78,7 @@ ov::OutputVector instance_norm(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} //
namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/instance_norm.hpp b/src/frontends/onnx/frontend/src/op/instance_norm.hpp index 7dcbf380f5b809..d8c4063c15abba 100644 --- a/src/frontends/onnx/frontend/src/op/instance_norm.hpp +++ b/src/frontends/onnx/frontend/src/op/instance_norm.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Creates OV node representing ONNX InstanceNormalization @@ -24,12 +22,9 @@ namespace set_1 { /// /// \return Vector of nodes containing resulting OV nodes. /// -ov::OutputVector instance_norm(const Node& node); +ov::OutputVector instance_norm(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_finite.cpp b/src/frontends/onnx/frontend/src/op/is_finite.cpp index aa33d3cdaf1e94..89923ac23fb276 100644 --- a/src/frontends/onnx/frontend/src/op/is_finite.cpp +++ b/src/frontends/onnx/frontend/src/op/is_finite.cpp @@ -8,19 +8,19 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector is_finite(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector is_finite(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); return {std::make_shared<v10::IsFinite>(data)}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_finite.hpp b/src/frontends/onnx/frontend/src/op/is_finite.hpp index c3e30fa5451813..8988aa5d8002a9 100644 --- a/src/frontends/onnx/frontend/src/op/is_finite.hpp +++ b/src/frontends/onnx/frontend/src/op/is_finite.hpp @@ -4,20 +4,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector is_finite(const Node& node); +ov::OutputVector is_finite(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_inf.cpp b/src/frontends/onnx/frontend/src/op/is_inf.cpp index f206d74fe90def..d9462cbbb0af57 100644 --- a/src/frontends/onnx/frontend/src/op/is_inf.cpp +++ b/src/frontends/onnx/frontend/src/op/is_inf.cpp @@ -8,13 +8,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector is_inf(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector is_inf(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); ov::opset10::IsInf::Attributes attributes{};
attributes.detect_negative = node.get_attribute_value("detect_negative", 1); @@ -24,6 +24,6 @@ ov::OutputVector is_inf(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_inf.hpp b/src/frontends/onnx/frontend/src/op/is_inf.hpp index a57cdc47a4b9e1..c168141b8b9337 100644 --- a/src/frontends/onnx/frontend/src/op/is_inf.hpp +++ b/src/frontends/onnx/frontend/src/op/is_inf.hpp @@ -4,19 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector is_inf(const Node& node); +ov::OutputVector is_inf(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_nan.cpp b/src/frontends/onnx/frontend/src/op/is_nan.cpp index 523a0fb4ebecdd..74e345e15acf6b 100644 --- a/src/frontends/onnx/frontend/src/op/is_nan.cpp +++ b/src/frontends/onnx/frontend/src/op/is_nan.cpp @@ -8,19 +8,19 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector is_nan(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector is_nan(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); return {std::make_shared(data)}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/is_nan.hpp b/src/frontends/onnx/frontend/src/op/is_nan.hpp index 6a4511bb16e629..065253f66a473e 100644 --- a/src/frontends/onnx/frontend/src/op/is_nan.hpp +++ b/src/frontends/onnx/frontend/src/op/is_nan.hpp @@ -4,20 +4,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector is_nan(const Node& node); +ov::OutputVector is_nan(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/layer_normalization.cpp b/src/frontends/onnx/frontend/src/op/layer_normalization.cpp index aa3f9b30cced94..c8d1d6b4c7b798 100644 --- a/src/frontends/onnx/frontend/src/op/layer_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/layer_normalization.cpp @@ -20,23 +20,24 @@ #include "openvino/op/slice.hpp" #include "openvino/op/sqrt.hpp" #include "openvino/op/subtract.hpp" -#include "ov_models/ov_builders/reshape.hpp" #include "utils/common.hpp" +#include "utils/reshape.hpp" using namespace ov::op; using namespace ov::op::v0; using namespace ov::op::v1; using namespace ov::op::v8; +using 
::ONNX_NAMESPACE::TensorProto_DataType; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector layer_normalization(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector layer_normalization(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); const auto num_inputs = inputs.size(); CHECK_VALID_NODE(node, num_inputs == 2 || num_inputs == 3, @@ -50,7 +51,7 @@ ov::OutputVector layer_normalization(const Node& node) { double epsilon = node.get_attribute_value("epsilon", 1e-5); int64_t stash_type_i = node.get_attribute_value("stash_type", - static_cast(ONNX_NAMESPACE::TensorProto_DataType_FLOAT)); + static_cast(TensorProto_DataType::TensorProto_DataType_FLOAT)); element::Type stash_type = common::get_ov_element_type(stash_type_i); // following calculations are kept as close to the onnx\defs.cc description as possible @@ -67,7 +68,7 @@ ov::OutputVector layer_normalization(const Node& node) { auto SuffixShape = std::make_shared(One1D, NumReducedAxes); auto ReducedShape = std::make_shared(ov::OutputVector{PrefixShape, SuffixShape}, 0); - auto X2D = util::flatten(X, static_cast(axis)); + auto X2D = ov::op::util::flatten(X, static_cast(axis)); auto XU = std::make_shared(X2D, stash_type); auto Mean2D = std::make_shared(XU, One1D, true); @@ -82,10 +83,10 @@ ov::OutputVector layer_normalization(const Node& node) { auto Normalized = std::make_shared(Deviation, StdDev); auto NormalizedT = std::make_shared(Normalized, X); - auto Scale2D = util::flatten(Scale, 0); + auto Scale2D = ov::op::util::flatten(Scale, 0); auto Scaled = std::make_shared(NormalizedT, Scale2D); ov::Output Biased = - (num_inputs == 3 ? std::make_shared(Scaled, util::flatten(inputs.at(2), 0))->output(0) + (num_inputs == 3 ? 
std::make_shared(Scaled, ov::op::util::flatten(inputs.at(2), 0))->output(0) : Scaled->output(0)); auto Y = std::make_shared(Biased, XShape, false); @@ -98,6 +99,6 @@ ov::OutputVector layer_normalization(const Node& node) { } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/layer_normalization.hpp b/src/frontends/onnx/frontend/src/op/layer_normalization.hpp index edb1ee12201357..b0b79213be4cd7 100644 --- a/src/frontends/onnx/frontend/src/op/layer_normalization.hpp +++ b/src/frontends/onnx/frontend/src/op/layer_normalization.hpp @@ -8,13 +8,15 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector layer_normalization(const Node& node); +ov::OutputVector layer_normalization(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov OPENVINO_SUPPRESS_DEPRECATED_END diff --git a/src/frontends/onnx/frontend/src/op/leaky_relu.cpp b/src/frontends/onnx/frontend/src/op/leaky_relu.cpp index af35bd1cfd48b7..15b108bf5730d7 100644 --- a/src/frontends/onnx/frontend/src/op/leaky_relu.cpp +++ b/src/frontends/onnx/frontend/src/op/leaky_relu.cpp @@ -11,24 +11,21 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector leaky_relu(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector leaky_relu(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); double alpha = node.get_attribute_value("alpha", 0.01); - std::shared_ptr alpha_node = v0::Constant::create(data.get_element_type(), Shape{1}, {alpha}); + std::shared_ptr alpha_node = v0::Constant::create(data.get_element_type(), ov::Shape{1}, {alpha}); return {std::make_shared(data, alpha_node)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/leaky_relu.hpp b/src/frontends/onnx/frontend/src/op/leaky_relu.hpp index a3cabf56c1923a..49c390a78d07ce 100644 --- a/src/frontends/onnx/frontend/src/op/leaky_relu.hpp +++ b/src/frontends/onnx/frontend/src/op/leaky_relu.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector leaky_relu(const Node& node); +ov::OutputVector leaky_relu(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/less.hpp b/src/frontends/onnx/frontend/src/op/less.hpp index 275c6c41dd9961..07d30b80a5fb3e 100644 --- a/src/frontends/onnx/frontend/src/op/less.hpp +++ b/src/frontends/onnx/frontend/src/op/less.hpp @@ 
-4,25 +4,20 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/less.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector less(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; +inline ov::OutputVector less(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/less_or_equal.cpp b/src/frontends/onnx/frontend/src/op/less_or_equal.cpp index cf4bd6ae5ebbf6..c44d0de6334a69 100644 --- a/src/frontends/onnx/frontend/src/op/less_or_equal.cpp +++ b/src/frontends/onnx/frontend/src/op/less_or_equal.cpp @@ -10,13 +10,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector less_or_equal(const Node& node) { - const auto& input = node.get_ng_inputs(); +ov::OutputVector less_or_equal(const ov::frontend::onnx::Node& node) { + const auto& input = node.get_ov_inputs(); const auto a = input.at(0); const auto b = input.at(1); FRONT_END_GENERAL_CHECK(a.get_element_type() != ov::element::bf16 && b.get_element_type() != ov::element::bf16, @@ -26,14 +26,14 @@ ov::OutputVector less_or_equal(const Node& node) { } // namespace set_1 namespace set_16 { -ov::OutputVector less_or_equal(const Node& node) { - const auto& input = node.get_ng_inputs(); +ov::OutputVector less_or_equal(const ov::frontend::onnx::Node& node) { + const auto& input = node.get_ov_inputs(); const auto a = input.at(0); const auto b = input.at(1); return {std::make_shared(a, b)}; } } // namespace set_16 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/less_or_equal.hpp b/src/frontends/onnx/frontend/src/op/less_or_equal.hpp index 2060bd5f16e79b..bae2e37014ec73 100644 --- a/src/frontends/onnx/frontend/src/op/less_or_equal.hpp +++ b/src/frontends/onnx/frontend/src/op/less_or_equal.hpp @@ -4,26 +4,24 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector less_or_equal(const Node& node); +ov::OutputVector less_or_equal(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_16 { -ov::OutputVector less_or_equal(const Node& node); +ov::OutputVector less_or_equal(const ov::frontend::onnx::Node& node); } // namespace set_16 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/log.cpp b/src/frontends/onnx/frontend/src/op/log.cpp index 22cf9d809399c3..294c824ca574ed 100644 --- a/src/frontends/onnx/frontend/src/op/log.cpp +++ b/src/frontends/onnx/frontend/src/op/log.cpp @@ -8,20 +8,17 @@ 
using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector log(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +ov::OutputVector log(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/log.hpp b/src/frontends/onnx/frontend/src/op/log.hpp index 1bd9edd731b807..02b8332cd7c649 100644 --- a/src/frontends/onnx/frontend/src/op/log.hpp +++ b/src/frontends/onnx/frontend/src/op/log.hpp @@ -4,20 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector log(const Node& node); +ov::OutputVector log(const ov::frontend::onnx::Node& node); } } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/log_softmax.cpp b/src/frontends/onnx/frontend/src/op/log_softmax.cpp index 2a0c064ebd9b05..1a660d535f251b 100644 --- a/src/frontends/onnx/frontend/src/op/log_softmax.cpp +++ b/src/frontends/onnx/frontend/src/op/log_softmax.cpp @@ -10,15 +10,15 @@ #include "openvino/op/log_softmax.hpp" #include "openvino/op/reshape.hpp" #include "openvino/op/shape_of.hpp" -#include "ov_models/ov_builders/reshape.hpp" +#include "utils/reshape.hpp" #include "validation_util.hpp" using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace { std::shared_ptr onnx_logsoftmax(const ov::Output data, const int64_t axis) { const auto coerced_data = ov::op::util::flatten(data, static_cast(axis)); @@ -27,8 +27,8 @@ std::shared_ptr onnx_logsoftmax(const ov::Output data, const return std::make_shared(result, data_shape, false); } -ov::OutputVector log_softmax(const Node& node, const int64_t DEFAULT_AXIS) { - ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector log_softmax(const ov::frontend::onnx::Node& node, const int64_t DEFAULT_AXIS) { + ov::OutputVector inputs{node.get_ov_inputs()}; const auto data = inputs.at(0); const auto data_rank = data.get_partial_shape().rank(); @@ -39,7 +39,7 @@ ov::OutputVector log_softmax(const Node& node, const int64_t DEFAULT_AXIS) { std::shared_ptr result; switch (data_rank.get_length()) { case 0: { - result = v0::Constant::create(data.get_element_type(), Shape{}, {1}); + result = v0::Constant::create(data.get_element_type(), ov::Shape{}, {1}); break; } case 1: { @@ -62,21 +62,18 @@ ov::OutputVector log_softmax(const Node& node, const int64_t DEFAULT_AXIS) { namespace op { namespace set_1 { -ov::OutputVector log_softmax(const Node& node) { - return ngraph::onnx_import::log_softmax(node, 1); +ov::OutputVector log_softmax(const ov::frontend::onnx::Node& node) { + return ov::frontend::onnx::log_softmax(node, 1); } } // namespace set_1 namespace set_13 { -ov::OutputVector log_softmax(const Node& node) { +ov::OutputVector 
log_softmax(const ov::frontend::onnx::Node& node) { const auto axis = node.get_attribute_value("axis", -1); - return {std::make_shared(node.get_ng_inputs()[0], axis)}; + return {std::make_shared(node.get_ov_inputs()[0], axis)}; } } // namespace set_13 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/log_softmax.hpp b/src/frontends/onnx/frontend/src/op/log_softmax.hpp index bbd65a9b579099..2a9d799d949bb8 100644 --- a/src/frontends/onnx/frontend/src/op/log_softmax.hpp +++ b/src/frontends/onnx/frontend/src/op/log_softmax.hpp @@ -4,27 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector log_softmax(const Node& node); +ov::OutputVector log_softmax(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_13 { -ov::OutputVector log_softmax(const Node& node); +ov::OutputVector log_softmax(const ov::frontend::onnx::Node& node); } // namespace set_13 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/loop.cpp b/src/frontends/onnx/frontend/src/op/loop.cpp index cc399a53d46029..20d1c4f9f0ae57 100644 --- a/src/frontends/onnx/frontend/src/op/loop.cpp +++ b/src/frontends/onnx/frontend/src/op/loop.cpp @@ -16,9 +16,9 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { namespace { @@ -39,8 +39,8 @@ bool is_termination_condition_always_true(const ov::Node* cond_in, const ov::Nod } } // namespace -ov::OutputVector loop(const Node& node) { - const auto& ng_inputs = node.get_ng_inputs(); +ov::OutputVector loop(const ov::frontend::onnx::Node& node) { + const auto& ng_inputs = node.get_ov_inputs(); const ov::OutputVector loop_carried_dependencies{std::next(ng_inputs.begin(), 2), ng_inputs.end()}; @@ -175,6 +175,6 @@ ov::OutputVector loop(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/loop.hpp b/src/frontends/onnx/frontend/src/op/loop.hpp index c08ee77c3e1ffa..94702c2412e640 100644 --- a/src/frontends/onnx/frontend/src/op/loop.hpp +++ b/src/frontends/onnx/frontend/src/op/loop.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Creates OV node representing ONNX loop operator. @@ -22,12 +20,9 @@ namespace set_1 { /// /// \return Vector of nodes containting resulting OV nodes. 
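Every file in this stretch of the diff gets the same mechanical rewrite: ngraph::onnx_import becomes ov::frontend::onnx, get_ng_inputs() becomes get_ov_inputs(), and the deprecation-suppression macros disappear along with the deprecated.hpp include. As a single reference point, here is a minimal sketch of a translator in the new layout, illustrative only and built from the calls visible in the Loop hunk above (the body is elided; the real translator constructs an OV Loop from the carried values):

    #include <iterator>       // std::next
    #include "core/node.hpp"  // ov::frontend::onnx::Node

    namespace ov {
    namespace frontend {
    namespace onnx {
    namespace op {
    namespace set_1 {
    ov::OutputVector loop(const ov::frontend::onnx::Node& node) {
        const auto& inputs = node.get_ov_inputs();
        // ONNX Loop inputs: [0] trip count, [1] termination condition;
        // everything from index 2 onward is a loop-carried dependency,
        // sliced off exactly as in the hunk above.
        const ov::OutputVector loop_carried{std::next(inputs.begin(), 2), inputs.end()};
        // ... build and wire the OV Loop body here ...
        return {};  // placeholder return for the sketch
    }
    }  // namespace set_1
    }  // namespace op
    }  // namespace onnx
    }  // namespace frontend
    }  // namespace ov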
/// -ov::OutputVector loop(const Node& node); +ov::OutputVector loop(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lp_norm.cpp b/src/frontends/onnx/frontend/src/op/lp_norm.cpp index 5ba71d10c56047..836be70c76eb83 100644 --- a/src/frontends/onnx/frontend/src/op/lp_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/lp_norm.cpp @@ -7,23 +7,21 @@ #include "exceptions.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/divide.hpp" -#include "ov_models/ov_builders/norm.hpp" +#include "utils/norm.hpp" #include "validation_util.hpp" using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector lp_norm(const Node& node) { - const ov::Output data{node.get_ng_inputs().at(0)}; +ov::OutputVector lp_norm(const ov::frontend::onnx::Node& node) { + const ov::Output data{node.get_ov_inputs().at(0)}; const auto data_shape = data.get_partial_shape(); const auto data_rank = data_shape.rank(); - const std::int64_t p_norm{node.get_attribute_value("p", 2)}; - const std::int64_t axis{node.get_attribute_value("axis", -1)}; const size_t normalize_axis = ov::util::normalize_axis(node.get_description(), axis, data_rank); @@ -41,10 +39,7 @@ ov::OutputVector lp_norm(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lp_norm.hpp b/src/frontends/onnx/frontend/src/op/lp_norm.hpp index 479f171958ea86..9cd7fdf86a7432 100644 --- a/src/frontends/onnx/frontend/src/op/lp_norm.hpp +++ b/src/frontends/onnx/frontend/src/op/lp_norm.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Creates OV node representing ONNX LpNormalization operator. @@ -25,12 +23,9 @@ namespace set_1 { /// /// \return Vector of nodes containting resulting OV nodes. 
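The LpNormalization lowering shown above boils down to one Divide: the input is divided element-wise by its p-norm taken along the normalized axis. A sketch of the core, assuming the templated get_attribute_value<T> reader and the three-argument utils/norm.hpp builder exactly as they are used in this diff (data and normalize_axis are the locals from the hunk):

    // ONNX attribute defaults: p = 2, axis = -1 (normalized against the rank).
    const auto p    = node.get_attribute_value<std::int64_t>("p", 2);
    const auto axis = node.get_attribute_value<std::int64_t>("axis", -1);

    // Reduce along the single normalized axis, then divide element-wise.
    const auto reduction_axes =
        ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {normalize_axis});
    const auto norm = ov::op::util::lp_norm(data, reduction_axes, static_cast<std::size_t>(p));
    return {std::make_shared<ov::op::v1::Divide>(data, norm)};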
/// -ov::OutputVector lp_norm(const Node& node); +ov::OutputVector lp_norm(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lp_pool.cpp b/src/frontends/onnx/frontend/src/op/lp_pool.cpp index 86154a6b837c3b..715d1f49d929ef 100644 --- a/src/frontends/onnx/frontend/src/op/lp_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/lp_pool.cpp @@ -9,20 +9,20 @@ #include "openvino/op/concat.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/reshape.hpp" -#include "ov_models/ov_builders/norm.hpp" -#include "ov_models/ov_builders/split.hpp" #include "utils/common.hpp" +#include "utils/norm.hpp" +#include "utils/split.hpp" using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector global_lp_pool(const Node& node) { - const ov::Output data{node.get_ng_inputs().at(0)}; +ov::OutputVector global_lp_pool(const ov::frontend::onnx::Node& node) { + const ov::Output data{node.get_ov_inputs().at(0)}; const std::size_t channel_axis{1}; const auto data_shape = data.get_partial_shape(); @@ -36,7 +36,7 @@ ov::OutputVector global_lp_pool(const Node& node) { CHECK_VALID_NODE(node, p_norm >= 0, "Only positive (including zero) values are supported for 'p' attribute."); - ov::OutputVector slices = ov::op::util::split(data, channels_count, channel_axis); + ov::OutputVector slices = ov::op::util::make_split(data, channels_count, channel_axis); for (auto& slice : slices) { // all dimensions except spatial/feature @@ -45,10 +45,11 @@ ov::OutputVector global_lp_pool(const Node& node) { slice = ov::op::util::lp_norm(slice, reduction_axes, static_cast(p_norm)); // output shape is all ones except N channel - Shape output_shape(data_shape.rank().get_length(), 1); + ov::Shape output_shape(data_shape.rank().get_length(), 1); output_shape.at(0) = data_shape[0].get_length(); - const auto reshape_pattern = v0::Constant::create(ov::element::i64, Shape{output_shape.size()}, output_shape); + const auto reshape_pattern = + v0::Constant::create(ov::element::i64, ov::Shape{output_shape.size()}, output_shape); slice = std::make_shared(slice, reshape_pattern, false); } @@ -57,10 +58,7 @@ ov::OutputVector global_lp_pool(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lp_pool.hpp b/src/frontends/onnx/frontend/src/op/lp_pool.hpp index feab084a23f556..f56012c21f3c8e 100644 --- a/src/frontends/onnx/frontend/src/op/lp_pool.hpp +++ b/src/frontends/onnx/frontend/src/op/lp_pool.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Creates OV node representing ONNX GlobalLpPool operator. @@ -30,12 +28,9 @@ namespace set_1 { /// /// \return Vector of nodes containting resulting OV nodes. 
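GlobalLpPool, as lowered above, works channel by channel: split the input into channels_count slices, collapse each slice's non-batch axes into its p-norm, reshape back to an [N, 1, 1, ...] tensor, and concatenate the slices along the channel axis again. In outline, using the same locals and the utils/split.hpp and utils/norm.hpp builders the hunk uses:

    ov::OutputVector slices = ov::op::util::make_split(data, channels_count, channel_axis);
    for (auto& slice : slices) {
        // reduce every axis of the slice except the batch axis
        slice = ov::op::util::lp_norm(slice, reduction_axes, static_cast<std::size_t>(p_norm));
        // restore the rank ([N, 1, 1, ...]) so the slices concatenate cleanly
        slice = std::make_shared<ov::op::v1::Reshape>(slice, reshape_pattern, false);
    }
    return {std::make_shared<ov::op::v0::Concat>(slices, channel_axis)};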
/// -ov::OutputVector global_lp_pool(const Node& node); +ov::OutputVector global_lp_pool(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lrn.cpp b/src/frontends/onnx/frontend/src/op/lrn.cpp index 08e7e5c64c067d..f389ebfb8c6bb0 100644 --- a/src/frontends/onnx/frontend/src/op/lrn.cpp +++ b/src/frontends/onnx/frontend/src/op/lrn.cpp @@ -8,13 +8,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector lrn(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector lrn(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); double alpha = node.get_attribute_value("alpha", 1e-4); double beta = node.get_attribute_value("beta", 0.75); double bias = node.get_attribute_value("bias", 1); @@ -24,10 +24,7 @@ ov::OutputVector lrn(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lrn.hpp b/src/frontends/onnx/frontend/src/op/lrn.hpp index a7f1ca0e4eee13..ce2e048973fe11 100644 --- a/src/frontends/onnx/frontend/src/op/lrn.hpp +++ b/src/frontends/onnx/frontend/src/op/lrn.hpp @@ -4,21 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector lrn(const Node& node); +ov::OutputVector lrn(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lstm.cpp b/src/frontends/onnx/frontend/src/op/lstm.cpp index a3709a11486791..907bb881ca0067 100644 --- a/src/frontends/onnx/frontend/src/op/lstm.cpp +++ b/src/frontends/onnx/frontend/src/op/lstm.cpp @@ -15,15 +15,15 @@ #include "openvino/op/multiply.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/util/common_util.hpp" -#include "ov_models/ov_builders/reshape.hpp" -#include "ov_models/ov_builders/split.hpp" +#include "utils/reshape.hpp" +#include "utils/split.hpp" using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INPUT NODES PARSING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -41,7 +41,7 @@ enum class LSTMInput { struct LSTMNgInputMap { explicit LSTMNgInputMap(const Node& node) { - const auto& ng_inputs = node.get_ng_inputs(); + const auto& ng_inputs = node.get_ov_inputs(); // We have input, output, forget and cell gates constexpr std::size_t gates_count{4}; constexpr std::size_t P_gates_count{3}; @@ -70,24 +70,24 @@ struct LSTMNgInputMap { // Get dimensions needed for default inputs creation auto shape_of_x = std::make_shared(m_input_map[LSTMInput::LSTM_INPUT_X]); - auto axes = 
v0::Constant::create(ov::element::Type_t::i32, Shape{1}, {0}); + auto axes = v0::Constant::create(ov::element::Type_t::i32, ov::Shape{1}, {0}); auto batch_size_node = std::make_shared(shape_of_x, - v0::Constant::create(ov::element::Type_t::i32, Shape{1}, {0}), + v0::Constant::create(ov::element::Type_t::i32, ov::Shape{1}, {0}), axes); auto seq_length_node = std::make_shared(shape_of_x, - v0::Constant::create(ov::element::Type_t::i32, Shape{1}, {1}), + v0::Constant::create(ov::element::Type_t::i32, ov::Shape{1}, {1}), axes); auto shape_of_r = std::make_shared(m_input_map[LSTMInput::LSTM_INPUT_R]); auto num_directions_node = std::make_shared(shape_of_r, - v0::Constant::create(ov::element::Type_t::i32, Shape{1}, {0}), + v0::Constant::create(ov::element::Type_t::i32, ov::Shape{1}, {0}), axes); auto hidden_size_node = std::make_shared(shape_of_r, - v0::Constant::create(ov::element::Type_t::i32, Shape{1}, {2}), + v0::Constant::create(ov::element::Type_t::i32, ov::Shape{1}, {2}), axes); // ------ Optional inputs ------ @@ -96,7 +96,7 @@ struct LSTMNgInputMap { // OpenVino Shape: [num_directions, 4*hidden_size] if (ng_inputs.size() > 3 && !ov::op::util::is_null(ng_inputs.at(3))) { auto bias = ng_inputs.at(3); - auto split_bias = ov::op::util::split(bias, 2, 1); + auto split_bias = ov::op::util::make_split(bias, 2, 1); m_input_map[LSTMInput::LSTM_INPUT_B] = std::make_shared(split_bias.at(0), split_bias.at(1)); m_input_map[LSTMInput::LSTM_INPUT_B] = ov::op::util::convert_lstm_node_format(m_input_map[LSTMInput::LSTM_INPUT_B], @@ -107,11 +107,11 @@ struct LSTMNgInputMap { auto b_shape = std::make_shared( ov::OutputVector{num_directions_node, std::make_shared( - v0::Constant::create(ov::element::Type_t::i64, Shape{1}, {gates_count}), + v0::Constant::create(ov::element::Type_t::i64, ov::Shape{1}, {gates_count}), hidden_size_node)}, 0); m_input_map[LSTMInput::LSTM_INPUT_B] = std::make_shared( - v0::Constant::create(m_input_map[LSTMInput::LSTM_INPUT_X].get_element_type(), Shape{}, {0}), + v0::Constant::create(m_input_map[LSTMInput::LSTM_INPUT_X].get_element_type(), ov::Shape{}, {0}), b_shape); } // `sequence_lens`- The lengths of the sequences in a batch. @@ -132,7 +132,7 @@ struct LSTMNgInputMap { std::make_shared(ov::OutputVector{batch_size_node, num_directions_node, hidden_size_node}, 0); m_input_map[LSTMInput::LSTM_INPUT_INIT_H] = std::make_shared( - v0::Constant::create(m_input_map[LSTMInput::LSTM_INPUT_X].get_element_type(), Shape{}, {0}), + v0::Constant::create(m_input_map[LSTMInput::LSTM_INPUT_X].get_element_type(), ov::Shape{}, {0}), init_h_shape); } // `initial_c` - The initial value of the cell. @@ -145,7 +145,7 @@ struct LSTMNgInputMap { std::make_shared(ov::OutputVector{batch_size_node, num_directions_node, hidden_size_node}, 0); m_input_map[LSTMInput::LSTM_INPUT_INIT_C] = std::make_shared( - v0::Constant::create(m_input_map[LSTMInput::LSTM_INPUT_X].get_element_type(), Shape{}, {0}), + v0::Constant::create(m_input_map[LSTMInput::LSTM_INPUT_X].get_element_type(), ov::Shape{}, {0}), init_c_shape); } // `P` - The weight tensor for peepholes. 
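The `P` input that follows receives the same default-value treatment this hunk applies to B, initial_h and initial_c: when the optional ONNX input is absent, the required shape is assembled at runtime by Concat-ing dimension nodes gathered from the actual inputs, and a scalar zero is broadcast over it. A sketch of the idiom; the broadcast opset is an assumption (v3, the shape-driven variant), and p_gates_times_hidden stands for the Multiply of the gate-count constant and hidden_size_node shown in the hunk:

    // zeros of X's element type, shape [num_directions, P_gates_count * hidden_size]
    auto p_shape = std::make_shared<ov::op::v0::Concat>(
        ov::OutputVector{num_directions_node, p_gates_times_hidden}, 0);
    auto zero      = ov::op::v0::Constant::create(x_type, ov::Shape{}, {0});
    auto p_default = std::make_shared<ov::op::v3::Broadcast>(zero, p_shape);  // assumed v3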
@@ -161,11 +161,11 @@ struct LSTMNgInputMap { auto p_shape = std::make_shared( ov::OutputVector{num_directions_node, std::make_shared( - v0::Constant::create(ov::element::Type_t::i64, Shape{1}, {P_gates_count}), + v0::Constant::create(ov::element::Type_t::i64, ov::Shape{1}, {P_gates_count}), hidden_size_node)}, 0); m_input_map[LSTMInput::LSTM_INPUT_P] = std::make_shared( - v0::Constant::create(m_input_map[LSTMInput::LSTM_INPUT_X].get_element_type(), Shape{}, {0}), + v0::Constant::create(m_input_map[LSTMInput::LSTM_INPUT_X].get_element_type(), ov::Shape{}, {0}), p_shape); m_input_map[LSTMInput::LSTM_INPUT_P].set_names({"P_blank"}); } @@ -190,9 +190,8 @@ struct LSTMAttributes { m_activation_beta{node.get_attribute_value>("activation_beta", std::vector{})}, m_input_forget{static_cast(node.get_attribute_value("input_forget", 0))} { m_clip_threshold = std::abs(m_clip_threshold); - OPENVINO_SUPPRESS_DEPRECATED_START + std::string direction = ov::util::to_lower(node.get_attribute_value("direction", "forward")); - OPENVINO_SUPPRESS_DEPRECATED_END m_direction = ov::as_enum(direction); } @@ -209,7 +208,7 @@ struct LSTMAttributes { } // anonymous namespace namespace set_1 { -ov::OutputVector lstm(const Node& node) { +ov::OutputVector lstm(const ov::frontend::onnx::Node& node) { LSTMNgInputMap input_map{node}; LSTMAttributes attributes{node}; std::shared_ptr lstm_sequence; @@ -257,10 +256,7 @@ ov::OutputVector lstm(const Node& node) { ov::op::util::reorder_axes(Y_c, {1, 0, 2})}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/lstm.hpp b/src/frontends/onnx/frontend/src/op/lstm.hpp index 53498fbde0b69c..c7a94162744582 100644 --- a/src/frontends/onnx/frontend/src/op/lstm.hpp +++ b/src/frontends/onnx/frontend/src/op/lstm.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector lstm(const Node& node); +ov::OutputVector lstm(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/matmul.hpp b/src/frontends/onnx/frontend/src/op/matmul.hpp index 059a4c9ba42586..1389daa09bbabf 100644 --- a/src/frontends/onnx/frontend/src/op/matmul.hpp +++ b/src/frontends/onnx/frontend/src/op/matmul.hpp @@ -4,14 +4,12 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/matmul.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace detail { inline ov::OutputVector matmul(const ov::Output& a, const ov::Output& b) { @@ -19,11 +17,11 @@ inline ov::OutputVector matmul(const ov::Output& a, const ov::Output(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; +inline ov::OutputVector matmul(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph 
-OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/matmul_integer.cpp b/src/frontends/onnx/frontend/src/op/matmul_integer.cpp index 8cc090509da18c..3faa4f23c5ab8f 100644 --- a/src/frontends/onnx/frontend/src/op/matmul_integer.cpp +++ b/src/frontends/onnx/frontend/src/op/matmul_integer.cpp @@ -12,13 +12,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector matmul_integer(const Node& node) { - const ov::OutputVector& inputs = node.get_ng_inputs(); +ov::OutputVector matmul_integer(const ov::frontend::onnx::Node& node) { + const ov::OutputVector& inputs = node.get_ov_inputs(); const auto& A = inputs.at(0); const auto& B = inputs.at(1); @@ -51,6 +51,6 @@ ov::OutputVector matmul_integer(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/matmul_integer.hpp b/src/frontends/onnx/frontend/src/op/matmul_integer.hpp index d386e97c736cb6..855b2d4f9a78ff 100644 --- a/src/frontends/onnx/frontend/src/op/matmul_integer.hpp +++ b/src/frontends/onnx/frontend/src/op/matmul_integer.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Performs ONNX MatMulInteger operation. @@ -19,9 +17,9 @@ namespace set_1 { /// /// \return The vector containing OV nodes producing output of ONNX quantizied /// matrix multiplication integer operation. 
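For context on what matmul_integer builds: per the ONNX specification (not quoted from this file), MatMulInteger multiplies u8/i8 operands that have first been shifted by their per-tensor zero points, accumulating in i32. A hedged sketch of that lowering, with hypothetical local names:

    // widen first so the zero-point subtraction cannot wrap
    auto a32 = std::make_shared<ov::op::v0::Convert>(A, ov::element::i32);
    auto b32 = std::make_shared<ov::op::v0::Convert>(B, ov::element::i32);
    auto za  = std::make_shared<ov::op::v0::Convert>(a_zero_point, ov::element::i32);
    auto zb  = std::make_shared<ov::op::v0::Convert>(b_zero_point, ov::element::i32);
    auto a_shifted = std::make_shared<ov::op::v1::Subtract>(a32, za);
    auto b_shifted = std::make_shared<ov::op::v1::Subtract>(b32, zb);
    return {std::make_shared<ov::op::v0::MatMul>(a_shifted, b_shifted)};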
-ov::OutputVector matmul_integer(const Node& node); +ov::OutputVector matmul_integer(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/max.hpp b/src/frontends/onnx/frontend/src/op/max.hpp index 11cb9476caafd8..421bd52e530945 100644 --- a/src/frontends/onnx/frontend/src/op/max.hpp +++ b/src/frontends/onnx/frontend/src/op/max.hpp @@ -4,33 +4,28 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/maximum.hpp" #include "utils/variadic.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector max(const Node& node) { +inline ov::OutputVector max(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op(node, ov::op::AutoBroadcastType::NONE); } } // namespace set_1 namespace set_8 { -inline ov::OutputVector max(const Node& node) { +inline ov::OutputVector max(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op(node); } } // namespace set_8 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/max_pool.cpp b/src/frontends/onnx/frontend/src/op/max_pool.cpp index bf997d3cbfcb24..78371705456b2d 100644 --- a/src/frontends/onnx/frontend/src/op/max_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/max_pool.cpp @@ -10,12 +10,12 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector max_pool(const Node& node) { +ov::OutputVector max_pool(const ov::frontend::onnx::Node& node) { if (node.get_outputs_size() > 1) { OPENVINO_WARN << "MaxPool: Indices output is not supported and was ignored"; } @@ -27,14 +27,11 @@ ov::OutputVector max_pool(const Node& node) { } // namespace set_1 namespace set_8 { -ov::OutputVector max_pool(const Node& node) { +ov::OutputVector max_pool(const ov::frontend::onnx::Node& node) { return pooling::PoolingFactory(node).make_max_pool_with_indices(); } } // namespace set_8 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/max_pool.hpp b/src/frontends/onnx/frontend/src/op/max_pool.hpp index 6036e0d750ff29..a9e305941e8f27 100644 --- a/src/frontends/onnx/frontend/src/op/max_pool.hpp +++ b/src/frontends/onnx/frontend/src/op/max_pool.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// @@ -21,7 +19,7 @@ namespace set_1 { /// \return The vector containing OV nodes producing output of ONNX MaxPool /// operation. 
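The set_1/set_8 split documented here mirrors the OV ops behind it: the opset-1 translator above warns and drops the ONNX Indices output, while set_8 can honor it because v8::MaxPool itself produces two outputs. A sketch, with the constructor trimmed to its six required arguments (strides and dilations are ov::Strides; the pads and kernel are ov::Shape):

    auto pool = std::make_shared<ov::op::v8::MaxPool>(data, strides, dilations,
                                                      pads_begin, pads_end, kernel);
    auto values  = pool->output(0);  // pooled tensor
    auto indices = pool->output(1);  // counterpart of the ONNX "Indices" output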
/// -ov::OutputVector max_pool(const Node& node); +ov::OutputVector max_pool(const ov::frontend::onnx::Node& node); } // namespace set_1 @@ -34,13 +32,10 @@ namespace set_8 { /// \return The vector containing OV nodes producing output of ONNX MaxPool /// operation. /// -ov::OutputVector max_pool(const Node& node); +ov::OutputVector max_pool(const ov::frontend::onnx::Node& node); } // namespace set_8 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp b/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp index b63c4c3519f059..c19d3c0fee57e6 100644 --- a/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/max_roi_pool.cpp @@ -9,13 +9,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector max_roi_pool(const Node& node) { - const auto& inputs = node.get_ng_inputs(); +ov::OutputVector max_roi_pool(const ov::frontend::onnx::Node& node) { + const auto& inputs = node.get_ov_inputs(); const auto X = inputs.at(0); const auto rois = inputs.at(1); @@ -30,6 +30,6 @@ ov::OutputVector max_roi_pool(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/max_roi_pool.hpp b/src/frontends/onnx/frontend/src/op/max_roi_pool.hpp index 8dee07b34c8c4c..62bc77f0282ebc 100644 --- a/src/frontends/onnx/frontend/src/op/max_roi_pool.hpp +++ b/src/frontends/onnx/frontend/src/op/max_roi_pool.hpp @@ -4,20 +4,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector max_roi_pool(const Node& node); +ov::OutputVector max_roi_pool(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mean.cpp b/src/frontends/onnx/frontend/src/op/mean.cpp index ce2390c056badc..d4ab881bbe34f6 100644 --- a/src/frontends/onnx/frontend/src/op/mean.cpp +++ b/src/frontends/onnx/frontend/src/op/mean.cpp @@ -11,23 +11,20 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector mean(const Node& node) { +ov::OutputVector mean(const ov::frontend::onnx::Node& node) { auto sum = variadic::make_ng_variadic_op(node).front(); - auto count = v0::Constant::create(sum.get_element_type(), ov::Shape{}, {node.get_ng_inputs().size()}); + auto count = v0::Constant::create(sum.get_element_type(), ov::Shape{}, {node.get_ov_inputs().size()}); return {std::make_shared(sum, count)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git 
a/src/frontends/onnx/frontend/src/op/mean.hpp b/src/frontends/onnx/frontend/src/op/mean.hpp index 2f01266af01a30..792ad97b335356 100644 --- a/src/frontends/onnx/frontend/src/op/mean.hpp +++ b/src/frontends/onnx/frontend/src/op/mean.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector mean(const Node& node); +ov::OutputVector mean(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp index f610097ea3e80a..b92acbdc1cca8d 100644 --- a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp +++ b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.cpp @@ -10,13 +10,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector mean_variance_normalization(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector mean_variance_normalization(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); bool across_channels = node.get_attribute_value("across_channels", 0); bool normalize_variance = node.get_attribute_value("normalize_variance", 1); @@ -26,8 +26,8 @@ ov::OutputVector mean_variance_normalization(const Node& node) { } // namespace set_1 namespace set_9 { -ov::OutputVector mean_variance_normalization(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector mean_variance_normalization(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); auto axes = node.get_attribute_value>("axes", {0, 2, 3}); const std::vector normalized_axes = ov::util::normalize_axes(node.get_description(), axes, data.get_partial_shape().rank()); @@ -36,10 +36,7 @@ ov::OutputVector mean_variance_normalization(const Node& node) { } } // namespace set_9 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.hpp b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.hpp index 4340362ff3ec23..4544600b4c0611 100644 --- a/src/frontends/onnx/frontend/src/op/mean_variance_normalization.hpp +++ b/src/frontends/onnx/frontend/src/op/mean_variance_normalization.hpp @@ -4,25 +4,20 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector mean_variance_normalization(const Node& node); +ov::OutputVector mean_variance_normalization(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_9 { -ov::OutputVector mean_variance_normalization(const Node& node); +ov::OutputVector mean_variance_normalization(const ov::frontend::onnx::Node& node); } // namespace set_9 - } // namespace op - 
-} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/min.hpp b/src/frontends/onnx/frontend/src/op/min.hpp index 9f89374aabf92f..66c6c629cea9a9 100644 --- a/src/frontends/onnx/frontend/src/op/min.hpp +++ b/src/frontends/onnx/frontend/src/op/min.hpp @@ -4,32 +4,27 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/minimum.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector min(const Node& node) { +inline ov::OutputVector min(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op(node, ov::op::AutoBroadcastType::NONE); } } // namespace set_1 namespace set_8 { -inline ov::OutputVector min(const Node& node) { +inline ov::OutputVector min(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op(node); } } // namespace set_8 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mod.cpp b/src/frontends/onnx/frontend/src/op/mod.cpp index 53239b372045ce..25891670647d97 100644 --- a/src/frontends/onnx/frontend/src/op/mod.cpp +++ b/src/frontends/onnx/frontend/src/op/mod.cpp @@ -11,14 +11,14 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector mod(const Node& node) { - ov::Output dividend{node.get_ng_inputs().at(0)}; - ov::Output divisor{node.get_ng_inputs().at(1)}; +ov::OutputVector mod(const ov::frontend::onnx::Node& node) { + ov::Output dividend{node.get_ov_inputs().at(0)}; + ov::Output divisor{node.get_ov_inputs().at(1)}; std::int64_t fmod = node.get_attribute_value("fmod", 0); ov::OutputVector output; @@ -36,10 +36,7 @@ ov::OutputVector mod(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mod.hpp b/src/frontends/onnx/frontend/src/op/mod.hpp index 1756664689f151..472bc3693fde78 100644 --- a/src/frontends/onnx/frontend/src/op/mod.hpp +++ b/src/frontends/onnx/frontend/src/op/mod.hpp @@ -4,21 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector mod(const Node& node); +ov::OutputVector mod(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/mul.hpp b/src/frontends/onnx/frontend/src/op/mul.hpp index abb39a7d7b1bf0..ef73e65555bbdf 100644 --- a/src/frontends/onnx/frontend/src/op/mul.hpp +++ b/src/frontends/onnx/frontend/src/op/mul.hpp @@ -4,32 +4,27 @@ #pragma once -#include "openvino/core/deprecated.hpp" 
-OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/multiply.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector mul(const Node& node) { +inline ov::OutputVector mul(const ov::frontend::onnx::Node& node) { return common::handle_opset6_binary_op(node); } } // namespace set_1 namespace set_7 { -inline ov::OutputVector mul(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; +inline ov::OutputVector mul(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } } // namespace set_7 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/neg.hpp b/src/frontends/onnx/frontend/src/op/neg.hpp index 3b30e1b0ca75e4..febdb25cdb32d1 100644 --- a/src/frontends/onnx/frontend/src/op/neg.hpp +++ b/src/frontends/onnx/frontend/src/op/neg.hpp @@ -4,24 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/negative.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector neg(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector neg(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/nms_rotated.hpp b/src/frontends/onnx/frontend/src/op/nms_rotated.hpp index dc61560b61aaa2..b03b47db50da2b 100644 --- a/src/frontends/onnx/frontend/src/op/nms_rotated.hpp +++ b/src/frontends/onnx/frontend/src/op/nms_rotated.hpp @@ -6,18 +6,16 @@ #include -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/nms_rotated.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector nms_rotated(const Node& node) { +inline ov::OutputVector nms_rotated(const ov::frontend::onnx::Node& node) { auto iou_threshold = node.get_attribute_value("iou_threshold"); auto score_threshold = node.get_attribute_value("score_threshold"); auto max_output_boxes_per_class = @@ -25,8 +23,8 @@ inline ov::OutputVector nms_rotated(const Node& node) { auto iou_threshold_const = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {iou_threshold}); auto score_threshold_const = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {score_threshold}); - auto nms = std::make_shared(node.get_ng_inputs().at(0), - node.get_ng_inputs().at(1), + auto nms = std::make_shared(node.get_ov_inputs().at(0), + node.get_ov_inputs().at(1), max_output_boxes_per_class, iou_threshold_const, score_threshold_const, @@ -36,6 +34,6 @@ inline ov::OutputVector nms_rotated(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} 
// namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp b/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp index 50d3832f80b028..1375d089538a1b 100644 --- a/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp +++ b/src/frontends/onnx/frontend/src/op/non_max_suppression.cpp @@ -12,39 +12,39 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector non_max_suppression(const Node& node) { +ov::OutputVector non_max_suppression(const ov::frontend::onnx::Node& node) { using ov::op::util::is_null; // TODO: this op will not be tested until at least // a reference implementation is added - const auto ng_inputs = node.get_ng_inputs(); + const auto ng_inputs = node.get_ov_inputs(); const ov::Output boxes = ng_inputs.at(0); const ov::Output scores = ng_inputs.at(1); ov::Output max_output_boxes_per_class; if (ng_inputs.size() > 2 && !is_null(ng_inputs.at(2))) { - max_output_boxes_per_class = ngraph::onnx_import::reshape::interpret_as_scalar(ng_inputs.at(2)); + max_output_boxes_per_class = ov::frontend::onnx::reshape::interpret_as_scalar(ng_inputs.at(2)); } else { - max_output_boxes_per_class = v0::Constant::create(ov::element::i64, Shape{}, {0}); + max_output_boxes_per_class = v0::Constant::create(ov::element::i64, ov::Shape{}, {0}); } ov::Output iou_threshold; if (ng_inputs.size() > 3 && !is_null(ng_inputs.at(3))) { - iou_threshold = ngraph::onnx_import::reshape::interpret_as_scalar(ng_inputs.at(3)); + iou_threshold = ov::frontend::onnx::reshape::interpret_as_scalar(ng_inputs.at(3)); } else { - iou_threshold = v0::Constant::create(ov::element::f32, Shape{}, {.0f}); + iou_threshold = v0::Constant::create(ov::element::f32, ov::Shape{}, {.0f}); } ov::Output score_threshold; if (ng_inputs.size() > 4 && !is_null(ng_inputs.at(4))) { - score_threshold = ngraph::onnx_import::reshape::interpret_as_scalar(ng_inputs.at(4)); + score_threshold = ov::frontend::onnx::reshape::interpret_as_scalar(ng_inputs.at(4)); } else { - score_threshold = v0::Constant::create(ov::element::f32, Shape{}, {-std::numeric_limits::max()}); + score_threshold = v0::Constant::create(ov::element::f32, ov::Shape{}, {-std::numeric_limits::max()}); } const auto center_point_box = node.get_attribute_value("center_point_box", 0); @@ -66,10 +66,7 @@ ov::OutputVector non_max_suppression(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/non_max_suppression.hpp b/src/frontends/onnx/frontend/src/op/non_max_suppression.hpp index 81f67be2f0d621..471d0b262757aa 100644 --- a/src/frontends/onnx/frontend/src/op/non_max_suppression.hpp +++ b/src/frontends/onnx/frontend/src/op/non_max_suppression.hpp @@ -4,21 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector non_max_suppression(const Node& node); +ov::OutputVector non_max_suppression(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace 
ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/non_zero.cpp b/src/frontends/onnx/frontend/src/op/non_zero.cpp index 3e4bf800852438..60d04b2d0dcd9a 100644 --- a/src/frontends/onnx/frontend/src/op/non_zero.cpp +++ b/src/frontends/onnx/frontend/src/op/non_zero.cpp @@ -8,21 +8,18 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector non_zero(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector non_zero(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); return {std::make_shared(data, ov::element::i64)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/non_zero.hpp b/src/frontends/onnx/frontend/src/op/non_zero.hpp index 819a9fd5e6163f..dbef0efc410974 100644 --- a/src/frontends/onnx/frontend/src/op/non_zero.hpp +++ b/src/frontends/onnx/frontend/src/op/non_zero.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Convert ONNX NonZero operation to an OV node. @@ -19,13 +17,10 @@ namespace set_1 { /// /// \return The vector containing OV nodes producing output of ONNX NonZero /// operation. -ov::OutputVector non_zero(const Node& node); +ov::OutputVector non_zero(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/not.hpp b/src/frontends/onnx/frontend/src/op/not.hpp index 0ea0069dc600f6..3002027518a140 100644 --- a/src/frontends/onnx/frontend/src/op/not.hpp +++ b/src/frontends/onnx/frontend/src/op/not.hpp @@ -4,25 +4,20 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/logical_not.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector logical_not(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector logical_not(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/onehot.cpp b/src/frontends/onnx/frontend/src/op/onehot.cpp index dde66e955ad971..f545b93f0cb767 100644 --- a/src/frontends/onnx/frontend/src/op/onehot.cpp +++ b/src/frontends/onnx/frontend/src/op/onehot.cpp @@ -12,13 +12,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector onehot(const 
Node& node) { - ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector onehot(const ov::frontend::onnx::Node& node) { + ov::OutputVector inputs{node.get_ov_inputs()}; auto indices = std::make_shared(inputs.at(0), ov::element::i64); auto depth = std::make_shared(reshape::interpret_as_scalar(inputs.at(1)), ov::element::i64); // Rank 1 tensor containing exactly two elements: [off_value, on_value] @@ -34,9 +34,7 @@ ov::OutputVector onehot(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/onehot.hpp b/src/frontends/onnx/frontend/src/op/onehot.hpp index 24f610ccc2e3a0..8b8a15ad4b5e45 100644 --- a/src/frontends/onnx/frontend/src/op/onehot.hpp +++ b/src/frontends/onnx/frontend/src/op/onehot.hpp @@ -4,21 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector onehot(const Node& node); +ov::OutputVector onehot(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/or.hpp b/src/frontends/onnx/frontend/src/op/or.hpp index bdcc205d225f78..423b631fdf26df 100644 --- a/src/frontends/onnx/frontend/src/op/or.hpp +++ b/src/frontends/onnx/frontend/src/op/or.hpp @@ -4,25 +4,20 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/logical_or.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector logical_or(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; +inline ov::OutputVector logical_or(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp index b2c1f01342a2c9..7abb16859b9dcc 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.cpp @@ -21,12 +21,13 @@ using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector deformable_conv_2d(const Node& node) { - const ov::OutputVector& inputs = node.get_ng_inputs(); +ov::OutputVector deformable_conv_2d(const ov::frontend::onnx::Node& node) { + const ov::OutputVector& inputs = node.get_ov_inputs(); const auto strides = convpool::get_strides(node); const auto dilations = convpool::get_dilations(node); const auto paddings = convpool::get_pads(node); @@ -47,9 +48,7 @@ ov::OutputVector deformable_conv_2d(const Node& node) { deformable_groups)}; } } // namespace set_1 - } 
// namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp index 254328c1b9cf4c..225b8980873cd7 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/deformable_conv_2d.hpp @@ -16,21 +16,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector deformable_conv_2d(const Node& node); +ov::OutputVector deformable_conv_2d(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp index e030ce962d43a0..a248e79cb30d6c 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.cpp @@ -10,12 +10,13 @@ using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector detection_output(const Node& node) { - auto inputs = node.get_ng_inputs(); +ov::OutputVector detection_output(const ov::frontend::onnx::Node& node) { + auto inputs = node.get_ov_inputs(); auto box_logits = inputs[0]; auto class_preds = inputs[1]; @@ -67,9 +68,7 @@ ov::OutputVector detection_output(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp index edb95a1d3ea6f9..1dca062281e902 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/detection_output.hpp @@ -4,21 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector detection_output(const Node& node); +ov::OutputVector detection_output(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp index 8a6394b59676c8..0a825f8defdcfa 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.cpp @@ -9,14 +9,15 @@ 
using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector experimental_detectron_detection_output(const Node& node) { +ov::OutputVector experimental_detectron_detection_output(const ov::frontend::onnx::Node& node) { using DetectionOutput = v6::ExperimentalDetectronDetectionOutput; - auto inputs = node.get_ng_inputs(); + auto inputs = node.get_ov_inputs(); auto rois = inputs[0]; auto deltas = inputs[1]; auto scores = inputs[2]; @@ -37,9 +38,7 @@ ov::OutputVector experimental_detectron_detection_output(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp index 190a54867734fc..07b09d782c0ae3 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/detection_output.hpp @@ -4,20 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector experimental_detectron_detection_output(const Node& node); +ov::OutputVector experimental_detectron_detection_output(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp index 3fbdc15c41ad21..f8c1e843d44340 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.cpp @@ -10,14 +10,15 @@ using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector experimental_detectron_generate_proposals(const Node& node) { +ov::OutputVector experimental_detectron_generate_proposals(const ov::frontend::onnx::Node& node) { using GenerateProposalsSingleImage = v6::ExperimentalDetectronGenerateProposalsSingleImage; - const auto inputs = node.get_ng_inputs(); + const auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 4, "ExperimentalDetectronGenerateProposalsSingleImage expects 4 " "inputs, received: ", @@ -39,9 +40,7 @@ ov::OutputVector experimental_detectron_generate_proposals(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp 
b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp index deaa1a66c3a459..ef1779bf236aad 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.hpp @@ -4,20 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector experimental_detectron_generate_proposals(const Node& node); +ov::OutputVector experimental_detectron_generate_proposals(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp index ff77748be7d5d8..691a242128410d 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.cpp @@ -9,14 +9,15 @@ using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector experimental_detectron_prior_grid_generator(const Node& node) { +ov::OutputVector experimental_detectron_prior_grid_generator(const ov::frontend::onnx::Node& node) { using PriorGridGenerator = v6::ExperimentalDetectronPriorGridGenerator; - auto inputs = node.get_ng_inputs(); + auto inputs = node.get_ov_inputs(); auto priors = inputs[0]; auto feature_map = inputs[1]; auto im_data = inputs[2]; @@ -31,9 +32,7 @@ ov::OutputVector experimental_detectron_prior_grid_generator(const Node& node) { return {std::make_shared(priors, feature_map, im_data, attrs)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp index 756192d0650663..0f8d1f18da8f3c 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/prior_grid_generator.hpp @@ -4,21 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector experimental_detectron_prior_grid_generator(const Node& node); +ov::OutputVector experimental_detectron_prior_grid_generator(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git 
a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp index f5f6f7c6ff568e..819ffcdf76b636 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.cpp @@ -9,14 +9,15 @@ using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector experimental_detectron_roi_feature_extractor(const Node& node) { +ov::OutputVector experimental_detectron_roi_feature_extractor(const ov::frontend::onnx::Node& node) { using ROIFeatureExtractor = v6::ExperimentalDetectronROIFeatureExtractor; - auto inputs = node.get_ng_inputs(); + auto inputs = node.get_ov_inputs(); ROIFeatureExtractor::Attributes attrs{}; attrs.output_size = node.get_attribute_value("output_size", 7); @@ -28,9 +29,7 @@ ov::OutputVector experimental_detectron_roi_feature_extractor(const Node& node) } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp index baed74ec6a622c..4c0216486d2ac0 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.hpp @@ -4,20 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector experimental_detectron_roi_feature_extractor(const Node& node); +ov::OutputVector experimental_detectron_roi_feature_extractor(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp index 9bf41e5b84b89f..34d5cf5ed33fcb 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.cpp @@ -9,14 +9,15 @@ using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector experimental_detectron_topk_rois(const Node& node) { +ov::OutputVector experimental_detectron_topk_rois(const ov::frontend::onnx::Node& node) { using TopKROIs = v6::ExperimentalDetectronTopKROIs; - auto inputs = node.get_ng_inputs(); + auto inputs = node.get_ov_inputs(); auto input_rois = inputs[0]; auto rois_probs = inputs[1]; auto max_rois = static_cast(node.get_attribute_value("max_rois", 1000)); @@ -25,9 +26,7 @@ ov::OutputVector 
experimental_detectron_topk_rois(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp index b45fbd8b668fc5..0cb0c1fc477dc8 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/experimental_detectron/topk_rios.hpp @@ -4,21 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector experimental_detectron_topk_rois(const Node& node); +ov::OutputVector experimental_detectron_topk_rois(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp index ae49aa07a8405c..2f0ccce704d9ef 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.cpp @@ -10,12 +10,13 @@ using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector fake_quantize(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector fake_quantize(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); const auto X = inputs.at(0); const auto input_low = inputs.at(1); const auto input_high = inputs.at(2); @@ -28,9 +29,7 @@ ov::OutputVector fake_quantize(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp index c9fb9f8c1ddee0..599a6d3c4ef6d3 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/fake_quantize.hpp @@ -4,21 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector fake_quantize(const Node& node); +ov::OutputVector fake_quantize(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp index 05b1ab6b815167..2099c3ca8aa872 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp +++ 
b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.cpp @@ -14,8 +14,9 @@ using namespace ov::op; using ov::Shape; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { @@ -35,8 +36,8 @@ void validate_generate_proposals_inputs(const ov::OutputVector& inputs) { } } // namespace -ov::OutputVector generate_proposals(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector generate_proposals(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); validate_generate_proposals_inputs(inputs); const auto& scores = inputs[0]; // shape [N, A, H, W] @@ -52,10 +53,10 @@ ov::OutputVector generate_proposals(const Node& node) { attrs.normalized = !node.get_attribute_value("legacy_plus_one", true); // Broadcast anchors from [A, 4] to [H, W, A, 4] where [H, W] is taken from scores shape. - const auto zero = v0::Constant::create(ov::element::i64, Shape{1}, {0}); + const auto zero = v0::Constant::create(ov::element::i64, ov::Shape{1}, {0}); const auto scores_shape = std::make_shared(scores); const auto anchors_shape = std::make_shared(anchors); - const auto scores_shape_tail = v0::Constant::create(ov::element::i64, Shape{2}, {2, 3}); + const auto scores_shape_tail = v0::Constant::create(ov::element::i64, ov::Shape{2}, {2, 3}); const auto new_anchors_shape_front = std::make_shared(scores_shape, scores_shape_tail, zero); const auto new_anchors_shape = std::make_shared(ov::OutputVector{new_anchors_shape_front, anchors_shape}, 0); @@ -67,5 +68,6 @@ ov::OutputVector generate_proposals(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.hpp index 2df955bec7c27e..d3d57ae9280e0d 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/generate_proposals.hpp @@ -4,17 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector generate_proposals(const Node& node); +ov::OutputVector generate_proposals(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp index 0b5d3b4ef62b9e..dd92b4a6d1bcfc 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp @@ -11,12 +11,13 @@ using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector group_norm(const Node& node) { - auto inputs = node.get_ng_inputs(); +ov::OutputVector group_norm(const ov::frontend::onnx::Node& node) { + auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 3, "Invalid number of inputs. 
Expected 3, actual " + std::to_string(inputs.size())); @@ -38,9 +39,7 @@ ov::OutputVector group_norm(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp index 9916bef453c53b..d75e2b91d32d66 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.hpp @@ -4,23 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector group_norm(const Node& node); +ov::OutputVector group_norm(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph - -// namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp index 972bc14e00066f..e556580401a502 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.cpp @@ -15,12 +15,13 @@ using namespace ov::op; using ov::Shape; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector normalize(const Node& node) { - auto inputs = node.get_ng_inputs(); +ov::OutputVector normalize(const ov::frontend::onnx::Node& node) { + auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 2, "Invalid number of inputs"); auto data = inputs[0]; @@ -47,13 +48,14 @@ ov::OutputVector normalize(const Node& node) { for (int64_t i = 2; i < data_shape.rank().get_length(); ++i) { weights_shape.push_back(1); } - auto new_shape = std::make_shared(ov::element::i64, Shape{weights_shape.size()}, weights_shape); + auto new_shape = + std::make_shared(ov::element::i64, ov::Shape{weights_shape.size()}, weights_shape); weights = std::make_shared(inputs[1], new_shape, true); } std::shared_ptr axes; if (!across_spatial) { - axes = std::make_shared(ov::element::i64, Shape{1}, std::vector{1}); + axes = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{1}); } else { axes = common::get_monotonic_range_along_node_rank(data, 1); } @@ -63,9 +65,7 @@ ov::OutputVector normalize(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp index 83f9d5ee9c031e..cdce213ac65ea1 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/normalize.hpp @@ -4,20 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace 
set_1 { -ov::OutputVector normalize(const Node& node); +ov::OutputVector normalize(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp index e5e316dc059496..33987df84bcfbd 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.cpp @@ -17,16 +17,17 @@ using namespace ov::op; using ov::Shape; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace detail { namespace { std::shared_ptr make_slice(std::shared_ptr node, int64_t start, int64_t end) { return std::make_shared( node, - v0::Constant::create(ov::element::i64, Shape{1}, std::vector{start}), - v0::Constant::create(ov::element::i64, Shape{1}, std::vector{end}), + v0::Constant::create(ov::element::i64, ov::Shape{1}, std::vector{start}), + v0::Constant::create(ov::element::i64, ov::Shape{1}, std::vector{end}), std::vector{0}, // begin mask std::vector{0}); // end mask } @@ -34,8 +35,8 @@ std::shared_ptr make_slice(std::shared_ptr node, int } // namespace detail namespace set_1 { -ov::OutputVector prior_box(const Node& node) { - auto inputs = node.get_ng_inputs(); +ov::OutputVector prior_box(const ov::frontend::onnx::Node& node) { + auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 2, "Invalid number of inputs"); auto output_shape = std::make_shared(inputs[0]); @@ -58,15 +59,15 @@ ov::OutputVector prior_box(const Node& node) { attrs.density = node.get_attribute_value>("density", {}); attrs.min_max_aspect_ratios_order = node.get_attribute_value("min_max_aspect_ratios_order", 1); - auto axes = v0::Constant::create(ov::element::i64, Shape{1}, std::vector{0}); + auto axes = v0::Constant::create(ov::element::i64, ov::Shape{1}, std::vector{0}); return { std::make_shared(std::make_shared(output_shape_slice, image_shape_slice, attrs), axes)}; } -ov::OutputVector prior_box_clustered(const Node& node) { - auto inputs = node.get_ng_inputs(); +ov::OutputVector prior_box_clustered(const ov::frontend::onnx::Node& node) { + auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 2, "Invalid number of inputs"); auto output_shape_rank = inputs[0].get_partial_shape().rank().get_length(); @@ -97,7 +98,7 @@ ov::OutputVector prior_box_clustered(const Node& node) { attrs.step = node.get_attribute_value("step", 0.0f); attrs.offset = node.get_attribute_value("offset", 0.0f); - auto axes = v0::Constant::create(ov::element::i64, Shape{1}, std::vector{0}); + auto axes = v0::Constant::create(ov::element::i64, ov::Shape{1}, std::vector{0}); return {std::make_shared( std::make_shared(output_shape_slice, image_shape_slice, attrs), @@ -105,9 +106,7 @@ ov::OutputVector prior_box_clustered(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp index 2cbea3190a3025..a52e042dc7fc9e 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp +++ 
b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/prior_box.hpp @@ -4,23 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector prior_box(const Node& node); +ov::OutputVector prior_box(const ov::frontend::onnx::Node& node); -ov::OutputVector prior_box_clustered(const Node& node); +ov::OutputVector prior_box_clustered(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp index 9f1e08c5fa19aa..0e90eee634ec68 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.cpp @@ -12,26 +12,26 @@ using namespace ov::op; using ov::Shape; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector swish(const Node& node) { - ov::OutputVector ng_inputs{node.get_ng_inputs()}; +ov::OutputVector swish(const ov::frontend::onnx::Node& node) { + ov::OutputVector ov_inputs{node.get_ov_inputs()}; ov::Output beta; - if (ng_inputs.size() > 1) { - beta = ngraph::onnx_import::reshape::interpret_as_scalar(ng_inputs.at(1)); + if (ov_inputs.size() > 1) { + beta = ov::frontend::onnx::reshape::interpret_as_scalar(ov_inputs.at(1)); } else { - beta = v0::Constant::create(ov::element::f32, Shape{}, {1.0}); + beta = v0::Constant::create(ov::element::f32, ov::Shape{}, {1.0}); } - return {std::make_shared(ng_inputs.at(0), beta)}; + return {std::make_shared(ov_inputs.at(0), beta)}; } } // namespace set_1 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp index d4b8553474bcc5..3129e194654f4d 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/swish.hpp @@ -4,20 +4,16 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector swish(const Node& node); +ov::OutputVector swish(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/pad.cpp b/src/frontends/onnx/frontend/src/op/pad.cpp index 22cae64bed101d..ed4cbad656cdbe 100644 --- a/src/frontends/onnx/frontend/src/op/pad.cpp +++ b/src/frontends/onnx/frontend/src/op/pad.cpp @@ -9,9 +9,9 @@ #include "openvino/op/constant.hpp" #include "openvino/op/pad.hpp" #include "openvino/op/util/op_types.hpp" -#include "ov_models/ov_builders/split.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" +#include "utils/split.hpp" namespace { ov::op::PadMode get_pad_mode(std::string mode) { 
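Every hunk in this section applies the same mechanical migration, so it is worth spelling out once. A minimal before/after sketch of a translator header under this change; example_op is a hypothetical operator name, not one taken from the diff:

// Before: nGraph-era nesting, wrapped in deprecation suppression, with
// translator bodies reading inputs through node.get_ng_inputs():
//
//   #include "openvino/core/deprecated.hpp"
//   OPENVINO_SUPPRESS_DEPRECATED_START
//   namespace ngraph { namespace onnx_import { namespace op { namespace set_1 {
//   ov::OutputVector example_op(const Node& node);
//   } } } }
//   OPENVINO_SUPPRESS_DEPRECATED_END

// After: the ov::frontend::onnx nesting, no suppression markers, a fully
// qualified Node parameter, and bodies reading inputs through get_ov_inputs().
#include "core/node.hpp"

namespace ov {
namespace frontend {
namespace onnx {
namespace op {
namespace set_1 {
ov::OutputVector example_op(const ov::frontend::onnx::Node& node);
}  // namespace set_1
}  // namespace op
}  // namespace onnx
}  // namespace frontend
}  // namespace ov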
@@ -32,15 +32,15 @@ ov::op::PadMode get_pad_mode(std::string mode) { } // namespace using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector pad(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector pad(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); - const auto data_rank = node.get_ng_inputs().at(0).get_partial_shape().rank(); + const auto data_rank = node.get_ov_inputs().at(0).get_partial_shape().rank(); CHECK_VALID_NODE(node, data_rank.is_static(), "Data rank must be static for pad op"); const auto data_rank_value = data_rank.get_length(); @@ -62,8 +62,8 @@ ov::OutputVector pad(const Node& node) { } // namespace set_1 namespace set_11 { -ov::OutputVector pad(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector pad(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); const auto& data = inputs[0]; const auto& pads = inputs[1]; ov::Output values; @@ -87,7 +87,7 @@ ov::OutputVector pad(const Node& node) { padding_begin = v0::Constant::create(ov::element::i64, ov::Shape{half_size}, padding_begin_values); padding_end = v0::Constant::create(ov::element::i64, ov::Shape{half_size}, padding_end_values); } else { - ov::OutputVector padding = ov::op::util::split(pads, 2, 0); + ov::OutputVector padding = ov::op::util::make_split(pads, 2, 0); padding_begin = padding.at(0); padding_end = padding.at(1); @@ -100,10 +100,7 @@ ov::OutputVector pad(const Node& node) { } } // namespace set_11 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/pad.hpp b/src/frontends/onnx/frontend/src/op/pad.hpp index ac2396db8b3c93..da9da5437f305f 100644 --- a/src/frontends/onnx/frontend/src/op/pad.hpp +++ b/src/frontends/onnx/frontend/src/op/pad.hpp @@ -4,27 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector pad(const Node& node); +ov::OutputVector pad(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_11 { -ov::OutputVector pad(const Node& node); +ov::OutputVector pad(const ov::frontend::onnx::Node& node); } // namespace set_11 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/pow.cpp b/src/frontends/onnx/frontend/src/op/pow.cpp index 8343831cc29563..d09e378172abee 100644 --- a/src/frontends/onnx/frontend/src/op/pow.cpp +++ b/src/frontends/onnx/frontend/src/op/pow.cpp @@ -10,13 +10,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector pow(const Node& node) { - auto inputs = node.get_ng_inputs(); +ov::OutputVector pow(const ov::frontend::onnx::Node& node) { + auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 2, "Power operation requires 2 inputs. 
Got: ", inputs.size()); auto base = inputs[0]; @@ -36,10 +36,7 @@ ov::OutputVector pow(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/pow.hpp b/src/frontends/onnx/frontend/src/op/pow.hpp index 5de29ba1bb2741..6559a683e3f7e3 100644 --- a/src/frontends/onnx/frontend/src/op/pow.hpp +++ b/src/frontends/onnx/frontend/src/op/pow.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector pow(const Node& node); +ov::OutputVector pow(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/prelu.cpp b/src/frontends/onnx/frontend/src/op/prelu.cpp index bbc4c2f3ed8b59..6928c85d41c033 100644 --- a/src/frontends/onnx/frontend/src/op/prelu.cpp +++ b/src/frontends/onnx/frontend/src/op/prelu.cpp @@ -8,23 +8,20 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector prelu(const Node& node) { - ov::OutputVector ng_inputs{node.get_ng_inputs()}; - const auto& data = ng_inputs.at(0); - const auto& slope = ng_inputs.at(1); +ov::OutputVector prelu(const ov::frontend::onnx::Node& node) { + ov::OutputVector ov_inputs{node.get_ov_inputs()}; + const auto& data = ov_inputs.at(0); + const auto& slope = ov_inputs.at(1); return {std::make_shared(data, slope)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/prelu.hpp b/src/frontends/onnx/frontend/src/op/prelu.hpp index 2d0530b57fc88e..e7b92c46cd388d 100644 --- a/src/frontends/onnx/frontend/src/op/prelu.hpp +++ b/src/frontends/onnx/frontend/src/op/prelu.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector prelu(const Node& node); +ov::OutputVector prelu(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp b/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp index 3c86985429dc10..4621111d76decd 100644 --- a/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp +++ b/src/frontends/onnx/frontend/src/op/qlinear_conv.cpp @@ -17,13 +17,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector qlinear_conv(const Node& node) { - const 
ov::OutputVector& inputs = node.get_ng_inputs(); +ov::OutputVector qlinear_conv(const ov::frontend::onnx::Node& node) { + const ov::OutputVector& inputs = node.get_ov_inputs(); auto x = inputs.at(0); auto x_scale = inputs.at(1); @@ -60,10 +60,7 @@ ov::OutputVector qlinear_conv(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/qlinear_conv.hpp b/src/frontends/onnx/frontend/src/op/qlinear_conv.hpp index 37a01970d3b821..b89911a421cd61 100644 --- a/src/frontends/onnx/frontend/src/op/qlinear_conv.hpp +++ b/src/frontends/onnx/frontend/src/op/qlinear_conv.hpp @@ -7,13 +7,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Performs ONNX QLinearConv operation. @@ -22,11 +20,10 @@ namespace set_1 { /// /// \return The vector containing OV nodes producing output of ONNX quantizied /// convolution operation. -ov::OutputVector qlinear_conv(const Node& node); +ov::OutputVector qlinear_conv(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp b/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp index 5244e578f3bfe4..16240b6fc073f4 100644 --- a/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp +++ b/src/frontends/onnx/frontend/src/op/qlinear_matmul.cpp @@ -12,13 +12,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector qlinear_matmul(const Node& node) { - const ov::OutputVector& inputs = node.get_ng_inputs(); +ov::OutputVector qlinear_matmul(const ov::frontend::onnx::Node& node) { + const ov::OutputVector& inputs = node.get_ov_inputs(); const auto& a = inputs.at(0); const auto& a_scale = reshape::interpret_as_scalar(inputs.at(1)); @@ -50,6 +50,6 @@ ov::OutputVector qlinear_matmul(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/qlinear_matmul.hpp b/src/frontends/onnx/frontend/src/op/qlinear_matmul.hpp index 98d3f92f8b0878..6d117a6159715c 100644 --- a/src/frontends/onnx/frontend/src/op/qlinear_matmul.hpp +++ b/src/frontends/onnx/frontend/src/op/qlinear_matmul.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Performs ONNX QLinearMatMul operation. @@ -19,9 +17,9 @@ namespace set_1 { /// /// \return The vector containing OV nodes producing output of ONNX quantizied /// matrix multiplication operation. 
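Beyond the renames, the pad converter above also swaps the deprecated builder ov::op::util::split from the removed ov_models/ov_builders header for ov::op::util::make_split from utils/split.hpp. A minimal sketch, assuming make_split(tensor, num_splits, axis) keeps the contract the call site implies, of the same halving expressed directly with the public Split op:

#include "openvino/op/constant.hpp"
#include "openvino/op/split.hpp"

// Cut the ONNX "pads" vector [x1_begin, x2_begin, ..., x1_end, x2_end, ...]
// into its begin/end halves along axis 0, as make_split(pads, 2, 0) is
// expected to do at the call site above.
ov::OutputVector split_pads(const ov::Output<ov::Node>& pads) {
    const auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
    return std::make_shared<ov::op::v1::Split>(pads, axis, 2)->outputs();
}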
-ov::OutputVector qlinear_matmul(const Node& node); +ov::OutputVector qlinear_matmul(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/quant_conv.cpp b/src/frontends/onnx/frontend/src/op/quant_conv.cpp index a28cc73b362dec..a913399e378393 100644 --- a/src/frontends/onnx/frontend/src/op/quant_conv.cpp +++ b/src/frontends/onnx/frontend/src/op/quant_conv.cpp @@ -48,11 +48,11 @@ namespace ngraph std::shared_ptr make_ng_quant_conv(const Output& data, const Output& filters, - const Strides& strides, - const Strides& filter_dilations, + const ov::Strides& strides, + const ov::Strides& filter_dilations, const CoordinateDiff& padding_below, const CoordinateDiff& padding_above, - const Strides& data_dilations, + const ov::Strides& data_dilations, int groups, const OpScale& op_scale, const OpZeroPoint& op_zero_point, @@ -176,7 +176,7 @@ namespace ngraph } // namespace - ov::OutputVector quant_conv(const Node& node) + ov::OutputVector quant_conv(const ov::frontend::onnx::Node& node) { const ov::OutputVector& inputs = node.get_ng_inputs(); auto data = inputs.at(0); @@ -212,9 +212,9 @@ namespace ngraph "provided group attribute value must be a multiple of filter channels " "count."); - Strides strides = convpool::get_strides(node); - Strides filter_dilations = convpool::get_dilations(node); - Strides data_dilations = Strides(convpool::get_kernel_shape(node).size(), 1UL); + ov::Strides strides = convpool::get_strides(node); + ov::Strides filter_dilations = convpool::get_dilations(node); + ov::Strides data_dilations = Strides(convpool::get_kernel_shape(node).size(), 1UL); auto paddings = convpool::get_pads(node); ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node); CoordinateDiff& padding_below = paddings.first; @@ -268,8 +268,8 @@ namespace ngraph } // namespace op - } // namespace onnx_import - -} // namespace ngraph + } // namespace onnx +} // namespace frontend +} // namespace ov #endif diff --git a/src/frontends/onnx/frontend/src/op/quant_conv.hpp b/src/frontends/onnx/frontend/src/op/quant_conv.hpp index fae89b8d917af1..38135b34649885 100644 --- a/src/frontends/onnx/frontend/src/op/quant_conv.hpp +++ b/src/frontends/onnx/frontend/src/op/quant_conv.hpp @@ -7,13 +7,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Performs ONNX Quant Conv operation. @@ -22,13 +20,10 @@ namespace set_1 { /// /// \return The vector containing Ngraph nodes producing output of ONNX quantizied /// convolution operation. 
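The QLinearMatMul converter above funnels its per-tensor scales through reshape::interpret_as_scalar from utils/reshape.hpp, whose body is not part of this diff. A hedged standalone equivalent (as_scalar is an illustrative name, not the library helper): flatten a one-element tensor to rank 0 by reshaping against an empty pattern.

#include <vector>

#include "openvino/op/constant.hpp"
#include "openvino/op/reshape.hpp"

// A one-element tensor (e.g. shape {1}) reshaped with an empty target
// pattern becomes a scalar (shape {}), which is what the quantized ops expect.
ov::Output<ov::Node> as_scalar(const ov::Output<ov::Node>& single_element) {
    const auto empty_pattern =
        ov::op::v0::Constant::create(ov::element::i64, ov::Shape{0}, std::vector<int64_t>{});
    return std::make_shared<ov::op::v1::Reshape>(single_element, empty_pattern, false);
}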
-ov::OutputVector quant_conv(const Node& node); +ov::OutputVector quant_conv(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/quantize_linear.cpp b/src/frontends/onnx/frontend/src/op/quantize_linear.cpp index 3e2e3e3db47e21..b19f78dbcf0255 100644 --- a/src/frontends/onnx/frontend/src/op/quantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/quantize_linear.cpp @@ -11,16 +11,15 @@ #include "openvino/op/fake_quantize.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/subtract.hpp" -#include "ov_models/ov_builders/reshape.hpp" #include "utils/reshape.hpp" #include "validation_util.hpp" using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace detail { namespace { @@ -28,7 +27,7 @@ ov::Output get_zero_point(const ov::OutputVector& inputs) { if (inputs.size() > 2) { return inputs.at(2); } else { - return std::make_shared(ov::element::u8, Shape{1}, std::uint8_t(0)); + return std::make_shared(ov::element::u8, ov::Shape{1}, std::uint8_t(0)); } } @@ -71,20 +70,20 @@ std::tuple, std::shared_ptr> get_output_band // should be aligned switch (destination_type) { case ov::element::i8: - output_low = std::make_shared(data_type, Shape{1}, -128); - output_high = std::make_shared(data_type, Shape{1}, 127); + output_low = std::make_shared(data_type, ov::Shape{1}, -128); + output_high = std::make_shared(data_type, ov::Shape{1}, 127); break; case ov::element::u8: - output_low = std::make_shared(data_type, Shape{1}, 0); - output_high = std::make_shared(data_type, Shape{1}, 255); + output_low = std::make_shared(data_type, ov::Shape{1}, 0); + output_high = std::make_shared(data_type, ov::Shape{1}, 255); break; case ov::element::i16: - output_low = std::make_shared(data_type, Shape{1}, -32768); - output_high = std::make_shared(data_type, Shape{1}, 32767); + output_low = std::make_shared(data_type, ov::Shape{1}, -32768); + output_high = std::make_shared(data_type, ov::Shape{1}, 32767); break; case ov::element::u16: - output_low = std::make_shared(data_type, Shape{1}, 0); - output_high = std::make_shared(data_type, Shape{1}, 65535); + output_low = std::make_shared(data_type, ov::Shape{1}, 0); + output_high = std::make_shared(data_type, ov::Shape{1}, 65535); break; default: OPENVINO_THROW("Unsupported element type for QuantizeLinear"); @@ -140,8 +139,8 @@ std::shared_ptr make_fake_quantize(const ov::Output& y_scale } // namespace detail namespace set_1 { -ov::OutputVector quantize_linear(const Node& node) { - ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector quantize_linear(const ov::frontend::onnx::Node& node) { + ov::OutputVector inputs{node.get_ov_inputs()}; auto x = inputs.at(0); auto y_scale = inputs.at(1); auto y_zero_point = detail::get_zero_point(inputs); @@ -161,7 +160,7 @@ ov::OutputVector quantize_linear(ov::Output x, ov::Output y_zero_point, int64_t axis, Node node) { - namespace detail = ngraph::onnx_import::op::detail; + namespace detail = ov::frontend::onnx::op::detail; x = detail::validate_data(node, x); detail::validate_zero_point_type(node, y_zero_point); @@ -183,7 +182,7 @@ ov::OutputVector quantize_linear(ov::Output x, " must match the number of respective input data axis size: ", 
x_shape[axis]); - Shape target_shape(x_shape.rank().get_length(), 1); + ov::Shape target_shape(x_shape.rank().get_length(), 1); target_shape[axis] = static_cast(x_shape[axis].get_length()); y_scale = ov::op::util::reshape(y_scale, target_shape); @@ -198,7 +197,7 @@ ov::OutputVector quantize_linear(ov::Output x, " must match the number of respective input data axis size: ", x_shape[axis]); - Shape target_shape(x_shape.rank().get_length(), 1); + ov::Shape target_shape(x_shape.rank().get_length(), 1); target_shape[axis] = static_cast(x_shape[axis].get_length()); y_zero_point = ov::op::util::reshape(y_zero_point, target_shape); @@ -208,8 +207,8 @@ ov::OutputVector quantize_linear(ov::Output x, } } // namespace -ov::OutputVector quantize_linear(const Node& node) { - const ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector quantize_linear(const ov::frontend::onnx::Node& node) { + const ov::OutputVector inputs{node.get_ov_inputs()}; FRONT_END_GENERAL_CHECK(2 <= inputs.size() && inputs.size() <= 3, "The QuantizeLinear op expects 2 required and one optional " @@ -229,10 +228,7 @@ ov::OutputVector quantize_linear(const Node& node) { return quantize_linear(x, scale, zero_point, node.get_attribute_value("axis", 1), node); } } // namespace set_13 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/quantize_linear.hpp b/src/frontends/onnx/frontend/src/op/quantize_linear.hpp index 480a4aee6863ab..6460545b937618 100644 --- a/src/frontends/onnx/frontend/src/op/quantize_linear.hpp +++ b/src/frontends/onnx/frontend/src/op/quantize_linear.hpp @@ -4,14 +4,12 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace detail { std::shared_ptr make_fake_quantize(const ov::Output& y_scale, @@ -19,19 +17,16 @@ std::shared_ptr make_fake_quantize(const ov::Output& y_scale const ov::Output& data); } namespace set_1 { -ov::OutputVector quantize_linear(const Node& node); +ov::OutputVector quantize_linear(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_13 { -ov::OutputVector quantize_linear(const Node& node); +ov::OutputVector quantize_linear(const ov::frontend::onnx::Node& node); } // namespace set_13 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_normal.cpp b/src/frontends/onnx/frontend/src/op/random_normal.cpp index 7c3e0c853af4d7..8dde43a12fde30 100644 --- a/src/frontends/onnx/frontend/src/op/random_normal.cpp +++ b/src/frontends/onnx/frontend/src/op/random_normal.cpp @@ -10,25 +10,27 @@ #include "utils/common.hpp" using namespace ov::op; +using ::ONNX_NAMESPACE::TensorProto_DataType; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector random_normal(const Node& node) { +ov::OutputVector random_normal(const ov::frontend::onnx::Node& node) { CHECK_VALID_NODE(node, node.has_attribute("shape"), "RandomNormal operator must specify a 'shape' attribute."); const auto dtype 
= - node.get_attribute_value("dtype", static_cast(ONNX_NAMESPACE::TensorProto_DataType_FLOAT)); + node.get_attribute_value("dtype", + static_cast(TensorProto_DataType::TensorProto_DataType_FLOAT)); const auto target_type = common::get_ov_element_type(dtype); const auto mean = node.get_attribute_value("mean", 0.0f); const auto scale = node.get_attribute_value("scale", 1.0f); - auto scale_node = v0::Constant::create(target_type, Shape{1}, {scale}); - auto mean_node = v0::Constant::create(target_type, Shape{1}, {mean}); + auto scale_node = v0::Constant::create(target_type, ov::Shape{1}, {scale}); + auto mean_node = v0::Constant::create(target_type, ov::Shape{1}, {mean}); const auto seed = node.get_attribute_value("seed", 0); const auto shape = node.get_attribute_as_constant>("shape"); @@ -38,6 +40,6 @@ ov::OutputVector random_normal(const Node& node) { } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_normal.hpp b/src/frontends/onnx/frontend/src/op/random_normal.hpp index 4c15b5b3fa5bb2..c6f4b6b86ebcfd 100644 --- a/src/frontends/onnx/frontend/src/op/random_normal.hpp +++ b/src/frontends/onnx/frontend/src/op/random_normal.hpp @@ -4,20 +4,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector random_normal(const Node& node); +ov::OutputVector random_normal(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_normal_like.cpp b/src/frontends/onnx/frontend/src/op/random_normal_like.cpp index 0eb38789c89ccf..b600469ede33da 100644 --- a/src/frontends/onnx/frontend/src/op/random_normal_like.cpp +++ b/src/frontends/onnx/frontend/src/op/random_normal_like.cpp @@ -11,14 +11,14 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector random_normal_like(const Node& node) { - const auto input = node.get_ng_inputs().at(0); +ov::OutputVector random_normal_like(const ov::frontend::onnx::Node& node) { + const auto input = node.get_ov_inputs().at(0); ov::element::Type target_type; if (node.has_attribute("dtype")) { @@ -33,8 +33,8 @@ ov::OutputVector random_normal_like(const Node& node) { const auto mean = node.get_attribute_value("mean", 0.0f); const auto scale = node.get_attribute_value("scale", 1.0f); - auto scale_node = v0::Constant::create(target_type, Shape{1}, {scale}); - auto mean_node = v0::Constant::create(target_type, Shape{1}, {mean}); + auto scale_node = v0::Constant::create(target_type, ov::Shape{1}, {scale}); + auto mean_node = v0::Constant::create(target_type, ov::Shape{1}, {mean}); auto res = ov::frontend::make_random_normal(shape, target_type, mean_node, scale_node, seed); return res.first; @@ -42,6 +42,6 @@ ov::OutputVector random_normal_like(const Node& node) { } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // 
namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_normal_like.hpp b/src/frontends/onnx/frontend/src/op/random_normal_like.hpp index 5b3d294952d872..2359e962bec1df 100644 --- a/src/frontends/onnx/frontend/src/op/random_normal_like.hpp +++ b/src/frontends/onnx/frontend/src/op/random_normal_like.hpp @@ -4,20 +4,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector random_normal_like(const Node& node); +ov::OutputVector random_normal_like(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_uniform.cpp b/src/frontends/onnx/frontend/src/op/random_uniform.cpp index 6db3217a38b8a2..54094940845e6c 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform.cpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform.cpp @@ -9,18 +9,20 @@ #include "utils/common.hpp" using namespace ov::op; +using ::ONNX_NAMESPACE::TensorProto_DataType; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector random_uniform(const Node& node) { +ov::OutputVector random_uniform(const ov::frontend::onnx::Node& node) { CHECK_VALID_NODE(node, node.has_attribute("shape"), "RandomUniform operator must specify a 'shape' attribute."); const auto dtype = - node.get_attribute_value("dtype", static_cast(ONNX_NAMESPACE::TensorProto_DataType_FLOAT)); + node.get_attribute_value("dtype", + static_cast(TensorProto_DataType::TensorProto_DataType_FLOAT)); const auto high_const = node.get_attribute_as_constant("high", 1.0f); const auto low_const = node.get_attribute_as_constant("low", 0.0f); const auto seed = node.get_attribute_value("seed", 0.0f); @@ -41,6 +43,6 @@ ov::OutputVector random_uniform(const Node& node) { } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_uniform.hpp b/src/frontends/onnx/frontend/src/op/random_uniform.hpp index 685abf1b9ff792..a3ff62f13bb72d 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform.hpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform.hpp @@ -4,20 +4,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector random_uniform(const Node& node); +ov::OutputVector random_uniform(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp index 3c15a177b23548..d626017b2a185b 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp +++ 
b/src/frontends/onnx/frontend/src/op/random_uniform_like.cpp @@ -11,14 +11,14 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector random_uniform_like(const Node& node) { - ov::OutputVector inputs{node.get_ng_inputs()}; +ov::OutputVector random_uniform_like(const ov::frontend::onnx::Node& node) { + ov::OutputVector inputs{node.get_ov_inputs()}; const auto input = inputs.at(0); ov::element::Type target_type; @@ -48,6 +48,6 @@ ov::OutputVector random_uniform_like(const Node& node) { } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/random_uniform_like.hpp b/src/frontends/onnx/frontend/src/op/random_uniform_like.hpp index e01cb2a48c6aed..8f5c52f8775b90 100644 --- a/src/frontends/onnx/frontend/src/op/random_uniform_like.hpp +++ b/src/frontends/onnx/frontend/src/op/random_uniform_like.hpp @@ -4,20 +4,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector random_uniform_like(const Node& node); +ov::OutputVector random_uniform_like(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/range.cpp b/src/frontends/onnx/frontend/src/op/range.cpp index c38e946d6f334f..3c61c45672b84b 100644 --- a/src/frontends/onnx/frontend/src/op/range.cpp +++ b/src/frontends/onnx/frontend/src/op/range.cpp @@ -10,13 +10,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector range(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector range(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); CHECK_VALID_NODE(node, inputs.size() >= 3, "Minimum 3 inputs are required. 
Got: ", inputs.size()); ov::Output start{inputs[0]}; @@ -43,7 +43,6 @@ ov::OutputVector range(const Node& node) { } // namespace set_1 } // namespace op -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/range.hpp b/src/frontends/onnx/frontend/src/op/range.hpp index 0e43e3651eaf6a..f5117a55f4f1eb 100644 --- a/src/frontends/onnx/frontend/src/op/range.hpp +++ b/src/frontends/onnx/frontend/src/op/range.hpp @@ -4,20 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector range(const Node& node); +ov::OutputVector range(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reciprocal.cpp b/src/frontends/onnx/frontend/src/op/reciprocal.cpp index 34504bb2fd2c97..e9d275c72bfa81 100644 --- a/src/frontends/onnx/frontend/src/op/reciprocal.cpp +++ b/src/frontends/onnx/frontend/src/op/reciprocal.cpp @@ -10,23 +10,20 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector reciprocal(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector reciprocal(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); - auto one_node = v0::Constant::create(data.get_element_type(), Shape{}, {1}); + auto one_node = v0::Constant::create(data.get_element_type(), ov::Shape{}, {1}); return {std::make_shared(one_node, data)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reciprocal.hpp b/src/frontends/onnx/frontend/src/op/reciprocal.hpp index 618d45ee0bc3bf..9f59c8c0d3fd65 100644 --- a/src/frontends/onnx/frontend/src/op/reciprocal.hpp +++ b/src/frontends/onnx/frontend/src/op/reciprocal.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector reciprocal(const Node& node); +ov::OutputVector reciprocal(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reduce.cpp b/src/frontends/onnx/frontend/src/op/reduce.cpp index 363519f803c2ab..3322af52e76447 100644 --- a/src/frontends/onnx/frontend/src/op/reduce.cpp +++ b/src/frontends/onnx/frontend/src/op/reduce.cpp @@ -26,33 +26,33 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { 
namespace { std::shared_ptr get_dynamic_all_axes_range(const Node& node) { - const auto input = node.get_ng_inputs().at(0); + const auto input = node.get_ov_inputs().at(0); const auto shape_of_input = std::make_shared(input); - const auto scalar = v0::Constant::create(ov::element::i32, Shape{1}, {0}); + const auto scalar = v0::Constant::create(ov::element::i32, ov::Shape{1}, {0}); const auto rank_of_input = std::make_shared(shape_of_input); const auto rank_of_input_scalar = std::make_shared(rank_of_input, scalar); - const auto start = v0::Constant::create(ov::element::i32, Shape{}, {0}); - const auto step = v0::Constant::create(ov::element::i32, Shape{}, {1}); + const auto start = v0::Constant::create(ov::element::i32, ov::Shape{}, {0}); + const auto step = v0::Constant::create(ov::element::i32, ov::Shape{}, {1}); return std::make_shared(start, rank_of_input_scalar, step, ov::element::i64); } std::shared_ptr get_reduction_axes_from_input(const Node& node) { const std::int64_t noop_with_empty_axes = node.get_attribute_value("noop_with_empty_axes", 0); - const auto input = node.get_ng_inputs().at(0); - if (node.get_ng_inputs().size() > 1) { - const auto reduction_axes = node.get_ng_inputs().at(1); + const auto input = node.get_ov_inputs().at(0); + if (node.get_ov_inputs().size() > 1) { + const auto reduction_axes = node.get_ov_inputs().at(1); const auto reduction_axes_rank = reduction_axes.get_partial_shape().rank(); FRONT_END_GENERAL_CHECK(reduction_axes.get_partial_shape().is_static(), "The axes tensor's shape needs to be known(static). Node: ", node.get_description()); - if (reduction_axes_rank.get_length() != 0 && reduction_axes.get_shape() != Shape{0}) { + if (reduction_axes_rank.get_length() != 0 && reduction_axes.get_shape() != ov::Shape{0}) { return reduction_axes.get_node_shared_ptr(); } } @@ -67,11 +67,11 @@ std::shared_ptr get_reduction_axes_from_input(const Node& node) { std::shared_ptr get_reduction_axes_from_attr(const Node& node) { auto reduction_axes = node.get_attribute_value>("axes", {}); - const auto input_rank = node.get_ng_inputs().at(0).get_partial_shape().rank(); + const auto input_rank = node.get_ov_inputs().at(0).get_partial_shape().rank(); if (reduction_axes.empty()) { if (input_rank.is_static()) { - reduction_axes = onnx_import::common::get_monotonic_range(input_rank.get_length()); + reduction_axes = ov::frontend::onnx::common::get_monotonic_range(input_rank.get_length()); } else { return get_dynamic_all_axes_range(node); } @@ -87,7 +87,7 @@ std::shared_ptr get_reduction_axes_from_attr(const Node& node) { ")"); } - return v0::Constant::create(ov::element::i64, Shape{reduction_axes.size()}, reduction_axes); + return v0::Constant::create(ov::element::i64, ov::Shape{reduction_axes.size()}, reduction_axes); } template @@ -106,62 +106,59 @@ std::shared_ptr make_ng_reduction_op(const Node& node, } // namespace namespace set_13 { -ov::OutputVector reduce_sum(const Node& node) { - return {make_ng_reduction_op(node, node.get_ng_inputs().at(0), false)}; +ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node) { + return {make_ng_reduction_op(node, node.get_ov_inputs().at(0), false)}; } } // namespace set_13 namespace set_1 { -ov::OutputVector reduce_log_sum(const Node& node) { - const ov::Output sum_node = make_ng_reduction_op(node, node.get_ng_inputs().at(0)); +ov::OutputVector reduce_log_sum(const ov::frontend::onnx::Node& node) { + const ov::Output sum_node = make_ng_reduction_op(node, node.get_ov_inputs().at(0)); return {std::make_shared(sum_node)}; } 
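The get_dynamic_all_axes_range helper above builds the default reduction axes [0, rank) entirely in-graph, so it also works when the input rank is only known at runtime. A standalone sketch of the same subgraph; the opset versions of ShapeOf, Squeeze and Range are inferred here and may differ from the actual instantiation:

#include <memory>
#include <openvino/op/constant.hpp>
#include <openvino/op/range.hpp>
#include <openvino/op/shape_of.hpp>
#include <openvino/op/squeeze.hpp>

using namespace ov;

std::shared_ptr<Node> all_axes_range(const Output<Node>& input) {
    // 1-D i64 tensor holding the runtime shape of `input`.
    const auto shape = std::make_shared<op::v3::ShapeOf>(input);
    // ShapeOf of the shape yields {rank} as a 1-element tensor;
    // squeezing axis 0 turns it into a scalar.
    const auto zero = op::v0::Constant::create(element::i32, Shape{1}, {0});
    const auto rank = std::make_shared<op::v3::ShapeOf>(shape);
    const auto rank_scalar = std::make_shared<op::v0::Squeeze>(rank, zero);
    const auto start = op::v0::Constant::create(element::i32, Shape{}, {0});
    const auto step = op::v0::Constant::create(element::i32, Shape{}, {1});
    // Range(0, rank, 1) enumerates every axis index.
    return std::make_shared<op::v4::Range>(start, rank_scalar, step, element::i64);
}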
-ov::OutputVector reduce_log_sum_exp(const Node& node) { - const auto exp_node = std::make_shared(node.get_ng_inputs().at(0)); +ov::OutputVector reduce_log_sum_exp(const ov::frontend::onnx::Node& node) { + const auto exp_node = std::make_shared(node.get_ov_inputs().at(0)); const ov::Output sum_node = make_ng_reduction_op(node, exp_node); return {std::make_shared(sum_node)}; } -ov::OutputVector reduce_l1(const Node& node) { - return {make_ng_reduction_op(node, node.get_ng_inputs().at(0))}; +ov::OutputVector reduce_l1(const ov::frontend::onnx::Node& node) { + return {make_ng_reduction_op(node, node.get_ov_inputs().at(0))}; } -ov::OutputVector reduce_l2(const Node& node) { - return {make_ng_reduction_op(node, node.get_ng_inputs().at(0))}; +ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node) { + return {make_ng_reduction_op(node, node.get_ov_inputs().at(0))}; } -ov::OutputVector reduce_max(const Node& node) { - return {make_ng_reduction_op(node, node.get_ng_inputs().at(0))}; +ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node) { + return {make_ng_reduction_op(node, node.get_ov_inputs().at(0))}; } -ov::OutputVector reduce_mean(const Node& node) { - return {make_ng_reduction_op(node, node.get_ng_inputs().at(0))}; +ov::OutputVector reduce_mean(const ov::frontend::onnx::Node& node) { + return {make_ng_reduction_op(node, node.get_ov_inputs().at(0))}; } -ov::OutputVector reduce_min(const Node& node) { - return {make_ng_reduction_op(node, node.get_ng_inputs().at(0))}; +ov::OutputVector reduce_min(const ov::frontend::onnx::Node& node) { + return {make_ng_reduction_op(node, node.get_ov_inputs().at(0))}; } -ov::OutputVector reduce_prod(const Node& node) { - return {make_ng_reduction_op(node, node.get_ng_inputs().at(0))}; +ov::OutputVector reduce_prod(const ov::frontend::onnx::Node& node) { + return {make_ng_reduction_op(node, node.get_ov_inputs().at(0))}; } -ov::OutputVector reduce_sum(const Node& node) { - return {make_ng_reduction_op(node, node.get_ng_inputs().at(0))}; +ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node) { + return {make_ng_reduction_op(node, node.get_ov_inputs().at(0))}; } -ov::OutputVector reduce_sum_square(const Node& node) { - const auto input = ov::Output{node.get_ng_inputs().at(0)}; +ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node) { + const auto input = ov::Output{node.get_ov_inputs().at(0)}; const auto square_node = std::make_shared(input, input); return {make_ng_reduction_op(node, square_node)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reduce.hpp b/src/frontends/onnx/frontend/src/op/reduce.hpp index 38eeca9de52de5..5c5f4a4a1ae50c 100644 --- a/src/frontends/onnx/frontend/src/op/reduce.hpp +++ b/src/frontends/onnx/frontend/src/op/reduce.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_13 { /// \brief Compute the sum of the input tensor's elements along the provided @@ -25,7 +23,7 @@ namespace set_13 { /// /// \return The OV node equivalent of the ONNX operation. 
/// -ov::OutputVector reduce_sum(const Node& node); +ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node); } // namespace set_13 namespace set_1 { /// \brief Compute the log sum of the input tensor's elements along the @@ -40,7 +38,7 @@ namespace set_1 { /// /// \return The OV node equivalent of the ONNX operation. /// -ov::OutputVector reduce_log_sum(const Node& node); +ov::OutputVector reduce_log_sum(const ov::frontend::onnx::Node& node); /// \brief Compute the log sum exponent of the input tensor's elements along /// the provided axes. @@ -54,7 +52,7 @@ ov::OutputVector reduce_log_sum(const Node& node); /// /// \return The OV node equivalent of the ONNX operation. /// -ov::OutputVector reduce_log_sum_exp(const Node& node); +ov::OutputVector reduce_log_sum_exp(const ov::frontend::onnx::Node& node); /// \brief Compute the L1 norm of the input tensor's element along the provided /// axes. @@ -68,7 +66,7 @@ ov::OutputVector reduce_log_sum_exp(const Node& node); /// /// \return The OV node equivalent of the ONNX operation. /// -ov::OutputVector reduce_l1(const Node& node); +ov::OutputVector reduce_l1(const ov::frontend::onnx::Node& node); /// \brief Compute the L2 norm of the input tensor's element along the provided /// axes. @@ -82,7 +80,7 @@ ov::OutputVector reduce_l1(const Node& node); /// /// \return The OV node equivalent of the ONNX operation. /// -ov::OutputVector reduce_l2(const Node& node); +ov::OutputVector reduce_l2(const ov::frontend::onnx::Node& node); /// \brief Compute the maximum value of the input tensor's elements along the /// provided axes. @@ -96,7 +94,7 @@ ov::OutputVector reduce_l2(const Node& node); /// /// \return The OV node equivalent of the ONNX operation. /// -ov::OutputVector reduce_max(const Node& node); +ov::OutputVector reduce_max(const ov::frontend::onnx::Node& node); /// \brief Compute the mean value of the input tensor's elements along the /// provided axes. @@ -110,7 +108,7 @@ ov::OutputVector reduce_max(const Node& node); /// /// \return The OV node equivalent of the ONNX operation. /// -ov::OutputVector reduce_mean(const Node& node); +ov::OutputVector reduce_mean(const ov::frontend::onnx::Node& node); /// \brief Compute the minimum value of the input tensor's elements along the /// provided axes. @@ -124,7 +122,7 @@ ov::OutputVector reduce_mean(const Node& node); /// /// \return The OV node equivalent of the ONNX operation. /// -ov::OutputVector reduce_min(const Node& node); +ov::OutputVector reduce_min(const ov::frontend::onnx::Node& node); /// \brief Compute the product of the input tensor's elements along the /// provided axes. @@ -138,7 +136,7 @@ ov::OutputVector reduce_min(const Node& node); /// /// \return The OV node equivalent of the ONNX operation. /// -ov::OutputVector reduce_prod(const Node& node); +ov::OutputVector reduce_prod(const ov::frontend::onnx::Node& node); /// \brief Compute the sum of the input tensor's elements along the provided /// axes. @@ -152,7 +150,7 @@ ov::OutputVector reduce_prod(const Node& node); /// /// \return The OV node equivalent of the ONNX operation. /// -ov::OutputVector reduce_sum(const Node& node); +ov::OutputVector reduce_sum(const ov::frontend::onnx::Node& node); /// \brief Compute the sum square of the input tensor's element along the /// provided axes. @@ -166,13 +164,10 @@ ov::OutputVector reduce_sum(const Node& node); /// /// \return The OV node equivalent of the ONNX operation. 
/// -ov::OutputVector reduce_sum_square(const Node& node); +ov::OutputVector reduce_sum_square(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/relu.hpp b/src/frontends/onnx/frontend/src/op/relu.hpp index cfbb4b3afc0bf8..e4afc4f7515fcb 100644 --- a/src/frontends/onnx/frontend/src/op/relu.hpp +++ b/src/frontends/onnx/frontend/src/op/relu.hpp @@ -4,26 +4,21 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/relu.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector relu(const Node& node) { - ov::OutputVector ng_inputs{node.get_ng_inputs()}; - return {std::make_shared(ng_inputs.at(0))}; +inline ov::OutputVector relu(const ov::frontend::onnx::Node& node) { + ov::OutputVector ov_inputs{node.get_ov_inputs()}; + return {std::make_shared(ov_inputs.at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reshape.cpp b/src/frontends/onnx/frontend/src/op/reshape.cpp index 76ece6d66bfa89..c2abfc0e96630f 100644 --- a/src/frontends/onnx/frontend/src/op/reshape.cpp +++ b/src/frontends/onnx/frontend/src/op/reshape.cpp @@ -10,20 +10,20 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector reshape(const Node& node) { - ov::OutputVector ng_inputs{node.get_ng_inputs()}; - const auto data = ng_inputs.at(0); +ov::OutputVector reshape(const ov::frontend::onnx::Node& node) { + ov::OutputVector ov_inputs{node.get_ov_inputs()}; + const auto data = ov_inputs.at(0); ov::Output pattern; bool special_zero = true; // Since opset 5 the target shape is provided as input - if (ng_inputs.size() == 2) { - pattern = ng_inputs.at(1); + if (ov_inputs.size() == 2) { + pattern = ov_inputs.at(1); } else { // Added in onnx reshape version 14 special_zero = !node.get_attribute_value("allowzero", 0); @@ -35,10 +35,7 @@ ov::OutputVector reshape(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reshape.hpp b/src/frontends/onnx/frontend/src/op/reshape.hpp index effab548e37d7b..16315e2872c394 100644 --- a/src/frontends/onnx/frontend/src/op/reshape.hpp +++ b/src/frontends/onnx/frontend/src/op/reshape.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// @@ -20,13 +18,10 @@ namespace set_1 { /// /// \return OV node representing this operation. 
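Note the inversion in the Reshape converter above: ONNX allowzero=0 (the default) becomes special_zero=true on ov::op::v1::Reshape, meaning a 0 in the target shape copies the corresponding input dimension instead of being taken literally. A small sketch:

#include <memory>
#include <openvino/op/constant.hpp>
#include <openvino/op/parameter.hpp>
#include <openvino/op/reshape.hpp>

using namespace ov;

// With special_zero=true the leading 0 copies input dim 0 and -1 is
// inferred, so a {2, 3, 4} input reshapes to {2, 6, 2}.
auto data    = std::make_shared<op::v0::Parameter>(element::f32, Shape{2, 3, 4});
auto pattern = op::v0::Constant::create(element::i64, Shape{3}, {0, -1, 2});
auto out     = std::make_shared<op::v1::Reshape>(data, pattern, /*special_zero=*/true);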
/// -ov::OutputVector reshape(const Node& node); +ov::OutputVector reshape(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/resize.cpp b/src/frontends/onnx/frontend/src/op/resize.cpp index 1727c1274e631e..82f5f0f78c509c 100644 --- a/src/frontends/onnx/frontend/src/op/resize.cpp +++ b/src/frontends/onnx/frontend/src/op/resize.cpp @@ -11,9 +11,9 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace { static const std::unordered_set supported_modes = {"nearest", "linear", "cubic"}; @@ -59,7 +59,7 @@ static int mode_as_int(const std::map& converting_map, const s using InterpolateAttrs = v11::Interpolate::InterpolateAttrs; -InterpolateAttrs get_resize_attrs(const onnx_import::Node& node) { +InterpolateAttrs get_resize_attrs(const ov::frontend::onnx::Node& node) { auto get_str_attr = [&node](const std::string& name, const std::string& default_value) { return node.get_attribute_value(name, default_value); }; @@ -112,10 +112,10 @@ InterpolateAttrs get_resize_attrs(const onnx_import::Node& node) { } // namespace namespace set_11 { -ov::OutputVector resize(const onnx_import::Node& node) { +ov::OutputVector resize(const ov::frontend::onnx::Node& node) { // roi input (inputs.at(2)) is ignored because it is used only // in "tf_crop_and_resize" which is not handled now - const auto inputs = node.get_ng_inputs(); + const auto inputs = node.get_ov_inputs(); const auto& data = inputs.at(0); auto attrs = get_resize_attrs(node); @@ -133,8 +133,8 @@ ov::OutputVector resize(const onnx_import::Node& node) { } // namespace set_11 namespace set_1 { -ov::OutputVector resize(const onnx_import::Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector resize(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); const auto& data = inputs.at(0); const auto& scales = inputs.at(1); @@ -152,6 +152,6 @@ ov::OutputVector resize(const onnx_import::Node& node) { } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/resize.hpp b/src/frontends/onnx/frontend/src/op/resize.hpp index c3287f3b7c212d..625afedbf5d5c4 100644 --- a/src/frontends/onnx/frontend/src/op/resize.hpp +++ b/src/frontends/onnx/frontend/src/op/resize.hpp @@ -4,26 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector resize(const Node& node); +ov::OutputVector resize(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_11 { -ov::OutputVector resize(const Node& node); +ov::OutputVector resize(const ov::frontend::onnx::Node& node); } } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reverse_sequence.cpp b/src/frontends/onnx/frontend/src/op/reverse_sequence.cpp index 
89d3e027992fa1..127bab0bc4f148 100644 --- a/src/frontends/onnx/frontend/src/op/reverse_sequence.cpp +++ b/src/frontends/onnx/frontend/src/op/reverse_sequence.cpp @@ -13,17 +13,17 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector reverse_sequence(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector reverse_sequence(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); - const auto sequence_lengths = node.get_ng_inputs().at(1); + const auto sequence_lengths = node.get_ov_inputs().at(1); // OpenVINO supports only int32 type of sequence_lengths - const auto sequence_lengths_i32 = std::make_shared(node.get_ng_inputs().at(1), ov::element::i32); + const auto sequence_lengths_i32 = std::make_shared(node.get_ov_inputs().at(1), ov::element::i32); const auto data_rank = data.get_partial_shape().rank(); const auto batch_axis = node.get_attribute_value("batch_axis", 1); @@ -48,10 +48,7 @@ ov::OutputVector reverse_sequence(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/reverse_sequence.hpp b/src/frontends/onnx/frontend/src/op/reverse_sequence.hpp index 87d3b7d5d31eb2..be9891adee8b83 100644 --- a/src/frontends/onnx/frontend/src/op/reverse_sequence.hpp +++ b/src/frontends/onnx/frontend/src/op/reverse_sequence.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector reverse_sequence(const Node& node); +ov::OutputVector reverse_sequence(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/rnn.cpp b/src/frontends/onnx/frontend/src/op/rnn.cpp index 129ea3588177bd..2d9c6aa9287601 100644 --- a/src/frontends/onnx/frontend/src/op/rnn.cpp +++ b/src/frontends/onnx/frontend/src/op/rnn.cpp @@ -5,19 +5,19 @@ #include "op/rnn.hpp" #include "openvino/op/rnn_sequence.hpp" -#include "ov_models/ov_builders/reshape.hpp" #include "utils/recurrent.hpp" +#include "utils/reshape.hpp" using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { namespace { struct RNNInputMap : public recurrent::OpInputMap { - RNNInputMap(const onnx_import::Node& node, std::size_t gates_count) : OpInputMap(node, gates_count) {} + RNNInputMap(const ov::frontend::onnx::Node& node, std::size_t gates_count) : OpInputMap(node, gates_count) {} virtual ~RNNInputMap() = default; }; @@ -29,7 +29,7 @@ struct RNNAttributes : public recurrent::OpAttributes { }; } // namespace -ov::OutputVector rnn(const Node& node) { +ov::OutputVector rnn(const ov::frontend::onnx::Node& node) { constexpr std::size_t gates_count = 1; RNNInputMap input_map{node, gates_count}; RNNAttributes attributes{node}; @@ -54,6 +54,6 @@ ov::OutputVector rnn(const 
Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/rnn.hpp b/src/frontends/onnx/frontend/src/op/rnn.hpp index d5cbec3e48111f..926b08d382d332 100644 --- a/src/frontends/onnx/frontend/src/op/rnn.hpp +++ b/src/frontends/onnx/frontend/src/op/rnn.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector rnn(const Node& node); +ov::OutputVector rnn(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/roi_align.cpp b/src/frontends/onnx/frontend/src/op/roi_align.cpp index 38524125f3045c..3c811defbe0757 100644 --- a/src/frontends/onnx/frontend/src/op/roi_align.cpp +++ b/src/frontends/onnx/frontend/src/op/roi_align.cpp @@ -9,15 +9,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { - namespace set_1 { - -ov::OutputVector roi_align(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector roi_align(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 3, "The RoiAlign operator expects 3 inputs. Got: ", inputs.size()); @@ -45,8 +43,8 @@ ov::OutputVector roi_align(const Node& node) { } } // namespace set_1 namespace set_16 { -ov::OutputVector roi_align(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector roi_align(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); FRONT_END_GENERAL_CHECK(inputs.size() == 3, "The RoiAlign operator expects 3 inputs. 
Got: ", inputs.size()); @@ -80,10 +78,7 @@ ov::OutputVector roi_align(const Node& node) { aligned_mode)}; } } // namespace set_16 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/roi_align.hpp b/src/frontends/onnx/frontend/src/op/roi_align.hpp index 37643c272e6f9b..5b4c0305ef2093 100644 --- a/src/frontends/onnx/frontend/src/op/roi_align.hpp +++ b/src/frontends/onnx/frontend/src/op/roi_align.hpp @@ -4,27 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector roi_align(const Node& node); +ov::OutputVector roi_align(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_16 { -ov::OutputVector roi_align(const Node& node); +ov::OutputVector roi_align(const ov::frontend::onnx::Node& node); } // namespace set_16 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/round.cpp b/src/frontends/onnx/frontend/src/op/round.cpp index 8007b76398afa8..035443b22eb576 100644 --- a/src/frontends/onnx/frontend/src/op/round.cpp +++ b/src/frontends/onnx/frontend/src/op/round.cpp @@ -11,19 +11,16 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector round(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), v5::Round::RoundMode::HALF_TO_EVEN)}; +ov::OutputVector round(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0), v5::Round::RoundMode::HALF_TO_EVEN)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/round.hpp b/src/frontends/onnx/frontend/src/op/round.hpp index bef374ae9edad6..8cabccbe4fd616 100644 --- a/src/frontends/onnx/frontend/src/op/round.hpp +++ b/src/frontends/onnx/frontend/src/op/round.hpp @@ -7,22 +7,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector round(const Node& node); +ov::OutputVector round(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scan.cpp b/src/frontends/onnx/frontend/src/op/scan.cpp index 12903aaf70fbdd..d91c90b047a2ba 100644 --- a/src/frontends/onnx/frontend/src/op/scan.cpp +++ b/src/frontends/onnx/frontend/src/op/scan.cpp @@ -16,9 +16,9 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace { @@ -114,11 +114,11 
@@ ov::OutputVector scan_to_tensor_iterator(const ov::OutputVector& node_inputs, return outputs; } -ov::OutputVector import_onnx_scan(const Node& node, +ov::OutputVector import_onnx_scan(const ov::frontend::onnx::Node& node, int64_t default_axis, int64_t in_offset, std::string&& in_directions_attr_name) { - const auto& node_inputs = node.get_ng_inputs(); + const auto& node_inputs = node.get_ov_inputs(); const auto& subgraphs = node.get_subgraphs(); auto body_graph = subgraphs.at("body"); @@ -158,10 +158,10 @@ ov::OutputVector import_onnx_scan(const Node& node, namespace set_1 { -ov::OutputVector scan(const Node& node) { +ov::OutputVector scan(const ov::frontend::onnx::Node& node) { // ONNX Scan-8 can have optional `sequence_lens` input, // and sequence scan_input axis is assumed to be always 1. - OPENVINO_ASSERT(ov::op::util::is_null(node.get_ng_inputs().at(0)), + OPENVINO_ASSERT(ov::op::util::is_null(node.get_ov_inputs().at(0)), node.get_description(), " ONNX Scan-8 `sequence_lens` input is not supported. "); return import_onnx_scan(node, 1, 1, "directions"); @@ -171,7 +171,7 @@ ov::OutputVector scan(const Node& node) { namespace set_9 { -ov::OutputVector scan(const Node& node) { +ov::OutputVector scan(const ov::frontend::onnx::Node& node) { // Since ONNX Scan-9 the optional `sequence_lens input` was removed, // new attributes to specify input/output axes and directions were added. return import_onnx_scan(node, 0, 0, "scan_input_directions"); @@ -179,6 +179,6 @@ ov::OutputVector scan(const Node& node) { } // namespace set_9 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scan.hpp b/src/frontends/onnx/frontend/src/op/scan.hpp index 9b45e0aeeef237..41cb62fa51dcc6 100644 --- a/src/frontends/onnx/frontend/src/op/scan.hpp +++ b/src/frontends/onnx/frontend/src/op/scan.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Creates OpenVino node representing ONNX Scan operator. @@ -22,16 +20,13 @@ namespace set_1 { /// /// \return ov::OutputVector of resulting OpenVino nodes. 
/// -ov::OutputVector scan(const Node& node); +ov::OutputVector scan(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_9 { -ov::OutputVector scan(const Node& node); +ov::OutputVector scan(const ov::frontend::onnx::Node& node); } // namespace set_9 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scatter_elements.cpp b/src/frontends/onnx/frontend/src/op/scatter_elements.cpp index 993dca05f49e56..b7704868049235 100644 --- a/src/frontends/onnx/frontend/src/op/scatter_elements.cpp +++ b/src/frontends/onnx/frontend/src/op/scatter_elements.cpp @@ -9,15 +9,15 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector scatter_elements(const Node& node) { - const auto data = node.get_ng_inputs().at(0); - const auto indices = node.get_ng_inputs().at(1); - const auto updates = node.get_ng_inputs().at(2); +ov::OutputVector scatter_elements(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); + const auto indices = node.get_ov_inputs().at(1); + const auto updates = node.get_ov_inputs().at(2); const auto axis_node = node.get_attribute_as_constant("axis", 0); v12::ScatterElementsUpdate::Reduction reduction_ov; @@ -47,10 +47,7 @@ ov::OutputVector scatter_elements(const Node& node) { return {std::make_shared(data, indices, updates, axis_node, reduction_ov)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scatter_elements.hpp b/src/frontends/onnx/frontend/src/op/scatter_elements.hpp index fa6dfcdba5e4f9..4b7e0e608a9a7d 100644 --- a/src/frontends/onnx/frontend/src/op/scatter_elements.hpp +++ b/src/frontends/onnx/frontend/src/op/scatter_elements.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector scatter_elements(const Node& node); +ov::OutputVector scatter_elements(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scatter_nd.cpp b/src/frontends/onnx/frontend/src/op/scatter_nd.cpp index 70be4366aeb431..2718ea391d7ed6 100644 --- a/src/frontends/onnx/frontend/src/op/scatter_nd.cpp +++ b/src/frontends/onnx/frontend/src/op/scatter_nd.cpp @@ -12,16 +12,16 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector scatter_nd(const Node& node) { - ov::OutputVector ng_inputs{node.get_ng_inputs()}; - auto data = ng_inputs.at(0); - auto indices = ng_inputs.at(1); - auto updates = ng_inputs.at(2); +ov::OutputVector scatter_nd(const ov::frontend::onnx::Node& node) { + ov::OutputVector ov_inputs{node.get_ov_inputs()}; + auto data = 
ov_inputs.at(0); + auto indices = ov_inputs.at(1); + auto updates = ov_inputs.at(2); if (node.has_attribute("reduction")) { const auto reduction = node.get_attribute_value("reduction", "none"); CHECK_VALID_NODE(node, @@ -34,10 +34,7 @@ ov::OutputVector scatter_nd(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/scatter_nd.hpp b/src/frontends/onnx/frontend/src/op/scatter_nd.hpp index edf94ed7b0c995..4ba3e2a8c7df00 100644 --- a/src/frontends/onnx/frontend/src/op/scatter_nd.hpp +++ b/src/frontends/onnx/frontend/src/op/scatter_nd.hpp @@ -7,22 +7,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector scatter_nd(const Node& node); +ov::OutputVector scatter_nd(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/selu.cpp b/src/frontends/onnx/frontend/src/op/selu.cpp index fe5b56bb06de48..3345c7e8c9bcd6 100644 --- a/src/frontends/onnx/frontend/src/op/selu.cpp +++ b/src/frontends/onnx/frontend/src/op/selu.cpp @@ -9,13 +9,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector selu(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector selu(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); auto alpha = node.get_attribute_value("alpha", 1.67326319217681884765625); auto gamma = node.get_attribute_value("gamma", 1.05070102214813232421875); @@ -27,10 +27,7 @@ ov::OutputVector selu(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/selu.hpp b/src/frontends/onnx/frontend/src/op/selu.hpp index 752a4283ec827b..82ed712ce69bac 100644 --- a/src/frontends/onnx/frontend/src/op/selu.hpp +++ b/src/frontends/onnx/frontend/src/op/selu.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector selu(const Node& node); +ov::OutputVector selu(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/shape.cpp b/src/frontends/onnx/frontend/src/op/shape.cpp index 87598efd2575b5..e9d87145697b99 100644 --- a/src/frontends/onnx/frontend/src/op/shape.cpp +++ b/src/frontends/onnx/frontend/src/op/shape.cpp @@ -8,19 +8,19 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { 
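For the Selu converter above, the ONNX default attributes are passed into ov::op::v0::Selu as constant inputs; a sketch of the resulting subgraph, assuming f32 data:

#include <memory>
#include <openvino/op/constant.hpp>
#include <openvino/op/parameter.hpp>
#include <openvino/op/selu.hpp>

using namespace ov;

// ONNX Selu defaults: alpha ~ 1.6732632, gamma ~ 1.0507010.
auto data  = std::make_shared<op::v0::Parameter>(element::f32, Shape{4});
auto alpha = op::v0::Constant::create(element::f32, Shape{}, {1.67326319217681884765625});
auto gamma = op::v0::Constant::create(element::f32, Shape{}, {1.05070102214813232421875});
auto selu  = std::make_shared<op::v0::Selu>(data, alpha, gamma);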
-namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector shape(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector shape(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); return {std::make_shared(data)}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/shape.hpp b/src/frontends/onnx/frontend/src/op/shape.hpp index df36350baf32d4..51ba245c1efb39 100644 --- a/src/frontends/onnx/frontend/src/op/shape.hpp +++ b/src/frontends/onnx/frontend/src/op/shape.hpp @@ -4,20 +4,18 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector shape(const Node& node); +ov::OutputVector shape(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/shrink.cpp b/src/frontends/onnx/frontend/src/op/shrink.cpp index 4ffb0b2073d4f2..61bbd031f41562 100644 --- a/src/frontends/onnx/frontend/src/op/shrink.cpp +++ b/src/frontends/onnx/frontend/src/op/shrink.cpp @@ -15,13 +15,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector shrink(const Node& node) { - const auto input = node.get_ng_inputs().at(0); +ov::OutputVector shrink(const ov::frontend::onnx::Node& node) { + const auto input = node.get_ov_inputs().at(0); const float bias = node.get_attribute_value("bias", 0.0f); const float lambd = node.get_attribute_value("lambd", 0.5f); @@ -65,10 +65,7 @@ ov::OutputVector shrink(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/shrink.hpp b/src/frontends/onnx/frontend/src/op/shrink.hpp index 39037059e06122..8e04c0f4fbab15 100644 --- a/src/frontends/onnx/frontend/src/op/shrink.hpp +++ b/src/frontends/onnx/frontend/src/op/shrink.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief ONNX Shrink operator @@ -19,12 +17,9 @@ namespace set_1 { /// Input values greater or equal to '-lambd' and less or equal to 'lambd' are /// zeroed-out. 'Bias' is added to the values that are less than '-lambd' /// and subtracted from values greater than 'lambd'. 
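Restated as plain scalar code, the Shrink behaviour described above, with the converter's defaults bias=0.0f and lambd=0.5f, is:

// Scalar model of ONNX Shrink semantics as documented in shrink.hpp above.
float shrink(float x, float bias = 0.0f, float lambd = 0.5f) {
    if (x < -lambd) return x + bias;  // below -lambd: bias is added
    if (x > lambd)  return x - bias;  // above  lambd: bias is subtracted
    return 0.0f;                      // inside [-lambd, lambd]: zeroed out
}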
-ov::OutputVector shrink(const Node& node); +ov::OutputVector shrink(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sigmoid.hpp b/src/frontends/onnx/frontend/src/op/sigmoid.hpp index 903986936d3fee..4cc334c6034e66 100644 --- a/src/frontends/onnx/frontend/src/op/sigmoid.hpp +++ b/src/frontends/onnx/frontend/src/op/sigmoid.hpp @@ -4,25 +4,20 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/sigmoid.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector sigmoid(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector sigmoid(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sign.hpp b/src/frontends/onnx/frontend/src/op/sign.hpp index 8da5fd25b7ed92..4a8ae114b7a30e 100644 --- a/src/frontends/onnx/frontend/src/op/sign.hpp +++ b/src/frontends/onnx/frontend/src/op/sign.hpp @@ -4,24 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/sign.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector sign(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector sign(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sin.hpp b/src/frontends/onnx/frontend/src/op/sin.hpp index 122540ad91e4d1..741f8a5295018d 100644 --- a/src/frontends/onnx/frontend/src/op/sin.hpp +++ b/src/frontends/onnx/frontend/src/op/sin.hpp @@ -4,24 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/sin.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector sin(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector sin(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sinh.hpp b/src/frontends/onnx/frontend/src/op/sinh.hpp index 91be05aa26b257..58c17e38a3021b 100644 --- a/src/frontends/onnx/frontend/src/op/sinh.hpp +++ b/src/frontends/onnx/frontend/src/op/sinh.hpp @@ -4,24 +4,19 @@ #pragma once -#include 
"openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/sinh.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector sinh(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +inline ov::OutputVector sinh(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/size.cpp b/src/frontends/onnx/frontend/src/op/size.cpp index df4b33431767b9..759e7041baa40c 100644 --- a/src/frontends/onnx/frontend/src/op/size.cpp +++ b/src/frontends/onnx/frontend/src/op/size.cpp @@ -4,29 +4,27 @@ #include "op/size.hpp" +#include "openvino/core/shape.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/reduce_prod.hpp" #include "openvino/op/shape_of.hpp" using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector size(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector size(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); auto axes = v0::Constant::create(ov::element::i32, ov::Shape{}, {0}); auto input_shape = std::make_shared(data); return {std::make_shared(input_shape, axes)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/size.hpp b/src/frontends/onnx/frontend/src/op/size.hpp index 003b16dcd986d4..8511b705f298fc 100644 --- a/src/frontends/onnx/frontend/src/op/size.hpp +++ b/src/frontends/onnx/frontend/src/op/size.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector size(const Node& node); +ov::OutputVector size(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/slice.cpp b/src/frontends/onnx/frontend/src/op/slice.cpp index 9baf2eef917df5..aba0f14b41cbbb 100644 --- a/src/frontends/onnx/frontend/src/op/slice.cpp +++ b/src/frontends/onnx/frontend/src/op/slice.cpp @@ -11,18 +11,17 @@ #include "openvino/op/slice.hpp" using namespace ov::op; - using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_10 { -ov::OutputVector slice(const Node& node) { +ov::OutputVector slice(const ov::frontend::onnx::Node& node) { using ov::op::util::is_null; - ov::OutputVector inputs{node.get_ng_inputs()}; + ov::OutputVector inputs{node.get_ov_inputs()}; const auto& data = inputs.at(0); const auto& starts = inputs.at(1); const auto& ends = inputs.at(2); @@ -48,26 +47,27 @@ 
ov::OutputVector slice(const Node& node) { } // namespace set_10 namespace set_1 { -ov::OutputVector slice(const Node& node) { - ov::Output data = node.get_ng_inputs().at(0); +ov::OutputVector slice(const ov::frontend::onnx::Node& node) { + ov::Output data = node.get_ov_inputs().at(0); const auto starts_atr = node.get_attribute_value>("starts"); const auto ends = node.get_attribute_as_constant>("ends"); - const auto starts = std::make_shared(ov::element::i64, Shape{starts_atr.size()}, starts_atr); + const auto starts = std::make_shared(ov::element::i64, ov::Shape{starts_atr.size()}, starts_atr); auto axes_atr = node.get_attribute_value>("axes", std::vector()); - const auto steps = - v0::Constant::create(ov::element::i64, Shape{starts_atr.size()}, std::vector(starts_atr.size(), 1)); + const auto steps = v0::Constant::create(ov::element::i64, + ov::Shape{starts_atr.size()}, + std::vector(starts_atr.size(), 1)); if (axes_atr.empty()) { return {std::make_shared(data, starts, ends, steps)}; } else { - const auto& axes = std::make_shared(ov::element::i64, Shape{axes_atr.size()}, axes_atr); + const auto& axes = std::make_shared(ov::element::i64, ov::Shape{axes_atr.size()}, axes_atr); return {std::make_shared(data, starts, ends, steps, axes)}; } } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/slice.hpp b/src/frontends/onnx/frontend/src/op/slice.hpp index 9c8f0b355dc471..7f9e7aae4f097b 100644 --- a/src/frontends/onnx/frontend/src/op/slice.hpp +++ b/src/frontends/onnx/frontend/src/op/slice.hpp @@ -4,27 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_10 { -ov::OutputVector slice(const Node& node); +ov::OutputVector slice(const ov::frontend::onnx::Node& node); } // namespace set_10 namespace set_1 { -ov::OutputVector slice(const Node& node); +ov::OutputVector slice(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softmax.cpp b/src/frontends/onnx/frontend/src/op/softmax.cpp index ddae97314dda3a..eb13ca9af0e72e 100644 --- a/src/frontends/onnx/frontend/src/op/softmax.cpp +++ b/src/frontends/onnx/frontend/src/op/softmax.cpp @@ -9,13 +9,13 @@ #include "openvino/op/reshape.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/softmax.hpp" -#include "ov_models/ov_builders/reshape.hpp" +#include "utils/reshape.hpp" using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace { std::shared_ptr onnx_softmax(const ov::Output data, const int64_t axis) { const auto coerced_data = ov::op::util::flatten(data, static_cast(axis)); @@ -28,8 +28,8 @@ std::shared_ptr onnx_softmax(const ov::Output data, const in namespace op { namespace set_1 { -ov::OutputVector softmax(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector softmax(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); const auto data_rank = 
data.get_partial_shape().rank(); FRONT_END_GENERAL_CHECK(data_rank.is_static(), "ONNX Softmax data rank needs to be known (static)"); @@ -51,8 +51,8 @@ ov::OutputVector softmax(const Node& node) { } } // namespace set_1 namespace set_11 { -ov::OutputVector softmax(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector softmax(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); const auto data_rank = data.get_partial_shape().rank(); FRONT_END_GENERAL_CHECK(data_rank.is_static(), "ONNX Softmax data rank needs to be known (static)"); @@ -74,8 +74,8 @@ ov::OutputVector softmax(const Node& node) { } } // namespace set_11 namespace set_13 { -ov::OutputVector softmax(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector softmax(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); const auto axis = node.get_attribute_value("axis", -1); @@ -83,6 +83,6 @@ ov::OutputVector softmax(const Node& node) { } } // namespace set_13 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softmax.hpp b/src/frontends/onnx/frontend/src/op/softmax.hpp index 89cf70c2f1c136..cded80d80ec19c 100644 --- a/src/frontends/onnx/frontend/src/op/softmax.hpp +++ b/src/frontends/onnx/frontend/src/op/softmax.hpp @@ -4,31 +4,27 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector softmax(const Node& node); +ov::OutputVector softmax(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_11 { -ov::OutputVector softmax(const Node& node); +ov::OutputVector softmax(const ov::frontend::onnx::Node& node); } // namespace set_11 namespace set_13 { -ov::OutputVector softmax(const Node& node); +ov::OutputVector softmax(const ov::frontend::onnx::Node& node); } // namespace set_13 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softplus.cpp b/src/frontends/onnx/frontend/src/op/softplus.cpp index 5578fd3ccc49b7..3b5d246e6995e7 100644 --- a/src/frontends/onnx/frontend/src/op/softplus.cpp +++ b/src/frontends/onnx/frontend/src/op/softplus.cpp @@ -8,21 +8,18 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector softplus(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector softplus(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); return {std::make_shared(data)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softplus.hpp b/src/frontends/onnx/frontend/src/op/softplus.hpp index 131c0fe1dc3253..911dcf6b9e2d83 100644 --- a/src/frontends/onnx/frontend/src/op/softplus.hpp +++ b/src/frontends/onnx/frontend/src/op/softplus.hpp @@ -4,22 +4,17 
@@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector softplus(const Node& node); +ov::OutputVector softplus(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softsign.cpp b/src/frontends/onnx/frontend/src/op/softsign.cpp index 8dbfc9b0bf5f7d..8b7bf672c0daaa 100644 --- a/src/frontends/onnx/frontend/src/op/softsign.cpp +++ b/src/frontends/onnx/frontend/src/op/softsign.cpp @@ -8,16 +8,16 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector softsign(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; +ov::OutputVector softsign(const ov::frontend::onnx::Node& node) { + return {std::make_shared(node.get_ov_inputs().at(0))}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/softsign.hpp b/src/frontends/onnx/frontend/src/op/softsign.hpp index 6948422d7c646f..931dbda573fd23 100644 --- a/src/frontends/onnx/frontend/src/op/softsign.hpp +++ b/src/frontends/onnx/frontend/src/op/softsign.hpp @@ -4,19 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector softsign(const Node& node); +ov::OutputVector softsign(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/space_to_depth.cpp b/src/frontends/onnx/frontend/src/op/space_to_depth.cpp index a1cb1e645de097..a286e26355d134 100644 --- a/src/frontends/onnx/frontend/src/op/space_to_depth.cpp +++ b/src/frontends/onnx/frontend/src/op/space_to_depth.cpp @@ -9,24 +9,21 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector space_to_depth(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector space_to_depth(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); const auto& shape = data.get_partial_shape(); FRONT_END_GENERAL_CHECK(shape.rank().is_static() && shape.rank().get_length() == 4, "Input must be 4-dimensional"); std::size_t block_size = node.get_attribute_value("blocksize"); const auto mode = v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; - return ov::OutputVector{std::make_shared(data, mode, block_size)}; + return {std::make_shared(data, mode, block_size)}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // 
namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/space_to_depth.hpp b/src/frontends/onnx/frontend/src/op/space_to_depth.hpp index f00e4632f90a0f..f39b6261778135 100644 --- a/src/frontends/onnx/frontend/src/op/space_to_depth.hpp +++ b/src/frontends/onnx/frontend/src/op/space_to_depth.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Permutes input tensor blocks of spatial data into depth. @@ -19,12 +17,9 @@ namespace set_1 { /// /// \return ov::OutputVector containing Tensor with shape: /// [N, C * blocksize * blocksize, H / blocksize, W / blocksize] -ov::OutputVector space_to_depth(const Node& node); +ov::OutputVector space_to_depth(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/split.cpp b/src/frontends/onnx/frontend/src/op/split.cpp index ec217e86932eb5..e62607bec2c24a 100644 --- a/src/frontends/onnx/frontend/src/op/split.cpp +++ b/src/frontends/onnx/frontend/src/op/split.cpp @@ -6,38 +6,38 @@ #include "openvino/op/constant.hpp" #include "openvino/op/variadic_split.hpp" -#include "ov_models/ov_builders/split.hpp" +#include "utils/split.hpp" using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector split(const Node& node) { - const auto input = node.get_ng_inputs().at(0); +ov::OutputVector split(const ov::frontend::onnx::Node& node) { + const auto input = node.get_ov_inputs().at(0); const auto axis = node.get_attribute_value<std::int64_t>("axis", 0); if (node.has_attribute("split")) { const auto splits = node.get_attribute_value<std::vector<std::int64_t>>("split"); - return ov::op::util::split(input, splits, axis); + return ov::op::util::make_split(input, splits, axis); } else { const auto outputs_number = node.get_output_names().size(); - return ov::op::util::split(input, outputs_number, axis); + return ov::op::util::make_split(input, outputs_number, axis); } } } // namespace set_1 namespace set_13 { -ov::OutputVector split(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector split(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); const auto axis = node.get_attribute_value<std::int64_t>("axis", 0); if (inputs.size() < 2) { const auto outputs_number = node.get_output_names().size(); - return ov::op::util::split(inputs.at(0), outputs_number, axis); + return ov::op::util::make_split(inputs.at(0), outputs_number, axis); } else { const auto axis_node = v0::Constant::create(ov::element::Type_t::i64, ov::Shape{}, {axis}); return {std::make_shared<v1::VariadicSplit>(inputs.at(0), axis_node, inputs.at(1))->outputs()}; @@ -46,8 +46,6 @@ ov::OutputVector split(const Node& node) { } // namespace set_13 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/split.hpp b/src/frontends/onnx/frontend/src/op/split.hpp index 2386410eed5f42..e618f0de65bf1b 100644 ---
a/src/frontends/onnx/frontend/src/op/split.hpp +++ b/src/frontends/onnx/frontend/src/op/split.hpp @@ -4,27 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector split(const Node& node); +ov::OutputVector split(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_13 { -ov::OutputVector split(const Node& node); +ov::OutputVector split(const ov::frontend::onnx::Node& node); } // namespace set_13 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sqrt.hpp b/src/frontends/onnx/frontend/src/op/sqrt.hpp index 50a0c320e1ca2b..bad982c34059c0 100644 --- a/src/frontends/onnx/frontend/src/op/sqrt.hpp +++ b/src/frontends/onnx/frontend/src/op/sqrt.hpp @@ -4,25 +4,20 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/sqrt.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector sqrt(const Node& node) { - return {std::make_shared<ov::op::v0::Sqrt>(node.get_ng_inputs().at(0))}; +inline ov::OutputVector sqrt(const ov::frontend::onnx::Node& node) { + return {std::make_shared<ov::op::v0::Sqrt>(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/squeeze.cpp b/src/frontends/onnx/frontend/src/op/squeeze.cpp index 5c03b8b128f986..a922c8a98a6596 100644 --- a/src/frontends/onnx/frontend/src/op/squeeze.cpp +++ b/src/frontends/onnx/frontend/src/op/squeeze.cpp @@ -9,13 +9,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector squeeze(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector squeeze(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); const auto axes = node.get_attribute_value<std::vector<std::int64_t>>("axes", {}); if (axes.empty()) { @@ -29,8 +29,8 @@ ov::OutputVector squeeze(const Node& node) { } // namespace set_1 namespace set_13 { -ov::OutputVector squeeze(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector squeeze(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); if (inputs.size() < 2) { return {std::make_shared<v0::Squeeze>(inputs.at(0))}; } else { @@ -40,6 +40,6 @@ ov::OutputVector squeeze(const Node& node) { } // namespace set_13 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/squeeze.hpp b/src/frontends/onnx/frontend/src/op/squeeze.hpp index e17a497e88958b..d50b33e158b695 100644 --- a/src/frontends/onnx/frontend/src/op/squeeze.hpp +++ b/src/frontends/onnx/frontend/src/op/squeeze.hpp @@ -4,27 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include
"core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector squeeze(const Node& node); +ov::OutputVector squeeze(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_13 { -ov::OutputVector squeeze(const Node& node); +ov::OutputVector squeeze(const ov::frontend::onnx::Node& node); } // namespace set_13 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/stft.cpp b/src/frontends/onnx/frontend/src/op/stft.cpp index f6688d03270913..e83feec56c4492 100644 --- a/src/frontends/onnx/frontend/src/op/stft.cpp +++ b/src/frontends/onnx/frontend/src/op/stft.cpp @@ -21,20 +21,20 @@ using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_17 { -ov::OutputVector stft(const Node& node) { - const ov::OutputVector ng_inputs{node.get_ng_inputs()}; - auto signal = ng_inputs.at(0); - const auto dft_length_provided = ng_inputs.size() > 3 && !ov::op::util::is_null(ng_inputs[3]); +ov::OutputVector stft(const ov::frontend::onnx::Node& node) { + const ov::OutputVector ov_inputs{node.get_ov_inputs()}; + auto signal = ov_inputs.at(0); + const auto dft_length_provided = ov_inputs.size() > 3 && !ov::op::util::is_null(ov_inputs[3]); const auto onesided = node.get_attribute_value("onesided", 1); const int64_t axis = 1; - const auto& frame_step_node = ng_inputs.at(1); + const auto& frame_step_node = ov_inputs.at(1); CHECK_VALID_NODE(node, ov::op::util::is_constant(frame_step_node.get_node_shared_ptr()) && ov::shape_size(frame_step_node.get_shape()) <= 1, @@ -48,7 +48,7 @@ ov::OutputVector stft(const Node& node) { int64_t frame_length = signal_param_shape[axis].get_length() / frame_step; // default value if (dft_length_provided) { - const auto& frame_length_node = ng_inputs[3]; + const auto& frame_length_node = ov_inputs[3]; CHECK_VALID_NODE(node, ov::op::util::is_constant(frame_length_node.get_node_shared_ptr()) && ov::shape_size(frame_length_node.get_shape()) <= 1, @@ -57,15 +57,15 @@ ov::OutputVector stft(const Node& node) { ov::as_type_ptr(frame_length_node.get_node_shared_ptr())->cast_vector()[0]; } - const auto window_node_provided = ng_inputs.size() > 2 && !ov::op::util::is_null(ng_inputs[2]); + const auto window_node_provided = ov_inputs.size() > 2 && !ov::op::util::is_null(ov_inputs[2]); if (window_node_provided) { // window input provided - if (ng_inputs[2].get_partial_shape().rank().is_static()) { + if (ov_inputs[2].get_partial_shape().rank().is_static()) { CHECK_VALID_NODE(node, - ng_inputs[2].get_partial_shape().rank().get_length() == 1, + ov_inputs[2].get_partial_shape().rank().get_length() == 1, "The rank of window input must be 1D."); - if (ng_inputs[2].get_partial_shape()[0].is_static()) { + if (ov_inputs[2].get_partial_shape()[0].is_static()) { CHECK_VALID_NODE(node, - ng_inputs[2].get_partial_shape()[0].get_length() == frame_length, + ov_inputs[2].get_partial_shape()[0].get_length() == frame_length, "The length of window input must be equal to frame_length."); } } @@ -81,18 +81,18 @@ ov::OutputVector stft(const Node& node) { const auto nstfts = static_cast((signal_param_shape[axis].get_length() - frame_length) / frame_step) + 1; const auto axis_const = 
v0::Constant::create(ov::element::i64, {}, {axis}); const auto zero_const = v0::Constant::create(ov::element::i64, {}, {0}); - const auto step = v0::Constant::create(ov::element::i64, Shape{2}, {1, 1}); + const auto step = v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 1}); ov::OutputVector all_signals; for (int64_t batch = 0; batch < batch_size; ++batch) { ov::OutputVector signals_in_batch; for (int64_t sig_idx = 0; sig_idx < nstfts; ++sig_idx) { const auto start = - v0::Constant::create(ov::element::i64, Shape{2}, std::vector{batch, sig_idx * frame_step}); + v0::Constant::create(ov::element::i64, ov::Shape{2}, std::vector{batch, sig_idx * frame_step}); const auto stop = v0::Constant::create(ov::element::i64, - Shape{2}, + ov::Shape{2}, std::vector{batch + 1, sig_idx * frame_step + frame_length}); - const auto slice_axes = v0::Constant::create(ov::element::i64, Shape{2}, std::vector{0, axis}); + const auto slice_axes = v0::Constant::create(ov::element::i64, ov::Shape{2}, std::vector{0, axis}); const auto slice = std::make_shared(signal, start, stop, step, slice_axes); const ov::Output flatten_slice = std::make_shared( slice, @@ -106,12 +106,12 @@ ov::OutputVector stft(const Node& node) { flatten_slice, is_complex(flatten_slice) ? std::make_shared( // align window shape with signal shape - std::make_shared(ng_inputs[2], + std::make_shared(ov_inputs[2], v0::Constant::create(ov::element::i64, {1}, {1})), std::make_shared(flatten_slice)) - : ng_inputs[2]) + : ov_inputs[2]) : flatten_slice, - dft_length_provided ? ng_inputs[3] : std::make_shared(), + dft_length_provided ? ov_inputs[3] : std::make_shared(), 0, false, onesided == 1); @@ -124,10 +124,7 @@ ov::OutputVector stft(const Node& node) { } } // namespace set_17 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/stft.hpp b/src/frontends/onnx/frontend/src/op/stft.hpp index e0b1352db4b00d..3cd7358de35087 100644 --- a/src/frontends/onnx/frontend/src/op/stft.hpp +++ b/src/frontends/onnx/frontend/src/op/stft.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_17 { -ov::OutputVector stft(const Node& node); +ov::OutputVector stft(const ov::frontend::onnx::Node& node); } // namespace set_17 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sub.hpp b/src/frontends/onnx/frontend/src/op/sub.hpp index f072b81abdfe53..7118bc47e1e37f 100644 --- a/src/frontends/onnx/frontend/src/op/sub.hpp +++ b/src/frontends/onnx/frontend/src/op/sub.hpp @@ -4,32 +4,27 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/subtract.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector sub(const Node& node) { +inline ov::OutputVector sub(const ov::frontend::onnx::Node& node) { return common::handle_opset6_binary_op(node); } } // namespace set_1 namespace set_7 { -inline ov::OutputVector sub(const Node& node) { - return 
{std::make_shared<ov::op::v1::Subtract>(node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; +inline ov::OutputVector sub(const ov::frontend::onnx::Node& node) { + return {std::make_shared<ov::op::v1::Subtract>(node.get_ov_inputs().at(0), node.get_ov_inputs().at(1))}; } } // namespace set_7 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/sum.hpp b/src/frontends/onnx/frontend/src/op/sum.hpp index 34acb7c873e30a..2bd3c276709dc4 100644 --- a/src/frontends/onnx/frontend/src/op/sum.hpp +++ b/src/frontends/onnx/frontend/src/op/sum.hpp @@ -4,33 +4,28 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/add.hpp" #include "utils/variadic.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector sum(const Node& node) { +inline ov::OutputVector sum(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op<ov::op::v1::Add>(node, ov::op::AutoBroadcastType::NONE); } } // namespace set_1 namespace set_8 { -inline ov::OutputVector sum(const Node& node) { +inline ov::OutputVector sum(const ov::frontend::onnx::Node& node) { return variadic::make_ng_variadic_op<ov::op::v1::Add>(node); } } // namespace set_8 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/tan.hpp b/src/frontends/onnx/frontend/src/op/tan.hpp index 50f7c4b5007d17..88e1eceed8f025 100644 --- a/src/frontends/onnx/frontend/src/op/tan.hpp +++ b/src/frontends/onnx/frontend/src/op/tan.hpp @@ -4,24 +4,19 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/tan.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector tan(const Node& node) { - return {std::make_shared<ov::op::v0::Tan>(node.get_ng_inputs().at(0))}; +inline ov::OutputVector tan(const ov::frontend::onnx::Node& node) { + return {std::make_shared<ov::op::v0::Tan>(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/tanh.hpp b/src/frontends/onnx/frontend/src/op/tanh.hpp index 2f63cf6ddb75c5..cf28e1baf8c590 100644 --- a/src/frontends/onnx/frontend/src/op/tanh.hpp +++ b/src/frontends/onnx/frontend/src/op/tanh.hpp @@ -4,25 +4,20 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/tanh.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector tanh(const Node& node) { - return {std::make_shared<ov::op::v0::Tanh>(node.get_ng_inputs().at(0))}; +inline ov::OutputVector tanh(const ov::frontend::onnx::Node& node) { + return {std::make_shared<ov::op::v0::Tanh>(node.get_ov_inputs().at(0))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff
--git a/src/frontends/onnx/frontend/src/op/thresholded_relu.cpp b/src/frontends/onnx/frontend/src/op/thresholded_relu.cpp index 7a94ab9f3f87b9..f8e5b0aad60814 100644 --- a/src/frontends/onnx/frontend/src/op/thresholded_relu.cpp +++ b/src/frontends/onnx/frontend/src/op/thresholded_relu.cpp @@ -11,13 +11,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector thresholded_relu(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector thresholded_relu(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); const double alpha = node.get_attribute_value<double>("alpha", 1.0); const auto alpha_node = v0::Constant::create(data.get_element_type(), ov::Shape{}, {alpha}); @@ -29,10 +29,7 @@ ov::OutputVector thresholded_relu(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/thresholded_relu.hpp b/src/frontends/onnx/frontend/src/op/thresholded_relu.hpp index 41892b7605ca05..297efcfa317f3e 100644 --- a/src/frontends/onnx/frontend/src/op/thresholded_relu.hpp +++ b/src/frontends/onnx/frontend/src/op/thresholded_relu.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector thresholded_relu(const Node& node); +ov::OutputVector thresholded_relu(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/tile.cpp b/src/frontends/onnx/frontend/src/op/tile.cpp index 231b3de4d7556d..0a36d01d862bc1 100644 --- a/src/frontends/onnx/frontend/src/op/tile.cpp +++ b/src/frontends/onnx/frontend/src/op/tile.cpp @@ -10,14 +10,14 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector tile(const Node& node) { - auto input = node.get_ng_inputs().at(0); - auto repeats = node.get_ng_inputs().at(1); +ov::OutputVector tile(const ov::frontend::onnx::Node& node) { + auto input = node.get_ov_inputs().at(0); + auto repeats = node.get_ov_inputs().at(1); // Workaround for backends which require repeats to be i64. // Remove the following line when no longer needed.
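For reference, every translator touched by this patch converges on the same post-migration shape. A minimal sketch, assuming the conventions the patch introduces (ov::frontend::onnx namespace, get_ov_inputs(), no deprecation guards); the v0::Tile and v0::Convert opset bindings below are illustrative assumptions, not lines taken from this patch:

#include <memory>

#include "core/node.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/tile.hpp"

namespace ov {
namespace frontend {
namespace onnx {
namespace op {
namespace set_1 {
// Migrated translator skeleton: read inputs via get_ov_inputs() and
// return the built subgraph as an ov::OutputVector.
inline ov::OutputVector tile_sketch(const ov::frontend::onnx::Node& node) {
    auto input = node.get_ov_inputs().at(0);
    auto repeats = node.get_ov_inputs().at(1);
    // Hypothetical i64 normalization of repeats, mirroring the workaround
    // described in the comment above.
    repeats = std::make_shared<ov::op::v0::Convert>(repeats, ov::element::i64);
    return {std::make_shared<ov::op::v0::Tile>(input, repeats)};
}
}  // namespace set_1
}  // namespace op
}  // namespace onnx
}  // namespace frontend
}  // namespace ov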
@@ -27,10 +27,7 @@ ov::OutputVector tile(const Node& node) { } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/tile.hpp b/src/frontends/onnx/frontend/src/op/tile.hpp index 5aff2d12a153dd..106f38970ab7f5 100644 --- a/src/frontends/onnx/frontend/src/op/tile.hpp +++ b/src/frontends/onnx/frontend/src/op/tile.hpp @@ -4,26 +4,21 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Performs ONNX Tile operation. /// /// \param node The ONNX node object representing this operation. /// \return The vector containing an OV node producing the output of the Tile op. -ov::OutputVector tile(const Node& node); +ov::OutputVector tile(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/topk.cpp b/src/frontends/onnx/frontend/src/op/topk.cpp index c945b2c98e2b93..cc220b49a04086 100644 --- a/src/frontends/onnx/frontend/src/op/topk.cpp +++ b/src/frontends/onnx/frontend/src/op/topk.cpp @@ -8,16 +8,15 @@ #include "openvino/op/topk.hpp" #include "utils/reshape.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START namespace { /// \return Return the second input to the TopK node reshaped to a scalar. -ov::Output<ov::Node> get_k(const ngraph::onnx_import::Node& node) { - auto k_node = node.get_ng_inputs().at(1); +ov::Output<ov::Node> get_k(const ov::frontend::onnx::Node& node) { + auto k_node = node.get_ov_inputs().at(1); FRONT_END_GENERAL_CHECK(shape_size(k_node.get_shape()) == 1, "ONNX TopK operator: 'K' parameter must contain a single positive value.", node); - return ngraph::onnx_import::reshape::interpret_as_scalar(k_node); + return ov::frontend::onnx::reshape::interpret_as_scalar(k_node); } } // namespace @@ -25,12 +24,13 @@ using namespace ov::op; using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector topk(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector topk(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); const auto k_node = node.get_attribute_as_constant<std::int64_t>("k"); const std::int64_t axis{node.get_attribute_value<std::int64_t>("axis", -1)}; @@ -46,8 +46,8 @@ ov::OutputVector topk(const Node& node) { } // namespace set_1 namespace set_10 { -ov::OutputVector topk(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector topk(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); auto k = get_k(node); const std::int64_t axis{node.get_attribute_value<std::int64_t>("axis", -1)}; @@ -63,9 +63,9 @@ ov::OutputVector topk(const Node& node) { } // namespace set_10 namespace set_11 { -ov::OutputVector topk(const Node& node) { +ov::OutputVector topk(const ov::frontend::onnx::Node& node) { // Process inputs - auto data = node.get_ng_inputs().at(0); + auto data = node.get_ov_inputs().at(0); auto k = get_k(node); // Process attributes @@ -84,10 +84,7 @@ ov::OutputVector topk(const Node& node) { return {top_k->output(0),
top_k->output(1)}; } } // namespace set_11 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/topk.hpp b/src/frontends/onnx/frontend/src/op/topk.hpp index 92b1b7f2138a2f..f5968c60c1ac03 100644 --- a/src/frontends/onnx/frontend/src/op/topk.hpp +++ b/src/frontends/onnx/frontend/src/op/topk.hpp @@ -4,13 +4,11 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { /// \brief Performs ONNX TopK operation. @@ -18,7 +16,7 @@ namespace set_1 { /// \param node The ONNX node object representing this operation. /// \return The vector containing OV nodes producing output of ONNX TopK /// operation (both values and indices). -ov::OutputVector topk(const Node& node); +ov::OutputVector topk(const ov::frontend::onnx::Node& node); } // namespace set_1 /// \brief Performs TopK operation from ONNX version 1.5 @@ -26,19 +24,17 @@ ov::OutputVector topk(const Node& node); /// \details ONNX op set 10 added support for K as a dynamic input, not a static /// attribute. namespace set_10 { -ov::OutputVector topk(const Node& node); +ov::OutputVector topk(const ov::frontend::onnx::Node& node); } /// \brief Performs TopK operation from ONNX version 1.6 /// /// \details ONNX op set 11 added support for `largest` and `sorted` attributes. namespace set_11 { -ov::OutputVector topk(const Node& node); +ov::OutputVector topk(const ov::frontend::onnx::Node& node); } } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/transpose.cpp b/src/frontends/onnx/frontend/src/op/transpose.cpp index 362338b45adc18..9ea28088f884ab 100644 --- a/src/frontends/onnx/frontend/src/op/transpose.cpp +++ b/src/frontends/onnx/frontend/src/op/transpose.cpp @@ -4,17 +4,17 @@ #include "op/transpose.hpp" -#include "ov_models/ov_builders/reshape.hpp" +#include "utils/reshape.hpp" using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector transpose(const Node& node) { - ov::Output<ov::Node> data = node.get_ng_inputs().at(0); +ov::OutputVector transpose(const ov::frontend::onnx::Node& node) { + ov::Output<ov::Node> data = node.get_ov_inputs().at(0); auto permute_axes = node.get_attribute_value<std::vector<std::size_t>>("perm", {}); @@ -22,10 +22,7 @@ } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/transpose.hpp b/src/frontends/onnx/frontend/src/op/transpose.hpp index 129feb7e87a805..897437d1fda96f 100644 --- a/src/frontends/onnx/frontend/src/op/transpose.hpp +++ b/src/frontends/onnx/frontend/src/op/transpose.hpp @@ -4,22 +4,17 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 {
-ov::OutputVector transpose(const Node& node); +ov::OutputVector transpose(const ov::frontend::onnx::Node& node); } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/trilu.cpp b/src/frontends/onnx/frontend/src/op/trilu.cpp index e6111296b6a795..30849020945fe9 100644 --- a/src/frontends/onnx/frontend/src/op/trilu.cpp +++ b/src/frontends/onnx/frontend/src/op/trilu.cpp @@ -20,14 +20,14 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector trilu(const Node& node) { - const auto inputs = node.get_ng_inputs(); +ov::OutputVector trilu(const ov::frontend::onnx::Node& node) { + const auto inputs = node.get_ov_inputs(); const auto num_inputs = inputs.size(); CHECK_VALID_NODE(node, num_inputs > 0 && num_inputs <= 2, "Trilu expects <= 2 input tensors. Got: ", num_inputs); @@ -104,6 +104,6 @@ ov::OutputVector trilu(const Node& node) { } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/trilu.hpp b/src/frontends/onnx/frontend/src/op/trilu.hpp index 72eeb416638a85..abfe0f2522fbea 100644 --- a/src/frontends/onnx/frontend/src/op/trilu.hpp +++ b/src/frontends/onnx/frontend/src/op/trilu.hpp @@ -2,19 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // #pragma once -#include "openvino/core/deprecated.hpp" - -OPENVINO_SUPPRESS_DEPRECATED_START #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector trilu(const Node& node); +ov::OutputVector trilu(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/unique.cpp b/src/frontends/onnx/frontend/src/op/unique.cpp index 58a832a0bdbe1b..035e32940bc102 100644 --- a/src/frontends/onnx/frontend/src/op/unique.cpp +++ b/src/frontends/onnx/frontend/src/op/unique.cpp @@ -8,13 +8,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector unique(const Node& node) { - const auto data = node.get_ng_inputs().at(0); +ov::OutputVector unique(const ov::frontend::onnx::Node& node) { + const auto data = node.get_ov_inputs().at(0); const bool sorted = node.get_attribute_value("sorted", 1); if (node.has_attribute("axis")) { @@ -26,6 +26,6 @@ ov::OutputVector unique(const Node& node) { } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/unique.hpp b/src/frontends/onnx/frontend/src/op/unique.hpp index 0855fed3f08caf..cc8f181e77e74b 100644 --- a/src/frontends/onnx/frontend/src/op/unique.hpp +++ b/src/frontends/onnx/frontend/src/op/unique.hpp @@ -4,21 +4,17 @@ #pragma once -#include 
"openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector unique(const Node& node); +ov::OutputVector unique(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/unsqueeze.cpp b/src/frontends/onnx/frontend/src/op/unsqueeze.cpp index e2a0cf90d8db65..a2a2c53787d0bb 100644 --- a/src/frontends/onnx/frontend/src/op/unsqueeze.cpp +++ b/src/frontends/onnx/frontend/src/op/unsqueeze.cpp @@ -9,13 +9,13 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector unsqueeze(const Node& node) { - auto data = node.get_ng_inputs().at(0); +ov::OutputVector unsqueeze(const ov::frontend::onnx::Node& node) { + auto data = node.get_ov_inputs().at(0); auto axes_node = node.get_attribute_as_constant>("axes", {}); return {std::make_shared(data, axes_node)}; } @@ -23,15 +23,13 @@ ov::OutputVector unsqueeze(const Node& node) { } // namespace set_1 namespace set_13 { -ov::OutputVector unsqueeze(const Node& node) { - auto inputs = node.get_ng_inputs(); +ov::OutputVector unsqueeze(const ov::frontend::onnx::Node& node) { + auto inputs = node.get_ov_inputs(); return {std::make_shared(inputs.at(0), inputs.at(1))}; } } // namespace set_13 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/unsqueeze.hpp b/src/frontends/onnx/frontend/src/op/unsqueeze.hpp index e1a3a1931d189a..353424b1dc619a 100644 --- a/src/frontends/onnx/frontend/src/op/unsqueeze.hpp +++ b/src/frontends/onnx/frontend/src/op/unsqueeze.hpp @@ -4,26 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector unsqueeze(const Node& node); +ov::OutputVector unsqueeze(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_13 { -ov::OutputVector unsqueeze(const Node& node); +ov::OutputVector unsqueeze(const ov::frontend::onnx::Node& node); } // namespace set_13 } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/upsample.cpp b/src/frontends/onnx/frontend/src/op/upsample.cpp index bced1015bb5748..767d0872cfa859 100644 --- a/src/frontends/onnx/frontend/src/op/upsample.cpp +++ b/src/frontends/onnx/frontend/src/op/upsample.cpp @@ -10,16 +10,16 @@ using namespace ov::op; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace { constexpr unsigned version_1{1}; constexpr unsigned version_7{7}; constexpr unsigned version_9{9}; -void check_mode_support(const onnx_import::Node& node, const std::string& mode, const unsigned op_version) { +void 
check_mode_support(const ov::frontend::onnx::Node& node, const std::string& mode, const unsigned op_version) { const std::unordered_set<std::string> modes_v1 = {"nearest", "bilinear"}; const std::unordered_set<std::string> modes_v7 = {"nearest", "linear"}; const auto& supported_modes = op_version < version_7 ? modes_v1 : modes_v7; @@ -55,13 +55,13 @@ v11::Interpolate::InterpolateAttrs get_attributes(const std::string& mode) { } // namespace namespace set_1 { -ov::OutputVector upsample(const onnx_import::Node& node) { +ov::OutputVector upsample(const ov::frontend::onnx::Node& node) { const auto height_scale = node.get_attribute_value<float>("height_scale"); const auto width_scale = node.get_attribute_value<float>("width_scale"); const auto mode = node.get_attribute_value<std::string>("mode", "nearest"); check_mode_support(node, mode, version_1); - const auto data = node.get_ng_inputs().at(0); + const auto data = node.get_ov_inputs().at(0); static const std::string expectation{"Input tensor is required to be 4D."}; const auto rank = data.get_partial_shape().rank(); @@ -81,12 +81,12 @@ ov::OutputVector upsample(const onnx_import::Node& node) { } // namespace set_1 namespace set_7 { -ov::OutputVector upsample(const onnx_import::Node& node) { +ov::OutputVector upsample(const ov::frontend::onnx::Node& node) { const auto scales = node.get_attribute_value<std::vector<float>>("scales"); const auto mode = node.get_attribute_value<std::string>("mode", "nearest"); check_mode_support(node, mode, version_7); - const auto data = node.get_ng_inputs().at(0); + const auto data = node.get_ov_inputs().at(0); const auto rank = data.get_partial_shape().rank(); CHECK_VALID_NODE(node, @@ -102,16 +102,16 @@ ov::OutputVector upsample(const onnx_import::Node& node) { } // namespace set_7 namespace set_9 { -ov::OutputVector upsample(const onnx_import::Node& node) { +ov::OutputVector upsample(const ov::frontend::onnx::Node& node) { const auto mode = node.get_attribute_value<std::string>("mode", "nearest"); check_mode_support(node, mode, version_9); - const auto& inputs = node.get_ng_inputs(); + const auto& inputs = node.get_ov_inputs(); return std::make_shared<v11::Interpolate>(inputs.at(0), inputs.at(1), get_attributes(mode))->outputs(); } } // namespace set_9 } // namespace op -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/upsample.hpp b/src/frontends/onnx/frontend/src/op/upsample.hpp index 39ab0e8793109c..87d40de79d990d 100644 --- a/src/frontends/onnx/frontend/src/op/upsample.hpp +++ b/src/frontends/onnx/frontend/src/op/upsample.hpp @@ -4,32 +4,27 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector upsample(const Node& node); +ov::OutputVector upsample(const ov::frontend::onnx::Node& node); } // namespace set_1 namespace set_7 { -ov::OutputVector upsample(const Node& node); +ov::OutputVector upsample(const ov::frontend::onnx::Node& node); } // namespace set_7 namespace set_9 { -ov::OutputVector upsample(const Node& node); +ov::OutputVector upsample(const ov::frontend::onnx::Node& node); } // namespace set_9 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/where.hpp
b/src/frontends/onnx/frontend/src/op/where.hpp index 8983398c31ccd4..57b59f2f95e302 100644 --- a/src/frontends/onnx/frontend/src/op/where.hpp +++ b/src/frontends/onnx/frontend/src/op/where.hpp @@ -4,26 +4,21 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/select.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector where(const Node& node) { - ov::OutputVector ng_inputs{node.get_ng_inputs()}; +inline ov::OutputVector where(const ov::frontend::onnx::Node& node) { + ov::OutputVector ov_inputs{node.get_ov_inputs()}; - return {std::make_shared<ov::op::v1::Select>(ng_inputs.at(0), ng_inputs.at(1), ng_inputs.at(2))}; + return {std::make_shared<ov::op::v1::Select>(ov_inputs.at(0), ov_inputs.at(1), ov_inputs.at(2))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/op/xor.hpp b/src/frontends/onnx/frontend/src/op/xor.hpp index db794b887bbb83..5d40e4e232e948 100644 --- a/src/frontends/onnx/frontend/src/op/xor.hpp +++ b/src/frontends/onnx/frontend/src/op/xor.hpp @@ -4,27 +4,22 @@ #pragma once -#include "openvino/core/deprecated.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START - #include "core/node.hpp" #include "openvino/op/logical_xor.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -inline ov::OutputVector logical_xor(const Node& node) { - return {std::make_shared<ov::op::v1::LogicalXor>(node.get_ng_inputs().at(0), - node.get_ng_inputs().at(1), +inline ov::OutputVector logical_xor(const ov::frontend::onnx::Node& node) { + return {std::make_shared<ov::op::v1::LogicalXor>(node.get_ov_inputs().at(0), + node.get_ov_inputs().at(1), ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY))}; } } // namespace set_1 - } // namespace op - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp index 7d45d51bfe1fee..334eae33837371 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.cpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp @@ -193,8 +193,9 @@ using namespace ov::frontend::onnx; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { const char* OPENVINO_ONNX_DOMAIN = "org.openvinotoolkit"; @@ -606,6 +607,6 @@ OperatorsBridge::OperatorsBridge() { #undef REGISTER_OPERATOR #undef REGISTER_OPERATOR_WITH_DOMAIN -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/ops_bridge.hpp b/src/frontends/onnx/frontend/src/ops_bridge.hpp index fa611a1c2e8c79..7f0fe96785bd03 100644 --- a/src/frontends/onnx/frontend/src/ops_bridge.hpp +++ b/src/frontends/onnx/frontend/src/ops_bridge.hpp @@ -12,11 +12,11 @@ #include #include "core/operator_set.hpp" -#include "openvino/core/deprecated.hpp" #include "version_range.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { class OperatorsBridge { public: @@ -89,6 +89,6 @@ class OperatorsBridge { extern const char* OPENVINO_ONNX_DOMAIN; -} // namespace onnx_import - -} //
namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/place.cpp b/src/frontends/onnx/frontend/src/place.cpp index 3430bf4e6b56cb..f08221d3d3cbff 100644 --- a/src/frontends/onnx/frontend/src/place.cpp +++ b/src/frontends/onnx/frontend/src/place.cpp @@ -9,12 +9,12 @@ using namespace ov; using namespace ov::frontend::onnx; -PlaceInputEdge::PlaceInputEdge(const onnx_editor::InputEdge& edge, std::shared_ptr editor) +PlaceInputEdge::PlaceInputEdge(const InputEdge& edge, std::shared_ptr editor) : m_edge{edge}, m_editor{std::move(editor)}, m_initial_source_tensor_name{m_editor->get_source_tensor_name(m_edge)} {} -PlaceInputEdge::PlaceInputEdge(onnx_editor::InputEdge&& edge, std::shared_ptr editor) +PlaceInputEdge::PlaceInputEdge(InputEdge&& edge, std::shared_ptr editor) : m_edge{std::move(edge)}, m_editor{std::move(editor)}, m_initial_source_tensor_name{m_editor->get_source_tensor_name(m_edge)} {} @@ -26,7 +26,7 @@ void PlaceInputEdge::check_if_valid() const { " is outdated since the topology of the model has been changed."); } -onnx_editor::InputEdge PlaceInputEdge::get_input_edge() const { +InputEdge PlaceInputEdge::get_input_edge() const { return m_edge; } @@ -65,7 +65,7 @@ ov::frontend::Place::Ptr PlaceInputEdge::get_source_tensor() const { std::vector PlaceInputEdge::get_consuming_operations() const { check_if_valid(); - return {std::make_shared(onnx_editor::EditorNode{m_edge.m_node_idx}, m_editor)}; + return {std::make_shared(EditorNode{m_edge.m_node_idx}, m_editor)}; } ov::frontend::Place::Ptr PlaceInputEdge::get_producing_operation() const { @@ -76,13 +76,12 @@ ov::frontend::Place::Ptr PlaceInputEdge::get_producing_port() const { return get_source_tensor()->get_producing_port(); } -PlaceOutputEdge::PlaceOutputEdge(const onnx_editor::OutputEdge& edge, - std::shared_ptr editor) +PlaceOutputEdge::PlaceOutputEdge(const OutputEdge& edge, std::shared_ptr editor) : m_edge{edge}, m_editor{std::move(editor)}, m_initial_target_tensor_name{m_editor->get_target_tensor_name(edge)} {} -PlaceOutputEdge::PlaceOutputEdge(onnx_editor::OutputEdge&& edge, std::shared_ptr editor) +PlaceOutputEdge::PlaceOutputEdge(OutputEdge&& edge, std::shared_ptr editor) : m_edge{std::move(edge)}, m_editor{std::move(editor)}, m_initial_target_tensor_name{m_editor->get_target_tensor_name(m_edge)} {} @@ -95,7 +94,7 @@ void PlaceOutputEdge::check_if_valid() const { " is outdated since the topology of the model has been changed."); } -onnx_editor::OutputEdge PlaceOutputEdge::get_output_edge() const { +OutputEdge PlaceOutputEdge::get_output_edge() const { return m_edge; } @@ -138,18 +137,18 @@ std::vector PlaceOutputEdge::get_consuming_ports() con ov::frontend::Place::Ptr PlaceOutputEdge::get_producing_operation() const { check_if_valid(); - return std::make_shared(onnx_editor::EditorNode{m_edge.m_node_idx}, m_editor); + return std::make_shared(EditorNode{m_edge.m_node_idx}, m_editor); } std::vector PlaceOutputEdge::get_consuming_operations() const { return get_target_tensor()->get_consuming_operations(); } -PlaceTensor::PlaceTensor(const std::string& name, std::shared_ptr editor) +PlaceTensor::PlaceTensor(const std::string& name, std::shared_ptr editor) : m_name{name}, m_editor{std::move(editor)} {} -PlaceTensor::PlaceTensor(std::string&& name, std::shared_ptr editor) +PlaceTensor::PlaceTensor(std::string&& name, std::shared_ptr editor) : m_name{std::move(name)}, m_editor{std::move(editor)} {} @@ -166,7 +165,7 @@ ov::frontend::Place::Ptr 
PlaceTensor::get_producing_port() const { std::vector PlaceTensor::get_consuming_ports() const { std::vector ret; auto edges = m_editor->find_output_consumers(m_name); - std::transform(edges.begin(), edges.end(), std::back_inserter(ret), [this](const onnx_editor::InputEdge& edge) { + std::transform(edges.begin(), edges.end(), std::back_inserter(ret), [this](const InputEdge& edge) { return std::make_shared(edge, this->m_editor); }); return ret; @@ -228,12 +227,12 @@ void PlaceTensor::set_name_for_dimension(size_t shape_dim_index, const std::stri m_editor->set_name_for_dimension(m_name, shape_dim_index, dim_name); } -PlaceOp::PlaceOp(const onnx_editor::EditorNode& node, std::shared_ptr editor) +PlaceOp::PlaceOp(const EditorNode& node, std::shared_ptr editor) : m_node{node}, m_editor{std::move(editor)}, m_initial_first_output{m_editor->get_output_ports(m_node).at(0)} {} -PlaceOp::PlaceOp(onnx_editor::EditorNode&& node, std::shared_ptr editor) +PlaceOp::PlaceOp(EditorNode&& node, std::shared_ptr editor) : m_node{std::move(node)}, m_editor{std::move(editor)}, m_initial_first_output{m_editor->get_output_ports(m_node).at(0)} {} @@ -254,7 +253,7 @@ std::vector PlaceOp::get_names() const { } } -const onnx_editor::EditorNode& PlaceOp::get_editor_node() const { +const EditorNode& PlaceOp::get_editor_node() const { return m_node; } @@ -269,9 +268,8 @@ ov::frontend::Place::Ptr PlaceOp::get_output_port(int output_port_index) const { check_if_valid(); const int out_ports_number = static_cast(m_editor->get_output_ports(m_node).size()); if (output_port_index < out_ports_number) { - return std::make_shared( - m_editor->find_output_edge(m_node, onnx_editor::EditorOutput{output_port_index}), - m_editor); + return std::make_shared(m_editor->find_output_edge(m_node, EditorOutput{output_port_index}), + m_editor); } return nullptr; } @@ -280,9 +278,8 @@ ov::frontend::Place::Ptr PlaceOp::get_output_port(const std::string& output_port check_if_valid(); const auto output_ports = m_editor->get_output_ports(m_node); if (std::count(std::begin(output_ports), std::end(output_ports), output_port_name) == 1) { - return std::make_shared( - m_editor->find_output_edge(m_node, onnx_editor::EditorOutput{output_port_name}), - m_editor); + return std::make_shared(m_editor->find_output_edge(m_node, EditorOutput{output_port_name}), + m_editor); } return nullptr; } @@ -298,9 +295,8 @@ ov::frontend::Place::Ptr PlaceOp::get_input_port(int input_port_index) const { check_if_valid(); const int in_ports_number = static_cast(m_editor->get_input_ports(m_node).size()); if (input_port_index < in_ports_number) { - return std::make_shared( - m_editor->find_input_edge(m_node, onnx_editor::EditorInput{input_port_index}), - m_editor); + return std::make_shared(m_editor->find_input_edge(m_node, EditorInput{input_port_index}), + m_editor); } return nullptr; } @@ -309,8 +305,7 @@ ov::frontend::Place::Ptr PlaceOp::get_input_port(const std::string& input_name) check_if_valid(); const auto input_ports = m_editor->get_input_ports(m_node); if (std::count(std::begin(input_ports), std::end(input_ports), input_name) == 1) { - return std::make_shared(m_editor->find_input_edge(m_node, onnx_editor::EditorInput{input_name}), - m_editor); + return std::make_shared(m_editor->find_input_edge(m_node, EditorInput{input_name}), m_editor); } return nullptr; } diff --git a/src/frontends/onnx/frontend/src/place.hpp b/src/frontends/onnx/frontend/src/place.hpp index 8e70b4db8ce16f..0a3d95855e98a7 100644 --- a/src/frontends/onnx/frontend/src/place.hpp +++ 
b/src/frontends/onnx/frontend/src/place.hpp @@ -15,11 +15,11 @@ namespace onnx { class PlaceInputEdge : public Place { public: - PlaceInputEdge(const onnx_editor::InputEdge& edge, std::shared_ptr editor); - PlaceInputEdge(onnx_editor::InputEdge&& edge, std::shared_ptr editor); + PlaceInputEdge(const InputEdge& edge, std::shared_ptr editor); + PlaceInputEdge(InputEdge&& edge, std::shared_ptr editor); // internal usage - onnx_editor::InputEdge get_input_edge() const; + InputEdge get_input_edge() const; void check_if_valid() const; // external usage @@ -34,18 +34,18 @@ class PlaceInputEdge : public Place { Place::Ptr get_producing_port() const override; private: - onnx_editor::InputEdge m_edge; - const std::shared_ptr m_editor; + InputEdge m_edge; + const std::shared_ptr m_editor; std::string m_initial_source_tensor_name; }; class PlaceOutputEdge : public Place { public: - PlaceOutputEdge(const onnx_editor::OutputEdge& edge, std::shared_ptr editor); - PlaceOutputEdge(onnx_editor::OutputEdge&& edge, std::shared_ptr editor); + PlaceOutputEdge(const OutputEdge& edge, std::shared_ptr editor); + PlaceOutputEdge(OutputEdge&& edge, std::shared_ptr editor); // internal usage - onnx_editor::OutputEdge get_output_edge() const; + OutputEdge get_output_edge() const; void check_if_valid() const; // external usage @@ -60,15 +60,15 @@ class PlaceOutputEdge : public Place { std::vector get_consuming_operations() const override; private: - onnx_editor::OutputEdge m_edge; - std::shared_ptr m_editor; + OutputEdge m_edge; + std::shared_ptr m_editor; std::string m_initial_target_tensor_name; }; class PlaceTensor : public Place { public: - PlaceTensor(const std::string& name, std::shared_ptr editor); - PlaceTensor(std::string&& name, std::shared_ptr editor); + PlaceTensor(const std::string& name, std::shared_ptr editor); + PlaceTensor(std::string&& name, std::shared_ptr editor); // external usage std::vector get_names() const override; @@ -86,17 +86,17 @@ class PlaceTensor : public Place { private: std::string m_name; - std::shared_ptr m_editor; + std::shared_ptr m_editor; }; class PlaceOp : public Place { public: - PlaceOp(const onnx_editor::EditorNode& node, std::shared_ptr editor); - PlaceOp(onnx_editor::EditorNode&& node, std::shared_ptr editor); + PlaceOp(const EditorNode& node, std::shared_ptr editor); + PlaceOp(EditorNode&& node, std::shared_ptr editor); std::vector get_names() const override; // internal usage - const onnx_editor::EditorNode& get_editor_node() const; + const EditorNode& get_editor_node() const; void set_name(const std::string& new_name); void check_if_valid() const; @@ -131,8 +131,8 @@ class PlaceOp : public Place { bool is_output() const override; private: - onnx_editor::EditorNode m_node; - std::shared_ptr m_editor; + EditorNode m_node; + std::shared_ptr m_editor; std::string m_initial_first_output; }; diff --git a/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.cpp b/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.cpp index 646134722c431b..0359e32e02cf59 100644 --- a/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.cpp +++ b/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.cpp @@ -17,16 +17,16 @@ using namespace ov; using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace utils { -OPENVINO_SUPPRESS_DEPRECATED_START + ArgMinMaxFactory::ArgMinMaxFactory(const Node& node) : m_keep_dims{node.get_attribute_value("keepdims", 1)}, - m_input_node{node.get_ng_inputs().at(0)}, + 
m_input_node{node.get_ov_inputs().at(0)}, m_axis{node.get_attribute_value("axis", 0)}, m_select_last_index{node.get_attribute_value("select_last_index", 0)} {} -OPENVINO_SUPPRESS_DEPRECATED_END std::shared_ptr ArgMinMaxFactory::make_arg_max() const { return make_topk_subgraph(v11::TopK::Mode::MAX); @@ -37,7 +37,7 @@ std::shared_ptr ArgMinMaxFactory::make_arg_min() const { } std::shared_ptr ArgMinMaxFactory::make_topk_subgraph(v11::TopK::Mode mode) const { - const auto k_node = v0::Constant::create(ov::element::i64, Shape{}, {1}); + const auto k_node = v0::Constant::create(ov::element::i64, ov::Shape{}, {1}); if (m_select_last_index == 1) { // Example (ArgMin): @@ -67,23 +67,25 @@ std::shared_ptr ArgMinMaxFactory::make_topk_subgraph(v11::TopK::Mode m const int64_t normalized_axis = ov::util::normalize_axis(m_input_node.get_node(), m_axis, m_input_node.get_partial_shape().rank()); - const auto axis_node = v0::Constant::create(ov::element::i64, Shape{1}, {normalized_axis}); + const auto axis_node = v0::Constant::create(ov::element::i64, ov::Shape{1}, {normalized_axis}); const auto reverse = std::make_shared(m_input_node, axis_node, v1::Reverse::Mode::INDEX); const auto topk = std::make_shared(reverse, k_node, normalized_axis, mode, v1::TopK::SortType::NONE); const auto data_shape = std::make_shared(m_input_node); const auto dims_on_axis = - std::make_shared(data_shape, axis_node, v0::Constant::create(ov::element::i64, Shape{}, {0})); + std::make_shared(data_shape, + axis_node, + v0::Constant::create(ov::element::i64, ov::Shape{}, {0})); const auto res_index = std::make_shared(dims_on_axis, std::make_shared(topk->output(1), ov::element::i64)); const auto result = - std::make_shared(res_index, v0::Constant::create(ov::element::i64, Shape{1}, {1})); + std::make_shared(res_index, v0::Constant::create(ov::element::i64, ov::Shape{1}, {1})); if (m_keep_dims == 0) { - const auto axis_to_remove = v0::Constant::create(ov::element::u64, Shape{}, {topk->get_axis()}); + const auto axis_to_remove = v0::Constant::create(ov::element::u64, ov::Shape{}, {topk->get_axis()}); return std::make_shared(result, axis_to_remove); } @@ -96,7 +98,7 @@ std::shared_ptr ArgMinMaxFactory::make_topk_subgraph(v11::TopK::Mode m const auto result = std::make_shared(topk->output(1), ov::element::i64); if (m_keep_dims == 0) { - const auto axis_to_remove = v0::Constant::create(ov::element::u64, Shape{}, {topk->get_axis()}); + const auto axis_to_remove = v0::Constant::create(ov::element::u64, ov::Shape{}, {topk->get_axis()}); return std::make_shared(result, axis_to_remove); } @@ -104,5 +106,6 @@ std::shared_ptr ArgMinMaxFactory::make_topk_subgraph(v11::TopK::Mode m return result; } } // namespace utils -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.hpp b/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.hpp index 943655e56d66d4..213aee65fb23e5 100644 --- a/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.hpp +++ b/src/frontends/onnx/frontend/src/utils/arg_min_max_factory.hpp @@ -8,19 +8,18 @@ #include #include "core/node.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/node.hpp" #include "openvino/op/topk.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace utils { /// \brief Factory class which generates sub-graphs for ONNX ArgMin, ArgMax ops. 
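// For context, a translator is expected to drive this factory roughly as
// follows (usage inferred from the interface declared below; the argmax
// wrapper name and set_1 placement are illustrative, not part of this patch):
//
//     ov::OutputVector argmax(const ov::frontend::onnx::Node& node) {
//         const utils::ArgMinMaxFactory factory{node};
//         return {factory.make_arg_max()};
//     }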
class ArgMinMaxFactory { public: - OPENVINO_SUPPRESS_DEPRECATED_START explicit ArgMinMaxFactory(const Node& node); - OPENVINO_SUPPRESS_DEPRECATED_END + virtual ~ArgMinMaxFactory() = default; /// \brief Creates ArgMax ONNX operation. @@ -41,5 +40,6 @@ class ArgMinMaxFactory { }; } // namespace utils -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/common.cpp b/src/frontends/onnx/frontend/src/utils/common.cpp index b41c7bd055d7c4..de997ff168c1cd 100644 --- a/src/frontends/onnx/frontend/src/utils/common.cpp +++ b/src/frontends/onnx/frontend/src/utils/common.cpp @@ -7,7 +7,6 @@ #include // onnx types #include "onnx_framework_node.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/op/add.hpp" #include "openvino/op/broadcast.hpp" @@ -21,41 +20,42 @@ #include "openvino/op/subtract.hpp" using namespace ov::op; +using ::ONNX_NAMESPACE::TensorProto_DataType; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace common { const ov::element::Type& get_ov_element_type(int64_t onnx_type) { switch (onnx_type) { - case ONNX_NAMESPACE::TensorProto_DataType_BOOL: + case TensorProto_DataType::TensorProto_DataType_BOOL: return ov::element::boolean; - case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: + case TensorProto_DataType::TensorProto_DataType_DOUBLE: return ov::element::f64; - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16: + case TensorProto_DataType::TensorProto_DataType_FLOAT16: return ov::element::f16; - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: + case TensorProto_DataType::TensorProto_DataType_FLOAT: return ov::element::f32; - case ONNX_NAMESPACE::TensorProto_DataType_INT8: + case TensorProto_DataType::TensorProto_DataType_INT8: return ov::element::i8; - case ONNX_NAMESPACE::TensorProto_DataType_INT16: + case TensorProto_DataType::TensorProto_DataType_INT16: return ov::element::i16; - case ONNX_NAMESPACE::TensorProto_DataType_INT32: + case TensorProto_DataType::TensorProto_DataType_INT32: return ov::element::i32; - case ONNX_NAMESPACE::TensorProto_DataType_INT64: + case TensorProto_DataType::TensorProto_DataType_INT64: return ov::element::i64; - case ONNX_NAMESPACE::TensorProto_DataType_UINT8: + case TensorProto_DataType::TensorProto_DataType_UINT8: return ov::element::u8; - case ONNX_NAMESPACE::TensorProto_DataType_UINT16: + case TensorProto_DataType::TensorProto_DataType_UINT16: return ov::element::u16; - case ONNX_NAMESPACE::TensorProto_DataType_UINT32: + case TensorProto_DataType::TensorProto_DataType_UINT32: return ov::element::u32; - case ONNX_NAMESPACE::TensorProto_DataType_UINT64: + case TensorProto_DataType::TensorProto_DataType_UINT64: return ov::element::u64; - case ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED: + case TensorProto_DataType::TensorProto_DataType_UNDEFINED: return ov::element::dynamic; - case ONNX_NAMESPACE::TensorProto_DataType_BFLOAT16: + case TensorProto_DataType::TensorProto_DataType_BFLOAT16: return ov::element::bf16; } OPENVINO_THROW("unsupported element type"); @@ -99,9 +99,9 @@ void validate_scalar_input(const char* input_name, } template -ov::OutputVector handle_opset6_binary_op(const Node& node) { - const ov::Output lhs_node = node.get_ng_inputs().at(0); - ov::Output rhs_node = node.get_ng_inputs().at(1); +ov::OutputVector handle_opset6_binary_op(const 
ov::frontend::onnx::Node& node) { + const ov::Output lhs_node = node.get_ov_inputs().at(0); + ov::Output rhs_node = node.get_ov_inputs().at(1); const bool broadcast = node.get_attribute_value("broadcast", 0); if (broadcast) { if (node.has_attribute("axis")) { @@ -115,7 +115,7 @@ ov::OutputVector handle_opset6_binary_op(const Node& node) { axis += lhs_rank; if (lhs_rank > axis + rhs_rank) { auto ones = v0::Constant::create(ov::element::i64, - Shape{static_cast(lhs_rank - axis - rhs_rank)}, + ov::Shape{static_cast(lhs_rank - axis - rhs_rank)}, std::vector(lhs_rank - axis - rhs_rank, 1)); auto rhs_shape = std::make_shared(rhs_node); auto new_shape = std::make_shared(ov::OutputVector{rhs_shape, ones}, 0); @@ -137,7 +137,7 @@ template ov::OutputVector handle_opset6_binary_op(const Node& no const std::string FAILSAFE_NODE = "ONNX_FAILSAFE_NODE"; std::shared_ptr make_failsafe_constant(const ov::element::Type& dtype) { - const auto failsafe_constant = v0::Constant::create(dtype, Shape{}, {0}); + const auto failsafe_constant = v0::Constant::create(dtype, ov::Shape{}, {0}); auto& rt_info = failsafe_constant->get_rt_info(); rt_info[FAILSAFE_NODE] = true; return failsafe_constant; @@ -167,7 +167,7 @@ std::string collect_translation_exceptions(const std::shared_ptr& par bool unsupported_found = false; bool additional_error_found = false; for (const auto& op : partially_converted->get_ops()) { - if (const auto unsupported = std::dynamic_pointer_cast(op)) { + if (const auto unsupported = std::dynamic_pointer_cast(op)) { if (unsupported->additional_error_message().empty()) { fully_unsupported_ops += (unsupported->get_attrs().get_opset_name().empty() ? "" @@ -198,6 +198,6 @@ std::string collect_translation_exceptions(const std::shared_ptr& par } } // namespace common -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/common.hpp b/src/frontends/onnx/frontend/src/utils/common.hpp index 801901c5dd0037..0bfee5c8dce3a5 100644 --- a/src/frontends/onnx/frontend/src/utils/common.hpp +++ b/src/frontends/onnx/frontend/src/utils/common.hpp @@ -14,14 +14,14 @@ #include #include "core/node.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/node.hpp" #include "openvino/core/shape.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/op/constant.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace common { const ov::element::Type& get_ov_element_type(std::int64_t onnx_type); @@ -131,10 +131,9 @@ std::unique_ptr make_unique(Args&&... args) { /// \param node ONNX node /// /// \return ov::OutputVector with binary op -OPENVINO_SUPPRESS_DEPRECATED_START + template -ov::OutputVector handle_opset6_binary_op(const Node& node); -OPENVINO_SUPPRESS_DEPRECATED_END +ov::OutputVector handle_opset6_binary_op(const ov::frontend::onnx::Node& node); /// \brief Creates a "dummy" constant to be used in place of an invalid initializer /// encountered in the original model. @@ -157,5 +156,6 @@ bool is_optimized_out(const ov::Output& node_output); /// \brief Collect unsupported operators after convert_partially and all exceptions from translation process. 
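A side note on handle_opset6_binary_op above: the legacy opset-6 `broadcast`/`axis` attributes are lowered to standard NUMPY broadcasting by right-padding the RHS shape with ones until it lines up at `axis` of the LHS. An editorial sketch of that shape arithmetic (helper name invented):

#include <cstdint>
#include <vector>

// For an LHS of rank 4 and axis = 1, an RHS of shape {5} becomes {5, 1, 1};
// right-aligned NUMPY broadcasting then matches it against LHS dimension 1.
std::vector<int64_t> pad_rhs_shape(std::vector<int64_t> rhs, int64_t lhs_rank, int64_t axis) {
    while (static_cast<int64_t>(rhs.size()) < lhs_rank - axis) {
        rhs.push_back(1);  // same count as the lhs_rank - axis - rhs_rank ones above
    }
    return rhs;
}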
std::string collect_translation_exceptions(const std::shared_ptr& partially_converted); } // namespace common -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/conv_factory.cpp b/src/frontends/onnx/frontend/src/utils/conv_factory.cpp index fbe44b296052f7..782924439a7e05 100644 --- a/src/frontends/onnx/frontend/src/utils/conv_factory.cpp +++ b/src/frontends/onnx/frontend/src/utils/conv_factory.cpp @@ -8,13 +8,13 @@ #include "exceptions.hpp" #include "openvino/op/group_conv.hpp" #include "openvino/op/util/attr_types.hpp" -#include "ov_models/ov_builders/reshape.hpp" #include "utils/conv_factory.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace conv_factory { std::shared_ptr make_ng_convolution(const ov::Output& data, const ov::Output& filters, @@ -45,5 +45,6 @@ std::shared_ptr make_ng_convolution(const ov::Output& data } } } // namespace conv_factory -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/conv_factory.hpp b/src/frontends/onnx/frontend/src/utils/conv_factory.hpp index 681cb92787f784..bf3020d4256f1b 100644 --- a/src/frontends/onnx/frontend/src/utils/conv_factory.hpp +++ b/src/frontends/onnx/frontend/src/utils/conv_factory.hpp @@ -8,8 +8,9 @@ #include "openvino/core/node.hpp" #include "openvino/op/op.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace conv_factory { std::shared_ptr make_ng_convolution(const ov::Output& data, const ov::Output& filters, @@ -20,5 +21,6 @@ std::shared_ptr make_ng_convolution(const ov::Output& data int64_t groups, const ov::op::PadType& auto_pad); } // namespace conv_factory -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/convpool.cpp b/src/frontends/onnx/frontend/src/utils/convpool.cpp index 4dc590dae189a1..b59fed54385e4f 100644 --- a/src/frontends/onnx/frontend/src/utils/convpool.cpp +++ b/src/frontends/onnx/frontend/src/utils/convpool.cpp @@ -19,13 +19,12 @@ using namespace ov; using namespace ov::op; using ov::CoordinateDiff; -OPENVINO_SUPPRESS_DEPRECATED_START - -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace convpool { -Shape get_kernel_shape(const Node& node) { - const auto& data_shape = node.get_ng_inputs().at(0).get_partial_shape(); +ov::Shape get_kernel_shape(const Node& node) { + const auto& data_shape = node.get_ov_inputs().at(0).get_partial_shape(); const size_t input_spatial_dims = data_shape.rank().get_length() - 2; return node.get_attribute_value>("kernel_shape", std::vector(input_spatial_dims, 1UL)); } @@ -39,7 +38,7 @@ namespace { /// \return The attribute default value. 
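The rank - 2 rule visible in get_kernel_shape() above (everything past the N and C dims is spatial) also drives the attribute defaults that follow. In short, as an editorial sketch with an invented helper name:

#include <cstddef>
#include <vector>

// Fallback kernel_shape for a static input rank: one entry of 1 per spatial
// dim, e.g. rank 4 (NCHW) -> {1, 1}.
std::vector<std::size_t> default_kernel_shape(std::size_t input_rank) {
    return std::vector<std::size_t>(input_rank - 2, 1UL);
}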
/// std::vector get_attr_default_value(const Node& node, const std::string& attr_name) { - const auto data_rank = node.get_ng_inputs().at(0).get_partial_shape().rank(); + const auto data_rank = node.get_ov_inputs().at(0).get_partial_shape().rank(); CHECK_VALID_NODE(node, data_rank.is_static(), "If '", attr_name, "' is not provided data rank must be static."); const auto data_spatial_dims = data_rank.get_length() - 2; @@ -70,11 +69,11 @@ std::vector get_attribute_value(const Node& node, } } // namespace -Strides get_strides(const Node& node, const std::size_t kernel_rank) { +ov::Strides get_strides(const Node& node, const std::size_t kernel_rank) { return get_attribute_value(node, "strides", kernel_rank); } -Strides get_dilations(const Node& node, const std::size_t kernel_rank) { +ov::Strides get_dilations(const Node& node, const std::size_t kernel_rank) { return get_attribute_value(node, "dilations", kernel_rank); } @@ -127,17 +126,17 @@ std::pair get_pads(const Node& node, const size_ } std::pair get_pads(const Node& node) { - const auto data_rank = node.get_ng_inputs().at(0).get_partial_shape().rank(); + const auto data_rank = node.get_ov_inputs().at(0).get_partial_shape().rank(); CHECK_VALID_NODE(node, data_rank.is_static(), "The rank of node must be static in order to calculate pads"); const auto data_spatial_dims = data_rank.get_length() - 2; return get_pads(node, data_spatial_dims); } -void calculate_auto_pads(const Shape& data_shape, - const Shape& filter_shape, - const Strides& strides, - const Strides& dilations, +void calculate_auto_pads(const ov::Shape& data_shape, + const ov::Shape& filter_shape, + const ov::Strides& strides, + const ov::Strides& dilations, const ov::op::PadType& pad_type, CoordinateDiff& padding_below, CoordinateDiff& padding_above) { @@ -165,9 +164,9 @@ void calculate_auto_pads(const Shape& data_shape, } Output get_reshaped_filters(const Output& filters, int64_t groups) { - const auto zero_node = v0::Constant::create(ov::element::i64, Shape(), {0}); - const auto split_lengths = v0::Constant::create(ov::element::i64, Shape{2}, {1, -1}); - const auto groups_node = v0::Constant::create(ov::element::i64, Shape{1}, {groups}); + const auto zero_node = v0::Constant::create(ov::element::i64, ov::Shape(), {0}); + const auto split_lengths = v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, -1}); + const auto groups_node = v0::Constant::create(ov::element::i64, ov::Shape{1}, {groups}); const auto filters_shape = std::make_shared(filters); const auto splitted_shape = std::make_shared(filters_shape, zero_node, split_lengths); @@ -181,7 +180,6 @@ Output get_reshaped_filters(const Output& filters, int64_t g return reshaped_filters; } } // namespace convpool -} // namespace onnx_import -} // namespace ngraph - -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/convpool.hpp b/src/frontends/onnx/frontend/src/utils/convpool.hpp index 240fa667f447de..052e63e7d15993 100644 --- a/src/frontends/onnx/frontend/src/utils/convpool.hpp +++ b/src/frontends/onnx/frontend/src/utils/convpool.hpp @@ -5,19 +5,18 @@ #pragma once #include "core/node.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/shape.hpp" #include "openvino/core/strides.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace convpool { /// \brief Get shape of kernel (filter) in pixels. 
/// /// \param node The Node ptr representing Conv or Pool operation. /// \return The kernel Shape object representing its dimensions (height, width, depth). -ov::Shape get_kernel_shape(const Node& node); +ov::Shape get_kernel_shape(const ov::frontend::onnx::Node& node); /// /// \brief Get number of pixels to stride operation by in each direction. @@ -27,7 +26,7 @@ ov::Shape get_kernel_shape(const Node& node); /// /// \return The kernel Shape object representing its dimensions (height, width, /// depth). -ov::Strides get_strides(const Node& node, const std::size_t kernel_rank = 0UL); +ov::Strides get_strides(const ov::frontend::onnx::Node& node, const std::size_t kernel_rank = 0UL); /// /// \brief Get number of pixels for filter dilation in each direction. @@ -35,16 +34,16 @@ ov::Strides get_strides(const Node& node, const std::size_t kernel_rank = 0UL); /// \param[in] node The Node ptr representing ONNX operation. /// \param[in] kernel_rank The operator'skernel rank. /// -/// \return The Strides object containing number of pixels for filter dilation +/// \return The ov::Strides object containing number of pixels for filter dilation /// (height, width, depth). -ov::Strides get_dilations(const Node& node, const std::size_t kernel_rank = 0UL); +ov::Strides get_dilations(const ov::frontend::onnx::Node& node, const std::size_t kernel_rank = 0UL); /// \brief Gets the 'ceil_mode' (rounding type) attribute value. /// /// \param[in] node The ONNX node we query for attribute. /// /// \return The OV RoundingType object representing 'ceil_mode' attribute value. -ov::op::RoundingType get_rounding_type(const Node& node); +ov::op::RoundingType get_rounding_type(const ov::frontend::onnx::Node& node); /// \brief Get padding values for the operation described by an ONNX node. /// \details Values are taken from the `pads` attribute. @@ -56,7 +55,8 @@ ov::op::RoundingType get_rounding_type(const Node& node); /// /// \return A pair of (padding_above, padding_below), which elements contains number of /// pixels to pad in respective dimensions (height, width, depth). -std::pair get_pads(const Node& node, const size_t kernel_rank); +std::pair get_pads(const ov::frontend::onnx::Node& node, + const size_t kernel_rank); /// \brief Get padding values for the operation described by an ONNX node. /// \details Values are taken from the `pads` attribute. @@ -67,7 +67,7 @@ std::pair get_pads(const Node& node, con /// /// \return A pair of (padding_above, padding_below), which elements contains number of /// pixels to pad in respective dimensions (height, width, depth). -std::pair get_pads(const Node& node); +std::pair get_pads(const ov::frontend::onnx::Node& node); /// /// \brief Calculate paddings with respect to auto_pad value. @@ -95,7 +95,7 @@ void calculate_auto_pads(const ov::Shape& data_shape, /// /// \return The OV PadType object representing 'auto_pad' attribute value. /// -ov::op::PadType get_auto_pad(const Node& node); +ov::op::PadType get_auto_pad(const ov::frontend::onnx::Node& node); /// \brief Reshape group convolution filters to match desired shape: /// from [C_INPUT x C_OUTPUT/groups x k1 x k2 x ... x kn] @@ -107,8 +107,6 @@ ov::op::PadType get_auto_pad(const Node& node); /// \return Reshaped filters input. 
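On static shapes, the VariadicSplit/Concat sequence in get_reshaped_filters() reduces to dividing the leading filter dim by `groups` and prepending a groups dim. A hedged editorial sketch (helper name invented; semantics inferred from the doc comment above):

#include <cstdint>
#include <vector>

// The leading dim is divided by groups and a groups dim is prepended,
// e.g. {64, 8, 3, 3} with groups = 4 -> {4, 16, 8, 3, 3}.
std::vector<int64_t> grouped_filters_shape(const std::vector<int64_t>& filters, int64_t groups) {
    std::vector<int64_t> out{groups, filters.at(0) / groups};
    out.insert(out.end(), filters.begin() + 1, filters.end());
    return out;
}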
ov::Output get_reshaped_filters(const ov::Output& filters, int64_t groups); } // namespace convpool - -} // namespace onnx_import - -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/dft.cpp b/src/frontends/onnx/frontend/src/utils/dft.cpp index 38a75a57f51d1b..4b9702f54ba0b4 100644 --- a/src/frontends/onnx/frontend/src/utils/dft.cpp +++ b/src/frontends/onnx/frontend/src/utils/dft.cpp @@ -5,7 +5,6 @@ #include "dft.hpp" #include "core/null_node.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/constant.hpp" @@ -18,8 +17,9 @@ using namespace ov::op; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace dft { namespace { @@ -54,9 +54,8 @@ ov::Output make_dft(const ov::Output& signal, if (is_inversed || !is_onesided) { // skip for RDFT case conversion_to_complex_applied = try_convert_real_to_complex(processed_signal); } - OPENVINO_SUPPRESS_DEPRECATED_START + bool dft_length_provided = !ov::op::util::is_null(length); - OPENVINO_SUPPRESS_DEPRECATED_END ov::Output result; if (is_inversed) { @@ -83,5 +82,6 @@ ov::Output make_dft(const ov::Output& signal, return {result}; } } // namespace dft -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/dft.hpp b/src/frontends/onnx/frontend/src/utils/dft.hpp index 6ab0b01580df43..4052b250635e6d 100644 --- a/src/frontends/onnx/frontend/src/utils/dft.hpp +++ b/src/frontends/onnx/frontend/src/utils/dft.hpp @@ -6,8 +6,9 @@ #include "openvino/core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace dft { ov::Output make_dft(const ov::Output& signal, @@ -16,5 +17,6 @@ ov::Output make_dft(const ov::Output& signal, bool is_inversed, bool is_one_sided); } // namespace dft -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/legacy_conversion_extension.hpp b/src/frontends/onnx/frontend/src/utils/legacy_conversion_extension.hpp deleted file mode 100644 index 75df0dffc933dd..00000000000000 --- a/src/frontends/onnx/frontend/src/utils/legacy_conversion_extension.hpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once -#include - -#include "openvino/frontend/extension/conversion.hpp" -#include "openvino/frontend/node_context.hpp" -#include "openvino/frontend/onnx/node_context.hpp" -#include "openvino/frontend/onnx/visibility.hpp" -#include "ops_bridge.hpp" - -namespace ngraph { -namespace onnx_import { -/// An extension holding its own copy of the OperatorsBridge which should only be used with legacy ONNX importer API -/// Having it here keeps the legacy API operational without interfering with the frontends API -class LegacyConversionExtension : public ov::frontend::ConversionExtensionBase { -public: - using Ptr = std::shared_ptr; - - LegacyConversionExtension() : ov::frontend::ConversionExtensionBase("") {} - - OperatorsBridge& ops_bridge() { - return m_legacy_ops_bridge; - } - - /// The legacy API entry point for registering custom operations globally (does not affect ONNX FE) - void register_operator(const std::string& name, 
int64_t version, const std::string& domain, Operator fn) { - std::lock_guard lock{m_mutex}; - m_legacy_ops_bridge.register_operator(name, version, domain, std::move(fn)); - } - - void unregister_operator(const std::string& name, int64_t version, const std::string& domain) { - std::lock_guard lock{m_mutex}; - m_legacy_ops_bridge.unregister_operator(name, version, domain); - } - -private: - std::mutex m_mutex; - OperatorsBridge m_legacy_ops_bridge; -}; -} // namespace onnx_import -} // namespace ngraph diff --git a/src/tests/ov_helpers/ov_models/ov_builders/src/norm.cpp b/src/frontends/onnx/frontend/src/utils/norm.cpp similarity index 94% rename from src/tests/ov_helpers/ov_models/ov_builders/src/norm.cpp rename to src/frontends/onnx/frontend/src/utils/norm.cpp index 2baba1971d5dac..5de81d55fdb31e 100644 --- a/src/tests/ov_helpers/ov_models/ov_builders/src/norm.cpp +++ b/src/frontends/onnx/frontend/src/utils/norm.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/ov_builders/norm.hpp" +#include "utils/norm.hpp" #include "openvino/op/abs.hpp" #include "openvino/op/add.hpp" @@ -50,7 +50,7 @@ std::shared_ptr lp_norm(const Output& value, std::shared_ptr inv_p_node = ov::op::v0::Constant::create(values->get_element_type(), Shape{}, {1.f / p_norm}); - return {std::make_shared(values, inv_p_node)}; + return std::make_shared(values, inv_p_node); } /// \brief Calculates L-0 norm of input tensor. @@ -126,17 +126,14 @@ std::shared_ptr l2_norm(const Output& value, std::shared_ptr values{std::make_shared(pow, reduction_axes, keep_dims)}; std::shared_ptr bias_node{ov::op::v0::Constant::create(values->get_element_type(), Shape{}, {bias})}; - std::shared_ptr result; switch (bias_mode) { case BiasMode::MAX: { - result = std::make_shared(std::make_shared(values, bias_node)); - break; + return std::make_shared(std::make_shared(values, bias_node)); } case BiasMode::ADD: default: - result = std::make_shared(std::make_shared(values, bias_node)); + return std::make_shared(std::make_shared(values, bias_node)); } - return result; } } // namespace diff --git a/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/norm.hpp b/src/frontends/onnx/frontend/src/utils/norm.hpp similarity index 100% rename from src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/norm.hpp rename to src/frontends/onnx/frontend/src/utils/norm.hpp diff --git a/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp b/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp index fd7ec5d34cf1d7..11c55e2251b7d6 100644 --- a/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp +++ b/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp @@ -15,8 +15,9 @@ using namespace ov; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace detail { namespace { void remove_dangling_parameters(std::shared_ptr& model) { @@ -30,7 +31,7 @@ void remove_dangling_parameters(std::shared_ptr& model) { std::all_of(parameter_users.begin(), parameter_users.end(), [](const std::shared_ptr& node) -> bool { - return std::dynamic_pointer_cast(node) != nullptr; + return std::dynamic_pointer_cast(node) != nullptr; }); if (is_dangling_parameter) { model->remove_parameter(parameter); @@ -46,9 +47,7 @@ void remove_dangling_results(std::shared_ptr& model) { const auto result_inputs = result->input_values(); const bool is_dangling_result = std::all_of(result_inputs.begin(), result_inputs.end(), [](const Output& node) -> bool { - 
OPENVINO_SUPPRESS_DEPRECATED_START return ov::op::util::is_null(node); - OPENVINO_SUPPRESS_DEPRECATED_END }); if (is_dangling_result) { model->remove_result(result); @@ -56,7 +55,7 @@ void remove_dangling_results(std::shared_ptr& model) { } } -void apply_transformations(ONNX_NAMESPACE::ModelProto& model_proto) { +void apply_transformations(ModelProto& model_proto) { transform::fixup_legacy_operators(model_proto); } @@ -68,10 +67,10 @@ void convert_decoded_model(std::shared_ptr model) { OPENVINO_ASSERT(it != rt_info.end(), "Could not find '" + std::string(ONNX_GRAPH_RT_ATTRIBUTE) + "' attribute in decoded model. Model probably wasn't created by FrontEnd::decode function."); - auto onnx_graph = it->second.as>(); + auto onnx_graph = it->second.as>(); for (const auto& node : model->get_ordered_ops()) { - if (auto raw_node = std::dynamic_pointer_cast(node)) { - if (auto subgraph_node = std::dynamic_pointer_cast(node)) { + if (auto raw_node = std::dynamic_pointer_cast(node)) { + if (auto subgraph_node = std::dynamic_pointer_cast(node)) { subgraph_node->infer_inputs_from_parent(); for (auto& model : subgraph_node->get_subgraph_models()) { convert_decoded_model(model); @@ -90,7 +89,7 @@ void convert_decoded_model(std::shared_ptr model) { detail::remove_dangling_results(model); } -std::shared_ptr import_onnx_model(std::shared_ptr model_proto, +std::shared_ptr import_onnx_model(std::shared_ptr model_proto, const std::string& model_path, detail::MappedMemoryHandles mmap_cache, ov::frontend::ExtensionHolder extensions) { @@ -102,7 +101,7 @@ std::shared_ptr import_onnx_model(std::shared_ptr decode_to_framework_nodes(std::shared_ptr model_proto, +std::shared_ptr decode_to_framework_nodes(std::shared_ptr model_proto, const std::string& model_path, detail::MappedMemoryHandles mmap_cache, ov::frontend::ExtensionHolder extensions) { @@ -114,5 +113,6 @@ std::shared_ptr decode_to_framework_nodes(std::shared_ptrdecode(); } } // namespace detail -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp b/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp index 8348313ca3f112..e6afa4b582cecc 100644 --- a/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp +++ b/src/frontends/onnx/frontend/src/utils/onnx_internal.hpp @@ -9,16 +9,19 @@ #include "openvino/core/model.hpp" #include "openvino/frontend/extension/holder.hpp" -#include "utils/legacy_conversion_extension.hpp" #include "utils/tensor_external_data.hpp" namespace ONNX_NAMESPACE { class ModelProto; } -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace detail { + +using ::ONNX_NAMESPACE::ModelProto; + /// \brief Imports and converts an serialized ONNX model from a ModelProto /// to an ov::Model representation. /// @@ -34,7 +37,7 @@ namespace detail { /// \param extensions An object containing a collection of frontend extensions to use during the import process /// \return An ov::Model that represents a single output from the created /// graph. 
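Since the legacy importer and its conversion extension are removed in this patch, the supported entry point into import_onnx_model() from user code is the common frontend API. A minimal editorial sketch (the model file name is a placeholder):

#include <memory>
#include "openvino/frontend/manager.hpp"

int main() {
    ov::frontend::FrontEndManager manager;
    const auto frontend = manager.load_by_framework("onnx");  // ONNX frontend plugin
    const auto input_model = frontend->load(std::string("model.onnx"));
    const std::shared_ptr<ov::Model> model = frontend->convert(input_model);
    return model != nullptr ? 0 : 1;
}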
-std::shared_ptr import_onnx_model(std::shared_ptr model_proto, +std::shared_ptr import_onnx_model(std::shared_ptr model_proto, const std::string& model_path, detail::MappedMemoryHandles mmap_cache, ov::frontend::ExtensionHolder extensions = {}); @@ -47,7 +50,7 @@ std::shared_ptr import_onnx_model(std::shared_ptr decode_to_framework_nodes(std::shared_ptr model_proto, +std::shared_ptr decode_to_framework_nodes(std::shared_ptr model_proto, const std::string& model_path, detail::MappedMemoryHandles mmap_cache, ov::frontend::ExtensionHolder extensions = {}); @@ -56,10 +59,7 @@ std::shared_ptr decode_to_framework_nodes(std::shared_ptr model); -/// \brief Get the legacy conversion extension. -/// -/// \return const LegacyConversionExtension::Ptr -const LegacyConversionExtension::Ptr get_legacy_conversion_extension(); } // namespace detail -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/pooling_factory.cpp b/src/frontends/onnx/frontend/src/utils/pooling_factory.cpp index 1f460812ad6719..4a0afb5dce8e62 100644 --- a/src/frontends/onnx/frontend/src/utils/pooling_factory.cpp +++ b/src/frontends/onnx/frontend/src/utils/pooling_factory.cpp @@ -17,8 +17,9 @@ using namespace ov::op; using ov::Shape; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace pooling { namespace { @@ -32,14 +33,13 @@ std::shared_ptr transposition_axis_order(const ov::Rank& input_ran std::iota(axes.begin(), axes.end(), 0); std::reverse(axes.begin() + 2, axes.end()); - return std::make_shared(ov::element::i32, Shape{rank}, axes); + return std::make_shared(ov::element::i32, ov::Shape{rank}, axes); } } // namespace -OPENVINO_SUPPRESS_DEPRECATED_START PoolingFactory::PoolingFactory(const Node& node) : m_onnx_node{node}, - m_inputs{node.get_ng_inputs()}, + m_inputs{node.get_ov_inputs()}, m_kernel_shape(node.get_attribute_value>("kernel_shape")), m_strides{convpool::get_strides(node, m_kernel_shape.size())}, m_dilations{convpool::get_dilations(node, m_kernel_shape.size())}, @@ -48,8 +48,8 @@ PoolingFactory::PoolingFactory(const Node& node) const auto paddings = convpool::get_pads(node, m_kernel_shape.size()); const ov::CoordinateDiff& padding_above{paddings.second}; const ov::CoordinateDiff& padding_below{paddings.first}; - m_padding_below = Shape{std::begin(padding_below), std::end(padding_below)}; - m_padding_above = Shape{std::begin(padding_above), std::end(padding_above)}; + m_padding_below = ov::Shape{std::begin(padding_below), std::end(padding_below)}; + m_padding_above = ov::Shape{std::begin(padding_above), std::end(padding_above)}; m_storage_order = static_cast(node.get_attribute_value("storage_order", 0)); } @@ -64,7 +64,6 @@ ov::OutputVector PoolingFactory::make_avg_pool() const { m_rounding_type, m_auto_pad)}; } -OPENVINO_SUPPRESS_DEPRECATED_END ov::OutputVector PoolingFactory::make_max_pool() const { return {std::make_shared(m_inputs.at(0), @@ -95,5 +94,6 @@ ov::OutputVector PoolingFactory::make_max_pool_with_indices() const { } } } // namespace pooling -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/pooling_factory.hpp b/src/frontends/onnx/frontend/src/utils/pooling_factory.hpp index 5c09c6e92ccfd8..452af29e6c159e 100644 --- a/src/frontends/onnx/frontend/src/utils/pooling_factory.hpp +++ 
b/src/frontends/onnx/frontend/src/utils/pooling_factory.hpp @@ -8,15 +8,15 @@ #include #include "core/node.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/node.hpp" #include "openvino/core/shape.hpp" #include "openvino/core/strides.hpp" #include "openvino/op/op.hpp" #include "openvino/op/util/attr_types.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace pooling { /// /// \brief Factory class which generates sub-graphs for ONNX 'regular' pooling @@ -30,10 +30,8 @@ namespace pooling { /// paddings, kernel shape and auto_pad type. class PoolingFactory { public: - OPENVINO_SUPPRESS_DEPRECATED_START explicit PoolingFactory(const Node& node); virtual ~PoolingFactory() = default; - OPENVINO_SUPPRESS_DEPRECATED_END /// /// \brief Creates average pooling ONNX operation. @@ -51,9 +49,8 @@ class PoolingFactory { ov::OutputVector make_max_pool_with_indices() const; protected: - OPENVINO_SUPPRESS_DEPRECATED_START Node m_onnx_node; - OPENVINO_SUPPRESS_DEPRECATED_END + const ov::OutputVector m_inputs; ov::Shape m_kernel_shape; ov::Strides m_strides; @@ -68,5 +65,6 @@ class PoolingFactory { StorageOrder m_storage_order; }; } // namespace pooling -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/recurrent.cpp b/src/frontends/onnx/frontend/src/utils/recurrent.cpp index 66be9ffc786e7d..f5c2b6d8098f3b 100644 --- a/src/frontends/onnx/frontend/src/utils/recurrent.cpp +++ b/src/frontends/onnx/frontend/src/utils/recurrent.cpp @@ -18,18 +18,18 @@ #include "openvino/op/multiply.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/util/common_util.hpp" -#include "ov_models/ov_builders/reshape.hpp" -#include "ov_models/ov_builders/split.hpp" +#include "utils/reshape.hpp" +#include "utils/split.hpp" using namespace ov::op; using ov::Shape; -OPENVINO_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace recurrent { -OpInputMap::OpInputMap(const onnx_import::Node& node, std::size_t gates_count) { - const auto& ng_inputs = node.get_ng_inputs(); +OpInputMap::OpInputMap(const ov::frontend::onnx::Node& node, std::size_t gates_count) { + const auto& ng_inputs = node.get_ov_inputs(); m_map[OpInput::X] = ov::op::util::reorder_axes(ng_inputs.at(0), {1, 0, 2}); m_map[OpInput::W] = ng_inputs.at(1); @@ -41,33 +41,33 @@ OpInputMap::OpInputMap(const onnx_import::Node& node, std::size_t gates_count) { // Get dimensions needed for default inputs creation auto shape_of_x = std::make_shared(m_map[OpInput::X]); - auto axes = v0::Constant::create(ov::element::i32, Shape{1}, {0}); + auto axes = v0::Constant::create(ov::element::i32, ov::Shape{1}, {0}); auto batch_size_node = - std::make_shared(shape_of_x, v0::Constant::create(ov::element::i32, Shape{1}, {0}), axes); + std::make_shared(shape_of_x, v0::Constant::create(ov::element::i32, ov::Shape{1}, {0}), axes); auto seq_length_node = - std::make_shared(shape_of_x, v0::Constant::create(ov::element::i32, Shape{1}, {1}), axes); + std::make_shared(shape_of_x, v0::Constant::create(ov::element::i32, ov::Shape{1}, {1}), axes); auto shape_of_r = std::make_shared(m_map[OpInput::R]); auto num_directions_node = - std::make_shared(shape_of_r, v0::Constant::create(ov::element::i32, Shape{1}, {0}), axes); + std::make_shared(shape_of_r, v0::Constant::create(ov::element::i32, ov::Shape{1}, {0}), axes); auto 
hidden_size_node = - std::make_shared(shape_of_r, v0::Constant::create(ov::element::i32, Shape{1}, {2}), axes); + std::make_shared(shape_of_r, v0::Constant::create(ov::element::i32, ov::Shape{1}, {2}), axes); // ------ Optional inputs ------ if (ng_inputs.size() > 3 && !ov::op::util::is_null(ng_inputs.at(3))) { auto bias = ng_inputs.at(3); - auto split_bias = ov::op::util::split(bias, 2, 1); + auto split_bias = ov::op::util::make_split(bias, 2, 1); m_map[OpInput::B] = std::make_shared(split_bias.at(0), split_bias.at(1)); } else { auto b_shape = std::make_shared( - ov::OutputVector{ - num_directions_node, - std::make_shared(v0::Constant::create(ov::element::Type_t::i64, Shape{1}, {gates_count}), - hidden_size_node)}, + ov::OutputVector{num_directions_node, + std::make_shared( + v0::Constant::create(ov::element::Type_t::i64, ov::Shape{1}, {gates_count}), + hidden_size_node)}, 0); - m_map[OpInput::B] = - std::make_shared(v0::Constant::create(m_map[OpInput::X].get_element_type(), Shape{}, {0}), - b_shape); + m_map[OpInput::B] = std::make_shared( + v0::Constant::create(m_map[OpInput::X].get_element_type(), ov::Shape{}, {0}), + b_shape); } if (ng_inputs.size() > 4 && !ov::op::util::is_null(ng_inputs.at(4))) { m_map[OpInput::SEQ_LENGTHS] = ng_inputs.at(4); @@ -80,9 +80,9 @@ OpInputMap::OpInputMap(const onnx_import::Node& node, std::size_t gates_count) { } else { auto init_h_shape = std::make_shared(ov::OutputVector{batch_size_node, num_directions_node, hidden_size_node}, 0); - m_map[OpInput::INIT_H] = - std::make_shared(v0::Constant::create(m_map[OpInput::X].get_element_type(), Shape{}, {0}), - init_h_shape); + m_map[OpInput::INIT_H] = std::make_shared( + v0::Constant::create(m_map[OpInput::X].get_element_type(), ov::Shape{}, {0}), + init_h_shape); } } @@ -115,6 +115,6 @@ OpAttributes::OpAttributes(const Node& node) } } // namespace recurrent -} // namespace onnx_import -} // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/recurrent.hpp b/src/frontends/onnx/frontend/src/utils/recurrent.hpp index b286510edb24e4..a40a75a7e5100d 100644 --- a/src/frontends/onnx/frontend/src/utils/recurrent.hpp +++ b/src/frontends/onnx/frontend/src/utils/recurrent.hpp @@ -9,12 +9,12 @@ #include #include "core/node.hpp" -#include "openvino/core/deprecated.hpp" #include "openvino/core/node.hpp" #include "openvino/op/util/attr_types.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace recurrent { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INPUT NODES PARSING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -41,9 +41,8 @@ enum class OpInput { struct OpInputMap { using container_type = std::map>; - OPENVINO_SUPPRESS_DEPRECATED_START - explicit OpInputMap(const onnx_import::Node& node, std::size_t gates_count); - OPENVINO_SUPPRESS_DEPRECATED_END + explicit OpInputMap(const ov::frontend::onnx::Node& node, std::size_t gates_count); + OpInputMap(container_type&& map); virtual ~OpInputMap() = default; @@ -59,9 +58,8 @@ struct OpInputMap { /// \brief This structure aggregates operator's attributes. 
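As an editorial summary before OpAttributes: when the optional ONNX inputs are absent, OpInputMap above broadcasts a scalar zero to two default shapes. A sketch with invented names:

#include <cstdint>
#include <vector>

// B is the fused bias (the two halves are added right after the split above);
// init_h is the initial hidden state, matching the Concat order in the code.
struct RecurrentDefaults {
    std::vector<int64_t> b;       // {num_directions, gates_count * hidden_size}
    std::vector<int64_t> init_h;  // {batch_size, num_directions, hidden_size}
};

RecurrentDefaults default_shapes(int64_t batch, int64_t directions, int64_t hidden, int64_t gates) {
    return {{directions, gates * hidden}, {batch, directions, hidden}};
}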
/// struct OpAttributes { - OPENVINO_SUPPRESS_DEPRECATED_START explicit OpAttributes(const Node& node); - OPENVINO_SUPPRESS_DEPRECATED_END + virtual ~OpAttributes() = default; ov::op::RecurrentSequenceDirection m_direction; @@ -73,5 +71,6 @@ struct OpAttributes { }; } // namespace recurrent -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/reshape.cpp b/src/frontends/onnx/frontend/src/utils/reshape.cpp index 34be077fec9ed2..35c8efbbf941c5 100644 --- a/src/frontends/onnx/frontend/src/utils/reshape.cpp +++ b/src/frontends/onnx/frontend/src/utils/reshape.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/ov_builders/reshape.hpp" +#include "utils/reshape.hpp" #include #include @@ -10,20 +10,28 @@ #include #include "openvino/frontend/exception.hpp" +#include "openvino/op/add.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reduce_prod.hpp" #include "openvino/op/reshape.hpp" #include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/strided_slice.hpp" #include "openvino/op/subtract.hpp" +#include "openvino/op/transpose.hpp" #include "openvino/op/util/op_types.hpp" -#include "utils/reshape.hpp" +#include "openvino/op/variadic_split.hpp" using namespace ov::op; using ov::Shape; -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace reshape { std::vector infer_dimensions(const std::string& node_name, const std::vector& input_shape, @@ -72,7 +80,7 @@ std::vector infer_dimensions(const std::string& node_name, } ov::Output interpret_as_scalar(const ov::Output& node) { - Shape node_shape = node.get_shape(); + ov::Shape node_shape = node.get_shape(); // If node is already a scalar, return original if (is_scalar(node_shape)) { @@ -83,20 +91,20 @@ ov::Output interpret_as_scalar(const ov::Output& node) { "Scalar value can't be derived from a node with ", node_shape); - // If node is a Constant, recreate as Constant with Shape{} + // If node is a Constant, recreate as Constant with ov::Shape{} if (ov::op::util::is_constant(node.get_node())) { const auto value = ov::as_type_ptr(node.get_node_shared_ptr())->get_data_ptr(); return std::make_shared(node.get_element_type(), ov::Shape{}, value); } - return ov::op::util::reshape(node, Shape{}); + return ov::op::util::reshape(node, ov::Shape{}); } ov::Output reshape_channel_shaped_node_to_nchw(const ov::Output& node, const ov::Output& expected_rank) { // Prepare tail shape (rank = conv.rank - 2): [1, 1, 1, 1, ... 
] - const auto one_const = v0::Constant::create(ov::element::i64, Shape{1}, {1}); - const auto two_const = v0::Constant::create(ov::element::i64, Shape{1}, {2}); + const auto one_const = v0::Constant::create(ov::element::i64, ov::Shape{1}, {1}); + const auto two_const = v0::Constant::create(ov::element::i64, ov::Shape{1}, {2}); const auto tail_shape_rank = std::make_shared(expected_rank, two_const); const auto tail_shape = std::make_shared(one_const, tail_shape_rank); @@ -108,5 +116,110 @@ ov::Output reshape_channel_shaped_node_to_nchw(const ov::Output reshape(const Output& value, const Shape& shape) { + if (value.get_partial_shape().same_scheme(shape)) { + return value.get_node_shared_ptr(); + } else if (is_scalar(shape)) { + auto value_rank = value.get_shape().size(); + AxisVector axes_vector(value_rank); + std::iota(axes_vector.begin(), axes_vector.end(), 0); + auto axes = ov::op::v0::Constant::create(ov::element::i64, Shape{value_rank}, axes_vector); + return std::make_shared(value, axes); + } else { + auto out_pattern = ov::op::v0::Constant::create(ov::element::i64, + Shape{shape.size()}, + std::vector(shape.begin(), shape.end())); + + return std::make_shared(value, out_pattern, false); + } +} + +std::shared_ptr reorder_axes(const Output& value, std::vector axes_order) { + const auto axes_order_const = + ov::op::v0::Constant::create(ov::element::i64, + Shape{axes_order.size()}, + std::vector(axes_order.begin(), axes_order.end())); + return std::make_shared(value, axes_order_const); +} + +std::shared_ptr transpose(const Output& value) { + // This part is left to preserve backward compatibility and ensure passing ONNX tests. + if (value.get_partial_shape().is_static()) { + std::vector axes_order(value.get_shape().size()); + std::iota(begin(axes_order), end(axes_order), 0); + std::reverse(begin(axes_order), end(axes_order)); + return reorder_axes(value, axes_order); + } + + const auto input_rank = std::make_shared(std::make_shared(value)); + const auto neg_one = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {-1}); + const auto start_node = std::make_shared(input_rank, neg_one); + const auto reverse_axes_order = std::make_shared(reshape(start_node, Shape{}), // start + neg_one, // stop (exclusive) + neg_one); // step + return std::make_shared(value, reverse_axes_order); +} + +namespace { +/// +/// \brief Return the node representing normalized axis with respect to +/// provided rank. +/// +/// \param[in] node_rank The node representing rank used for normalization. +/// \param[in] axis The axis value to be normalized. +/// +/// \return The new Constant node representing normalized axis value. +/// +std::shared_ptr get_normalized_axis_node(const std::shared_ptr node_rank, int64_t axis) { + auto axis_node = ov::op::v0::Constant::create(ov::element::i64, Shape{1}, {axis}); + // shortcut for already positive value + if (axis >= 0) { + return axis_node; + } + + // TODO: What if axis value is beyond acceptable values? [-node_rank, + // node_rank-1] + return std::make_shared(node_rank, axis_node); +} +} // namespace + +std::shared_ptr flatten(const Output& value, int axis) { + // First dimension of output tensor is the product of [d_0, ... d_{axis-1}] dimensions of + // input tensor. 
The last dimension is the product of the rest of input tensor dimensions: + // [d_{axis}, ..., d_n] + std::shared_ptr output_shape; + if (axis == 0) { + output_shape = ov::op::v0::Constant::create(ov::element::i64, Shape{2}, {1, -1}); + } else if (axis == 1) { + output_shape = ov::op::v0::Constant::create(ov::element::i64, Shape{2}, {0, -1}); + } else { + const auto value_shape = std::make_shared(value); + const auto value_rank = std::make_shared(value_shape); + const auto axis_node = get_normalized_axis_node(value_rank, axis); + + const auto first_part_dims = + std::make_shared(value_shape, + ov::op::v0::Constant::create(ov::element::i64, {1}, {0}), + axis_node, + std::vector{0}, + std::vector{0}); + const auto first_part_dims_length = + std::make_shared(first_part_dims, + ov::op::v0::Constant::create(ov::element::i64, {}, {0}), + true); + + const auto remaining_part_length = ov::op::v0::Constant::create(ov::element::i64, {1}, {-1}); + + output_shape = + std::make_shared(ov::OutputVector{first_part_dims_length, remaining_part_length}, 0); + } + return std::make_shared(value, output_shape, true); +} +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/reshape.hpp b/src/frontends/onnx/frontend/src/utils/reshape.hpp index 198696310cb72c..1000f52c4e4d7d 100644 --- a/src/frontends/onnx/frontend/src/utils/reshape.hpp +++ b/src/frontends/onnx/frontend/src/utils/reshape.hpp @@ -11,8 +11,9 @@ #include "openvino/core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace reshape { /// \brief Infer `output_shape` dimension values. /// @@ -61,5 +62,42 @@ ov::Output reshape_channel_shaped_node_to_nchw(const ov::Output& expected_rank); } // namespace reshape -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend + +namespace op { +namespace util { +/// \brief Change shape of a value +/// +/// \param[in] value The value to be reshaped. +/// \param[in] shape The new shape. +/// +/// \return Reshape:v1 op. +std::shared_ptr reshape(const Output& value, const Shape& shape); + +/// \brief Permute axes according to specified axes_order parameter. +/// +/// \param The value whose axes we want to permute. +/// \param axes_order The permutation of axes. +/// +/// \return Transpose:v1 op. +std::shared_ptr reorder_axes(const Output& value, std::vector axes_order = {}); + +/// \brief Return transposed value (with axes in reversed order). +/// +/// \param Value to transpose. +/// +/// \return Transpose:v1 op. +std::shared_ptr transpose(const Output& value); + +/// \brief Flatten a value into a 2D matrix, with a static dividing axis. +/// +/// \param The tensor to be flattened. +/// \param The axis dividing shape. +/// +/// \return The new value will be a 2D matrix representing the flattened input +/// node. 
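A worked example of the flatten() contract documented above, on a static shape (editorial sketch, helper name invented):

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// axis = 2 turns {2, 3, 4, 5} into {6, 20}: the first output dim multiplies
// d_0..d_{axis-1}, the second multiplies d_{axis}..d_n.
std::vector<int64_t> flattened_shape(const std::vector<int64_t>& s, int axis) {
    const auto prod = [](auto first, auto last) {
        return std::accumulate(first, last, int64_t{1}, std::multiplies<int64_t>{});
    };
    return {prod(s.begin(), s.begin() + axis), prod(s.begin() + axis, s.end())};
}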
+std::shared_ptr flatten(const Output& value, int axis); +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/ov_builders/src/split.cpp b/src/frontends/onnx/frontend/src/utils/split.cpp similarity index 79% rename from src/tests/ov_helpers/ov_models/ov_builders/src/split.cpp rename to src/frontends/onnx/frontend/src/utils/split.cpp index fadd4afd8f07e5..eab597930eaaa8 100644 --- a/src/tests/ov_helpers/ov_models/ov_builders/src/split.cpp +++ b/src/frontends/onnx/frontend/src/utils/split.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/ov_builders/split.hpp" +#include "utils/split.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/split.hpp" @@ -11,7 +11,7 @@ namespace ov { namespace op { namespace util { -OutputVector split(const Output& value, const std::vector& split_lengths, int64_t axis) { +OutputVector make_split(const Output& value, const std::vector& split_lengths, int64_t axis) { const auto axis_node = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {axis}); const auto split_lengths_node = ov::op::v0::Constant::create(ov::element::i64, Shape{split_lengths.size()}, split_lengths); @@ -20,7 +20,7 @@ OutputVector split(const Output& value, const std::vector& sp return variadic_split->outputs(); } -OutputVector split(const Output& value, int64_t num_splits, int64_t axis) { +OutputVector make_split(const Output& value, int64_t num_splits, int64_t axis) { const auto axis_node = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {axis}); const auto split = std::make_shared(value, axis_node, num_splits); diff --git a/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/split.hpp b/src/frontends/onnx/frontend/src/utils/split.hpp similarity index 89% rename from src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/split.hpp rename to src/frontends/onnx/frontend/src/utils/split.hpp index d08b1da3aaf4a5..5cdbaf287eb90b 100644 --- a/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/split.hpp +++ b/src/frontends/onnx/frontend/src/utils/split.hpp @@ -21,7 +21,7 @@ namespace util { /// \return The vector containing multiple outputs we split input node into. /// The vector is output of Split:v1 op /// -ov::OutputVector split(const Output& value, const std::vector& split_lengths, int64_t axis = 0); +ov::OutputVector make_split(const Output& value, const std::vector& split_lengths, int64_t axis = 0); /// \brief Split value on specified axis into multiple parts. /// @@ -39,7 +39,7 @@ ov::OutputVector split(const Output& value, const std::vector& sp /// \return The vector containing multiple nodes we split input node into. 
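(Editorial aside before the doc text continues: usage of the renamed make_split helpers, with the include path introduced by this patch.)

#include <memory>
#include "openvino/op/parameter.hpp"
#include "utils/split.hpp"

// An equal split into two parts and a variadic split by explicit lengths.
void split_examples() {
    const auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 10});
    const auto halves = ov::op::util::make_split(data, 2, 1);       // two outputs shaped {4, 5}
    const auto pieces = ov::op::util::make_split(data, {3, 7}, 1);  // outputs {4, 3} and {4, 7}
}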
/// The vector is output of VariadicSplit:v1 op /// -ov::OutputVector split(const Output& value, int64_t num_splits, int64_t axis = 0); +ov::OutputVector make_split(const Output& value, int64_t num_splits, int64_t axis = 0); } // namespace util } // namespace op } // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp b/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp index d96f354c65e1c5..677b2d63fb9342 100644 --- a/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp +++ b/src/frontends/onnx/frontend/src/utils/tensor_external_data.cpp @@ -11,10 +11,11 @@ #include "openvino/util/file_util.hpp" #include "openvino/util/log.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace detail { -TensorExternalData::TensorExternalData(const ONNX_NAMESPACE::TensorProto& tensor) { +TensorExternalData::TensorExternalData(const TensorProto& tensor) { for (const auto& entry : tensor.external_data()) { if (entry.key() == "location") { m_data_location = ov::util::sanitize_path(entry.value()); @@ -103,5 +104,6 @@ std::string TensorExternalData::to_string() const { return s.str(); } } // namespace detail -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp b/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp index eb04e001e7ed4c..28a8860007d0c3 100644 --- a/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp +++ b/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp @@ -10,16 +10,18 @@ #include "openvino/runtime/shared_buffer.hpp" #include "openvino/util/mmap_object.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace detail { +using ::ONNX_NAMESPACE::TensorProto; template using Buffer = std::shared_ptr>>; using MappedMemoryHandles = std::shared_ptr>>; /// \brief Helper class used to load tensor data from external files class TensorExternalData { public: - TensorExternalData(const ONNX_NAMESPACE::TensorProto& tensor); + TensorExternalData(const TensorProto& tensor); /// \brief Load external data from tensor passed to constructor /// @@ -51,5 +53,6 @@ class TensorExternalData { std::string m_sha1_digest{}; }; } // namespace detail -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/frontend/src/utils/variadic.hpp b/src/frontends/onnx/frontend/src/utils/variadic.hpp index 553d61eb90c2a9..5f5dd373615b52 100644 --- a/src/frontends/onnx/frontend/src/utils/variadic.hpp +++ b/src/frontends/onnx/frontend/src/utils/variadic.hpp @@ -7,11 +7,11 @@ #include #include "core/node.hpp" -#include "openvino/core/deprecated.hpp" #include "utils/common.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace variadic { /// \brief Create an OpenVINO version of an ONNX variadic operation. /// This creates a subgraph with a series of binary operations. @@ -21,12 +21,12 @@ namespace variadic { /// \tparam T Class of an OpenVINO binary operation (e.g. 
Add, Minimum, Maximum) /// /// \return OpenVINO node equivalent of the ONNX operation -OPENVINO_SUPPRESS_DEPRECATED_START + template inline ov::OutputVector make_ng_variadic_op( const Node& node, const ov::op::AutoBroadcastSpec& auto_broadcast = ov::op::AutoBroadcastType::NUMPY) { - const ov::OutputVector ng_inputs{node.get_ng_inputs()}; + const ov::OutputVector ng_inputs{node.get_ov_inputs()}; // Templated binary operation - Creates Add, Minimum, Maximum, etc. const auto binary_operation = [&auto_broadcast](const ov::Output& arg0, @@ -46,10 +46,8 @@ inline ov::OutputVector make_ng_variadic_op( return {result}; } -OPENVINO_SUPPRESS_DEPRECATED_END } // namespace variadic - -} // namespace onnx_import - -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov diff --git a/src/frontends/onnx/onnx_common/include/onnx_common/utils.hpp b/src/frontends/onnx/onnx_common/include/onnx_common/utils.hpp index 62df90c9e7a05b..a26346945dd24b 100644 --- a/src/frontends/onnx/onnx_common/include/onnx_common/utils.hpp +++ b/src/frontends/onnx/onnx_common/include/onnx_common/utils.hpp @@ -10,7 +10,9 @@ namespace ov { namespace frontend { namespace onnx { namespace common { -using namespace ::ONNX_NAMESPACE; +using ::ONNX_NAMESPACE::TensorProto_DataType; +using ::ONNX_NAMESPACE::TensorShapeProto; + /// \brief Retuns size of an ONNX data type in bytes. /// /// \param onnx_type Number assigned to an ONNX data type in the TensorProto_DataType enum. diff --git a/src/frontends/onnx/onnx_common/src/utils.cpp b/src/frontends/onnx/onnx_common/src/utils.cpp index 725826ba95a23b..9b7977fccdadd5 100644 --- a/src/frontends/onnx/onnx_common/src/utils.cpp +++ b/src/frontends/onnx/onnx_common/src/utils.cpp @@ -4,12 +4,10 @@ #include "onnx_common/utils.hpp" -#include - -#include - #include "openvino/core/except.hpp" +using namespace ::ONNX_NAMESPACE; + namespace ov { namespace frontend { namespace onnx { @@ -49,8 +47,6 @@ size_t get_onnx_data_size(int32_t onnx_type) { } OPENVINO_THROW("unsupported element type"); } -namespace { -using namespace ONNX_NAMESPACE; const std::map OV_2_ONNX_TYPES = { {ov::element::Type_t::bf16, TensorProto_DataType::TensorProto_DataType_BFLOAT16}, {ov::element::Type_t::f16, TensorProto_DataType::TensorProto_DataType_FLOAT16}, @@ -65,7 +61,6 @@ const std::map OV_2_ONNX_TYPES = { {ov::element::Type_t::u32, TensorProto_DataType::TensorProto_DataType_UINT32}, {ov::element::Type_t::u64, TensorProto_DataType::TensorProto_DataType_UINT64}, {ov::element::Type_t::boolean, TensorProto_DataType::TensorProto_DataType_BOOL}}; -} // namespace ov::element::Type_t onnx_to_ov_data_type(const TensorProto_DataType& onnx_type) { const auto result = std::find_if(OV_2_ONNX_TYPES.begin(), diff --git a/src/frontends/paddle/src/input_model.cpp b/src/frontends/paddle/src/input_model.cpp index 1cba2924d81873..ef361aa683e07a 100644 --- a/src/frontends/paddle/src/input_model.cpp +++ b/src/frontends/paddle/src/input_model.cpp @@ -48,8 +48,8 @@ class InputModel::InputModelImpl { void set_partial_shape(Place::Ptr place, const ov::PartialShape&); ov::PartialShape get_partial_shape(Place::Ptr place) const; void set_element_type(Place::Ptr place, const ov::element::Type&); + ov::element::Type get_element_type(const Place::Ptr& place) const; void set_tensor_value(Place::Ptr place, const void* value); - std::vector> get_op_places(const int32_t blck_idx) const; std::map> get_var_places() const { return m_var_places; @@ -558,6 +558,10 @@ void InputModel::InputModelImpl::set_element_type(Place::Ptr place, 
const ov::el castToTensorPlace(place)->set_element_type(type); } +ov::element::Type InputModel::InputModelImpl::get_element_type(const Place::Ptr& place) const { + return castToTensorPlace(place)->get_element_type(); +} + void InputModel::InputModelImpl::set_tensor_value(Place::Ptr place, const void* value) { m_graph_changed = true; auto tensor_place = castToTensorPlace(place); @@ -632,6 +636,10 @@ void InputModel::set_element_type(const Place::Ptr& place, const ov::element::Ty _impl->set_element_type(place, type); } +ov::element::Type InputModel::get_element_type(const Place::Ptr& place) const { + return _impl->get_element_type(place); +} + void InputModel::set_tensor_value(const Place::Ptr& place, const void* value) { _impl->set_tensor_value(place, value); } diff --git a/src/frontends/paddle/src/input_model.hpp b/src/frontends/paddle/src/input_model.hpp index 8607cf30134129..11b4e230eb4a38 100644 --- a/src/frontends/paddle/src/input_model.hpp +++ b/src/frontends/paddle/src/input_model.hpp @@ -31,6 +31,7 @@ class InputModel : public ov::frontend::InputModel { void set_partial_shape(const Place::Ptr& place, const ov::PartialShape&) override; ov::PartialShape get_partial_shape(const Place::Ptr& place) const override; void set_element_type(const Place::Ptr& place, const ov::element::Type&) override; + ov::element::Type get_element_type(const Place::Ptr& place) const override; void set_tensor_value(const Place::Ptr& place, const void* value) override; int64_t get_version() const; diff --git a/src/frontends/paddle/tests/read_paddle_model_test.cpp b/src/frontends/paddle/tests/read_paddle_model_test.cpp index 521e14b79e7408..d321e1e0e820c3 100644 --- a/src/frontends/paddle/tests/read_paddle_model_test.cpp +++ b/src/frontends/paddle/tests/read_paddle_model_test.cpp @@ -3,10 +3,8 @@ // #include -#include #include -#include #include #include #include diff --git a/src/inference/dev_api/openvino/runtime/internal_properties.hpp b/src/inference/dev_api/openvino/runtime/internal_properties.hpp index 5250609306a523..a9e42e03fa54e7 100644 --- a/src/inference/dev_api/openvino/runtime/internal_properties.hpp +++ b/src/inference/dev_api/openvino/runtime/internal_properties.hpp @@ -43,29 +43,11 @@ static constexpr Property exclusive_async_requests */ static constexpr Property config_device_id{"CONFIG_DEVICE_ID"}; -/** - * @brief The name for setting CPU affinity per thread option. - * - * It is passed to Core::get_property() - * - * The following options are implemented only for the TBB as a threading option - * ov::threading::IStreamsExecutor::ThreadBindingType::NUMA (pinning threads to NUMA nodes, best for real-life, - * contented cases) on the Windows and MacOS* this option behaves as YES - * ov::threading::IStreamsExecutor::ThreadBindingType::HYBRID_AWARE (let the runtime to do pinning to the cores types, - e.g. 
prefer the "big" cores for latency tasks) on the hybrid CPUs this option is default - * - * Also, the settings are ignored, if the OpenVINO compiled with OpenMP and any affinity-related OpenMP's - * environment variable is set (as affinity is configured explicitly) - * @ingroup ov_dev_api_plugin_api - */ -static constexpr Property cpu_bind_thread{ - "CPU_BIND_THREAD"}; - /** * @brief Limit \#threads that are used by IStreamsExecutor to execute `parallel_for` calls * @ingroup ov_dev_api_plugin_api */ -static constexpr Property threads_per_stream{"THREADS_PER_STREAM"}; +static constexpr Property threads_per_stream{"THREADS_PER_STREAM"}; /** * @brief It contains compiled_model_runtime_properties information to make plugin runtime can check whether it is diff --git a/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp b/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp index 363b7a912b15f0..43708c94794ae8 100644 --- a/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp +++ b/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp @@ -21,31 +21,6 @@ namespace ov { namespace threading { -/** - * @brief Number of streams in Performance-core(big core) - */ -static constexpr Property big_core_streams{"BIG_CORE_STREAMS"}; - -/** - * @brief Number of streams in Efficient-core(small core) on hybrid cores machine - */ -static constexpr Property small_core_streams{"SMALL_CORE_STREAMS"}; - -/** - * @brief Number of threads per stream in big cores - */ -static constexpr Property threads_per_stream_big{"THREADS_PER_STREAM_BIG"}; - -/** - * @brief Number of threads per stream in small cores on hybrid cores machine - */ -static constexpr Property threads_per_stream_small{"THREADS_PER_STREAM_SMALL"}; - -/** - * @brief Small core start offset when binding cpu cores - */ -static constexpr Property small_core_offset{"SMALL_CORE_OFFSET"}; - /** * @interface IStreamsExecutor * @ingroup ov_dev_api_threading @@ -80,86 +55,65 @@ class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public ITaskExecutor { * @brief Defines IStreamsExecutor configuration */ struct OPENVINO_RUNTIME_API Config { - /** - * @brief Sets configuration - * @param properties map of properties - */ - void set_property(const ov::AnyMap& properties); - - /** - * @brief Sets configuration - * @param key property name - * @param value property value - */ - void set_property(const std::string& key, const ov::Any& value); - - /** - * @brief Return configuration value - * @param key configuration key - * @return configuration value wrapped into ov::Any - */ - ov::Any get_property(const std::string& key) const; - - /** - * @brief Create appropriate multithreaded configuration - * filing unconfigured values from initial configuration using hardware properties - * @param initial Inital configuration - * @param fp_intesive additional hint for the the (Hybrid) core-types selection logic - * whether the executor should be configured for floating point intensive work (as opposite to int8 - * intensive) - * @return configured values - */ - static Config make_default_multi_threaded(const Config& initial, const bool fp_intesive = true); - static int get_default_num_streams( - const bool enable_hyper_thread = true); // no network specifics considered (only CPU's caps); - static int get_hybrid_num_streams(std::map& config, const int stream_mode); - static void update_hybrid_custom_threads(Config& config); - static Config reserve_cpu_threads(const Config& initial); - - std::string _name; //!< 
Used by `ITT` to name executor threads - int _streams = 1; //!< Number of streams. - int _threadsPerStream = 0; //!< Number of threads per stream that executes `ov_parallel` calls - ThreadBindingType _threadBindingType = ThreadBindingType::NONE; //!< Thread binding to hardware resource type. - //!< No binding by default - int _threadBindingStep = 1; //!< In case of @ref CORES binding offset type - //!< thread binded to cores with defined step - int _threadBindingOffset = 0; //!< In case of @ref CORES binding offset type thread binded to cores - //!< starting from offset - int _threads = 0; //!< Number of threads distributed between streams. - //!< Reserved. Should not be used. - int _big_core_streams = 0; //!< Number of streams in Performance-core(big core) - int _small_core_streams = 0; //!< Number of streams in Efficient-core(small core) - int _threads_per_stream_big = 0; //!< Threads per stream in big cores - int _threads_per_stream_small = 0; //!< Threads per stream in small cores - int _small_core_offset = 0; //!< Calculate small core start offset when binding cpu cores - bool _enable_hyper_thread = true; //!< enable hyper thread - int _plugin_task = NOT_USED; - enum StreamMode { DEFAULT, AGGRESSIVE, LESSAGGRESSIVE }; + public: enum PreferredCoreType { ANY, LITTLE, BIG, ROUND_ROBIN // used w/multiple streams to populate the Big cores first, then the Little, then wrap around // (for large #streams) - } _threadPreferredCoreType = - PreferredCoreType::ANY; //!< In case of @ref HYBRID_AWARE hints the TBB to affinitize + }; + private: + std::string _name; //!< Used by `ITT` to name executor threads + int _streams = 1; //!< Number of streams. + int _threads_per_stream = 0; //!< Number of threads per stream that executes `ov_parallel` calls + ThreadBindingType _threadBindingType = ThreadBindingType::NONE; //!< Thread binding to hardware resource type. + //!< No binding by default + int _threadBindingStep = 1; //!< In case of @ref CORES binding offset type + //!< thread binded to cores with defined step + int _threadBindingOffset = 0; //!< In case of @ref CORES binding offset type thread binded to cores + //!< starting from offset + int _threads = 0; //!< Number of threads distributed between streams. + //!< Reserved. Should not be used. + PreferredCoreType _thread_preferred_core_type = + PreferredCoreType::ANY; //!< LITTLE and BIG are valid in hybrid core machine, ANY is valid in all machines. 
+                                               //!< Core type priority: physical PCore, ECore, logical PCore
         std::vector<std::vector<int>> _streams_info_table = {};
         std::vector<std::vector<int>> _stream_processor_ids;
         bool _cpu_reservation = false;
-        bool _streams_changed = false;
+
+        /**
+         * @brief Get and reserve cpu ids based on configuration and hardware information,
+         *        streams_info_table must be present in the configuration
+         */
+        void reserve_cpu_threads();
+
+        /**
+         * @brief Modify _streams_info_table and related fields according to the configuration
+         */
+        void update_executor_config();
+
+        /**
+         * @brief Set _streams_info_table and _cpu_reservation in cpu streams executor config when nstreams = 0,
+         *        that is, only create one thread with TBB
+         */
+        void set_config_zero_stream();
+
+    public:
        /**
         * @brief A constructor with arguments
         *
-        * @param[in] name The executor name
-        * @param[in] streams @copybrief Config::_streams
-        * @param[in] threadsPerStream @copybrief Config::_threadsPerStream
-        * @param[in] threadBindingType @copybrief Config::_threadBindingType
-        * @param[in] threadBindingStep @copybrief Config::_threadBindingStep
-        * @param[in] threadBindingOffset @copybrief Config::_threadBindingOffset
-        * @param[in] threads @copybrief Config::_threads
-        * @param[in] threadPreferBigCores @copybrief Config::_threadPreferBigCores
+        * @param[in] name The executor name
+        * @param[in] streams @copybrief Config::_streams
+        * @param[in] threadsPerStream @copybrief Config::_threads_per_stream
+        * @param[in] threadBindingType @copybrief Config::_threadBindingType
+        * @param[in] threadBindingStep @copybrief Config::_threadBindingStep
+        * @param[in] threadBindingOffset @copybrief Config::_threadBindingOffset
+        * @param[in] threads @copybrief Config::_threads
+        * @param[in] threadPreferredCoreType @copybrief Config::_thread_preferred_core_type
+        * @param[in] streamsInfoTable @copybrief Config::_streams_info_table
+        * @param[in] cpuReservation @copybrief Config::_cpu_reservation
         */
        Config(std::string name = "StreamsExecutor",
               int streams = 1,
@@ -173,32 +127,103 @@ class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public ITaskExecutor {
               bool cpuReservation = false)
            : _name{name},
              _streams{streams},
-             _threadsPerStream{threadsPerStream},
+             _threads_per_stream{threadsPerStream},
              _threadBindingType{threadBindingType},
              _threadBindingStep{threadBindingStep},
              _threadBindingOffset{threadBindingOffset},
              _threads{threads},
-             _threadPreferredCoreType(threadPreferredCoreType),
+             _thread_preferred_core_type(threadPreferredCoreType),
              _streams_info_table{streamsInfoTable},
-             _cpu_reservation{cpuReservation} {}
+             _cpu_reservation{cpuReservation} {
+            update_executor_config();
+        }
+        // These APIs (set_property and get_property) cannot be removed until they are no longer
+        // called by other plugins such as the NVIDIA plugin.
        /**
-        * @brief Modify _streams_info_table and related configuration according to user-specified parameters, bind
-        * threads to cpu cores if cpu_pinning is true.
-        * @param stream_nums Number of streams specified by user
-        * @param threads_per_stream Number of threads per stream specified by user
-        * @param core_type Cpu type (Big/Little/Any) specified by user
-        * @param cpu_pinning Whether to bind the threads to cpu cores
+        * @brief Sets configuration
+        * @param properties map of properties
         */
-        void update_executor_config(int stream_nums,
-                                    int threads_per_stream,
-                                    PreferredCoreType core_type,
-                                    bool cpu_pinning);
+        void set_property(const ov::AnyMap& properties);
+
        /**
-        * @brief Set _streams_info_table and _cpu_reservation in cpu streams executor config when nstreams = 0,
-        * that is, only create one thread with TBB
+        * @brief Sets configuration
+        * @param key property name
+        * @param value property value
         */
-        void set_config_zero_stream();
+        void set_property(const std::string& key, const ov::Any& value);
+
+        /**
+        * @brief Return configuration value
+        * @param key configuration key
+        * @return configuration value wrapped into ov::Any
+        */
+        ov::Any get_property(const std::string& key) const;
+
+        std::string get_name() {
+            return _name;
+        }
+        int get_streams() {
+            return _streams;
+        }
+        int get_streams() const {
+            return _streams;
+        }
+        int get_threads() {
+            return _threads;
+        }
+        int get_threads() const {
+            return _threads;
+        }
+        int get_threads_per_stream() {
+            return _threads_per_stream;
+        }
+        bool get_cpu_reservation() {
+            return _cpu_reservation;
+        }
+        std::vector<std::vector<int>> get_streams_info_table() {
+            return _streams_info_table;
+        }
+        std::vector<std::vector<int>> get_stream_processor_ids() {
+            return _stream_processor_ids;
+        }
+        ThreadBindingType get_thread_binding_type() {
+            return _threadBindingType;
+        }
+        int get_thread_binding_step() {
+            return _threadBindingStep;
+        }
+        int get_thread_binding_offset() {
+            return _threadBindingOffset;
+        }
+        bool operator==(const Config& config) {
+            return _name == config._name && _streams == config._streams &&
+                   _threads_per_stream == config._threads_per_stream &&
+                   _threadBindingType == config._threadBindingType &&
+                   _thread_preferred_core_type == config._thread_preferred_core_type;
+        }
+
+        /**
+        * @brief Create appropriate multithreaded configuration,
+        *        filling unconfigured values from the initial configuration using hardware properties
+        * @param initial Initial configuration
+        * @return configured values
+        */
+        static Config make_default_multi_threaded(const Config& initial);
+
+        static int get_default_num_streams();  // no network specifics considered (only CPU's caps);
+
+        /**
+        * @brief Get and reserve cpu ids based on configuration and hardware information,
+        *        streams_info_table must be present in the configuration
+        * @param initial Initial configuration
+        * @return configured values
+        */
+        // To be removed once other plugins no longer call it.
+        static Config reserve_cpu_threads(const Config& initial);
    };

    /**
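Editorial aside: with the fields made private and normalization moved into the constructor (which now calls update_executor_config()), consumers of Config switch to a construct-then-query pattern. A minimal sketch of hypothetical caller code, not taken from this PR:

    #include "openvino/runtime/threading/istreams_executor.hpp"

    using Config = ov::threading::IStreamsExecutor::Config;

    // Request 4 streams x 2 threads; the constructor normalizes the request
    // against the actual hardware, so the effective values may differ.
    Config config{"CPUStreamsExecutor", /*streams=*/4, /*threadsPerStream=*/2};

    int  streams = config.get_streams();             // effective number of streams
    int  threads = config.get_threads();             // effective total thread count
    auto table   = config.get_streams_info_table();  // normalized stream layout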
diff --git a/src/inference/src/core.cpp b/src/inference/src/core.cpp
index 75a68a2c75723d..618c795de14382 100644
--- a/src/inference/src/core.cpp
+++ b/src/inference/src/core.cpp
@@ -11,31 +11,44 @@
 #include "openvino/core/so_extension.hpp"
 #include "openvino/runtime/device_id_parser.hpp"
 #include "openvino/runtime/iremote_context.hpp"
+#include "openvino/util/common_util.hpp"
 #include "openvino/util/file_util.hpp"

 namespace ov {

 std::string find_plugins_xml(const std::string& xml_file) {
-    if (xml_file.empty()) {
-        const auto ov_library_path = ov::util::get_ov_lib_path();
+    std::string xml_file_name = xml_file;
+    if (xml_file_name.empty()) {
+        // Default plugin xml file name, will search in OV folder.
+        xml_file_name = "plugins.xml";
+    } else {
+        // The user may set any path for the plugins xml file, but security must be ensured when a
+        // file path outside the OV folder is used.
+        // If the xml file exists or the file path contains a file separator, return the file path;
+        // else search for it in the OV folder with no restriction on file name and extension.
+        if (ov::util::file_exists(xml_file_name) ||
+            xml_file_name.find(util::FileTraits<char>().file_separator) != xml_file_name.npos) {
+            return xml_file_name;
+        }
+    }
+    const auto ov_library_path = ov::util::get_ov_lib_path();

-        // plugins.xml can be found in either:
+    // plugins xml can be found in either:
+    // 1. openvino-X.Y.Z relative to libopenvino.so folder
+    std::ostringstream str;
+    str << "openvino-" << OPENVINO_VERSION_MAJOR << "." << OPENVINO_VERSION_MINOR << "." << OPENVINO_VERSION_PATCH;
+    const auto sub_folder = str.str();

-        // 1. openvino-X.Y.Z relative to libopenvino.so folder
-        std::ostringstream str;
-        str << "openvino-" << OPENVINO_VERSION_MAJOR << "." << OPENVINO_VERSION_MINOR << "." << OPENVINO_VERSION_PATCH;
-        const auto sub_folder = str.str();
+    // register plugins from default openvino-X.Y.Z/plugins.xml config
+    auto xmlConfigFileDefault = ov::util::path_join({ov_library_path, sub_folder, xml_file_name});
+    if (ov::util::file_exists(xmlConfigFileDefault))
+        return xmlConfigFileDefault;

-        // register plugins from default openvino-X.Y.Z/plugins.xml config
-        auto xmlConfigFileDefault = ov::util::path_join({ov_library_path, sub_folder, "plugins.xml"});
-        if (ov::util::file_exists(xmlConfigFileDefault))
-            return xmlConfigFileDefault;
+    // 2. in folder with libopenvino.so
+    xmlConfigFileDefault = ov::util::path_join({ov_library_path, xml_file_name});
+    if (ov::util::file_exists(xmlConfigFileDefault))
+        return xmlConfigFileDefault;

-        // 2. in folder with libopenvino.so
-        xmlConfigFileDefault = ov::util::path_join({ov_library_path, "plugins.xml"});
-        if (ov::util::file_exists(xmlConfigFileDefault))
-            return xmlConfigFileDefault;
-    }
     return xml_file;
 }
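For readability, the lookup order that the reworked find_plugins_xml() above implements, as a behavioral sketch (file names are illustrative):

    // find_plugins_xml("")            -> searches "plugins.xml" under <ov_lib_dir>/openvino-X.Y.Z/,
    //                                    then under <ov_lib_dir>/ itself
    // find_plugins_xml("/tmp/my.xml") -> returned as-is (contains a file separator)
    // find_plugins_xml("my.xml")      -> returned as-is if the file exists; otherwise searched
    //                                    in the same two OV folders as the default name
    // If nothing is found, the argument is returned unchanged.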
diff --git a/src/inference/src/dev/converter_utils.cpp b/src/inference/src/dev/converter_utils.cpp
index 0c3423fa546ae9..6566534df7e83b 100644
--- a/src/inference/src/dev/converter_utils.cpp
+++ b/src/inference/src/dev/converter_utils.cpp
@@ -21,7 +21,6 @@
 #include "ie_layouts.h"
 #include "ie_ngraph_utils.hpp"
 #include "iplugin_wrapper.hpp"
-#include "legacy_op_extension.hpp"
 #include "openvino/core/except.hpp"
 #include "openvino/op/parameter.hpp"
 #include "openvino/runtime/exception.hpp"
diff --git a/src/inference/src/dev/threading/cpu_streams_executor.cpp b/src/inference/src/dev/threading/cpu_streams_executor.cpp
index e3eeafe85930ad..3d4b71f0505535 100644
--- a/src/inference/src/dev/threading/cpu_streams_executor.cpp
+++ b/src/inference/src/dev/threading/cpu_streams_executor.cpp
@@ -30,32 +30,18 @@ struct CPUStreamsExecutor::Impl {
        CpuSet _mask;
        int _ncpus = 0;
        int _threadBindingStep = 0;
-        int _offset = 0;
-        int _cpuIdxOffset = 0;
        std::vector<int> _cpu_ids;
-        Observer(custom::task_arena& arena,
-                 CpuSet mask,
-                 int ncpus,
-                 const int streamId,
-                 const int threadsPerStream,
-                 const int threadBindingStep,
-                 const int threadBindingOffset,
-                 const int cpuIdxOffset = 0,
-                 const std::vector<int> cpu_ids = {})
+        Observer(custom::task_arena& arena, CpuSet mask, int ncpus, const std::vector<int> cpu_ids = {})
            : custom::task_scheduler_observer(arena),
              _mask{std::move(mask)},
              _ncpus(ncpus),
-              _threadBindingStep(threadBindingStep),
-              _offset{streamId * threadsPerStream + threadBindingOffset},
-              _cpuIdxOffset(cpuIdxOffset),
              _cpu_ids(cpu_ids) {}
        void on_scheduler_entry(bool) override {
-            pin_thread_to_vacant_core(_offset + tbb::this_task_arena::current_thread_index(),
+            pin_thread_to_vacant_core(tbb::this_task_arena::current_thread_index(),
                                      _threadBindingStep,
                                      _ncpus,
                                      _mask,
-                                      _cpu_ids,
-                                      _cpuIdxOffset);
+                                      _cpu_ids);
        }
        void on_scheduler_exit(bool) override {
            pin_current_thread_by_mask(_ncpus, _mask);
@@ -73,41 +59,41 @@ struct CPUStreamsExecutor::Impl {
                    _impl->_streamIdQueue.pop();
                }
            }
-            _numaNodeId = _impl->_config._streams
-                              ? _impl->_usedNumaNodes.at((_streamId % _impl->_config._streams) /
-                                                         ((_impl->_config._streams + _impl->_usedNumaNodes.size() - 1) /
-                                                          _impl->_usedNumaNodes.size()))
-                              : _impl->_usedNumaNodes.at(_streamId % _impl->_usedNumaNodes.size());
+            _numaNodeId =
+                _impl->_config.get_streams()
+                    ? _impl->_usedNumaNodes.at((_streamId % _impl->_config.get_streams()) /
+                                               ((_impl->_config.get_streams() + _impl->_usedNumaNodes.size() - 1) /
+                                                _impl->_usedNumaNodes.size()))
+                    : _impl->_usedNumaNodes.at(_streamId % _impl->_usedNumaNodes.size());
#if OV_THREAD == OV_THREAD_TBB || OV_THREAD == OV_THREAD_TBB_AUTO
-            if (is_cpu_map_available() && _impl->_config._streams_info_table.size() > 0) {
+            if (is_cpu_map_available() && _impl->_config.get_streams_info_table().size() > 0) {
                init_stream();
-            } else {
-                init_stream_legacy();
            }
#elif OV_THREAD == OV_THREAD_OMP
-            omp_set_num_threads(_impl->_config._threadsPerStream);
-            if (!check_open_mp_env_vars(false) && (ThreadBindingType::NONE != _impl->_config._threadBindingType)) {
+            omp_set_num_threads(_impl->_config.get_threads_per_stream());
+            if (!check_open_mp_env_vars(false) &&
+                (ThreadBindingType::NONE != _impl->_config.get_thread_binding_type())) {
                CpuSet processMask;
                int ncpus = 0;
                std::tie(processMask, ncpus) = get_process_mask();
                if (nullptr != processMask) {
-                    parallel_nt(_impl->_config._threadsPerStream, [&](int threadIndex, int threadsPerStream) {
-                        int thrIdx = _streamId * _impl->_config._threadsPerStream + threadIndex +
-                                     _impl->_config._threadBindingOffset;
-                        pin_thread_to_vacant_core(thrIdx, _impl->_config._threadBindingStep, ncpus, processMask);
+                    parallel_nt(_impl->_config.get_threads_per_stream(), [&](int threadIndex, int threadsPerStream) {
+                        int thrIdx = _streamId * _impl->_config.get_threads_per_stream() + threadIndex +
+                                     _impl->_config.get_thread_binding_offset();
+                        pin_thread_to_vacant_core(thrIdx, _impl->_config.get_thread_binding_step(), ncpus, processMask);
                    });
                }
            }
#elif OV_THREAD == OV_THREAD_SEQ
-            if (ThreadBindingType::NUMA == _impl->_config._threadBindingType) {
+            if (ThreadBindingType::NUMA == _impl->_config.get_thread_binding_type()) {
                pin_current_thread_to_socket(_numaNodeId);
-            } else if (ThreadBindingType::CORES == _impl->_config._threadBindingType) {
+            } else if (ThreadBindingType::CORES == _impl->_config.get_thread_binding_type()) {
                CpuSet processMask;
                int ncpus = 0;
                std::tie(processMask, ncpus) = get_process_mask();
                if (nullptr != processMask) {
-                    pin_thread_to_vacant_core(_streamId + _impl->_config._threadBindingOffset,
-                                              _impl->_config._threadBindingStep,
+                    pin_thread_to_vacant_core(_streamId + _impl->_config.get_thread_binding_offset(),
+                                              _impl->_config.get_thread_binding_step(),
                                              ncpus,
                                              processMask);
                }
@@ -120,7 +106,7 @@ struct CPUStreamsExecutor::Impl {
                _impl->_streamIdQueue.push(_streamId);
            }
#if OV_THREAD == OV_THREAD_TBB || OV_THREAD == OV_THREAD_TBB_AUTO
-            if (_impl->_config._name.find("StreamsExecutor") == std::string::npos) {
+            if (_impl->_config.get_name().find("StreamsExecutor") == std::string::npos) {
                set_cpu_used(_cpu_ids, NOT_USED);
            }
            if (nullptr != _observer) {
@@ -136,6 +122,7 @@ struct CPUStreamsExecutor::Impl {
                        const int core_type,
                        const int numa_node_id,
                        const int max_threads_per_core) {
+            auto stream_processors = _impl->_config.get_stream_processor_ids();
            _numaNodeId = std::max(0, numa_node_id);
            _socketId = get_socket_by_numa_node(_numaNodeId);
            if (stream_type == STREAM_WITHOUT_PARAM) {
@@ -157,23 +144,15 @@
                                      .set_max_threads_per_core(max_threads_per_core)});
            } else {
                _taskArena.reset(new custom::task_arena{concurrency});
-                _cpu_ids = static_cast<int>(_impl->_config._stream_processor_ids.size()) == _impl->_config._streams
-                               ? _impl->_config._stream_processor_ids[stream_id]
+                _cpu_ids = static_cast<int>(stream_processors.size()) == _impl->_config.get_streams()
+                               ?
stream_processors[stream_id] : _cpu_ids; if (_cpu_ids.size() > 0) { CpuSet processMask; int ncpus = 0; std::tie(processMask, ncpus) = get_process_mask(); if (nullptr != processMask) { - _observer.reset(new Observer{*_taskArena, - std::move(processMask), - ncpus, - 0, - concurrency, - 0, - 0, - 0, - _cpu_ids}); + _observer.reset(new Observer{*_taskArena, std::move(processMask), ncpus, _cpu_ids}); _observer->observe(true); } } @@ -186,11 +165,12 @@ struct CPUStreamsExecutor::Impl { int max_threads_per_core; StreamCreateType stream_type; const auto org_proc_type_table = get_org_proc_type_table(); - const auto stream_id = _impl->_config._streams == 0 ? 0 : _streamId % _impl->_config._streams; + int streams_num = _impl->_config.get_streams(); + const auto stream_id = streams_num == 0 ? 0 : _streamId % streams_num; get_cur_stream_info(stream_id, - _impl->_config._cpu_reservation, + _impl->_config.get_cpu_reservation(), org_proc_type_table, - _impl->_config._streams_info_table, + _impl->_config.get_streams_info_table(), stream_type, concurrency, cpu_core_type, @@ -206,112 +186,6 @@ struct CPUStreamsExecutor::Impl { numa_node_id, max_threads_per_core); } - - void init_stream_legacy() { - const auto concurrency = (0 == _impl->_config._threadsPerStream) ? custom::task_arena::automatic - : _impl->_config._threadsPerStream; - if (ThreadBindingType::HYBRID_AWARE == _impl->_config._threadBindingType) { - if (Config::PreferredCoreType::ROUND_ROBIN != _impl->_config._threadPreferredCoreType) { - if (Config::PreferredCoreType::ANY == _impl->_config._threadPreferredCoreType) { - _taskArena.reset(new custom::task_arena{concurrency}); - } else { - const auto selected_core_type = - Config::PreferredCoreType::BIG == _impl->_config._threadPreferredCoreType - ? custom::info::core_types().back() // running on Big cores only - : custom::info::core_types().front(); // running on Little cores only - _taskArena.reset(new custom::task_arena{custom::task_arena::constraints{} - .set_core_type(selected_core_type) - .set_max_concurrency(concurrency)}); - } - } else { - // assigning the stream to the core type in the round-robin fashion - // wrapping around total_streams (i.e. how many streams all different core types can handle - // together). Binding priority: Big core, Logical big core, Small core - const auto total_streams = _impl->total_streams_on_core_types.back().second; - const auto big_core_streams = _impl->total_streams_on_core_types.front().second; - const auto hybrid_core = _impl->total_streams_on_core_types.size() > 1; - const auto phy_core_streams = - _impl->_config._big_core_streams == 0 - ? 0 - : _impl->num_big_core_phys / _impl->_config._threads_per_stream_big; - const auto streamId_wrapped = _streamId % total_streams; - const auto& selected_core_type = - std::find_if( - _impl->total_streams_on_core_types.cbegin(), - _impl->total_streams_on_core_types.cend(), - [streamId_wrapped](const decltype(_impl->total_streams_on_core_types)::value_type& p) { - return p.second > streamId_wrapped; - }) - ->first; - const auto small_core = hybrid_core && selected_core_type == 0; - const auto logic_core = !small_core && streamId_wrapped >= phy_core_streams; - const auto small_core_skip = small_core && _impl->_config._threads_per_stream_small == 3 && - _impl->_config._small_core_streams > 1; - const auto max_concurrency = - small_core ? _impl->_config._threads_per_stream_small : _impl->_config._threads_per_stream_big; - // Special handling of _threads_per_stream_small == 3 - const auto small_core_id = small_core_skip ? 
0 : streamId_wrapped - big_core_streams; - const auto stream_id = - hybrid_core - ? (small_core ? small_core_id - : (logic_core ? streamId_wrapped - phy_core_streams : streamId_wrapped)) - : streamId_wrapped; - const auto thread_binding_step = hybrid_core ? (small_core ? _impl->_config._threadBindingStep : 2) - : _impl->_config._threadBindingStep; - // Special handling of _threads_per_stream_small == 3, need to skip 4 (Four cores share one L2 - // cache on the small core), stream_id = 0, cpu_idx_offset cumulative plus 4 - const auto small_core_offset = - small_core_skip ? _impl->_config._small_core_offset + (streamId_wrapped - big_core_streams) * 4 - : _impl->_config._small_core_offset; - const auto cpu_idx_offset = - hybrid_core - // Prevent conflicts with system scheduling, so default cpu id on big core starts from 1 - ? (small_core ? small_core_offset : (logic_core ? 0 : 1)) - : 0; -# ifdef _WIN32 - _taskArena.reset(new custom::task_arena{custom::task_arena::constraints{} - .set_core_type(selected_core_type) - .set_max_concurrency(max_concurrency)}); -# else - _taskArena.reset(new custom::task_arena{max_concurrency}); -# endif - CpuSet processMask; - int ncpus = 0; - std::tie(processMask, ncpus) = get_process_mask(); - if (nullptr != processMask) { - _observer.reset(new Observer{*_taskArena, - std::move(processMask), - ncpus, - stream_id, - max_concurrency, - thread_binding_step, - _impl->_config._threadBindingOffset, - cpu_idx_offset}); - _observer->observe(true); - } - } - } else if (ThreadBindingType::NUMA == _impl->_config._threadBindingType) { - _taskArena.reset(new custom::task_arena{custom::task_arena::constraints{_numaNodeId, concurrency}}); - } else if ((0 != _impl->_config._threadsPerStream) || - (ThreadBindingType::CORES == _impl->_config._threadBindingType)) { - _taskArena.reset(new custom::task_arena{concurrency}); - if (ThreadBindingType::CORES == _impl->_config._threadBindingType) { - CpuSet processMask; - int ncpus = 0; - std::tie(processMask, ncpus) = get_process_mask(); - if (nullptr != processMask) { - _observer.reset(new Observer{*_taskArena, - std::move(processMask), - ncpus, - _streamId, - _impl->_config._threadsPerStream, - _impl->_config._threadBindingStep, - _impl->_config._threadBindingOffset}); - _observer->observe(true); - } - } - } - } #endif Impl* _impl = nullptr; @@ -439,46 +313,17 @@ struct CPUStreamsExecutor::Impl { this) { _exectorMgr = executor_manager(); auto numaNodes = get_available_numa_nodes(); - if (_config._streams != 0) { + int streams_num = _config.get_streams(); + if (streams_num != 0) { std::copy_n(std::begin(numaNodes), - std::min(_config._streams, numaNodes.size()), + std::min(streams_num, numaNodes.size()), std::back_inserter(_usedNumaNodes)); } else { _usedNumaNodes = numaNodes; } -#if (OV_THREAD == OV_THREAD_TBB || OV_THREAD == OV_THREAD_TBB_AUTO) - if (!is_cpu_map_available() && ThreadBindingType::HYBRID_AWARE == config._threadBindingType) { - const auto core_types = custom::info::core_types(); - const auto num_core_phys = get_number_of_cpu_cores(); - num_big_core_phys = get_number_of_cpu_cores(true); - const auto num_small_core_phys = num_core_phys - num_big_core_phys; - int sum = 0; - // reversed order, so BIG cores are first - for (auto iter = core_types.rbegin(); iter < core_types.rend(); iter++) { - const auto& type = *iter; - // calculating the #streams per core type - const int num_streams_for_core_type = - type == 0 ? std::max(1, - std::min(config._small_core_streams, - config._threads_per_stream_small == 0 - ? 
0 - : num_small_core_phys / config._threads_per_stream_small)) - : std::max(1, - std::min(config._big_core_streams, - config._threads_per_stream_big == 0 - ? 0 - : num_big_core_phys / config._threads_per_stream_big * 2)); - sum += num_streams_for_core_type; - // prefix sum, so the core type for a given stream id will be deduced just as a upper_bound - // (notice that the map keeps the elements in the descending order, so the big cores are populated - // first) - total_streams_on_core_types.push_back({type, sum}); - } - } -#endif - for (auto streamId = 0; streamId < _config._streams; ++streamId) { + for (auto streamId = 0; streamId < streams_num; ++streamId) { _threads.emplace_back([this, streamId] { - openvino::itt::threadName(_config._name + "_" + std::to_string(streamId)); + openvino::itt::threadName(_config.get_name() + "_" + std::to_string(streamId)); for (bool stopped = false; !stopped;) { Task task; { @@ -548,15 +393,6 @@ struct CPUStreamsExecutor::Impl { bool _isStopped = false; std::vector _usedNumaNodes; CustomThreadLocal _streams; -#if (OV_THREAD == OV_THREAD_TBB || OV_THREAD == OV_THREAD_TBB_AUTO) - // stream id mapping to the core type - // stored in the reversed order (so the big cores, with the highest core_type_id value, are populated first) - // every entry is the core type and #streams that this AND ALL EARLIER entries can handle (prefix sum) - // (so mapping is actually just an upper_bound: core type is deduced from the entry for which the id < #streams) - using StreamIdToCoreTypes = std::vector>; - StreamIdToCoreTypes total_streams_on_core_types; - int num_big_core_phys; -#endif std::shared_ptr _exectorMgr; }; @@ -595,7 +431,7 @@ void CPUStreamsExecutor::execute(Task task) { } void CPUStreamsExecutor::run(Task task) { - if (0 == _impl->_config._streams) { + if (0 == _impl->_config.get_streams()) { _impl->Defer(std::move(task)); } else { _impl->Enqueue(std::move(task)); diff --git a/src/inference/src/dev/threading/executor_manager.cpp b/src/inference/src/dev/threading/executor_manager.cpp index f540f79f27d6ac..d0c639f35b7982 100644 --- a/src/inference/src/dev/threading/executor_manager.cpp +++ b/src/inference/src/dev/threading/executor_manager.cpp @@ -128,20 +128,14 @@ std::shared_ptr ExecutorManagerImpl::get_executor( std::shared_ptr ExecutorManagerImpl::get_idle_cpu_streams_executor( const ov::threading::IStreamsExecutor::Config& config) { std::lock_guard guard(streamExecutorMutex); - for (const auto& it : cpuStreamsExecutors) { + for (auto& it : cpuStreamsExecutors) { const auto& executor = it.second; if (executor.use_count() != 1) continue; - const auto& executorConfig = it.first; - if (executorConfig._name == config._name && executorConfig._streams == config._streams && - executorConfig._threadsPerStream == config._threadsPerStream && - executorConfig._threadBindingType == config._threadBindingType && - executorConfig._threadBindingStep == config._threadBindingStep && - executorConfig._threadBindingOffset == config._threadBindingOffset) - if (executorConfig._threadBindingType != ov::threading::IStreamsExecutor::ThreadBindingType::HYBRID_AWARE || - executorConfig._threadPreferredCoreType == config._threadPreferredCoreType) - return executor; + auto& executorConfig = it.first; + if (executorConfig == config) + return executor; } auto newExec = std::make_shared(config); tbbThreadsCreated = true; @@ -167,23 +161,30 @@ void ExecutorManagerImpl::clear(const std::string& id) { cpuStreamsExecutors.clear(); } else { executors.erase(id); - cpuStreamsExecutors.erase( - 
std::remove_if(cpuStreamsExecutors.begin(), - cpuStreamsExecutors.end(), - [&](const std::pair>& it) { - return it.first._name == id; - }), - cpuStreamsExecutors.end()); + cpuStreamsExecutors.erase(std::remove_if(cpuStreamsExecutors.begin(), + cpuStreamsExecutors.end(), + [&](std::pair>& it) { + return it.first.get_name() == id; + }), + cpuStreamsExecutors.end()); } } void ExecutorManagerImpl::execute_task_by_streams_executor( ov::threading::IStreamsExecutor::Config::PreferredCoreType core_type, ov::threading::Task task) { - ov::threading::IStreamsExecutor::Config streamsConfig("StreamsExecutor"); - streamsConfig.update_executor_config(1, 1, core_type, false); - if (!streamsConfig._streams_info_table.empty()) { + ov::threading::IStreamsExecutor::Config streamsConfig("StreamsExecutor", + 1, + 1, + ov::threading::IStreamsExecutor::ThreadBindingType::NONE, + 1, + 0, + 0, + core_type, + {}, + false); + if (!streamsConfig.get_streams_info_table().empty()) { auto taskExecutor = std::make_shared(streamsConfig); std::vector tasks{std::move(task)}; taskExecutor->run_and_wait(tasks); diff --git a/src/inference/src/dev/threading/istreams_executor.cpp b/src/inference/src/dev/threading/istreams_executor.cpp index cf52e22cdc2ecf..b7151e15b74e5e 100644 --- a/src/inference/src/dev/threading/istreams_executor.cpp +++ b/src/inference/src/dev/threading/istreams_executor.cpp @@ -29,61 +29,15 @@ void IStreamsExecutor::Config::set_property(const ov::AnyMap& property) { for (const auto& it : property) { const auto& key = it.first; const auto value = it.second; - OPENVINO_SUPPRESS_DEPRECATED_START - if (key == ov::internal::cpu_bind_thread.name()) { - if (value.as() == "YES" || value.as() == "NUMA") { -#if (defined(__APPLE__) || defined(_WIN32)) - _threadBindingType = IStreamsExecutor::ThreadBindingType::NUMA; -#else - _threadBindingType = (value.as() == "YES") ? IStreamsExecutor::ThreadBindingType::CORES - : IStreamsExecutor::ThreadBindingType::NUMA; -#endif - } else if (value.as() == "HYBRID_AWARE") { - _threadBindingType = IStreamsExecutor::ThreadBindingType::HYBRID_AWARE; - } else if (value.as() == "NO") { - _threadBindingType = IStreamsExecutor::ThreadBindingType::NONE; - } else { - OPENVINO_THROW("Wrong value for property key ", - ov::internal::cpu_bind_thread.name(), - ". 
Expected only YES(binds to cores) / NO(no binding) / NUMA(binds to NUMA nodes) / " - "HYBRID_AWARE (let the runtime recognize and use the hybrid cores)"); - } - } else if (key == ov::affinity) { - ov::Affinity affinity; - std::stringstream{value.as()} >> affinity; - switch (affinity) { - case ov::Affinity::NONE: - _threadBindingType = ThreadBindingType::NONE; - break; - case ov::Affinity::CORE: { -#if (defined(__APPLE__) || defined(_WIN32)) - _threadBindingType = ThreadBindingType::NUMA; -#else - _threadBindingType = ThreadBindingType::CORES; -#endif - } break; - case ov::Affinity::NUMA: - _threadBindingType = ThreadBindingType::NUMA; - break; - case ov::Affinity::HYBRID_AWARE: - _threadBindingType = ThreadBindingType::HYBRID_AWARE; - break; - default: - OPENVINO_THROW("Unsupported affinity type"); - } - } else if (key == ov::num_streams) { + if (key == ov::num_streams) { auto streams = value.as(); if (streams == ov::streams::NUMA) { - _streams = static_cast(get_available_numa_nodes().size()); - _streams_changed = true; + _streams = 1; } else if (streams == ov::streams::AUTO) { // bare minimum of streams (that evenly divides available number of cores) - if (!is_cpu_map_available()) { - _streams = get_default_num_streams(); - } + _streams = get_default_num_streams(); } else if (streams.num >= 0) { _streams = streams.num; - _streams_changed = true; } else { OPENVINO_THROW("Wrong value for property key ", ov::num_streams.name(), @@ -107,163 +61,40 @@ void IStreamsExecutor::Config::set_property(const ov::AnyMap& property) { } _threads = val_i; } else if (key == ov::internal::threads_per_stream) { - _threadsPerStream = static_cast(value.as()); - } else if (key == ov::threading::big_core_streams) { - int val_i; - try { - val_i = value.as(); - } catch (const std::exception&) { - OPENVINO_THROW("Wrong value for HYBRID_AWARE key ", - ov::threading::big_core_streams.name(), - ". Expected only non negative numbers (#streams)"); - } - if (val_i < 0) { - OPENVINO_THROW("Wrong value for HYBRID_AWARE key ", - ov::threading::big_core_streams.name(), - ". Expected only non negative numbers (#streams)"); - } - _big_core_streams = val_i; - } else if (key == ov::threading::small_core_streams) { - int val_i; - try { - val_i = value.as(); - } catch (const std::exception&) { - OPENVINO_THROW("Wrong value for HYBRID_AWARE key ", - ov::threading::small_core_streams.name(), - ". Expected only non negative numbers (#streams)"); - } - if (val_i < 0) { - OPENVINO_THROW("Wrong value for HYBRID_AWARE key ", - ov::threading::small_core_streams.name(), - ". Expected only non negative numbers (#streams)"); - } - _small_core_streams = val_i; - } else if (key == ov::threading::threads_per_stream_big) { - int val_i; - try { - val_i = value.as(); - } catch (const std::exception&) { - OPENVINO_THROW("Wrong value for HYBRID_AWARE key ", - ov::threading::threads_per_stream_big.name(), - ". Expected only non negative numbers (#threads)"); - } - if (val_i < 0) { - OPENVINO_THROW("Wrong value for HYBRID_AWARE key ", - ov::threading::threads_per_stream_big.name(), - ". Expected only non negative numbers (#threads)"); - } - _threads_per_stream_big = val_i; - } else if (key == ov::threading::threads_per_stream_small) { - int val_i; - try { - val_i = value.as(); - } catch (const std::exception&) { - OPENVINO_THROW("Wrong value for HYBRID_AWARE key ", - ov::threading::threads_per_stream_small.name(), - ". 
Expected only non negative numbers (#threads)"); - } - if (val_i < 0) { - OPENVINO_THROW("Wrong value for HYBRID_AWARE key ", - ov::threading::threads_per_stream_small.name(), - ". Expected only non negative numbers (#threads)"); - } - _threads_per_stream_small = val_i; - } else if (key == ov::threading::small_core_offset) { - int val_i; - try { - val_i = value.as(); - } catch (const std::exception&) { - OPENVINO_THROW("Wrong value for HYBRID_AWARE key ", - ov::threading::small_core_offset.name(), - ". Expected only non negative numbers"); - } - if (val_i < 0) { - OPENVINO_THROW("Wrong value for HYBRID_AWARE key ", - ov::threading::small_core_offset.name(), - ". Expected only non negative numbers"); - } - _small_core_offset = val_i; + _threads_per_stream = static_cast(value.as()); } else { OPENVINO_THROW("Wrong value for property key ", key); } - OPENVINO_SUPPRESS_DEPRECATED_END } } ov::Any IStreamsExecutor::Config::get_property(const std::string& key) const { if (key == ov::supported_properties) { - OPENVINO_SUPPRESS_DEPRECATED_START std::vector properties{ ov::num_streams.name(), - ov::internal::cpu_bind_thread.name(), ov::inference_num_threads.name(), - ov::threading::big_core_streams.name(), - ov::threading::small_core_streams.name(), - ov::threading::threads_per_stream_big.name(), - ov::threading::threads_per_stream_small.name(), - ov::threading::small_core_offset.name(), ov::internal::threads_per_stream.name(), - ov::affinity.name(), }; - OPENVINO_SUPPRESS_DEPRECATED_END return properties; - OPENVINO_SUPPRESS_DEPRECATED_START - } else if (key == ov::affinity) { - switch (_threadBindingType) { - case IStreamsExecutor::ThreadBindingType::NONE: - return ov::Affinity::NONE; - case IStreamsExecutor::ThreadBindingType::CORES: - return ov::Affinity::CORE; - case IStreamsExecutor::ThreadBindingType::NUMA: - return ov::Affinity::NUMA; - case IStreamsExecutor::ThreadBindingType::HYBRID_AWARE: - return ov::Affinity::HYBRID_AWARE; - } - OPENVINO_SUPPRESS_DEPRECATED_END } else if (key == ov::num_streams) { return decltype(ov::num_streams)::value_type{_streams}; - OPENVINO_SUPPRESS_DEPRECATED_START - } else if (key == ov::internal::cpu_bind_thread) { - switch (_threadBindingType) { - case IStreamsExecutor::ThreadBindingType::NONE: - return {"NO"}; - case IStreamsExecutor::ThreadBindingType::CORES: - return {"YES"}; - case IStreamsExecutor::ThreadBindingType::NUMA: - return {"NUMA"}; - case IStreamsExecutor::ThreadBindingType::HYBRID_AWARE: - return {"HYBRID_AWARE"}; - } - } else if (key == ov::num_streams) { - return {std::to_string(_streams)}; } else if (key == ov::inference_num_threads) { return decltype(ov::inference_num_threads)::value_type{_threads}; } else if (key == ov::internal::threads_per_stream) { - return {std::to_string(_threadsPerStream)}; - } else if (key == ov::threading::big_core_streams) { - return {std::to_string(_big_core_streams)}; - } else if (key == ov::threading::small_core_streams) { - return {std::to_string(_small_core_streams)}; - } else if (key == ov::threading::threads_per_stream_big) { - return {std::to_string(_threads_per_stream_big)}; - } else if (key == ov::threading::threads_per_stream_small) { - return {std::to_string(_threads_per_stream_small)}; - } else if (key == ov::threading::small_core_offset) { - return {std::to_string(_small_core_offset)}; - } else if (key == ov::hint::enable_hyper_threading) { - return _enable_hyper_thread; - OPENVINO_SUPPRESS_DEPRECATED_END + return decltype(ov::internal::threads_per_stream)::value_type{_threads_per_stream}; } else { 
        OPENVINO_THROW("Wrong value for property key ", key);
    }
    return {};
}

-int IStreamsExecutor::Config::get_default_num_streams(const bool enable_hyper_thread) {
-    const int sockets = static_cast<int>(get_available_numa_nodes().size());
+int IStreamsExecutor::Config::get_default_num_streams() {
     // bare minimum of streams (that evenly divides available number of core)
-    const int num_cores = sockets == 1 ? (enable_hyper_thread ? parallel_get_max_threads() : get_number_of_cpu_cores())
-                                       : get_number_of_cpu_cores();
+    const auto proc_type_table = get_proc_type_table();
+    if (proc_type_table.empty()) {
+        return 1;
+    }
+    const auto num_cores = proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][EFFICIENT_CORE_PROC];
     if (0 == num_cores % 4)
         return std::max(4, num_cores / 4);
     else if (0 == num_cores % 5)
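A hedged sketch of the visible part of the new default-streams heuristic above. The % 5 and % 3 branches fall between the hunks here, so only the % 4 rule and the final fallback are reproduced, and the function name is mine, not the PR's:

    #include <algorithm>

    // num_cores mirrors proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][EFFICIENT_CORE_PROC],
    // i.e. physical P-cores plus E-cores, with hyper-threading left out by default.
    int default_num_streams_sketch(int num_cores) {
        if (num_cores % 4 == 0)
            return std::max(4, num_cores / 4);
        return 1;  // the elided % 5 / % 3 branches are not reproduced here
    }

    // default_num_streams_sketch(16) == 4; default_num_streams_sketch(8) == 4;
    // default_num_streams_sketch(7)  == 1 (falls through every divisibility check).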
@@ -274,202 +105,81 @@ int IStreamsExecutor::Config::get_default_num_streams(const bool enable_hyper_th
     return 1;
 }

-int IStreamsExecutor::Config::get_hybrid_num_streams(std::map<std::string, std::string>& config,
-                                                     const int stream_mode) {
-    const int num_cores = parallel_get_max_threads();
-    const int num_cores_phy = get_number_of_cpu_cores();
-    const int num_big_cores_phy = get_number_of_cpu_cores(true);
-    const int num_small_cores = num_cores_phy - num_big_cores_phy;
-    const int num_big_cores = num_cores > num_cores_phy ? num_big_cores_phy * 2 : num_big_cores_phy;
-    int big_core_streams = 0;
-    int small_core_streams = 0;
-    int threads_per_stream_big = 0;
-    int threads_per_stream_small = 0;
-
-    if (stream_mode == DEFAULT) {
-        // bare minimum of streams (that evenly divides available number of core)
-        if (0 == num_big_cores_phy % 4) {
-            threads_per_stream_big = 4;
-        } else if (0 == num_big_cores_phy % 5) {
-            threads_per_stream_big = 5;
-        } else if (0 == num_big_cores_phy % 3) {
-            threads_per_stream_big = 3;
-        } else {  // if user disables some cores say in BIOS, so we got weird #cores which is not easy to divide
-            threads_per_stream_big = num_big_cores_phy;
-        }
+IStreamsExecutor::Config IStreamsExecutor::Config::make_default_multi_threaded(
+    const IStreamsExecutor::Config& initial) {
+    const auto proc_type_table = get_proc_type_table();
+    auto streamConfig = initial;
-        big_core_streams = num_big_cores / threads_per_stream_big;
-        threads_per_stream_small = threads_per_stream_big;
-        if (num_small_cores == 0) {
-            threads_per_stream_small = 0;
-        } else if (num_small_cores < threads_per_stream_small) {
-            small_core_streams = 1;
-            threads_per_stream_small = num_small_cores;
-            threads_per_stream_big = threads_per_stream_small;
-            // Balance the computation of physical core and logical core, the number of threads on the physical core and
-            // logical core should be equal
-            big_core_streams = num_big_cores_phy / threads_per_stream_big * 2;
-        } else {
-            small_core_streams = num_small_cores / threads_per_stream_small;
-        }
-    } else if (stream_mode == AGGRESSIVE) {
-        big_core_streams = num_big_cores;
-        small_core_streams = num_small_cores;
-        threads_per_stream_big = num_big_cores / big_core_streams;
-        threads_per_stream_small = num_small_cores == 0 ? 0 : num_small_cores / small_core_streams;
-    } else if (stream_mode == LESSAGGRESSIVE) {
-        big_core_streams = num_big_cores / 2;
-        small_core_streams = num_small_cores / 2;
-        threads_per_stream_big = num_big_cores / big_core_streams;
-        threads_per_stream_small = num_small_cores == 0 ? 0 : num_small_cores / small_core_streams;
-    } else {
-        OPENVINO_THROW("Wrong stream mode to get num of streams: ", stream_mode);
+    if (proc_type_table.empty()) {
+        return streamConfig;
     }
-    OPENVINO_SUPPRESS_DEPRECATED_START
-    config[ov::threading::big_core_streams.name()] = std::to_string(big_core_streams);
-    config[ov::threading::small_core_streams.name()] = std::to_string(small_core_streams);
-    config[ov::threading::threads_per_stream_big.name()] = std::to_string(threads_per_stream_big);
-    config[ov::threading::threads_per_stream_small.name()] = std::to_string(threads_per_stream_small);
-    // This is default setting for specific CPU which Pcore is in front and Ecore is in the back.
-    config[ov::threading::small_core_offset.name()] = std::to_string(num_small_cores == 0 ? 0 : num_big_cores);
-    OPENVINO_SUPPRESS_DEPRECATED_END
-    return big_core_streams + small_core_streams;
-}
-void IStreamsExecutor::Config::update_hybrid_custom_threads(Config& config) {
-    const auto num_cores = parallel_get_max_threads();
-    const auto num_cores_phys = get_number_of_cpu_cores();
-    const auto num_big_cores_phys = get_number_of_cpu_cores(true);
-    const auto num_big_cores = num_cores > num_cores_phys ? num_big_cores_phys * 2 : num_big_cores_phys;
-    const auto num_small_cores_phys = num_cores_phys - num_big_cores_phys;
-    const auto threads = config._threads ? config._threads : num_cores;
-    const auto streams = config._streams > 0 ? config._streams : 1;
-
-    config._small_core_offset = num_big_cores;
-    int threads_per_stream = std::max(1, threads / streams);
-
-    if ((num_big_cores_phys / threads_per_stream >= streams) && (1 < threads_per_stream)) {
-        config._big_core_streams = streams;
-        config._threads_per_stream_big = threads_per_stream;
-        config._small_core_streams = 0;
-        config._threads_per_stream_small = 0;
-    } else if ((num_small_cores_phys / threads_per_stream >= streams) && (num_big_cores_phys < threads_per_stream)) {
-        config._big_core_streams = 0;
-        config._threads_per_stream_big = 0;
-        config._small_core_streams = streams;
-        config._threads_per_stream_small = threads_per_stream;
-    } else {
-        const int threads_per_stream_big = std::min(num_big_cores_phys, threads_per_stream);
-        const int threads_per_stream_small = std::min(num_small_cores_phys, threads_per_stream);
-
-        threads_per_stream = std::min(threads_per_stream_big, threads_per_stream_small);
-        while (threads_per_stream > 1) {
-            const int base_big_streams = num_big_cores_phys / threads_per_stream;
-            const int base_small_streams = num_small_cores_phys > 0 ? num_small_cores_phys / threads_per_stream : 0;
-            if (base_big_streams + base_small_streams >= streams) {
-                config._big_core_streams = base_big_streams;
-                config._small_core_streams = streams - base_big_streams;
-                break;
-            } else if (base_big_streams * 2 + base_small_streams >= streams) {
-                config._big_core_streams = streams - base_small_streams;
-                config._small_core_streams = base_small_streams;
-                break;
-            } else {
-                threads_per_stream = threads_per_stream > 1 ? threads_per_stream - 1 : 1;
+    const auto numa_nodes = proc_type_table.size() > 1 ? proc_type_table.size() - 1 : proc_type_table.size();
+    const bool latency_case = static_cast<size_t>(streamConfig._streams) <= numa_nodes;
+
+    // by default, do not use hyper-threading (to minimize thread synchronization overheads)
+    int num_cores = !latency_case && numa_nodes == 1
+                        ?
proc_type_table[0][ALL_PROC] + : proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][EFFICIENT_CORE_PROC]; + + // additional latency-case logic for hybrid processors: + if (proc_type_table[0][EFFICIENT_CORE_PROC] > 0 && proc_type_table[0][MAIN_CORE_PROC] > 0) { + if (streamConfig._thread_preferred_core_type == IStreamsExecutor::Config::ANY) { + // by default the latency case uses (faster) Big cores only, depending on the compute ratio + const bool big_only = proc_type_table[0][MAIN_CORE_PROC] > (proc_type_table[0][EFFICIENT_CORE_PROC] / 2); + // selecting the preferred core type + if (big_only) { + streamConfig._thread_preferred_core_type = IStreamsExecutor::Config::PreferredCoreType::BIG; + const int hyper_threading_threshold = + 2; // min #cores, for which the hyper-threading becomes useful for the latency case + // additionally selecting the #cores to use in the "Big-only" case + num_cores = (proc_type_table[0][MAIN_CORE_PROC] <= hyper_threading_threshold) + ? proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][HYPER_THREADING_PROC] + : proc_type_table[0][MAIN_CORE_PROC]; } + } else if (streamConfig._thread_preferred_core_type == IStreamsExecutor::Config::BIG) { + num_cores = proc_type_table[0][MAIN_CORE_PROC]; + } else if (streamConfig._thread_preferred_core_type == IStreamsExecutor::Config::LITTLE) { + num_cores = proc_type_table[0][EFFICIENT_CORE_PROC]; } + } - if (threads_per_stream == 1) { - const int stream_loops = streams / num_cores; - const int remain_streams = streams - stream_loops * num_cores; - if (num_big_cores_phys >= remain_streams) { - config._big_core_streams = remain_streams + num_big_cores * stream_loops; - config._small_core_streams = num_small_cores_phys * stream_loops; - } else if (num_big_cores_phys + num_small_cores_phys >= remain_streams) { - config._big_core_streams = num_big_cores_phys + num_big_cores * stream_loops; - config._small_core_streams = remain_streams - num_big_cores_phys + num_small_cores_phys * stream_loops; - } else { - config._big_core_streams = remain_streams - num_small_cores_phys + num_big_cores * stream_loops; - config._small_core_streams = num_small_cores_phys * (stream_loops + 1); + const auto threads = streamConfig._threads ? streamConfig._threads : num_cores; + int threads_per_stream = streamConfig._streams ? 
std::max(1, threads / streamConfig._streams) : threads; + if (proc_type_table[0][EFFICIENT_CORE_PROC] > 0 && proc_type_table[0][MAIN_CORE_PROC] > 0 && + streamConfig._thread_preferred_core_type == IStreamsExecutor::Config::ANY) { + if (streamConfig._streams > 1) { + threads_per_stream = + std::min(std::min(proc_type_table[0][MAIN_CORE_PROC], proc_type_table[0][EFFICIENT_CORE_PROC]), + threads_per_stream); + while (1) { + int streams_num = proc_type_table[0][MAIN_CORE_PROC] / threads_per_stream + + proc_type_table[0][HYPER_THREADING_PROC] / threads_per_stream + + proc_type_table[0][EFFICIENT_CORE_PROC] / threads_per_stream; + if (streams_num >= streamConfig._streams) { + break; + } else { + if (threads_per_stream > 1) { + threads_per_stream--; + } + } } } - - config._threads_per_stream_big = threads_per_stream; - config._threads_per_stream_small = threads_per_stream; } + streamConfig._threads_per_stream = threads_per_stream; + streamConfig._threads = streamConfig._threads_per_stream * streamConfig._streams; + streamConfig.update_executor_config(); + return streamConfig; } -IStreamsExecutor::Config IStreamsExecutor::Config::make_default_multi_threaded(const IStreamsExecutor::Config& initial, - const bool fp_intesive) { - const auto envThreads = parallel_get_env_threads(); - const auto& numaNodes = get_available_numa_nodes(); - const int numaNodesNum = static_cast(numaNodes.size()); - auto streamExecutorConfig = initial; - const bool bLatencyCase = streamExecutorConfig._streams <= numaNodesNum; +void IStreamsExecutor::Config::reserve_cpu_threads() { + int status = _name.find("StreamsExecutor") != std::string::npos ? NOT_USED : CPU_USED; - // by default, do not use the hyper-threading (to minimize threads synch overheads) - int num_cores_default = get_number_of_cpu_cores(); -#if (OV_THREAD == OV_THREAD_TBB || OV_THREAD == OV_THREAD_TBB_AUTO) - // additional latency-case logic for hybrid processors: - if (ThreadBindingType::HYBRID_AWARE == streamExecutorConfig._threadBindingType) { - const auto core_types = custom::info::core_types(); - const auto num_little_cores = - custom::info::default_concurrency(custom::task_arena::constraints{}.set_core_type(core_types.front())); - const auto num_big_cores_phys = get_number_of_cpu_cores(true); - const int int8_threshold = 4; // ~relative efficiency of the VNNI-intensive code for Big vs Little cores; - const int fp32_threshold = 2; // ~relative efficiency of the AVX2 fp32 code for Big vs Little cores; - // by default the latency case uses (faster) Big cores only, depending on the compute ratio - const bool bLatencyCaseBigOnly = - num_big_cores_phys > (num_little_cores / (fp_intesive ? fp32_threshold : int8_threshold)); - // selecting the preferred core type - streamExecutorConfig._threadPreferredCoreType = - bLatencyCase ? (bLatencyCaseBigOnly ? IStreamsExecutor::Config::PreferredCoreType::BIG - : IStreamsExecutor::Config::PreferredCoreType::ANY) - : IStreamsExecutor::Config::PreferredCoreType::ROUND_ROBIN; - // additionally selecting the #cores to use in the "Big-only" case - if (bLatencyCaseBigOnly) { - const int hyper_threading_threshold = - 2; // min #cores, for which the hyper-threading becomes useful for the latency case - const auto num_big_cores = - custom::info::default_concurrency(custom::task_arena::constraints{}.set_core_type(core_types.back())); - num_cores_default = (num_big_cores_phys <= hyper_threading_threshold) ? 
num_big_cores : num_big_cores_phys; - } - // if nstreams or nthreads are set, need to calculate the Hybrid aware parameters here - if (!bLatencyCase && (streamExecutorConfig._big_core_streams == 0 || streamExecutorConfig._threads)) { - update_hybrid_custom_threads(streamExecutorConfig); - } - OPENVINO_DEBUG << "[ p_e_core_info ] streams (threads): " << streamExecutorConfig._streams << "(" - << streamExecutorConfig._threads_per_stream_big * streamExecutorConfig._big_core_streams + - streamExecutorConfig._threads_per_stream_small * streamExecutorConfig._small_core_streams - << ") -- PCore: " << streamExecutorConfig._big_core_streams << "(" - << streamExecutorConfig._threads_per_stream_big - << ") ECore: " << streamExecutorConfig._small_core_streams << "(" - << streamExecutorConfig._threads_per_stream_small << ")"; + if (_streams_info_table.size() == 0 || (status == CPU_USED && !_cpu_reservation)) { + return; } -#endif - const auto hwCores = - !bLatencyCase && numaNodesNum == 1 - // throughput case on a single-NUMA node machine uses all available cores - ? (streamExecutorConfig._enable_hyper_thread ? parallel_get_max_threads() : num_cores_default) - // in the rest of cases: - // multi-node machine - // or - // latency case, single-node yet hybrid case that uses - // all core types - // or - // big-cores only, but the #cores is "enough" (pls see the logic above) - // it is usually beneficial not to use the hyper-threading (which is default) - : num_cores_default; - const auto threads = - streamExecutorConfig._threads ? streamExecutorConfig._threads : (envThreads ? envThreads : hwCores); - streamExecutorConfig._threadsPerStream = - streamExecutorConfig._streams ? std::max(1, threads / streamExecutorConfig._streams) : threads; - streamExecutorConfig._threads = - (!bLatencyCase && ThreadBindingType::HYBRID_AWARE == streamExecutorConfig._threadBindingType) - ? 
streamExecutorConfig._big_core_streams * streamExecutorConfig._threads_per_stream_big + - streamExecutorConfig._small_core_streams * streamExecutorConfig._threads_per_stream_small - : streamExecutorConfig._threadsPerStream * streamExecutorConfig._streams; - return streamExecutorConfig; + + reserve_available_cpus(_streams_info_table, _stream_processor_ids, status); } IStreamsExecutor::Config IStreamsExecutor::Config::reserve_cpu_threads(const IStreamsExecutor::Config& initial) { @@ -491,120 +201,150 @@ IStreamsExecutor::Config IStreamsExecutor::Config::reserve_cpu_threads(const ISt config._streams_info_table[i][NUMBER_OF_STREAMS] * config._streams_info_table[i][THREADS_PER_STREAM]; } } + config._threads_per_stream = config._streams_info_table[0][THREADS_PER_STREAM]; OPENVINO_DEBUG << "[ threading ] " << config._name << " reserve_cpu_threads " << config._streams << "(" << config._threads << ")"; return config; } -void IStreamsExecutor::Config::update_executor_config(int stream_nums, - int threads_per_stream, - IStreamsExecutor::Config::PreferredCoreType core_type, - bool cpu_pinning) { - const auto proc_type_table = ov::get_proc_type_table(); +void IStreamsExecutor::Config::update_executor_config() { + const auto proc_type_table = get_proc_type_table(); + bool streams_info_available = false; if (proc_type_table.empty()) { return; } - if (proc_type_table.size() > 1) { - core_type = ov::threading::IStreamsExecutor::Config::ANY; + if (!_streams_info_table.empty()) { + streams_info_available = true; + std::vector threads_proc_type(HYPER_THREADING_PROC + 1, 0); + for (size_t i = 0; i < _streams_info_table.size(); i++) { + if (_streams_info_table[i][NUMBER_OF_STREAMS] > 0) { + threads_proc_type[_streams_info_table[i][PROC_TYPE]] += + _streams_info_table[i][THREADS_PER_STREAM] * _streams_info_table[i][NUMBER_OF_STREAMS]; + } + } + for (size_t i = ALL_PROC; i < threads_proc_type.size(); i++) { + if (threads_proc_type[i] > proc_type_table[0][i]) { + streams_info_available = false; + break; + } + } } - // IStreamsExecutor::Config config = initial; - const auto total_num_cores = proc_type_table[0][ALL_PROC]; - const auto total_num_big_cores = proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][HYPER_THREADING_PROC]; - const auto total_num_little_cores = proc_type_table[0][EFFICIENT_CORE_PROC]; + if (!streams_info_available) { + _streams_info_table.clear(); - int num_cores = total_num_cores; - if (core_type == ov::threading::IStreamsExecutor::Config::BIG) { - num_cores = total_num_big_cores; - } else if (core_type == ov::threading::IStreamsExecutor::Config::LITTLE) { - num_cores = total_num_little_cores; - } + const auto total_num_cores = proc_type_table[0][ALL_PROC]; + const auto total_num_big_cores = proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][HYPER_THREADING_PROC]; + const auto total_num_little_cores = proc_type_table[0][EFFICIENT_CORE_PROC]; - int streams = std::min(stream_nums, num_cores); + if ((total_num_little_cores == 0 && _thread_preferred_core_type == IStreamsExecutor::Config::LITTLE) || + (total_num_big_cores == 0 && _thread_preferred_core_type == IStreamsExecutor::Config::BIG) || + (proc_type_table.size() > 1 && _thread_preferred_core_type == IStreamsExecutor::Config::BIG)) { + _thread_preferred_core_type = IStreamsExecutor::Config::ANY; + } - if (streams == 0) { - return; - } + int num_cores = total_num_cores; + if (_thread_preferred_core_type == IStreamsExecutor::Config::BIG) { + num_cores = total_num_big_cores; + } else if (_thread_preferred_core_type == 
IStreamsExecutor::Config::LITTLE) {
+            num_cores = total_num_little_cores;
+        }

-    int streams = std::min(stream_nums, num_cores);
+        _streams = _streams > 0 ? std::min(_streams, num_cores) : _streams;
+        if (_streams == 0) {
+            set_config_zero_stream();
+            return;
+        }
+
+        _threads_per_stream =
+            _threads_per_stream > 0 ? std::min(num_cores, _streams * _threads_per_stream) / _streams : 0;
+        if (_threads_per_stream == 0) {
+            return;
+        }

-    if (streams == 0) {
-        return;
-    }

+        // create stream_info_table based on core type
+        std::vector<int> stream_info(CPU_STREAMS_TABLE_SIZE, 0);
+        stream_info[THREADS_PER_STREAM] = _threads_per_stream;
+        stream_info[STREAM_NUMA_NODE_ID] = 0;
+        stream_info[STREAM_SOCKET_ID] = 0;
+        int cur_threads = _streams * _threads_per_stream;

-    _streams = streams;
-    _threadPreferredCoreType = core_type;
-    _threadsPerStream = threads_per_stream;
-
-    // create stream_info_table based on core type
-    std::vector<int> stream_info(ov::CPU_STREAMS_TABLE_SIZE, 0);
-    stream_info[ov::THREADS_PER_STREAM] = _threadsPerStream;
-    stream_info[ov::STREAM_NUMA_NODE_ID] = 0;
-    stream_info[ov::STREAM_SOCKET_ID] = 0;
-    if (core_type == ov::threading::IStreamsExecutor::Config::BIG) {
-        if (proc_type_table[0][ov::MAIN_CORE_PROC] < _streams) {
-            if (proc_type_table[0][ov::MAIN_CORE_PROC] > 0) {
-                stream_info[ov::NUMBER_OF_STREAMS] = proc_type_table[0][ov::MAIN_CORE_PROC];
-                stream_info[ov::PROC_TYPE] = ov::MAIN_CORE_PROC;
-                _streams_info_table.push_back(stream_info);
-            }
-            if (proc_type_table[0][ov::HYPER_THREADING_PROC] > 0) {
-                stream_info[ov::NUMBER_OF_STREAMS] = proc_type_table[0][ov::HYPER_THREADING_PROC];
-                stream_info[ov::PROC_TYPE] = ov::HYPER_THREADING_PROC;
+        if (_thread_preferred_core_type == IStreamsExecutor::Config::LITTLE) {
+            stream_info[PROC_TYPE] = EFFICIENT_CORE_PROC;
+            stream_info[NUMBER_OF_STREAMS] = _streams;
+            _streams_info_table.push_back(stream_info);
+        } else {
+            int start = proc_type_table.size() > 1 ? 1 : 0;
+            std::vector<int> core_types;
+            // Use cores across sockets or hyper-threading cores when streams == 1
+            if (_streams == 1 && _threads_per_stream > proc_type_table[start][ov::MAIN_CORE_PROC]) {
+                stream_info[NUMBER_OF_STREAMS] = _streams;
+                stream_info[PROC_TYPE] = ALL_PROC;
+                stream_info[STREAM_NUMA_NODE_ID] = proc_type_table.size() > 1 ? -1 : 0;
+                stream_info[STREAM_SOCKET_ID] = proc_type_table.size() > 1 ? -1 : 0;
                 _streams_info_table.push_back(stream_info);
+                stream_info[NUMBER_OF_STREAMS] = 0;
             }
-        } else {
-            stream_info[ov::PROC_TYPE] = ov::MAIN_CORE_PROC;
-            stream_info[ov::NUMBER_OF_STREAMS] = _streams;
-            _streams_info_table.push_back(stream_info);
-        }
-    } else if (core_type == ov::threading::IStreamsExecutor::Config::LITTLE) {
-        stream_info[ov::PROC_TYPE] = ov::EFFICIENT_CORE_PROC;
-        stream_info[ov::NUMBER_OF_STREAMS] = _streams;
-        _streams_info_table.push_back(stream_info);
-    } else {
-        int total_streams = 0;
-        if (proc_type_table.size() == 1) {
-            for (int i = ov::MAIN_CORE_PROC; i <= ov::HYPER_THREADING_PROC; i++) {
-                if (proc_type_table[0][i] > 0) {
-                    stream_info[ov::NUMBER_OF_STREAMS] =
-                        (total_streams + proc_type_table[0][i] > _streams ? _streams - total_streams
-                                                                          : proc_type_table[0][i]);
-                    stream_info[ov::PROC_TYPE] = i;
-                    stream_info[ov::STREAM_NUMA_NODE_ID] = proc_type_table[0][PROC_NUMA_NODE_ID];
-                    stream_info[ov::STREAM_SOCKET_ID] = proc_type_table[0][PROC_SOCKET_ID];
-                    _streams_info_table.push_back(stream_info);
-                    total_streams += stream_info[ov::NUMBER_OF_STREAMS];
-                }
-                if (total_streams >= _streams)
-                    break;
+            if (_thread_preferred_core_type == IStreamsExecutor::Config::BIG &&
+                proc_type_table[0][EFFICIENT_CORE_PROC] > 0) {
+                core_types = {MAIN_CORE_PROC, HYPER_THREADING_PROC};
+            } else {
+                core_types = {MAIN_CORE_PROC, EFFICIENT_CORE_PROC, HYPER_THREADING_PROC};
             }
-        } else {
-            for (size_t i = 1; i < proc_type_table.size(); i++) {
-                for (int j = ov::MAIN_CORE_PROC; j < ov::HYPER_THREADING_PROC; j++) {
-                    if (proc_type_table[i][j] > 0) {
-                        stream_info[ov::NUMBER_OF_STREAMS] =
-                            (total_streams + proc_type_table[i][j] > _streams ? _streams - total_streams
-                                                                              : proc_type_table[i][j]);
-                        stream_info[ov::PROC_TYPE] = j;
-                        stream_info[ov::STREAM_NUMA_NODE_ID] = proc_type_table[i][PROC_NUMA_NODE_ID];
-                        stream_info[ov::STREAM_SOCKET_ID] = proc_type_table[i][PROC_SOCKET_ID];
+            for (int j : core_types) {
+                for (size_t i = start; i < proc_type_table.size(); i++) {
+                    if (proc_type_table[i][j] > 0 && cur_threads > 0) {
+                        if (_threads_per_stream > proc_type_table[i][j]) {
+                            stream_info[THREADS_PER_STREAM] = std::min(proc_type_table[i][j], cur_threads);
+                            cur_threads -= stream_info[THREADS_PER_STREAM];
+                        } else {
+                            stream_info[NUMBER_OF_STREAMS] =
+                                std::min(proc_type_table[i][j], cur_threads) / _threads_per_stream;
+                            cur_threads -= stream_info[NUMBER_OF_STREAMS] * _threads_per_stream;
+                        }
+                        stream_info[PROC_TYPE] = j;
+                        stream_info[STREAM_NUMA_NODE_ID] = proc_type_table[i][PROC_NUMA_NODE_ID];
+                        stream_info[STREAM_SOCKET_ID] = proc_type_table[i][PROC_SOCKET_ID];
                         _streams_info_table.push_back(stream_info);
-                        total_streams += stream_info[ov::NUMBER_OF_STREAMS];
                     }
-                    if (total_streams >= _streams)
-                        break;
                 }
-                if (total_streams >= _streams)
-                    break;
             }
         }
     }

-    if (cpu_pinning) {
-        _cpu_reservation = cpu_pinning;
-        auto new_config = reserve_cpu_threads(*this);
-        _stream_processor_ids = new_config._stream_processor_ids;
-        _streams = new_config._streams;
-        _threads = new_config._threads;
+    if (_cpu_reservation) {
+        reserve_cpu_threads();
+    }
+
+    // Recalculate _streams, _threads and _threads_per_stream from _streams_info_table
+    int num_streams = 0;
+    _threads = 0;
+    for (size_t i = 0; i < _streams_info_table.size(); i++) {
+        if (_streams_info_table[i][NUMBER_OF_STREAMS] > 0) {
+            num_streams += _streams_info_table[i][NUMBER_OF_STREAMS];
+            _threads += _streams_info_table[i][NUMBER_OF_STREAMS] * _streams_info_table[i][THREADS_PER_STREAM];
+        }
+    }
+    _threads_per_stream = _streams_info_table[0][THREADS_PER_STREAM];
+    _streams = _streams > 0 ?
+
+    OPENVINO_DEBUG << "[ threading ] proc_type_table:";
+    for (size_t i = 0; i < proc_type_table.size(); i++) {
+        OPENVINO_DEBUG << proc_type_table[i][ALL_PROC] << " " << proc_type_table[i][MAIN_CORE_PROC] << " "
+                       << proc_type_table[i][EFFICIENT_CORE_PROC] << " " << proc_type_table[i][HYPER_THREADING_PROC]
+                       << " " << proc_type_table[i][PROC_NUMA_NODE_ID] << " " << proc_type_table[i][PROC_SOCKET_ID];
+    }
+
+    OPENVINO_DEBUG << "[ threading ] streams_info_table:";
+    for (size_t i = 0; i < _streams_info_table.size(); i++) {
+        OPENVINO_DEBUG << _streams_info_table[i][NUMBER_OF_STREAMS] << " " << _streams_info_table[i][PROC_TYPE] << " "
+                       << _streams_info_table[i][THREADS_PER_STREAM] << " "
+                       << _streams_info_table[i][STREAM_NUMA_NODE_ID] << " "
+                       << _streams_info_table[i][STREAM_SOCKET_ID];
     }
+    OPENVINO_DEBUG << "[ threading ] " << _name << ": " << _streams << "(" << _threads << ")";
 }
 
 void IStreamsExecutor::Config::set_config_zero_stream() {
diff --git a/src/inference/src/dev/threading/thread_affinity.cpp b/src/inference/src/dev/threading/thread_affinity.cpp
index a91052f893858b..f1cd0958d5427b 100644
--- a/src/inference/src/dev/threading/thread_affinity.cpp
+++ b/src/inference/src/dev/threading/thread_affinity.cpp
@@ -51,8 +51,7 @@ bool pin_thread_to_vacant_core(int thrIdx,
                                int hyperthreads,
                                int ncores,
                                const CpuSet& procMask,
-                               const std::vector<int>& cpu_ids,
-                               int cpuIdxOffset) {
+                               const std::vector<int>& cpu_ids) {
     if (procMask == nullptr)
         return false;
     const size_t size = CPU_ALLOC_SIZE(ncores);
@@ -64,7 +63,7 @@ bool pin_thread_to_vacant_core(int thrIdx,
         mapped_idx = cpu_ids[thrIdx];
     } else {
         // Place threads with specified step
-        int cpu_idx = cpuIdxOffset;
+        int cpu_idx = 0;
        for (int i = 0, offset = 0; i < thrIdx; ++i) {
             cpu_idx += hyperthreads;
             if (cpu_idx >= num_cpus)
@@ -72,8 +71,8 @@ bool pin_thread_to_vacant_core(int thrIdx,
         }
 
         // Find index of 'cpu_idx'-th bit that equals to 1
-        mapped_idx = cpuIdxOffset - 1;
-        while (cpu_idx >= cpuIdxOffset) {
+        mapped_idx = -1;
+        while (cpu_idx >= 0) {
             mapped_idx++;
             if (CPU_ISSET_S(mapped_idx, size, procMask.get()))
                 --cpu_idx;
@@ -125,8 +124,7 @@ bool pin_thread_to_vacant_core(int thrIdx,
                                int hyperthreads,
                                int ncores,
                                const CpuSet& procMask,
-                               const std::vector<int>& cpu_ids,
-                               int cpuIdxOffset) {
+                               const std::vector<int>& cpu_ids) {
     return 0 != SetThreadAffinityMask(GetCurrentThread(), DWORD_PTR(1) << cpu_ids[thrIdx]);
 }
 bool pin_current_thread_by_mask(int ncores, const CpuSet& procMask) {
@@ -146,8 +144,7 @@ bool pin_thread_to_vacant_core(int thrIdx,
                                int hyperthreads,
                                int ncores,
                                const CpuSet& procMask,
-                               const std::vector<int>& cpu_ids,
-                               int cpuIdxOffset) {
+                               const std::vector<int>& cpu_ids) {
     return false;
 }
 bool pin_current_thread_by_mask(int ncores, const CpuSet& procMask) {
diff --git a/src/inference/src/dev/threading/thread_affinity.hpp b/src/inference/src/dev/threading/thread_affinity.hpp
index 6d31989148de92..5428825b1ff6d7 100644
--- a/src/inference/src/dev/threading/thread_affinity.hpp
+++ b/src/inference/src/dev/threading/thread_affinity.hpp
@@ -80,8 +80,7 @@ bool pin_thread_to_vacant_core(int thrIdx,
                                int hyperThreads,
                                int ncores,
                                const CpuSet& processMask,
-                               const std::vector<int>& cpu_ids = {},
-                               int cpuIdxOffset = 0);
+                               const std::vector<int>& cpu_ids = {});
 
 /**
  * @brief Pins thread to a spare core in the round-robin scheme, while respecting the given process mask.
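Note on the thread_affinity change above: with the cpuIdxOffset parameter removed, pin_thread_to_vacant_core() always scans the process affinity mask from bit 0 and pins thread slot N to the N-th enabled CPU in the mask. A minimal standalone sketch of that scan, assuming a std::vector<bool> in place of the kernel cpu_set_t queried by CPU_ISSET_S(); the helper name and the bounds check are illustrative additions, not code from this patch, and the round-robin step that derives cpu_idx from the thread index is omitted:

    // find_nth_enabled_cpu() is a hypothetical stand-in for the scan inside
    // pin_thread_to_vacant_core(); a bool vector replaces the kernel CPU set.
    #include <iostream>
    #include <vector>

    static int find_nth_enabled_cpu(const std::vector<bool>& proc_mask, int cpu_idx) {
        // Mirrors the patched loop: mapped_idx = -1; while (cpu_idx >= 0) { ... }
        int mapped_idx = -1;
        while (cpu_idx >= 0) {
            mapped_idx++;
            if (mapped_idx >= static_cast<int>(proc_mask.size()))
                return -1;  // illustrative bounds check, not in the original loop
            if (proc_mask[mapped_idx])
                --cpu_idx;  // passed one more enabled CPU
        }
        return mapped_idx;
    }

    int main() {
        // Process affinity mask: CPUs 1, 3, 4 and 7 are available.
        const std::vector<bool> mask = {false, true, false, true, true, false, false, true};
        for (int slot = 0; slot < 4; ++slot)
            std::cout << "thread slot " << slot << " -> cpu " << find_nth_enabled_cpu(mask, slot) << "\n";
        return 0;
    }

With that mask, slots 0..3 map to physical CPUs 1, 3, 4 and 7, which is the mapping the simplified loop computes now that the scan always starts at bit 0.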
diff --git a/src/inference/src/ie_network_reader.cpp b/src/inference/src/ie_network_reader.cpp index 4732f4c69bf30e..9858dc01677740 100644 --- a/src/inference/src/ie_network_reader.cpp +++ b/src/inference/src/ie_network_reader.cpp @@ -19,7 +19,6 @@ #include "ie_icnn_network.hpp" #include "ie_input_info.hpp" #include "itt.hpp" -#include "legacy_op_extension.hpp" #include "openvino/core/deprecated.hpp" #include "openvino/core/except.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" diff --git a/src/inference/src/os/lin/lin_system_conf.cpp b/src/inference/src/os/lin/lin_system_conf.cpp index 1cb25f90004df0..988fdab3baeba5 100644 --- a/src/inference/src/os/lin/lin_system_conf.cpp +++ b/src/inference/src/os/lin/lin_system_conf.cpp @@ -15,6 +15,7 @@ #include "ie_common.h" #include "openvino/core/except.hpp" #include "openvino/runtime/system_conf.hpp" +#include "openvino/util/log.hpp" #include "os/cpu_map_info.hpp" namespace ov { @@ -278,6 +279,21 @@ CPU::CPU() { }; _org_proc_type_table = _proc_type_table; + + OPENVINO_DEBUG << "[ threading ] cpu_mapping_table:"; + for (size_t i = 0; i < _cpu_mapping_table.size(); i++) { + OPENVINO_DEBUG << _cpu_mapping_table[i][CPU_MAP_PROCESSOR_ID] << " " + << _cpu_mapping_table[i][CPU_MAP_NUMA_NODE_ID] << " " << _cpu_mapping_table[i][CPU_MAP_SOCKET_ID] + << " " << _cpu_mapping_table[i][CPU_MAP_CORE_ID] << " " + << _cpu_mapping_table[i][CPU_MAP_CORE_TYPE] << " " << _cpu_mapping_table[i][CPU_MAP_GROUP_ID] + << " " << _cpu_mapping_table[i][CPU_MAP_USED_FLAG]; + } + OPENVINO_DEBUG << "[ threading ] org_proc_type_table:"; + for (size_t i = 0; i < _proc_type_table.size(); i++) { + OPENVINO_DEBUG << _proc_type_table[i][ALL_PROC] << " " << _proc_type_table[i][MAIN_CORE_PROC] << " " + << _proc_type_table[i][EFFICIENT_CORE_PROC] << " " << _proc_type_table[i][HYPER_THREADING_PROC] + << " " << _proc_type_table[i][PROC_NUMA_NODE_ID] << " " << _proc_type_table[i][PROC_SOCKET_ID]; + } } void parse_node_info_linux(const std::vector node_info_table, diff --git a/src/inference/src/system_conf.cpp b/src/inference/src/system_conf.cpp index 72b073dd8fde1c..4dd6b1db499028 100644 --- a/src/inference/src/system_conf.cpp +++ b/src/inference/src/system_conf.cpp @@ -413,29 +413,6 @@ void reserve_available_cpus(const std::vector> streams_info_tab stream_processors, cpu_status); - OPENVINO_DEBUG << "[ threading ] cpu_mapping_table:"; - for (size_t i = 0; i < cpu._cpu_mapping_table.size(); i++) { - OPENVINO_DEBUG << cpu._cpu_mapping_table[i][CPU_MAP_PROCESSOR_ID] << " " - << cpu._cpu_mapping_table[i][CPU_MAP_NUMA_NODE_ID] << " " - << cpu._cpu_mapping_table[i][CPU_MAP_SOCKET_ID] << " " - << cpu._cpu_mapping_table[i][CPU_MAP_CORE_ID] << " " - << cpu._cpu_mapping_table[i][CPU_MAP_CORE_TYPE] << " " - << cpu._cpu_mapping_table[i][CPU_MAP_GROUP_ID] << " " - << cpu._cpu_mapping_table[i][CPU_MAP_USED_FLAG]; - } - OPENVINO_DEBUG << "[ threading ] proc_type_table:"; - for (size_t i = 0; i < cpu._proc_type_table.size(); i++) { - OPENVINO_DEBUG << cpu._proc_type_table[i][ALL_PROC] << " " << cpu._proc_type_table[i][MAIN_CORE_PROC] << " " - << cpu._proc_type_table[i][EFFICIENT_CORE_PROC] << " " - << cpu._proc_type_table[i][HYPER_THREADING_PROC] << " " - << cpu._proc_type_table[i][PROC_NUMA_NODE_ID] << " " << cpu._proc_type_table[i][PROC_SOCKET_ID]; - } - OPENVINO_DEBUG << "[ threading ] streams_info_table:"; - for (size_t i = 0; i < streams_info_table.size(); i++) { - OPENVINO_DEBUG << streams_info_table[i][NUMBER_OF_STREAMS] << " " << streams_info_table[i][PROC_TYPE] << " " - << 
streams_info_table[i][THREADS_PER_STREAM] << " " << streams_info_table[i][STREAM_NUMA_NODE_ID] - << " " << streams_info_table[i][STREAM_SOCKET_ID]; - } OPENVINO_DEBUG << "[ threading ] stream_processors:"; for (size_t i = 0; i < stream_processors.size(); i++) { OPENVINO_DEBUG << "{"; diff --git a/src/inference/tests/functional/async_infer_request_test.cpp b/src/inference/tests/functional/async_infer_request_test.cpp deleted file mode 100644 index 68b97e5d69bb6d..00000000000000 --- a/src/inference/tests/functional/async_infer_request_test.cpp +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include - -using namespace ::testing; -using namespace std; -using namespace InferenceEngine; -using namespace InferenceEngine::details; - -IE_SUPPRESS_DEPRECATED_START - -TEST(InferRequestCPPTests, throwsOnUninitializedSetBlob) { - InferRequest req; - ASSERT_THROW(req.SetBlob({}, {}), InferenceEngine::NotAllocated); -} - -TEST(InferRequestCPPTests, throwsOnUninitializedGetBlob) { - InferRequest req; - ASSERT_THROW(req.GetBlob({}), InferenceEngine::NotAllocated); -} - -TEST(InferRequestCPPTests, throwsOnUninitializedInfer) { - InferRequest req; - ASSERT_THROW(req.Infer(), InferenceEngine::NotAllocated); -} - -TEST(InferRequestCPPTests, throwsOnUninitializedGetPerformanceCounts) { - InferRequest req; - ASSERT_THROW(req.GetPerformanceCounts(), InferenceEngine::NotAllocated); -} - -TEST(InferRequestCPPTests, throwsOnUninitializedSetInput) { - InferRequest req; - ASSERT_THROW(req.SetInput({{}}), InferenceEngine::NotAllocated); -} - -TEST(InferRequestCPPTests, throwsOnUninitializedSetOutput) { - InferRequest req; - ASSERT_THROW(req.SetOutput({{}}), InferenceEngine::NotAllocated); -} - -TEST(InferRequestCPPTests, throwsOnUninitializedStartAsync) { - InferRequest req; - ASSERT_THROW(req.StartAsync(), InferenceEngine::NotAllocated); -} - -TEST(InferRequestCPPTests, throwsOnUninitializedWait) { - InferRequest req; - ASSERT_THROW(req.Wait({}), InferenceEngine::NotAllocated); -} - -TEST(InferRequestCPPTests, throwsOnUninitializedSetCompletionCallback) { - InferRequest req; - std::function f; - ASSERT_THROW(req.SetCompletionCallback(f), InferenceEngine::NotAllocated); -} - -IE_SUPPRESS_DEPRECATED_START - -TEST(InferRequestCPPTests, throwsOnUninitializedCast) { - InferRequest req; - ASSERT_THROW((void)static_cast(req), InferenceEngine::NotAllocated); -} - -IE_SUPPRESS_DEPRECATED_END - -TEST(InferRequestCPPTests, throwsOnUninitializedQueryState) { - InferRequest req; - ASSERT_THROW(req.QueryState(), InferenceEngine::NotAllocated); -} diff --git a/src/inference/tests/functional/cnn_network_test.cpp b/src/inference/tests/functional/cnn_network_test.cpp deleted file mode 100644 index 8430248649aa0c..00000000000000 --- a/src/inference/tests/functional/cnn_network_test.cpp +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include - -#include "cpp/ie_cnn_network.h" -#include "inference_engine.hpp" -#include "openvino/opsets/opset.hpp" -#include "openvino/pass/serialize.hpp" -#include "openvino/util/file_util.hpp" - -using namespace InferenceEngine; - -using CNNNetworkTests = ::testing::Test; - -IE_SUPPRESS_DEPRECATED_START - -TEST_F(CNNNetworkTests, throwsOnInitWithNull) { - std::shared_ptr nlptr = nullptr; - ASSERT_THROW(CNNNetwork network(nlptr), InferenceEngine::Exception); -} - -TEST_F(CNNNetworkTests, throwsOnUninitializedCastToICNNNetwork) { - 
CNNNetwork network; - ASSERT_THROW((void)static_cast(network), InferenceEngine::Exception); -} - -TEST_F(CNNNetworkTests, throwsOnConstUninitializedCastToICNNNetwork) { - const CNNNetwork network; - ASSERT_THROW((void)static_cast(network), InferenceEngine::Exception); -} - -IE_SUPPRESS_DEPRECATED_END - -TEST_F(CNNNetworkTests, throwsOnInitWithNullNgraph) { - std::shared_ptr nlptr = nullptr; - ASSERT_THROW(CNNNetwork network(nlptr), InferenceEngine::Exception); -} - -TEST_F(CNNNetworkTests, throwsOnUninitializedGetOutputsInfo) { - CNNNetwork network; - ASSERT_THROW(network.getOutputsInfo(), InferenceEngine::Exception); -} - -TEST_F(CNNNetworkTests, throwsOnUninitializedGetInputsInfo) { - CNNNetwork network; - ASSERT_THROW(network.getInputsInfo(), InferenceEngine::Exception); -} - -TEST_F(CNNNetworkTests, throwsOnUninitializedLayerCount) { - CNNNetwork network; - ASSERT_THROW(network.layerCount(), InferenceEngine::Exception); -} - -TEST_F(CNNNetworkTests, throwsOnUninitializedGetName) { - CNNNetwork network; - ASSERT_THROW(network.getName(), InferenceEngine::Exception); -} - -TEST_F(CNNNetworkTests, throwsOnUninitializedGetFunction) { - CNNNetwork network; - ASSERT_THROW(network.getFunction(), InferenceEngine::Exception); -} - -TEST_F(CNNNetworkTests, throwsOnConstUninitializedGetFunction) { - const CNNNetwork network; - ASSERT_THROW(network.getFunction(), InferenceEngine::Exception); -} - -TEST_F(CNNNetworkTests, throwsOnConstUninitializedBegin) { - CNNNetwork network; - ASSERT_THROW(network.getFunction(), InferenceEngine::Exception); -} - -TEST_F(CNNNetworkTests, throwsOnConstUninitializedGetInputShapes) { - CNNNetwork network; - ASSERT_THROW(network.getInputShapes(), InferenceEngine::Exception); -} - -static std::shared_ptr CNNNetworkTests_create_model() { - auto param1 = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - param1->set_friendly_name("p1_friendly"); - param1->output(0).set_names({"p1_1", "p1_2"}); - auto param2 = std::make_shared(ov::element::f32, ov::PartialShape{-1, 3, 224, 224}); - param2->set_friendly_name("p2_friendly"); - param2->output(0).set_names({"p2_1", "p2_2"}); - auto param3 = std::make_shared(ov::element::f32, ov::PartialShape{1, 3, 224, 224}); - param3->set_friendly_name("p3_friendly"); - param3->output(0).set_names({"p3_1", "p3_2"}); - return std::make_shared(ov::OutputVector{param1, param2, param3}, - ov::ParameterVector{param1, param2, param3}); -} - -TEST_F(CNNNetworkTests, throwsHasDynamicInputs) { - auto model = CNNNetworkTests_create_model(); - CNNNetwork network(model); - InferenceEngine::Core core; - try { - core.LoadNetwork(network); - FAIL() << "LoadNetwork with dynamic inputs shall throw"; - } catch (const InferenceEngine::Exception& e) { - EXPECT_TRUE(std::string(e.what()).find("InferenceEngine::Core::LoadNetwork") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p1_1") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p1_2") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p2_1") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p2_2") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p3_1") == std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p3_2") == std::string::npos) << e.what(); - } -} - -TEST_F(CNNNetworkTests, throwsHasDynamicInputs_queryNetwork) { - auto model = CNNNetworkTests_create_model(); - CNNNetwork network(model); - InferenceEngine::Core core; - try { - 
core.QueryNetwork(network, "mock"); - FAIL() << "QueryNetwork with dynamic inputs shall throw"; - } catch (const InferenceEngine::Exception& e) { - EXPECT_TRUE(std::string(e.what()).find("InferenceEngine::Core::QueryNetwork") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p1_1") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p1_2") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p2_1") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p2_2") != std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p3_1") == std::string::npos) << e.what(); - EXPECT_TRUE(std::string(e.what()).find("p3_2") == std::string::npos) << e.what(); - } -} diff --git a/src/inference/tests/functional/core_threading.cpp b/src/inference/tests/functional/core_threading.cpp deleted file mode 100644 index 92b13f8b083c46..00000000000000 --- a/src/inference/tests/functional/core_threading.cpp +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "openvino/util/file_util.hpp" -#ifdef __GLIBC__ -# include -# if __GLIBC_MINOR__ < 34 -# define OV_TEST_GLIBC_VERSION_LESS_2_34 -# endif -#endif - -class IECoreThreadingTests : public ::testing::Test { -protected: - std::string modelName = "IECoreThreadingTests.xml", weightsName = "IECoreThreadingTests.bin"; - -public: - void SetUp() override { - auto prefix = ov::test::utils::generateTestFilePrefix(); - modelName = prefix + modelName; - weightsName = prefix + weightsName; - ov::test::utils::generate_test_model(modelName, weightsName); - } - - void TearDown() override { - ov::test::utils::removeIRFiles(modelName, weightsName); - } - - void runParallel(std::function func, - const unsigned int iterations = 100, - const unsigned int threadsNum = 8) { - std::vector threads(threadsNum); - - for (auto& thread : threads) { - thread = std::thread([&]() { - for (unsigned int i = 0; i < iterations; ++i) { - func(); - } - }); - } - - for (auto& thread : threads) { - if (thread.joinable()) - thread.join(); - } - } -}; - -// tested function: SetConfig -TEST_F(IECoreThreadingTests, SetConfigPluginDoesNotExist) { - InferenceEngine::Core ie; - std::map localConfig = {{ov::enable_profiling.name(), "YES"}}; - - runParallel( - [&]() { - ie.SetConfig(localConfig); - }, - 10000); -} - -// TODO: CVS-68982 -#ifndef OPENVINO_STATIC_LIBRARY - -// tested function: RegisterPlugin -TEST_F(IECoreThreadingTests, RegisterPlugin) { - InferenceEngine::Core ie; - std::atomic index{0}; - runParallel( - [&]() { - const std::string deviceName = std::to_string(index++); - ie.RegisterPlugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), - std::string("mock_engine") + OV_BUILD_POSTFIX), - deviceName); - ie.GetVersions(deviceName); - ie.UnregisterPlugin(deviceName); - }, - 4000); -} - -// tested function: RegisterPlugins -TEST_F(IECoreThreadingTests, RegisterPlugins) { - InferenceEngine::Core ie; - std::atomic index{0}; - - auto getPluginXml = [&]() -> std::tuple { - std::string indexStr = std::to_string(index++); - std::string pluginsXML = "test_plugins" + indexStr + ".xml"; - std::ofstream file(pluginsXML); - - file << "::file_separator; - file << ov::util::FileTraits::library_prefix(); - file << "mock_engine"; - file << OV_BUILD_POSTFIX; - file << 
ov::util::FileTraits::dot_symbol; - file << ov::util::FileTraits::library_ext(); - file << "\" name=\""; - file << indexStr; - file << "\">"; - file.flush(); - file.close(); - - return std::tie(pluginsXML, indexStr); - }; - - runParallel( - [&]() { - std::string fileName, deviceName; - std::tie(fileName, deviceName) = getPluginXml(); - ie.RegisterPlugins(fileName); - ie.GetVersions(deviceName); - ASSERT_EQ(0, std::remove(fileName.c_str())); - }, - 1000); -} - -#endif // !OPENVINO_STATIC_LIBRARY - -// tested function: GetAvailableDevices, UnregisterPlugin -// TODO: some initialization (e.g. thread/dlopen) sporadically fails during such stress-test scenario -TEST_F(IECoreThreadingTests, GetAvailableDevices) { -#ifdef OV_TEST_GLIBC_VERSION_LESS_2_34 - GTEST_SKIP(); -#endif - InferenceEngine::Core ie; - runParallel( - [&]() { - std::vector devices = ie.GetAvailableDevices(); - - // unregister all the devices - for (auto&& deviceName : devices) { - try { - ie.UnregisterPlugin(deviceName); - } catch (const InferenceEngine::Exception& ex) { - // if several threads unload plugin at once, the first thread does this - // while all others will throw an exception that plugin is not registered - ASSERT_STR_CONTAINS(ex.what(), "name is not registered in the"); - } - } - }, - 30); -} diff --git a/src/inference/tests/functional/data_test.cpp b/src/inference/tests/functional/data_test.cpp deleted file mode 100644 index e3ff4e6f69f1d0..00000000000000 --- a/src/inference/tests/functional/data_test.cpp +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -using namespace ::testing; -using namespace std; -using namespace InferenceEngine; -IE_SUPPRESS_DEPRECATED_START - -class DataTests : public ::testing::Test { -protected: - const std::string data_name = "test_data_name"; - const Precision precision = Precision::FP32; - - const SizeVector notEmptyDims = {1, 1, 1, 1}; - const SizeVector emptyDims = {}; - const size_t batchSize = 1; - - class BlockingDescTest : public BlockingDesc { - public: - BlockingDescTest(const SizeVector& blocked_dims, const SizeVector& order) : BlockingDesc(blocked_dims, order) {} - - void fillDescTest(const SizeVector& blocked_dims, const SizeVector& order) { - fillDesc(blocked_dims, order); - } - }; -}; - -TEST_F(DataTests, canSetEmptyDimsForDataDefault) { - Data data(data_name, precision); - ASSERT_NO_THROW(data.setDims(emptyDims)); - ASSERT_FALSE(data.isInitialized()); -} - -TEST_F(DataTests, canSetEmptyDimsForDataBlocked) { - Data data(data_name, precision, BLOCKED); - ASSERT_NO_THROW(data.setDims(emptyDims)); -} - -TEST_F(DataTests, canSetNotEmptyDimsForDataBlocked) { - Data data(data_name, precision, BLOCKED); - ASSERT_NO_THROW(data.setDims(notEmptyDims)); -} - -TEST_F(DataTests, canSetNotEmptyDimsForDataNCHW) { - Data data(data_name, precision, NCHW); - ASSERT_NO_THROW(data.setDims(notEmptyDims)); - ASSERT_TRUE(data.isInitialized()); -} - -TEST_F(DataTests, canSetEmptyDimsForTensorDescNCHW) { - TensorDesc desc(precision, emptyDims, NCHW); - ASSERT_NO_THROW(desc.setDims(emptyDims)); -} - -TEST_F(DataTests, canSetEmptyDimsForTensorDescBlocked) { - TensorDesc desc(precision, emptyDims, BLOCKED); - ASSERT_NO_THROW(desc.setDims(emptyDims)); -} - -TEST_F(DataTests, canSetNotEmptyDimsForTensorDescBlocked) { - TensorDesc desc(precision, notEmptyDims, BLOCKED); - ASSERT_NO_THROW(desc.setDims(notEmptyDims)); -} - -TEST_F(DataTests, canSetEmptyDimsForBlockingDescOrder) { - 
ASSERT_NO_THROW(BlockingDesc(emptyDims, emptyDims)); -} - -TEST_F(DataTests, throwOnFillDescByEmptyDimsForBlockingDesc) { - BlockingDescTest desc(emptyDims, emptyDims); - ASSERT_THROW(desc.fillDescTest(emptyDims, emptyDims), Exception); -} - -TEST_F(DataTests, throwOnSetEmptyDimsForBlockingDescBlocked) { - ASSERT_NO_THROW(BlockingDesc(emptyDims, BLOCKED)); -} - -TEST_F(DataTests, throwOnSetEmptyDimsForBlockingDescNCHW) { - ASSERT_NO_THROW(BlockingDesc(emptyDims, NCHW)); -} - -TEST_F(DataTests, canSetNotEmptyDimsForBlockingDescBlocked) { - ASSERT_NO_THROW(BlockingDesc(notEmptyDims, BLOCKED)); -} - -TEST_F(DataTests, canSetNotEmptyDimsForBlockingDescNCHW) { - ASSERT_NO_THROW(BlockingDesc(notEmptyDims, NCHW)); -} - -TEST_F(DataTests, setPrecision) { - Data data(data_name, {Precision::FP32, emptyDims, Layout::NCHW}); - - EXPECT_EQ(Precision::FP32, data.getPrecision()); - EXPECT_EQ(Precision::FP32, data.getTensorDesc().getPrecision()); - - data.setPrecision(Precision::FP16); - EXPECT_EQ(Precision::FP16, data.getPrecision()); - EXPECT_EQ(Precision::FP16, data.getTensorDesc().getPrecision()); -} diff --git a/src/inference/tests/functional/executable_network.cpp b/src/inference/tests/functional/executable_network.cpp deleted file mode 100644 index 5d741e876749c4..00000000000000 --- a/src/inference/tests/functional/executable_network.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include - -using namespace ::testing; -using namespace std; -using namespace InferenceEngine; -using namespace InferenceEngine::details; - -TEST(ExecutableNetworkTests, throwsOnUninitializedGetOutputsInfo) { - ExecutableNetwork exec; - ASSERT_THROW(exec.GetOutputsInfo(), InferenceEngine::NotAllocated); -} - -TEST(ExecutableNetworkTests, throwsOnUninitializedGetInputsInfo) { - ExecutableNetwork exec; - ASSERT_THROW(exec.GetInputsInfo(), InferenceEngine::NotAllocated); -} - -TEST(ExecutableNetworkTests, throwsOnUninitializedExport) { - ExecutableNetwork exec; - ASSERT_THROW(exec.Export(std::string()), InferenceEngine::NotAllocated); -} - -TEST(ExecutableNetworkTests, throwsOnUninitializedExportStream) { - ExecutableNetwork exec; - ASSERT_THROW(exec.Export(std::cout), InferenceEngine::NotAllocated); -} - -TEST(ExecutableNetworkTests, throwsOnUninitializedGetExecGraphInfo) { - ExecutableNetwork exec; - ASSERT_THROW(exec.GetExecGraphInfo(), InferenceEngine::NotAllocated); -} - -TEST(ExecutableNetworkTests, throwsOnUninitializedSetConfig) { - ExecutableNetwork exec; - ASSERT_THROW(exec.SetConfig({{}}), InferenceEngine::NotAllocated); -} - -TEST(ExecutableNetworkTests, throwsOnUninitializedGetConfig) { - ExecutableNetwork exec; - ASSERT_THROW(exec.GetConfig({}), InferenceEngine::NotAllocated); -} - -TEST(ExecutableNetworkTests, throwsOnUninitializedGetMetric) { - ExecutableNetwork exec; - ASSERT_THROW(exec.GetMetric({}), InferenceEngine::NotAllocated); -} diff --git a/src/inference/tests/functional/ie_precision_test.cpp b/src/inference/tests/functional/ie_precision_test.cpp deleted file mode 100644 index 65bb27f0e476ab..00000000000000 --- a/src/inference/tests/functional/ie_precision_test.cpp +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include -#include - -IE_SUPPRESS_DEPRECATED_START - -using Precision = InferenceEngine::Precision; - -using PrecisionTests = ::testing::Test; - -TEST_F(PrecisionTests, ShowsCorrectPrecisionNames) { - 
EXPECT_STREQ(Precision(Precision::I64).name(), "I64"); - EXPECT_STREQ(Precision(Precision::U64).name(), "U64"); - EXPECT_STREQ(Precision(Precision::BF16).name(), "BF16"); - EXPECT_STREQ(Precision(Precision::FP16).name(), "FP16"); - EXPECT_STREQ(Precision(Precision::FP32).name(), "FP32"); - EXPECT_STREQ(Precision(Precision::FP64).name(), "FP64"); - EXPECT_STREQ(Precision(Precision::I16).name(), "I16"); - EXPECT_STREQ(Precision(Precision::I32).name(), "I32"); - EXPECT_STREQ(Precision(Precision::U32).name(), "U32"); - EXPECT_STREQ(Precision(Precision::U16).name(), "U16"); - EXPECT_STREQ(Precision(Precision::I4).name(), "I4"); - EXPECT_STREQ(Precision(Precision::I8).name(), "I8"); - EXPECT_STREQ(Precision(Precision::Q78).name(), "Q78"); - EXPECT_STREQ(Precision(Precision::U4).name(), "U4"); - EXPECT_STREQ(Precision(Precision::U8).name(), "U8"); - EXPECT_STREQ(Precision(Precision::MIXED).name(), "MIXED"); - EXPECT_STREQ(Precision(Precision::UNSPECIFIED).name(), "UNSPECIFIED"); - EXPECT_STREQ(Precision(static_cast(-3)).name(), "UNSPECIFIED"); - EXPECT_STREQ(Precision(1, "Custom Name").name(), "Custom Name"); -} - -TEST_F(PrecisionTests, sizeIsCorrect) { - EXPECT_EQ(Precision(Precision::I64).size(), 8); - EXPECT_EQ(Precision(Precision::U64).size(), 8); - EXPECT_EQ(Precision(Precision::BF16).size(), 2); - EXPECT_EQ(Precision(Precision::FP16).size(), 2); - EXPECT_EQ(Precision(Precision::FP32).size(), 4); - EXPECT_EQ(Precision(Precision::FP64).size(), 8); - EXPECT_EQ(Precision(Precision::I32).size(), 4); - EXPECT_EQ(Precision(Precision::U32).size(), 4); - EXPECT_EQ(Precision(Precision::I16).size(), 2); - EXPECT_EQ(Precision(Precision::U16).size(), 2); - EXPECT_EQ(Precision(Precision::I4).size(), 1); - EXPECT_EQ(Precision(Precision::I8).size(), 1); - EXPECT_EQ(Precision(Precision::Q78).size(), 2); - EXPECT_EQ(Precision(Precision::U8).size(), 1); - EXPECT_EQ(Precision(Precision::U4).size(), 1); - EXPECT_EQ(Precision(10 * 8).size(), 10); - EXPECT_THROW(Precision(Precision::MIXED).size(), InferenceEngine::Exception); - EXPECT_THROW(Precision(Precision::UNSPECIFIED).size(), InferenceEngine::Exception); -} - -TEST_F(PrecisionTests, bitsSizeIsCorrect) { - EXPECT_EQ(Precision(Precision::I64).bitsSize(), 64); - EXPECT_EQ(Precision(Precision::U64).bitsSize(), 64); - EXPECT_EQ(Precision(Precision::BF16).bitsSize(), 16); - EXPECT_EQ(Precision(Precision::FP16).bitsSize(), 16); - EXPECT_EQ(Precision(Precision::FP32).bitsSize(), 32); - EXPECT_EQ(Precision(Precision::FP64).bitsSize(), 64); - EXPECT_EQ(Precision(Precision::I32).bitsSize(), 32); - EXPECT_EQ(Precision(Precision::U32).bitsSize(), 32); - EXPECT_EQ(Precision(Precision::I16).bitsSize(), 16); - EXPECT_EQ(Precision(Precision::U16).bitsSize(), 16); - EXPECT_EQ(Precision(Precision::I4).bitsSize(), 4); - EXPECT_EQ(Precision(Precision::I8).bitsSize(), 8); - EXPECT_EQ(Precision(Precision::Q78).bitsSize(), 16); - EXPECT_EQ(Precision(Precision::U8).bitsSize(), 8); - EXPECT_EQ(Precision(Precision::U4).bitsSize(), 4); - EXPECT_EQ(Precision(10 * 8).bitsSize(), 80); - EXPECT_THROW(Precision(Precision::MIXED).bitsSize(), InferenceEngine::Exception); - EXPECT_THROW(Precision(Precision::UNSPECIFIED).bitsSize(), InferenceEngine::Exception); -} - -TEST_F(PrecisionTests, is_float) { - EXPECT_TRUE(Precision(Precision::BF16).is_float()); - EXPECT_TRUE(Precision(Precision::FP16).is_float()); - EXPECT_TRUE(Precision(Precision::FP32).is_float()); - EXPECT_TRUE(Precision(Precision::FP64).is_float()); - EXPECT_FALSE(Precision(Precision::I64).is_float()); - 
EXPECT_FALSE(Precision(Precision::U64).is_float()); - EXPECT_FALSE(Precision(Precision::I32).is_float()); - EXPECT_FALSE(Precision(Precision::U32).is_float()); - EXPECT_FALSE(Precision(Precision::I16).is_float()); - EXPECT_FALSE(Precision(Precision::U16).is_float()); - EXPECT_FALSE(Precision(Precision::I8).is_float()); - EXPECT_FALSE(Precision(Precision::I4).is_float()); - EXPECT_FALSE(Precision(Precision::Q78).is_float()); - EXPECT_FALSE(Precision(Precision::U4).is_float()); - EXPECT_FALSE(Precision(Precision::U8).is_float()); - EXPECT_FALSE(Precision(Precision::MIXED).is_float()); - EXPECT_FALSE(Precision(10).is_float()); - EXPECT_FALSE(Precision(static_cast(-3)).is_float()); - EXPECT_FALSE(Precision(Precision::UNSPECIFIED).is_float()); -} - -TEST_F(PrecisionTests, constructFromSTR) { - EXPECT_EQ(Precision(Precision::I64), Precision::FromStr("I64")); - EXPECT_EQ(Precision(Precision::U64), Precision::FromStr("U64")); - EXPECT_EQ(Precision(Precision::BF16), Precision::FromStr("BF16")); - EXPECT_EQ(Precision(Precision::FP16), Precision::FromStr("FP16")); - EXPECT_EQ(Precision(Precision::FP32), Precision::FromStr("FP32")); - EXPECT_EQ(Precision(Precision::FP64), Precision::FromStr("FP64")); - EXPECT_EQ(Precision(Precision::I32), Precision::FromStr("I32")); - EXPECT_EQ(Precision(Precision::U32), Precision::FromStr("U32")); - EXPECT_EQ(Precision(Precision::I16), Precision::FromStr("I16")); - EXPECT_EQ(Precision(Precision::U16), Precision::FromStr("U16")); - EXPECT_EQ(Precision(Precision::I4), Precision::FromStr("I4")); - EXPECT_EQ(Precision(Precision::I8), Precision::FromStr("I8")); - EXPECT_EQ(Precision(Precision::Q78), Precision::FromStr("Q78")); - EXPECT_EQ(Precision(Precision::U4), Precision::FromStr("U4")); - EXPECT_EQ(Precision(Precision::U8), Precision::FromStr("U8")); - EXPECT_EQ(Precision(Precision::MIXED), Precision::FromStr("MIXED")); - EXPECT_EQ(Precision(static_cast(-3)), Precision::FromStr("UNSPECIFIED")); - EXPECT_EQ(Precision(Precision::UNSPECIFIED), Precision::FromStr("UNSPECIFIED")); -} - -TEST_F(PrecisionTests, canCompareCustomPrecisions) { - Precision p(12); - Precision p1(12, "XXX"); - EXPECT_FALSE(p == p1); - - std::string d; - d.push_back('X'); - d.push_back('X'); - d.push_back('X'); - Precision p2(12, d.c_str()); - EXPECT_TRUE(p2 == p1); - - Precision p3(13, "XXX"); - EXPECT_FALSE(p3 == p1); - - Precision p4(13); - EXPECT_FALSE(p4 == p); - - Precision p5(12); - EXPECT_TRUE(p5 == p); -} - -TEST_F(PrecisionTests, canUseInIfs) { - Precision p; - EXPECT_TRUE(!p); - p = Precision::FP32; - EXPECT_FALSE(!p); - EXPECT_TRUE(p); - p = Precision(static_cast(-3)); - EXPECT_TRUE(!p); -} - -TEST_F(PrecisionTests, canCreateFromStruct) { - struct X { - int a; - int b; - }; - auto precision = Precision::fromType(); - EXPECT_EQ(precision.size(), sizeof(X)); -} - -TEST_F(PrecisionTests, canCreateMoreThan255bitsPrecisions) { - struct Y { - uint8_t a[257]; - }; - - EXPECT_NO_THROW(Precision::fromType()); - EXPECT_EQ(Precision::fromType().size(), 257); -} diff --git a/src/inference/tests/functional/matmul_sr_tests.cpp b/src/inference/tests/functional/matmul_sr_tests.cpp index 3d17cfd915fa58..9e073ed17185c4 100644 --- a/src/inference/tests/functional/matmul_sr_tests.cpp +++ b/src/inference/tests/functional/matmul_sr_tests.cpp @@ -12,7 +12,6 @@ #include "common_test_utils/graph_comparator.hpp" #include "common_test_utils/ov_test_utils.hpp" #include "common_test_utils/test_common.hpp" -#include "ie_common.h" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" #include 
"openvino/op/matmul.hpp" diff --git a/src/inference/tests/functional/ov_core_test.cpp b/src/inference/tests/functional/ov_core_test.cpp new file mode 100644 index 00000000000000..a7fb627cff4884 --- /dev/null +++ b/src/inference/tests/functional/ov_core_test.cpp @@ -0,0 +1,81 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include + +#include "common_test_utils/common_utils.hpp" +#include "common_test_utils/file_utils.hpp" +#include "openvino/runtime/core.hpp" +#include "openvino/util/file_util.hpp" + +#ifndef OPENVINO_STATIC_LIBRARY + +static void create_plugin_xml(const std::string& file_name) { + std::ofstream file(file_name); + + file << "::file_separator; + file << ov::util::FileTraits::library_prefix(); + file << "mock_engine"; + file << OV_BUILD_POSTFIX; + file << ov::util::FileTraits::dot_symbol; + file << ov::util::FileTraits::library_ext(); + file << "\" name=\"1\">"; + file.flush(); + file.close(); +} + +static void remove_plugin_xml(const std::string& file_name) { + ov::test::utils::removeFile(file_name); +} + +TEST(CoreBaseTest, LoadPluginXML) { + std::string xml_file_name = "test_plugin.xml"; + std::string xml_file_path = + ov::test::utils::getOpenvinoLibDirectory() + ov::util::FileTraits::file_separator + xml_file_name; + create_plugin_xml(xml_file_path); + EXPECT_NO_THROW(ov::Core core(xml_file_name)); + remove_plugin_xml(xml_file_path); +} + +TEST(CoreBaseTest, LoadPluginDifferentXMLExtension) { + std::string xml_file_name = "test_plugin.test"; + std::string xml_file_path = + ov::test::utils::getOpenvinoLibDirectory() + ov::util::FileTraits::file_separator + xml_file_name; + create_plugin_xml(xml_file_path); + EXPECT_NO_THROW(ov::Core core(xml_file_name)); + remove_plugin_xml(xml_file_path); +} + +TEST(CoreBaseTest, LoadAbsoluteOVPathPluginXML) { + std::string xml_file_name = "test_plugin.xml"; + std::string xml_file_path = + ov::test::utils::getOpenvinoLibDirectory() + ov::util::FileTraits::file_separator + xml_file_name; + create_plugin_xml(xml_file_path); + EXPECT_NO_THROW(ov::Core core(xml_file_path)); + remove_plugin_xml(xml_file_path); +} + +TEST(CoreBaseTest, LoadAbsoluteCWPathPluginXML) { + std::string xml_file_name = "test_plugin.xml"; + std::string xml_file_path = + ov::test::utils::getCurrentWorkingDir() + ov::util::FileTraits::file_separator + xml_file_name; + create_plugin_xml(xml_file_path); + EXPECT_NO_THROW(ov::Core core(xml_file_path)); + remove_plugin_xml(xml_file_path); +} + +TEST(CoreBaseTest, LoadRelativeCWPathPluginXML) { + std::string xml_file_name = "test_plugin.xml"; + std::string xml_file_path = + ov::test::utils::getCurrentWorkingDir() + ov::util::FileTraits::file_separator + xml_file_name; + create_plugin_xml(xml_file_path); + EXPECT_NO_THROW(ov::Core core(xml_file_name)); + remove_plugin_xml(xml_file_path); +} + +#endif \ No newline at end of file diff --git a/src/inference/tests/functional/ov_extension_test.cpp b/src/inference/tests/functional/ov_extension_test.cpp index 08a60b81e44e08..ca2cefca071745 100644 --- a/src/inference/tests/functional/ov_extension_test.cpp +++ b/src/inference/tests/functional/ov_extension_test.cpp @@ -8,7 +8,6 @@ #include "openvino/util/file_util.hpp" using namespace testing; -using namespace InferenceEngine; using namespace ov::test::utils; namespace { diff --git a/src/inference/tests/functional/ov_infer_request_test.cpp b/src/inference/tests/functional/ov_infer_request_test.cpp index 8aede54728dccc..935b056b2fef7e 100644 --- 
a/src/inference/tests/functional/ov_infer_request_test.cpp +++ b/src/inference/tests/functional/ov_infer_request_test.cpp @@ -12,8 +12,6 @@ using namespace ::testing; using namespace std; -using namespace InferenceEngine; -using namespace InferenceEngine::details; TEST(InferRequestOVTests, throwsOnUninitializedSetTensor) { ov::InferRequest req; diff --git a/src/inference/tests/functional/pre_allocator_test.cpp b/src/inference/tests/functional/pre_allocator_test.cpp deleted file mode 100644 index eb0099884050dd..00000000000000 --- a/src/inference/tests/functional/pre_allocator_test.cpp +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include - -#include "details/ie_pre_allocator.hpp" -#include "ie_allocator.hpp" - -using namespace ::testing; -using namespace std; -using namespace InferenceEngine; - -IE_SUPPRESS_DEPRECATED_START - -class PreallocatorTests : public ::testing::Test { -protected: - std::vector mybuf; - - void SetUp() override { - mybuf.resize(10); - allocator = details::make_pre_allocator(&*mybuf.begin(), mybuf.size()); - } - std::shared_ptr allocator; -}; - -TEST_F(PreallocatorTests, canAccessPreAllocatedMemory) { - void* handle = allocator->alloc(3); - float* ptr = reinterpret_cast(allocator->lock(handle)); - - mybuf = {1.1f, 2.2f, 3.3f}; - - ASSERT_EQ(ptr, &*mybuf.begin()); - ASSERT_EQ(ptr[0], 1.1f); - ASSERT_EQ(ptr[1], 2.2f); - ASSERT_EQ(ptr[2], 3.3f); -} - -TEST_F(PreallocatorTests, canNotAllocateMoreMemory) { - // large block such as 10k will result in nullptr - EXPECT_EQ(nullptr, allocator->lock(allocator->alloc(10 * sizeof(float) + 1))); - EXPECT_NE(nullptr, allocator->lock(allocator->alloc(10 * sizeof(float)))); -} - -TEST_F(PreallocatorTests, canNotLockWrongHandle) { - void* handle = allocator->alloc(3); - EXPECT_EQ(nullptr, allocator->lock(1 + reinterpret_cast(handle))); -} diff --git a/src/inference/tests/functional/tensor_desc_test.cpp b/src/inference/tests/functional/tensor_desc_test.cpp deleted file mode 100644 index 64dc1372e926c1..00000000000000 --- a/src/inference/tests/functional/tensor_desc_test.cpp +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include -#include - -using namespace ::testing; -using namespace std; -using namespace InferenceEngine; - -IE_SUPPRESS_DEPRECATED_START - -using TensorDescTests = ::testing::Test; - -TEST_F(TensorDescTests, CreateBlobWithIncorrectLayout) { - ASSERT_THROW(make_shared_blob({Precision::FP32, {1, 3, 32}, Layout::NC}), Exception); -} - -TEST_F(TensorDescTests, CreateBlockedBlobNCHW) { - TensorDesc desc(Precision::FP32, {1, 4, 2, 1}, {{1, 2, 2, 1, 2}, {0, 1, 2, 3, 1}}); - float data[8] = {1, 2, 3, 4, 5, 6, 7, 8}; - Blob::Ptr blockedBlob = make_shared_blob(desc, data); - Blob::Ptr nchwBlob = make_shared_blob({Precision::FP32, {1, 4, 2, 1}, Layout::NCHW}, data); - ASSERT_NE(blockedBlob->getTensorDesc().offset(5), nchwBlob->getTensorDesc().offset(5)); - ASSERT_EQ(6, blockedBlob->getTensorDesc().offset(5)); - ASSERT_EQ(5, nchwBlob->getTensorDesc().offset(5)); - ASSERT_EQ(Layout::NCHW, nchwBlob->getTensorDesc().getLayout()); - ASSERT_EQ(Layout::BLOCKED, blockedBlob->getTensorDesc().getLayout()); -} - -TEST_F(TensorDescTests, CreateBlockedBlobNCDHW) { - TensorDesc desc(Precision::FP32, {1, 4, 2, 2, 1}, {{1, 2, 2, 2, 1, 2}, {0, 1, 2, 3, 4, 1}}); - float data[8] = {1, 2, 3, 4, 5, 6, 7, 8}; - Blob::Ptr blockedBlob = 
make_shared_blob(desc, data); - Blob::Ptr ncdhwBlob = make_shared_blob({Precision::FP32, {1, 4, 2, 2, 1}, Layout::NCDHW}, data); - ASSERT_NE(blockedBlob->getTensorDesc().offset(6), ncdhwBlob->getTensorDesc().offset(6)); - ASSERT_EQ(5, blockedBlob->getTensorDesc().offset(6)); - ASSERT_EQ(6, ncdhwBlob->getTensorDesc().offset(6)); - ASSERT_EQ(Layout::NCDHW, ncdhwBlob->getTensorDesc().getLayout()); - ASSERT_EQ(Layout::BLOCKED, blockedBlob->getTensorDesc().getLayout()); -} - -TEST_F(TensorDescTests, CompareHWCandCHWLayouts) { - TensorDesc descCHW(Precision::FP32, {1, 3, 4}, Layout::CHW); - TensorDesc descHWC(Precision::FP32, {1, 3, 4}, Layout::HWC); - SizeVector chw = {0, 1, 2}; - SizeVector hwc = {1, 2, 0}; - - ASSERT_NE(descCHW, descHWC); - ASSERT_NE(descCHW.getBlockingDesc(), descHWC.getBlockingDesc()); - ASSERT_NE(descCHW.getBlockingDesc().getOrder(), descHWC.getBlockingDesc().getOrder()); - ASSERT_EQ(descCHW.getBlockingDesc().getOrder(), chw); - ASSERT_EQ(descHWC.getBlockingDesc().getOrder(), hwc); -} - -TEST_F(TensorDescTests, CompareNHWCandNCHWLayouts) { - TensorDesc descNCHW(Precision::FP32, {1, 3, 4, 2}, Layout::NCHW); - TensorDesc descNHWC(Precision::FP32, {1, 3, 4, 2}, Layout::NHWC); - SizeVector nchw = {0, 1, 2, 3}; - SizeVector nhwc = {0, 2, 3, 1}; - - ASSERT_NE(descNCHW, descNHWC); - ASSERT_NE(descNCHW.getBlockingDesc(), descNHWC.getBlockingDesc()); - ASSERT_NE(descNCHW.getBlockingDesc().getOrder(), descNHWC.getBlockingDesc().getOrder()); - ASSERT_EQ(descNCHW.getBlockingDesc().getOrder(), nchw); - ASSERT_EQ(descNHWC.getBlockingDesc().getOrder(), nhwc); -} - -TEST_F(TensorDescTests, CompareNDHWCandNCDHWLayouts) { - TensorDesc descNCDHW(Precision::FP32, {1, 3, 4, 4, 2}, Layout::NCDHW); - TensorDesc descNDHWC(Precision::FP32, {1, 3, 4, 4, 2}, Layout::NDHWC); - SizeVector ncdhw = {0, 1, 2, 3, 4}; - SizeVector ndhwc = {0, 2, 3, 4, 1}; - - ASSERT_NE(descNCDHW, descNDHWC); - ASSERT_NE(descNCDHW.getBlockingDesc(), descNDHWC.getBlockingDesc()); - ASSERT_NE(descNCDHW.getBlockingDesc().getOrder(), descNDHWC.getBlockingDesc().getOrder()); - ASSERT_EQ(descNCDHW.getBlockingDesc().getOrder(), ncdhw); - ASSERT_EQ(descNDHWC.getBlockingDesc().getOrder(), ndhwc); -} - -TEST_F(TensorDescTests, SetLayout) { - TensorDesc descNCHW(Precision::FP32, {1, 2, 3, 4}, Layout::NCHW); - TensorDesc decsNHWC = descNCHW; - decsNHWC.setLayout(Layout::NHWC); - - TensorDesc refNHWC(descNCHW.getPrecision(), descNCHW.getDims(), Layout::NHWC); - ASSERT_EQ(decsNHWC, refNHWC); -} - -TEST_F(TensorDescTests, setDimsForBLOCKED) { - TensorDesc desc(Precision::FP32, {1, 2, 3, 4, 5, 6}, Layout::BLOCKED); - SizeVector newDims{7, 7, 7, 7, 7, 7}; - desc.setDims(newDims); - EXPECT_EQ(desc.getDims(), newDims); - EXPECT_EQ(desc.getBlockingDesc().getBlockDims(), newDims); -} - -TEST_F(TensorDescTests, setDimsForNHWC) { - TensorDesc desc(Precision::FP32, {1, 2, 3, 4}, Layout::NHWC); - auto refOrder = desc.getBlockingDesc().getOrder(); - SizeVector newDims{7, 7, 7, 7}; - desc.setDims(newDims); - EXPECT_EQ(desc.getDims(), newDims); - EXPECT_EQ(desc.getLayout(), Layout::NHWC); - EXPECT_EQ(desc.getBlockingDesc().getBlockDims(), newDims); - EXPECT_EQ(desc.getBlockingDesc().getOrder(), refOrder); -} diff --git a/src/inference/tests/functional/variable_state.cpp b/src/inference/tests/functional/variable_state.cpp deleted file mode 100644 index 5d23b4b7132614..00000000000000 --- a/src/inference/tests/functional/variable_state.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 
-// - -#include - -#include - -#include "openvino/core/deprecated.hpp" - -using namespace ::testing; -using namespace std; -using namespace InferenceEngine; -using namespace InferenceEngine::details; -OPENVINO_SUPPRESS_DEPRECATED_START - -TEST(VariableStateCPPTests, throwsOnUninitializedReset) { - VariableState req; - ASSERT_THROW(req.Reset(), InferenceEngine::NotAllocated); -} - -TEST(VariableStateCPPTests, throwsOnUninitializedGetname) { - VariableState req; - ASSERT_THROW(req.GetName(), InferenceEngine::NotAllocated); -} - -TEST(VariableStateCPPTests, throwsOnUninitializedGetState) { - VariableState req; - ASSERT_THROW(req.GetState(), InferenceEngine::NotAllocated); -} - -TEST(VariableStateCPPTests, throwsOnUninitializedSetState) { - VariableState req; - Blob::Ptr blob; - ASSERT_THROW(req.SetState(blob), InferenceEngine::NotAllocated); -} diff --git a/src/inference/tests/unit/executor_config_test.cpp b/src/inference/tests/unit/executor_config_test.cpp new file mode 100644 index 00000000000000..6a02b35241a686 --- /dev/null +++ b/src/inference/tests/unit/executor_config_test.cpp @@ -0,0 +1,1146 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "common_test_utils/test_common.hpp" +#include "openvino/runtime/threading/istreams_executor.hpp" +#include "os/cpu_map_info.hpp" + +using namespace testing; +using namespace ov; +using namespace threading; + +namespace { + +#if defined(__linux__) || defined(_WIN32) + +struct ExecutorConfigTestCase { + std::vector> _proc_type_table; + std::vector> _cpu_mapping_table; + int _num_streams; + int _threads_per_stream; + ov::threading::IStreamsExecutor::Config::PreferredCoreType _core_type; + bool _cpu_pinning; + std::vector> _streams_info_table_in; + std::vector> _streams_info_table; + std::vector> _stream_processors; +}; + +class ExecutorConfigTest : public ov::test::TestsCommon, + public testing::WithParamInterface> { +public: + void SetUp() override { + auto test_data = std::get<0>(GetParam()); + + CPU& cpu = cpu_info(); + cpu._org_proc_type_table = test_data._proc_type_table; + cpu._proc_type_table = test_data._proc_type_table; + cpu._cpu_mapping_table = test_data._cpu_mapping_table; + cpu._numa_nodes = cpu._proc_type_table.size() > 1 ? 
static_cast(cpu._proc_type_table.size()) - 1 : 1; + cpu._sockets = cpu._numa_nodes; + + ov::threading::IStreamsExecutor::Config config{"config test", + test_data._num_streams, + test_data._threads_per_stream, + ov::threading::IStreamsExecutor::ThreadBindingType::NONE, + 1, + 0, + 0, + test_data._core_type, + test_data._streams_info_table_in, + test_data._cpu_pinning}; + + ASSERT_EQ(test_data._cpu_pinning, config.get_cpu_reservation()); + ASSERT_EQ(test_data._streams_info_table, config.get_streams_info_table()); + ASSERT_EQ(test_data._stream_processors, config.get_stream_processor_ids()); + } +}; + +ExecutorConfigTestCase _1sockets_streams_4_threads_1 = { + // param[in]: proc_type_table, {total processors, number of physical processors, number of Efficient processors, + // number of hyper threading processors} + { + {12, 6, 0, 6, 0, 0}, + }, + // param[in]: cpu_mapping_table, {PROCESSOR_ID, NUMA_ID, SOCKET_ID, CORE_ID, CORE_TYPE, GROUP_ID, Used} + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, + {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, + {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, + {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + }, + 4, // param[in]: the number of streams + 1, // param[in]: the number of threads per stream + ov::threading::IStreamsExecutor::Config::ANY, // param[in]: specified cpu core type + false, // param[in]: specified cpu pinning + {}, // param[in]: streams info table + // param[out]: streams_info_table, {NUMBER_OF_STREAMS, PROC_TYPE, THREADS_PER_STREAM, STREAM_NUMA_NODE_ID, + // STREAM_SOCKET_ID} + { + {4, MAIN_CORE_PROC, 1, 0, 0}, + }, + // param[out]: stream_processors, the list of processor ids on each stream. 
+ {}, +}; + +ExecutorConfigTestCase _1sockets_streams_4_threads_0 = { + { + {12, 6, 0, 6, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, + {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, + {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, + {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + }, + 4, + 0, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + {}, + {}, +}; + +ExecutorConfigTestCase _1sockets_streams_1_threads_12 = { + { + {12, 6, 0, 6, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, + {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, + {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, + {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + }, + 1, + 12, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {1, ALL_PROC, 12, 0, 0}, + {0, MAIN_CORE_PROC, 6, 0, 0}, + {0, HYPER_THREADING_PROC, 6, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _1sockets_streams_1_threads_10 = { + { + {12, 6, 0, 6, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, + {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, + {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, + {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + }, + 1, + 10, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {1, ALL_PROC, 10, 0, 0}, + {0, MAIN_CORE_PROC, 6, 0, 0}, + {0, HYPER_THREADING_PROC, 4, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _1sockets_streams_12_threads_1 = { + { + {12, 6, 0, 6, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, + {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, + {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, + {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + }, + 12, + 1, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {6, MAIN_CORE_PROC, 1, 0, 0}, + {6, HYPER_THREADING_PROC, 1, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _1sockets_streams_13_threads_1 = { + { + {12, 6, 0, 6, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, + {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, + 
{9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, + {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + }, + 13, + 1, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {6, MAIN_CORE_PROC, 1, 0, 0}, + {6, HYPER_THREADING_PROC, 1, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _1sockets_streams_6_threads_1_core_e = { + { + {12, 6, 0, 6, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, + {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, + {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, + {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + }, + 7, + 1, + ov::threading::IStreamsExecutor::Config::LITTLE, + false, + {}, + { + {6, MAIN_CORE_PROC, 1, 0, 0}, + {1, HYPER_THREADING_PROC, 1, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _1sockets_streams_5_threads_1_binding = { + { + {12, 6, 0, 6, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, + {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, + {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, + {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + }, + 5, + 1, + ov::threading::IStreamsExecutor::Config::ANY, + true, + {}, + { + {5, MAIN_CORE_PROC, 1, 0, 0}, + }, + {{0}, {2}, {4}, {6}, {8}}, +}; + +ExecutorConfigTestCase _2sockets_streams_36_threads_1 = { + { + {72, 36, 0, 36, -1, -1}, + {36, 18, 0, 18, 0, 0}, + {36, 18, 0, 18, 1, 1}, + }, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 1, 1, 18, HYPER_THREADING_PROC, 18, -1}, {19, 1, 1, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 1, 1, 20, HYPER_THREADING_PROC, 20, -1}, {21, 1, 1, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 1, 1, 22, HYPER_THREADING_PROC, 22, -1}, {23, 1, 1, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 
33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 0, 0, 36, MAIN_CORE_PROC, 36, -1}, {37, 0, 0, 37, MAIN_CORE_PROC, 37, -1}, + {38, 0, 0, 38, MAIN_CORE_PROC, 38, -1}, {39, 0, 0, 39, MAIN_CORE_PROC, 39, -1}, + {40, 0, 0, 40, MAIN_CORE_PROC, 40, -1}, {41, 0, 0, 41, MAIN_CORE_PROC, 41, -1}, + {42, 0, 0, 42, MAIN_CORE_PROC, 42, -1}, {43, 0, 0, 43, MAIN_CORE_PROC, 43, -1}, + {44, 0, 0, 44, MAIN_CORE_PROC, 44, -1}, {45, 0, 0, 45, MAIN_CORE_PROC, 45, -1}, + {46, 0, 0, 46, MAIN_CORE_PROC, 46, -1}, {47, 0, 0, 47, MAIN_CORE_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 1, 1, 54, MAIN_CORE_PROC, 54, -1}, {55, 1, 1, 55, MAIN_CORE_PROC, 55, -1}, + {56, 1, 1, 56, MAIN_CORE_PROC, 56, -1}, {57, 1, 1, 57, MAIN_CORE_PROC, 57, -1}, + {58, 1, 1, 58, MAIN_CORE_PROC, 58, -1}, {59, 1, 1, 59, MAIN_CORE_PROC, 59, -1}, + {60, 1, 1, 60, MAIN_CORE_PROC, 60, -1}, {61, 1, 1, 61, MAIN_CORE_PROC, 61, -1}, + {62, 1, 1, 62, MAIN_CORE_PROC, 62, -1}, {63, 1, 1, 63, MAIN_CORE_PROC, 63, -1}, + {64, 1, 1, 64, MAIN_CORE_PROC, 64, -1}, {65, 1, 1, 65, MAIN_CORE_PROC, 65, -1}, + {66, 1, 1, 66, MAIN_CORE_PROC, 66, -1}, {67, 1, 1, 67, MAIN_CORE_PROC, 67, -1}, + {68, 1, 1, 68, MAIN_CORE_PROC, 68, -1}, {69, 1, 1, 69, MAIN_CORE_PROC, 69, -1}, + {70, 1, 1, 70, MAIN_CORE_PROC, 70, -1}, {71, 1, 1, 71, MAIN_CORE_PROC, 71, -1}, + }, + 36, + 1, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {18, MAIN_CORE_PROC, 1, 0, 0}, + {18, MAIN_CORE_PROC, 1, 1, 1}, + }, + {}, +}; + +ExecutorConfigTestCase _2sockets_streams_4_threads_5 = { + { + {72, 36, 0, 36, -1, -1}, + {36, 18, 0, 18, 0, 0}, + {36, 18, 0, 18, 1, 1}, + }, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 1, 1, 18, HYPER_THREADING_PROC, 18, -1}, {19, 1, 1, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 1, 1, 20, HYPER_THREADING_PROC, 20, -1}, {21, 1, 1, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 1, 1, 22, HYPER_THREADING_PROC, 22, -1}, {23, 1, 1, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, 
+ {36, 0, 0, 36, MAIN_CORE_PROC, 36, -1}, {37, 0, 0, 37, MAIN_CORE_PROC, 37, -1}, + {38, 0, 0, 38, MAIN_CORE_PROC, 38, -1}, {39, 0, 0, 39, MAIN_CORE_PROC, 39, -1}, + {40, 0, 0, 40, MAIN_CORE_PROC, 40, -1}, {41, 0, 0, 41, MAIN_CORE_PROC, 41, -1}, + {42, 0, 0, 42, MAIN_CORE_PROC, 42, -1}, {43, 0, 0, 43, MAIN_CORE_PROC, 43, -1}, + {44, 0, 0, 44, MAIN_CORE_PROC, 44, -1}, {45, 0, 0, 45, MAIN_CORE_PROC, 45, -1}, + {46, 0, 0, 46, MAIN_CORE_PROC, 46, -1}, {47, 0, 0, 47, MAIN_CORE_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 1, 1, 54, MAIN_CORE_PROC, 54, -1}, {55, 1, 1, 55, MAIN_CORE_PROC, 55, -1}, + {56, 1, 1, 56, MAIN_CORE_PROC, 56, -1}, {57, 1, 1, 57, MAIN_CORE_PROC, 57, -1}, + {58, 1, 1, 58, MAIN_CORE_PROC, 58, -1}, {59, 1, 1, 59, MAIN_CORE_PROC, 59, -1}, + {60, 1, 1, 60, MAIN_CORE_PROC, 60, -1}, {61, 1, 1, 61, MAIN_CORE_PROC, 61, -1}, + {62, 1, 1, 62, MAIN_CORE_PROC, 62, -1}, {63, 1, 1, 63, MAIN_CORE_PROC, 63, -1}, + {64, 1, 1, 64, MAIN_CORE_PROC, 64, -1}, {65, 1, 1, 65, MAIN_CORE_PROC, 65, -1}, + {66, 1, 1, 66, MAIN_CORE_PROC, 66, -1}, {67, 1, 1, 67, MAIN_CORE_PROC, 67, -1}, + {68, 1, 1, 68, MAIN_CORE_PROC, 68, -1}, {69, 1, 1, 69, MAIN_CORE_PROC, 69, -1}, + {70, 1, 1, 70, MAIN_CORE_PROC, 70, -1}, {71, 1, 1, 71, MAIN_CORE_PROC, 71, -1}, + }, + 4, + 5, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {3, MAIN_CORE_PROC, 5, 0, 0}, + {1, MAIN_CORE_PROC, 5, 1, 1}, + }, + {}, +}; + +ExecutorConfigTestCase _2sockets_streams_1_threads_36 = { + { + {72, 36, 0, 36, -1, -1}, + {36, 18, 0, 18, 0, 0}, + {36, 18, 0, 18, 1, 1}, + }, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 1, 1, 18, HYPER_THREADING_PROC, 18, -1}, {19, 1, 1, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 1, 1, 20, HYPER_THREADING_PROC, 20, -1}, {21, 1, 1, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 1, 1, 22, HYPER_THREADING_PROC, 22, -1}, {23, 1, 1, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 0, 0, 36, MAIN_CORE_PROC, 36, -1}, {37, 0, 0, 37, MAIN_CORE_PROC, 37, -1}, + {38, 0, 0, 38, 
MAIN_CORE_PROC, 38, -1}, {39, 0, 0, 39, MAIN_CORE_PROC, 39, -1}, + {40, 0, 0, 40, MAIN_CORE_PROC, 40, -1}, {41, 0, 0, 41, MAIN_CORE_PROC, 41, -1}, + {42, 0, 0, 42, MAIN_CORE_PROC, 42, -1}, {43, 0, 0, 43, MAIN_CORE_PROC, 43, -1}, + {44, 0, 0, 44, MAIN_CORE_PROC, 44, -1}, {45, 0, 0, 45, MAIN_CORE_PROC, 45, -1}, + {46, 0, 0, 46, MAIN_CORE_PROC, 46, -1}, {47, 0, 0, 47, MAIN_CORE_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 1, 1, 54, MAIN_CORE_PROC, 54, -1}, {55, 1, 1, 55, MAIN_CORE_PROC, 55, -1}, + {56, 1, 1, 56, MAIN_CORE_PROC, 56, -1}, {57, 1, 1, 57, MAIN_CORE_PROC, 57, -1}, + {58, 1, 1, 58, MAIN_CORE_PROC, 58, -1}, {59, 1, 1, 59, MAIN_CORE_PROC, 59, -1}, + {60, 1, 1, 60, MAIN_CORE_PROC, 60, -1}, {61, 1, 1, 61, MAIN_CORE_PROC, 61, -1}, + {62, 1, 1, 62, MAIN_CORE_PROC, 62, -1}, {63, 1, 1, 63, MAIN_CORE_PROC, 63, -1}, + {64, 1, 1, 64, MAIN_CORE_PROC, 64, -1}, {65, 1, 1, 65, MAIN_CORE_PROC, 65, -1}, + {66, 1, 1, 66, MAIN_CORE_PROC, 66, -1}, {67, 1, 1, 67, MAIN_CORE_PROC, 67, -1}, + {68, 1, 1, 68, MAIN_CORE_PROC, 68, -1}, {69, 1, 1, 69, MAIN_CORE_PROC, 69, -1}, + {70, 1, 1, 70, MAIN_CORE_PROC, 70, -1}, {71, 1, 1, 71, MAIN_CORE_PROC, 71, -1}, + }, + 1, + 36, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {1, ALL_PROC, 36, -1, -1}, + {0, MAIN_CORE_PROC, 18, 0, 0}, + {0, MAIN_CORE_PROC, 18, 1, 1}, + }, + {}, +}; + +ExecutorConfigTestCase _2sockets_streams_1_threads_30 = { + { + {72, 36, 0, 36, -1, -1}, + {36, 18, 0, 18, 0, 0}, + {36, 18, 0, 18, 1, 1}, + }, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 1, 1, 18, HYPER_THREADING_PROC, 18, -1}, {19, 1, 1, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 1, 1, 20, HYPER_THREADING_PROC, 20, -1}, {21, 1, 1, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 1, 1, 22, HYPER_THREADING_PROC, 22, -1}, {23, 1, 1, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 0, 0, 36, MAIN_CORE_PROC, 36, -1}, {37, 0, 0, 37, MAIN_CORE_PROC, 37, -1}, + {38, 0, 0, 38, MAIN_CORE_PROC, 38, -1}, {39, 0, 0, 39, MAIN_CORE_PROC, 39, -1}, + {40, 
0, 0, 40, MAIN_CORE_PROC, 40, -1}, {41, 0, 0, 41, MAIN_CORE_PROC, 41, -1}, + {42, 0, 0, 42, MAIN_CORE_PROC, 42, -1}, {43, 0, 0, 43, MAIN_CORE_PROC, 43, -1}, + {44, 0, 0, 44, MAIN_CORE_PROC, 44, -1}, {45, 0, 0, 45, MAIN_CORE_PROC, 45, -1}, + {46, 0, 0, 46, MAIN_CORE_PROC, 46, -1}, {47, 0, 0, 47, MAIN_CORE_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 1, 1, 54, MAIN_CORE_PROC, 54, -1}, {55, 1, 1, 55, MAIN_CORE_PROC, 55, -1}, + {56, 1, 1, 56, MAIN_CORE_PROC, 56, -1}, {57, 1, 1, 57, MAIN_CORE_PROC, 57, -1}, + {58, 1, 1, 58, MAIN_CORE_PROC, 58, -1}, {59, 1, 1, 59, MAIN_CORE_PROC, 59, -1}, + {60, 1, 1, 60, MAIN_CORE_PROC, 60, -1}, {61, 1, 1, 61, MAIN_CORE_PROC, 61, -1}, + {62, 1, 1, 62, MAIN_CORE_PROC, 62, -1}, {63, 1, 1, 63, MAIN_CORE_PROC, 63, -1}, + {64, 1, 1, 64, MAIN_CORE_PROC, 64, -1}, {65, 1, 1, 65, MAIN_CORE_PROC, 65, -1}, + {66, 1, 1, 66, MAIN_CORE_PROC, 66, -1}, {67, 1, 1, 67, MAIN_CORE_PROC, 67, -1}, + {68, 1, 1, 68, MAIN_CORE_PROC, 68, -1}, {69, 1, 1, 69, MAIN_CORE_PROC, 69, -1}, + {70, 1, 1, 70, MAIN_CORE_PROC, 70, -1}, {71, 1, 1, 71, MAIN_CORE_PROC, 71, -1}, + }, + 1, + 30, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {1, ALL_PROC, 30, -1, -1}, + {0, MAIN_CORE_PROC, 18, 0, 0}, + {0, MAIN_CORE_PROC, 12, 1, 1}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_5_threads_2 = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 5, + 2, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {4, MAIN_CORE_PROC, 2, 0, 0}, + {1, EFFICIENT_CORE_PROC, 2, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_5_threads_5 = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, 
+ {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 5, + 5, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {2, MAIN_CORE_PROC, 4, 0, 0}, + {2, EFFICIENT_CORE_PROC, 4, 0, 0}, + {1, HYPER_THREADING_PROC, 4, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_4_threads_5 = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 4, + 5, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {1, MAIN_CORE_PROC, 5, 0, 0}, + {1, EFFICIENT_CORE_PROC, 5, 0, 0}, + {1, HYPER_THREADING_PROC, 5, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_4_threads_1 = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 4, + 1, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {4, MAIN_CORE_PROC, 1, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_5_threads_10 = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, 
HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 5, + 10, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {2, MAIN_CORE_PROC, 4, 0, 0}, + {2, EFFICIENT_CORE_PROC, 4, 0, 0}, + {1, HYPER_THREADING_PROC, 4, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_26_threads_1 = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 26, + 1, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + { + {8, MAIN_CORE_PROC, 1, 0, 0}, + {8, EFFICIENT_CORE_PROC, 1, 0, 0}, + {8, HYPER_THREADING_PROC, 1, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_26_threads_1_p = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 26, + 1, + ov::threading::IStreamsExecutor::Config::BIG, + false, + {}, + { + {8, MAIN_CORE_PROC, 1, 0, 0}, + {8, HYPER_THREADING_PROC, 1, 0, 
0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_26_threads_1_e = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 26, + 1, + ov::threading::IStreamsExecutor::Config::LITTLE, + false, + {}, + { + {8, EFFICIENT_CORE_PROC, 1, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_1_threads_0 = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 1, + 0, + ov::threading::IStreamsExecutor::Config::ANY, + false, + {}, + {}, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_1_threads_1_p = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 
0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 1, + 1, + ov::threading::IStreamsExecutor::Config::BIG, + false, + {}, + { + {1, MAIN_CORE_PROC, 1, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_1_threads_1_e = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 1, + 1, + ov::threading::IStreamsExecutor::Config::LITTLE, + false, + {}, + { + {1, EFFICIENT_CORE_PROC, 1, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_1_threads_16_p = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 1, + 16, + ov::threading::IStreamsExecutor::Config::BIG, + false, + {}, + { + {1, ALL_PROC, 16, 0, 0}, + {0, MAIN_CORE_PROC, 8, 0, 0}, + {0, HYPER_THREADING_PROC, 8, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_1_threads_18_p = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, 
HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 1, + 18, + ov::threading::IStreamsExecutor::Config::BIG, + false, + {}, + { + {1, ALL_PROC, 16, 0, 0}, + {0, MAIN_CORE_PROC, 8, 0, 0}, + {0, HYPER_THREADING_PROC, 8, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_1_threads_10_p = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 1, + 10, + ov::threading::IStreamsExecutor::Config::BIG, + false, + {}, + { + {1, ALL_PROC, 10, 0, 0}, + {0, MAIN_CORE_PROC, 8, 0, 0}, + {0, HYPER_THREADING_PROC, 2, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_10_threads_1_e = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 10, + 1, + ov::threading::IStreamsExecutor::Config::LITTLE, + false, + {}, + { + {8, EFFICIENT_CORE_PROC, 1, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_10_threads_1_binding = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, 
MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 10, + 2, + ov::threading::IStreamsExecutor::Config::ANY, + true, + {}, + { + {4, MAIN_CORE_PROC, 2, 0, 0}, + {4, EFFICIENT_CORE_PROC, 2, 0, 0}, + {2, HYPER_THREADING_PROC, 2, 0, 0}, + }, + {{0, 2}, {4, 6}, {8, 10}, {12, 14}, {16, 17}, {18, 19}, {20, 21}, {22, 23}, {1, 3}, {5, 7}}, +}; + +ExecutorConfigTestCase _pecore_streams_info_table_1 = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, + }, + 1, + 8, + ov::threading::IStreamsExecutor::Config::BIG, + false, + { + {2, MAIN_CORE_PROC, 2, 0, 0}, + {2, EFFICIENT_CORE_PROC, 2, 0, 0}, + }, + { + {2, MAIN_CORE_PROC, 2, 0, 0}, + {2, EFFICIENT_CORE_PROC, 2, 0, 0}, + }, + {}, +}; + +ExecutorConfigTestCase _pecore_streams_info_table_2 = { + { + {24, 8, 8, 8, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 
23, -1},
+    },
+    1,
+    8,
+    ov::threading::IStreamsExecutor::Config::BIG,
+    false,
+    {
+        {5, MAIN_CORE_PROC, 2, 0, 0},
+        {2, EFFICIENT_CORE_PROC, 2, 0, 0},
+    },
+    {
+        {1, MAIN_CORE_PROC, 8, 0, 0},
+    },
+    {},
+};
+
+ExecutorConfigTestCase _pecore_streams_info_table_3 = {
+    {
+        {24, 8, 8, 8, 0, 0},
+    },
+    {
+        {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1},         {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1},
+        {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1},         {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1},
+        {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1},         {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1},
+        {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1},         {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1},
+        {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1},         {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1},
+        {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1},       {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1},
+        {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1},       {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1},
+        {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1},       {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1},
+        {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1},  {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1},
+        {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1},
+        {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1},
+        {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1},
+    },
+    1,
+    8,
+    ov::threading::IStreamsExecutor::Config::BIG,
+    true,
+    {
+        {2, MAIN_CORE_PROC, 2, 0, 0},
+        {2, EFFICIENT_CORE_PROC, 2, 0, 0},
+        {2, HYPER_THREADING_PROC, 2, 0, 0},
+    },
+    {
+        {2, MAIN_CORE_PROC, 2, 0, 0},
+        {2, EFFICIENT_CORE_PROC, 2, 0, 0},
+        {2, HYPER_THREADING_PROC, 2, 0, 0},
+    },
+    {{0, 2}, {4, 6}, {16, 17}, {18, 19}, {1, 3}, {5, 7}},
+};
+
+TEST_P(ExecutorConfigTest, ExecutorConfig) {}
+
+INSTANTIATE_TEST_SUITE_P(smoke_ExecutorConfig,
+                         ExecutorConfigTest,
+                         testing::Values(_1sockets_streams_4_threads_1,
+                                         _1sockets_streams_4_threads_0,
+                                         _1sockets_streams_1_threads_12,
+                                         _1sockets_streams_1_threads_10,
+                                         _1sockets_streams_12_threads_1,
+                                         _1sockets_streams_13_threads_1,
+                                         _1sockets_streams_6_threads_1_core_e,
+                                         _1sockets_streams_5_threads_1_binding,
+                                         _2sockets_streams_36_threads_1,
+                                         _2sockets_streams_4_threads_5,
+                                         _2sockets_streams_1_threads_36,
+                                         _2sockets_streams_1_threads_30,
+                                         _pecore_streams_5_threads_2,
+                                         _pecore_streams_5_threads_5,
+                                         _pecore_streams_4_threads_5,
+                                         _pecore_streams_4_threads_1,
+                                         _pecore_streams_5_threads_10,
+                                         _pecore_streams_26_threads_1,
+                                         _pecore_streams_26_threads_1_p,
+                                         _pecore_streams_26_threads_1_e,
+                                         _pecore_streams_1_threads_0,
+                                         _pecore_streams_1_threads_1_p,
+                                         _pecore_streams_1_threads_1_e,
+                                         _pecore_streams_1_threads_16_p,
+                                         _pecore_streams_1_threads_18_p,
+                                         _pecore_streams_1_threads_10_p,
+                                         _pecore_streams_10_threads_1_e,
+                                         _pecore_streams_10_threads_1_binding,
+                                         _pecore_streams_info_table_1,
+                                         _pecore_streams_info_table_2,
+                                         _pecore_streams_info_table_3));
+#endif
+} // namespace
\ No newline at end of file
diff --git a/src/inference/tests/unit/make_default_multi_threaded_test.cpp b/src/inference/tests/unit/make_default_multi_threaded_test.cpp
new file mode 100644
index 00000000000000..e1917394507045
--- /dev/null
+++ b/src/inference/tests/unit/make_default_multi_threaded_test.cpp
@@ -0,0 +1,153 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <common_test_utils/test_common.hpp>
+
+#include "openvino/runtime/threading/istreams_executor.hpp"
+#include "os/cpu_map_info.hpp"
+
+using namespace testing;
+using namespace ov;
+using namespace threading;
+
+namespace {
+
+#if defined(__linux__) || defined(_WIN32)
+
+struct MakeDefaultMultiThreadsTestCase {
+    std::vector<std::vector<int>> _proc_type_table;
+    int _num_streams;
+    std::vector<std::vector<int>> _streams_info_table;
+};
+
+class MakeDefaultMultiThreadsTest : public ov::test::TestsCommon,
+                                    public testing::WithParamInterface<std::tuple<MakeDefaultMultiThreadsTestCase>> {
+public:
+    void SetUp() override {
+        auto test_data = std::get<0>(GetParam());
+
+        CPU& cpu = cpu_info();
+        cpu._org_proc_type_table = test_data._proc_type_table;
+        cpu._proc_type_table = test_data._proc_type_table;
+        cpu._numa_nodes =
+            test_data._proc_type_table.size() > 1 ? static_cast<int>(test_data._proc_type_table.size()) - 1 : 1;
+
+        ov::threading::IStreamsExecutor::Config config{"make default multi threads test", test_data._num_streams};
+        auto streamsConfig = ov::threading::IStreamsExecutor::Config::make_default_multi_threaded(config);
+
+        ASSERT_EQ(streamsConfig.get_streams_info_table(), test_data._streams_info_table);
+    }
+};
+
+MakeDefaultMultiThreadsTestCase _1sockets_streams_1 = {
+    // param[in]: proc_type_table, {total processors, number of physical processors, number of Efficient processors,
+    // number of hyper threading processors}
+    {
+        {12, 6, 0, 6, 0, 0},
+    },
+    1,  // param[in]: the number of streams
+    // param[out]: streams info table
+    {
+        {1, 1, 6, 0, 0},
+    },
+};
+
+MakeDefaultMultiThreadsTestCase _1sockets_streams_2 = {
+    {
+        {12, 6, 0, 6, 0, 0},
+    },
+    2,
+    {
+        {1, 1, 6, 0, 0},
+        {1, 3, 6, 0, 0},
+    },
+};
+
+MakeDefaultMultiThreadsTestCase _2sockets_streams_1 = {
+    {
+        {72, 36, 0, 36, -1, -1},
+        {36, 18, 0, 18, 0, 0},
+        {36, 18, 0, 18, 1, 1},
+    },
+    1,
+    {
+        {1, 0, 36, -1, -1},
+        {0, 1, 18, 0, 0},
+        {0, 1, 18, 1, 1},
+    },
+};
+
+MakeDefaultMultiThreadsTestCase _2sockets_streams_4 = {
+    {
+        {72, 36, 0, 36, -1, -1},
+        {36, 18, 0, 18, 0, 0},
+        {36, 18, 0, 18, 1, 1},
+    },
+    4,
+    {
+        {2, 1, 9, 0, 0},
+        {2, 1, 9, 1, 1},
+    },
+};
+
+MakeDefaultMultiThreadsTestCase _pecore24_streams_1 = {
+    {
+        {24, 8, 8, 8, 0, 0},
+    },
+    1,
+    {
+        {1, 1, 8, 0, 0},
+    },
+};
+
+MakeDefaultMultiThreadsTestCase _pecore24_streams_3 = {
+    {
+        {24, 8, 8, 8, 0, 0},
+    },
+    3,
+    {
+        {3, 1, 2, 0, 0},
+    },
+};
+
+MakeDefaultMultiThreadsTestCase _pecore32_streams_1 = {
+    {
+        {32, 8, 16, 8, 0, 0},
+    },
+    1,
+    {
+        {1, 0, 24, 0, 0},
+        {0, 1, 8, 0, 0},
+        {0, 2, 16, 0, 0},
+    },
+};
+
+MakeDefaultMultiThreadsTestCase _pecore32_streams_5 = {
+    {
+        {32, 8, 16, 8, 0, 0},
+    },
+    5,
+    {
+        {1, 1, 5, 0, 0},
+        {3, 2, 5, 0, 0},
+        {1, 3, 5, 0, 0},
+    },
+};
+
+TEST_P(MakeDefaultMultiThreadsTest, MakeDefaultMultiThreads) {}
+
+INSTANTIATE_TEST_SUITE_P(smoke_MakeDefaultMultiThreads,
+                         MakeDefaultMultiThreadsTest,
+                         testing::Values(_1sockets_streams_1,
+                                         _1sockets_streams_2,
+                                         _2sockets_streams_1,
+                                         _2sockets_streams_4,
+                                         _pecore24_streams_1,
+                                         _pecore24_streams_3,
+                                         _pecore32_streams_1,
+                                         _pecore32_streams_5));
+#endif
+} // namespace
diff --git a/src/inference/tests/unit/system_allocator_test.cpp b/src/inference/tests/unit/system_allocator_test.cpp
deleted file mode 100644
index bab05bd634d6b3..00000000000000
--- a/src/inference/tests/unit/system_allocator_test.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "system_allocator.hpp"
-
-#include <gtest/gtest.h>
-
-#include <memory>
-
-#include "common_test_utils/test_common.hpp"
-
-using namespace InferenceEngine;
-class SystemAllocatorReleaseTests : public ov::test::TestsCommon {};
-
-class SystemAllocatorTests : public ov::test::TestsCommon {
-protected:
-    void SetUp() override {
-        ov::test::TestsCommon::SetUp();
-        ASSERT_EQ(allocator.get(), nullptr);
-        allocator = createSystemMemoryAllocator();
-        ASSERT_NE(allocator.get(), nullptr);
-    }
-
-    void TearDown() override {
-        ov::test::TestsCommon::TearDown();
-        ASSERT_NE(allocator.get(), nullptr);
-        allocator.reset();
-        ASSERT_EQ(allocator.get(), nullptr);
-    }
-
-    std::unique_ptr<SystemMemoryAllocator> createSystemMemoryAllocator() {
-        return std::unique_ptr<SystemMemoryAllocator>(new SystemMemoryAllocator());
-    }
-
-    std::unique_ptr<SystemMemoryAllocator> allocator;
-
-public:
-};
-
-TEST_F(SystemAllocatorTests, canAllocate) {
-    void* handle0 = allocator->alloc(0);
-    void* handle1 = allocator->alloc(100);
-    EXPECT_NE(handle0, nullptr);
-    EXPECT_NE(handle1, nullptr);
-    delete[] reinterpret_cast<char*>(handle0);
-    delete[] reinterpret_cast<char*>(handle1);
-}
-
-TEST_F(SystemAllocatorTests, canFree) {
-    EXPECT_TRUE(allocator->free(nullptr));
-    void* handle0 = reinterpret_cast<void*>(new char[0]);
-    void* handle1 = reinterpret_cast<void*>(new char[100]);
-    EXPECT_TRUE(allocator->free(handle0));
-    EXPECT_TRUE(allocator->free(handle1));
-}
-
-TEST_F(SystemAllocatorTests, canLockAndUnlockAllocatedMemory) {
-    // large block such as 10k will result in sigsegv if not allocated
-    void* handle = allocator->alloc(10000);
-    char* ptr = reinterpret_cast<char*>(allocator->lock(handle));
-    ptr[9999] = 11;
-    EXPECT_EQ(ptr[9999], 11);
-    allocator->unlock(ptr);
-    allocator->free(handle);
-}
diff --git a/src/inference/tests/unit/update_executor_config_test.cpp b/src/inference/tests/unit/update_executor_config_test.cpp
deleted file mode 100644
index abb3612eb8750d..00000000000000
--- a/src/inference/tests/unit/update_executor_config_test.cpp
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-
-#include "common_test_utils/test_common.hpp"
-#include "openvino/runtime/threading/istreams_executor.hpp"
-#include "os/cpu_map_info.hpp"
-
-using namespace testing;
-using namespace ov;
-using namespace threading;
-
-namespace {
-
-#if defined(__linux__) || defined(_WIN32)
-
-struct UpdateExecutorConfigTestCase {
-    ov::threading::IStreamsExecutor::Config _config;
-    std::vector<std::vector<int>> _proc_type_table;
-    std::vector<std::vector<int>> _cpu_mapping_table;
-    int _num_streams;
-    int _threads_per_stream;
-    ov::threading::IStreamsExecutor::Config::PreferredCoreType _core_type;
-    bool _cpu_pinning;
-    std::vector<std::vector<int>> _streams_info_table;
-    std::vector<std::vector<int>> _stream_processors;
-};
-
-class UpdateExecutorConfigTest : public ov::test::TestsCommon,
-                                 public testing::WithParamInterface<std::tuple<UpdateExecutorConfigTestCase>> {
-public:
-    void SetUp() override {
-        auto test_data = std::get<0>(GetParam());
-
-        CPU& cpu = cpu_info();
-        cpu._org_proc_type_table = test_data._proc_type_table;
-        cpu._proc_type_table = test_data._proc_type_table;
-        cpu._cpu_mapping_table = test_data._cpu_mapping_table;
-        cpu._numa_nodes = 1;
-
-        test_data._config.update_executor_config(test_data._num_streams,
-                                                 test_data._threads_per_stream,
-                                                 test_data._core_type,
-                                                 test_data._cpu_pinning);
-
-        ASSERT_EQ(test_data._num_streams, test_data._config._streams);
-        ASSERT_EQ(test_data._threads_per_stream, test_data._config._threadsPerStream);
-        ASSERT_EQ(test_data._core_type, test_data._config._threadPreferredCoreType);
-        ASSERT_EQ(test_data._cpu_pinning, test_data._config._cpu_reservation);
-        ASSERT_EQ(test_data._num_streams, test_data._config._streams);
-        ASSERT_EQ(test_data._streams_info_table, test_data._config._streams_info_table);
-        ASSERT_EQ(test_data._stream_processors, test_data._config._stream_processor_ids);
-    }
-};
-
-UpdateExecutorConfigTestCase _update_num_streams = {
-    ov::threading::IStreamsExecutor::Config{"update num streams test"},  // param[in]: initial configuration
-    // param[in]: proc_type_table, {total processors, number of physical processors, number of Efficient processors,
-    // number of hyper threading processors}
-    {
-        {12, 6, 0, 6, 0, 0},
-    },
-    // param[in]: cpu_mapping_table, {PROCESSOR_ID, NUMA_ID, SOCKET_ID, CORE_ID, CORE_TYPE, GROUP_ID, Used}
-    {
-        {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1},
-        {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1},
-        {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1},
-        {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1},
-        {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1},
-        {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1},
-        {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1},
-        {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1},
-        {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1},
-        {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1},
-        {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1},
-        {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1},
-    },
-    4,  // param[in]: the number of streams
-    1,  // param[in]: the number of threads per stream
-    ov::threading::IStreamsExecutor::Config::ANY,  // param[in]: specified cpu core type
-    false,                                         // param[in]: specified cpu pinning
-    // param[out]: streams_info_table, {NUMBER_OF_STREAMS, PROC_TYPE, THREADS_PER_STREAM, STREAM_NUMA_NODE_ID,
-    // STREAM_SOCKET_ID}
-    {
-        {4, MAIN_CORE_PROC, 1, 0, 0},
-    },
-    // param[out]: stream_processors, the list of processor ids on each stream.
-    {},
-};
-
-UpdateExecutorConfigTestCase _update_core_type = {
-    ov::threading::IStreamsExecutor::Config{"update core type test"},
-    {
-        {24, 8, 8, 8, 0, 0},
-    },
-    {
-        {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1},         {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1},
-        {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1},         {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1},
-        {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1},         {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1},
-        {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1},         {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1},
-        {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1},         {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1},
-        {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1},       {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1},
-        {12, 0, 0, 6, MAIN_CORE_PROC, 12, -1},       {13, 0, 0, 6, HYPER_THREADING_PROC, 13, -1},
-        {14, 0, 0, 7, MAIN_CORE_PROC, 14, -1},       {15, 0, 0, 7, HYPER_THREADING_PROC, 15, -1},
-        {16, 0, 0, 8, EFFICIENT_CORE_PROC, 16, -1},  {17, 0, 0, 9, EFFICIENT_CORE_PROC, 17, -1},
-        {18, 0, 0, 10, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 11, EFFICIENT_CORE_PROC, 19, -1},
-        {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1},
-        {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1},
-    },
-    8,
-    1,
-    ov::threading::IStreamsExecutor::Config::LITTLE,
-    false,
-    {
-        {8, EFFICIENT_CORE_PROC, 1, 0, 0},
-    },
-    {},
-};
-
-UpdateExecutorConfigTestCase _update_cpu_pinning = {
-    ov::threading::IStreamsExecutor::Config{"update cpu pinning test"},
-    {
-        {8, 4, 0, 4, 0, 0},
-    },
-    {
-        {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1},
-        {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1},
-        {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1},
-        {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1},
-        {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1},
-        {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1},
-        {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1},
-        {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1},
-    },
-    8,
-    1,
-    ov::threading::IStreamsExecutor::Config::ANY,
-    true,
-    {
-        {4, MAIN_CORE_PROC, 1, 0, 0},
-        {4, HYPER_THREADING_PROC, 1, 0, 0},
-    },
-    {
-        {0},
-        {2},
-        {4},
-        {6},
-        {1},
-        {3},
-        {5},
-        {7},
-    },
-};
-
-TEST_P(UpdateExecutorConfigTest, UpdateExecutorConfig) {}
-
-INSTANTIATE_TEST_SUITE_P(smoke_UpdateExecutorConfig,
-                         UpdateExecutorConfigTest,
-                         testing::Values(_update_num_streams, _update_core_type, _update_cpu_pinning));
-#endif
-} // namespace
diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp
index 2c54a0d17b2f8d..6c1c4e433b08e3 100644
--- a/src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp
+++ b/src/plugins/auto/tests/functional/shared_tests_instances/core_config.cpp
@@ -2,12 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "functional_test_utils/core_config.hpp"
-
 #include "shared_test_classes/base/ov_subgraph.hpp"
 
-void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}
-
 namespace ov {
 namespace test {
 
diff --git a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp
index e7c337a10d5648..a78d71f79b4c58 100644
--- a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp
+++ b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp
@@ -207,7 +207,7 @@ class AutoBatchAsyncInferRequestTest : public ::testing::TestWithParam
             t;
             for (int n = 0; n < sz; n++) {
-                IE_ASSERT(workerRequestPtr->_tasks.try_pop(t));
+                OPENVINO_ASSERT(workerRequestPtr->_tasks.try_pop(t));
                 t.first->m_sync_request->m_batched_request_status =
                     SyncInferRequest::eExecutionFlavor::TIMEOUT_EXECUTED;
                 t.first->m_request_without_batch->start_async();
diff --git a/src/plugins/intel_cpu/src/compiled_model.cpp b/src/plugins/intel_cpu/src/compiled_model.cpp
index 7c754a56930cb2..6e755ce01824a0 100644
--- a/src/plugins/intel_cpu/src/compiled_model.cpp
+++ b/src/plugins/intel_cpu/src/compiled_model.cpp
@@ -46,7 +46,6 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
       m_cfg{cfg},
       m_name{model->get_name()},
       m_loaded_from_cache(loaded_from_cache) {
-    bool isFloatModel = !ov::op::util::has_op_with_type<ov::op::v0::FakeQuantize>(m_model);
     m_mutex = std::make_shared<std::mutex>();
     const auto& core = m_plugin->get_core();
     if (!core)
@@ -57,15 +56,9 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
         // special case when all InferRequests are muxed into a single queue
         m_task_executor = m_plugin->get_executor_manager()->get_executor("CPU");
     } else {
-        auto streamsExecutorConfig =
-            is_cpu_map_available()
-                ? m_cfg.streamExecutorConfig
-                : IStreamsExecutor::Config::make_default_multi_threaded(m_cfg.streamExecutorConfig, isFloatModel);
-        streamsExecutorConfig._name = "CPUStreamsExecutor";
-        m_cfg.streamExecutorConfig._threads = streamsExecutorConfig._threads;
-        m_task_executor = m_plugin->get_executor_manager()->get_idle_cpu_streams_executor(streamsExecutorConfig);
+        m_task_executor = m_plugin->get_executor_manager()->get_idle_cpu_streams_executor(m_cfg.streamExecutorConfig);
     }
-    if (0 != cfg.streamExecutorConfig._streams) {
+    if (0 != cfg.streams) {
         m_callback_executor = m_plugin->get_executor_manager()->get_idle_cpu_streams_executor(
             IStreamsExecutor::Config{"CPUCallbackExecutor", 1, 0, IStreamsExecutor::ThreadBindingType::NONE});
     } else {
@@ -77,11 +70,11 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
     if (m_callback_executor)
         set_callback_executor(m_callback_executor);
 
-    int streams = std::max(1, m_cfg.streamExecutorConfig._streams);
+    int streams = std::max(1, m_cfg.streamExecutorConfig.get_streams());
     std::vector<Task> tasks;
     tasks.resize(streams);
     m_graphs.resize(streams);
-    if (m_cfg.streamExecutorConfig._streams != 0) {
+    if (m_cfg.streams != 0) {
         auto all_graphs_ready = [&] {
             return std::all_of(m_graphs.begin(), m_graphs.end(), [&](Graph& graph) {
                 return graph.IsReady();
@@ -117,7 +110,7 @@ CompiledModel::GraphGuard::Lock CompiledModel::get_graph() const {
     {
         std::lock_guard<std::mutex> lock{*m_mutex.get()};
         // disable weights caching if graph was created only once
-        auto weightsCache = m_cfg.streamExecutorConfig._streams != 1 ? m_socketWeights[socketId] : nullptr;
+        auto weightsCache = m_cfg.streams != 1 ? m_socketWeights[socketId] : nullptr;
 
         auto isQuantizedFlag = (m_cfg.lpTransformsMode == Config::On) &&
                                ov::pass::low_precision::LowPrecision::isFunctionQuantized(m_model);
@@ -187,7 +180,6 @@ ov::Any CompiledModel::get_property(const std::string& name) const {
     };
 
     if (name == ov::supported_properties) {
-        OPENVINO_SUPPRESS_DEPRECATED_START
         return std::vector<ov::PropertyName>{
             RO_property(ov::supported_properties.name()),
             RO_property(ov::model_name.name()),
@@ -208,7 +200,6 @@ ov::Any CompiledModel::get_property(const std::string& name) const {
             RO_property(ov::log::level.name()),
            RO_property(ov::intel_cpu::sparse_weights_decompression_rate.name()),
         };
-        OPENVINO_SUPPRESS_DEPRECATED_END
     }
 
     if (name == ov::model_name) {
@@ -216,16 +207,16 @@ ov::Any CompiledModel::get_property(const std::string& name) const {
         const std::string modelName = graph.dump()->get_friendly_name();
         return decltype(ov::model_name)::value_type(modelName);
     } else if (name == ov::optimal_number_of_infer_requests) {
-        const auto streams = config.streamExecutorConfig._streams;
+        const auto streams = config.streamExecutorConfig.get_streams();
         return decltype(ov::optimal_number_of_infer_requests)::value_type(
            streams > 0 ? streams : 1);  // ov::optimal_number_of_infer_requests has no negative values
streams : 1); // ov::optimal_number_of_infer_requests has no negative values } else if (name == ov::num_streams) { - const auto streams = config.streamExecutorConfig._streams; + const auto streams = config.streamExecutorConfig.get_streams(); return decltype(ov::num_streams)::value_type( streams); // ov::num_streams has special negative values (AUTO = -1, NUMA = -2) OPENVINO_SUPPRESS_DEPRECATED_START } else if (name == ov::affinity) { - const auto affinity = config.streamExecutorConfig._threadBindingType; + const auto affinity = config.threadBindingType; switch (affinity) { case IStreamsExecutor::ThreadBindingType::NONE: return ov::Affinity::NONE; @@ -239,7 +230,7 @@ ov::Any CompiledModel::get_property(const std::string& name) const { return ov::Affinity::NONE; OPENVINO_SUPPRESS_DEPRECATED_END } else if (name == ov::inference_num_threads) { - const auto num_threads = config.streamExecutorConfig._threads; + const auto num_threads = config.streamExecutorConfig.get_threads(); return decltype(ov::inference_num_threads)::value_type(num_threads); } else if (name == ov::enable_profiling.name()) { const bool perfCount = config.collectPerfCounters; diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp index a5ab2a3385b8a3..0bb7e80ea32895 100644 --- a/src/plugins/intel_cpu/src/config.cpp +++ b/src/plugins/intel_cpu/src/config.cpp @@ -26,9 +26,9 @@ using namespace dnnl::impl::cpu::x64; Config::Config() { // this is default mode #if defined(__APPLE__) || defined(_WIN32) - streamExecutorConfig._threadBindingType = IStreamsExecutor::NONE; + threadBindingType = IStreamsExecutor::NONE; #else - streamExecutorConfig._threadBindingType = IStreamsExecutor::CORES; + threadBindingType = IStreamsExecutor::CORES; #endif // for the TBB code-path, additional configuration depending on the OS and CPU types @@ -37,14 +37,14 @@ Config::Config() { // 'CORES' is not implemented for Win/MacOS; so the 'NONE' or 'NUMA' is default auto numaNodes = get_available_numa_nodes(); if (numaNodes.size() > 1) { - streamExecutorConfig._threadBindingType = IStreamsExecutor::NUMA; + threadBindingType = IStreamsExecutor::NUMA; } else { - streamExecutorConfig._threadBindingType = IStreamsExecutor::NONE; + threadBindingType = IStreamsExecutor::NONE; } # endif if (get_available_cores_types().size() > 1 /*Hybrid CPU*/) { - streamExecutorConfig._threadBindingType = IStreamsExecutor::HYBRID_AWARE; + threadBindingType = IStreamsExecutor::HYBRID_AWARE; } #endif CPU_DEBUG_CAP_ENABLE(applyDebugCapsProperties()); @@ -74,26 +74,63 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { if (streamExecutorConfigKeys.end() != std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) { streamExecutorConfig.set_property(key, val.as()); + streams = streamExecutorConfig.get_streams(); + threads = streamExecutorConfig.get_threads(); + threadsPerStream = streamExecutorConfig.get_threads_per_stream(); + if (key == ov::num_streams.name()) { + ov::Any value = val.as(); + auto streams_value = value.as(); + if (streams_value == ov::streams::NUMA) { + latencyThreadingMode = Config::LatencyThreadingMode::PER_NUMA_NODE; + } else if (streams_value == ov::streams::AUTO) { + hintPerfMode = ov::hint::PerformanceMode::THROUGHPUT; + changedHintPerfMode = true; + } else { + streamsChanged = true; + } + } OPENVINO_SUPPRESS_DEPRECATED_START - if (key == ov::affinity.name()) { + } else if (key == ov::affinity.name()) { + try { + ov::Affinity affinity = val.as(); changedCpuPinning = 
true; - try { - const auto affinity_val = val.as(); - enableCpuPinning = - (affinity_val == ov::Affinity::CORE || affinity_val == ov::Affinity::HYBRID_AWARE) ? true - : false; - } catch (const ov::Exception&) { + enableCpuPinning = + (affinity == ov::Affinity::CORE || affinity == ov::Affinity::HYBRID_AWARE) ? true : false; + switch (affinity) { + case ov::Affinity::NONE: + threadBindingType = IStreamsExecutor::ThreadBindingType::NONE; + break; + case ov::Affinity::CORE: { +#if (defined(__APPLE__) || defined(_WIN32)) + threadBindingType = IStreamsExecutor::ThreadBindingType::NUMA; +#else + threadBindingType = IStreamsExecutor::ThreadBindingType::CORES; +#endif + } break; + case ov::Affinity::NUMA: + threadBindingType = IStreamsExecutor::ThreadBindingType::NUMA; + break; + case ov::Affinity::HYBRID_AWARE: + threadBindingType = IStreamsExecutor::ThreadBindingType::HYBRID_AWARE; + break; + default: OPENVINO_THROW("Wrong value ", val.as(), "for property key ", key, ". Expected only ov::Affinity::CORE/NUMA/HYBRID_AWARE."); } + } catch (const ov::Exception&) { + OPENVINO_THROW("Wrong value ", + val.as(), + "for property key ", + key, + ". Expected only ov::Affinity::CORE/NUMA/HYBRID_AWARE."); } OPENVINO_SUPPRESS_DEPRECATED_END } else if (key == ov::hint::performance_mode.name()) { try { - hintPerfMode = val.as(); + hintPerfMode = !changedHintPerfMode ? val.as() : hintPerfMode; } catch (const ov::Exception&) { OPENVINO_THROW("Wrong value ", val.as(), @@ -306,8 +343,8 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { if (executionMode == ov::hint::ExecutionMode::PERFORMANCE) { inferencePrecision = ov::element::f32; #if defined(OV_CPU_ARM_ENABLE_FP16) - //fp16 precision is used as default precision on ARM for non-convolution networks - //fp16 ACL convolution is slower than fp32 + // fp16 precision is used as default precision on ARM for non-convolution networks + // fp16 ACL convolution is slower than fp32 if (modelType != ModelType::CNN) inferencePrecision = ov::element::f16; #else @@ -323,8 +360,8 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { _config.clear(); if (exclusiveAsyncRequests) { // Exclusive request feature disables the streams - streamExecutorConfig._streams = 1; - streamExecutorConfig._streams_changed = true; + streams = 1; + streamsChanged = true; } this->modelType = modelType; @@ -337,20 +374,6 @@ void Config::updateProperties() { if (!_config.empty()) return; - switch (streamExecutorConfig._threadBindingType) { - case IStreamsExecutor::ThreadBindingType::NONE: - _config.insert({ov::internal::cpu_bind_thread.name(), "NO"}); - break; - case IStreamsExecutor::ThreadBindingType::CORES: - _config.insert({ov::internal::cpu_bind_thread.name(), "YES"}); - break; - case IStreamsExecutor::ThreadBindingType::NUMA: - _config.insert({ov::internal::cpu_bind_thread.name(), ov::util::to_string(ov::Affinity::NUMA)}); - break; - case IStreamsExecutor::ThreadBindingType::HYBRID_AWARE: - _config.insert({ov::internal::cpu_bind_thread.name(), ov::util::to_string(ov::Affinity::HYBRID_AWARE)}); - break; - } if (collectPerfCounters == true) _config.insert({ov::enable_profiling.name(), "YES"}); else diff --git a/src/plugins/intel_cpu/src/config.h b/src/plugins/intel_cpu/src/config.h index 3a5d7a7c486388..8c4910eb02aa32 100644 --- a/src/plugins/intel_cpu/src/config.h +++ b/src/plugins/intel_cpu/src/config.h @@ -62,7 +62,13 @@ struct Config { size_t rtCacheCapacity = 0ul; #endif ov::threading::IStreamsExecutor::Config streamExecutorConfig; 
+    int streams = 1;
+    bool streamsChanged = false;
+    int threads = 0;
+    int threadsPerStream = 0;
+    ov::threading::IStreamsExecutor::ThreadBindingType threadBindingType =
+        ov::threading::IStreamsExecutor::ThreadBindingType::NONE;
     ov::hint::PerformanceMode hintPerfMode = ov::hint::PerformanceMode::LATENCY;
+    bool changedHintPerfMode = false;
     ov::log::Level logLevel = ov::log::Level::NO;
     uint32_t hintNumRequests = 0;
     bool enableCpuPinning = true;
diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp
index 3e47254e324a92..704894b24448ef 100644
--- a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp
+++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp
@@ -528,42 +528,49 @@ std::vector<std::vector<int>> generate_stream_info(const int streams,
                                                    std::vector<std::vector<int>>& proc_type_table,
                                                    int preferred_nthreads_per_stream) {
     int model_prefer_threads = preferred_nthreads_per_stream;
-    IStreamsExecutor::Config& executor_config = config.streamExecutorConfig;
 
     proc_type_table = apply_scheduling_core_type(config.schedulingCoreType, proc_type_table);
 
     proc_type_table = apply_hyper_threading(config.enableHyperThreading,
                                             config.changedHyperThreading,
                                             ov::util::to_string(config.hintPerfMode),
                                             proc_type_table);
-    executor_config._cpu_reservation = get_cpu_pinning(config.enableCpuPinning,
-                                                       config.changedCpuPinning,
-                                                       streams,
-                                                       config.latencyThreadingMode,
-                                                       proc_type_table);
+    auto cpu_reservation = get_cpu_pinning(config.enableCpuPinning,
+                                           config.changedCpuPinning,
+                                           streams,
+                                           config.latencyThreadingMode,
+                                           proc_type_table);
 
     if (-1 == preferred_nthreads_per_stream) {
         model_prefer_threads = get_model_prefer_threads(streams, proc_type_table, model, config);
     }
 
-    executor_config._streams_info_table = get_streams_info_table(executor_config._streams,
-                                                                 executor_config._streams_changed,
-                                                                 executor_config._threads,
-                                                                 config.hintNumRequests,
-                                                                 model_prefer_threads,
-                                                                 input_current_socket_id,
-                                                                 ov::util::to_string(config.hintPerfMode),
-                                                                 config.latencyThreadingMode,
-                                                                 proc_type_table);
+    auto streams_info_table = get_streams_info_table(config.streams,
+                                                     config.streamsChanged,
+                                                     config.threads,
+                                                     config.hintNumRequests,
+                                                     model_prefer_threads,
+                                                     input_current_socket_id,
+                                                     ov::util::to_string(config.hintPerfMode),
+                                                     config.latencyThreadingMode,
+                                                     proc_type_table);
+
+    config.streamExecutorConfig = IStreamsExecutor::Config{"CPUStreamsExecutor",
+                                                           config.streams,
+                                                           config.threadsPerStream,
+                                                           config.threadBindingType,
+                                                           1,
+                                                           0,
+                                                           config.threads,
+                                                           IStreamsExecutor::Config::PreferredCoreType::ANY,
+                                                           streams_info_table,
+                                                           cpu_reservation};
+
     return proc_type_table;
 }
 
 void get_num_streams(const int streams, const std::shared_ptr<ov::Model>& model, Config& config) {
-    IStreamsExecutor::Config& executor_config = config.streamExecutorConfig;
     std::vector<std::vector<int>> proc_type_table = get_proc_type_table();
 
     generate_stream_info(streams, -1, model, config, proc_type_table);
-
-    executor_config = IStreamsExecutor::Config::reserve_cpu_threads(executor_config);
-    executor_config._threadsPerStream = executor_config._streams_info_table[0][THREADS_PER_STREAM];
 }
 
 int get_default_latency_streams(Config::LatencyThreadingMode latency_threading_mode) {
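For reference, a minimal sketch (not part of the patch; the helper name and plumbing are assumptions) of how the new Config fields map onto the executor config that generate_stream_info() now builds:

    // Hypothetical helper mirroring the construction in generate_stream_info() above.
    ov::threading::IStreamsExecutor::Config make_executor_config(
            const Config& config,
            const std::vector<std::vector<int>>& streams_info_table,
            bool cpu_reservation) {
        return ov::threading::IStreamsExecutor::Config{"CPUStreamsExecutor",
                                                       config.streams,
                                                       config.threadsPerStream,
                                                       config.threadBindingType,
                                                       1,  // binding step, as used in the patch
                                                       0,  // binding offset, as used in the patch
                                                       config.threads,
                                                       ov::threading::IStreamsExecutor::Config::PreferredCoreType::ANY,
                                                       streams_info_table,
                                                       cpu_reservation};
    }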
diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp
index 5b4e325134074d..a33cdd1b2ce942 100644
--- a/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp
+++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp
@@ -89,16 +89,6 @@ std::vector<std::vector<int>> generate_stream_info(const int streams,
                                                    std::vector<std::vector<int>>& proc_type_table,
                                                    int preferred_nthreads_per_stream = -1);
 
-struct StreamCfg {
-    int num_streams;               // Number of streams
-    int num_threads;               // Number of threads
-    int big_core_streams;          // Number of streams in Performance-core(big core)
-    int small_core_streams;        // Number of streams in Efficient-core(small core)
-    int threads_per_stream_big;    // Threads per stream in big cores
-    int threads_per_stream_small;  // Threads per stream in small cores
-    int small_core_offset;
-};
-
 /**
  * @brief   Get information about number of streams, threads and pinning threads on different processors
  * @param[in]  streams   number of streams
diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp
index 2c6f5e8ba2fefa..a4c7477a5a04bb 100644
--- a/src/plugins/intel_cpu/src/graph.cpp
+++ b/src/plugins/intel_cpu/src/graph.cpp
@@ -306,7 +306,7 @@ void Graph::InitDescriptors() {
 }
 
 void Graph::ResolveInplaceDirections() {
-    OV_ITT_SCOPED_TASK(itt::domains::intel_cpu,  "Graph::ResolveInplaceDirections");
+    OV_ITT_SCOPED_TASK(itt::domains::intel_cpu, "Graph::ResolveInplaceDirections");
 
     for (auto& node : graphNodes) {
         resolveInPlaceDirection(node);
diff --git a/src/plugins/intel_cpu/src/graph_optimizer.cpp b/src/plugins/intel_cpu/src/graph_optimizer.cpp
index 4b77fec6c0c9dd..b0fa76a845e5b4 100644
--- a/src/plugins/intel_cpu/src/graph_optimizer.cpp
+++ b/src/plugins/intel_cpu/src/graph_optimizer.cpp
@@ -12,6 +12,7 @@
 #include "nodes/eltwise.h"
 #include "nodes/fake_quantize.h"
 #include "nodes/fullyconnected.h"
+#include "nodes/gather.h"
 #include "nodes/input.h"
 #include "nodes/interpolate.h"
 #include "nodes/memory.hpp"
@@ -71,6 +72,10 @@ void GraphOptimizer::ApplyCommonGraphOptimizations(Graph &graph) {
     FuseFCAndWeightsDecompression(graph);
     graph.RemoveDroppedNodes();
 
+    OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "FuseGatherAndWeightsDecompression");
+    FuseGatherAndWeightsDecompression(graph);
+    graph.RemoveDroppedNodes();
+
     OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "FuseConvolutionAndBias");
     FuseConvolutionMatMulDeconvAndBias(graph);
     graph.RemoveDroppedNodes();
@@ -507,6 +512,103 @@ void GraphOptimizer::FuseFCAndWeightsDecompression(Graph &graph) {
     }
 }
 
+void GraphOptimizer::FuseGatherAndWeightsDecompression(Graph &graph) {
+    std::set<ov::element::Type> supportedWeightsPrecisions{ov::element::u8};
+    auto expectedNode = [](NodePtr node, Type expectedType) {
+        return node->getType() == expectedType && node->getChildEdges().size() == 1;
+    };
+
+    auto& graphNodes = graph.GetNodes();
+    for (size_t i = 0; i < graphNodes.size(); i++) {
+        const auto gatherNode = dynamic_cast<node::Gather*>(graphNodes[i].get());
+        if (gatherNode == nullptr)
+            continue;
+
+        // Multiply
+        const auto multiplyNode = gatherNode->getParentEdgeAt(0)->getParent();
+        if (!expectedNode(multiplyNode, Type::Eltwise) || multiplyNode->getAlgorithm() != Algorithm::EltwiseMultiply ||
+            !multiplyNode->isConstant())
+            continue;
+
+        CPU_GRAPH_OPTIMIZER_SCOPE(FuseGatherAndWeightsDecompression);
+        const auto multiplyConstNode = multiplyNode->getParentEdgeAt(1)->getParent();
+        if (!expectedNode(multiplyConstNode, Type::Input))
+            continue;
+
+        const auto mulParent = multiplyNode->getParentEdgeAt(0)->getParent();
+        NodePtr subtractNode = mulParent;
+        if (!expectedNode(subtractNode, Type::Eltwise))
+            continue;
+        auto subtractConstNode = subtractNode->getParentEdgeAt(1)->getParent();
+        if (!expectedNode(subtractConstNode, Type::Input))
+            continue;
+
+        auto convertNode = subtractNode->getParentEdgeAt(0)->getParent();
+        if (!expectedNode(convertNode, Type::Convert))
+            continue;
+        const auto weightsNode = convertNode->getParentEdgeAt(0)->getParent();
+        if (!expectedNode(weightsNode, Type::Input))
+            continue;
+
+        // Precision limitations
+        if (supportedWeightsPrecisions.find(weightsNode->getOriginalOutputPrecisionAtPort(0)) == supportedWeightsPrecisions.end())
+            continue;
+
+        // Shape limitations
+        const auto weightsShape = weightsNode->getOutputShapeAtPort(0);
+        if (weightsShape != multiplyNode->getOutputShapeAtPort(0))
+            continue;
+
+        // Get decompressionConstShape
+        VectorDims decompressionConstShape;
+        const auto gatherInputWeightsShape = gatherNode->getInputShapeAtPort(0);
+        if (gatherInputWeightsShape.getRank() != 2u || weightsShape.getRank() != 2u)
+            continue;
+        // Should be [vocab_size, 1]
+        decompressionConstShape = VectorDims{gatherInputWeightsShape.getDims()[0], 1};
+
+        auto check_decompression_shape = [&decompressionConstShape](const VectorDims& shape_to_check) {
+            if (shape_to_check.size() != decompressionConstShape.size())
+                return false;
+            return std::equal(shape_to_check.begin(), shape_to_check.end(), decompressionConstShape.begin());
+        };
+        if (!check_decompression_shape(multiplyConstNode->getOutputShapeAtPort(0).getDims()))
+            continue;
+        if (!check_decompression_shape(subtractConstNode->getOutputShapeAtPort(0).getDims()))
+            continue;
+
+        // Fusion processing
+        auto *multiplyInputNode = dynamic_cast<node::Input*>(multiplyConstNode.get());
+        if (!multiplyInputNode) {
+            OPENVINO_THROW("Cannot cast ", multiplyConstNode->getName(), " to Input node.");
+        }
+        gatherNode->fuseDecompressionMultiply(multiplyInputNode->getMemoryPtr());
+
+        auto *subtractInputNode = dynamic_cast<node::Input*>(subtractConstNode.get());
+        if (!subtractInputNode) {
+            OPENVINO_THROW("Cannot cast ", subtractConstNode->getName(), " to Input node.");
+        }
+        gatherNode->fuseDecompressionSubtract(subtractInputNode->getMemoryPtr());
+
+        gatherNode->addOriginalLayer(multiplyNode->getOriginalLayers());
+        gatherNode->addOriginalLayer(convertNode->getOriginalLayers());
+
+        gatherNode->addOriginalLayer(subtractNode->getOriginalLayers());
+        auto subtractConstEdge = subtractConstNode->getChildEdges()[0].lock();
+        graph.RemoveEdge(subtractConstEdge);
+
+        auto multiplyConstEdge = multiplyConstNode->getChildEdges()[0].lock();
+        graph.RemoveEdge(multiplyConstEdge);
+
+        graph.DropNode(convertNode);
+        graph.DropNode(subtractNode);
+        graph.DropNode(multiplyNode);
+
+        const auto& weightsPrecision = weightsNode->getOriginalOutputPrecisionAtPort(0);
+        gatherNode->setOriginalInputPrecisionAtPort(0, weightsPrecision);
+    }
+}
+
 void GraphOptimizer::FuseConvolutionMatMulDeconvAndBias(Graph &graph) {
     auto& graphNodes = graph.GetNodes();
 
diff --git a/src/plugins/intel_cpu/src/graph_optimizer.h b/src/plugins/intel_cpu/src/graph_optimizer.h
index 45254aca36304d..0716bedc802c73 100644
--- a/src/plugins/intel_cpu/src/graph_optimizer.h
+++ b/src/plugins/intel_cpu/src/graph_optimizer.h
@@ -20,6 +20,7 @@ class GraphOptimizer {
 private:
     void FuseConvMatmulFCDeconvAndDQScales(Graph &graph);
     void FuseFCAndWeightsDecompression(Graph &graph);
+    void FuseGatherAndWeightsDecompression(Graph &graph);
     void FuseConvolutionMatMulDeconvAndBias(Graph &graph);
     void FuseDeconvolutionAndSimpleOperation(Graph &graph);
     void FuseMultiplyAndAdd(Graph &graph);
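For orientation, the constant path this pass matches can be built as follows (a minimal sketch using core opset ops; vocab_size, fea_dim, and the filler values are illustrative placeholders, mirroring the functional test added at the end of this patch):

    #include "openvino/openvino.hpp"

    std::shared_ptr<ov::Node> make_compressed_embedding(size_t vocab_size, size_t fea_dim,
                                                        const ov::Output<ov::Node>& indices) {
        // u8 weights -> Convert -> Subtract(zero point) -> Multiply(scale) -> Gather(axis = 0)
        auto weights = ov::op::v0::Constant::create(ov::element::u8, {vocab_size, fea_dim},
                                                    std::vector<uint8_t>(vocab_size * fea_dim, 0));
        auto convert = std::make_shared<ov::op::v0::Convert>(weights, ov::element::f32);
        auto zp = ov::op::v0::Constant::create(ov::element::f32, {vocab_size, 1},
                                               std::vector<float>(vocab_size, 128.0f));
        auto scale = ov::op::v0::Constant::create(ov::element::f32, {vocab_size, 1},
                                                  std::vector<float>(vocab_size, 0.05f));
        auto sub = std::make_shared<ov::op::v1::Subtract>(convert, zp);
        auto mul = std::make_shared<ov::op::v1::Multiply>(sub, scale);
        auto axis = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {0});
        return std::make_shared<ov::op::v8::Gather>(mul, indices, axis);
    }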
diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp
index f2dd4a479066af..a9a852fe32e817 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp
@@ -318,9 +318,10 @@ DnnlShapeAgnosticDataPtr DnnlFCPrimitive::createShapeAgnosticData(const FCAttrs&
     return std::make_shared<DnnlShapeAgnosticData>(postOpData);
 }
 
-static impl_desc_type implTypeFromPrimDesc(const dnnl::primitive_desc primDesc, const bool useSparseWeights) {
+static impl_desc_type implTypeFromPrimDesc(const dnnl::primitive_desc primDesc) {
     const auto implType = parse_impl_name(primDesc.impl_info_str());
-    if (implType == ov::intel_cpu::brgemm_avx512_amx && useSparseWeights) {
+    if (implType == ov::intel_cpu::brgemm_avx512_amx &&
+        primDesc.weights_desc().get_format_kind() == memory::format_kind::sparsed) {
         return ov::intel_cpu::brgemm_sparse_avx512_amx;
     }
 
@@ -340,7 +341,7 @@ DnnlFCPrimitive::DnnlFCPrimitive(const Key& key,
                                  implPriorities,
                                  key.sparseWeights,
                                  useWeightsDecompressionImpl(key.src->getPrecision(), key.wei->getPrecision()))),
-      m_implType(implTypeFromPrimDesc(m_primDesc, key.sparseWeights)),
+      m_implType(implTypeFromPrimDesc(m_primDesc)),
       m_srcDesc(DnnlExtensionUtils::makeDescriptor(m_primDesc.src_desc())),
       m_weiDesc(DnnlExtensionUtils::makeDescriptor(m_primDesc.weights_desc())),
       m_dstDesc(DnnlExtensionUtils::makeDescriptor(m_primDesc.dst_desc())),
diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
index 918f7b0afe218a..fe55e1a44c491f 100644
--- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
+++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
@@ -201,13 +201,13 @@ static bool useSparseWeightsDecompression(const NodePtr& weightsInput,
               "%, use sparse weights = ",
               sparseRate >= minSparseRate);
 
-    return sparseRate < minSparseRate;
+    return sparseRate >= minSparseRate;
 }
 
 void FullyConnected::initSupportedPrimitiveDescriptors() {
     attrs.withBias = getOriginalInputsNumber() == 3;
     attrs.dequantizationScales = getDQScales();
-    attrs.sparseWeights = useSparseWeightsDecompression(getParentEdgeAt(DATA_ID)->getParent(),
+    attrs.sparseWeights = useSparseWeightsDecompression(getParentEdgeAt(WEIGHTS_ID)->getParent(),
                                                         getOriginalInputPrecisionAtPort(DATA_ID),
                                                         context->getConfig().fcSparseWeiDecompressionRate);
     postOps = getPostOps(fusedWith);
diff --git a/src/plugins/intel_cpu/src/nodes/gather.cpp b/src/plugins/intel_cpu/src/nodes/gather.cpp
index 864426d8a0620d..7ada726aeaa38e 100644
--- a/src/plugins/intel_cpu/src/nodes/gather.cpp
+++ b/src/plugins/intel_cpu/src/nodes/gather.cpp
@@ -13,11 +13,16 @@
 #include "openvino/core/parallel.hpp"
 #include
 #include "common/cpu_memcpy.h"
+#include "common/cpu_convert.h"
 #include "utils/general_utils.h"
 #include "kernels/x64/gather_uni_kernel.hpp"
 #include
 #include "shape_inference/custom/gather.hpp"
 #include "utils/ngraph_utils.hpp"
+#include "snippets/utils.hpp"
+#include "memory_desc/dnnl_blocked_memory_desc.h"
+#include "openvino/core/type/element_type.hpp"
+#include "openvino/core/except.hpp"
 
 using namespace dnnl::impl::cpu;
 
@@ -29,6 +34,9 @@ namespace node {
 
 bool Gather::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
+        if (op->get_output_element_type(0) == element::string) {
+            return false;
+        }
         if (!one_of(op->get_type_info(),
                     ov::op::v7::Gather::get_type_info_static(),
                     ov::op::v8::Gather::get_type_info_static())) {
@@ -135,10 +143,19 @@ void Gather::initSupportedPrimitiveDescriptors() {
     // Implementation desc type will be redefined in the fn prepareParams if a kernel will be created.
     ov::element::Type dataPrecision = getOriginalInputPrecisionAtPort(GATHER_DATA);
+
+    canOptimizeCompressedEmbedding = false;
+    if ((decompressionSubtractPtr != nullptr) && (decompressionMultiplyPtr != nullptr)) {
+        if (dataPrecision != ov::element::u8 || !isAxisInputConst || inputShapes[GATHER_DATA].getRank() != 2u) {
+            OPENVINO_THROW("Compression gather doesn't support demanded precisions, axis, data rank");
+        }
+        canOptimizeCompressedEmbedding = true;
+    }
+
     addSupportedPrimDesc({{LayoutType::ncsp, dataPrecision},
                           {LayoutType::ncsp, ov::element::i32},
                           {LayoutType::ncsp, ov::element::i32, isAxisInputConst}},
-                         {{LayoutType::ncsp, dataPrecision}},
+                         {{LayoutType::ncsp, canOptimizeCompressedEmbedding ? ov::element::f32 : dataPrecision}},
                          ref_any);
 
     // Let's check for the special inPlace memory use case
@@ -273,6 +290,10 @@ void Gather::prepareParams() {
     if (getSelectedPrimitiveDescriptor() == nullptr)
         THROW_ERROR(" has unidentified preferable primitive descriptor.");
 
+    if (canOptimizeCompressedEmbedding) {
+        return;
+    }
+
     // short 1D vector fast execution impl (typical in shape infer subgraph)
     canOptimize1DCase = false;
     if (dataSrcRank <= 1 && dataMemPtr->getDesc().getPrecision() == ov::element::i32) {
@@ -335,10 +356,16 @@ void Gather::execute(dnnl::stream strm) {
         return;
     }
 
+    if (canOptimizeCompressedEmbedding) {
+        execCompressedCase();
+        return;
+    }
+
     if (canOptimize1DCase) {
         exec1DCase();
         return;
     }
+
 #if defined(OPENVINO_ARCH_X86_64)
     if (jitKernel && jitKernel->isSupportedConfiguration(afterAxisSize)) {
         const void* srcIndices = getSrcDataAtPort(GATHER_INDICES);
@@ -402,6 +429,10 @@ void Gather::executeDynamicImpl(dnnl::stream strm) {
         exec1DCase();
         return;
     }
+    if (canOptimizeCompressedEmbedding) {
+        execCompressedCase();
+        return;
+    }
 #if defined(OPENVINO_ARCH_X86_64)
     if (jitKernel && jitKernel->isSupportedConfiguration(afterAxisSize)) {
         const void* srcIndices = getSrcDataAtPort(GATHER_INDICES);
@@ -585,6 +616,46 @@ void Gather::exec1DCase() {
     }
 }
 
+void Gather::execCompressedCase() {
+    DEBUG_LOG(getName(), " execCompressedCase");
+    auto srcMemPtr = getParentEdgeAt(GATHER_DATA)->getMemoryPtr();
+    auto idxMemPtr = getParentEdgeAt(GATHER_INDICES)->getMemoryPtr();
+
+    const auto* psrc = srcMemPtr->getDataAs<const uint8_t>();
+    const auto* pidx = idxMemPtr->getDataAs<const int32_t>();
+
+    const auto* zp = decompressionSubtractPtr->getDataAs<const float>();
+    const auto* scale = decompressionMultiplyPtr->getDataAs<const float>();
+
+    auto* pdst = getDstDataAtPortAs<float>(0);
+
+    const auto& idxDims = idxMemPtr->getStaticDims();
+    const auto batch = idxDims[0];
+    const auto seqLen = idxDims[1];
+
+    auto axisDim = srcMemPtr->getStaticDims()[0];
+    auto feaDim = srcMemPtr->getStaticDims()[1];
+
+    parallel_for2d(batch, seqLen, [&](size_t b, size_t s) {
+        auto dstIdx = b * seqLen + s;
+        auto ii = pidx[dstIdx];
+        if (ii < 0) {
+            if (reverseIndexing)
+                ii += axisDim;
+            else
+                ii = axisDim;
+        }
+
+        auto* src = psrc + ii * feaDim;
+        auto* dst = pdst + dstIdx * feaDim;
+        auto& deq_zp = zp[ii];
+        auto& deq_scale = scale[ii];
+        for (size_t k = 0; k < feaDim; k++) {
+            dst[k] = (static_cast<float>(src[k]) - deq_zp) * deq_scale;
+        }
+    });
+}
+
 bool Gather::created() const {
     return getType() == Type::Gather;
 }
@@ -629,6 +700,30 @@ void Gather::resolveInPlaceEdges(Edge::LOOK look) {
     }
 }
 
+void Gather::fuseDecompressionMultiply(const MemoryCPtr& memory) {
+    fuseDecompressionConstant(memory, decompressionMultiplyPtr);
+}
+
+void Gather::fuseDecompressionSubtract(const MemoryCPtr& memory) {
+    fuseDecompressionConstant(memory, decompressionSubtractPtr);
+}
+
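+// Worked example of the per-element dequantization in execCompressedCase()
+// (illustrative numbers, not taken from the patch):
+//   src[k] = 200 (u8), zp[ii] = 128.0f, scale[ii] = 0.05f
+//   dst[k] = (200 - 128) * 0.05f = 3.6f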
+void Gather::fuseDecompressionConstant(const MemoryCPtr& memory, MemoryCPtr& decompressionValuesPtr) {
+    const auto decompression_prc = ov::element::f32;
+    if (memory->getDesc().getPrecision() == decompression_prc) {
+        decompressionValuesPtr = memory;
+    } else {
+        DnnlBlockedMemoryDesc memoryDesc(decompression_prc, memory->getShape());
+        decompressionValuesPtr = std::make_shared<Memory>(getEngine(), memoryDesc, nullptr, false);
+        const auto elementsCount = memory->getDescWithType<BlockedMemoryDesc>()->getPaddedElementsCount();
+        cpu_convert(memory->getData(),
+                    decompressionValuesPtr->getData(),
+                    DnnlExtensionUtils::DataTypeToElementType(memory->getDataType()),
+                    ov::element::f32,
+                    elementsCount);
+    }
+}
+
 }   // namespace node
 }   // namespace intel_cpu
 }   // namespace ov
diff --git a/src/plugins/intel_cpu/src/nodes/gather.h b/src/plugins/intel_cpu/src/nodes/gather.h
index 87f4f3a09ce5be..5e6b6c999585ff 100644
--- a/src/plugins/intel_cpu/src/nodes/gather.h
+++ b/src/plugins/intel_cpu/src/nodes/gather.h
@@ -27,6 +27,9 @@ class Gather : public Node {
     bool isExecutable() const override;
     void resolveInPlaceEdges(Edge::LOOK look) override;
 
+    void fuseDecompressionMultiply(const MemoryCPtr& memory);
+    void fuseDecompressionSubtract(const MemoryCPtr& memory);
+
     static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
 
     struct threadExecParams {
@@ -55,10 +58,14 @@ class Gather : public Node {
 private:
     void initShortParams(threadExecParams& p, uint64_t start);
     void execReference();
+    void fuseDecompressionConstant(const MemoryCPtr& memory, MemoryCPtr& decompressionValuesPtr);
 
     bool canOptimize1DCase = false;
     void exec1DCase();
 
+    bool canOptimizeCompressedEmbedding = false;
+    void execCompressedCase();
+
     bool isDataShapeStat = false;
     bool isIdxShapeStat = false;
     bool isAxisInputConst = false;
@@ -91,6 +98,9 @@ class Gather : public Node {
     static constexpr size_t GATHER_AXIS = 2;
 
     std::shared_ptr<jitGatherKernelBase> jitKernel;
+
+    MemoryCPtr decompressionSubtractPtr = nullptr;
+    MemoryCPtr decompressionMultiplyPtr = nullptr;
 };
 
 }   // namespace node
diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp
index a899f277e73d72..4b18b1b391ae13 100644
--- a/src/plugins/intel_cpu/src/plugin.cpp
+++ b/src/plugins/intel_cpu/src/plugin.cpp
@@ -8,7 +8,6 @@
 #include "itt.h"
 #include "openvino/runtime/intel_cpu/properties.hpp"
 #include "openvino/runtime/internal_properties.hpp"
-#include "openvino/runtime/performance_heuristics.hpp"
 #include "openvino/runtime/properties.hpp"
 #include "openvino/runtime/threading/cpu_streams_info.hpp"
 #include "openvino/runtime/threading/executor_manager.hpp"
@@ -185,272 +184,49 @@ static bool streamsSet(const ov::AnyMap& config) {
     return config.count(ov::num_streams.name());
 }
 
-void Engine::apply_performance_hints(ov::AnyMap& config, const std::shared_ptr<ov::Model>& model) const {
-    auto getNumStreamsThroughput = [&]() {
-        const auto isa = dnnl::get_effective_cpu_isa();
-        float isaSpecificThreshold = 1.0f;
-        switch (isa) {
-        case dnnl::cpu_isa::sse41:
-            isaSpecificThreshold = 0.5f;
-            break;
-        case dnnl::cpu_isa::avx2:
-        case dnnl::cpu_isa::avx512_core:
-            isaSpecificThreshold = 1.0f;
-            break;
-        case dnnl::cpu_isa::avx512_core_vnni:
-        case dnnl::cpu_isa::avx2_vnni:
-            isaSpecificThreshold = 2.0f;
-            break;
-        case dnnl::cpu_isa::avx512_core_amx:
-            isaSpecificThreshold = 4.0f;
-            break;
-        default:
-            isaSpecificThreshold = 1.0f;
-        }
-        // the more "capable" the CPU in general, the more streams we may want to keep to keep it utilized
-        const float memThresholdAssumeLimitedForISA =
-            ov::MemBandwidthPressure::LIMITED / isaSpecificThreshold;
-        const float L2_cache_size = dnnl::utils::get_cache_size(2 /*level*/, true /*per core */);
-        ov::MemBandwidthPressure networkToleranceForLowCache =
-            ov::mem_bandwidth_pressure_tolerance(model, L2_cache_size, memThresholdAssumeLimitedForISA);
-        const auto default_streams = get_streams_num(engConfig.streamExecutorConfig._threadBindingType,
-                                                     ov::threading::IStreamsExecutor::Config::StreamMode::DEFAULT,
-                                                     engConfig.streamExecutorConfig._enable_hyper_thread);
-        auto streams_info = default_streams;
-        if (networkToleranceForLowCache.max_mem_tolerance == ov::MemBandwidthPressure::UNKNOWN) {
-            if ((networkToleranceForLowCache.ratio_compute_convs == ov::MemBandwidthPressure::ALL) ||
-                (networkToleranceForLowCache.ratio_compute_deconvs == ov::MemBandwidthPressure::ALL)) {
-                // all relevant layers (convs, etc) are compute-limited, the most aggressive val for #streams
-                streams_info = get_streams_num(engConfig.streamExecutorConfig._threadBindingType,
-                                               ov::threading::IStreamsExecutor::Config::StreamMode::AGGRESSIVE,
-                                               engConfig.streamExecutorConfig._enable_hyper_thread);
-            }  // otherwise (no recognized layers) falling back to the default value
-        } else if (networkToleranceForLowCache.max_mem_tolerance > memThresholdAssumeLimitedForISA) {
-            // network is below the ISA-specific threshold
-            streams_info = get_streams_num(engConfig.streamExecutorConfig._threadBindingType,
-                                           ov::threading::IStreamsExecutor::Config::StreamMode::AGGRESSIVE,
-                                           engConfig.streamExecutorConfig._enable_hyper_thread);
-        } else if (networkToleranceForLowCache.max_mem_tolerance > ov::MemBandwidthPressure::LIMITED) {
-            // network is below general threshold
-            streams_info = get_streams_num(engConfig.streamExecutorConfig._threadBindingType,
-                                           ov::threading::IStreamsExecutor::Config::StreamMode::LESSAGGRESSIVE,
-                                           engConfig.streamExecutorConfig._enable_hyper_thread);
-            streams_info.num_streams = std::max(default_streams.num_streams, streams_info.num_streams);
-        }
-
-        auto num_requests = config.find(ov::hint::num_requests.name());
-        if (num_requests != config.end()) {  // arrived with config to the LoadNetwork (and thus higher pri)
-            int val = -1;
-            try {
-                ov::Any value = num_requests->second.as<std::string>();
-                val = value.as<int>();
-                if (val < 0)
-                    OPENVINO_THROW("invalid value!");
-            } catch (const ov::Exception&) {
-                OPENVINO_THROW("Wrong value of ",
-                               num_requests->second.as<std::string>(),
-                               " for property key ",
-                               ov::hint::num_requests.name(),
-                               ". Expected only positive integer numbers");
-            }
-            if (val > 0)
-                streams_info.num_streams = std::min(streams_info.num_streams, val);
-        } else if (engConfig.hintNumRequests > 0) {  // set thru SetConfig to the plugin, 2nd priority
-            streams_info.num_streams = std::min(streams_info.num_streams, static_cast<int>(engConfig.hintNumRequests));
-        }
-        return std::pair(std::to_string(streams_info.num_streams), streams_info);
-    };
-
-    auto getPerfHintName = [&]() {
-        const bool streamsExplicitlySetForModel = streamsSet(config);
-        // checking streams (to avoid overriding what user might explicitly set in the incoming config or previously via
-        // SetConfig)
-        if (streamsExplicitlySetForModel || streamsExplicitlySetForEngine)
-            return std::string();
-
-        const auto& perf_hint = config.find(ov::hint::performance_mode.name());
-        /* performance hints set for network has higher pririty than engine ones.
-         * This applies for all the configuration parameters */
-        const auto& perf_hint_name = (perf_hint != config.end()) ? perf_hint->second.as<std::string>()
-                                                                 : ov::util::to_string(engConfig.hintPerfMode);
-        return perf_hint_name;
-    };
-
-    // We compute both hints values because the optimal number of streams are computed based on ov::Model
-    // while we export model in cpu internal opset so we need to save precomputed optimal # streams for both hint modes
-    const auto latency_hints = ov::util::to_string(ov::streams::NUMA);
-    const auto tput_hints = getNumStreamsThroughput();
-
-    // save hints parameters to model rt_info
-    ov::AnyMap hints_props;
-    const auto latency_name =
-        ov::util::to_string(ov::hint::PerformanceMode::LATENCY) + "_" + std::string(ov::num_streams.name());
-    const auto tput_name =
-        ov::util::to_string(ov::hint::PerformanceMode::THROUGHPUT) + "_" + std::string(ov::num_streams.name());
-    hints_props.insert({latency_name, latency_hints});
-    hints_props.insert({tput_name, std::to_string(tput_hints.second.num_streams)});
-    model->set_rt_info(hints_props, "intel_cpu_hints_config");
-
-    const auto perf_hint_name = getPerfHintName();
-    if (perf_hint_name == ov::util::to_string(ov::hint::PerformanceMode::LATENCY)) {
-        config[ov::num_streams.name()] = latency_hints;
-    } else if (perf_hint_name == ov::util::to_string(ov::hint::PerformanceMode::THROUGHPUT)) {
-        config[ov::num_streams.name()] = tput_hints.first;
-        config[ov::threading::big_core_streams.name()] = std::to_string(tput_hints.second.big_core_streams);
-        config[ov::threading::small_core_streams.name()] = std::to_string(tput_hints.second.small_core_streams);
-        config[ov::threading::threads_per_stream_big.name()] = std::to_string(tput_hints.second.threads_per_stream_big);
-        config[ov::threading::threads_per_stream_small.name()] =
-            std::to_string(tput_hints.second.threads_per_stream_small);
-        config[ov::threading::small_core_offset.name()] = std::to_string(tput_hints.second.small_core_offset);
-    }
-}
-
 void Engine::get_performance_streams(Config& config, const std::shared_ptr<ov::Model>& model) const{
     const int latency_streams = get_default_latency_streams(config.latencyThreadingMode);
+    int streams_set = config.streams;
     int streams;
-    if (config.streamExecutorConfig._streams_changed) {
-        streams = config.streamExecutorConfig._streams;
+    if (config.streamsChanged) {
+        streams = streams_set;
     } else if (config.hintPerfMode == ov::hint::PerformanceMode::LATENCY) {
         streams = latency_streams;
     } else if (config.hintPerfMode == ov::hint::PerformanceMode::THROUGHPUT) {
         streams = 0;
     } else {
-        streams = config.streamExecutorConfig._streams == 1 ? 0 : config.streamExecutorConfig._streams;
+        streams = streams_set == 1 ? 0 : streams_set;
     }
 
-    if (!((0 == config.streamExecutorConfig._streams) && config.streamExecutorConfig._streams_changed)) {
+    if (!((0 == streams_set) && config.streamsChanged)) {
         get_num_streams(streams, model, config);
-    } else {
-        config.streamExecutorConfig.set_config_zero_stream();
     }
 }
 
 void Engine::calculate_streams(Config& conf, const std::shared_ptr<ov::Model>& model, bool imported) const {
-    // import config props from caching model
-    if (imported && !is_cpu_map_available()) {
-        if (model->has_rt_info("intel_cpu_hints_config")) {
-            const auto perf_mode = conf.hintPerfMode;
-            if (perf_mode == ov::hint::PerformanceMode::LATENCY || perf_mode == ov::hint::PerformanceMode::THROUGHPUT) {
-                const auto& hints_config = model->get_rt_info<ov::AnyMap>("intel_cpu_hints_config");
-                const auto hints_param_name =
-                    ov::util::to_string(perf_mode) + "_" + std::string(ov::num_streams.name());
-                const auto it = hints_config.find(hints_param_name);
-                if (it != hints_config.end()) {
-                    conf.readProperties({{std::string(ov::num_streams.name()), it->second.as<std::string>()}});
-                } else {
-                    OPENVINO_THROW("Cache file doesn't contain precalculated number of streams for mode ",
-                                   ov::util::to_string(perf_mode));
-                }
+    const auto model_prefer_name = std::string("MODEL_PREFER_THREADS");
+    if (imported && model->has_rt_info("intel_cpu_hints_config")) {
+        // load model_prefer_threads from cache
+        int cache_model_prefer;
+        const auto& hints_config = model->get_rt_info<ov::AnyMap>("intel_cpu_hints_config");
+        const auto it_model_prefer = hints_config.find(model_prefer_name);
+        if (it_model_prefer != hints_config.end()) {
+            try {
+                cache_model_prefer = it_model_prefer->second.as<int>();
+            } catch (const ov::Exception&) {
+                OPENVINO_THROW("Cache file doesn't have valid value for " + model_prefer_name);
             }
-        }
-    }
-    if (is_cpu_map_available()) {
-        const auto model_prefer_name = std::string("MODEL_PREFER_THREADS");
-        if (imported && model->has_rt_info("intel_cpu_hints_config")) {
-            // load model_prefer_threads from cache
-            int cache_model_prefer;
-            const auto& hints_config = model->get_rt_info<ov::AnyMap>("intel_cpu_hints_config");
-            const auto it_model_prefer = hints_config.find(model_prefer_name);
-            if (it_model_prefer != hints_config.end()) {
-                try {
-                    cache_model_prefer = it_model_prefer->second.as<int>();
-                } catch (const ov::Exception&) {
-                    OPENVINO_THROW("Cache file doesn't have valid value for " + model_prefer_name);
-                }
-
-                conf.modelPreferThreads = cache_model_prefer;
-            }
-        }
-        get_performance_streams(conf, model);
-        // save model_prefer_threads to model rt_info when loading network
-        if (!imported) {
-            ov::AnyMap hints_props;
-            hints_props.insert({model_prefer_name, std::to_string(conf.modelPreferThreads)});
-            model->set_rt_info(hints_props, "intel_cpu_hints_config");
+            conf.modelPreferThreads = cache_model_prefer;
         }
     }
-}
-
-StreamCfg Engine::get_streams_num(ov::threading::IStreamsExecutor::ThreadBindingType thread_binding_type,
-                                  int stream_mode,
-                                  const bool enable_hyper_thread) const {
-    const int sockets = static_cast<int>(get_available_numa_nodes().size());
-    const int num_cores =
-        thread_binding_type == IStreamsExecutor::ThreadBindingType::HYBRID_AWARE
-            ? parallel_get_max_threads()
-            : (sockets == 1 ? (enable_hyper_thread ? parallel_get_max_threads() : get_number_of_cpu_cores())
-                            : get_number_of_cpu_cores());
-    const int num_cores_phy = get_number_of_cpu_cores();
-    const int num_big_cores_phy = get_number_of_cpu_cores(true);
-    const int num_small_cores = num_cores_phy - num_big_cores_phy;
-    const int num_big_cores = num_cores > num_cores_phy ? num_big_cores_phy * 2 : num_big_cores_phy;
-    StreamCfg stream_cfg = {0};
-
-    if (stream_mode == IStreamsExecutor::Config::StreamMode::DEFAULT) {
-        // bare minimum of streams (that evenly divides available number of core)
-        if (thread_binding_type == IStreamsExecutor::ThreadBindingType::HYBRID_AWARE) {
-            if (0 == num_big_cores_phy % 4) {
-                stream_cfg.threads_per_stream_big = 4;
-            } else if (0 == num_big_cores_phy % 5) {
-                stream_cfg.threads_per_stream_big = 5;
-            } else if (0 == num_big_cores_phy % 3) {
-                stream_cfg.threads_per_stream_big = 3;
-            } else {  // if user disables some cores say in BIOS, so we got weird #cores which is not easy to divide
-                stream_cfg.threads_per_stream_big = num_big_cores_phy;
-            }
-
-            stream_cfg.big_core_streams = num_big_cores / stream_cfg.threads_per_stream_big;
-            stream_cfg.threads_per_stream_small = stream_cfg.threads_per_stream_big;
-            if (num_small_cores == 0) {
-                stream_cfg.threads_per_stream_small = 0;
-            } else if (num_small_cores < stream_cfg.threads_per_stream_small) {
-                stream_cfg.small_core_streams = 1;
-                stream_cfg.threads_per_stream_small = num_small_cores;
-                stream_cfg.threads_per_stream_big = stream_cfg.threads_per_stream_small;
-                // Balance the computation of physical core and logical core, the number of threads on the physical core
-                // and logical core should be equal
-                stream_cfg.big_core_streams = num_big_cores_phy / stream_cfg.threads_per_stream_big * 2;
-            } else {
-                stream_cfg.small_core_streams = num_small_cores / stream_cfg.threads_per_stream_small;
-            }
-        } else {
-            if (0 == num_cores % 4)
-                stream_cfg.num_streams = std::max(4, num_cores / 4);
-            else if (0 == num_cores % 5)
-                stream_cfg.num_streams = std::max(5, num_cores / 5);
-            else if (0 == num_cores % 3)
-                stream_cfg.num_streams = std::max(3, num_cores / 3);
-            else  // if user disables some cores say in BIOS, so we got weird #cores which is not easy to divide
-                stream_cfg.num_streams = 1;
-        }
-    } else if (stream_mode == IStreamsExecutor::Config::StreamMode::AGGRESSIVE) {
-        if (thread_binding_type == IStreamsExecutor::ThreadBindingType::HYBRID_AWARE) {
-            stream_cfg.big_core_streams = num_big_cores;
-            stream_cfg.small_core_streams = num_small_cores;
-            stream_cfg.threads_per_stream_big = num_big_cores / stream_cfg.big_core_streams;
-            stream_cfg.threads_per_stream_small =
-                num_small_cores == 0 ? 0 : num_small_cores / stream_cfg.small_core_streams;
-        } else {
-            stream_cfg.num_streams = num_cores_phy;
-        }
-    } else if (stream_mode == IStreamsExecutor::Config::StreamMode::LESSAGGRESSIVE) {
-        if (thread_binding_type == IStreamsExecutor::ThreadBindingType::HYBRID_AWARE) {
-            stream_cfg.big_core_streams = num_big_cores / 2;
-            stream_cfg.small_core_streams = num_small_cores / 2;
-            stream_cfg.threads_per_stream_big = num_big_cores / stream_cfg.big_core_streams;
-            stream_cfg.threads_per_stream_small =
-                num_small_cores == 0 ? 0 : num_small_cores / stream_cfg.small_core_streams;
-        } else {
-            stream_cfg.num_streams = num_cores_phy / 2;
-        }
-    } else {
-        OPENVINO_THROW("Wrong stream mode to get num of streams: ", stream_mode);
+    get_performance_streams(conf, model);
+    // save model_prefer_threads to model rt_info when loading network
+    if (!imported) {
+        ov::AnyMap hints_props;
+        hints_props.insert({model_prefer_name, std::to_string(conf.modelPreferThreads)});
+        model->set_rt_info(hints_props, "intel_cpu_hints_config");
     }
-    stream_cfg.num_streams = stream_cfg.num_streams > 0
-                                 ? stream_cfg.num_streams
-                                 : stream_cfg.big_core_streams + stream_cfg.small_core_streams;
-    stream_cfg.small_core_offset = num_small_cores == 0 ? 0 : num_big_cores;
-    return stream_cfg;
-}
 
 static bool shouldEnableLPT(const ov::AnyMap& modelConfig, const Config& engineConfig) {
@@ -543,10 +319,6 @@ Engine::compile_model(const std::shared_ptr<const ov::Model>& model, const ov::AnyMap& config) const {
 
     transformations.UpToLpt();
 
-    if (!is_cpu_map_available()) {
-        apply_performance_hints(config, cloned_model);
-    }
-
     conf.readProperties(config, modelType);
     calculate_streams(conf, cloned_model);
 
@@ -612,16 +384,16 @@ bool Engine::is_legacy_api() const {
 
 ov::Any Engine::get_property(const std::string& name, const ov::AnyMap& options) const {
     if (name == ov::optimal_number_of_infer_requests) {
-        const auto streams = engConfig.streamExecutorConfig._streams;
+        const auto streams = engConfig.streamExecutorConfig.get_streams();
        return decltype(ov::optimal_number_of_infer_requests)::value_type(
             streams);  // ov::optimal_number_of_infer_requests has no negative values
     } else if (name == ov::num_streams) {
-        const auto streams = engConfig.streamExecutorConfig._streams;
+        const auto streams = engConfig.streamExecutorConfig.get_streams();
         return decltype(ov::num_streams)::value_type(
             streams);  // ov::num_streams has special negative values (AUTO = -1, NUMA = -2)
         OPENVINO_SUPPRESS_DEPRECATED_START
     } else if (name == ov::affinity) {
-        const auto affinity = engConfig.streamExecutorConfig._threadBindingType;
+        const auto affinity = engConfig.threadBindingType;
         switch (affinity) {
         case IStreamsExecutor::ThreadBindingType::NONE:
             return ov::Affinity::NONE;
@@ -637,8 +409,8 @@ ov::Any Engine::get_property(const std::string& name, const ov::AnyMap& options) const {
     } else if (name == ov::device::id.name()) {
         return decltype(ov::device::id)::value_type{engConfig.device_id};
     } else if (name == ov::inference_num_threads) {
-        const auto num_threads = engConfig.streamExecutorConfig._threads;
-        return decltype(ov::inference_num_threads)::value_type(num_threads);
+        const auto threads = engConfig.streamExecutorConfig.get_threads();
+        return decltype(ov::inference_num_threads)::value_type(threads);
     } else if (name == ov::enable_profiling.name()) {
         const bool perfCount = engConfig.collectPerfCounters;
         return decltype(ov::enable_profiling)::value_type(perfCount);
@@ -707,7 +479,6 @@ ov::Any Engine::get_ro_property(const std::string& name, const ov::AnyMap& options) const {
         RO_property(ov::device::architecture.name()),
     };
     // the whole config is RW before model is loaded.
-    OPENVINO_SUPPRESS_DEPRECATED_START
     std::vector<ov::PropertyName> rwProperties {RW_property(ov::num_streams.name()),
                                                 RW_property(ov::affinity.name()),
                                                 RW_property(ov::inference_num_threads.name()),
                                                 RW_property(ov::log::level.name()),
                                                 RW_property(ov::intel_cpu::sparse_weights_decompression_rate.name()),
     };
-    OPENVINO_SUPPRESS_DEPRECATED_END
 
     std::vector<ov::PropertyName> supportedProperties;
     supportedProperties.reserve(roProperties.size() + rwProperties.size());
diff --git a/src/plugins/intel_cpu/src/plugin.h b/src/plugins/intel_cpu/src/plugin.h
index 53f52706f3c0fd..5f33c05a9ba8f9 100644
--- a/src/plugins/intel_cpu/src/plugin.h
+++ b/src/plugins/intel_cpu/src/plugin.h
@@ -48,11 +48,7 @@ class Engine : public ov::IPlugin {
     ov::Any get_ro_property(const std::string& name, const ov::AnyMap& options) const;
 
-    void apply_performance_hints(ov::AnyMap &config, const std::shared_ptr<ov::Model>& model) const;
-
-    void get_performance_streams(Config &config, const std::shared_ptr<ov::Model>& model) const;
-    StreamCfg get_streams_num(ov::threading::IStreamsExecutor::ThreadBindingType thread_binding_type,
-                              int stream_mode,
-                              const bool enable_hyper_thread = true) const;
+    void get_performance_streams(Config& config, const std::shared_ptr<ov::Model>& model) const;
     void calculate_streams(Config& conf, const std::shared_ptr<ov::Model>& model, bool imported = false) const;
 
     Config engConfig;
diff --git a/src/plugins/intel_cpu/src/serialize.cpp b/src/plugins/intel_cpu/src/serialize.cpp
index 0b91061684e741..4453854fb0754b 100644
--- a/src/plugins/intel_cpu/src/serialize.cpp
+++ b/src/plugins/intel_cpu/src/serialize.cpp
@@ -55,9 +55,24 @@ void ModelDeserializer::operator>>(std::shared_ptr<ov::Model>& model) {
     std::string xmlString;
     ov::Tensor dataBlob;
 
+    // get file size before seek content
+    // blob from cache may have other header, skip it
+    const size_t _pos = _istream.tellg();
+    _istream.seekg(0, _istream.end);
+    const size_t file_size = _istream.tellg();
+    _istream.seekg(_pos, _istream.beg);
+
     StreamSerialize::DataHeader hdr = {};
     _istream.read(reinterpret_cast<char*>(&hdr), sizeof hdr);
 
+    // check if model header contains valid data
+    bool isValidModel = (hdr.custom_data_offset == sizeof(hdr) + _pos) &&
+                        (hdr.custom_data_size == hdr.consts_offset - hdr.custom_data_offset) &&
+                        (hdr.consts_size == hdr.model_offset - hdr.consts_offset) &&
+                        (hdr.model_size == file_size - hdr.model_offset);
+    if (!isValidModel) {
+        OPENVINO_THROW("Failed to read CPU device xml header");
+    }
     // read model input/output precisions
     _istream.seekg(hdr.custom_data_offset);
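The validity conditions above encode the expected blob layout; spelled out (offsets are from the start of the stream, and _pos allows for an extra cache header in front):

    // [cache header (_pos bytes)][DataHeader][custom data][consts][model xml ... EOF]
    // custom_data_offset == _pos + sizeof(DataHeader)
    // custom_data_size   == consts_offset - custom_data_offset
    // consts_size        == model_offset  - consts_offset
    // model_size         == file_size     - model_offset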
diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp
index be08b6660785d8..6489a2947afa82 100644
--- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp
+++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp
@@ -55,16 +55,26 @@ StatefulSDPAFusion::StatefulSDPAFusion() {
     auto multi_query_bcst = [](std::shared_ptr<Node> kv) {
         auto reshape_kv = wrap_type<opset6::Reshape>({kv, any_input()});
-        auto unsqueeze_kv = makePattern<opset1::Unsqueeze>({kv, -2});
-        auto multiply_kv = wrap_type<opset1::Multiply>({reshape_kv | unsqueeze_kv, any_input()});
+        auto unsqueeze_kv = makePattern<opset1::Unsqueeze>({kv, any_input()});
+
+        auto check_one = [] (Output<Node> output) -> bool {
+            auto node = std::dynamic_pointer_cast<opset1::Constant>(output.get_node_shared_ptr());
+            const auto& bcst_arg = node->cast_vector<float>();
+            return std::all_of(bcst_arg.begin(), bcst_arg.end(), [](float i) {
+                return i == 1.0f;
+            });
+        };
+        auto constant_bcst = wrap_type<opset1::Constant>(check_one);
+
+        auto computed_bcst = makePattern<opset1::Broadcast>({wrap_type<opset1::Constant>(check_one),
+                                                             any_input(), any_input()}, {{"mode", "numpy"}});
+
+        auto multiply_kv = wrap_type<opset1::Multiply>({reshape_kv | unsqueeze_kv, constant_bcst | computed_bcst});
         return wrap_type<opset6::Reshape>({multiply_kv, any_input()});
     };
-    auto k_bcst = multi_query_bcst(concat_k);
-    auto v_bcst = multi_query_bcst(concat_v);
-
-    auto present_k = concat_k | k_bcst;
-    auto present_v = concat_v | v_bcst;
+    auto present_k = concat_k | multi_query_bcst(concat_k);
+    auto present_v = concat_v | multi_query_bcst(concat_v);
 
     // canonical q/k/v shape definition: [B,H,...L,S]
     auto sdp0 = makePattern<opset13::ScaledDotProductAttention>({cur_q, present_k, present_v});
@@ -98,7 +108,7 @@ StatefulSDPAFusion::StatefulSDPAFusion() {
     auto find_assign = [&](const ov::Output<ov::Node>& out, opset6::Assign*& assign, opset1::Convert*& cvt) {
         auto present_to = out.get_target_inputs();
-        if (present_to.size() != 2)
+        if (present_to.size() < 2)
             return false;
         for (auto& to : present_to) {
             auto to_node = to.get_node();
@@ -136,25 +146,6 @@ StatefulSDPAFusion::StatefulSDPAFusion() {
         if (!check_valid_children_type(past_k_node) || !check_valid_children_type(past_v_node)) {
             return false;
         }
-        // check whether multi-query's bcst has valid parameter.
-        auto check_bcst = [](const std::shared_ptr<Node>& ptr) {
-            const auto multiply = ptr->get_input_node_shared_ptr(0);
-            const auto constant_node = ov::as_type_ptr<opset1::Constant>(multiply->get_input_node_shared_ptr(1));
-            if (!constant_node)
-                return false;
-            const auto& bcst_arg = constant_node->cast_vector<float>();
-            return std::all_of(bcst_arg.begin(), bcst_arg.end(), [](float i) {
-                return i == 1.0;
-            });
-        };
-
-        if (pattern_map.count(k_bcst) && !check_bcst(pattern_map.at(k_bcst).get_node_shared_ptr())) {
-            return false;
-        }
-
-        if (pattern_map.count(v_bcst) && !check_bcst(pattern_map.at(v_bcst).get_node_shared_ptr())) {
-            return false;
-        }
 
         const auto concat_k_node = ov::as_type_ptr<opset6::Concat>(pattern_map.at(concat_k).get_node_shared_ptr());
         const auto concat_v_node = ov::as_type_ptr<opset6::Concat>(pattern_map.at(concat_v).get_node_shared_ptr());
diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp
index b427ce5f84dafe..7f12b67d54aae4 100644
--- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp
+++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp
@@ -10,6 +10,7 @@
 #include
 #include "utils/general_utils.h"
 #include
+#include "transformations/utils.hpp"
 
 #include "itt.hpp"
 
@@ -401,6 +402,9 @@ bool isSuitableMatMulWithConstantPath(const std::shared_ptr<Node>& node) {
            !ov::is_type<ov::op::v0::Constant>(node->get_input_node_shared_ptr(1)) &&
            ov::op::util::is_on_constant_path(node->input_value(1));
 }
+bool isSuitableGatherWithConstantPath(const std::shared_ptr<Node>& node) {
+    return is_gather_with_compressed_weights(node);
+}
 // Continue fusing chain of the passed type if the node has one child
 // Otherwise mark node as FusedTerminator (Fused, but fusing chain is interrupted)
 void PropagateIfHasOnlyChild(const std::shared_ptr<Node> &node, NodeFusingType nodeType) {
@@ -477,7 +481,14 @@ bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr<ov::Model> &m) {
             };
             std::unordered_set<Node*> visited;
             ov::op::util::visit_constant_path(node->get_input_node_ptr(1), visited, markup_func);
+        } else if (isSuitableGatherWithConstantPath(node)) {
+            auto markup_func = [](Node* node) {
+                SetSnippetsNodeType(node->shared_from_this(), snippets::pass::SnippetsNodeType::SkippedByPlugin);
+            };
+            std::unordered_set<Node*> visited;
+            ov::op::util::visit_constant_path(node->get_input_node_ptr(0), visited, markup_func);
         }
+
         if (isSuitableConvolutionParent(node)) {
             // Initiate fusing chain
             SetNodeFusingType(node, NodeFusingType::FusedWithConvolution);
diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
index bd546f844dbfdd..17c2574affb081 100644
--- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
+++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
@@ -85,6 +85,7 @@
 #include "transformations/init_node_info.hpp"
 #include "utils/ngraph_transformation.hpp"
 #include "utils/print_model.hpp"
+#include "transformations/utils.hpp"
 
 // LPT transformations
 #include "low_precision/add.hpp"
@@ -160,8 +161,13 @@ bool Transformations::is_decompression_multiply(const_node_ptr& node) const {
     }
 
     if (consumer != nullptr && ov::is_type<ov::opset1::Reshape>(consumer)) {
         consumer = get_single_consumer(consumer);
-        if (consumer != nullptr && ov::is_type<ov::opset1::MatMul>(consumer)) {
-            return true;
+        if (consumer != nullptr) {
+            if (ov::is_type<ov::opset1::MatMul>(consumer)) {
+                return true;
+            }
+            if (is_gather_with_compressed_weights(consumer)) {
+                return true;
+            }
         }
     }
     return false;
@@ -694,7 +700,7 @@ void Transformations::MainSnippets(void) {
     // To avoid sitations when Transpose is not alone node between MatMul and Result,
     // Plugin disables Transpose tokenization on output
     tokenization_config.mha_token_enable_transpose_on_output = (inferencePrecision == ov::element::f32);
-    tokenization_config.concurrency = config.streamExecutorConfig._threadsPerStream;
+    tokenization_config.concurrency = config.threadsPerStream;
     if (tokenization_config.concurrency == 0)
         tokenization_config.concurrency = parallel_get_max_threads();
     // The optimization "SplitDimensionM" depends on target machine (thread count).
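A minimal usage sketch (the scanning helper below is hypothetical; is_gather_with_compressed_weights itself is declared in utils.hpp, shown next):

    #include "transformations/utils.hpp"

    // Walk a model and report whether any op matches the compressed-gather pattern.
    bool model_has_compressed_gather(const std::shared_ptr<ov::Model>& model) {
        for (const auto& op : model->get_ordered_ops()) {
            if (ov::intel_cpu::is_gather_with_compressed_weights(op))
                return true;
        }
        return false;
    }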
diff --git a/src/plugins/intel_cpu/src/transformations/utils.cpp b/src/plugins/intel_cpu/src/transformations/utils.cpp
index f1943027917e8c..cd9da617108644 100644
--- a/src/plugins/intel_cpu/src/transformations/utils.cpp
+++ b/src/plugins/intel_cpu/src/transformations/utils.cpp
@@ -40,5 +40,89 @@ bool has_matmul_with_compressed_weights(const std::shared_ptr<const ov::Model>& model) {
     return false;
 }
 
+// Check specific pattern:
+//  Constant
+//     |
+//  Convert   Constant
+//      \      /
+//      Subtract   Constant
+//          \       /
+//          Multiply
+//             |
+//          Convert  input  Constant
+//              \      |      /
+//               \     |     /
+//                 Gather
+bool is_gather_with_compressed_weights(const std::shared_ptr<const ov::Node>& node) {
+    if (!ov::is_type<ov::op::util::GatherBase>(node)) {
+        return false;
+    }
+    if (node->get_input_size() != 3) {
+        return false;
+    }
+
+    auto is_constant_with_2d = [](const ov::Node* node) {
+        const ov::Node* const_node = ov::is_type<ov::op::v0::Convert>(node) ? node->get_input_node_ptr(0) : node;
+
+        if (ov::is_type<ov::op::v0::Constant>(const_node) && const_node->get_input_size() == 0) {
+            auto cur_shape = const_node->get_output_shape(0);
+            if (cur_shape.size() == 2 && cur_shape[1] == 1u) {
+                return true;
+            }
+        }
+        return false;
+    };
+
+    // Check axis
+    auto axis = node->get_input_node_ptr(2);
+    auto axisPtr = ov::as_type<ov::op::v0::Constant>(axis);
+    if (!axisPtr) {
+        return false;
+    }
+    int32_t axis_const = axisPtr->cast_vector<int32_t>()[0];
+    if (axis_const != 0) {
+        return false;
+    }
+
+    // Check weights
+    ov::Node* multiply = nullptr;
+    auto multiply_convert = node->get_input_node_ptr(0);
+    if (ov::is_type<ov::op::v0::Convert>(multiply_convert)) {
+        multiply = multiply_convert->get_input_node_ptr(0);
+    } else {
+        multiply = node->get_input_node_ptr(0);
+    }
+    if (!ov::is_type<ov::op::v1::Multiply>(multiply)) {
+        return false;
+    }
+    if (!is_constant_with_2d(multiply->get_input_node_ptr(1))) {
+        return false;
+    }
+
+    auto subtract = multiply->get_input_node_ptr(0);
+    if (!ov::is_type<ov::op::v1::Subtract>(subtract)) {
+        return false;
+    }
+    if (!is_constant_with_2d(subtract->get_input_node_ptr(1))) {
+        return false;
+    }
+
+    auto weights_convert = subtract->get_input_node_ptr(0);
+    if (!ov::is_type<ov::op::v0::Convert>(weights_convert)) {
+        return false;
+    }
+
+    auto weights = weights_convert->get_input_node_ptr(0);
+    auto weights_ptr = ov::as_type<ov::op::v0::Constant>(weights);
+    if (!weights_ptr) {
+        return false;
+    }
+    auto weights_shape = weights_ptr->get_output_shape(0);
+    if (weights_shape.size() != 2u) {
+        return false;
+    }
+    return true;
+}
+
 }   // namespace intel_cpu
 }   // namespace ov
diff --git a/src/plugins/intel_cpu/src/transformations/utils.hpp b/src/plugins/intel_cpu/src/transformations/utils.hpp
index 64ebca6dfa5ed9..fabc95abf1525c 100644
--- a/src/plugins/intel_cpu/src/transformations/utils.hpp
+++ b/src/plugins/intel_cpu/src/transformations/utils.hpp
@@ -11,5 +11,7 @@ namespace intel_cpu {
 
 bool has_matmul_with_compressed_weights(const std::shared_ptr<const ov::Model>& model);
 
+bool is_gather_with_compressed_weights(const std::shared_ptr<const ov::Node>& node);
+
 }   // namespace intel_cpu
 }   // namespace ov
diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp
index 11de096fc0f26f..3b758f928e0871 100644
--- a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp
+++ b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp
@@ -197,12 +197,10 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigHintInferencePrecision) {
     ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision));
     ASSERT_EQ(value, forcedPrecision);
 
-    OPENVINO_SUPPRESS_DEPRECATED_START
     const auto forced_precision_deprecated = ov::element::f32;
     ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(forced_precision_deprecated)));
     ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision));
     ASSERT_EQ(value, forced_precision_deprecated);
-    OPENVINO_SUPPRESS_DEPRECATED_END
 }
 
 TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigEnableProfiling) {
diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.hpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.hpp
index 263d3b4914d0b5..7b8e4ba3385233 100644
--- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.hpp
+++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.hpp
@@ -25,7 +25,7 @@ typedef std::tuple<
     ElementType,     // Input precision
     ElementType,     // Output precision
     InputShape,      // Input shape
-    LayerTestsUtils::TargetDevice   // Device name
+    ov::test::TargetDevice          // Device name
> convLayerTestParamsSet;

typedef std::tuple<
diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_bag_offsets_sum.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_bag_offsets_sum.cpp
index 7248e95d5a8225..2b3dc2dd3398b3 100644
--- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_bag_offsets_sum.cpp
+++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_bag_offsets_sum.cpp
@@ -23,7 +23,7 @@ typedef std::tuple<
-                   LayerTestsUtils::TargetDevice>
+                   ov::test::TargetDevice>
     embeddingBagOffsetsSumLayerTestParamsSet;
 
 class EmbeddingBagOffsetsSumLayerCPUTest : public testing::WithParamInterface<embeddingBagOffsetsSumLayerTestParamsSet>,
diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_bag_packed_sum.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_bag_packed_sum.cpp
index d93dcf877054b8..f976441d6351b3 100644
--- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_bag_packed_sum.cpp
+++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_bag_packed_sum.cpp
@@ -20,7 +20,7 @@ typedef std::tuple<
-                   LayerTestsUtils::TargetDevice>
+                   ov::test::TargetDevice>
     embeddingBagPackedSumLayerTestParamsSet;
 
 class EmbeddingBagPackedSumLayerCPUTest : public testing::WithParamInterface<embeddingBagPackedSumLayerTestParamsSet>,
diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_segments_sum.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_segments_sum.cpp
index ada1ffe8a9ca95..50b57b7a6dd822 100644
--- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_segments_sum.cpp
+++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/embedding_segments_sum.cpp
@@ -24,7 +24,7 @@ typedef std::tuple<
-                   LayerTestsUtils::TargetDevice>
+                   ov::test::TargetDevice>
     embeddingSegmentsSumLayerTestParamsSet;
 
 class EmbeddingSegmentsSumLayerCPUTest : public testing::WithParamInterface<embeddingSegmentsSumLayerTestParamsSet>,
diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/range.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/range.cpp
index ba2f6357221b07..985456e52fdaeb 100644
--- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/range.cpp
+++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/range.cpp
@@ -11,7 +11,7 @@
 //
 // namespace CPULayerTestsDefinitions {
 // typedef std::tuple<
-//         std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ngraph::Shape>>>,  // input shape
+//         std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ov::Shape>>>,      // input shape
 //         std::tuple<float, float, float>,  // start, limit, delta
 //         Precision                         // output type
 //> RangeSpecificParams;
@@ -19,7 +19,7 @@
 // typedef std::tuple<
 //         RangeSpecificParams,
 //         InferenceEngine::Precision,     // Net precision
-//         LayerTestsUtils::TargetDevice   // Device name
+//         ov::test::TargetDevice          // Device name
 //> RangeLayerTestParams;
 //
 // typedef std::tuple<
@@ -38,7 +38,7 @@
 //         std::tie(basicParamsSet, cpuParams) = obj.param;
 //         std::string td;
 //         Precision netPrc = Precision::FP32;
-//         std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ngraph::Shape>>> shapes;
+//         std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ov::Shape>>> shapes;
 //
 //         RangeSpecificParams rangePar;
 //         std::tie(rangePar, netPrc, td) = basicParamsSet;
@@ -98,7 +98,7 @@
 //         std::tie(basicParamsSet, cpuParams) = this->GetParam();
 //         std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
 //         CPULayerTestsDefinitions::RangeSpecificParams rangeParams;
-//         std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ngraph::Shape>>> shapes;
+//         std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ov::Shape>>> shapes;
 //         std::tie(rangeParams, inPrc, targetDevice) = basicParamsSet;
 //         std::tuple<float, float, float> rangeInputs;
 //
@@ -111,9 +111,9 @@
 //         step = std::get<2>(rangeInputs);
 //         auto ngOutPr = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc);
 //         auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc);
-//         auto startPar = std::make_shared<ov::op::v0::Parameter>(ngNetPrc, ngraph::Shape{});
-//         auto stopPar = std::make_shared<ov::op::v0::Parameter>(ngNetPrc, ngraph::Shape{});
-//         auto stepPar = std::make_shared<ov::op::v0::Parameter>(ngNetPrc, ngraph::Shape{});
+//         auto startPar = std::make_shared<ov::op::v0::Parameter>(ngNetPrc, ov::Shape{});
+//         auto stopPar = std::make_shared<ov::op::v0::Parameter>(ngNetPrc, ov::Shape{});
+//         auto stepPar = std::make_shared<ov::op::v0::Parameter>(ngNetPrc, ov::Shape{});
 //         auto range = std::make_shared(startPar, stopPar, stepPar, ngOutPr);
 //         range->get_rt_info() = getCPUInfo();
 //         selectedType = std::string("ref_any_") + (inPrc == outPrc ? inPrc.name() : "FP32");
@@ -149,14 +149,14 @@
 //         InferenceEngine::Precision::I32
 // };
 //
-// std::vector<std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ngraph::Shape>>>> inShapesDynamic =
+// std::vector<std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ov::Shape>>>> inShapesDynamic =
 // {
 //         {{ngraph::PartialShape(), ngraph::PartialShape(), ngraph::PartialShape()},
-//          {{ngraph::Shape{}, ngraph::Shape{}, ngraph::Shape{}}, {ngraph::Shape{}, ngraph::Shape{}, ngraph::Shape{}}}}
+//          {{ov::Shape{}, ov::Shape{}, ov::Shape{}}, {ov::Shape{}, ov::Shape{}, ov::Shape{}}}}
 // };
-// std::vector<std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ngraph::Shape>>>>
+// std::vector<std::pair<std::vector<ngraph::PartialShape>, std::vector<std::vector<ov::Shape>>>>
 //     inShapesPseudoStatic = {
-//         {{}, {{ngraph::Shape{}, ngraph::Shape{}, ngraph::Shape{}}}}
+//         {{}, {{ov::Shape{}, ov::Shape{}, ov::Shape{}}}}
 // };
 //
 // const std::vector<std::tuple<float, float, float>> rangeInputValues = {
diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/roi_pooling.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/roi_pooling.cpp
index 8f2bcb583782e7..8d67ee0724ceb2 100644
--- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/roi_pooling.cpp
+++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/roi_pooling.cpp
@@ -23,7 +23,7 @@ using roiPoolingParams = std::tuple<
-                                    LayerTestsUtils::TargetDevice>;  // Device name
+                                    ov::test::TargetDevice>;         // Device name
 
 using ROIPoolingCPUTestParamsSet = std::tuple<
 
 using ROIAlignLayerCPUTestParamsSet = std::tuple<
diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/concat_const_inplace.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/concat_const_inplace.cpp
index f070cbca71604d..bb12b5de29b23d 100644
--- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/concat_const_inplace.cpp
+++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/concat_const_inplace.cpp
@@ -52,7 +52,9 @@ class ConcatConstantInPlaceTest : public testing::WithParamInterface
         std::vector<float> weightValuesFP32(12);
         ov::Shape convFilterShape = {1, 12, 1, 1};
-        FuncTestUtils::fillInputsBySinValues(weightValuesFP32.data(), weightValuesFP32.size());
+        for (size_t i = 0; i < weightValuesFP32.size(); i++) {
+            weightValuesFP32.data()[i] = sin(static_cast<float>(i));
+        }
         auto weightsNode = std::make_shared<ov::op::v0::Constant>(ov::element::f32, convFilterShape, weightValuesFP32);
         std::shared_ptr<ov::Node> conv = std::make_shared<ov::op::v1::Convolution>(concat,
                                                                                    weightsNode,
diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/gather_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/gather_weights_decompression.cpp
new file mode 100644
index 00000000000000..eb3f52ddecc6e2
--- /dev/null
+++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/gather_weights_decompression.cpp
@@ -0,0 +1,202 @@
2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/node_builders/constant.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "utils/fusing_test_utils.hpp" +#include "transformations/rt_info/decompression.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { + +/* + * WP - weights precision + * DP - decompression precision + * IP - input precision + * OP - output precision + * + * Weights(WP) Subtract_const(WP) + * | / + * Convert(DP) Convert(DP) + * \ / + * Subtract(DP) + * \ Multiply_const(DP) + * \ / + * Multiply + * / + * Data(IP) Convert(OP) + * \ / + * Gather(OP) Weights(OP) + * \ / + * MatMul(OP) (Add MatMul in order to test OP==bf16 in SPR) + */ + +struct InputAndWeigthsShapeParams { + InputAndWeigthsShapeParams() = default; + InputAndWeigthsShapeParams(InputShape _data_shape, ov::Shape _weights_shape) + : data_shape(std::move(_data_shape)), + weights_shape(std::move(_weights_shape)) {} + + InputShape data_shape; + ov::Shape weights_shape; +}; + +using GatherWeightsDecompressParams = std::tuple<InputAndWeigthsShapeParams, ov::AnyMap, fusingSpecificParams, bool>; // should use decompression implementation + +class GatherWeightsDecompression : public testing::WithParamInterface<GatherWeightsDecompressParams>, + virtual public SubgraphBaseTest, + public CpuTestWithFusing { +public: + static std::string getTestCaseName(testing::TestParamInfo<GatherWeightsDecompressParams> obj) { + InputAndWeigthsShapeParams shape_params; + ov::AnyMap additional_config; + fusingSpecificParams fusing_params; + bool should_fuse; + + std::tie(shape_params, additional_config, fusing_params, should_fuse) = obj.param; + + std::ostringstream result; + result << "data_shape=" << shape_params.data_shape << "_"; + result << "weights_shape=" << shape_params.weights_shape << "_"; + + result << "config=("; + for (const auto& configEntry : additional_config) { + result << configEntry.first << ", " << configEntry.second.as<std::string>() << ":"; + } + result << ")"; + result << CpuTestWithFusing::getTestCaseName(fusing_params); + + return result.str(); + } + +protected: + std::shared_ptr<ov::Node> initDecompressionWeights(const ov::Shape& weights_shape, + const ov::element::Type weights_precision) { + auto weights = ov::test::utils::make_constant(weights_precision, + weights_shape, + ov::test::utils::InputGenerateData{0, 255}); + weights->set_friendly_name("Compressed_weights"); + auto weights_convert = std::make_shared<ov::op::v0::Convert>(weights, ov::element::f16); + + std::shared_ptr<ov::Node> zp_const = ov::test::utils::make_constant(ov::element::u8, + ov::Shape{weights_shape[0], 1}, + ov::test::utils::InputGenerateData{}); + auto zp_convert = std::make_shared<ov::op::v0::Convert>(zp_const, ov::element::f16); + + std::shared_ptr<ov::Node> scale_const = + ov::test::utils::make_constant(ov::element::f16, + ov::Shape{weights_shape[0], 1}, + ov::test::utils::InputGenerateData{}); + auto subtract = std::make_shared<ov::op::v1::Subtract>(weights_convert, zp_convert); + auto multiply = std::make_shared<ov::op::v1::Multiply>(subtract, scale_const); + auto last_node = std::make_shared<ov::op::v0::Convert>(multiply, ov::element::f32); + return last_node; + } + + std::shared_ptr<ov::Model> initSubgraph(const ov::PartialShape& data_shape, + const ov::Shape& weights_shape, + const ov::element::Type data_precision) { + ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ov::element::i64, data_shape)}; + auto params_convert = std::make_shared<ov::op::v0::Convert>(params[0], ov::element::i32); + auto axis = ov::op::v0::Constant::create(element::i32, Shape{1}, {0}); + + const auto weights_subgraph = initDecompressionWeights(weights_shape, + ov::element::u8); + + auto gather = std::make_shared<ov::op::v8::Gather>(weights_subgraph, params_convert, axis); +
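For reference, the Convert/Subtract/Multiply chain built by initDecompressionWeights implements the usual affine dequantization, w = (w_q - zp) * scale, with u8 storage and a f16 decompression precision. A minimal standalone sketch of that arithmetic (hypothetical values, plain C++, independent of the test harness):

#include <cstdint>
#include <cstdio>

int main() {
    const uint8_t w_q = 200;    // compressed weight (WP = u8)
    const uint8_t zp = 128;     // zero point, one per output channel in the test
    const float scale = 0.05f;  // decompression scale (f16 in the test, f32 here)
    // Convert -> Subtract -> Multiply, as in the diagram above.
    const float w = (static_cast<float>(w_q) - static_cast<float>(zp)) * scale;
    std::printf("dequantized weight = %.2f\n", w);  // (200 - 128) * 0.05 = 3.60
    return 0;
}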
gather->set_friendly_name("GatherCompression"); + + auto matB = ov::op::v0::Constant::create(element::f32, Shape{weights_shape[1], 1}, {1}); + auto matMul = std::make_shared(gather, matB, false, false); + return makeNgraphFunction(data_precision, params, matMul, "GatherWeightsDecompression"); + } + + void SetUp() override { + targetDevice = ov::test::utils::DEVICE_CPU; + + InputAndWeigthsShapeParams shape_params; + ov::AnyMap additional_config; + fusingSpecificParams fusing_params; + bool should_fuse; + + std::tie(shape_params, additional_config, fusing_params, should_fuse) = GetParam(); + + configuration.insert(additional_config.begin(), additional_config.end()); + std::tie(postOpMgrPtr, fusedOps) = fusing_params; + init_input_shapes({shape_params.data_shape, {{}, {{shape_params.weights_shape}}}}); + + ElementType netType = ov::element::f32; + inType = outType = netType; + + function = initSubgraph(inputDynamicShapes[0], shape_params.weights_shape, netType); + } + + void check_results() { + bool weights_found = false; + for (const auto& n : compiledModel.get_runtime_model()->get_ordered_ops()) { + if (n->get_friendly_name() == "Compressed_weights") { + ASSERT_EQ(n->get_output_element_type(0), ov::element::u8); + weights_found = true; + } + } + ASSERT_TRUE(weights_found); + + bool gather_found = false; + for (const auto& n : compiledModel.get_runtime_model()->get_ordered_ops()) { + if (n->get_friendly_name() == "GatherCompression") { + ASSERT_EQ(n->get_input_element_type(0), ov::element::u8); + ASSERT_EQ(n->get_output_element_type(0), ov::element::f32); + gather_found = true; + } + } + ASSERT_TRUE(gather_found); + + CheckNumberOfNodesWithType(compiledModel, "Convert", 1); + CheckNumberOfNodesWithType(compiledModel, "Subtract", 0); + CheckNumberOfNodesWithType(compiledModel, "Multiply", 0); + CheckNumberOfNodesWithType(compiledModel, "Subgraph", 0); + } +}; + +TEST_P(GatherWeightsDecompression, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + run(); + check_results(); +} + +namespace { + +std::vector filter_additional_config() { + std::vector additional_config = {}; + additional_config.push_back({{ov::hint::inference_precision(ov::element::f32)}}); + if (ov::with_cpu_x86_bfloat16()) { + additional_config.push_back({{ov::hint::inference_precision(ov::element::bf16)}}); + } + + return additional_config; +} + +const std::vector input_weights_shapes = { + {{{-1, -1}, {{1, 1}}}, {16, 32}}, + {{{-1, -1}, {{1, 8}}}, {16, 64}}, + {{{}, {{2, 1}}}, {16, 33}} +}; + +const std::vector fs_params{emptyFusingSpec, fusingBias}; + +INSTANTIATE_TEST_SUITE_P(smoke_GatherCompressedWeights_basic, + GatherWeightsDecompression, + ::testing::Combine(::testing::ValuesIn(input_weights_shapes), + ::testing::ValuesIn(filter_additional_config()), + ::testing::ValuesIn(fs_params), + ::testing::Values(true)), + GatherWeightsDecompression::getTestCaseName); +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp index 810cf2172ad140..7d30811f7a4332 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp @@ -2,23 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "functional_test_utils/core_config.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) { - 
// Within the test scope we don't need any implicit bf16 optimisations, so let's run the network as is. - auto& configuration = test->GetConfiguration(); - if (!configuration.count(ov::hint::inference_precision.name())) { - configuration.insert({ov::hint::inference_precision.name(), ov::element::f32.to_string()}); - } -#if defined(OV_CPU_ARM_ENABLE_FP16) - //force fp32 inference precision if it is not configured specially - if (!configuration.count(ov::hint::inference_precision.name())) { - configuration.insert({ov::hint::inference_precision.name(), ov::element::f32.to_string()}); - } - #endif -} - namespace ov { namespace test { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/disable_lowering_precision.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/disable_lowering_precision.cpp index f31fe30ce39941..70a00425bfa5a9 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/disable_lowering_precision.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/disable_lowering_precision.cpp @@ -9,7 +9,6 @@ #include "openvino/runtime/system_conf.hpp" using namespace ExecutionGraphTests; -using namespace InferenceEngine; namespace { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp index c2956f69dc73e7..3abe82596cb61b 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp @@ -156,7 +156,7 @@ std::vector eltwise_op_typesSingleThread = { }; ov::AnyMap additional_config_single_thread = { - ov::inference_num_threads(1), + ov::inference_num_threads(1), }; const auto single_thread_params = ::testing::Combine( diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp index 68a16954a19a85..b0e1c92f3f2ec4 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gather.cpp @@ -10,6 +10,7 @@ namespace { using ov::test::Gather7LayerTest; using ov::test::Gather8LayerTest; using ov::test::Gather8withIndicesDataLayerTest; +using ov::test::GatherStringWithIndicesDataLayerTest; const std::vector model_types = { ov::element::f32, @@ -229,4 +230,62 @@ const auto gatherWithIndicesParams = testing::Combine( INSTANTIATE_TEST_CASE_P(smoke, Gather8withIndicesDataLayerTest, gatherWithIndicesParams, Gather8withIndicesDataLayerTest::getTestCaseName); +std::vector string_cases_params{ + {ov::test::static_shapes_to_test_representation(std::vector{{3}}), // input shape + ov::Shape{1}, // indices shape + std::tuple{0, 0}, // axis, batch + ov::element::string, // model type + ov::test::utils::DEVICE_CPU, // device + std::vector{0}, // indices value + std::vector{"Abc", "xyz", "..."}}, // data str value + {ov::test::static_shapes_to_test_representation(std::vector{{3}}), + ov::Shape{1}, + std::tuple{0, 0}, + ov::element::string, + ov::test::utils::DEVICE_CPU, + std::vector{1}, + std::vector{"Abc", "xyz", "..."}}, + {ov::test::static_shapes_to_test_representation(std::vector{{3}}), + ov::Shape{2}, + 
std::tuple{0, 0}, + ov::element::string, + ov::test::utils::DEVICE_CPU, + std::vector{0, 2}, + std::vector{"Abc", "xyz", "..."}}, + {ov::test::static_shapes_to_test_representation(std::vector{{3}}), + ov::Shape{2}, + std::tuple{0, 0}, + ov::element::string, + ov::test::utils::DEVICE_CPU, + std::vector{0, 1}, + std::vector{"Ab", "1345", "xyz"}}, + {ov::test::static_shapes_to_test_representation(std::vector{{2, 2}}), + ov::Shape{1}, + std::tuple{0, 0}, + ov::element::string, + ov::test::utils::DEVICE_CPU, + std::vector{1}, + std::vector{"A", "B c", "d.Ef", " G h,i;"}}, + {ov::test::static_shapes_to_test_representation(std::vector{{2, 2, 2}}), + ov::Shape{1}, + std::tuple{0, 0}, + ov::element::string, + ov::test::utils::DEVICE_CPU, + std::vector{1}, + std::vector{"A", "B c", "d.Ef", " G h,i;", "JK ", "l,m,n,", " ", " \0"}}, + {ov::test::static_shapes_to_test_representation(std::vector{{2, 1, 2}}), + ov::Shape{2, 1, 2}, + std::tuple{2, 2}, + ov::element::string, + ov::test::utils::DEVICE_CPU, + std::vector{0, 1, 1, 0}, + std::vector{"A", "B c", "d.Ef", " G h,i;"}}}; + +const auto gatherWithStringParams = testing::ValuesIn(string_cases_params); + +INSTANTIATE_TEST_CASE_P(smoke_gather_string, + GatherStringWithIndicesDataLayerTest, + gatherWithStringParams, + GatherStringWithIndicesDataLayerTest::getTestCaseName); + } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp index ff058cecf6fc35..ad68b01bc2184f 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp @@ -4,7 +4,7 @@ #include -#include "single_layer_tests/softmax.hpp" +#include "single_op_tests/softmax.hpp" #include "common_test_utils/test_constants.hpp" using namespace ov::test::subgraph; diff --git a/src/plugins/intel_cpu/tests/unit/nodes/eltwise_node_test.cpp b/src/plugins/intel_cpu/tests/unit/nodes/eltwise_node_test.cpp index f95ac227f96636..7acd73a4999ad3 100644 --- a/src/plugins/intel_cpu/tests/unit/nodes/eltwise_node_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/nodes/eltwise_node_test.cpp @@ -4,7 +4,6 @@ #include #include -#include "ie_common.h" #include "nodes/eltwise.h" using namespace ov::intel_cpu; diff --git a/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp b/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp index 12927158aec087..bc87c69cd84281 100644 --- a/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/nodes/reorder_node_test.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp index 65387645967012..a24762782d6d0f 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp @@ -8,6 +8,7 @@ #include "cpu_map_scheduling.hpp" #include "cpu_streams_calculation.hpp" #include "openvino/runtime/system_conf.hpp" +#include "os/cpu_map_info.hpp" using namespace testing; using namespace ov; @@ -46,9 +47,10 @@ void make_config(StreamGenerateionTestCase& test_data, ov::intel_cpu::Config& co config.hintPerfMode = test_data.input_pm_hint; config.latencyThreadingMode = 
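Each smoke_gather_string entry above pairs an indices tensor with flat string data; Gather on ov::element::string follows exactly the same indexing rule as numeric Gather, only the element payload differs. A rough sketch of the axis-0 case the first entries exercise (hypothetical data, plain C++):

#include <cstdio>
#include <string>
#include <vector>

int main() {
    // data_shape {3}, axis = 0, as in the first string cases.
    const std::vector<std::string> data = {"Abc", "xyz", "..."};
    const std::vector<int> indices = {0, 2};
    for (int idx : indices) {
        // out[k] = data[indices[k]] along axis 0
        std::printf("%s\n", data[idx].c_str());  // prints "Abc" then "..."
    }
    return 0;
}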
test_data.input_latency_threading_mode; config.hintNumRequests = test_data.input_request; - config.streamExecutorConfig._streams = test_data.input_stream; - config.streamExecutorConfig._streams_changed = test_data.input_stream_changed; - config.streamExecutorConfig._threads = test_data.input_thread; + config.streams = test_data.input_stream_changed ? test_data.input_stream + : (test_data.input_stream == 0 ? 1 : test_data.input_stream); + config.streamsChanged = test_data.input_stream_changed; + config.threads = test_data.input_thread; } class StreamGenerationTests : public ov::test::TestsCommon, @@ -59,6 +61,9 @@ class StreamGenerationTests : public ov::test::TestsCommon, ov::intel_cpu::Config config; make_config(test_data, config); + CPU& cpu = cpu_info(); + cpu._proc_type_table = test_data.input_proc_type_table; + auto proc_type_table = ov::intel_cpu::generate_stream_info(test_data.input_stream, test_data.input_socket_id, nullptr, @@ -66,9 +71,9 @@ class StreamGenerationTests : public ov::test::TestsCommon, test_data.input_proc_type_table, test_data.input_model_prefer); - ASSERT_EQ(test_data.output_stream_info_table, config.streamExecutorConfig._streams_info_table); + ASSERT_EQ(test_data.output_stream_info_table, config.streamExecutorConfig.get_streams_info_table()); ASSERT_EQ(test_data.output_proc_type_table, proc_type_table); - ASSERT_EQ(test_data.output_cpu_value, config.streamExecutorConfig._cpu_reservation); + ASSERT_EQ(test_data.output_cpu_value, config.streamExecutorConfig.get_cpu_reservation()); ASSERT_EQ(test_data.output_ht_value, config.enableHyperThreading); ASSERT_EQ(test_data.output_type, config.schedulingCoreType); ASSERT_EQ(test_data.output_pm_hint, config.hintPerfMode); diff --git a/src/plugins/intel_cpu/tests/unit/transformations/state_concat_sdpa.cpp b/src/plugins/intel_cpu/tests/unit/transformations/state_concat_sdpa.cpp index ffa562ba4d86a1..df38e268068deb 100644 --- a/src/plugins/intel_cpu/tests/unit/transformations/state_concat_sdpa.cpp +++ b/src/plugins/intel_cpu/tests/unit/transformations/state_concat_sdpa.cpp @@ -13,25 +13,32 @@ #include #include #include -#include +#include #include "common_test_utils/ov_test_utils.hpp" +#include "utils/gen_pattern.hpp" +#include "utils/print_model.hpp" using namespace testing; -using namespace ov::intel_cpu; using namespace ov; +using namespace ov::intel_cpu; +using namespace ov::gen_pattern; -static std::shared_ptr makeSDPA(const ov::PartialShape& inputShape, bool isRef = false, bool hasConvert = false) { +static std::shared_ptr makeSDPA(const ov::PartialShape& inputShape, bool isRef = false, bool hasConvert = false, bool hasMultiquery = false) { auto q = std::make_shared(element::f32, inputShape); - auto k = std::make_shared(element::f32, inputShape); - auto v = std::make_shared(element::f32, inputShape); + auto kvInputShape = inputShape; + if (hasMultiquery) { + kvInputShape[1] = inputShape[1] / 4; + } + auto k = std::make_shared(element::f32, kvInputShape); + auto v = std::make_shared(element::f32, kvInputShape); auto init = std::make_shared(element::f32, inputShape); auto beam_idx = std::make_shared(element::i32, ov::PartialShape{-1}); auto var_k = std::make_shared( - ov::op::util::VariableInfo{inputShape, element::f32, "pastk"}); + ov::op::util::VariableInfo{kvInputShape, element::f32, "pastk"}); std::shared_ptr pastk = std::make_shared(k, var_k); auto var_v = std::make_shared( - ov::op::util::VariableInfo{inputShape, element::f32, "pastv"}); + ov::op::util::VariableInfo{kvInputShape, element::f32, "pastv"}); 
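The beam_idx input above models beam-search reordering: before each concat, the cached K/V state is re-indexed along the batch axis with a Gather. Conceptually (a sketch with hypothetical sizes, not the graph-building code):

#include <cstdio>
#include <vector>

int main() {
    // Two beams' cached values (one scalar per beam for brevity);
    // the reorder pattern duplicates beam 1.
    const std::vector<float> cache = {10.f, 20.f};
    const std::vector<int> beam_idx = {1, 1};
    std::vector<float> reordered(beam_idx.size());
    for (size_t b = 0; b < beam_idx.size(); ++b)
        reordered[b] = cache[beam_idx[b]];  // Gather(cache, beam_idx, axis=0)
    std::printf("%.0f %.0f\n", reordered[0], reordered[1]);  // 20 20
    return 0;
}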
std::shared_ptr pastv = std::make_shared(v, var_v); Output concatK, concatV, sdp; if (hasConvert) { @@ -50,7 +57,31 @@ static std::shared_ptr makeSDPA(const ov::PartialShape& inputShape, b pastv = std::make_shared(pastv, beam_idx, op::v0::Constant::create(element::i32, {1}, {0})); concatK = std::make_shared(OutputVector{pastk, k}, 2); concatV = std::make_shared(OutputVector{pastv, v}, 2); - sdp = std::make_shared(q, concatK, concatV, false); + if (hasMultiquery) { + auto make_multi_query = [&] (const Output& conat) { + auto beam_idx_shape = makeOP>({beam_idx}, + {{"type_relax", true}, {"input_data_types", {}}, {"output_data_types", {element::i32}}}); + auto unsqueeze_concat = makeOP({conat, 2}); + auto concat_shape = makeOP>( + {conat}, + {{"type_relax", true}, {"input_data_types", {}}, {"output_data_types", {element::i32}}}); + auto gather_ls = makeOP({concat_shape, {2, 3}, 0}, {{"batch_dims", 0}}); + auto expected_group_shape = makeOP({beam_idx_shape, {inputShape[1] / 4}, {4}, gather_ls}, {{"axis", 0}}); + auto expand_Abs = makeOP({expected_group_shape}); + auto axis_mapping = makeConst(element::u8, ov::Shape({}), 0); + auto expand_ones = makeOP({{1.0f}, + expand_Abs, + axis_mapping}, {{"mode", "numpy"}}); + auto expand_Broadcast = makeOP({unsqueeze_concat, + expand_ones}, {{"auto_broadcast", "numpy"}}); + auto expected_shape = makeOP({beam_idx_shape, {inputShape[1]}, gather_ls}, {{"axis", 0}}); + auto reshape_Reshape = makeOP({expand_Broadcast, expected_shape}, {{"special_zero", false}}); + return reshape_Reshape; + }; + sdp = std::make_shared(q, make_multi_query(concatK), make_multi_query(concatV), false); + } else { + sdp = std::make_shared(q, concatK, concatV, false); + } } if (hasConvert) { concatK = std::make_shared(concatK, element::f32); @@ -106,3 +137,24 @@ TEST(TransformationTests, StateConcatSDPAWithConvert) { ASSERT_TRUE(res.first) << res.second; } } + +TEST(TransformationTests, StateConcatSDPAMixtral) { + std::shared_ptr f(nullptr), f_ref(nullptr); + { + using namespace ov; + auto inputShape = ov::PartialShape{-1, 32, -1, 64}; + { + f = makeSDPA(inputShape, false, false, true); + pass::Manager m; + m.register_pass(); + m.register_pass(); + m.run_passes(f); + } + //construct ref interaction + { + f_ref = makeSDPA(inputShape, true, false, true); + } + auto res = compare_functions(f, f_ref); + ASSERT_TRUE(res.first) << res.second; + } +} diff --git a/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp b/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp index 13b8bba7f848f8..9aab52c866c1f1 100644 --- a/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp +++ b/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp @@ -17,7 +17,6 @@ #include #include #include "ov_ops/type_relaxed.hpp" -#include #include "common_test_utils/ov_test_utils.hpp" diff --git a/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp b/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp index 3080853d437b3d..07927abe52a107 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/graph/network.hpp @@ -220,6 +220,7 @@ struct network { const ov::intel_gpu::VariableStateInfo& get_variable_info(const std::string &variable_id) const; const ov::intel_gpu::VariablesMap& get_variables() const; const ov::intel_gpu::VariablesInfoMap& get_variables_info() const; + std::vector get_kv_cache_ids() const { return kv_cache_ids; } const 
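The make_multi_query branch above reproduces the grouped-query expansion that Mixtral-style models apply before SDPA: K/V carry n_heads/4 heads, which are unsqueezed, broadcast by the group size, and reshaped so every query head in a group reads the same K/V slice. Shape-wise (a sketch assuming 8 KV heads and group size 4, matching inputShape[1] = 32):

#include <cstdio>

int main() {
    const int B = 1, kv_heads = 8, group = 4, seq_len = 16, head_size = 64;
    // KV as produced:        [B, kv_heads, seq_len, head_size]
    // Unsqueeze at dim 2:    [B, kv_heads, 1, seq_len, head_size]
    // Broadcast group times: [B, kv_heads, group, seq_len, head_size]
    // Reshape back to 4D:    [B, kv_heads * group, seq_len, head_size]
    std::printf("[%d, %d, %d, %d]\n", B, kv_heads * group, seq_len, head_size);
    return 0;
}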
ExecutionConfig& get_config() const { return _config; } @@ -255,6 +256,7 @@ struct network { ov::intel_gpu::VariablesMap _variables_states; ov::intel_gpu::VariablesInfoMap _variables_state_info; + std::vector<primitive_id> kv_cache_ids; program::primitives_info _prims_info; std::map _ext_id_mapping; diff --git a/src/plugins/intel_gpu/include/intel_gpu/op/swiglu.hpp b/src/plugins/intel_gpu/include/intel_gpu/op/swiglu.hpp new file mode 100644 index 00000000000000..249bfff3287184 --- /dev/null +++ b/src/plugins/intel_gpu/include/intel_gpu/op/swiglu.hpp @@ -0,0 +1,53 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" + +namespace ov { +namespace intel_gpu { +namespace op { + +/// \brief Operator performing the Swish Gated Linear Unit activation. +/// This operation performs a gated linear unit activation combined with the Swish activation function. +class SwiGLU : public ov::op::Op { +public: + OPENVINO_OP("SwiGLU", "gpu_opset"); + + SwiGLU() = default; + /// \brief Constructs a SwiGLU operation. + /// + /// \param data Input tensor with data + /// \param axis The index of an axis in "data" along which to perform the split + /// \param split_lengths A list containing the sizes of each output tensor along the split "axis" + /// \param output_type Output element type + SwiGLU(const Output<Node>& data, + int64_t axis, + int64_t split_lengths, + const ov::element::Type output_type = ov::element::undefined); + + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + void validate_and_infer_types() override; + + std::shared_ptr<Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override; + + int64_t get_axis() const { return m_axis; } + int64_t get_split_lengths() const { return m_split_lengths; } + + void set_axis(int64_t axis) { m_axis = axis; } + void set_split_lengths(int64_t split_lengths) { m_split_lengths = split_lengths; } + +private: + int64_t m_axis; + int64_t m_split_lengths; + ov::element::Type m_output_type; +}; + +std::vector<ov::PartialShape> shape_infer(const SwiGLU* op, std::vector<ov::PartialShape> input_shapes); + +} // namespace op +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp index 75bf7f51c19000..af0497c0e9d5d4 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp @@ -276,3 +276,4 @@ REGISTER_FACTORY(internal, GatherCompressed); REGISTER_FACTORY(internal, KVCache); REGISTER_FACTORY(internal, ReadValue); REGISTER_FACTORY(internal, Gemm); +REGISTER_FACTORY(internal, SwiGLU); diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/swiglu.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/swiglu.hpp new file mode 100644 index 00000000000000..6602f9ddd18aeb --- /dev/null +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/swiglu.hpp @@ -0,0 +1,67 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "primitive.hpp" + +namespace cldnn { + +/// @brief Swish Gated Linear Unit Activation primitive +/// @details Performs a gated linear unit activation combined with the Swish activation function +struct swiglu : public primitive_base<swiglu> { + CLDNN_DECLARE_PRIMITIVE(swiglu); + + swiglu() : primitive_base("", {}) {} + + /// @brief Constructs swiglu primitive + /// @param id This primitive id + /// @param input Input primitive id + ///
@param axis The index of an axis in data along which to perform the split + /// @param split_lengths A list containing the sizes of each output tensor along the split axis + /// @param output_size Output data size of the primitive + swiglu(const primitive_id& id, + const input_info& input, + const int64_t& axis, + const int64_t& split_lengths, + const tensor output_size, + const padding& output_padding = padding()) + : primitive_base(id, {input}, {output_padding}), + axis(axis), + split_lengths(split_lengths), + output_size(output_size) {} + + int64_t axis; + int64_t split_lengths; + tensor output_size; + + size_t hash() const override { + size_t seed = primitive::hash(); + seed = hash_combine(seed, axis); + seed = hash_combine(seed, split_lengths); + return seed; + } + + bool operator==(const primitive& rhs) const override { + if (!compare_common_params(rhs)) + return false; + + auto rhs_casted = downcast(rhs); + return axis == rhs_casted.axis && split_lengths == rhs_casted.split_lengths; + } + + void save(BinaryOutputBuffer& ob) const override { + primitive_base::save(ob); + ob << axis; + ob << split_lengths; + ob << output_size; + } + + void load(BinaryInputBuffer& ib) override { + primitive_base::load(ib); + ib >> axis; + ib >> split_lengths; + ib >> output_size; + } +}; +} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp index bb2dba327f15b7..856195f32b43c5 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp @@ -79,6 +79,7 @@ void register_implementations() { REGISTER_OCL(space_to_depth); REGISTER_OCL(slice); REGISTER_OCL(strided_slice); + REGISTER_OCL(swiglu); REGISTER_OCL(tile); REGISTER_OCL(gather_tree); REGISTER_OCL(resample); diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp b/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp index 6c27c72dc4caae..e338d638e438f3 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp @@ -68,6 +68,7 @@ #include "intel_gpu/primitives/softmax.hpp" #include "intel_gpu/primitives/space_to_batch.hpp" #include "intel_gpu/primitives/strided_slice.hpp" +#include "intel_gpu/primitives/swiglu.hpp" #include "intel_gpu/primitives/tile.hpp" #include "intel_gpu/primitives/non_zero.hpp" #include "intel_gpu/primitives/eye.hpp" @@ -157,6 +158,7 @@ REGISTER_OCL(softmax); REGISTER_OCL(space_to_batch); REGISTER_OCL(space_to_depth); REGISTER_OCL(strided_slice); +REGISTER_OCL(swiglu); REGISTER_OCL(tile); REGISTER_OCL(gather_tree); REGISTER_OCL(resample); diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/swiglu.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/swiglu.cpp new file mode 100644 index 00000000000000..5e628ff50a6656 --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/swiglu.cpp @@ -0,0 +1,79 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "primitive_base.hpp" +#include "validation_util.hpp" + +#include "swiglu_inst.h" +#include "swiglu/swiglu_kernel_selector.h" +#include "swiglu/swiglu_kernel_ref.h" + +namespace cldnn { +namespace ocl { + +struct swiglu_impl : typed_primitive_impl_ocl { + using parent = typed_primitive_impl_ocl; + using parent::parent; + using kernel_selector_t = kernel_selector::swiglu_kernel_selector; + using kernel_params_t = std::pair; + + DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::ocl::swiglu_impl); + + 
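As with other cldnn primitives, swiglu keeps hash() and operator== in agreement on the fields that identify a kernel (axis and split_lengths); output_size is serialized via save()/load() but is not part of the hash. The combiner below is a boost-style stand-in to illustrate the pattern, not cldnn's exact hash_combine:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>

// Boost-style combiner; cldnn provides its own hash_combine of this shape.
static std::size_t hash_combine(std::size_t seed, std::size_t v) {
    return seed ^ (std::hash<std::size_t>()(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}

int main() {
    const int64_t axis = -1, split_lengths = 64;
    std::size_t seed = 0;  // stands in for primitive::hash()
    seed = hash_combine(seed, static_cast<std::size_t>(axis));
    seed = hash_combine(seed, static_cast<std::size_t>(split_lengths));
    std::printf("swiglu hash seed = %zu\n", seed);
    return 0;
}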
std::unique_ptr clone() const override { + return make_unique(*this); + } + + void load(BinaryInputBuffer& ib) override { + parent::load(ib); + if (is_dynamic()) { + auto& kernel_selector = kernel_selector_t::Instance(); + auto kernel_impl = kernel_selector.GetImplementation(_kernel_data.kernelName); + kernel_impl->GetUpdateDispatchDataFunc(_kernel_data); + } + } + + static kernel_params_t get_kernel_params(const kernel_impl_params& impl_param, bool is_shape_agnostic = false) { + const auto& primitive = impl_param.typed_desc(); + auto params = get_default_params(impl_param, is_shape_agnostic); + auto optional_params = get_default_optional_params(impl_param.get_program()); + + auto rank = impl_param.get_input_layout(0).get_partial_shape().rank(); + params.axis = ov::util::normalize(primitive->axis, rank.get_length()); + params.split_length = primitive->split_lengths; + + return {params, optional_params}; + } + + void update_dispatch_data(const kernel_impl_params& impl_param) override { + auto kernel_params = get_kernel_params(impl_param, true); + (_kernel_data.update_dispatch_data_func)(kernel_params.first, _kernel_data); + } +}; + +namespace detail { + +attach_swiglu_impl::attach_swiglu_impl() { + auto types = { + data_types::f32, + data_types::f16 + }; + + auto formats = { + format::bfyx, + format::bfzyx + }; + + implementation_map::add(impl_types::ocl, + shape_types::any, + typed_primitive_impl_ocl::create, + types, + formats); +} + +} // namespace detail +} // namespace ocl +} // namespace cldnn + +BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::swiglu_impl) +BIND_BINARY_BUFFER_WITH_TYPE(cldnn::swiglu) diff --git a/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h b/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h index 34c4ccf555008b..2c9e34efd30792 100644 --- a/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h @@ -36,9 +36,8 @@ class typed_primitive_inst : public typed_primitive_inst_base : public typed_primitive_inst_base; diff --git a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h index 62840e2b9bbb1f..99805e947081c0 100644 --- a/src/plugins/intel_gpu/src/graph/include/primitive_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/primitive_inst.h @@ -299,6 +299,8 @@ class primitive_inst { virtual void update_output_memory() {} + virtual int32_t get_prealloc_iter_num() { return -1; } + protected: primitive_inst(network& network, program_node const& node, bool allocate_memory); diff --git a/src/plugins/intel_gpu/src/graph/include/swiglu_inst.h b/src/plugins/intel_gpu/src/graph/include/swiglu_inst.h new file mode 100644 index 00000000000000..6a5ce08dc54bd2 --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/include/swiglu_inst.h @@ -0,0 +1,42 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "intel_gpu/primitives/swiglu.hpp" +#include "primitive_inst.h" + +#include + +namespace cldnn { + +template <> +struct typed_program_node : public typed_program_node_base { + using parent = typed_program_node_base; + +public: + using parent::parent; + + program_node& input(size_t index = 0) const { return get_dependency(index); } + std::vector get_shape_infer_dependencies() const override { return {}; } +}; + +using swiglu_node = typed_program_node; + +template <> +class typed_primitive_inst : public typed_primitive_inst_base { + using parent = typed_primitive_inst_base; + using parent::parent; 
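get_kernel_params above normalizes the primitive's axis against the input rank with ov::util::normalize: a negative axis counts from the end of the shape, so with rank r the effective axis is axis >= 0 ? axis : axis + r. A self-contained sketch of that wrap-around rule (the helper name below is illustrative, not OpenVINO's utility):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Same wrap-around rule ov::util::normalize applies.
static int64_t normalize_axis(int64_t axis, int64_t rank) {
    return axis >= 0 ? axis : axis + rank;
}

int main() {
    assert(normalize_axis(-1, 4) == 3);  // last dim of a 4D tensor
    assert(normalize_axis(2, 4) == 2);   // non-negative axes pass through
    std::printf("ok\n");
    return 0;
}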
+ +public: + template <typename ShapeType> + static std::vector<layout> calc_output_layouts(swiglu_node const& /*node*/, const kernel_impl_params& impl_params); + static layout calc_output_layout(swiglu_node const& node, kernel_impl_params const& impl_params); + static std::string to_string(swiglu_node const& node); + + typed_primitive_inst(network& network, swiglu_node const& node); +}; + +using swiglu_inst = typed_primitive_inst<swiglu>; + +} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/kv_cache.cpp b/src/plugins/intel_gpu/src/graph/kv_cache.cpp index 0e44de3fc2fb9c..aaf4a1cbfd4cc9 100644 --- a/src/plugins/intel_gpu/src/graph/kv_cache.cpp +++ b/src/plugins/intel_gpu/src/graph/kv_cache.cpp @@ -16,6 +16,7 @@ GPU_DEFINE_PRIMITIVE_TYPE_ID(kv_cache) kv_cache_inst::typed_primitive_inst(network& network, const kv_cache_node& node) : parent{network, node, false}, memory_state::variable{node.get_primitive()->variable_info.variable_id} { + kv_cache_id = network.get_kv_cache_ids().size(); } layout kv_cache_inst::calc_output_layout(const kv_cache_node& node, kernel_impl_params const& impl_param) { @@ -55,4 +56,7 @@ std::string kv_cache_inst::to_string(const kv_cache_node& node) { return primitive_description.str(); } +int32_t kv_cache_inst::get_prealloc_iter_num() { + return 128 + kv_cache_id % 64; +} } // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/network.cpp b/src/plugins/intel_gpu/src/graph/network.cpp index 80b077e740c99b..3fe7d3b483b59f 100644 --- a/src/plugins/intel_gpu/src/graph/network.cpp +++ b/src/plugins/intel_gpu/src/graph/network.cpp @@ -34,6 +34,7 @@ #include "assign_inst.h" #include "read_value_inst.h" #include "reshape_inst.h" +#include "kv_cache_inst.h" #include "program_helpers.h" #include "to_string_utils.h" #include "kernels_cache.hpp" @@ -1329,6 +1330,9 @@ void network::allocate_primitive_instance(program_node const& node) { if (node.is_type<data>()) _data_outputs.push_back(inst); } + if (node.is_type<kv_cache>()) { + kv_cache_ids.push_back(node.id()); + } if (auto state_prim = std::dynamic_pointer_cast<memory_state::variable>(inst)) { set_variables_state_info(state_prim->variable_id(), node.get_output_layout(0), state_prim->get_user_specified_type()); } diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index cd2800f0138614..30dbb7a4246dcd 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -568,7 +568,7 @@ event::ptr primitive_inst::realloc_if_needed() { auto current_shape = updated_layout.get_shape(); std::pair prealloc_info; - int32_t tmp_prealloc_count = _node->is_type<kv_cache>() ? kv_cache_inst::get_prealloc_iter_num() : -1; + int32_t tmp_prealloc_count = get_prealloc_iter_num(); GPU_DEBUG_IF(debug_config->mem_preallocation_params.is_initialized) { // If a debug config is set, respect the config tmp_prealloc_count = -1; } @@ -608,13 +608,15 @@ event::ptr primitive_inst::realloc_if_needed() { auto desc = _node->as<kv_cache>().get_primitive(); auto& variable = get_network().get_variable(desc->variable_info.variable_id); auto present_layout = _impl_params->output_layouts[0]; - const auto& sequence_axis = desc->concat_axis; + auto present_layout_rank = present_layout.get_partial_shape().size(); + const auto sequence_axis = desc->concat_axis >= 0 ?
desc->concat_axis + : present_layout_rank + desc->concat_axis; auto sequence_axis_legacy = - kv_cache_inst::get_sequence_axis_legacy(sequence_axis, present_layout.get_partial_shape().size()); + kv_cache_inst::get_sequence_axis_legacy(sequence_axis, present_layout_rank); GPU_DEBUG_TRACE_DETAIL << id() << " is kv_cache => set the variable with newly allocated output memory" << std::endl; bool axis_is_outer_most = true; - for (int64_t dim = 0; dim < sequence_axis; ++dim) { + for (size_t dim = 0; dim < sequence_axis; ++dim) { if (present_layout.get_shape()[dim] > 1) { axis_is_outer_most = false; break; diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index 36e2cda3ab345b..08f7c5a5060a2a 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ b/src/plugins/intel_gpu/src/graph/program.cpp @@ -101,21 +101,28 @@ using namespace cldnn; using namespace ov::intel_gpu; static ov::threading::IStreamsExecutor::Config make_task_executor_config(const ExecutionConfig& config, std::string tags, int num_streams = 0) { - ov::threading::IStreamsExecutor::Config task_executor_config(tags, 1); - task_executor_config._streams = (num_streams > 0) ? num_streams : config.get_property(ov::compilation_num_threads); + int streams = (num_streams > 0) ? num_streams : config.get_property(ov::compilation_num_threads); auto priority = config.get_property(ov::intel_gpu::hint::host_task_priority); + auto core_type = ov::threading::IStreamsExecutor::Config::ANY; switch (priority) { - case ov::hint::Priority::LOW: task_executor_config._threadPreferredCoreType = ov::threading::IStreamsExecutor::Config::LITTLE; break; - case ov::hint::Priority::MEDIUM: task_executor_config._threadPreferredCoreType = ov::threading::IStreamsExecutor::Config::ANY; break; - case ov::hint::Priority::HIGH: task_executor_config._threadPreferredCoreType = ov::threading::IStreamsExecutor::Config::BIG; break; + case ov::hint::Priority::LOW: core_type = ov::threading::IStreamsExecutor::Config::LITTLE; break; + case ov::hint::Priority::MEDIUM: core_type = ov::threading::IStreamsExecutor::Config::ANY; break; + case ov::hint::Priority::HIGH: core_type = ov::threading::IStreamsExecutor::Config::BIG; break; default: OPENVINO_ASSERT(false, "[GPU] Can't create task executor: invalid host task priority value: ", priority); } bool enable_cpu_pinning = config.get_property(ov::hint::enable_cpu_pinning); - task_executor_config.update_executor_config(task_executor_config._streams, - 1, - task_executor_config._threadPreferredCoreType, - enable_cpu_pinning); + ov::threading::IStreamsExecutor::Config task_executor_config( + tags, + streams, + 1, + ov::threading::IStreamsExecutor::ThreadBindingType::NONE, + 1, + 0, + 0, + core_type, + {}, + enable_cpu_pinning); return task_executor_config; } diff --git a/src/plugins/intel_gpu/src/graph/swiglu.cpp b/src/plugins/intel_gpu/src/graph/swiglu.cpp new file mode 100644 index 00000000000000..c3d36e15684610 --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/swiglu.cpp @@ -0,0 +1,69 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "intel_gpu/op/swiglu.hpp" +#include "swiglu_inst.h" + +#include "primitive_type_base.h" +#include "json_object.h" +#include + +namespace cldnn { +GPU_DEFINE_PRIMITIVE_TYPE_ID(swiglu); + +layout swiglu_inst::calc_output_layout(swiglu_node const& node, kernel_impl_params const& impl_param) { + auto desc = impl_param.typed_desc(); + auto input_layout = impl_param.get_input_layout(); + auto output_type 
= impl_param.desc->output_data_types[0].value_or(input_layout.data_type); + auto output_format = input_layout.format; + + return layout(output_type, output_format, desc->output_size); +} + +template +std::vector swiglu_inst::calc_output_layouts(swiglu_node const& /*node*/, const kernel_impl_params& impl_param) { + auto desc = impl_param.typed_desc(); + auto input_layout = impl_param.get_input_layout(); + auto output_type = impl_param.desc->output_data_types[0].value_or(input_layout.data_type); + auto output_format = input_layout.format; + + ov::intel_gpu::op::SwiGLU op; + op.set_axis(desc->axis); + op.set_split_lengths(desc->split_lengths); + + std::vector input_shapes = { + impl_param.get_input_layout(0).get(), + ShapeType(ov::Shape({})), + ShapeType(ov::Shape{2}) + }; + + std::vector output_shapes = shape_infer(&op, input_shapes); + + return { layout(output_shapes[0], output_type, output_format) }; +} + +template std::vector swiglu_inst::calc_output_layouts(swiglu_node const& node, + const kernel_impl_params& impl_param); + +std::string swiglu_inst::to_string(swiglu_node const& node) { + auto desc = node.get_primitive(); + auto node_info = node.desc_to_json(); + auto& input = node.input(); + + std::stringstream primitive_description; + + json_composite swiglu_info; + swiglu_info.add("input_id", input.id()); + swiglu_info.add("axis", desc->axis); + swiglu_info.add("split_lengths", desc->split_lengths); + + node_info->add("swiglu_info", swiglu_info); + node_info->dump(primitive_description); + + return primitive_description.str(); +} + +swiglu_inst::typed_primitive_inst(network& network, swiglu_node const& node) : parent(network, node) {} + +} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/swiglu_gpu_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/swiglu_gpu_ref.cl new file mode 100644 index 00000000000000..a943a6e101cea1 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/swiglu_gpu_ref.cl @@ -0,0 +1,43 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "include/batch_headers/fetch_data.cl" + +KERNEL(swiglu_gpu_ref)( + OPTIONAL_SHAPE_INFO_ARG + const __global INPUT0_TYPE* input, + __global OUTPUT_TYPE* output) +{ +#if OUTPUT_DIMS == 5 + uint data_idx = (uint)get_global_id(GWS_YX); + const uint x = data_idx % OUTPUT_SIZE_X; + data_idx = data_idx / OUTPUT_SIZE_X; + const uint y = data_idx % OUTPUT_SIZE_Y; + data_idx = data_idx / OUTPUT_SIZE_Y; + const uint z = data_idx % OUTPUT_SIZE_Z; +#else // 2D spatial + const uint x = (uint)get_global_id(GWS_YX) % OUTPUT_SIZE_X; + const uint y = (uint)get_global_id(GWS_YX) / OUTPUT_SIZE_X; +#endif + const uint f = (uint)get_global_id(GWS_FEATURE); + const uint b = (uint)get_global_id(GWS_BATCH); + +#if OUTPUT_DIMS == 5 + const uint output_idx = OUTPUT_GET_INDEX(b, f, z, y, x); + const uint gate_idx = INPUT0_GET_INDEX(b, f, z, y, x); + const uint input_idx = INPUT0_GET_INDEX(b, f, z, y, x) + SPLIT_LENGTH; +#else // 2D spatial + const uint output_idx = OUTPUT_GET_INDEX(b, f, y, x); + const uint gate_idx = INPUT0_GET_INDEX(b, f, y, x); + const uint input_idx = INPUT0_GET_INDEX(b, f, y, x) + SPLIT_LENGTH; +#endif + + ACCUMULATOR_TYPE res = ACCUMULATOR_VAL_ZERO; + + res = (ACCUMULATOR_TYPE)input[gate_idx]; + res /= ACCUMULATOR_VAL_ONE + exp(-(ACCUMULATOR_VAL_ONE * res)); + res *= (ACCUMULATOR_TYPE)input[input_idx]; + + output[output_idx] = TO_OUTPUT_TYPE(res); +} diff --git a/src/plugins/intel_gpu/src/kernel_selector/common_types.h 
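The kernel body above is Swish with beta = 1 applied to the gate half of the input, multiplied by the linear half: out = (g / (1 + exp(-g))) * x. A scalar reference of the same math in plain C++ (no OpenCL specifics):

#include <cmath>
#include <cstdio>

// Scalar reference for one output element of the swiglu kernel.
static float swiglu_ref(float gate, float linear) {
    const float swish = gate / (1.0f + std::exp(-gate));  // Swish/SiLU, beta = 1
    return swish * linear;
}

int main() {
    std::printf("%f\n", swiglu_ref(1.0f, 2.0f));  // ~1.462117
    return 0;
}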
b/src/plugins/intel_gpu/src/kernel_selector/common_types.h index 2b6f7be857be8c..0f026a77ae855c 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/common_types.h +++ b/src/plugins/intel_gpu/src/kernel_selector/common_types.h @@ -94,6 +94,7 @@ enum class KernelType { UNIQUE_COUNT, UNIQUE_GATHER, RMS, + SWIGLU, }; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_ref.cpp new file mode 100644 index 00000000000000..fac86e7b2135aa --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_ref.cpp @@ -0,0 +1,115 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "swiglu_kernel_ref.h" +#include "kernel_selector_utils.h" +#include + +namespace kernel_selector { +ParamsKey SwiGLUKernelRef::GetSupportedKey() const { + ParamsKey k; + k.EnableInputDataType(Datatype::F16); + k.EnableInputDataType(Datatype::F32); + k.EnableOutputDataType(Datatype::F16); + k.EnableOutputDataType(Datatype::F32); + k.EnableInputLayout(DataLayout::bfyx); + k.EnableInputLayout(DataLayout::bfzyx); + k.EnableOutputLayout(DataLayout::bfyx); + k.EnableOutputLayout(DataLayout::bfzyx); + k.EnableTensorOffset(); + k.EnableTensorPitches(); + k.EnableBatching(); + k.EnableDifferentTypes(); + k.EnableDynamicShapesSupport(); + return k; +} + +JitConstants SwiGLUKernelRef::GetJitConstants(const swiglu_params& params) const { + JitConstants jit = MakeBaseParamsJitConstants(params); + + jit.AddConstants({MakeJitConstant("AXIS", params.axis)}); + jit.AddConstants({MakeJitConstant("SPLIT_LENGTH", params.split_length)}); + jit.Merge(MakeTypeJitConstants(GetAccumulatorType(params), "ACCUMULATOR")); + jit.Merge(GetTensorFriendlyWorkGroupsJit(params.outputs[0])); + + return jit; +} + +CommonDispatchData SwiGLUKernelRef::SetDefault(const swiglu_params& params) const { + CommonDispatchData dispatchData; + + dispatchData.gws = GetTensorFriendlyWorkGroups(params.outputs[0]); + dispatchData.lws = GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo); + + return dispatchData; +} + +void SwiGLUKernelRef::GetUpdateDispatchDataFunc(KernelData& kd) const { + kd.update_dispatch_data_func = [this](const Params& params, KernelData& kd) { + const auto& prim_params = static_cast(params); + auto dispatchData = SetDefault(prim_params); + OPENVINO_ASSERT(kd.kernels.size() == 1, "[GPU] Invalid kernels size for update dispatch data func"); + kd.kernels[0].params.workGroups.global = dispatchData.gws; + kd.kernels[0].params.workGroups.local = dispatchData.lws; + kd.kernels[0].skip_execution = KernelData::SkipKernelExecution(prim_params); + }; +} + +KernelsData SwiGLUKernelRef::GetKernelsData(const Params& params, const optional_params& options) const { + assert(params.GetType() == KernelType::SWIGLU); + + if (!Validate(params, options)) + return {}; + + const swiglu_params& orgParams = static_cast(params); + auto dispatchData = SetDefault(orgParams); + + KernelData kd = KernelData::Default(params); + + auto cldnn_jit = GetJitConstants(orgParams); + auto entry_point = GetEntryPoint(kernelName, orgParams.layerID, params, options); + auto jit = CreateJit(kernelName, cldnn_jit, entry_point); + + GetUpdateDispatchDataFunc(kd); + + auto& kernel = kd.kernels[0]; + FillCLKernelData(kernel, + dispatchData, + params.engineInfo, + kernelName, + jit, 
+ entry_point, + EXE_MODE_DEFAULT, + false, + false, + 1, + GetFusedPrimitiveInputsCount(params), + 1, + orgParams.has_dynamic_tensors()); + + return {kd}; +} + +KernelsPriority SwiGLUKernelRef::GetKernelsPriority(const Params& /*params*/, const optional_params& /*options*/) const { + return DONT_USE_IF_HAVE_SOMETHING_ELSE; +} + +Datatype SwiGLUKernelRef::GetAccumulatorType(const swiglu_params& params) const { + Datatype types[] = { Datatype::F32, Datatype::F16, Datatype::INT64, Datatype::INT32, Datatype::UINT32}; + + for (Datatype type : types) + for (auto& in : params.inputs) + if (in.GetDType() == type) + return type; + + return Datatype::F32; +} + +bool SwiGLUKernelRef::Validate(const Params& params, const optional_params& options) const { + if (!KernelBaseOpenCL::Validate(params, options)) + return false; + + return true; +} +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_ref.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_ref.h new file mode 100644 index 00000000000000..b4b5e4cac87be7 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_ref.h @@ -0,0 +1,42 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "kernel_base_opencl.h" + +namespace kernel_selector { +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// swiglu_params +/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +struct swiglu_params : public base_params { + swiglu_params() : base_params(KernelType::SWIGLU), axis(0), split_length(0) {} + int32_t axis; + int32_t split_length; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// swiglu_optional_params +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +struct swiglu_optional_params : optional_params { + swiglu_optional_params() : optional_params(KernelType::SWIGLU) {} +}; + +class SwiGLUKernelRef : public KernelBaseOpenCL { +public: + SwiGLUKernelRef() : KernelBaseOpenCL("swiglu_gpu_ref") {} + virtual ~SwiGLUKernelRef() {} + + virtual JitConstants GetJitConstants(const swiglu_params& params) const; + virtual CommonDispatchData SetDefault(const swiglu_params& params) const; + KernelsData GetKernelsData(const Params& params, const optional_params& options) const override; + KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override; + Datatype GetAccumulatorType(const swiglu_params& params) const; + ParamsKey GetSupportedKey() const override; + +protected: + bool Validate(const Params&, const optional_params&) const override; + void GetUpdateDispatchDataFunc(KernelData& kd) const override; +}; +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_selector.cpp new file mode 100644 index 00000000000000..0287f7c1ae4ddc --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_selector.cpp @@ -0,0 +1,14 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "swiglu_kernel_selector.h" +#include "swiglu_kernel_ref.h" + +namespace 
kernel_selector { +swiglu_kernel_selector::swiglu_kernel_selector() { Attach(); } + +KernelsData swiglu_kernel_selector::GetBestKernels(const Params& params, const optional_params& options) const { + return GetNaiveBestKernel(params, options, KernelType::SWIGLU); +} +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_selector.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_selector.h new file mode 100644 index 00000000000000..a245a49d11f783 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/swiglu/swiglu_kernel_selector.h @@ -0,0 +1,23 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "kernel_selector.h" + +namespace kernel_selector { +class swiglu_kernel_selector : public kernel_selector_base { +public: + static swiglu_kernel_selector& Instance() { + static swiglu_kernel_selector instance_; + return instance_; + } + + swiglu_kernel_selector(); + + virtual ~swiglu_kernel_selector() {} + + KernelsData GetBestKernels(const Params& params, const optional_params& options) const override; +}; +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/plugin/compiled_model.cpp b/src/plugins/intel_gpu/src/plugin/compiled_model.cpp index 2bed086a94ca8f..543c77734cc492 100644 --- a/src/plugins/intel_gpu/src/plugin/compiled_model.cpp +++ b/src/plugins/intel_gpu/src/plugin/compiled_model.cpp @@ -36,7 +36,7 @@ std::shared_ptr create_task_executor(const std::sh } else if (config.get_property(ov::hint::enable_cpu_pinning)) { auto executor_config = ov::threading::IStreamsExecutor::Config{"Intel GPU plugin executor", - 0, + config.get_property(ov::num_streams), 0, ov::threading::IStreamsExecutor::ThreadBindingType::CORES, 1, @@ -45,8 +45,7 @@ std::shared_ptr create_task_executor(const std::sh ov::threading::IStreamsExecutor::Config::PreferredCoreType::BIG, {{config.get_property(ov::num_streams), MAIN_CORE_PROC, 1, 0, 0}}, true}; - auto post_config = ov::threading::IStreamsExecutor::Config::reserve_cpu_threads(executor_config); - return std::make_shared(post_config); + return std::make_shared(executor_config); } else { return std::make_shared( ov::threading::IStreamsExecutor::Config{"Intel GPU plugin executor", config.get_property(ov::num_streams)}); diff --git a/src/plugins/intel_gpu/src/plugin/ops/swiglu.cpp b/src/plugins/intel_gpu/src/plugin/ops/swiglu.cpp new file mode 100644 index 00000000000000..def2a52d281de2 --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/ops/swiglu.cpp @@ -0,0 +1,48 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "intel_gpu/op/swiglu.hpp" +#include "intel_gpu/plugin/program_builder.hpp" +#include "intel_gpu/plugin/common_utils.hpp" +#include "intel_gpu/primitives/swiglu.hpp" + +namespace ov { +namespace op { +namespace internal { +using SwiGLU = ov::intel_gpu::op::SwiGLU; +} // namespace internal +} // namespace op +} // namespace ov + +namespace ov { +namespace intel_gpu { + +static void CreateSwiGLUOp(ProgramBuilder& p, const std::shared_ptr& op) { + validate_inputs_count(op, {1}); + auto inputs = p.GetInputInfo(op); + std::string primitive_name = layer_type_name_ID(op); + + if (p.use_new_shape_infer()) { + auto prim = cldnn::swiglu(primitive_name, + inputs[0], + op->get_axis(), + op->get_split_lengths(), + cldnn::tensor()); + prim.output_data_types = get_output_data_types(op); + p.add_primitive(*op, prim); + } else { + auto prim = 
cldnn::swiglu(primitive_name, + inputs[0], + op->get_axis(), + op->get_split_lengths(), + tensor_from_dims(op->get_output_shape(0))); + prim.output_data_types = get_output_data_types(op); + p.add_primitive(*op, prim); + } +} + +REGISTER_FACTORY_IMPL(internal, SwiGLU); + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations/op/swiglu.cpp b/src/plugins/intel_gpu/src/plugin/transformations/op/swiglu.cpp new file mode 100644 index 00000000000000..e2d87fae0a24ba --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/swiglu.cpp @@ -0,0 +1,64 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "intel_gpu/op/swiglu.hpp" +#include "validation_util.hpp" +#include "variadic_split_shape_inference.hpp" +#include "openvino/core/partial_shape.hpp" +#include "openvino/op/variadic_split.hpp" + +namespace ov { +namespace intel_gpu { +namespace op { + +SwiGLU::SwiGLU(const Output& data, + int64_t axis, + int64_t split_lengths, + const ov::element::Type output_type) + : Op({data}), m_axis(axis), m_split_lengths(split_lengths), m_output_type(output_type) { + validate_and_infer_types(); +} + +bool SwiGLU::visit_attributes(ov::AttributeVisitor& visitor) { + visitor.on_attribute("axis", m_axis); + visitor.on_attribute("split_lengths", m_split_lengths); + visitor.on_attribute("output_type", m_output_type); + return true; +} + +void SwiGLU::validate_and_infer_types() { + auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type; + + std::vector input_shapes = { + get_input_partial_shape(0), + ov::PartialShape(ov::Shape{}), + ov::PartialShape(ov::Shape{2}) + }; + + set_output_type(0, output_type, shape_infer(this, input_shapes)[0]); +} + +std::shared_ptr SwiGLU::clone_with_new_inputs(const ov::OutputVector& new_args) const { + check_new_args_count(this, new_args); + return std::make_shared(new_args.at(0), + m_axis, + m_split_lengths, + m_output_type); +} + +std::vector shape_infer(const SwiGLU* op, std::vector input_shapes) { + ov::op::v1::VariadicSplit variadic_split; + std::vector axis = { op->get_axis() }; + std::vector split_lengths = { op->get_split_lengths(), -1 }; + + std::unordered_map const_data; + const_data.emplace(1, ov::Tensor(ov::element::i64, ov::Shape{}, static_cast(axis.data()))); + const_data.emplace(2, ov::Tensor(ov::element::i64, ov::Shape{split_lengths.size()}, static_cast(split_lengths.data()))); + + return ov::op::v1::shape_infer(&variadic_split, input_shapes, ov::make_tensor_accessor(const_data)); +} + +} // namespace op +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations/swiglu_fusion.cpp b/src/plugins/intel_gpu/src/plugin/transformations/swiglu_fusion.cpp new file mode 100644 index 00000000000000..1307cd5f71c1f3 --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/swiglu_fusion.cpp @@ -0,0 +1,94 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "swiglu_fusion.hpp" + +#include "intel_gpu/op/swiglu.hpp" + +#include "openvino/core/rt_info.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/swish.hpp" +#include "openvino/op/variadic_split.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "transformations/utils/utils.hpp" + +namespace ov { +namespace intel_gpu { + +SwiGLUFusion::SwiGLUFusion() { + using namespace ov::pass::pattern; + + auto 
last_dim_static = [](const ov::Output<ov::Node>& output) { + auto out_ps = output.get_node()->get_output_partial_shape(0); + return out_ps.rank().is_static() && out_ps[out_ps.rank().get_length() - 1].is_static() && out_ps.size() <= 5; + }; + + // Detect SwiGLU decomposition pattern + // SwiGLU(Xw, Xv, beta) = (Xw / (1.0 + exp(-beta * Xw))) * Xv + auto data_m = any_input(last_dim_static); + + // VariadicSplit(X, axis, split_lengths) = Xw, Xv + auto axis_const_m = wrap_type<ov::op::v0::Constant>(); + auto split_lengths_const_m = wrap_type<ov::op::v0::Constant>(); + auto variadic_split_m = wrap_type<ov::op::v1::VariadicSplit>({data_m, axis_const_m, split_lengths_const_m}); + variadic_split_m->set_output_size(2); + + // Swish(Xw) = Xw / (1.0 + exp(-beta * Xw)) + auto swish_m = wrap_type<ov::op::v4::Swish>({variadic_split_m->output(0)}); + + // Mul(Xw, Xv) = Swish(Xw) * Xv + auto mul_m = wrap_type<ov::op::v1::Multiply>({swish_m, variadic_split_m->output(1)}); + + ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + OPENVINO_ASSERT(pattern_map.count(mul_m)); + OPENVINO_ASSERT(pattern_map.count(swish_m)); + OPENVINO_ASSERT(pattern_map.count(variadic_split_m)); + OPENVINO_ASSERT(pattern_map.count(split_lengths_const_m)); + OPENVINO_ASSERT(pattern_map.count(axis_const_m)); + auto mul = std::dynamic_pointer_cast<ov::op::v1::Multiply>(pattern_map.at(mul_m).get_node_shared_ptr()); + if (!mul || transformation_callback(mul)) + return false; + if (mul->input_value(1).get_index() != 1) + return false; + + auto variadic_split = std::dynamic_pointer_cast<ov::op::v1::VariadicSplit>(pattern_map.at(variadic_split_m).get_node_shared_ptr()); + auto variadic_split_in_ps = variadic_split->get_input_partial_shape(0); + auto last_dim = variadic_split_in_ps.rank().get_length() - 1; + + auto axis = std::dynamic_pointer_cast<ov::op::v0::Constant>(pattern_map.at(axis_const_m).get_node_shared_ptr()); + bool valid_axis_const_values = ov::op::util::has_constant_value<int64_t>(axis, -1) || + ov::op::util::has_constant_value<int64_t>(axis, last_dim); + if (!valid_axis_const_values) + return false; + auto axis_value = axis->cast_vector<int64_t>()[0]; + + auto split_lengths = std::dynamic_pointer_cast<ov::op::v0::Constant>(pattern_map.at(split_lengths_const_m).get_node_shared_ptr()); + auto split_lengths_value = split_lengths->cast_vector<int64_t>()[0]; + // Allow only case that exactly splits in half along the last dimension + auto split_length = variadic_split_in_ps[last_dim].get_length() / 2; + if (split_lengths_value != split_length) + return false; + + auto data = pattern_map.at(data_m); + auto output_type = m.get_match_root()->get_output_element_type(0); + + auto swiglu = std::make_shared<op::SwiGLU>(data, + axis_value, + split_lengths_value, + output_type); + swiglu->set_friendly_name(m.get_match_root()->get_friendly_name()); + ov::copy_runtime_info(m.get_matched_nodes(), swiglu); + ov::replace_node(m.get_match_root(), swiglu); + + return true; + }; + + auto m = std::make_shared<ov::pass::pattern::Matcher>(mul_m, "SwiGLUFusion"); + this->register_matcher(m, callback); +} + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/transformations/swiglu_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/swiglu_fusion.hpp new file mode 100644 index 00000000000000..fa0d86e7175490 --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/transformations/swiglu_fusion.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" + +namespace ov { +namespace intel_gpu { + +class SwiGLUFusion : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("SwiGLUFusion", "0"); + SwiGLUFusion(); };
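The matcher above only rewrites graphs where VariadicSplit halves the last dimension and the Swish output feeds Multiply's first input; the fused op must then be numerically identical to the decomposed chain. A quick equivalence check of the two formulations (plain C++, beta = 1, sample values):

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Swish with beta = 1, matching the matched pattern.
static float swish(float x) { return x / (1.0f + std::exp(-x)); }
// The fused op's math: gate half through Swish, times the linear half.
static float swiglu(float gate, float linear) { return swish(gate) * linear; }

int main() {
    // One row whose last dimension is split exactly in half: [gate | linear].
    const std::vector<float> row = {0.5f, -1.0f, 2.0f, 3.0f};
    const std::size_t half = row.size() / 2;
    for (std::size_t i = 0; i < half; ++i) {
        const float decomposed = swish(row[i]) * row[half + i];  // VariadicSplit -> Swish -> Multiply
        const float fused = swiglu(row[i], row[half + i]);       // single SwiGLU op
        std::printf("%f %f\n", decomposed, fused);  // pairs are identical
    }
    return 0;
}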
+
+} // namespace intel_gpu
+} // namespace ov
diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp
index 1e90482984d169..f6b3e9ffda7830 100644
--- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp
+++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp
@@ -125,6 +125,7 @@
 #include "plugin/transformations/fc_convert_fusion.hpp"
 #include "plugin/transformations/clamp_fp16_output.hpp"
 #include "plugin/transformations/transpose_matmul_fusion.hpp"
+#include "plugin/transformations/swiglu_fusion.hpp"
 #include "transformations/low_precision/mark_dequantization_subgraph.hpp"
 #include "low_precision/pull_reshape_through_dequantization.hpp"
@@ -705,6 +706,7 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
         manager.register_pass<ov::intel_gpu::FullyConnectedConvertFusion>();
         if (!device_info.supports_immad)
             manager.register_pass<ov::intel_gpu::TransposeMatMulFusion>();
+        manager.register_pass<ov::intel_gpu::SwiGLUFusion>();
 
         // This is supposed to be the last pass to ensure that we don't have name collisions until
         // GPU plugin stops using friendly names for program creation
diff --git a/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp b/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp
index 7696d547ea1c22..dacdcc29fbd74b 100644
--- a/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp
+++ b/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp
@@ -29,6 +29,7 @@ inline std::shared_ptr<ov::Model> make_llm_kv_cache_pattern(ov::Dimension batch
                                                             ov::Dimension n_heads = ov::Dimension::dynamic(),
                                                             ov::Dimension n_features = ov::Dimension::dynamic(),
                                                             ov::element::Type_t element_type = ov::element::f32,
+                                                            int64_t concat_axis = 2,
                                                             bool stateful = false,
                                                             bool fuse_cache_reorder = false,
                                                             bool build_state_initializer = false) {
@@ -73,7 +74,7 @@ inline std::shared_ptr<ov::Model> make_llm_kv_cache_pattern(ov::Dimension batch
     auto transpose_const = ov::op::v0::Constant::create(ov::element::i32, {new_token_size.size()}, {0, 2, 1, 3});
     auto transpose = std::make_shared<ov::op::v1::Transpose>(in_new_token, transpose_const);
-    auto concat = std::make_shared<ov::op::v0::Concat>(ov::OutputVector{concat_input, transpose}, 2);
+    auto concat = std::make_shared<ov::op::v0::Concat>(ov::OutputVector{concat_input, transpose}, concat_axis);
     auto convert = std::make_shared<ov::op::v0::Convert>(concat, element_type);
     auto kv_present = std::make_shared<ov::op::v0::Result>(convert);
     kv_present->set_friendly_name("present_key_values");
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/core_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/core_config.cpp
index 33bc1ad0f1a991..640b73f28ae0ce 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/core_config.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/core_config.cpp
@@ -2,24 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "functional_test_utils/core_config.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
 
-void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
-    std::shared_ptr<InferenceEngine::Core> core = PluginCache::get().ie();
-    ov::element::Type hint = ov::element::f32;
-    for (auto& param : test->GetFunction()->get_parameters()) {
-        if (param->get_output_element_type(0) == ov::element::f16) {
-            hint = ov::element::f16;
-            break;
-        }
-    }
-
-    // Set inference_precision hint to run fp32 model in fp32 runtime precision as default plugin execution precision may vary
-    std::map<std::string, std::string> config = {{"INFERENCE_PRECISION_HINT", hint.get_type_name()}};
-    core->SetConfig(config, ov::test::utils::DEVICE_GPU);
-}
-
 namespace ov {
 namespace test {
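Reviewer note: for readers unfamiliar with the pattern the new pass targets, the matched subgraph computes SwiGLU by splitting the input's last dimension exactly in half, applying Swish to one half, and gating the other half with it; the `split_lengths_value != split_length` guard is what enforces the exact halving. A minimal standalone sketch of the scalar math (beta = 1, matching a Swish node with no beta input; all names here are illustrative, not part of this PR):

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// Scalar form of the fused op: the last dimension is split into (xw, xv);
// the result is Swish(xw) * xv, where Swish(x) = x * sigmoid(beta * x)
//                                              = x / (1 + exp(-beta * x)).
float swiglu_scalar(float xw, float xv, float beta = 1.0f) {
    float swish = xw / (1.0f + std::exp(-beta * xw));
    return swish * xv;
}

int main() {
    // One "row" whose last dimension (6) is split into two halves of 3.
    std::vector<float> row = {0.5f, -1.0f, 2.0f, 1.0f, 0.25f, -0.5f};
    const size_t half = row.size() / 2;
    for (size_t i = 0; i < half; ++i)
        std::printf("%f\n", swiglu_scalar(row[i], row[i + half]));
    return 0;
}
```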
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_prior_grid_generator.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_prior_grid_generator.cpp
index 4abaf5b1198e77..b181d2f0c97877 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_prior_grid_generator.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/experimental_detectron_prior_grid_generator.cpp
@@ -71,4 +71,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_ExperimentalDetectronPriorGridGenerator_f16,
                          testing::ValuesIn({ov::element::Type_t::f16}),
                          testing::Values(ov::test::utils::DEVICE_GPU)),
                          ExperimentalDetectronPriorGridGeneratorLayerTest::getTestCaseName);
-} // namespace
+} // namespace
\ No newline at end of file
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_normalization.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_normalization.cpp
index 72bb27eba05c54..8f222cc2f36c46 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_normalization.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_normalization.cpp
@@ -1,9 +1,9 @@
 // Copyright (C) 2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "single_layer_tests/group_normalization.hpp"
+#include "single_op_tests/group_normalization.hpp"
 
-using namespace ov::test::subgraph;
+using namespace ov::test;
 
 namespace {
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp
index 92da7f1b0b44c8..00b844defb0686 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp
@@ -4,7 +4,7 @@
 
 #include <vector>
 
-#include "single_layer_tests/softmax.hpp"
+#include "single_op_tests/softmax.hpp"
 #include "common_test_utils/test_constants.hpp"
 
 using namespace ov::test::subgraph;
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp
index 1e164c42fda131..213464a594c795 100644
--- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp
+++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/ctc_greedy_decoder_seq_len.cpp
@@ -14,7 +14,6 @@
 #include "common_test_utils/test_constants.hpp"
 #include "common_test_utils/ov_tensor_utils.hpp"
 
-using namespace InferenceEngine;
 using namespace ov::test;
 
 namespace GPULayerTestsDefinitions {
diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp
index 59da844cab2856..e3eb1cec066082 100644
--- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp
+++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp
@@ -255,6 +255,7 @@ class KVCacheTests: public ::testing::Test {
                                                 bool fuse_cache_reorder,
                                                 bool build_state_initializer,
                                                 size_t batch = 1,
+                                                int64_t concat_axis = 2,
                                                 ov::element::Type model_element_type = ov::element::f16,
                                                 size_t num_iter = 10) {
 #if defined(ANDROID)
@@ -291,6 +292,7 @@ class KVCacheTests: public ::testing::Test {
                                                              n_heads,
                                                              n_features,
                                                              element_type,
+                                                             concat_axis,
                                                              stateful,
                                                              fuse_cache_reorder,
                                                              build_state_initializer && stateful);
@@ -298,6 +300,7 @@ class KVCacheTests: public ::testing::Test {
                                                                  n_heads,
                                                                  n_features,
                                                                  element_type,
+                                                                 concat_axis,
                                                                  !stateful,
                                                                  fuse_cache_reorder,
                                                                  build_state_initializer && !stateful);
@@ -458,8 +461,8 @@ TEST_F(KVCacheTests, smoke_multipleIterations_cached) {
     this->test_smoke_multipleIterations(true);
 }
 
-TEST_F(KVCacheTests, smoke_multipleIterations_stateful_no_gather_no_initializer) {
-    this->test_smoke_multipleIterations_stateful(false, false, false);
+TEST_F(KVCacheTests, smoke_multipleIterations_stateful_no_gather_no_initializer_concat_neg_axis) {
+    this->test_smoke_multipleIterations_stateful(false, false, false, 1, -2);
 }
 
 TEST_F(KVCacheTests, smoke_multipleIterations_stateful_no_gather_no_initializer_cached) {
@@ -475,7 +478,7 @@ TEST_F(KVCacheTests, smoke_multipleIterations_stateful_gather_with_initializer_c
 }
 
 TEST_F(KVCacheTests, smoke_multipleIterations_stateful_gather_with_initializer_f32) {
-    this->test_smoke_multipleIterations_stateful(false, true, true, 1, ov::element::f32);
+    this->test_smoke_multipleIterations_stateful(false, true, true, 1, 2, ov::element::f32);
 }
 
 TEST_F(KVCacheTests, smoke_multipleIterations_stateful_gather_with_initializer_batch_3) {
@@ -483,7 +486,7 @@ TEST_F(KVCacheTests, smoke_multipleIterations_stateful_gather_with_initializer_b
 }
 
 TEST_F(KVCacheTests, smoke_multipleIterations_stateful_same_shape_after_reset) {
-    this->test_smoke_multipleIterations_stateful(false, false, false, 1, ov::element::f16, 0);
+    this->test_smoke_multipleIterations_stateful(false, false, false, 1, 2, ov::element::f16, 0);
 }
 
 } // namespace
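Reviewer note: the new `concat_axis` parameter is exercised with `-2` above. Negative axes resolve against the tensor rank, so for the rank-4 KV-cache layout `-2` addresses the same dimension as the previous hard-coded `2`. A small sketch of the normalization rule (hypothetical helper, not code from this PR):

```cpp
#include <cassert>
#include <cstdint>

// Resolve a possibly-negative axis against a tensor rank, e.g. axis -2 with
// rank 4 selects dimension 2 -- the dimension the stateful KV-cache tests
// concatenated on before this change.
int64_t normalize_axis(int64_t axis, int64_t rank) {
    assert(rank > 0 && axis >= -rank && axis < rank);
    return axis < 0 ? axis + rank : axis;
}

int main() {
    assert(normalize_axis(-2, 4) == 2);  // [batch, n_heads, seq_len, head_size]
    assert(normalize_axis(2, 4) == 2);   // positive axes pass through unchanged
    return 0;
}
```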
diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/swiglu_fusion.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/swiglu_fusion.cpp
new file mode 100644
index 00000000000000..3b209010fe7961
--- /dev/null
+++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/swiglu_fusion.cpp
@@ -0,0 +1,123 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/file_utils.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
+
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/swish.hpp"
+#include "openvino/op/variadic_split.hpp"
+
+namespace {
+using ov::test::InputShape;
+
+using SwiGLUFusionParams = std::tuple<std::vector<InputShape>,  // input shapes
+                                      ov::element::Type>;       // input precision
+
+class SwiGLUFusion : public testing::WithParamInterface<SwiGLUFusionParams>,
+                     virtual public ov::test::SubgraphBaseTest {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<SwiGLUFusionParams> obj) {
+        std::vector<InputShape> input_shapes;
+        ov::element::Type input_precision;
+
+        std::tie(input_shapes, input_precision) = obj.param;
+
+        std::ostringstream result;
+        result << "IS=(";
+        for (const auto& shape : input_shapes) {
+            result << ov::test::utils::partialShape2str({shape.first}) << "_";
+        }
+        result << ")_TS=";
+        for (const auto& shape : input_shapes) {
+            result << "(";
+            if (!shape.second.empty()) {
+                auto itr = shape.second.begin();
+                do {
+                    result << ov::test::utils::vec2str(*itr);
+                } while (++itr != shape.second.end() && result << "_");
+            }
+            result << ")_";
+        }
+        result << "input_precision=" << input_precision;
+        return result.str();
+    }
+
+protected:
+    std::shared_ptr<ov::Model> init_subgraph(std::vector<ov::PartialShape>& input_shapes,
+                                             const ov::Shape& target_shape,
+                                             const ov::element::Type input_precision) {
+        ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(input_precision, input_shapes[0])};
+
+        // VariadicSplit(X, axis, split_lengths) = Xw, Xv
+        auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {-1});
+        auto split_lengths_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {48, -1});
+        auto variadic_split = std::make_shared<ov::op::v1::VariadicSplit>(params[0], axis_const, split_lengths_const);
+
+        // Swish(Xw) = Xw * (1.0 + exp(-beta * Xw))
+        auto swish = std::make_shared<ov::op::v4::Swish>(variadic_split->output(0));
+
+        // Mul(Xw, Xv) = Swish(Xw) * Xv
+        auto mul = std::make_shared<ov::op::v1::Multiply>(swish, variadic_split->output(1));
+
+        return std::make_shared<ov::Model>(ov::NodeVector{mul}, params, "SwiGLUFusion");
+    }
+
+    void SetUp() override {
+        targetDevice = ov::test::utils::DEVICE_GPU;
+
+        std::vector<InputShape> input_shapes;
+        ov::element::Type input_precision;
+
+        std::tie(input_shapes, input_precision) = GetParam();
+
+        init_input_shapes(input_shapes);
+
+        inType = outType = input_precision;
+
+        function = init_subgraph(inputDynamicShapes, targetStaticShapes.front().front(), input_precision);
+    }
+};
+
+TEST_P(SwiGLUFusion, Inference) {
+    run();
+}
+
+TEST_P(SwiGLUFusion, Inference_cached) {
+    std::stringstream ss;
+    ss << "gpu_model_cache_" << std::hash<std::string>{}(
+          std::string(::testing::UnitTest::GetInstance()->current_test_info()->test_suite_name()) +
+          std::string(::testing::UnitTest::GetInstance()->current_test_info()->name()));
+    std::string cacheDirName = ss.str();
+    {
+        ov::test::utils::removeFilesWithExt(cacheDirName, "blob");
+        ov::test::utils::removeFilesWithExt(cacheDirName, "cl_cache");
+        ov::test::utils::removeDir(cacheDirName);
+        core->set_property(ov::cache_dir(cacheDirName));
+        compile_model();
+    }
+    {
+        run();
+        ov::test::utils::removeFilesWithExt(cacheDirName, "blob");
+        ov::test::utils::removeFilesWithExt(cacheDirName, "cl_cache");
+        ov::test::utils::removeDir(cacheDirName);
+    }
+}
+
+const std::vector<ov::element::Type> input_precisions = {ov::element::f32, ov::element::f16};
+
+const std::vector<std::vector<InputShape>> input_shapes_dyn = {
+    {{{-1, -1, 96}, {{20, 1, 96}}}},
+    {{{-1, -1, -1}, {{1, 1, 96}}}},
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_SwiGLUFusion_basic,
+                         SwiGLUFusion,
+                         ::testing::Combine(::testing::ValuesIn(input_shapes_dyn),
+                                            ::testing::ValuesIn(input_precisions)),
+                         SwiGLUFusion::getTestCaseName);
+} // namespace
diff --git a/src/plugins/intel_gpu/tests/unit/passes/kernels_cache_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/kernels_cache_test.cpp
index bbdb553d6098a8..66788a16f149bf 100644
--- a/src/plugins/intel_gpu/tests/unit/passes/kernels_cache_test.cpp
+++ b/src/plugins/intel_gpu/tests/unit/passes/kernels_cache_test.cpp
@@ -94,8 +94,7 @@ TEST(kernels_cache, reuse_kernel_for_static_model_01) {
 TEST(kernels_cache, sub_kernel_ordering_test) {
     auto& engine = get_test_engine();
     ExecutionConfig config = get_test_default_config(engine);
-    ov::threading::IStreamsExecutor::Config task_executor_config("sub_kernel_ordering_test", 1);
-    task_executor_config._streams = 2;
+    ov::threading::IStreamsExecutor::Config task_executor_config("sub_kernel_ordering_test", 2);
     auto executor = std::make_shared<ov::threading::CPUStreamsExecutor>(task_executor_config);
     const size_t num_kernels = 9;
     auto _kernels_cache = std::unique_ptr<kernels_cache>(new kernels_cache(engine, config, 0, executor));
diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/multiple_streams_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/multiple_streams_gpu_test.cpp
index 7717fa2b925b64..2fb0a6518dd941 100644
--- a/src/plugins/intel_gpu/tests/unit/test_cases/multiple_streams_gpu_test.cpp
+++ b/src/plugins/intel_gpu/tests/unit/test_cases/multiple_streams_gpu_test.cpp
@@ -21,8 +21,7 @@ using namespace ::tests;
 TEST(multistream_gpu, basic) {
     const int num_streams = 2;
-    auto task_config = ov::threading::IStreamsExecutor::Config();
-    task_config._streams = num_streams;
+    auto task_config = ov::threading::IStreamsExecutor::Config{"gpu_test", num_streams};
     auto task_executor = std::make_shared<ov::threading::CPUStreamsExecutor>(task_config);
     auto& engine = get_test_engine();
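Reviewer note: both unit-test hunks replace mutation of the public `_streams` field with the `(name, streams)` constructor, in line with the accessor-based executor-config API used elsewhere in this PR (`get_streams()` and friends). A hedged sketch of the before/after shape, assuming that two-argument constructor:

```cpp
#include "openvino/runtime/threading/istreams_executor.hpp"

void configure_executor() {
    const int num_streams = 2;

    // Old style (removed by this PR): default-construct, then poke fields.
    //   ov::threading::IStreamsExecutor::Config cfg;
    //   cfg._streams = num_streams;

    // New style: name and stream count go through the constructor; values
    // are read back via accessors such as get_streams().
    ov::threading::IStreamsExecutor::Config cfg{"gpu_test", num_streams};
    (void)cfg;
}
```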
diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/swiglu_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/swiglu_gpu_test.cpp
new file mode 100644
index 00000000000000..c2ac561127046a
--- /dev/null
+++ b/src/plugins/intel_gpu/tests/unit/test_cases/swiglu_gpu_test.cpp
@@ -0,0 +1,91 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "test_utils.h"
+
+#include <intel_gpu/primitives/input_layout.hpp>
+#include <intel_gpu/primitives/swiglu.hpp>
+#include <intel_gpu/runtime/memory.hpp>
+#include "swiglu_inst.h"
+
+using namespace cldnn;
+using namespace ::tests;
+
+class swiglu_gpu_test : public ::testing::TestWithParam<cldnn::format> {};
+
+template <typename T>
+void swiglu_ref(const memory::ptr input, memory::ptr output, int32_t split_length) {
+    auto input_layout = input->get_layout();
+    auto output_layout = output->get_layout();
+
+    uint32_t batch_size = output_layout.batch();
+    uint32_t feature_size = output_layout.feature();
+    uint32_t y_size = output_layout.spatial(1);
+    uint32_t x_size = output_layout.spatial(0);
+
+    cldnn::mem_lock<T> src(input, get_test_stream());
+    cldnn::mem_lock<T> dst(output, get_test_stream());
+
+    T res;
+    for (uint32_t b = 0; b < batch_size; ++b) {
+        for (uint32_t f = 0; f < feature_size; ++f) {
+            for (uint32_t y = 0; y < y_size; ++y) {
+                for (uint32_t x = 0; x < x_size; ++x) {
+                    auto tensor_src = tensor(batch(b), feature(f), spatial(x, y, 0, 0));
+                    auto tensor_dst = tensor(batch(b), feature(f), spatial(x, y, 0, 0));
+                    size_t src_offset = input_layout.get_linear_offset(tensor_src);
+                    size_t dst_offset = output_layout.get_linear_offset(tensor_dst);
+                    res = src[src_offset];
+                    res = (res / (static_cast<T>(1) + (std::exp((-(static_cast<T>(1) * res))))));
+                    res *= src[src_offset + static_cast<size_t>(split_length)];
+                    dst[dst_offset] = res;
+                }
+            }
+        }
+    }
+}
+
+TEST(swiglu_gpu_test, swiglu_test_bfyx_dyn) {
+    auto& engine = get_test_engine();
+
+    auto input_layout_dynamic = layout{ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 6},
+                                       data_types::f32, format::bfyx};
+    auto input_mem = engine.allocate_memory({ov::PartialShape{2, 1, 6}, data_types::f32, format::bfyx});
+    auto output_ref = engine.allocate_memory({ov::PartialShape{2, 1, 3}, data_types::f32, format::bfyx});
+
+    set_values(input_mem, {
+        0.049011f, 0.000260f, -0.176636f, 0.016098f, 0.279297f, 0.036377f,
+        -0.127686f, 0.066650f, -0.394043f, -0.135620f, 0.040985f, -0.011589f
+    });
+
+    swiglu_ref<float>(input_mem, output_ref, 3);
+
+    topology topology;
+    topology.add(input_layout("input", input_layout_dynamic));
+    topology.add(swiglu("swiglu", input_info("input"), -1, 3, tensor()));
+
+    ExecutionConfig config = get_test_default_config(engine);
+    config.set_property(ov::intel_gpu::allow_new_shape_infer(true));
+
+    network network(engine, topology, config);
+
+    network.set_input_data("input", input_mem);
+
+    auto inst = network.get_primitive("swiglu");
+    auto impl = inst->get_impl();
+    ASSERT_TRUE(impl != nullptr);
+    ASSERT_TRUE(impl->is_dynamic());
+
+    auto outputs = network.execute();
+    ASSERT_EQ(outputs.size(), size_t(1));
+    ASSERT_EQ(outputs.begin()->first, "swiglu");
+
+    auto output = outputs.begin()->second.get_memory();
+    cldnn::mem_lock<float> output_ptr(output, get_test_stream());
+    cldnn::mem_lock<float> output_ref_ptr(output_ref, get_test_stream());
+
+    for (unsigned int i = 0; i < output_ref->count(); ++i) {
+        EXPECT_NEAR(output_ptr[i], output_ref_ptr[i], 1e-3);
+    }
+}
diff --git a/src/plugins/intel_gpu/tests/unit/transformations/swiglu_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/swiglu_fusion_test.cpp
new file mode 100644
index 00000000000000..6b87a791bdf1e3
--- /dev/null
+++ b/src/plugins/intel_gpu/tests/unit/transformations/swiglu_fusion_test.cpp
@@ -0,0 +1,97 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <string>
+#include <memory>
+
+#include <openvino/core/model.hpp>
+#include <openvino/pass/manager.hpp>
+#include "common_test_utils/ov_test_utils.hpp"
+#include <transformations/utils/utils.hpp>
+
+#include <plugin/transformations/swiglu_fusion.hpp>
+
+#include "openvino/op/constant.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/swish.hpp"
+#include "openvino/op/variadic_split.hpp"
+#include "intel_gpu/op/swiglu.hpp"
+
+using namespace testing;
+using namespace ov::intel_gpu;
+
+TEST_F(TransformationTestsF, SwiGLUFusionTest1) {
+    {
+        auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{ 2, 1, 6 });
+        auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {-1});
+        auto split_lengths_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {3, -1});
+        auto variadic_split = std::make_shared<ov::op::v1::VariadicSplit>(input, axis_const, split_lengths_const);
+        auto swish = std::make_shared<ov::op::v4::Swish>(variadic_split->output(0));
+        auto mul = std::make_shared<ov::op::v1::Multiply>(swish, variadic_split->output(1));
+
+        model = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
+        manager.register_pass<SwiGLUFusion>();
+    }
+    {
+        int64_t axis = -1;
+        int64_t split_lengths = 3;
+        auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{ 2, 1, 6 });
+        auto swiglu = std::make_shared<ov::intel_gpu::op::SwiGLU>(input, axis, split_lengths, ov::element::f16);
+
+        model_ref = std::make_shared<ov::Model>(ov::NodeVector{swiglu}, ov::ParameterVector{input});
+    }
+}
+
+TEST_F(TransformationTestsF, SwiGLUFusionTest2) {
+    {
+        auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{ -1, -1, 6 });
+        auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
+        auto split_lengths_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {3, 3});
+        auto variadic_split = std::make_shared<ov::op::v1::VariadicSplit>(input, axis_const, split_lengths_const);
+        auto swish = std::make_shared<ov::op::v4::Swish>(variadic_split->output(0));
+        auto mul = std::make_shared<ov::op::v1::Multiply>(swish, variadic_split->output(1));
+
+        model = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
+        manager.register_pass<SwiGLUFusion>();
+    }
+}
+
+TEST_F(TransformationTestsF, SwiGLUFusionTest3) {
+    {
+        auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{ -1, -1, 6 });
+        auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {-1});
+        auto split_lengths_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {3, -1});
+        auto variadic_split = std::make_shared<ov::op::v1::VariadicSplit>(input, axis_const, split_lengths_const);
+        auto swish = std::make_shared<ov::op::v4::Swish>(variadic_split->output(0));
+        auto mul = std::make_shared<ov::op::v1::Multiply>(swish, variadic_split->output(1));
+
+        model = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
+        manager.register_pass<SwiGLUFusion>();
+    }
+    {
+        int64_t axis = -1;
+        int64_t split_lengths = 3;
+        auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{ -1, -1, 6 });
+        auto swiglu = std::make_shared<ov::intel_gpu::op::SwiGLU>(input, axis, split_lengths, ov::element::f16);
+
+        model_ref = std::make_shared<ov::Model>(ov::NodeVector{swiglu}, ov::ParameterVector{input});
+    }
+}
+
+TEST_F(TransformationTestsF, SwiGLUFusionTest4) {
+    {
+        auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::PartialShape{ -1, -1, 6 });
+        auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {-1});
+        auto split_lengths_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {3, -1});
+        auto variadic_split = std::make_shared<ov::op::v1::VariadicSplit>(input, axis_const, split_lengths_const);
+        auto swish = std::make_shared<ov::op::v4::Swish>(variadic_split->output(0));
+        auto mul = std::make_shared<ov::op::v1::Multiply>(swish, variadic_split->output(0));
+
+        model = std::make_shared<ov::Model>(ov::NodeVector{mul}, ov::ParameterVector{input});
+        manager.register_pass<SwiGLUFusion>();
+    }
+}
diff --git a/src/plugins/template/src/compiled_model.cpp b/src/plugins/template/src/compiled_model.cpp
index 02fa9ec36e485a..4f79900fb2c950 100644
--- a/src/plugins/template/src/compiled_model.cpp
+++ b/src/plugins/template/src/compiled_model.cpp
@@ -147,7 +147,7 @@ ov::Any ov::template_plugin::CompiledModel::get_property(const std::string& name) const {
         return decltype(ov::execution_devices)::value_type{get_plugin()->get_device_name() + "." +
                                                            std::to_string(m_cfg.device_id)};
     } else if (ov::optimal_number_of_infer_requests == name) {
-        unsigned int value = m_cfg.streams_executor_config._streams;
+        unsigned int value = m_cfg.streams;
         return decltype(ov::optimal_number_of_infer_requests)::value_type(value);
     } else if (ov::supported_properties == name) {
         auto ro_properties = default_ro_properties();
diff --git a/src/plugins/template/src/config.cpp b/src/plugins/template/src/config.cpp
index 39e51ad262b465..f83690e6b9fe0c 100644
--- a/src/plugins/template/src/config.cpp
+++ b/src/plugins/template/src/config.cpp
@@ -14,9 +14,6 @@ Configuration::Configuration() {}
 
 Configuration::Configuration(const ov::AnyMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) {
     *this = defaultCfg;
-    // If plugin needs to use ov::threading::StreamsExecutor it should be able to process its configuration
-    auto streamExecutorConfigKeys =
-        streams_executor_config.get_property(ov::supported_properties.name()).as<std::vector<std::string>>();
     for (auto&& c : config) {
         const auto& key = c.first;
         const auto& value = c.second;
@@ -25,9 +22,39 @@ Configuration::Configuration(const ov::AnyMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) {
             disable_transformations = value.as<bool>();
         } else if (ov::internal::exclusive_async_requests == key) {
             exclusive_async_requests = value.as<bool>();
-        } else if (streamExecutorConfigKeys.end() !=
-                   std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) {
-            streams_executor_config.set_property(key, value);
+        } else if (ov::num_streams.name() == key) {
+            ov::Any val = value.as<std::string>();
+            auto streams_value = val.as<ov::streams::Num>();
+            if (streams_value.num >= 0) {
+                streams = streams_value.num;
+            } else if (streams_value == ov::streams::NUMA) {
+                streams = 1;
+            } else if (streams_value == ov::streams::AUTO) {
+                streams = ov::threading::IStreamsExecutor::Config::get_default_num_streams();
+            } else {
+                OPENVINO_THROW("Wrong value for property key ",
+                               key,
+                               ". Expected non negative numbers (#streams) or ",
+                               "ov::streams::NUMA|ov::streams::AUTO, Got: ",
+                               value.as<std::string>());
+            }
+        } else if (ov::inference_num_threads.name() == key) {
+            int val;
+            try {
+                val = value.as<int>();
+            } catch (const std::exception&) {
+                OPENVINO_THROW("Wrong value for property key ", key, ". Expected only positive numbers (#threads)");
+            }
+            if (val < 0) {
+                OPENVINO_THROW("Wrong value for property key ", key, ". Expected only positive numbers (#threads)");
+            }
+            threads = val;
+        } else if (ov::internal::threads_per_stream.name() == key) {
+            try {
+                threads_per_stream = value.as<int>();
+            } catch (const std::exception&) {
+                OPENVINO_THROW("Wrong value ", value.as<std::string>(), "for property key ", key);
+            }
         } else if (ov::device::id == key) {
             device_id = std::stoi(value.as<std::string>());
             OPENVINO_ASSERT(device_id <= 0, "Device ID ", device_id, " is not supported");
@@ -45,8 +72,6 @@ Configuration::Configuration(const ov::AnyMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) {
             OPENVINO_THROW("Unsupported execution mode, should be ACCURACY or PERFORMANCE, but was: ",
                            value.as<std::string>());
         }
-    } else if (ov::num_streams == key) {
-        streams_executor_config.set_property(key, value);
     } else if (ov::hint::num_requests == key) {
         auto tmp_val = value.as<std::string>();
         int tmp_i = std::stoi(tmp_val);
@@ -65,12 +90,7 @@ Configuration::Configuration(const ov::AnyMap& config, const Configuration& defaultCfg, bool throwOnUnsupported) {
 }
 
 ov::Any Configuration::Get(const std::string& name) const {
-    auto streamExecutorConfigKeys =
-        streams_executor_config.get_property(ov::supported_properties.name()).as<std::vector<std::string>>();
-    if ((streamExecutorConfigKeys.end() !=
-         std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), name))) {
-        return streams_executor_config.get_property(name);
-    } else if (name == ov::device::id) {
+    if (name == ov::device::id) {
         return {std::to_string(device_id)};
     } else if (name == ov::enable_profiling) {
         return {perf_count};
@@ -79,13 +99,11 @@ ov::Any Configuration::Get(const std::string& name) const {
     } else if (name == ov::template_plugin::disable_transformations) {
         return {disable_transformations};
     } else if (name == ov::num_streams) {
-        return {std::to_string(streams_executor_config._streams)};
-    } else if (name == ov::internal::cpu_bind_thread) {
-        return streams_executor_config.get_property(name);
+        return {std::to_string(streams)};
    } else if (name == ov::inference_num_threads) {
-        return {std::to_string(streams_executor_config._threads)};
+        return {std::to_string(threads)};
    } else if (name == ov::internal::threads_per_stream) {
-        return {std::to_string(streams_executor_config._threadsPerStream)};
+        return {std::to_string(threads_per_stream)};
    } else if (name == ov::hint::performance_mode) {
        return performance_mode;
    } else if (name == ov::hint::inference_precision) {
diff --git a/src/plugins/template/src/config.hpp b/src/plugins/template/src/config.hpp
index 918e2b514f724a..ada22f8835d8bd 100644
--- a/src/plugins/template/src/config.hpp
+++ b/src/plugins/template/src/config.hpp
@@ -33,6 +33,9 @@ struct Configuration {
     int device_id = 0;
     bool perf_count = false;
     ov::threading::IStreamsExecutor::Config streams_executor_config;
+    int streams = 1;
+    int threads = 0;
+    int threads_per_stream = 0;
     ov::hint::PerformanceMode performance_mode = ov::hint::PerformanceMode::LATENCY;
     uint32_t num_requests = 1;
     bool disable_transformations = false;
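Reviewer note: the template plugin now interprets `ov::num_streams` itself rather than forwarding the key to `streams_executor_config`. A condensed sketch of the special-value handling added above (the free function is for illustration only, not part of the PR):

```cpp
#include "openvino/core/except.hpp"
#include "openvino/runtime/properties.hpp"
#include "openvino/runtime/threading/istreams_executor.hpp"

// Mirror of the parsing logic added to Configuration: non-negative values
// are taken literally, NUMA collapses to a single stream, and AUTO asks the
// executor config for a platform default.
int parse_num_streams(const ov::Any& value) {
    auto streams_value = value.as<ov::streams::Num>();
    if (streams_value.num >= 0)
        return streams_value.num;
    if (streams_value == ov::streams::NUMA)
        return 1;
    if (streams_value == ov::streams::AUTO)
        return ov::threading::IStreamsExecutor::Config::get_default_num_streams();
    OPENVINO_THROW("Expected a non-negative stream count or ov::streams::NUMA|ov::streams::AUTO");
}
```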
diff --git a/src/plugins/template/src/plugin.cpp b/src/plugins/template/src/plugin.cpp
index 941fd8d2d115a4..ad98e81aec2c09 100644
--- a/src/plugins/template/src/plugin.cpp
+++ b/src/plugins/template/src/plugin.cpp
@@ -99,9 +99,14 @@ std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::compile_model(
     OV_ITT_SCOPED_TASK(itt::domains::TemplatePlugin, "Plugin::compile_model");
 
     auto fullConfig = Configuration{properties, m_cfg};
+    fullConfig.streams_executor_config = ov::threading::IStreamsExecutor::Config{stream_executor_name,
+                                                                                 fullConfig.streams,
+                                                                                 fullConfig.threads_per_stream};
     auto streamsExecutorConfig =
         ov::threading::IStreamsExecutor::Config::make_default_multi_threaded(fullConfig.streams_executor_config);
-    streamsExecutorConfig._name = stream_executor_name;
+    fullConfig.streams = streamsExecutorConfig.get_streams();
+    fullConfig.threads = streamsExecutorConfig.get_threads();
+    fullConfig.threads_per_stream = streamsExecutorConfig.get_threads_per_stream();
     auto compiled_model = std::make_shared<CompiledModel>(
         model->clone(),
         shared_from_this(),
@@ -138,6 +143,9 @@ std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::import_model(
     }
 
     auto fullConfig = Configuration{_properties, m_cfg};
+    fullConfig.streams_executor_config = ov::threading::IStreamsExecutor::Config{stream_executor_name,
+                                                                                 fullConfig.streams,
+                                                                                 fullConfig.threads_per_stream};
     // read XML content
     std::string xmlString;
     std::uint64_t dataSize = 0;
@@ -156,7 +164,9 @@ std::shared_ptr<ov::ICompiledModel> ov::template_plugin::Plugin::import_model(
     auto ov_model = get_core()->read_model(xmlString, weights);
     auto streamsExecutorConfig =
         ov::threading::IStreamsExecutor::Config::make_default_multi_threaded(fullConfig.streams_executor_config);
-    streamsExecutorConfig._name = stream_executor_name;
+    fullConfig.streams = streamsExecutorConfig.get_streams();
+    fullConfig.threads = streamsExecutorConfig.get_threads();
+    fullConfig.threads_per_stream = streamsExecutorConfig.get_threads_per_stream();
     auto compiled_model = std::make_shared<CompiledModel>(ov_model,
                                                           shared_from_this(),
diff --git a/src/plugins/template/tests/functional/core_config.cpp b/src/plugins/template/tests/functional/core_config.cpp
index 2c54a0d17b2f8d..6c1c4e433b08e3 100644
--- a/src/plugins/template/tests/functional/core_config.cpp
+++ b/src/plugins/template/tests/functional/core_config.cpp
@@ -2,12 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "functional_test_utils/core_config.hpp"
-
 #include "shared_test_classes/base/ov_subgraph.hpp"
 
-void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}
-
 namespace ov {
 namespace test {
diff --git a/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp b/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp
index 845c5c0b15b1c8..54aa3d750fb7e5 100644
--- a/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp
+++ b/src/plugins/template/tests/functional/op_reference/base_reference_test.cpp
@@ -5,6 +5,7 @@
 
 #include <gtest/gtest.h>
 
+#include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp"
 #include "functional_test_utils/ov_plugin_cache.hpp"
 #include "openvino/core/type/element_type.hpp"
 #include "openvino/runtime/allocator.hpp"
@@ -38,6 +39,9 @@ void CommonReferenceTest::FillInputs() {
     for (size_t i = 0; i < functionParams.size(); i++) {
         const auto& param = functionParams[i];
+        if (param->get_element_type() == ov::element::string) {
+            continue;
+        }
 
         ov::Tensor blob;
         if (param->get_partial_shape().is_static()) {
@@ -211,6 +215,9 @@ void CommonReferenceTest::ValidateBlobs(const ov::Tensor& refBlob,
                                         threshold,
                                         abs_threshold);
         break;
+    case ov::element::string:
+        ov::test::utils::compare_str(refBlob, outBlob);
+        break;
     default:
         FAIL() << "Comparator for " << element_type << " element type isn't supported";
     }
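Reviewer note: the string paths added here exist because `ov::element::string` tensors hold `std::string` objects rather than raw bytes, so the random-fill path and the numeric comparators cannot be applied to them. A minimal sketch of creating and reading such a tensor, assuming the string-tensor API available in current OpenVINO:

```cpp
#include <iostream>
#include <string>

#include "openvino/runtime/tensor.hpp"

int main() {
    // A 2-element string tensor; elements are std::string objects, so they
    // must be assigned element-wise (as std::copy_n does in CreateTensor),
    // never memcpy'd.
    ov::Tensor tensor(ov::element::string, ov::Shape{2});
    std::string* data = tensor.data<std::string>();
    data[0] = "A";
    data[1] = "B c";
    for (size_t i = 0; i < tensor.get_size(); ++i)
        std::cout << data[i] << "\n";
    return 0;
}
```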
diff --git a/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp b/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp
index 4c2c6711d0ae63..439736101b1cfd 100644
--- a/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp
+++ b/src/plugins/template/tests/functional/op_reference/base_reference_test.hpp
@@ -58,11 +58,14 @@ ov::Tensor CreateTensor(const ov::element::Type& element_type, const std::vector<T>& values) {
 
 template <typename T>
 ov::Tensor CreateTensor(const ov::Shape& shape, const ov::element::Type& element_type, const std::vector<T>& values) {
     ov::Tensor tensor{element_type, shape};
-    size_t size = sizeof(T) * values.size();
-    if (tensor.get_byte_size() < size)
-        size = tensor.get_byte_size();
-    std::memcpy(tensor.data(), values.data(), size);
-
+    if (element_type == ov::element::string) {
+        std::copy_n(values.data(), shape_size(shape), tensor.data<T>());
+    } else {
+        size_t size = sizeof(T) * values.size();
+        if (tensor.get_byte_size() < size)
+            size = tensor.get_byte_size();
+        std::memcpy(tensor.data(), values.data(), size);
+    }
     return tensor;
 }
diff --git a/src/plugins/template/tests/functional/op_reference/gather.cpp b/src/plugins/template/tests/functional/op_reference/gather.cpp
index 99b1c82a8d91d8..409a66223c4d5e 100644
--- a/src/plugins/template/tests/functional/op_reference/gather.cpp
+++ b/src/plugins/template/tests/functional/op_reference/gather.cpp
@@ -441,6 +441,62 @@ std::vector<GatherParams> generateParamsV7() {
 
+template <element::Type_t ET, element::Type_t ET_I, element::Type_t ET_A>
+std::vector<GatherParams> generateParamsStringValue() {
+    using T = typename element_type_traits<ET>::value_type;
+    using T_I = typename element_type_traits<ET_I>::value_type;
+    using T_A = typename element_type_traits<ET_A>::value_type;
+    std::vector<GatherParams> params{
+        {reference_tests::Tensor(ET, {2}, std::vector<T>{"A", "B c"}),
+         reference_tests::Tensor(ET_I, {1}, std::vector<T_I>{1}),
+         reference_tests::Tensor(ET_A, {1}, std::vector<T_A>{0}),
+         0,
+         reference_tests::Tensor(ET, {1}, std::vector<T>{"B c"}),
+         "gather_string_1D_data"},
+        {reference_tests::Tensor(ET, {2, 2}, std::vector<T>{"A", "B c", "d.Ef", " G h,i;"}),
+         reference_tests::Tensor(ET_I, {1}, std::vector<T_I>{1}),
+         reference_tests::Tensor(ET_A, {1}, std::vector<T_A>{0}),
+         0,
+         reference_tests::Tensor(ET, {1, 2}, std::vector<T>{"d.Ef", " G h,i;"}),
+         "gather_string_2D_data"},
+        {reference_tests::Tensor(ET, {2, 2}, std::vector<T>{"A", "B c", "d.Ef", " G h,i;"}),
+         reference_tests::Tensor(ET_I, {2, 1}, std::vector<T_I>{0, 1}),
+         reference_tests::Tensor(ET_A, {1}, std::vector<T_A>{1}),
+         1,
+         reference_tests::Tensor(ET, {2, 1}, std::vector<T>{"A", " G h,i;"}),
+         "gather_string_2D_data_batch_dims_1"},
+        {reference_tests::Tensor(ET, {2, 2}, std::vector<T>{"A", "B c", "d.Ef", " G h,i;"}),
+         reference_tests::Tensor(ET_I, {2, 1}, std::vector<T_I>{1, 0}),
+         reference_tests::Tensor(ET_A, {1}, std::vector<T_A>{1}),
+         1,
+         reference_tests::Tensor(ET, {2, 1}, std::vector<T>{"B c", "d.Ef"}),
+         "gather_string_2D_data_batch_dims_1_reversed"},
+        {reference_tests::Tensor(ET, {2, 1, 2}, std::vector<T>{"A", "B c", "d.Ef", " G h,i;"}),
+         reference_tests::Tensor(ET_I, {2, 1, 2}, std::vector<T_I>{0, 1, 1, 0}),
+         reference_tests::Tensor(ET_A, {1}, std::vector<T_A>{2}),
+         2,
+         reference_tests::Tensor(ET, {2, 1, 2}, std::vector<T>{"A", "B c", " G h,i;", "d.Ef"}),
+         "gather_string_3D_data_batch_dims_2"},
+        {reference_tests::Tensor(ET,
+                                 {2, 2, 2},
+                                 std::vector<T>{"A", "B c", "d.Ef", " G h,i;", "JK ", "l,m,n,", " ", " \0"}),
+         reference_tests::Tensor(ET_I, {1}, std::vector<T_I>{1}),
+         reference_tests::Tensor(ET_A, {1}, std::vector<T_A>{1}),
+         0,
+         reference_tests::Tensor(ET, {2, 1, 2}, std::vector<T>{"d.Ef", " G h,i;", " ", " \0"}),
+         "gather_string_3D_data_axis_1"},
+        {reference_tests::Tensor(ET,
+                                 {2, 2, 2},
+                                 std::vector<T>{"A", "B c", "d.Ef", " G h,i;", "JK ", "l,m,n,", " ", " \0"}),
+         reference_tests::Tensor(ET_I, {1}, std::vector<T_I>{1}),
+         reference_tests::Tensor(ET_A, {1}, std::vector<T_A>{0}),
+         0,
+         reference_tests::Tensor(ET, {1, 2, 2}, std::vector<T>{"JK ", "l,m,n,", " ", " \0"}),
+         "gather_string_3D_data_axis_0"},
+    };
+    return params;
+}
 
 template <element::Type_t ET, element::Type_t ET_I, element::Type_t ET_A>
 std::vector<GatherParams> generateParamsFloatValueV7() {
     using T = typename element_type_traits<ET>::value_type;
@@ -633,6 +689,7 @@ std::vector<GatherParams> generateCombinedParamsV7() {
         generateParamsFloatValueV7(),
         generateParamsFloatValueV7(),
         generateParamsFloatValueV7(),
+        generateParamsStringValue<element::Type_t::string, element::Type_t::i32, element::Type_t::i32>(),
     };
 
     std::vector<GatherParams> combinedParams;
@@ -717,6 +774,7 @@ std::vector<GatherParamsV8> generateCombinedParamsV8() {
         generateParamsV8(),
         generateParamsV8(),
         generateParamsV8(),
+        generateParamsStringValue<element::Type_t::string, element::Type_t::i32, element::Type_t::i32>(),
     };
 
     std::vector<GatherParamsV8> combinedParams;
diff --git a/src/plugins/template/tests/functional/op_reference/if.cpp b/src/plugins/template/tests/functional/op_reference/if.cpp
index 5f51a03f02d427..fe05f18c27f50d 100644
--- a/src/plugins/template/tests/functional/op_reference/if.cpp
+++ b/src/plugins/template/tests/functional/op_reference/if.cpp
@@ -8,7 +8,6 @@
 #include <vector>
 
 #include "base_reference_test.hpp"
-#include "ie_core.hpp"
 #include "shared_test_classes/base/layer_test_utils.hpp"
 
 using namespace reference_tests;
diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/memory_states.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/memory_states.cpp
new file mode 100644
index 00000000000000..6d8fd09d192786
--- /dev/null
+++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/memory_states.cpp
@@ -0,0 +1,20 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/ov_infer_request/memory_states.hpp"
+
+namespace {
+using ov::test::behavior::memoryStateParams;
+using ov::test::behavior::OVInferRequestVariableStateTest;
+
+std::vector<memoryStateParams> memoryStateTestCases = {memoryStateParams(OVInferRequestVariableStateTest::get_network(),
+                                                                         {"c_1-3", "r_1-3"},
+                                                                         ov::test::utils::DEVICE_TEMPLATE,
+                                                                         {})};
+
+INSTANTIATE_TEST_SUITE_P(smoke_Template_BehaviorTests,
+                         OVInferRequestVariableStateTest,
+                         ::testing::ValuesIn(memoryStateTestCases),
+                         OVInferRequestVariableStateTest::getTestCaseName);
+} // namespace
diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/properties_tests.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/properties_tests.cpp
new file mode 100644
index 00000000000000..8aae5e01116c62
--- /dev/null
+++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/properties_tests.cpp
@@ -0,0 +1,21 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/ov_infer_request/properties_tests.hpp"
+
+#include <vector>
+
+namespace {
+using ov::test::behavior::InferRequestPropertiesTest;
+
+const std::vector<ov::AnyMap> configs = {{}};
+
+INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests,
+                         InferRequestPropertiesTest,
+                         ::testing::Combine(::testing::Values(1u),
+                                            ::testing::Values(ov::test::utils::DEVICE_TEMPLATE),
+                                            ::testing::ValuesIn(configs)),
+                         InferRequestPropertiesTest::getTestCaseName);
+
+} // namespace
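Reviewer note: the new string cases also exercise Gather's `batch_dims` semantics. A plain-C++ crib for what `gather_string_2D_data_batch_dims_1` expects (with axis = 1 and batch_dims = 1, each batch row gathers by its own indices):

```cpp
#include <cassert>
#include <string>
#include <vector>

int main() {
    // data:    [[ "A", "B c" ], [ "d.Ef", " G h,i;" ]]   shape {2, 2}
    // indices: [[0], [1]]                                shape {2, 1}
    // axis = 1, batch_dims = 1 -> row b takes data[b][indices[b][i]].
    std::vector<std::vector<std::string>> data = {{"A", "B c"}, {"d.Ef", " G h,i;"}};
    std::vector<std::vector<size_t>> indices = {{0}, {1}};

    std::vector<std::string> out;
    for (size_t b = 0; b < data.size(); ++b)
        for (size_t i : indices[b])
            out.push_back(data[b][i]);

    // Matches the expected tensor of shape {2, 1} in the test above.
    assert(out == (std::vector<std::string>{"A", " G h,i;"}));
    return 0;
}
```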
diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/hetero_synthetic.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/hetero_synthetic.cpp
new file mode 100644
index 00000000000000..090ce0e32f1917
--- /dev/null
+++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_plugin/hetero_synthetic.cpp
@@ -0,0 +1,107 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/ov_plugin/hetero_synthetic.hpp"
+
+#include <vector>
+
+#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
+#include "common_test_utils/subgraph_builders/conv_pool_relu_non_zero.hpp"
+
+namespace {
+using ov::test::behavior::OVHeteroSyntheticTest;
+using ov::test::behavior::PluginParameter;
+
+// this tests load plugin by library name: this is not available during static linkage
+#ifndef OPENVINO_STATIC_LIBRARY
+
+INSTANTIATE_TEST_SUITE_P(smoke_manyTargetInputs,
+                         OVHeteroSyntheticTest,
+                         ::testing::Combine(::testing::Values(std::vector<PluginParameter>{
+                                                {"TEMPLATE0", "openvino_template_plugin"},
+                                                {"TEMPLATE1", "openvino_template_plugin"}}),
+                                            ::testing::ValuesIn(OVHeteroSyntheticTest::withMajorNodesFunctions(
+                                                [] {
+                                                    return ov::test::utils::make_conv_pool2_relu2();
+                                                },
+                                                {"Conv_1"},
+                                                true))),
+                         OVHeteroSyntheticTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_SingleMajorNode,
+                         OVHeteroSyntheticTest,
+                         ::testing::Combine(::testing::Values(std::vector<PluginParameter>{
+                                                {"TEMPLATE0", "openvino_template_plugin"},
+                                                {"TEMPLATE1", "openvino_template_plugin"}}),
+                                            ::testing::ValuesIn(OVHeteroSyntheticTest::_singleMajorNodeFunctions)),
+                         OVHeteroSyntheticTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_RandomMajorNodes,
+                         OVHeteroSyntheticTest,
+                         ::testing::Combine(::testing::Values(std::vector<PluginParameter>{
+                                                {"TEMPLATE0", "openvino_template_plugin"},
+                                                {"TEMPLATE1", "openvino_template_plugin"}}),
+                                            ::testing::ValuesIn(OVHeteroSyntheticTest::_randomMajorNodeFunctions)),
+                         OVHeteroSyntheticTest::getTestCaseName);
+
+static std::vector<std::function<std::shared_ptr<ov::Model>()>> dynamicBuilders = {
+    [] {
+        return ov::test::utils::make_conv_pool_relu_non_zero();
+    },
+};
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_NonZeroMajorNode_dynamic,
+    OVHeteroSyntheticTest,
+    ::testing::Combine(::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "openvino_template_plugin"},
+                                                                      {"TEMPLATE1", "openvino_template_plugin"}}),
+                       ::testing::ValuesIn(OVHeteroSyntheticTest::withMajorNodesFunctions(dynamicBuilders.front(),
+                                                                                          {"nonZero_1"}))),
+    OVHeteroSyntheticTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_NonZeroMajorNode_dynamic_batch,
+    OVHeteroSyntheticTest,
+    ::testing::Combine(::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "openvino_template_plugin"},
+                                                                      {"TEMPLATE1", "openvino_template_plugin"}}),
+                       ::testing::ValuesIn(OVHeteroSyntheticTest::withMajorNodesFunctions(dynamicBuilders.front(),
+                                                                                          {"nonZero_1"},
+                                                                                          true))),
+    OVHeteroSyntheticTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_SingleMajorNode_dynamic,
+    OVHeteroSyntheticTest,
+    ::testing::Combine(::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "openvino_template_plugin"},
+                                                                      {"TEMPLATE1", "openvino_template_plugin"}}),
+                       ::testing::ValuesIn(OVHeteroSyntheticTest::singleMajorNodeFunctions(dynamicBuilders))),
+    OVHeteroSyntheticTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(
+    nightly_RandomMajorNodes_dynamic,
+    OVHeteroSyntheticTest,
+    ::testing::Combine(::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "openvino_template_plugin"},
+                                                                      {"TEMPLATE1", "openvino_template_plugin"}}),
+                       ::testing::ValuesIn(OVHeteroSyntheticTest::randomMajorNodeFunctions(dynamicBuilders))),
+    OVHeteroSyntheticTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_SingleMajorNode_dynamic_batch,
+    OVHeteroSyntheticTest,
+    ::testing::Combine(::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "openvino_template_plugin"},
+                                                                      {"TEMPLATE1", "openvino_template_plugin"}}),
+                       ::testing::ValuesIn(OVHeteroSyntheticTest::singleMajorNodeFunctions(dynamicBuilders, true))),
+    OVHeteroSyntheticTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(
+    nightly_RandomMajorNodes_dynamic_batch,
+    OVHeteroSyntheticTest,
+    ::testing::Combine(::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "openvino_template_plugin"},
+                                                                      {"TEMPLATE1", "openvino_template_plugin"}}),
+                       ::testing::ValuesIn(OVHeteroSyntheticTest::randomMajorNodeFunctions(dynamicBuilders, true))),
+    OVHeteroSyntheticTest::getTestCaseName);
+
+#endif  // !OPENVINO_STATIC_LIBRARY
+
+} // namespace
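Reviewer note: these synthetic tests register the template plugin twice under distinct device names and let HETERO partition the model between the two instances. A hedged sketch of the user-level equivalent (the plugin library name is resolved per platform, so treat the string as illustrative):

```cpp
#include "openvino/runtime/core.hpp"

void run_hetero(const std::shared_ptr<ov::Model>& model) {
    ov::Core core;
    // Register the same plugin library under two device names, as the tests
    // above do; HETERO then splits the graph across both instances.
    core.register_plugin("openvino_template_plugin", "TEMPLATE0");
    core.register_plugin("openvino_template_plugin", "TEMPLATE1");
    auto compiled = core.compile_model(model, "HETERO:TEMPLATE0,TEMPLATE1");
    (void)compiled;
}
```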
diff --git a/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp b/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp
index 274b1d1a3332b1..7689d706432d20 100644
--- a/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp
+++ b/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp
@@ -2,13 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "single_layer_tests/eltwise.hpp"
+#include "single_op_tests/eltwise.hpp"
 
 #include <vector>
 
 #include "common_test_utils/test_constants.hpp"
 
-using namespace ov::test::subgraph;
+using namespace ov::test;
 
 namespace {
 std::vector<std::vector<ov::Shape>> inShapesStatic = {
diff --git a/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp b/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp
index 6b268c7f59cf04..18c1c2f3dfc7d1 100644
--- a/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp
+++ b/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "single_layer_tests/softmax.hpp"
+#include "single_op_tests/softmax.hpp"
 
 #include <vector>
diff --git a/src/plugins/template/tests/functional/subgraph_reference/base_reference_cnn_test.cpp b/src/plugins/template/tests/functional/subgraph_reference/base_reference_cnn_test.cpp
deleted file mode 100644
index abdf47a4b8c3b0..00000000000000
--- a/src/plugins/template/tests/functional/subgraph_reference/base_reference_cnn_test.cpp
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-#include "base_reference_cnn_test.hpp"
-
-#include <gtest/gtest.h>
-
-#include "functional_test_utils/ov_plugin_cache.hpp"
-#include "openvino/core/type/element_type.hpp"
-#include "openvino/runtime/allocator.hpp"
-#include "openvino/runtime/tensor.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "transformations/utils/utils.hpp"
-
-using namespace ov;
-
-namespace reference_tests {
-
-ReferenceCNNTest::ReferenceCNNTest() : targetDevice("TEMPLATE") {
-    core = test::utils::PluginCache::get().core(targetDevice);
-    legacy_core = PluginCache::get().ie(targetDevice);
-}
-
-void ReferenceCNNTest::Exec() {
-    LoadNetwork();
-    LoadNetworkLegacy();
-
-    if (legacy_input_blobs.empty() && inputData.empty()) {
-        FillInputs();
-    }
-    Infer();
-    InferLegacy();
-
-    Validate();
-}
-
-void ReferenceCNNTest::LoadNetwork() {
-    executableNetwork = core->compile_model(function, targetDevice);
-}
-
-void ReferenceCNNTest::LoadNetworkLegacy() {
-    auto inputInfo = legacy_network.getInputsInfo();
-    auto outputInfo = legacy_network.getOutputsInfo();
-    for (const auto& param : function->get_parameters()) {
-        inputInfo[param->get_friendly_name()]->setPrecision(
-            InferenceEngine::details::convertPrecision(param->get_element_type()));
-    }
-    for (const auto& result : function->get_results()) {
-        outputInfo[ov::op::util::create_ie_output_name(result->input_value(0))]->setPrecision(
-            InferenceEngine::details::convertPrecision(result->get_element_type()));
-    }
-    legacy_exec_network = legacy_core->LoadNetwork(legacy_network, targetDevice);
-}
-
-void ReferenceCNNTest::FillInputs() {
-    const auto& params = function->get_parameters();
-    std::default_random_engine random(0);  // hard-coded seed to make test results predictable
-    std::uniform_int_distribution<int> distrib(0, 255);
-    for (const auto& param : params) {
-        auto elem_count = shape_size(param->get_output_tensor(0).get_shape());
-        InferenceEngine::TensorDesc d(InferenceEngine::Precision::FP32,
-                                      param->get_output_tensor(0).get_shape(),
-                                      InferenceEngine::Layout::NCHW);
-        auto blob = make_blob_with_precision(d);
-        blob->allocate();
-
-        auto mBlob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
-        auto mBlobHolder = mBlob->wmap();
-        auto buf = mBlobHolder.as<float*>();
-        ASSERT_EQ(mBlob->size(), elem_count);
-
-        ov::Tensor ov_blob;
-        ov_blob = ov::Tensor(param->get_element_type(), param->get_shape());
-        auto ov_buf = static_cast<float*>(ov_blob.data());
-
-        for (size_t j = 0; j < elem_count; j++) {
-            auto v = distrib(random);
-            buf[j] = static_cast<float>(v);
-            ov_buf[j] = static_cast<float>(v);
-        }
-        legacy_input_blobs[param->get_friendly_name()] = blob;
-        inputData.push_back(ov_blob);
-    }
-}
-
-void ReferenceCNNTest::Infer() {
-    inferRequest = executableNetwork.create_infer_request();
-    const auto& functionParams = function->get_parameters();
-
-    for (size_t i = 0; i < functionParams.size(); ++i) {
-        const auto& param = functionParams[i];
-        inferRequest.set_tensor(param->get_friendly_name(), inputData[i]);
-    }
-    inferRequest.infer();
-}
-
-void ReferenceCNNTest::InferLegacy() {
-    legacy_infer_request = legacy_exec_network.CreateInferRequest();
-    legacy_infer_request.SetInput(legacy_input_blobs);
-    legacy_infer_request.Infer();
-}
-
-void ReferenceCNNTest::Validate() {
-    for (const auto& result : function->get_results()) {
-        auto name = ov::op::util::create_ie_output_name(result->input_value(0));
-        outputs_ov20.emplace_back(inferRequest.get_tensor(name));
-        auto outBlob = legacy_infer_request.GetBlob(name);
-        auto outMem = outBlob->buffer();
-        void* outData = outMem.as<void*>();
-        outputs_legacy.emplace_back(element::f32, result->get_shape(), outData);
-    }
-    for (size_t i = 0; i < outputs_legacy.size(); i++) {
-        CommonReferenceTest::ValidateBlobs(outputs_legacy[i], outputs_ov20[i], i, threshold, abs_threshold);
-    }
-}
-
-}  // namespace reference_tests
diff --git a/src/plugins/template/tests/functional/subgraph_reference/base_reference_cnn_test.hpp b/src/plugins/template/tests/functional/subgraph_reference/base_reference_cnn_test.hpp
deleted file mode 100644
index 4dcb27ee2c2dc4..00000000000000
--- a/src/plugins/template/tests/functional/subgraph_reference/base_reference_cnn_test.hpp
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "base_reference_test.hpp"
-#include "inference_engine.hpp"
-
-namespace reference_tests {
-
-class ReferenceCNNTest {
-public:
-    ReferenceCNNTest();
-
-    void Exec();
-
-    void LoadNetwork();
-    void FillInputs();  // Both for legacy and for OV2.0 API
-    void Infer();
-
-    void LoadNetworkLegacy();
-    void InferLegacy();
-
-    virtual void Validate();
-
-protected:
-    const std::string targetDevice;
-    std::shared_ptr<ov::Model> function;
-    InferenceEngine::CNNNetwork legacy_network;
-
-    float threshold = 1e-5f;     // Relative diff
-    float abs_threshold = -1.f;  // Absolute diff (not used when negative)
-
-    std::vector<ov::Tensor> outputs_ov20;
-    std::vector<ov::Tensor> outputs_legacy;
-
-protected:
-    // These will be generated by default, if user has not specified inputs manually
-    std::vector<ov::Tensor> inputData;
-    InferenceEngine::BlobMap legacy_input_blobs;
-
-private:
-    std::shared_ptr<ov::Core> core;
-    ov::CompiledModel executableNetwork;
-    ov::InferRequest inferRequest;
-
-    std::shared_ptr<InferenceEngine::Core> legacy_core;
-    InferenceEngine::ExecutableNetwork legacy_exec_network;
-    InferenceEngine::InferRequest legacy_infer_request;
-};
-
-}  // namespace reference_tests
diff --git a/src/plugins/template/tests/functional/subgraph_reference/preprocess_opencv.cpp b/src/plugins/template/tests/functional/subgraph_reference/preprocess_opencv.cpp
deleted file mode 100644
index d27122a1c01d38..00000000000000
--- a/src/plugins/template/tests/functional/subgraph_reference/preprocess_opencv.cpp
+++ /dev/null
@@ -1,418 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#ifdef OPENCV_TEMPLATE_TESTS
-
-# include <opencv2/core.hpp>
-# include <opencv2/imgproc.hpp>
-
-# include <random>
-
-# include "base_reference_test.hpp"
-# include "openvino/core/preprocess/pre_post_process.hpp"
-# include "shared_test_classes/base/layer_test_utils.hpp"
-# include "shared_test_classes/single_layer/convert_color_i420.hpp"
-# include "shared_test_classes/single_layer/convert_color_nv12.hpp"
-
-using namespace ov;
-using namespace ov::preprocess;
-using namespace reference_tests;
-namespace {
-
-class PreprocessOpenCVReferenceTest : public testing::Test, public CommonReferenceTest {
-public:
-    void SetUp() override {
-        SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    }
-};
-
-/// \brief Test class with counting deviated pixels
-///
-/// OpenCV contains custom implementation for 8U and 16U (all calculations
-/// are done in INTs instead of FLOATs), so deviation in 1 color step
-/// between pixels is expected
-class PreprocessOpenCVReferenceTest_8U : public PreprocessOpenCVReferenceTest {
-public:
-    void Validate() override {
-        threshold = 1.f;
-        abs_threshold = 1.f;
-        // No pixels with deviation of more than 1 color step
-        CommonReferenceTest::Validate();
-        // Less than 2% of deviations with 1 color step. 2% is experimental value
-        // For very precise (acceptable) float calculations - 1.4% deviation with G-API/OpenCV is observed
-        LayerTestsDefinitions::NV12TestUtils::ValidateColors(refOutData[0].data<uint8_t>(),
-                                                             actualOutData[0].data<uint8_t>(),
-                                                             refOutData[0].get_size(),
-                                                             0.02);
-    }
-};
-
-}  // namespace
-
-static std::shared_ptr<Model> create_simple_function(element::Type type, const PartialShape& shape) {
-    auto data1 = std::make_shared<op::v0::Parameter>(type, shape);
-    data1->set_friendly_name("input1");
-    data1->get_output_tensor(0).set_names({"tensor_input1", "input1"});
-    auto c = op::v0::Constant::create(type, {1}, {0});
-    auto op = std::make_shared<op::v1::Add>(data1, c);
-    op->set_friendly_name("Add0");
-    auto res = std::make_shared<op::v0::Result>(op);
-    res->set_friendly_name("Result1");
-    res->get_output_tensor(0).set_names({"tensor_output1", "Result1"});
-    return std::make_shared<Model>(ResultVector{res}, ParameterVector{data1});
-}
-
-TEST_F(PreprocessOpenCVReferenceTest, convert_rgb_gray_fp32) {
-    const size_t input_height = 50;
-    const size_t input_width = 50;
-    auto input_shape = Shape{1, input_height, input_width, 3};
-    auto model_shape = Shape{1, input_height, input_width, 1};
-
-    auto input_img = std::vector<float>(shape_size(input_shape));
-    std::default_random_engine random(0);  // hard-coded seed to make test results predictable
-    std::uniform_int_distribution<int> distrib(-5, 300);
-    for (std::size_t i = 0; i < shape_size(input_shape); i++)
-        input_img[i] = static_cast<float>(distrib(random));
-
-    function = create_simple_function(element::f32, model_shape);
-
-    inputData.clear();
-
-    auto p = PrePostProcessor(function);
-    p.input().tensor().set_color_format(ColorFormat::RGB);
-    p.input().preprocess().convert_color(ColorFormat::GRAY);
-    function = p.build();
-
-    const auto& param = function->get_parameters()[0];
-    inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data());
-
-    // Calculate reference expected values from OpenCV
-    cv::Mat cvPic = cv::Mat(input_height, input_width, CV_32FC3, input_img.data());
-    cv::Mat picGRAY;
-    cv::cvtColor(cvPic, picGRAY, CV_RGB2GRAY);
-    refOutData.emplace_back(param->get_element_type(), model_shape, picGRAY.data);
-    // Exec now
-    Exec();
-}
-
-TEST_F(PreprocessOpenCVReferenceTest, convert_bgr_gray_fp32) {
-    const size_t input_height = 50;
-    const size_t input_width = 50;
-    auto input_shape = Shape{1, input_height, input_width, 3};
-    auto model_shape = Shape{1, input_height, input_width, 1};
-
-    auto input_img = std::vector<float>(shape_size(input_shape));
-    std::default_random_engine random(0);  // hard-coded seed to make test results predictable
-    std::uniform_int_distribution<int> distrib(-5, 300);
-    for (std::size_t i = 0; i < shape_size(input_shape); i++)
-        input_img[i] = static_cast<float>(distrib(random));
-
-    function = create_simple_function(element::f32, model_shape);
-
-    inputData.clear();
-
-    auto p = PrePostProcessor(function);
-    p.input().tensor().set_color_format(ColorFormat::BGR);
-    p.input().preprocess().convert_color(ColorFormat::GRAY);
-    function = p.build();
-
-    const auto& param = function->get_parameters()[0];
-    inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data());
-
-    // Calculate reference expected values from OpenCV
-    cv::Mat cvPic = cv::Mat(input_height, input_width, CV_32FC3, input_img.data());
-    cv::Mat picGRAY;
-    cv::cvtColor(cvPic, picGRAY, CV_BGR2GRAY);
-    refOutData.emplace_back(param->get_element_type(), model_shape, picGRAY.data);
-
-    // Exec now
-    Exec();
-}
-
-TEST_F(PreprocessOpenCVReferenceTest_8U, convert_rgb_gray_u8) {
-    const size_t input_height = 50;
-    const size_t input_width = 50;
-    auto input_shape = Shape{1, input_height, input_width, 3};
-    auto model_shape = Shape{1, input_height, input_width, 1};
-
-    auto input_img = std::vector<uint8_t>(shape_size(input_shape));
-    std::default_random_engine random(0);  // hard-coded seed to make test results predictable
-    std::uniform_int_distribution<int> distrib(0, 255);
-    for (std::size_t i = 0; i < shape_size(input_shape); i++)
-        input_img[i] = static_cast<uint8_t>(distrib(random));
-
-    function = create_simple_function(element::u8, model_shape);
-
-    inputData.clear();
-
-    auto p = PrePostProcessor(function);
-    p.input().tensor().set_color_format(ColorFormat::RGB);
-    p.input().preprocess().convert_color(ColorFormat::GRAY);
-    function = p.build();
-
-    const auto& param = function->get_parameters()[0];
-    inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data());
-
-    // Calculate reference expected values from OpenCV
-    cv::Mat cvPic = cv::Mat(input_height, input_width, CV_8UC3, input_img.data());
-    cv::Mat picGRAY;
-    cv::cvtColor(cvPic, picGRAY, CV_RGB2GRAY);
-    refOutData.emplace_back(param->get_element_type(), model_shape, picGRAY.data);
-    // Exec now
-    Exec();
-}
-
-TEST_F(PreprocessOpenCVReferenceTest_8U, convert_bgr_gray_u8) {
-    const size_t input_height = 50;
-    const size_t input_width = 50;
-    auto input_shape = Shape{1, input_height, input_width, 3};
-    auto model_shape = Shape{1, input_height, input_width, 1};
-
-    auto input_img = std::vector<uint8_t>(shape_size(input_shape));
-    std::default_random_engine random(0);  // hard-coded seed to make test results predictable
-    std::uniform_int_distribution<int> distrib(0, 255);
-    for (std::size_t i = 0; i < shape_size(input_shape); i++)
-        input_img[i] = static_cast<uint8_t>(distrib(random));
-
-    function = create_simple_function(element::u8, model_shape);
-
-    inputData.clear();
-
-    auto p = PrePostProcessor(function);
-    p.input().tensor().set_color_format(ColorFormat::BGR);
-    p.input().preprocess().convert_color(ColorFormat::GRAY);
-    function = p.build();
-
-    const auto& param = function->get_parameters()[0];
-    inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data());
-
-    // Calculate reference expected values from OpenCV
-    cv::Mat cvPic = cv::Mat(input_height, input_width, CV_8UC3, input_img.data());
-    cv::Mat picGRAY;
-    cv::cvtColor(cvPic, picGRAY, CV_BGR2GRAY);
-    refOutData.emplace_back(param->get_element_type(), model_shape, picGRAY.data);
-
-    // Exec now
-    Exec();
-}
-
-TEST_F(PreprocessOpenCVReferenceTest_8U, convert_i420_full_color_range) {
-    size_t height = 64;  // 64/2 = 32 values for R
-    size_t width = 64;   // 64/2 = 32 values for G
-    int b_step = 5;
-    int b_dim = 255 / b_step + 1;
-
-    // Test various possible r/g/b values within dimensions
-    auto ov20_input_yuv = LayerTestsDefinitions::I420TestUtils::color_test_image(height, width, b_step);
-
-    auto full_height = height * b_dim;
-    auto func_shape = Shape{1, full_height, width, 3};
-    function = create_simple_function(element::u8, func_shape);
-
-    inputData.clear();
-
-    auto p = PrePostProcessor(function);
-    p.input().tensor().set_color_format(ColorFormat::I420_SINGLE_PLANE);
-    p.input().preprocess().convert_color(ColorFormat::BGR);
-    function = p.build();
-
-    const auto& param = function->get_parameters()[0];
-    inputData.emplace_back(param->get_element_type(), param->get_shape(), ov20_input_yuv.data());
-
-    // Calculate reference expected values from OpenCV
-    cv::Mat picYV12 =
-        cv::Mat(static_cast<int>(full_height) * 3 / 2, static_cast<int>(width), CV_8UC1, ov20_input_yuv.data());
ov20_input_yuv.data()); - cv::Mat picBGR; - cv::cvtColor(picYV12, picBGR, CV_YUV2BGR_I420); - refOutData.emplace_back(param->get_element_type(), func_shape, picBGR.data); - - // Exec now - Exec(); -} - -TEST_F(PreprocessOpenCVReferenceTest_8U, convert_nv12_full_color_range) { - size_t height = 64; // 64/2 = 32 values for R - size_t width = 64; // 64/2 = 32 values for G - int b_step = 5; - int b_dim = 255 / b_step + 1; - - // Test various possible r/g/b values within dimensions - auto ov20_input_yuv = LayerTestsDefinitions::NV12TestUtils::color_test_image(height, width, b_step); - - auto full_height = height * b_dim; - auto func_shape = Shape{1, full_height, width, 3}; - function = create_simple_function(element::u8, func_shape); - - inputData.clear(); - - auto p = PrePostProcessor(function); - p.input().tensor().set_color_format(ColorFormat::NV12_SINGLE_PLANE); - p.input().preprocess().convert_color(ColorFormat::BGR); - function = p.build(); - - const auto& param = function->get_parameters()[0]; - inputData.emplace_back(param->get_element_type(), param->get_shape(), ov20_input_yuv.data()); - - // Calculate reference expected values from OpenCV - cv::Mat picYV12 = - cv::Mat(static_cast(full_height) * 3 / 2, static_cast(width), CV_8UC1, ov20_input_yuv.data()); - cv::Mat picBGR; - cv::cvtColor(picYV12, picBGR, CV_YUV2BGR_NV12); - refOutData.emplace_back(param->get_element_type(), func_shape, picBGR.data); - - // Exec now - Exec(); -} - -TEST_F(PreprocessOpenCVReferenceTest_8U, convert_nv12_colored) { - auto input_yuv = std::vector{235, 81, 235, 81, 109, 184}; - auto func_shape = Shape{1, 2, 2, 3}; - function = create_simple_function(element::u8, func_shape); - - inputData.clear(); - - auto p = PrePostProcessor(function); - p.input().tensor().set_color_format(ColorFormat::NV12_SINGLE_PLANE); - p.input().preprocess().convert_color(ColorFormat::BGR); - function = p.build(); - - const auto& param = function->get_parameters()[0]; - inputData.emplace_back(param->get_element_type(), param->get_shape(), input_yuv.data()); - - // Calculate reference expected values from OpenCV - cv::Mat picYV12 = cv::Mat(3, 2, CV_8UC1, input_yuv.data()); - cv::Mat picBGR; - cv::cvtColor(picYV12, picBGR, CV_YUV2BGR_NV12); - refOutData.emplace_back(param->get_element_type(), func_shape, picBGR.data); - // Exec now - Exec(); -} - -TEST_F(PreprocessOpenCVReferenceTest, resize_u8_simple_linear) { - auto input_shape = Shape{1, 1, 2, 2}; - auto func_shape = Shape{1, 1, 1, 1}; - auto input_img = std::vector{5, 5, 5, 4}; - function = create_simple_function(element::u8, func_shape); - - inputData.clear(); - - auto p = PrePostProcessor(function); - p.input().tensor().set_spatial_static_shape(2, 2); - p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR); - p.input().model().set_layout("NCHW"); - function = p.build(); - - const auto& param = function->get_parameters()[0]; - inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data()); - - // Calculate reference expected values from OpenCV - cv::Mat cvPic = cv::Mat(2, 2, CV_8UC1, input_img.data()); - cv::Mat cvPicResized; - cv::resize(cvPic, cvPicResized, cv::Size(1, 1), cv::INTER_NEAREST); - refOutData.emplace_back(param->get_element_type(), func_shape, cvPicResized.data); - // Exec now - Exec(); -} - -TEST_F(PreprocessOpenCVReferenceTest_8U, resize_u8_large_picture_linear) { - const size_t input_height = 50; - const size_t input_width = 50; - const size_t func_height = 37; - const size_t func_width = 31; - auto input_shape = Shape{1, 1, 
input_height, input_width}; - auto func_shape = Shape{1, 1, func_height, func_width}; - auto input_img = std::vector(shape_size(input_shape)); - std::default_random_engine random(0); // hard-coded seed to make test results predictable - std::uniform_int_distribution distrib(0, 255); - for (std::size_t i = 0; i < shape_size(input_shape); i++) { - auto v = distrib(random); - input_img[i] = static_cast(v); - } - function = create_simple_function(element::u8, func_shape); - - inputData.clear(); - - auto p = PrePostProcessor(function); - p.input().tensor().set_spatial_static_shape(input_height, input_width); - p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR); - p.input().model().set_layout("NCHW"); - function = p.build(); - - const auto& param = function->get_parameters()[0]; - inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data()); - - // Calculate reference expected values from OpenCV - cv::Mat cvPic = cv::Mat(input_height, input_width, CV_8UC1, input_img.data()); - cv::Mat cvPicResized; - cv::resize(cvPic, cvPicResized, cv::Size(func_width, func_height), cv::INTER_LINEAR_EXACT); - refOutData.emplace_back(param->get_element_type(), func_shape, cvPicResized.data); - // Exec now - Exec(); -} - -TEST_F(PreprocessOpenCVReferenceTest, resize_f32_large_picture_linear) { - threshold = 0.01f; - abs_threshold = 0.01f; - const size_t input_height = 50; - const size_t input_width = 50; - const size_t func_height = 37; - const size_t func_width = 31; - auto input_shape = Shape{1, 1, input_height, input_width}; - auto func_shape = Shape{1, 1, func_height, func_width}; - auto input_img = std::vector(shape_size(input_shape)); - std::default_random_engine random(0); // hard-coded seed to make test results predictable - std::uniform_int_distribution distrib(0, 255); - for (std::size_t i = 0; i < shape_size(input_shape); i++) { - input_img[i] = static_cast(distrib(random)); - } - function = create_simple_function(element::f32, func_shape); - - inputData.clear(); - - auto p = PrePostProcessor(function); - p.input().tensor().set_spatial_static_shape(input_height, input_width); - p.input().preprocess().resize(ResizeAlgorithm::RESIZE_LINEAR); - p.input().model().set_layout("NCHW"); - function = p.build(); - - const auto& param = function->get_parameters()[0]; - inputData.emplace_back(param->get_element_type(), param->get_shape(), input_img.data()); - - // Calculate reference expected values from OpenCV - cv::Mat cvPic = cv::Mat(input_height, input_width, CV_32FC1, input_img.data()); - cv::Mat cvPicResized; - cv::resize(cvPic, cvPicResized, cv::Size(func_width, func_height), cv::INTER_LINEAR_EXACT); - refOutData.emplace_back(param->get_element_type(), func_shape, cvPicResized.data); - // Exec now - Exec(); -} - -TEST_F(PreprocessOpenCVReferenceTest, DISABLED_resize_f32_large_picture_cubic_small) { - const size_t input_height = 4; - const size_t input_width = 4; - const size_t func_height = 3; - const size_t func_width = 3; - auto input_shape = Shape{1, 1, input_height, input_width}; - auto func_shape = Shape{1, 1, func_height, func_width}; - auto element_type = element::f32; - auto input_img = std::vector{1.f, 2.f, 3.f, 4.f, 4.f, 3.f, 2.f, 1.f, 1.f, 2.f, 3.f, 4.f, 4.f, 3.f, 2.f, 1.f}; - function = create_simple_function(element_type, func_shape); - auto p = PrePostProcessor(function); - p.input().tensor().set_spatial_static_shape(input_height, input_width); - p.input().preprocess().resize(ResizeAlgorithm::RESIZE_CUBIC); - p.input().model().set_layout("NCHW"); - function 
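All four *_gray tests above validate the same oracle: OpenCV's RGB/BGR-to-gray conversion, which applies fixed ITU-R BT.601 luma weights. As a minimal sketch of the per-pixel reference these tests compare against (the coefficients are OpenCV's documented constants; the helper name is illustrative, not part of this patch):

static inline float rgb_to_gray_bt601(float r, float g, float b) {
    // Same weights cv::cvtColor uses for CV_RGB2GRAY; for CV_BGR2GRAY only the
    // channel order of the arguments is swapped.
    return 0.299f * r + 0.587f * g + 0.114f * b;
}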
diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/core_config.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/core_config.cpp
index 4191dd7116e983..47939f62202dde 100644
--- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/core_config.cpp
+++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/core_config.cpp
@@ -2,16 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "functional_test_utils/core_config.hpp"
 #include "common_test_utils/file_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
 #include "openvino/util/file_util.hpp"
 
 #include "conformance.hpp"
 
-// todo: remove as LayerTestBase will be removed
-void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}
-
 namespace ov {
 namespace test {
diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/core_config.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/core_config.cpp
index b572f190555828..47939f62202dde 100644
--- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/core_config.cpp
+++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/core_config.cpp
@@ -2,16 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "functional_test_utils/core_config.hpp"
 #include "common_test_utils/file_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
 #include "openvino/util/file_util.hpp"
 
 #include "conformance.hpp"
 
-// todo: remove as LayerTestClass will be removed
-void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}
-
 namespace ov {
 namespace test {
diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/hetero_synthetic.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/hetero_synthetic.hpp
index 53eac08ca97ded..3d9141df798c09 100644
--- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/hetero_synthetic.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/hetero_synthetic.hpp
@@ -55,6 +55,11 @@ class OVHeteroSyntheticTest : public testing::WithParamInterface<OVHeteroSyntheticTestParameters>,
     static std::vector<FunctionParameter> randomMajorNodeFunctions(
         const std::vector<std::function<std::shared_ptr<ov::Model>()>>& builders,
         bool dynamic_batch = false, uint32_t seed = 0);
+    static std::vector<FunctionParameter> withMajorNodesFunctions(
+        const std::function<std::shared_ptr<ov::Model>()>& builder,
+        const std::unordered_set<std::string>& majorNodes,
+        bool dynamic_batch = false);
+
     static std::vector<FunctionParameter> _singleMajorNodeFunctions;
     static std::vector<FunctionParameter> _randomMajorNodeFunctions;
 };
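The hunk above adds withMajorNodesFunctions alongside the existing randomMajorNodeFunctions: instead of randomly sampling which nodes become "major" ones, the caller names them explicitly for a single graph builder. A hypothetical call, with the builder and node name invented for illustration:

// Hypothetical usage sketch; make_split_conv_concat and "Convolution_1" are placeholders.
auto cases = OVHeteroSyntheticTest::withMajorNodesFunctions(
    []() { return make_split_conv_concat(); },  // one builder instead of a vector of them
    {"Convolution_1"},                          // friendly names of the nodes to mark as major
    false);                                     // dynamic_batch off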
"shared_test_classes/single_layer/activation.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ActivationLayerTest, CompareWithRefs) { - Run(); -} - -TEST_P(ActivationParamLayerTest, CompareWithRefs) { - Run(); -} - -TEST_P(ActivationDynamicLayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/adaptive_pooling.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/adaptive_pooling.hpp deleted file mode 100644 index 3caf98b7566cfd..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/adaptive_pooling.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/adaptive_pooling.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(AdaPoolLayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/batch_norm.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/batch_norm.hpp deleted file mode 100644 index a00d00b6a55e9b..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/batch_norm.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/batch_norm.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(BatchNormLayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/batch_to_space.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/batch_to_space.hpp deleted file mode 100644 index 8d9a51a07ed52e..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/batch_to_space.hpp +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/single_layer/batch_to_space.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(BatchToSpaceLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/binary_convolution.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/binary_convolution.hpp deleted file mode 100644 index 6d76bf71a3ee66..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/binary_convolution.hpp +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/binary_convolution.hpp" - -namespace LayerTestsDefinitions { -TEST_P(BinaryConvolutionLayerTest, CompareWithRefs) { - Run(); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/broadcast.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/broadcast.hpp deleted file mode 100644 index 99093306807476..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/broadcast.hpp +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include 
"shared_test_classes/single_layer/broadcast.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(BroadcastLayerTest, CompareWithRefs) { - Run(); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/bucketize.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/bucketize.hpp deleted file mode 100644 index ec647ebc0b8076..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/bucketize.hpp +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/bucketize.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(BucketizeLayerTest, CompareWithRefs) { - Run(); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/clamp.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/clamp.hpp deleted file mode 100644 index 260e57f197731a..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/clamp.hpp +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/clamp.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ClampLayerTest, CompareWithRefs) { - Run(); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/comparison.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/comparison.hpp deleted file mode 100644 index ef9e5cc4f0ced4..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/comparison.hpp +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace LayerTestsDefinitions { - -TEST_P(ComparisonLayerTest, ComparisonTests) { - Run(); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/concat.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/concat.hpp deleted file mode 100644 index eb5d0385af59c0..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/concat.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/concat.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ConcatLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/constant.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/constant.hpp deleted file mode 100644 index a62bafd8611796..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/constant.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/constant.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ConstantLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/conversion.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/conversion.hpp deleted file mode 100644 index 
924883d4ce5a26..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/conversion.hpp +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/conversion.hpp" - -namespace LayerTestsDefinitions { -TEST_P(ConversionLayerTest, CompareWithRefs) { - Run(); -}; -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/convert_color_i420.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/convert_color_i420.hpp deleted file mode 100644 index 14cbf9c7afd83d..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/convert_color_i420.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/convert_color_i420.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ConvertColorI420LayerTest, CompareWithRefs) { - Run(); -} - -TEST_P(ConvertColorI420AccuracyTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/convert_color_nv12.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/convert_color_nv12.hpp deleted file mode 100644 index 5d876dd586eebe..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/convert_color_nv12.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/convert_color_nv12.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ConvertColorNV12LayerTest, CompareWithRefs) { - Run(); -} - -TEST_P(ConvertColorNV12AccuracyTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/convolution.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/convolution.hpp deleted file mode 100644 index 8ca1b2e4f7e864..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/convolution.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/convolution.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ConvolutionLayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/convolution_backprop.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/convolution_backprop.hpp deleted file mode 100644 index 2294c37ddd832d..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/convolution_backprop.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/convolution_backprop.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ConvolutionBackpropLayerTest, CompareWithRefs) { - Run(); -} - -} diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/convolution_backprop_data.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/convolution_backprop_data.hpp deleted file mode 100644 index 6b478f796110d6..00000000000000 
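Each deleted header in this block carries only the TEST_P body; the fixture lives in shared_test_classes and the concrete cases are registered per plugin via INSTANTIATE_TEST_SUITE_P, which is why removing a header here retires that suite for every plugin at once. A schematic of the shared pattern (SomeLayerTest and the parameter values are placeholders, not from this patch):

// Schematic gtest pattern only.
TEST_P(SomeLayerTest, CompareWithRefs) {
    Run();  // build the subgraph, infer on the device under test, compare with references
}

// Supplied by each plugin's test target; disappears along with the TEST_P body above.
INSTANTIATE_TEST_SUITE_P(smoke_Basic,
                         SomeLayerTest,
                         ::testing::Values(std::string("CPU")),  // device-name parameter
                         SomeLayerTest::getTestCaseName);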
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/convolution_backprop_data.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/convolution_backprop_data.hpp
deleted file mode 100644
index 6b478f796110d6..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/convolution_backprop_data.hpp
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-// DEPRECATED, can't be removed currently due to arm and kmb-plugin dependency (#55568)
-#pragma once
-
-#include "shared_test_classes/single_layer/convolution_backprop_data.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(ConvolutionBackpropDataLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/ctc_greedy_decoder.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/ctc_greedy_decoder.hpp
deleted file mode 100644
index 1ab7d86238bdbc..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/ctc_greedy_decoder.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/ctc_greedy_decoder.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(CTCGreedyDecoderLayerTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/ctc_greedy_decoder_seq_len.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/ctc_greedy_decoder_seq_len.hpp
deleted file mode 100644
index 2ce811e192eda9..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/ctc_greedy_decoder_seq_len.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/ctc_greedy_decoder_seq_len.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(CTCGreedyDecoderSeqLenLayerTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/ctc_loss.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/ctc_loss.hpp
deleted file mode 100644
index c54b3b7c42429b..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/ctc_loss.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/ctc_loss.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(CTCLossLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/cum_sum.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/cum_sum.hpp
deleted file mode 100644
index 462ba7668aa693..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/cum_sum.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/cum_sum.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(CumSumLayerTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/deformable_convolution.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/deformable_convolution.hpp
deleted file mode 100644
index 0f6c7d8dce6c7f..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/deformable_convolution.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/deformable_convolution.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(DeformableConvolutionLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/deformable_psroi_pooling.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/deformable_psroi_pooling.hpp
deleted file mode 100644
index b5de0cd79147e9..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/deformable_psroi_pooling.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/deformable_psroi_pooling.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(DeformablePSROIPoolingLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/depth_to_space.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/depth_to_space.hpp
deleted file mode 100644
index 149ec97db2be40..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/depth_to_space.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/depth_to_space.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(DepthToSpaceLayerTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/detection_output.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/detection_output.hpp
deleted file mode 100644
index 5b4ea932a9ae5c..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/detection_output.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/detection_output.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(DetectionOutputLayerTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/dft.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/dft.hpp
deleted file mode 100644
index 5e5eeaf1e76268..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/dft.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/dft.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(DFTLayerTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/einsum.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/einsum.hpp
deleted file mode 100644
index b314b540459637..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/einsum.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/einsum.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(EinsumLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/eltwise.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/eltwise.hpp
deleted file mode 100644
index 9aaafc52326c9c..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/eltwise.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <shared_test_classes/single_layer/eltwise.hpp>
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-TEST_P(EltwiseLayerTest, EltwiseTests) {
-    run();
-}
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
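The deletion of eltwise.hpp just above shows the other generation of the test API living in this same directory: it is already in ov::test::subgraph and drives the run() pipeline, while most neighbouring headers still sit in LayerTestsDefinitions and call the legacy Run(). A side-by-side schematic (SomeLayerTest is a placeholder, not code from this patch):

// Legacy style, as in most headers deleted here
namespace LayerTestsDefinitions {
TEST_P(SomeLayerTest, CompareWithRefs) {
    Run();  // InferenceEngine-based execution path
}
}  // namespace LayerTestsDefinitions

// API 2.0 style, as in the deleted eltwise.hpp
namespace ov {
namespace test {
namespace subgraph {
TEST_P(SomeLayerTest, CompareWithRefs) {
    run();  // ov::Core-based execution path
}
}  // namespace subgraph
}  // namespace test
}  // namespace ov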
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/embedding_bag_offsets_sum.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/embedding_bag_offsets_sum.hpp
deleted file mode 100644
index ca188d843fbd51..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/embedding_bag_offsets_sum.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/embedding_bag_offsets_sum.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(EmbeddingBagOffsetsSumLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/embedding_bag_packed_sum.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/embedding_bag_packed_sum.hpp
deleted file mode 100644
index d89e4d8d35bb0c..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/embedding_bag_packed_sum.hpp
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/embedding_bag_packed_sum.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(EmbeddingBagPackedSumLayerTest, CompareWithRefs) {
-    Run();
-}
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/embedding_segments_sum.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/embedding_segments_sum.hpp
deleted file mode 100644
index bc7a33d0b1145f..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/embedding_segments_sum.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/embedding_segments_sum.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(EmbeddingSegmentsSumLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_detection_output.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_detection_output.hpp
deleted file mode 100644
index 4b6e0c2fd20e2f..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_detection_output.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <shared_test_classes/single_layer/experimental_detectron_detection_output.hpp>
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-TEST_P(ExperimentalDetectronDetectionOutputLayerTest, ExperimentalDetectronDetectionOutputLayerTests) {
-    run();
-}
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_generate_proposals_single_image.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_generate_proposals_single_image.hpp
deleted file mode 100644
index 644de35ce17ff5..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_generate_proposals_single_image.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp>
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-TEST_P(ExperimentalDetectronGenerateProposalsSingleImageLayerTest, ExperimentalDetectronGenerateProposalsSingleImageLayerTests) {
-    run();
-}
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_prior_grid_generator.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_prior_grid_generator.hpp
index 18a2ae23455534..c39c45e32a515c 100644
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_prior_grid_generator.hpp
+++ b/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_prior_grid_generator.hpp
@@ -16,4 +16,4 @@ TEST_P(ExperimentalDetectronPriorGridGeneratorLayerTest, ExperimentalDetectronPr
 
 }  // namespace subgraph
 }  // namespace test
-}  // namespace ov
+}  // namespace ov
\ No newline at end of file
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_roifeatureextractor.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_roifeatureextractor.hpp
deleted file mode 100644
index 314c7b7e933369..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_roifeatureextractor.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <shared_test_classes/single_layer/experimental_detectron_roifeatureextractor.hpp>
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-TEST_P(ExperimentalDetectronROIFeatureExtractorLayerTest, ExperimentalDetectronROIFeatureExtractorTests) {
-    run();
-}
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_topkrois.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_topkrois.hpp
deleted file mode 100644
index 4989f5fa545642..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/experimental_detectron_topkrois.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <shared_test_classes/single_layer/experimental_detectron_topkrois.hpp>
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-TEST_P(ExperimentalDetectronTopKROIsLayerTest, ExperimentalDetectronTopKROIsTests) {
-    run();
-}
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/extract_image_patches.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/extract_image_patches.hpp
deleted file mode 100644
index 0ee4d9658582d1..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/extract_image_patches.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/extract_image_patches.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(ExtractImagePatchesTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/fake_quantize.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/fake_quantize.hpp
deleted file mode 100644
index 70de29607d4326..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/fake_quantize.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/fake_quantize.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(FakeQuantizeLayerTest, CompareWithRefs) {
-    Run();
-    SKIP_IF_CURRENT_TEST_IS_DISABLED();
-
-    if (BASE_SEED != USE_CLOCK_TIME &&
-        BASE_SEED != USE_INCREMENTAL_SEED) {
-        return;
-    }
-
-    size_t nIterations = 1;
-    for (; nIterations != 0; nIterations--) {
-        UpdateSeed();
-        GenerateInputs();
-        Infer();
-        Validate();
-    }
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/gather.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/gather.hpp
deleted file mode 100644
index e909283ff31336..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/gather.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/gather.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(GatherLayerTest, CompareWithRefs) {
-    Run();
-};
-
-TEST_P(Gather7LayerTest, CompareWithRefs) {
-    Run();
-};
-
-TEST_P(Gather8LayerTest, CompareWithRefs) {
-    Run();
-};
-
-TEST_P(Gather8IndiceScalarLayerTest, CompareWithRefs) {
-    Run();
-};
-
-TEST_P(Gather8withIndicesDataLayerTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/gather_elements.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/gather_elements.hpp
deleted file mode 100644
index e534d91c3db2b8..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/gather_elements.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/gather_elements.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(GatherElementsLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/gather_nd.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/gather_nd.hpp
deleted file mode 100644
index 696ff06165b1e8..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/gather_nd.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/gather_nd.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(GatherNDLayerTest, CompareWithRefs) {
-    Run();
-}
-
-TEST_P(GatherND8LayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/gather_tree.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/gather_tree.hpp
deleted file mode 100644
index caeb462f822bd5..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/gather_tree.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/gather_tree.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(GatherTreeLayerTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/generate_proposals.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/generate_proposals.hpp
deleted file mode 100644
index c1e4c78d94cb4f..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/generate_proposals.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <shared_test_classes/single_layer/generate_proposals.hpp>
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-TEST_P(GenerateProposalsLayerTest, GenerateProposalsLayerTests) {
-    run();
-}
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/grid_sample.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/grid_sample.hpp
deleted file mode 100644
index a3fa38ac8d74c4..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/grid_sample.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/grid_sample.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(GridSampleLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/grn.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/grn.hpp
deleted file mode 100644
index 30d9350c5c23d5..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/grn.hpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/grn.hpp"
-
-namespace LayerTestsDefinitions {
-TEST_P(GrnLayerTest, CompareWithRefs) {
-    Run();
-};
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/group_convolution.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/group_convolution.hpp
deleted file mode 100644
index a6b4a751ed5508..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/group_convolution.hpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/group_convolution.hpp"
-
-namespace LayerTestsDefinitions {
-TEST_P(GroupConvolutionLayerTest, CompareWithRefs) {
-    Run();
-}
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/group_convolution_backprop_data.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/group_convolution_backprop_data.hpp
deleted file mode 100644
index 6ab5210c459e00..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/group_convolution_backprop_data.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/group_convolution_backprop_data.hpp"
-
-namespace LayerTestsDefinitions {
-
-// DEPRECATED, remove this old API when KMB (#58495) and ARM (#58496) plugins are migrated to new API
-TEST_P(GroupConvBackpropDataLayerTest, CompareWithRefs) {
-    Run();
-}
-
-TEST_P(GroupConvBackpropLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/gru_cell.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/gru_cell.hpp
deleted file mode 100644
index 888ad3ad2ab75e..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/gru_cell.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/gru_cell.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(GRUCellTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/gru_sequence.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/gru_sequence.hpp
deleted file mode 100644
index e1f0006f18bf2c..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/gru_sequence.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/gru_sequence.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(GRUSequenceTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/interpolate.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/interpolate.hpp
deleted file mode 100644
index 38d0f270662ec9..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/interpolate.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/interpolate.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(InterpolateLayerTest, CompareWithRefs) {
-    Run();
-}
-
-using Interpolate11LayerTest = v11::InterpolateLayerTest;
-
-TEST_P(Interpolate11LayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/log_softmax.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/log_softmax.hpp
deleted file mode 100644
index 8e587fedb918e4..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/log_softmax.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/log_softmax.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(LogSoftmaxLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/logical.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/logical.hpp
deleted file mode 100644
index 86c1700a4e488b..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/logical.hpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <shared_test_classes/single_layer/logical.hpp>
-
-namespace LayerTestsDefinitions {
-TEST_P(LogicalLayerTest, LogicalTests) {
-    Run();
-}
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp
deleted file mode 100644
index 7c87f19fbe7e18..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp
+++ /dev/null
@@ -1,232 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/loop.hpp"
-
-namespace LayerTestsDefinitions {
-
-
-TEST_P(LoopTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    Run();
-}
-
-TEST_P(StaticShapeLoopTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    Run();
-}
-
-TEST_P(StaticShapeLoopTest, CompareWithPredefinedRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    LoadNetwork();
-    GenerateInputs();
-    Infer();
-    auto expectedOutputs = PredefinedRefs();  // use predefined refs instead of CalculateRefs function
-    const auto& actualOutputs = GetOutputs();
-
-    if (expectedOutputs.empty()) {
-        return;
-    }
-
-    IE_ASSERT(actualOutputs.size() == expectedOutputs.size())
-        << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();
-
-    Compare(expectedOutputs, actualOutputs);
-}
-
-TEST_P(TrivialLoopTest, PassThroughBody) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    InferenceEngine::Precision iePrc;
-    InferenceEngine::SizeVector ieShape;
-    std::tie(iePrc, ieShape, targetDevice) = GetParam();
-
-    const auto prc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(iePrc);
-    const auto shape = ov::Shape{ieShape};
-    const auto scalarShape = ov::Shape{};
-
-    auto start = std::make_shared<ov::op::v0::Parameter>(prc, shape);
-    auto count = std::make_shared<ov::op::v0::Constant>(ov::element::i64, scalarShape, 5);
-    auto icond = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, scalarShape, true);
-
-    // Loop body
-    auto b_data = std::make_shared<ov::op::v0::Parameter>(prc, shape);
-    auto b_cond = std::make_shared<ov::op::v0::Parameter>(ov::element::boolean, scalarShape);
-
-    auto body = std::make_shared<ov::Model>(
-            ov::OutputVector    {b_cond, b_data},   // | passthrough body, no data changes
-            ov::ParameterVector {b_cond, b_data});  // | input -> output
-
-    auto loop = std::make_shared<ov::op::v5::Loop>(count, icond);
-    loop->set_function(body);
-    loop->set_special_body_ports({-1, 0});
-    loop->set_invariant_input(b_cond, icond);
-    loop->set_invariant_input(b_data, start);
-    loop->get_iter_value(b_data, -1);
-
-    function = std::make_shared<ov::Model>(
-            ov::OutputVector    {loop},
-            ov::ParameterVector {start});
-
-    // Precalculated ref blobs
-    auto blob = make_blob_with_precision({iePrc, ieShape, InferenceEngine::TensorDesc::getLayoutByDims(ieShape)});
-    blob->allocate();
-    ov::test::utils::fill_data_with_broadcast(blob, 0, {10});
-
-    inputGens[""] = [&] (InferenceEngine::TensorDesc tdesc) { return blob; };
-    outputGens[""] = [&] (InferenceEngine::TensorDesc tdesc) { return blob; };
-
-    Run();
-}
-
-TEST_P(TrivialLoopTest, UnusedInputBody) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    InferenceEngine::Precision iePrc;
-    InferenceEngine::SizeVector ieShape;
-    std::tie(iePrc, ieShape, targetDevice) = GetParam();
-
-    const auto prc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(iePrc);
-    const auto shape = ov::Shape{ieShape};
-    const auto scalarShape = ov::Shape{};
-
-    auto start = std::make_shared<ov::op::v0::Parameter>(prc, shape);
-    auto count = std::make_shared<ov::op::v0::Constant>(ov::element::i64, scalarShape, 5);
-    auto icond = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, scalarShape, true);
-
-    // Loop body
-    auto b_data = std::make_shared<ov::op::v0::Parameter>(prc, shape);
-    auto b_cond = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, scalarShape, true);
-    auto b_iter = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, scalarShape);
-
-    auto body = std::make_shared<ov::Model>(
-            ov::OutputVector    {b_cond, b_data},
-            ov::ParameterVector {b_data, b_iter});
-
-    auto loop = std::make_shared<ov::op::v5::Loop>(count, icond);
-    loop->set_function(body);
-    loop->set_special_body_ports({1, 0});
-    loop->set_invariant_input(b_data, start);
-    loop->get_iter_value(b_data, -1);
-
-    function = std::make_shared<ov::Model>(
-            ov::OutputVector    {loop},
-            ov::ParameterVector {start});
-
-    // Precalculated ref blobs
-    auto blob = make_blob_with_precision({iePrc, ieShape, InferenceEngine::TensorDesc::getLayoutByDims(ieShape)});
-    blob->allocate();
-    ov::test::utils::fill_data_with_broadcast(blob, 0, {10});
-
-    inputGens[""] = [&] (InferenceEngine::TensorDesc tdesc) { return blob; };
-    outputGens[""] = [&] (InferenceEngine::TensorDesc tdesc) { return blob; };
-
-    Run();
-}
-
-
-
-TEST_P(TrivialLoopTest, AutoSlicingInput_CheckPredefinedValues) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    InferenceEngine::Precision iePrc;
-    InferenceEngine::SizeVector ieShape;
-    std::tie(iePrc, ieShape, targetDevice) = GetParam();
-    const size_t batch_size = 5;
-    const size_t num_iteration = 3;
-    ieShape[0] = 1;
-    auto ieShape_to_slice = ieShape;
-    ieShape_to_slice[0] = batch_size;
-    CreateSlicedLoop(batch_size, num_iteration, iePrc, ieShape);
-    Run();
-    // Precalculated ref blobs
-    auto blob = make_blob_with_precision({iePrc, ieShape_to_slice, InferenceEngine::TensorDesc::getLayoutByDims(ieShape_to_slice)});
-    blob->allocate();
-    std::vector<float> seq_raw_data(batch_size);
-    std::iota(seq_raw_data.begin(), seq_raw_data.end(), 1);
-    ov::test::utils::fill_data_with_broadcast(blob, 0, seq_raw_data);
-
-    auto blob_ref = make_blob_with_precision({iePrc, ieShape, InferenceEngine::TensorDesc::getLayoutByDims(ieShape)});
-    blob_ref->allocate();
-    ov::test::utils::fill_data_with_broadcast(blob_ref, 0, { num_iteration * (num_iteration + 1) / 2});
-
-    inputGens[""] = [&] (InferenceEngine::TensorDesc tdesc) { return blob; };
-    outputGens[""] = [&] (InferenceEngine::TensorDesc tdesc) { return blob_ref; };
-}
-
-TEST_P(TrivialLoopTest, AutoSlicingInputWithDynCondition_CheckPredefinedValues) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    InferenceEngine::Precision iePrc;
-    InferenceEngine::SizeVector ieShape;
-    std::tie(iePrc, ieShape, targetDevice) = GetParam();
-
-    // auto slicing size : 5
-    // trip count limit : 4
-    // dyn exit after iter : 3
-    // ---------------------
-    //   should exit after 4 iterations
-    const size_t batch_size = 5;
-    const size_t trip_count = 5;
-    const size_t num_iteration = 3;
-
-    ieShape[0] = 1;
-    auto ieShape_to_slice = ieShape;
-    ieShape_to_slice[0] = batch_size;
-
-    CreateSlicedLoopDynCondition(batch_size, num_iteration, iePrc, ieShape, trip_count);
-    // Precalculated ref blobs
-    auto blob = make_blob_with_precision({iePrc, ieShape_to_slice, InferenceEngine::TensorDesc::getLayoutByDims(ieShape_to_slice)});
-    blob->allocate();
-    std::vector<float> seq_raw_data(batch_size);
-    std::iota(seq_raw_data.begin(), seq_raw_data.end(), 1);
-    ov::test::utils::fill_data_with_broadcast(blob, 0, seq_raw_data);
-
-    auto blob_ref = make_blob_with_precision({iePrc, ieShape, InferenceEngine::TensorDesc::getLayoutByDims(ieShape)});
-    blob_ref->allocate();
-    const size_t real_iter = num_iteration + 1;
-    ov::test::utils::fill_data_with_broadcast(blob_ref, 0, { real_iter * (real_iter + 1) / 2});
-
-    inputGens[""] = [&] (InferenceEngine::TensorDesc tdesc) { return blob; };
-    outputGens[""] = [&] (InferenceEngine::TensorDesc tdesc) { return blob_ref; };
-
-    Run();
-}
-
-TEST_P(TrivialLoopTest, AutoSlicingInput_CheckReference) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    InferenceEngine::Precision iePrc;
-    InferenceEngine::SizeVector ieShape;
-    std::tie(iePrc, ieShape, targetDevice) = GetParam();
-    const size_t batch_size = 5;
-    const size_t num_iteration = 3;
-    ieShape[0] = 1;
-    auto ieShape_to_slice = ieShape;
-    ieShape_to_slice[0] = batch_size;
-    CreateSlicedLoop(batch_size, num_iteration, iePrc, ieShape);
-    Run();
-}
-
-TEST_P(TrivialLoopTest, AutoSlicingInputWithDynCondition_CheckReference) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-    InferenceEngine::Precision iePrc;
-    InferenceEngine::SizeVector ieShape;
-    std::tie(iePrc, ieShape, targetDevice) = GetParam();
-
-    // auto slicing size : 5
-    // trip count limit : 4
-    // dyn exit after iter : 3
-    // ---------------------
-    //   should exit after 4 iterations
-    const size_t batch_size = 5;
-    const size_t trip_count = 5;
-    const size_t num_iteration = 3;
-
-    ieShape[0] = 1;
-    auto ieShape_to_slice = ieShape;
-    ieShape_to_slice[0] = batch_size;
-
-    CreateSlicedLoopDynCondition(batch_size, num_iteration, iePrc, ieShape, trip_count);
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
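The TrivialLoopTest bodies in the deleted loop.hpp all hinge on ov::op::v5::Loop's special body ports: set_special_body_ports takes {current_iteration_input_idx, body_condition_output_idx}, where -1 means the body has no current-iteration input. A condensed sketch of the wiring those tests used (OpenVINO C++ API; shapes and values are illustrative):

// Trip count of 5; execution condition fixed to true.
auto count = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, 5);
auto icond = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{}, true);

// Pass-through body: output 0 is the continue-condition, output 1 echoes the data.
auto b_cond = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{}, true);
auto b_data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1});
auto body = std::make_shared<ov::Model>(ov::OutputVector{b_cond, b_data},
                                        ov::ParameterVector{b_data});

auto loop = std::make_shared<ov::op::v5::Loop>(count, icond);
loop->set_function(body);
loop->set_special_body_ports({-1, 0});  // no current-iteration input; condition at body output 0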
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/low_precision.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/low_precision.hpp
deleted file mode 100644
index f68deb3e73ebb6..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/low_precision.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/low_precision.hpp"
-
-namespace LowPrecisionTestDefinitions {
-
-    TEST_P(LowPrecisionTest, CompareWithRefs) {
-        Run();
-    }
-
-}  // namespace LowPrecisionTestDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/lrn.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/lrn.hpp
deleted file mode 100644
index f49c8f83f00755..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/lrn.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/lrn.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(LrnLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/lstm_cell.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/lstm_cell.hpp
deleted file mode 100644
index dd409653f5cbe0..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/lstm_cell.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/lstm_cell.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(LSTMCellTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/lstm_cell_basic.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/lstm_cell_basic.hpp
deleted file mode 100644
index b39ef46ef8165f..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/lstm_cell_basic.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/lstm_cell_basic.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(LSTMCellBasicTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/lstm_sequence.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/lstm_sequence.hpp
deleted file mode 100644
index 4a63aef30d47c1..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/lstm_sequence.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/lstm_sequence.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(LSTMSequenceTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/mat_mul.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/mat_mul.hpp
deleted file mode 100644
index 310f30433410a3..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/mat_mul.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/mat_mul.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(MatMulTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/matrix_nms.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/matrix_nms.hpp
deleted file mode 100644
index b10864baeb0df2..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/matrix_nms.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/matrix_nms.hpp"
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-TEST_P(MatrixNmsLayerTest, CompareWithRefs) {
-    run();
-};
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/memory.h b/src/tests/functional/plugin/shared/include/single_layer_tests/memory.h
deleted file mode 100644
index de5c01f78708fc..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/memory.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/memory.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(MemoryTest, CompareWithRefs) {
-    Run();
-};
-
-TEST_P(MemoryTestV3, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/minimum_maximum.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/minimum_maximum.hpp
deleted file mode 100644
index e04801e2b4315a..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/minimum_maximum.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/minimum_maximum.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(MaxMinLayerTest, CompareWithRefs){
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/multiclass_nms.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/multiclass_nms.hpp
deleted file mode 100644
index bb981de0355afc..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/multiclass_nms.hpp
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/multiclass_nms.hpp"
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-TEST_P(MulticlassNmsLayerTest, CompareWithRefs) {
-    run();
-};
-
-TEST_P(MulticlassNmsLayerTest8, CompareWithRefs) {
-    run();
-};
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/multinomial.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/multinomial.hpp
deleted file mode 100644
index 1f1a04a2973dc3..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/multinomial.hpp
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (C) 2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-#pragma once
-#include "shared_test_classes/single_layer/multinomial.hpp"
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-TEST_P(MultinomialTest, CompareWithRefs) {
-    run();
-}
-
-TEST_P(MultinomialTest, CompareQueryModel) {
-    query_model();
-}
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp
deleted file mode 100644
index adbabbe5daacea..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/mvn.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(Mvn1LayerTest, CompareWithRefs) {
-    Run();
-};
-
-TEST_P(Mvn6LayerTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/nms_rotated.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/nms_rotated.hpp
deleted file mode 100644
index d02a115acaeb18..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/nms_rotated.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/nms_rotated.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(NmsRotatedLayerTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/non_max_suppression.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/non_max_suppression.hpp
deleted file mode 100644
index 9e65aa030b7283..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/non_max_suppression.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/non_max_suppression.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(NmsLayerTest, CompareWithRefs) {
-    Run();
-};
-
-TEST_P(Nms9LayerTest, CompareWithRefs) {
-    Run();
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/nonzero.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/nonzero.hpp
deleted file mode 100644
index 6cd8a15fc849f0..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/nonzero.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/nonzero.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(NonZeroLayerTest, CompareWithReference) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/normalize_l2.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/normalize_l2.hpp
deleted file mode 100644
index 2f5ba8f540f2de..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/normalize_l2.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/normalize_l2.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(NormalizeL2LayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/one_hot.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/one_hot.hpp
deleted file mode 100644
index fb41e10760e50a..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/one_hot.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/one_hot.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(OneHotLayerTest, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/pad.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/pad.hpp
deleted file mode 100644
index 8bdf9a3d2c283c..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/pad.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/pad.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(PadLayerTest, CompareWithRefs) {
-    Run();
-}
-
-TEST_P(PadLayerTest12, CompareWithRefs) {
-    Run();
-}
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/pooling.hpp
b/src/tests/functional/plugin/shared/include/single_layer_tests/pooling.hpp deleted file mode 100644 index a28f149673ecff..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/pooling.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/pooling.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(PoolingLayerTest, CompareWithRefs) { - Run(); -} - -TEST_P(GlobalPoolingLayerTest, CompareWithRefs) { - Run(); - - if (targetDevice == std::string{ov::test::utils::DEVICE_GPU}) { - PluginCache::get().reset(); - } -} - -TEST_P(MaxPoolingV8LayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/power.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/power.hpp deleted file mode 100644 index 65c40119408db6..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/power.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/power.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(PowerLayerTest, CompareWithRefs){ - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/prior_box.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/prior_box.hpp deleted file mode 100644 index 3a896e00c535fe..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/prior_box.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/prior_box.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(PriorBoxLayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/prior_box_clustered.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/prior_box_clustered.hpp deleted file mode 100644 index 1c218adea0dc06..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/prior_box_clustered.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/prior_box_clustered.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(PriorBoxClusteredLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/proposal.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/proposal.hpp deleted file mode 100644 index 25cbf26bc4f9e3..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/proposal.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/proposal.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ProposalLayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/psroi_pooling.hpp 
b/src/tests/functional/plugin/shared/include/single_layer_tests/psroi_pooling.hpp deleted file mode 100644 index 0c97a47860ccd5..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/psroi_pooling.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/psroi_pooling.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(PSROIPoolingLayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/random_uniform.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/random_uniform.hpp deleted file mode 100644 index 6198c19ed13359..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/random_uniform.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/random_uniform.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(RandomUniformLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/range.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/range.hpp deleted file mode 100644 index ec3bd867346620..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/range.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/range.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(RangeNumpyLayerTest, CompareWithRefs) { - Run(); -} - -TEST_P(RangeLayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/rdft.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/rdft.hpp deleted file mode 100644 index c936ef55bc7dc4..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/rdft.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/rdft.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(RDFTLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/reduce_ops.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/reduce_ops.hpp deleted file mode 100644 index d7f925e37f1622..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/reduce_ops.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/reduce_ops.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ReduceOpsLayerTest, CompareWithRefs) { - Run(); -} - -TEST_P(ReduceOpsLayerWithSpecificInputTest, CompareWithRefs) { - Run(); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/region_yolo.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/region_yolo.hpp deleted file mode 100644 index ee559ca31265b9..00000000000000 --- 
a/src/tests/functional/plugin/shared/include/single_layer_tests/region_yolo.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/region_yolo.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(RegionYoloLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/reorg_yolo.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/reorg_yolo.hpp deleted file mode 100644 index 8ac3830e93cb97..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/reorg_yolo.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/reorg_yolo.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ReorgYoloLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/reshape.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/reshape.hpp deleted file mode 100644 index 8d92717a716877..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/reshape.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/reshape.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ReshapeLayerTest, CompareWithRefsDynamicBath) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/result.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/result.hpp deleted file mode 100644 index 877fab2a9a3192..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/result.hpp +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "shared_test_classes/single_layer/result.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ResultLayerTest, CompareWithRefs) { - Run(); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/reverse.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/reverse.hpp deleted file mode 100644 index 15ff7048caf5d9..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/reverse.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/reverse.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ReverseLayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/reverse_sequence.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/reverse_sequence.hpp deleted file mode 100644 index 8765bedfd34f2a..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/reverse_sequence.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/reverse_sequence.hpp" - -namespace LayerTestsDefinitions { - 
-TEST_P(ReverseSequenceLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/rnn_cell.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/rnn_cell.hpp deleted file mode 100644 index 75cb907d8f9d13..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/rnn_cell.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/rnn_cell.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(RNNCellTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/rnn_sequence.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/rnn_sequence.hpp deleted file mode 100644 index 50e98e9fae81fb..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/rnn_sequence.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/rnn_sequence.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(RNNSequenceTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/roi_align.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/roi_align.hpp deleted file mode 100644 index 465f3b72845dac..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/roi_align.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/roi_align.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ROIAlignLayerTest, CompareWithRefs) { - Run(); -} - -TEST_P(ROIAlignV9LayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/roi_pooling.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/roi_pooling.hpp deleted file mode 100644 index 1979dfb6a9a8c5..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/roi_pooling.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/roi_pooling.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ROIPoolingLayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/roll.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/roll.hpp deleted file mode 100644 index 965ff6f636794c..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/roll.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/roll.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(RollLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_ND_update.hpp 
b/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_ND_update.hpp deleted file mode 100644 index 01fc31d17fe6c8..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_ND_update.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/scatter_ND_update.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ScatterNDUpdateLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_elements_update.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_elements_update.hpp deleted file mode 100644 index bb3b3ad153d5fe..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_elements_update.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/scatter_elements_update.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ScatterElementsUpdateLayerTest, CompareWithRefs) { - Run(); -}; - -TEST_P(ScatterElementsUpdate12LayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_update.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_update.hpp deleted file mode 100644 index 8c9cfa482a7994..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/scatter_update.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/scatter_update.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ScatterUpdateLayerTest, CompareWithRefs) { - Run(); -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/select.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/select.hpp deleted file mode 100644 index 1ad9e07d9c617f..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/select.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/select.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(SelectLayerTest, CompareWithRefImpl) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/shape_of.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/shape_of.hpp deleted file mode 100644 index 758a93d9f51f0b..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/shape_of.hpp +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/shape_of.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ShapeOfLayerTest, CompareWithRefs) { - Run(); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/shuffle_channels.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/shuffle_channels.hpp deleted file mode 100644 index 
7d7f6ab29c32e2..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/shuffle_channels.hpp +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/shuffle_channels.hpp" - -namespace LayerTestsDefinitions { - -TEST_P(ShuffleChannelsLayerTest, CompareWithRefs) { - Run(); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/slice.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/slice.hpp deleted file mode 100644 index 41d553c12d8635..00000000000000 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/slice.hpp +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/single_layer/slice.hpp" - -namespace LayerTestsDefinitions { -TEST_P(Slice8LayerTest, CompareWithRefs) { - run(); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/softmax.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/softmax.hpp index 25214ffcd0bdb8..5bb173a87d4be9 100644 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/softmax.hpp +++ b/src/tests/functional/plugin/shared/include/single_layer_tests/softmax.hpp @@ -4,28 +4,5 @@ #pragma once -#include "shared_test_classes/single_op/softmax.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -TEST_P(SoftMaxLayerTest, CompareWithRefs) { - run(); -} - -TEST_P(SoftMaxLayerTest, CompareQueryModel) { - query_model(); -} - -TEST_P(SoftMax8LayerTest, CompareWithRefs) { - run(); -} - -TEST_P(SoftMax8LayerTest, CompareQueryModel) { - query_model(); -} - -} // namespace subgraph -} // namespace test -} // namespace ov +// TODO (vurusovs): for NPU compatibility. 
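Every header deleted in this stretch is the same thin shim: it pulls a test class in from shared_test_classes/single_layer/ and registers it through the legacy LayerTestsDefinitions API (capitalized Run()). The replacements, as the softmax hunk above shows, live under single_op_tests/ and use the ov::test API instead (lowercase run(), plus query_model() coverage). A minimal side-by-side sketch of the two styles; the class names are taken from the hunks, everything else is illustrative:

// Legacy wrapper style (deleted above):
#include "shared_test_classes/single_layer/lrn.hpp"
namespace LayerTestsDefinitions {
TEST_P(LrnLayerTest, CompareWithRefs) {
    Run();  // InferenceEngine-era base class entry point
}
} // namespace LayerTestsDefinitions

// Replacement style (see the new single_op_tests/softmax.hpp below):
#include "shared_test_classes/single_op/softmax.hpp"
namespace ov {
namespace test {
TEST_P(SoftMax8LayerTest, CompareWithRefs) {
    run();  // ov::test base class entry point
}
TEST_P(SoftMax8LayerTest, CompareQueryModel) {
    query_model();  // additionally exercises the query_model() plugin path
}
} // namespace test
} // namespace ov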
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/space_to_batch.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/space_to_batch.hpp
deleted file mode 100644
index 3d7bf5fd8e9d1e..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/space_to_batch.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/space_to_batch.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(SpaceToBatchLayerTest, CompareWithRefs) {
-    Run();
-}
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/space_to_depth.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/space_to_depth.hpp
deleted file mode 100644
index 5d5eb94b3e73c9..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/space_to_depth.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/space_to_depth.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(SpaceToDepthLayerTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/split.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/split.hpp
deleted file mode 100644
index 06ef1c2d1b3c99..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/split.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/split.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(SplitLayerTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/squeeze_unsqueeze.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/squeeze_unsqueeze.hpp
deleted file mode 100644
index 021f0d4c12b988..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/squeeze_unsqueeze.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/squeeze_unsqueeze.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(SqueezeUnsqueezeLayerTest, CompareWithRefs) {
-    Run();
-}
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/strided_slice.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/strided_slice.hpp
deleted file mode 100644
index 10cc8ba8131a20..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/strided_slice.hpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/strided_slice.hpp"
-
-namespace LayerTestsDefinitions {
-TEST_P(StridedSliceLayerTest, CompareWithRefs) {
-    Run();
-}
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/tensor_iterator.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/tensor_iterator.hpp
deleted file mode 100644
index c2187e2da69c94..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/tensor_iterator.hpp
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/tensor_iterator.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(TensorIteratorTest, CompareWithRefs) {
-    Run();
-};
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/tile.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/tile.hpp
deleted file mode 100644
index 6dbffae2d24e8c..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/tile.hpp
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/tile.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(TileLayerTest, CompareWithRefs) {
-    Run();
-}
-
-} // namespace LayerTestsDefinitions
-
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/topk.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/topk.hpp
deleted file mode 100644
index 16d8dc09c19ed3..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/topk.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/topk.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(TopKLayerTest, CompareWithRefsDynamicBath) {
-    Run();
-}
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/transpose.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/transpose.hpp
deleted file mode 100644
index 0e44f8ca9dede8..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/transpose.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/transpose.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(TransposeLayerTest, CompareWithRefs) {
-    Run();
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/variadic_split.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/variadic_split.hpp
deleted file mode 100644
index 557f3b65c62bd5..00000000000000
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/variadic_split.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/single_layer/variadic_split.hpp"
-
-namespace LayerTestsDefinitions {
-
-TEST_P(VariadicSplitLayerTest, CompareWithRefs) {
-    Run();
-}
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/gather.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/gather.hpp
index be9aa74077694e..e6b29a28ad9844 100644
--- a/src/tests/functional/plugin/shared/include/single_op_tests/gather.hpp
+++ b/src/tests/functional/plugin/shared/include/single_op_tests/gather.hpp
@@ -27,5 +27,10 @@ TEST_P(Gather8IndiceScalarLayerTest, Inference) {
 TEST_P(Gather8withIndicesDataLayerTest, Inference) {
     run();
 };
+
+TEST_P(GatherStringWithIndicesDataLayerTest, Inference) {
+    run();
+};
+
 } // namespace test
 } // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/group_normalization.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/group_normalization.hpp
similarity index 72%
rename from src/tests/functional/plugin/shared/include/single_layer_tests/group_normalization.hpp
rename to src/tests/functional/plugin/shared/include/single_op_tests/group_normalization.hpp
index 8f080764adbced..a7887c22dafebe 100644
--- a/src/tests/functional/plugin/shared/include/single_layer_tests/group_normalization.hpp
+++ b/src/tests/functional/plugin/shared/include/single_op_tests/group_normalization.hpp
@@ -2,11 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 #pragma once
-#include "shared_test_classes/single_layer/group_normalization.hpp"
+#include "shared_test_classes/single_op/group_normalization.hpp"
 
 namespace ov {
 namespace test {
-namespace subgraph {
 
 TEST_P(GroupNormalizationTest, CompareWithRefs) {
     run();
@@ -16,6 +15,5 @@ TEST_P(GroupNormalizationTest, CompareQueryModel) {
     query_model();
 }
 
-} // namespace subgraph
 } // namespace test
 } // namespace ov
diff --git a/src/tests/functional/plugin/shared/include/single_op_tests/softmax.hpp b/src/tests/functional/plugin/shared/include/single_op_tests/softmax.hpp
new file mode 100644
index 00000000000000..25214ffcd0bdb8
--- /dev/null
+++ b/src/tests/functional/plugin/shared/include/single_op_tests/softmax.hpp
@@ -0,0 +1,31 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "shared_test_classes/single_op/softmax.hpp"
+
+namespace ov {
+namespace test {
+namespace subgraph {
+
+TEST_P(SoftMaxLayerTest, CompareWithRefs) {
+    run();
+}
+
+TEST_P(SoftMaxLayerTest, CompareQueryModel) {
+    query_model();
+}
+
+TEST_P(SoftMax8LayerTest, CompareWithRefs) {
+    run();
+}
+
+TEST_P(SoftMax8LayerTest, CompareQueryModel) {
+    query_model();
+}
+
+} // namespace subgraph
+} // namespace test
+} // namespace ov
diff --git a/src/tests/functional/plugin/shared/src/behavior/compiled_model/import_export.cpp b/src/tests/functional/plugin/shared/src/behavior/compiled_model/import_export.cpp
index 074100be73cc2b..72be0c6bcbb0b0 100644
--- a/src/tests/functional/plugin/shared/src/behavior/compiled_model/import_export.cpp
+++ b/src/tests/functional/plugin/shared/src/behavior/compiled_model/import_export.cpp
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-
+#include
 #include "behavior/compiled_model/import_export.hpp"
 #include "common_test_utils/ov_test_utils.hpp"
@@ -298,6 +298,16 @@ TEST_P(OVClassCompiledModelImportExportTestP, smoke_ImportNetworkNoThrowWithDevi
     OV_ASSERT_NO_THROW(executableNetwork.create_infer_request());
 }
 
+TEST_P(OVClassCompiledModelImportExportTestP, smoke_ImportNetworkThrowWithDeviceName) {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    ov::Core ie = createCoreWithTemplate();
+    std::stringstream wrongStm;
+    // Import model with wrong format throws exception
+    OV_EXPECT_THROW((ie.import_model(wrongStm, target_device)),
+                    ov::Exception,
+                    testing::HasSubstr("device xml header"));
+}
+
 //
 // GetRuntimeModel
 //
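The new smoke_ImportNetworkThrowWithDeviceName test pins down the failure path of ov::Core::import_model: a stream that does not carry a valid exported-model header must be rejected with ov::Exception rather than crash or import garbage. A standalone sketch of that contract (the device name is illustrative; the matched error text, "device xml header", comes from the plugin in the hunk above):

#include <iostream>
#include <sstream>
#include "openvino/runtime/core.hpp"

int main() {
    ov::Core core;
    std::stringstream wrong_stream;  // empty: not a valid exported model blob
    try {
        auto compiled = core.import_model(wrong_stream, "TEMPLATE");
    } catch (const ov::Exception& e) {
        // Plugins are expected to reject malformed streams with ov::Exception.
        std::cerr << "rejected as expected: " << e.what() << "\n";
    }
    return 0;
}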
diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/hetero_synthetic.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/hetero_synthetic.cpp
index 125735eb476fd1..232146386fe254 100644
--- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/hetero_synthetic.cpp
+++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/hetero_synthetic.cpp
@@ -184,6 +184,14 @@ std::vector OVHeteroSyntheticTest::randomMajorNodeFunctions(
     return results;
 }
 
+std::vector OVHeteroSyntheticTest::withMajorNodesFunctions(const std::function()>& builder,
+                                                           const std::unordered_set& majorNodes,
+                                                           bool dynamic_batch) {
+    auto function = builder();
+    std::vector result;
+    result.push_back(FunctionParameter{majorNodes, function, dynamic_batch, 0});
+    return result;
+}
 
 std::vector OVHeteroSyntheticTest::_singleMajorNodeFunctions = OVHeteroSyntheticTest::singleMajorNodeFunctions(builders);
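withMajorNodesFunctions complements singleMajorNodeFunctions and randomMajorNodeFunctions: rather than enumerating every node or sampling random subsets, it wraps one builder-produced model together with an explicit, caller-chosen set of "major" nodes into a single FunctionParameter. The template arguments in the hunk above were lost to extraction, so the exact signature is inferred; the field order {majorNodes, function, dynamic_batch, seed} is visible in the push_back. A hypothetical call site, with the builder and node name purely illustrative:

auto cases = OVHeteroSyntheticTest::withMajorNodesFunctions(
    [] { return ov::test::utils::make_split_multi_conv_concat(); },  // model builder
    {"Conv_1"},  // nodes pinned to the first (major) device
    false);      // dynamic_batch off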
diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/disable_lowering_precision.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/disable_lowering_precision.cpp
index 5b5feb5c470689..45ba0e98259c75 100644
--- a/src/tests/functional/plugin/shared/src/execution_graph_tests/disable_lowering_precision.cpp
+++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/disable_lowering_precision.cpp
@@ -9,7 +9,6 @@
 #include
 #include
-#include
 #include "openvino/runtime/exec_model_info.hpp"
 #include "openvino/core/model.hpp"
 #include "common_test_utils/common_utils.hpp"
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp
index 0879ae91c2562c..6da4d0f86fd173 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp
@@ -51,8 +51,6 @@ std::string AddTransformation::getTestCaseName(const testing::TestParamInfo< Add
 }
 
 void AddTransformation::SetUp() {
-    abs_threshold = 1.1;
-    rel_threshold = 3;
     ov::element::Type precision;
     ov::PartialShape inputShape;
     AddTestValues param;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp
index 6bd189263a8f11..1ba23576aa88aa 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp
@@ -24,8 +24,6 @@ std::string BatchToSpaceTransformation::getTestCaseName(const testing::TestParam
 }
 
 void BatchToSpaceTransformation::SetUp() {
-    abs_threshold = 1.1;
-
     ov::element::Type input_type;
     BatchToSpaceTransformationParam param;
     std::tie(input_type, targetDevice, param) = this->GetParam();
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp
index 5a97477cc9abae..2cf6ee1d670730 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp
@@ -28,8 +28,6 @@ std::string ClampTransformation::getTestCaseName(const testing::TestParamInfo
 inputShapeAndHandling;
 ov::Shape outputShape;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp
index e92913b60189ff..e98dca9eec9bb1 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp
@@ -33,9 +33,6 @@ std::string ConvolutionQDqTransformation::getTestCaseName(const testing::TestPar
 }
 
 void ConvolutionQDqTransformation::SetUp() {
-    rel_threshold = 0.1;
-    abs_threshold = 12.8;
-
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp
index 02c366f8fc5298..b1acadbb73dc5d 100755
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp
@@ -36,9 +36,6 @@ std::string ConvolutionTransformation::getTestCaseName(const testing::TestParamI
 }
 
 void ConvolutionTransformation::SetUp() {
-    rel_threshold = 1.0e+10;
-    abs_threshold = 1.4;
-
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp
index 14b49e5800bdf4..f79028e3ccb4cc 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp
@@ -36,9 +36,6 @@ std::string ConvolutionWIthIncorrectWeightsTransformation::getTestCaseName(const
 }
 
 void ConvolutionWIthIncorrectWeightsTransformation::SetUp() {
-    rel_threshold = 0.1;
-    abs_threshold = 16.1;
-
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp
index 713bc7d8439867..31270f5180f684 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp
@@ -2,7 +2,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include
 #include
 #include
 #include
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp
index e35d184ba71c7b..ed2d115e1155c1 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp
@@ -49,7 +49,6 @@ void EliminateFakeQuantizeTransformation::SetUp() {
 TEST_P(EliminateFakeQuantizeTransformation, CompareWithRefImpl) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    abs_threshold = 2.3;
     run();
 
     EliminateFakeQuantizeTransformationTestValues testValues;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp
index fd766959b0f383..8b9b70481d5880 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp
@@ -27,8 +27,6 @@ std::string FakeQuantizeAndAvgPoolTransformation::getTestCaseName(const testing:
 }
 
 void FakeQuantizeAndAvgPoolTransformation::SetUp() {
-    rel_threshold = 0.5f;
-    abs_threshold = 1.0;
     ov::element::Type precision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp
index a641d08caed2aa..f447002a927de7 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp
@@ -27,7 +27,6 @@ std::string FakeQuantizeAndMaxPoolTransformation::getTestCaseName(const testing:
 }
 
 void FakeQuantizeAndMaxPoolTransformation::SetUp() {
-    abs_threshold = 1.0;
     ov::element::Type precision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp
index 5aad24deae06bf..f80290734b2cff 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp
@@ -35,9 +35,6 @@ std::string FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation::getTe
 }
 
 void FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation::SetUp() {
-    rel_threshold = 0.1;
-    abs_threshold = 0.1;
-
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp
index de67a53328239e..8b81da2d6b6070 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp
@@ -28,8 +28,6 @@ std::string FakeQuantizePrecisionSelectionTransformation::getTestCaseName(const
 }
 
 void FakeQuantizePrecisionSelectionTransformation::SetUp() {
-    abs_threshold = 0.01;
-
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp
index fdf773a9a8c784..6d6751db46d5b1 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp
@@ -32,8 +32,6 @@ std::string FakeQuantizeTransformation::getTestCaseName(const testing::TestParam
 }
 
 void FakeQuantizeTransformation::SetUp() {
-    abs_threshold = 1.0e-3;
-
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp
index 8ed160a3ec2b3f..969b137dcb540a 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp
@@ -28,8 +28,6 @@ std::string FakeQuantizeWithNotOptimalTransformation::getTestCaseName(const test
 }
 
 void FakeQuantizeWithNotOptimalTransformation::SetUp() {
-    abs_threshold = 4;
-    rel_threshold = 2778;
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
     ov::PartialShape inputShape;
     ov::element::Type netPrecision;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp
index 74a31d244b20a6..b26f67cc07d444 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp
@@ -37,8 +37,6 @@ std::string FullyConnectedTransformation::getTestCaseName(const testing::TestPar
 }
 
 void FullyConnectedTransformation::SetUp() {
-    abs_threshold = 0.6;
-
     ov::element::Type precision;
     MatMulShapes shapes;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp
index 85fdbd1bb57d7d..503ffe24462700 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp
@@ -36,7 +36,6 @@ std::string FuseConvertTransformation::getTestCaseName(const testing::TestParamI
 }
 
 void FuseConvertTransformation::SetUp() {
-    abs_threshold = 0.01;
     ov::PartialShape shape;
     ov::element::Type precision;
     ov::builder::subgraph::DequantizationOperations deqOperations;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp
index 4ed6f51df2cec2..fac36d8f56b863 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp
@@ -33,8 +33,6 @@ std::string FuseDequantizeToFakeQuantizeTransformation::getTestCaseName(const te
 }
 
 void FuseDequantizeToFakeQuantizeTransformation::SetUp() {
-    abs_threshold = 0.1;
-
     FuseDequantizeToFakeQuantizeTransformationTestValues testValues;
     std::tie(targetDevice, testValues) = this->GetParam();
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp
index 1ebafccd1a21d8..1a428dce08778e 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp
@@ -27,7 +27,6 @@ std::string FuseFakeQuantizeAndScaleShiftTransformation::getTestCaseName(const t
 }
 
 void FuseFakeQuantizeAndScaleShiftTransformation::SetUp() {
-    abs_threshold = 1.8;
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp
index ce67bff1d06dcd..150f8c146feedb 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp
@@ -30,8 +30,6 @@ std::string GemmTransformation::getTestCaseName(const testing::TestParamInfo
 inputShapes;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp
index 6ad674ee9439d5..4658d4a9a684e0 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp
@@ -33,9 +33,6 @@ std::string GroupConvolutionQDqTransformation::getTestCaseName(const testing::Te
 }
 
 void GroupConvolutionQDqTransformation::SetUp() {
-    abs_threshold = 153.7;
-
-
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp
index b4262c9b14001f..a6e9f54178775c 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp
@@ -2,7 +2,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include
 #include
 #include
 #include
@@ -38,8 +37,6 @@ std::string MatMulTransformation::getTestCaseName(const testing::TestParamInfo
 #include
 #include
 #include
@@ -38,8 +37,6 @@ std::string MatMulWithConstantTransformation::getTestCaseName(const testing::Tes
 void MatMulWithConstantTransformation::SetUp() {
-    abs_threshold = 1.0e-3;
-
     ov::element::Type precision;
     MatMulWithConstantTransformationTestValues testValues;
     std::tie(precision, targetDevice, testValues) = this->GetParam();
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp
index 6de4a9423ffc84..9b70e211c1ae38 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp
@@ -39,9 +39,6 @@ std::string MatMulWithOptimizedConstantFq::getTestCaseName(
 }
 
 void MatMulWithOptimizedConstantFq::SetUp() {
-    rel_threshold = 0.01;
-    abs_threshold = 2.1;
-
     ov::element::Type precision;
     std::pair shapes;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp
index 144ba7dd1b8a60..d41e69a80763e5 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp
@@ -36,8 +36,6 @@ std::string MoveFakeQuantizeTransformation::getTestCaseName(testing::TestParamIn
 }
 
 void MoveFakeQuantizeTransformation::SetUp() {
-    abs_threshold = 1.1;
-
     ov::element::Type netPrecision;
     std::vector inputShapes;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp
index 44e0a99c3c4452..a6d5d3b83b3c7a 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp
@@ -49,8 +49,6 @@ std::string MultiplyTransformation::getTestCaseName(const testing::TestParamInfo
 }
 
 void MultiplyTransformation::SetUp() {
-    abs_threshold = 0.1;
-
     ov::element::Type precision;
     ov::PartialShape inputShape;
     MultiplyTestValues param;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp
index f7ec4d8d4afcae..d8b961f6accd95 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp
@@ -28,8 +28,6 @@ std::string MultiplyWithOneParentTransformation::getTestCaseName(const testing::
 }
 
 void MultiplyWithOneParentTransformation::SetUp() {
-    rel_threshold = 0.01f;
-
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp
index 3dfc7692486a6a..f94a99c14035cc 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp
@@ -36,7 +36,6 @@ std::string MVNTransformation::getTestCaseName(const testing::TestParamInfo
 shapes;
 ov::element::Type precision;
 std::vector axes;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp
index 0beb3f72899172..be639c37df1085 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp
@@ -45,8 +45,6 @@ std::string OutputLayersConcat::getTestCaseName(const testing::TestParamInfo
 activations_shapes;
 std::vector weights_shapes;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp
index 67e232a4a4f77a..1fa455b5da8673 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp
@@ -30,7 +30,6 @@ std::string ReduceMaxTransformation::getTestCaseName(const testing::TestParamInf
 }
 
 void ReduceMaxTransformation::SetUp() {
-    abs_threshold = 1.1;
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp
index b28c4128686593..3a4611f2288100 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp
@@ -42,7 +42,6 @@ std::string ReduceMeanTransformation::getTestCaseName(const testing::TestParamIn
 }
 
 void ReduceMeanTransformation::SetUp() {
-    abs_threshold = 4.1;
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp
index 52cdf578ab8d67..9c84ef2c860e29 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp
@@ -30,7 +30,6 @@ std::string ReduceMinTransformation::getTestCaseName(const testing::TestParamInf
 }
 
 void ReduceMinTransformation::SetUp() {
-    abs_threshold = 0.1;
     ov::element::Type netPrecision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp @@ -30,8 +30,6 @@ std::string ReduceSumTransformation::getTestCaseName(const testing::TestParamInf } void ReduceSumTransformation::SetUp() { - abs_threshold = 4.1; - ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp index b4bb254e3d4b71..603349350cdeb0 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp @@ -32,7 +32,6 @@ std::string ReluTransformation::getTestCaseName(const testing::TestParamInfoGetParam(); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp index ee5eb94eeb970d..d014d159ab4c70 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp @@ -30,7 +30,6 @@ std::string SplitTransformation::getTestCaseName(const testing::TestParamInfo #include #include #include @@ -47,7 +46,6 @@ std::string SqueezeTransformation::getTestCaseName(const testing::TestParamInfo< return result.str(); } void SqueezeTransformation::SetUp() { - abs_threshold = 0.2; ov::element::Type netPrecision; ov::pass::low_precision::LayerTransformation::Params params; SqueezeTransformationParam squeezeParam; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp index 354814d7acafeb..4e585af211d6a5 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/strided_slice_transformation.cpp @@ -40,7 +40,6 @@ std::string StridedSliceTransformation::getTestCaseName(const testing::TestParam } void StridedSliceTransformation::SetUp() { - abs_threshold = 1.0; ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp index 98c7ed5593f145..efb614761fea71 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/subtract_transformation.cpp @@ -27,7 +27,6 @@ std::string SubtractTransformation::getTestCaseName(const testing::TestParamInfo } void SubtractTransformation::SetUp() { - abs_threshold = 0.1; ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git 
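// NOTE (editor): illustrative sketch only, not part of the diff. The abs_threshold /
// rel_threshold members deleted in the hunks above feed an element-wise tolerance
// check of roughly the following shape (compare with the LayerTestsCommon::Compare
// template removed further below). Names here are hypothetical stand-ins.
#include <algorithm>
#include <cmath>
#include <cstddef>

bool values_match(const float* expected, const float* actual, std::size_t size,
                  float abs_threshold, float rel_threshold) {
    for (std::size_t i = 0; i < size; ++i) {
        const float abs_diff = std::fabs(expected[i] - actual[i]);
        if (abs_diff <= abs_threshold)
            continue;  // close enough in absolute terms
        // Fall back to a relative check against the larger magnitude.
        const float denom = std::max(std::fabs(expected[i]), std::fabs(actual[i]));
        if (denom == 0.0f || abs_diff / denom > rel_threshold)
            return false;
    }
    return true;
}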
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp
index fff95b82618a0f..ca93bb11c46800 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp
@@ -37,7 +37,6 @@ std::string TransposeAfterMatMulTransformation::getTestCaseName(const testing::T
 }
 
 void TransposeAfterMatMulTransformation::SetUp() {
-    abs_threshold = 0.6;
     ov::element::Type precision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp
index dd7575a4ff22a2..2c53a7e958b745 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/unsqueeze_transformation.cpp
@@ -2,7 +2,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include
 #include
 #include
 #include
@@ -47,8 +46,6 @@ std::string UnsqueezeTransformation::getTestCaseName(const testing::TestParamInf
     return result.str();
 }
 void UnsqueezeTransformation::SetUp() {
-    abs_threshold = 1.0;
-    rel_threshold = 31.0;
     ov::element::Type netPrecision;
     ov::pass::low_precision::LayerTransformation::Params params;
     UnsqueezeTransformationParam unsqueezeParam;
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp
index 9a4cee0c6f3397..04d68043a3951b 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/variadic_split_transformation.cpp
@@ -37,7 +37,6 @@ std::string VariadicSplitTransformation::getTestCaseName(const testing::TestPara
 void VariadicSplitTransformation::SetUp() {
-    abs_threshold = 1.0;
     ov::element::Type precision;
     ov::PartialShape inputShape;
     ov::pass::low_precision::LayerTransformation::Params params;
diff --git a/src/tests/functional/plugin/shared/src/precomp.hpp b/src/tests/functional/plugin/shared/src/precomp.hpp
index 3ea31e6cbe21d5..8e740185d89fe5 100644
--- a/src/tests/functional/plugin/shared/src/precomp.hpp
+++ b/src/tests/functional/plugin/shared/src/precomp.hpp
@@ -30,3 +30,4 @@
 #include
 
 #include "openvino/core/type/float16.hpp"
+#include "openvino/openvino.hpp"
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp
index cd0e1375eda074..15d89175ea5382 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/benchmark.hpp
@@ -63,139 +63,6 @@ class BenchmarkLayerTestReporter {
     pugi::xml_document report_xml_{};
 };
 
-}  // namespace test
-}  // namespace ov
-
-namespace LayerTestsDefinitions {
-
-template
-class BenchmarkLayerTest : public BaseLayerTest {
-    static_assert(std::is_base_of::value,
-                  "BaseLayerTest should inherit from LayerTestsUtils::LayerTestsCommon");
-
-public:
-    static constexpr int kDefaultNumberOfAttempts = 100;
-    static constexpr double kMaxAllowedBenchmarkDifference = 0.05;
-
-    void RunBenchmark(const std::initializer_list& nodeTypeNames,
-                      const std::chrono::milliseconds warmupTime = std::chrono::milliseconds(2000),
-                      const int numAttempts = kDefaultNumberOfAttempts) {
-        bench_node_type_names_ = nodeTypeNames;
-        warmup_time_ = warmupTime;
-        num_attempts_ = numAttempts;
-        this->configuration.insert({"PERF_COUNT", "YES"});
-        this->Run();
-    }
-
-    void RunBenchmark(const std::string& nodeTypeName,
-                      const std::chrono::milliseconds warmupTime = std::chrono::milliseconds(2000),
-                      const int numAttempts = kDefaultNumberOfAttempts) {
-        if (!nodeTypeName.empty()) {
-            RunBenchmark({nodeTypeName}, warmupTime, numAttempts);
-        } else {
-            RunBenchmark({}, warmupTime, numAttempts);
-        }
-    }
-
-    void Validate() override {
-        for (const auto& res : curr_bench_results_) {
-            const auto& node_type_name = res.first;
-            const auto curr_time = static_cast(res.second);
-            if (prev_bench_results_.count(node_type_name) > 0) {
-                const auto prev_time = static_cast(prev_bench_results_[node_type_name]);
-                const auto delta_time = static_cast(curr_time - prev_time);
-                if (delta_time/prev_time > kMaxAllowedBenchmarkDifference) {
-                    std::cerr << "node_type_name: " << node_type_name <<
-                                 ", for test case: " << BaseLayerTest::GetTestName() <<
-                                 ", has exceeded the benchmark threshold: " << kMaxAllowedBenchmarkDifference <<
-                                 ". Current: " << curr_time << " us, previous: " << prev_time << " us" << std::endl;
-                }
-            }
-        }
-    }
-
-protected:
-    void Infer() override {
-        this->inferRequest = this->executableNetwork.CreateInferRequest();
-        this->ConfigureInferRequest();
-
-#ifdef ENABLE_BENCHMARK_FILE_REPORT
-        reporter_ = std::unique_ptr(
-                new ::ov::test::BenchmarkLayerTestReporter{false});
-#else
-        reporter_ = std::unique_ptr(
-                new ::ov::test::BenchmarkLayerTestReporter{true});
-#endif
-        for (const auto& node_type_name : bench_node_type_names_) {
-            try {
-                const auto time = reporter_->get_time(node_type_name, BaseLayerTest::GetTestName());
-                prev_bench_results_[node_type_name] = time;
-            } catch (...) {
-            }
-        }
-
-        std::map results_us{};
-        for (const auto& node_type_name : bench_node_type_names_) {
-            results_us[node_type_name] = {};
-        }
-
-        // Warmup
-        auto warm_current = std::chrono::steady_clock::now();
-        const auto warm_end = warm_current + warmup_time_;
-        while (warm_current < warm_end) {
-            this->inferRequest.Infer();
-            warm_current = std::chrono::steady_clock::now();
-        }
-
-        // Benchmark
-        for (size_t i = 0; i < num_attempts_; ++i) {
-            this->inferRequest.Infer();
-            const auto& perf_results = this->inferRequest.GetPerformanceCounts();
-            for (auto& res : results_us) {
-                const std::string node_type_name = res.first;
-                uint64_t& time = res.second;
-                auto found_profile = std::find_if(perf_results.begin(), perf_results.end(),
-                    [&node_type_name](const InferenceEngine::InferenceEngineProfileInfo& profile) {
-                        return profile.layer_type == node_type_name;
-                    });
-                if (found_profile == perf_results.end()) {
-                    IE_THROW() << "Cannot find operator by node type: " << node_type_name;
-                }
-                time += found_profile->second.realTime_uSec;
-            }
-        }
-
-        std::stringstream report{};
-        uint64_t total_us = 0;
-        for (const auto& res : results_us) {
-            const std::string node_type_name = res.first;
-            uint64_t time = res.second;
-            time /= num_attempts_;
-            total_us += time;
-            report << std::fixed << std::setfill('0') << node_type_name << ": " << time << " us\n";
-#ifdef ENABLE_BENCHMARK_FILE_REPORT
-            curr_bench_results_[node_type_name] = time;
-            reporter_->report(node_type_name, BaseLayerTest::GetTestName(), time);
-#endif
-        }
-        report << std::fixed << std::setfill('0') << "Total time: " << total_us << " us\n";
-        std::cout << report.str();
-    }
-
-private:
-    std::unique_ptr reporter_;
-    std::unordered_map prev_bench_results_;
-    std::unordered_map curr_bench_results_;
-    std::vector bench_node_type_names_;
-    std::chrono::milliseconds warmup_time_;
-    int num_attempts_;
-};
-
-}  // namespace LayerTestsDefinitions
-
-namespace ov {
-namespace test {
-
 template
 class BenchmarkLayerTest : public BaseLayerTest {
     static_assert(std::is_base_of::value,
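// NOTE (editor): illustrative sketch only, not part of the diff. The removed
// LayerTestsDefinitions::BenchmarkLayerTest above follows this warmup-then-measure
// pattern; infer() below is a hypothetical stand-in for the timed inference call,
// and num_attempts is assumed to be positive.
#include <chrono>
#include <cstdint>
#include <functional>

uint64_t benchmark_us(const std::function<void()>& infer,
                      std::chrono::milliseconds warmup_time,
                      int num_attempts) {
    using steady = std::chrono::steady_clock;
    // Warmup: run until the warmup window elapses so caches and lazy init settle.
    const auto warm_end = steady::now() + warmup_time;
    while (steady::now() < warm_end)
        infer();
    // Measure: average wall time over a fixed number of attempts.
    const auto start = steady::now();
    for (int i = 0; i < num_attempts; ++i)
        infer();
    const auto total =
        std::chrono::duration_cast<std::chrono::microseconds>(steady::now() - start);
    return static_cast<uint64_t>(total.count()) / static_cast<uint64_t>(num_attempts);
}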
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp
index b2a605d89716e8..a23e1be0e84943 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/layer_test_utils.hpp
@@ -5,15 +5,12 @@
 #pragma once
 
 #include
-#include
 
 #include "common_test_utils/common_utils.hpp"
 #include "common_test_utils/ov_test_utils.hpp"
 #include "common_test_utils/test_common.hpp"
-#include "functional_test_utils/blob_utils.hpp"
+#include "common_test_utils/data_utils.hpp"
 #include "functional_test_utils/crash_handler.hpp"
-#include "functional_test_utils/plugin_cache.hpp"
-#include "functional_test_utils/precision_utils.hpp"
 #include "functional_test_utils/skip_tests_config.hpp"
 #include "functional_test_utils/summary/environment.hpp"
 #include "functional_test_utils/summary/op_summary.hpp"
@@ -25,126 +22,9 @@ namespace LayerTestsUtils {
 
 using TargetDevice = std::string;
 
-typedef std::tuple<
-    InferenceEngine::Precision,  // Network Precision
-    InferenceEngine::SizeVector, // Input Shape
-    TargetDevice                 // Target Device
-> basicParams;
-
-enum RefMode {
-    INTERPRETER,
-    CONSTANT_FOLDING,
-    IE
-};
-
-class LayerTestsCommon : public ov::test::TestsCommon {
-public:
-    virtual InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &inputInfo) const;
-
-    virtual void Run();
-
-    static void Compare(const std::vector>> &expected,
-                        const std::vector &actual,
-                        float threshold,
-                        float abs_threshold = -1.f);
-
-    static void Compare(const std::pair> &expected,
-                        const InferenceEngine::Blob::Ptr &actual,
-                        float threshold,
-                        float abs_threshold = -1.f);
-
-    virtual void Compare(const std::vector>> &expectedOutputs,
-                         const std::vector &actualOutputs);
-
-    virtual void Compare(const std::pair> &expected, const InferenceEngine::Blob::Ptr &actual);
-
-    virtual void Compare(const InferenceEngine::Blob::Ptr &expected, const InferenceEngine::Blob::Ptr &actual);
-
-    virtual void Compare(const InferenceEngine::TensorDesc &actualDesc, const InferenceEngine::TensorDesc &expectedDesc);
-
-    std::shared_ptr GetFunction();
-
-    std::map& GetConfiguration();
-
-    template
-    static void Compare(const T_NGRAPH *expected, const T_IE *actual, std::size_t size, float threshold, float abs_threshold = -1.f) {
-        for (std::size_t i = 0; i < size; ++i) {
-            const T_NGRAPH &ref = expected[i];
-            const auto &res = actual[i];
-            const auto absoluteDifference = ov::test::utils::ie_abs(res - ref);
-            if (abs_threshold > 0.f && absoluteDifference > abs_threshold) {
-                IE_THROW() << "Absolute comparison of values expected: " << std::to_string(ref) << " and actual: " << std::to_string(res)
-                           << " at index " << i << " with absolute threshold " << abs_threshold
-                           << " failed";
-            }
-            if (absoluteDifference <= threshold) {
-                continue;
-            }
-            double max;
-            if (sizeof(T_IE) < sizeof(T_NGRAPH)) {
-                max = static_cast(std::max(ov::test::utils::ie_abs(T_NGRAPH(res)), ov::test::utils::ie_abs(ref)));
-            } else {
-                max = static_cast(std::max(ov::test::utils::ie_abs(res), ov::test::utils::ie_abs(T_IE(ref))));
-            }
-            double diff = static_cast(absoluteDifference) / max;
-            if (max == 0 || (diff > static_cast(threshold)) ||
-                (std::isnan(static_cast(res)) ^ std::isnan(static_cast(ref)))) {
-                IE_THROW() << "Relative comparison of values expected: " << std::to_string(ref) << " and actual: " << std::to_string(res)
-                           << " at index " << i << " with threshold " << threshold
-                           << " failed";
-            }
-        }
-    }
-
+class LayerTestsCommon {
 protected:
     LayerTestsCommon();
-
-    RefMode GetRefMode() {
-        return refMode;
-    }
-
-    std::shared_ptr getCore() {
-        return core;
-    }
-
-    virtual void ConfigureNetwork();
-
-    virtual void LoadNetwork();
-
-    virtual void ExpectLoadNetworkToThrow(const std::string& msg);
-
-    virtual void GenerateInputs();
-
-    virtual void ConfigureInferRequest();
-
-    virtual void Infer();
-
-    TargetDevice targetDevice;
-    std::shared_ptr function;
-    std::shared_ptr functionRefs;
-    std::map configuration;
-    // Non default values of layouts/precisions will be set to CNNNetwork
-    InferenceEngine::Layout inLayout = InferenceEngine::Layout::ANY;
-    InferenceEngine::Layout outLayout = InferenceEngine::Layout::ANY;
-    InferenceEngine::Precision inPrc = InferenceEngine::Precision::UNSPECIFIED;
-    InferenceEngine::Precision outPrc = InferenceEngine::Precision::UNSPECIFIED;
-    InferenceEngine::ExecutableNetwork executableNetwork;
-    std::vector inputs;
-    float threshold;
-    float abs_threshold;
-    InferenceEngine::CNNNetwork cnnNetwork;
-    std::shared_ptr core;
-
-    virtual void Validate();
-
-    virtual std::vector>> CalculateRefs();
-
-    virtual std::vector GetOutputs();
-
-    InferenceEngine::InferRequest inferRequest;
-
-private:
-    RefMode refMode = RefMode::INTERPRETER;
 };
 
 }  // namespace LayerTestsUtils
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp
index 0db366b4e7b034..985ba1e8bbc660 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp
@@ -4,7 +4,6 @@
 #pragma once
 
-#include "ie_core.hpp"
 
 namespace ov {
 namespace test {
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/activation.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/activation.hpp
deleted file mode 100644
index d8fe3c676bc2ea..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/activation.hpp
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "ie_core.hpp"
-#include "ie_precision.hpp"
-
-#include "functional_test_utils/blob_utils.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "common_test_utils/common_utils.hpp"
-
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-static std::map activationNames = {
-    {ngraph::helpers::ActivationTypes::Sigmoid, "Sigmoid"},
-    {ngraph::helpers::ActivationTypes::Tanh, "Tanh"},
-    {ngraph::helpers::ActivationTypes::Relu, "Relu"},
-    {ngraph::helpers::ActivationTypes::LeakyRelu, "LeakyRelu"},
-    {ngraph::helpers::ActivationTypes::Exp, "Exp"},
-    {ngraph::helpers::ActivationTypes::Log, "Log"},
-    {ngraph::helpers::ActivationTypes::Sign, "Sign"},
-    {ngraph::helpers::ActivationTypes::Abs, "Abs"},
-    {ngraph::helpers::ActivationTypes::Clamp, "Clamp"},
-    {ngraph::helpers::ActivationTypes::Negative, "Negative"},
-    {ngraph::helpers::ActivationTypes::Acos, "Acos"},
-    {ngraph::helpers::ActivationTypes::Acosh, "Acosh"},
-    {ngraph::helpers::ActivationTypes::Asin, "Asin"},
-    {ngraph::helpers::ActivationTypes::Asinh, "Asinh"},
-    {ngraph::helpers::ActivationTypes::Atan, "Atan"},
-    {ngraph::helpers::ActivationTypes::Atanh, "Atanh"},
-    {ngraph::helpers::ActivationTypes::Cos, "Cos"},
-    {ngraph::helpers::ActivationTypes::Cosh, "Cosh"},
-    {ngraph::helpers::ActivationTypes::Floor, "Floor"},
-    {ngraph::helpers::ActivationTypes::Sin, "Sin"},
-    {ngraph::helpers::ActivationTypes::Sinh, "Sinh"},
-    {ngraph::helpers::ActivationTypes::Sqrt, "Sqrt"},
-    {ngraph::helpers::ActivationTypes::Tan, "Tan"},
-    {ngraph::helpers::ActivationTypes::Elu, "Elu"},
-    {ngraph::helpers::ActivationTypes::Erf, "Erf"},
-    {ngraph::helpers::ActivationTypes::HardSigmoid, "HardSigmoid"},
-    {ngraph::helpers::ActivationTypes::Selu, "Selu"},
-    {ngraph::helpers::ActivationTypes::Sigmoid, "Sigmoid"},
-    {ngraph::helpers::ActivationTypes::Tanh, "Tanh"},
-    {ngraph::helpers::ActivationTypes::Relu, "Relu"},
-    {ngraph::helpers::ActivationTypes::LeakyRelu, "LeakyRelu"},
-    {ngraph::helpers::ActivationTypes::Exp, "Exp"},
-    {ngraph::helpers::ActivationTypes::Log, "Log"},
-    {ngraph::helpers::ActivationTypes::Sign, "Sign"},
-    {ngraph::helpers::ActivationTypes::Abs, "Abs"},
-    {ngraph::helpers::ActivationTypes::Gelu, "Gelu"},
-    {ngraph::helpers::ActivationTypes::Ceiling, "Ceiling"},
-    {ngraph::helpers::ActivationTypes::PReLu, "PReLu"},
-    {ngraph::helpers::ActivationTypes::Mish, "Mish"},
-    {ngraph::helpers::ActivationTypes::HSwish, "HSwish"},
-    {ngraph::helpers::ActivationTypes::SoftPlus, "SoftPlus"},
-    {ngraph::helpers::ActivationTypes::Swish, "Swish"},
-    {ngraph::helpers::ActivationTypes::HSigmoid, "HSigmoid"},
-    {ngraph::helpers::ActivationTypes::RoundHalfToEven, "RoundHalfToEven"},
-    {ngraph::helpers::ActivationTypes::RoundHalfAwayFromZero, "RoundHalfAwayFromZero"},
-    {ngraph::helpers::ActivationTypes::GeluErf, "GeluErf"},
-    {ngraph::helpers::ActivationTypes::GeluTanh, "GeluTanh"},
-    {ngraph::helpers::ActivationTypes::SoftSign, "SoftSign"},
-};
-
-typedef std::tuple<
-    std::pair>,          // Activation type and constant value
-    InferenceEngine::Precision,
-    InferenceEngine::Precision,  // Input precision
-    InferenceEngine::Precision,  // Output precision
-    InferenceEngine::Layout,     // Input layout
-    InferenceEngine::Layout,     // Output layout
-    std::pair, std::vector>,
-    std::string> activationParams;
-
-class ActivationLayerTest : public testing::WithParamInterface,
-                            virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    ngraph::helpers::ActivationTypes activationType;
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-class ActivationParamLayerTest : public ActivationLayerTest {
-protected:
-    void SetUp() override;
-
-private:
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-    void generateActivationBlob(std::vector constantsValue);
-    ov::ParameterVector createActivationParams(
-        ov::element::Type ngPrc, std::vector inShape = {});
-
-private:
-    std::vector constantsValue;
-};
-
-class ActivationDynamicLayerTest : public ActivationLayerTest {
-public:
-    std::unordered_set static_dims;
-    void Run() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/adaptive_pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/adaptive_pooling.hpp
deleted file mode 100644
index 445094c03a1b7d..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/adaptive_pooling.hpp
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-using adapoolParams = std::tuple<
-    std::vector,                 // feature map shape
-    std::vector,                 // pooled spatial shape
-    std::string,                 // pooling mode
-    InferenceEngine::Precision,  // net precision
-    LayerTestsUtils::TargetDevice>;  // device name
-
-class AdaPoolLayerTest : public testing::WithParamInterface,
-                         virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/batch_norm.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/batch_norm.hpp
deleted file mode 100644
index b261641ab1ea79..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/batch_norm.hpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-typedef std::tuple<
-    double,                        // epsilon
-    InferenceEngine::Precision,    // Net precision
-    InferenceEngine::Precision,    // Input precision
-    InferenceEngine::Precision,    // Output precision
-    InferenceEngine::Layout,       // Input layout
-    InferenceEngine::Layout,       // Output layout
-    InferenceEngine::SizeVector,   // Input shapes
-    LayerTestsUtils::TargetDevice  // Target device name
-> BatchNormLayerTestParams;
-
-class BatchNormLayerTest : public testing::WithParamInterface,
-                           virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/batch_to_space.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/batch_to_space.hpp
deleted file mode 100644
index b8730d29ac6e77..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/batch_to_space.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-using batchToSpaceParamsTuple = typename std::tuple<
-    std::vector,                 // block shape
-    std::vector,                 // crops begin
-    std::vector,                 // crops end
-    std::vector,                 // Input shapes
-    InferenceEngine::Precision,  // Network precision
-    InferenceEngine::Precision,  // Input precision
-    InferenceEngine::Precision,  // Output precision
-    InferenceEngine::Layout,     // Input layout
-    InferenceEngine::Layout,     // Output layout
-    std::string>;                // Device name>;
-
-class BatchToSpaceLayerTest : public testing::WithParamInterface,
-                              virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp
deleted file mode 100644
index b57cee5745ba25..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-using binConvSpecificParams = std::tuple<
-    InferenceEngine::SizeVector,  // Kernel size
-    InferenceEngine::SizeVector,  // Strides
-    std::vector,                  // Pads begin
-    std::vector,                  // Pads end
-    InferenceEngine::SizeVector,  // Dilations
-    size_t,                       // Num Output channels
-    ov::op::PadType,              // Padding type
-    float>;                       // Padding value
-
-using binaryConvolutionTestParamsSet = std::tuple<
-    binConvSpecificParams,           //
-    InferenceEngine::Precision,      // Network precision
-    InferenceEngine::Precision,      // Input precision
-    InferenceEngine::Precision,      // Output precision
-    InferenceEngine::Layout,         // Input layout
-    InferenceEngine::Layout,         // Output layout
-    InferenceEngine::SizeVector,     // Input shape
-    LayerTestsUtils::TargetDevice>;  // Device name
-
-class BinaryConvolutionLayerTest : public testing::WithParamInterface,
-                                   virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp
deleted file mode 100644
index e2634eaf5b0edd..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2018-2024 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-using BroadcastParamsTuple = typename std::tuple; // Device name
-
-class BroadcastLayerTest : public testing::WithParamInterface,
-                           virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/bucketize.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/bucketize.hpp
deleted file mode 100644
index 169ce16bf892ed..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/bucketize.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-using bucketizeParamsTuple = std::tuple<
-    InferenceEngine::SizeVector,  // Data shape
-    InferenceEngine::SizeVector,  // Buckets shape
-    bool,                         // Right edge of interval
-    InferenceEngine::Precision,   // Data input precision
-    InferenceEngine::Precision,   // Buckets input precision
-    InferenceEngine::Precision,   // Output precision
-    std::string>;                 // Device name
-
-class BucketizeLayerTest : public testing::WithParamInterface,
-                           virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/clamp.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/clamp.hpp
deleted file mode 100644
index 5e2c3a0ab8a64b..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/clamp.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-using clampParamsTuple = std::tuple<
-    InferenceEngine::SizeVector,  // Input shape
-    std::pair,                    // Interval [min, max]
-    InferenceEngine::Precision,   // Net precision
-    std::string>;                 // Device name
-
-class ClampLayerTest : public testing::WithParamInterface,
-                       virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/comparison.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/comparison.hpp
deleted file mode 100644
index fb499a5c0396b5..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/comparison.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-
-#include
-#include
-
-#include "common_test_utils/common_utils.hpp"
-#include "common_test_utils/test_common.hpp"
-#include "common_test_utils/test_constants.hpp"
-#include "ie_core.hpp"
-
-namespace LayerTestsDefinitions {
-namespace ComparisonParams {
-using InputShapesTuple = std::pair, std::vector>;
-} // ComparisonParams
-
-typedef std::tuple<
-    ComparisonParams::InputShapesTuple,  // Input shapes tuple
-    InferenceEngine::Precision,          // NG Inputs precision
-    ngraph::helpers::ComparisonTypes,    // Comparison op type
-    ngraph::helpers::InputLayerType,     // Second input type
-    InferenceEngine::Precision,          // IE in precision
-    InferenceEngine::Precision,          // IE out precision
-    std::string,                         // Device name
-    std::map                             // Additional network configuration
-> ComparisonTestParams;
-
-class ComparisonLayerTest : public testing::WithParamInterface,
-                            virtual public LayerTestsUtils::LayerTestsCommon {
-    ngraph::helpers::ComparisonTypes comparisonOpType;
-protected:
-    void SetUp() override;
-
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &inputInfo) const override;
-};
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/concat.hpp
deleted file mode 100644
index d07d45909d1644..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/concat.hpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-using concatParamsTuple = typename std::tuple<
-    int,                          // Concat axis
-    std::vector>,                 // Input shapes
-    InferenceEngine::Precision,   // Network precision
-    InferenceEngine::Precision,   // Input precision
-    InferenceEngine::Precision,   // Output precision
-    InferenceEngine::Layout,      // Input layout
-    InferenceEngine::Layout,      // Output layout
-    std::string>;                 // Device name
-
-// Multichannel
-class ConcatLayerTest : public testing::WithParamInterface,
-                        virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/constant.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/constant.hpp
deleted file mode 100644
index 8abc666eccb7e9..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/constant.hpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-using constantParamsTuple = typename std::tuple<
-    std::vector,                 // Constant data shape
-    InferenceEngine::Precision,  // Constant data precision
-    std::vector,                 // Constant elements
-    std::string>;                // Device name
-
-class ConstantLayerTest : public testing::WithParamInterface,
-                          virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/conversion.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/conversion.hpp
deleted file mode 100644
index 476493c512d38b..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/conversion.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-
-#include
-#include
-
-#include "common_test_utils/common_utils.hpp"
-#include "common_test_utils/test_common.hpp"
-#include "common_test_utils/test_constants.hpp"
-#include "ie_core.hpp"
-
-namespace LayerTestsDefinitions {
-
-static std::map conversionNames = {
-    {ngraph::helpers::ConversionTypes::CONVERT, "Convert"},
-    {ngraph::helpers::ConversionTypes::CONVERT_LIKE, "ConvertLike"}};
-
-using ConversionParamsTuple = typename std::tuple>,  // Input1 shapes
-    InferenceEngine::Precision,   // Input1 precision
-    InferenceEngine::Precision,   // Input2 precision
-    InferenceEngine::Layout,      // Input layout
-    InferenceEngine::Layout,      // Output layout
-    std::string>;                 // Device name
-
-class ConversionLayerTest : public testing::WithParamInterface,
-                            virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_i420.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_i420.hpp
deleted file mode 100644
index 60cef22ee94e96..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convert_color_i420.hpp
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-using ConvertColorI420ParamsTuple = std::tuple<
-    ov::Shape,          // Input Shape
-    ov::element::Type,  // Element type
-    bool,               // Conversion type
-    bool,               // 1 or 3 planes
-    std::string>;       // Device name
-
-class ConvertColorI420LayerTest : public testing::WithParamInterface,
-                                  virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-
-protected:
-    void SetUp() override;
-};
-
-//----------------------------------------
-
-class ConvertColorI420AccuracyTest : public ConvertColorI420LayerTest {
-protected:
-    void GenerateInputs() override;  // Generate predefined image with R/G/B combinations
-    void Validate() override;        // Regular validate + percentage of acceptable deviations
-    std::vector>> CalculateRefs() override;
-
-    std::vector GetOutputs() override;
-private:
-    std::vector expected_output;
-    InferenceEngine::Blob::Ptr actual_output;
-};
-
-namespace I420TestUtils {
-
-template
-inline void ValidateColors(const T* expected, const T* actual, size_t size, float dev_threshold, float abs_threshold = 0.01f) {
-    size_t mismatches = 0;
-    for (size_t i = 0; i < size; i++) {
-        if (std::abs(static_cast(expected[i]) - static_cast(actual[i])) > abs_threshold) {
-            mismatches++;
-        }
-    }
-    ASSERT_LT(static_cast(mismatches) / size, dev_threshold) << mismatches <<
-        " out of " << size << " color mismatches found which exceeds allowed threshold " << dev_threshold;
-}
-
-inline std::vector color_test_image(size_t height, size_t width, int b_step) {
-    // Test all possible r/g/b values within dimensions
-    int b_dim = 255 / b_step + 1;
-    auto input_yuv = std::vector(height * b_dim * width * 3 / 2);
-    for (int b = 0; b <= 255; b += b_step) {
-        for (size_t y = 0; y < height / 2; y++) {
-            for (size_t x = 0; x < width / 2; x++) {
-                int r = static_cast(y) * 512 / static_cast(height);
-                int g = static_cast(x) * 512 / static_cast(width);
-                // Can't use random y/u/v for testing as this can lead to invalid R/G/B values
-                int y_val = ((66 * r + 129 * g + 25 * b + 128) / 256) + 16;
-                int u_val = ((-38 * r - 74 * g + 112 * b + 128) / 256) + 128;
-                int v_val = ((112 * r - 94 * g + 18 * b + 128) / 256) + 128;
-
-                size_t b_offset = height * width * b / b_step * 3 / 2;
-                size_t u_index = b_offset + height * width + y * width / 2 + x;
-                size_t v_index = u_index + height * width / 4;
-                input_yuv[u_index] = u_val;
-                input_yuv[v_index] = v_val;
-                size_t y_index = b_offset + y * 2 * width + x * 2;
-                input_yuv[y_index] = y_val;
-                input_yuv[y_index + 1] = y_val;
-                input_yuv[y_index + width] = y_val;
-                input_yuv[y_index + width + 1] = y_val;
-            }
-        }
-    }
-    return input_yuv;
-}
-
-} // namespace I420TestUtils
-} // namespace LayerTestsDefinitions
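// NOTE (editor): illustrative sketch only, not part of the diff. The deleted
// color_test_image helpers above (and the NV12 twin below) derive Y/U/V planes
// from R/G/B via this BT.601-style fixed-point conversion; the struct and
// function names here are hypothetical.
struct YUV {
    int y, u, v;
};

YUV rgb_to_yuv_bt601(int r, int g, int b) {
    YUV out;
    // Same integer coefficients as the removed test helpers use; +128 rounds
    // the /256 fixed-point division, +16/+128 are the standard offsets.
    out.y = ((66 * r + 129 * g + 25 * b + 128) / 256) + 16;
    out.u = ((-38 * r - 74 * g + 112 * b + 128) / 256) + 128;
    out.v = ((112 * r - 94 * g + 18 * b + 128) / 256) + 128;
    return out;
}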
-#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -using ConvertColorNV12ParamsTuple = std::tuple< - ov::Shape, // Input Shape - ov::element::Type, // Element type - bool, // Conversion type - bool, // 1 or 2 planes - std::string>; // Device name - -class ConvertColorNV12LayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; - -//---------------------------------------- - -class ConvertColorNV12AccuracyTest : public ConvertColorNV12LayerTest { -protected: - void GenerateInputs() override; // Generate predefined image with R/G/B combinations - void Validate() override; // Regular validate + percentage of acceptable deviations - std::vector>> CalculateRefs() override; - - std::vector GetOutputs() override; -private: - std::vector expected_output; - InferenceEngine::Blob::Ptr actual_output; -}; - -namespace NV12TestUtils { - -template -inline void ValidateColors(const T* expected, const T* actual, size_t size, float dev_threshold, float abs_threshold = 0.01f) { - size_t mismatches = 0; - for (size_t i = 0; i < size; i++) { - if (std::abs(static_cast(expected[i]) - static_cast(actual[i])) > abs_threshold) { - mismatches++; - } - } - ASSERT_LT(static_cast(mismatches) / size, dev_threshold) << mismatches << - " out of " << size << " color mismatches found which exceeds allowed threshold " << dev_threshold; -} - -inline std::vector color_test_image(size_t height, size_t width, int b_step) { - // Test all possible r/g/b values within dimensions - int b_dim = 255 / b_step + 1; - auto input_yuv = std::vector(height * b_dim * width * 3 / 2); - for (int b = 0; b <= 255; b += b_step) { - for (size_t y = 0; y < height / 2; y++) { - for (size_t x = 0; x < width / 2; x++) { - int r = static_cast(y) * 512 / static_cast(height); - int g = static_cast(x) * 512 / static_cast(width); - // Can't use random y/u/v for testing as this can lead to invalid R/G/B values - int y_val = ((66 * r + 129 * g + 25 * b + 128) / 256) + 16; - int u_val = ((-38 * r - 74 * g + 112 * b + 128) / 256) + 128; - int v_val = ((112 * r - 94 * g + 18 * b + 128) / 256) + 128; - - size_t b_offset = height * width * b / b_step * 3 / 2; - size_t uv_index = b_offset + height * width + y * width + x * 2; - input_yuv[uv_index] = u_val; - input_yuv[uv_index + 1] = v_val; - size_t y_index = b_offset + y * 2 * width + x * 2; - input_yuv[y_index] = y_val; - input_yuv[y_index + 1] = y_val; - input_yuv[y_index + width] = y_val; - input_yuv[y_index + width + 1] = y_val; - } - } - } - return input_yuv; -} - -} // namespace NV12TestUtils -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp deleted file mode 100644 index 95b0a68c5914b3..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - 
-namespace LayerTestsDefinitions { - -// ! [test_convolution:definition] -typedef std::tuple< - InferenceEngine::SizeVector, // Kernel size - InferenceEngine::SizeVector, // Strides - std::vector, // Pad begin - std::vector, // Pad end - InferenceEngine::SizeVector, // Dilation - size_t, // Num out channels - ov::op::PadType // Padding type -> convSpecificParams; -typedef std::tuple< - convSpecificParams, - InferenceEngine::Precision, // Net precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - InferenceEngine::SizeVector, // Input shapes - LayerTestsUtils::TargetDevice // Device name -> convLayerTestParamsSet; - -class ConvolutionLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; -// ! [test_convolution:definition] - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp deleted file mode 100644 index 61503f7797b7b9..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -typedef std::tuple< - InferenceEngine::SizeVector, // Kernel size - InferenceEngine::SizeVector, // Strides - std::vector, // Pad begin - std::vector, // Pad end - InferenceEngine::SizeVector, // Dilation - size_t, // Num out channels - ov::op::PadType, // Padding type - std::vector // Output padding -> convBackpropSpecificParams; -typedef std::tuple< - convBackpropSpecificParams, - InferenceEngine::Precision, // Net precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - InferenceEngine::SizeVector, // Input shapes - InferenceEngine::SizeVector, // Output shapes - LayerTestsUtils::TargetDevice // Device name -> convBackpropLayerTestParamsSet; - -class ConvolutionBackpropLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp deleted file mode 100644 index 5b28cca7187b98..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// DEPRECATED, can't be removed currently due to arm and kmb-plugin 
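// NOTE (editor): illustrative sketch only, not part of the diff. It shows how the
// kernel/stride/pad/dilation fields carried by the conv parameter tuples above
// determine a convolution's spatial output size in the explicit-padding case;
// the helper name is hypothetical.
#include <cstddef>

std::size_t conv_out_dim(std::size_t in, std::size_t kernel, std::size_t stride,
                         std::size_t pad_begin, std::size_t pad_end, std::size_t dilation) {
    // Effective kernel extent grows with dilation.
    const std::size_t effective_kernel = dilation * (kernel - 1) + 1;
    return (in + pad_begin + pad_end - effective_kernel) / stride + 1;
}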
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp
deleted file mode 100644
index 5b28cca7187b98..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-// DEPRECATED, can't be removed currently due to arm and kmb-plugin dependency (#55568)
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::SizeVector,  // Kernel size
-    InferenceEngine::SizeVector,  // Strides
-    std::vector,                  // Pad begin
-    std::vector,                  // Pad end
-    InferenceEngine::SizeVector,  // Dilation
-    size_t,                       // Num out channels
-    ov::op::PadType,              // Padding type
-    std::vector                   // Output padding
-> convBackpropDataSpecificParams;
-typedef std::tuple<
-    convBackpropDataSpecificParams,
-    InferenceEngine::Precision,    // Net precision
-    InferenceEngine::Precision,    // Input precision
-    InferenceEngine::Precision,    // Output precision
-    InferenceEngine::Layout,       // Input layout
-    InferenceEngine::Layout,       // Output layout
-    InferenceEngine::SizeVector,   // Input shapes
-    InferenceEngine::SizeVector,   // Output shapes
-    LayerTestsUtils::TargetDevice  // Device name
-> convBackpropDataLayerTestParamsSet;
-
-class ConvolutionBackpropDataLayerTest : public testing::WithParamInterface,
-                                         virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_greedy_decoder.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_greedy_decoder.hpp
deleted file mode 100644
index 8b5291934e0146..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_greedy_decoder.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-typedef std::tuple<
-    InferenceEngine::Precision,
-    InferenceEngine::Precision,  // Input precision
-    InferenceEngine::Precision,  // Output precision
-    InferenceEngine::Layout,     // Input layout
-    InferenceEngine::Layout,     // Output layout
-    InferenceEngine::SizeVector,
-    bool,
-    std::string> ctcGreedyDecoderParams;
-
-class CTCGreedyDecoderLayerTest
-    : public testing::WithParamInterface,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_greedy_decoder_seq_len.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_greedy_decoder_seq_len.hpp
deleted file mode 100644
index ba3d58d6e6b2a3..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_greedy_decoder_seq_len.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-typedef std::tuple<
-    InferenceEngine::SizeVector,  // Input shape
-    int,                          // Sequence lengths
-    InferenceEngine::Precision,   // Probabilities precision
-    InferenceEngine::Precision,   // Indices precision
-    int,                          // Blank index
-    bool,                         // Merge repeated
-    std::string                   // Device name
-    > ctcGreedyDecoderSeqLenParams;
-
-class CTCGreedyDecoderSeqLenLayerTest
-    : public testing::WithParamInterface,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_loss.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_loss.hpp
deleted file mode 100644
index 53ba55889e3f6a..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/ctc_loss.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    std::vector,   // Logits shapes
-    std::vector,   // logits length
-    std::vector>,  // labels
-    std::vector,   // labels length
-    int,           // blank index
-    bool,          // preprocessCollapseRepeated
-    bool,          // ctcMergeRepeated
-    bool           // Unique
-> CTCLossParamsSubset;
-
-typedef std::tuple<
-    CTCLossParamsSubset,
-    InferenceEngine::Precision,    // Float point precision
-    InferenceEngine::Precision,    // Integer precision
-    LayerTestsUtils::TargetDevice  // Device name
-> CTCLossParams;
-
-class CTCLossLayerTest : public testing::WithParamInterface,
-                         virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/cum_sum.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/cum_sum.hpp
deleted file mode 100644
index 0b3746b5406543..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/cum_sum.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::SizeVector,  // Input shapes
-    InferenceEngine::Precision,   // Input precision
-    int64_t,                      // Axis
-    bool,                         // Exclusive
-    bool,                         // Reverse
-    std::string> cumSumParams;    // Device name
-
-class CumSumLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp
deleted file mode 100644
index c93764b6dde36c..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::SizeVector,  // Deformable values size
-    InferenceEngine::SizeVector,  // Kernel size
-    InferenceEngine::SizeVector,  // Strides
-    std::vector,                  // Pad begin
-    std::vector,                  // Pad end
-    InferenceEngine::SizeVector,  // Dilation
-    size_t,                       // Groups
-    size_t,                       // Deformable groups
-    size_t,                       // Num out channels
-    ov::op::PadType,              // Padding type
-    bool,                         // Bilinear interpolation pad
-    bool                          // Modulation
-> deformableConvSpecificParams;
-typedef std::tuple<
-    deformableConvSpecificParams,
-    InferenceEngine::Precision,    // Net precision
-    InferenceEngine::Precision,    // Input precision
-    InferenceEngine::Precision,    // Output precision
-    InferenceEngine::Layout,       // Input layout
-    InferenceEngine::Layout,       // Output layout
-    InferenceEngine::SizeVector,   // Input shapes
-    LayerTestsUtils::TargetDevice  // Device name
-> deformableConvLayerTestParamsSet;
-
-class DeformableConvolutionLayerTest : public testing::WithParamInterface,
-                                       virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp
deleted file mode 100644
index 9ad896f62bfa86..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_psroi_pooling.hpp
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-using deformablePSROISpecificParams = std::tuple<
-    std::vector,   // data input shape
-    std::vector,   // rois input shape
-    std::vector,   // trans input shape
-    int64_t,       // output_dim
-    int64_t,       // group_size
-    float,         // spatial_scale
-    std::vector,   // spatial_bins_x_y
-    float,         // trans_std
-    int64_t>;      // part_size
-
-using deformablePSROILayerTestParams = std::tuple<
-    deformablePSROISpecificParams,
-    InferenceEngine::Precision,      // Net precision
-    LayerTestsUtils::TargetDevice>;  // Device name
-
-class DeformablePSROIPoolingLayerTest : public testing::WithParamInterface,
-                                        virtual public LayerTestsUtils::LayerTestsCommon {
- public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    void GenerateInputs() override;
-
- protected:
-    void SetUp() override;
-
- private:
-    float spatialScale_;
- };
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp
deleted file mode 100644
index 26d972933f8851..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-using depthToSpaceParamsTuple = typename std::tuple<
-    std::vector,                                 // Input shape
-    InferenceEngine::Precision,                  // Input precision
-    ov::op::v0::DepthToSpace::DepthToSpaceMode,  // Mode
-    std::size_t,                                 // Block size
-    std::string>;                                // Device name>
-
-class DepthToSpaceLayerTest : public testing::WithParamInterface,
-                              virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp
deleted file mode 100644
index fe62ca78376ee2..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::ostream& operator <<(std::ostream& os, const ov::op::v0::DetectionOutput::Attributes& inputShape);
-
-enum {
-    idxLocation,
-    idxConfidence,
-    idxPriors,
-    idxArmConfidence,
-    idxArmLocation,
-    numInputs
-};
-
-using DetectionOutputAttributes = std::tuple<
-    int,          // numClasses
-    int,          // backgroundLabelId
-    int,          // topK
-    std::vector,  // keepTopK
-    std::string,  // codeType
-    float,        // nmsThreshold
-    float,        // confidenceThreshold
-    bool,         // clip_afterNms
-    bool,         // clip_beforeNms
-    bool          // decreaseLabelId
->;
-
-using ParamsWhichSizeDepends = std::tuple<
-    bool,                         // varianceEncodedInTarget
-    bool,                         // shareLocation
-    bool,                         // normalized
-    size_t,                       // inputHeight
-    size_t,                       // inputWidth
-    InferenceEngine::SizeVector,  // "Location" input
-    InferenceEngine::SizeVector,  // "Confidence" input
-    InferenceEngine::SizeVector,  // "Priors" input
-    InferenceEngine::SizeVector,  // "ArmConfidence" input
-    InferenceEngine::SizeVector   // "ArmLocation" input
->;
-
-using DetectionOutputParams = std::tuple<
-    DetectionOutputAttributes,
-    ParamsWhichSizeDepends,
-    size_t,       // Number of batch
-    float,        // objectnessScore
-    std::string   // Device name
->;
-
-class DetectionOutputLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon {
- public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    ov::op::v0::DetectionOutput::Attributes attrs;
-    std::vector inShapes;
-    void GenerateInputs() override;
-    void Compare(const std::vector>> &expectedOutputs,
-                 const std::vector &actualOutputs) override;
- protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/dft.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/dft.hpp
deleted file mode 100644
index 1dee86fcad06fd..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/dft.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C)
2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" - -namespace LayerTestsDefinitions { - -typedef std::tuple< - InferenceEngine::SizeVector, // Input shapes - InferenceEngine::Precision, // Input precision - std::vector, // Axes - std::vector, // Signal size - ngraph::helpers::DFTOpType, - std::string> DFTParams; // Device name - -class DFTLayerTest : public testing::WithParamInterface<DFTParams>, virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo<DFTParams>& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/einsum.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/einsum.hpp deleted file mode 100644 index 6934f26861dd3c..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/einsum.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { - -typedef std::tuple< - std::string, // Equation - std::vector> // Input shapes -> EinsumEquationWithInput; - -typedef std::tuple< - InferenceEngine::Precision, // Input precision - EinsumEquationWithInput, // Equation with corresponding input shapes - std::string // Device name -> EinsumLayerTestParamsSet; - -class EinsumLayerTest : public testing::WithParamInterface<EinsumLayerTestParamsSet>, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo<EinsumLayerTestParamsSet>& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp deleted file mode 100644 index 8a3aaa6755bc22..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -// NOTE: WILL BE REWORKED (31905) - -#pragma once - -#include "ov_models/utils/ov_helpers.hpp" -#include "common_test_utils/common_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -typedef std::tuple< - std::vector, // input shapes - ngraph::helpers::EltwiseTypes, // eltwise op type - ngraph::helpers::InputLayerType, // secondary input type - ov::test::utils::OpType, // op type - ElementType, // Net precision - ElementType, // In precision - ElementType, // Out precision - TargetDevice, // Device name - ov::AnyMap // Additional network configuration -> EltwiseTestParams; - -class EltwiseLayerTest : public testing::WithParamInterface<EltwiseTestParams>, - virtual public SubgraphBaseTest { -protected: - void SetUp() override; - -public: - static std::string getTestCaseName(const testing::TestParamInfo<EltwiseTestParams>& obj); - -private: - void transformInputShapesAccordingEltwise(const ov::PartialShape& secondInputShape); -}; -} // namespace subgraph -} // namespace test -} // namespace ov diff --git
a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_offsets_sum.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_offsets_sum.hpp deleted file mode 100644 index 60b1fb152b0a8d..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_offsets_sum.hpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { - -typedef std::tuple< - std::vector, // emb_table_shape - std::vector, // indices - std::vector, // offsets - size_t, // default_index - bool, // with_weights - bool // with_def_index -> embeddingBagOffsetsSumParams; - -typedef std::tuple< - embeddingBagOffsetsSumParams, - InferenceEngine::Precision, // embedding table - InferenceEngine::Precision, // indices - LayerTestsUtils::TargetDevice> embeddingBagOffsetsSumLayerTestParamsSet; - -class EmbeddingBagOffsetsSumLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_packed_sum.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_packed_sum.hpp deleted file mode 100644 index aebd414f0326f5..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_bag_packed_sum.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { - -typedef std::tuple< - std::vector, // emb_table_shape - std::vector>, // indices - bool // with_weights -> embeddingBagPackedSumParams; - -typedef std::tuple< - embeddingBagPackedSumParams, - InferenceEngine::Precision, // embedding table - InferenceEngine::Precision, // indices - LayerTestsUtils::TargetDevice> embeddingBagPackedSumLayerTestParamsSet; - - -class EmbeddingBagPackedSumLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_segments_sum.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_segments_sum.hpp deleted file mode 100644 index e683252eff7327..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/embedding_segments_sum.hpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { - -typedef std::tuple< - std::vector, // emb_table_shape - std::vector, // indices - std::vector, // segment_ids - size_t, // 
num_segments - size_t, // default_index - bool, // with_weights - bool // with_def_index -> embeddingSegmentsSumParams; - -typedef std::tuple< - embeddingSegmentsSumParams, - InferenceEngine::Precision, // embedding table - InferenceEngine::Precision, // indices - LayerTestsUtils::TargetDevice> embeddingSegmentsSumLayerTestParamsSet; - -class EmbeddingSegmentsSumLayerTest : public testing::WithParamInterface<embeddingSegmentsSumLayerTestParamsSet>, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo<embeddingSegmentsSumLayerTestParamsSet>& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_detection_output.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_detection_output.hpp deleted file mode 100644 index e62f86d9b3f05c..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_detection_output.hpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/utils/ov_helpers.hpp" -#include "common_test_utils/common_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -typedef std::tuple< - std::vector, // inputShapes - float, // score_threshold - float, // nms_threshold - float, // max_delta_log_wh - int64_t, // num_classes - int64_t, // post_nms_count - size_t, // max_detections_per_image - bool, // class_agnostic_box_regression - std::vector, // deltas_weights - ElementType, // Network precision - std::string // Device name -> ExperimentalDetectronDetectionOutputTestParams; - -class ExperimentalDetectronDetectionOutputLayerTest : - public testing::WithParamInterface<ExperimentalDetectronDetectionOutputTestParams>, - virtual public SubgraphBaseTest { -protected: - void SetUp() override; - void generate_inputs(const std::vector& targetInputStaticShapes) override; - -public: - static std::string getTestCaseName(const testing::TestParamInfo<ExperimentalDetectronDetectionOutputTestParams>& obj); -}; -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp deleted file mode 100644 index 5946ee093f705d..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/utils/ov_helpers.hpp" -#include "common_test_utils/common_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -typedef std::tuple< - std::vector, // Input shapes - float, // min_size: minimum box width & height - float, // nms_threshold: specifies NMS threshold - int64_t, // post_nms_count: number of top-n proposals after NMS - int64_t, // pre_nms_count: number of top-n proposals after NMS - std::pair>, // input tensors - ElementType, // Network precision - std::string // Device name>; -> ExperimentalDetectronGenerateProposalsSingleImageTestParams; - -class
ExperimentalDetectronGenerateProposalsSingleImageLayerTest : - public testing::WithParamInterface, - virtual public SubgraphBaseTest { -protected: - void SetUp() override; - void generate_inputs(const std::vector& targetInputStaticShapes) override; - -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); -}; -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_prior_grid_generator.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_prior_grid_generator.hpp index 8ab367b39981e4..b61f888d387f12 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_prior_grid_generator.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_prior_grid_generator.hpp @@ -35,4 +35,4 @@ class ExperimentalDetectronPriorGridGeneratorLayerTest : }; } // namespace subgraph } // namespace test -} // namespace ov +} // namespace ov \ No newline at end of file diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_roifeatureextractor.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_roifeatureextractor.hpp deleted file mode 100644 index 38f480a0a6ae45..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_roifeatureextractor.hpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/utils/ov_helpers.hpp" -#include "common_test_utils/common_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -using Attrs = ov::op::v6::ExperimentalDetectronROIFeatureExtractor::Attributes; -using ExperimentalROI = ov::op::v6::ExperimentalDetectronROIFeatureExtractor; - -typedef std::tuple< - std::vector, // Input shapes - int64_t, // Output size - int64_t, // Sampling ratio - std::vector, // Pyramid scales - bool, // Aligned - ElementType, // Network precision - std::string // Device name>; -> ExperimentalDetectronROIFeatureExtractorTestParams; - -class ExperimentalDetectronROIFeatureExtractorLayerTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { -protected: - void SetUp() override; - -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); -}; -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_topkrois.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_topkrois.hpp deleted file mode 100644 index 091c865a893bb6..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/experimental_detectron_topkrois.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "ov_models/utils/ov_helpers.hpp" -#include "common_test_utils/common_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -typedef std::tuple< - 
std::vector, // input shape - int64_t , // Max rois - ElementType, // Network precision - std::string // Device name -> ExperimentalDetectronTopKROIsTestParams; - -class ExperimentalDetectronTopKROIsLayerTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { -protected: - void SetUp() override; - -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); -}; -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/extract_image_patches.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/extract_image_patches.hpp deleted file mode 100644 index 8240652b6182fb..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/extract_image_patches.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { - -using extractImagePatchesTuple = typename std::tuple< - std::vector, // input shape - std::vector, // kernel size - std::vector, // strides - std::vector, // rates - ov::op::PadType, // pad type - InferenceEngine::Precision, // Network precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - LayerTestsUtils::TargetDevice>; // Device name - -class ExtractImagePatchesTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eye.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eye.hpp deleted file mode 100644 index 6c3f83069faa91..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eye.hpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#pragma once - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { - -using ElementType = ov::element::Type_t; -using TargetDevice = std::string; -using LocalElementType = ov::element_type_traits::value_type; - -using EyeLayerTestParams = std::tuple, // eye shape - std::vector, // output batch shape - std::vector, // eye params (rows, cols, diag_shift) - ElementType, // Net precision - TargetDevice>; // Device name - -class EyeLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(testing::TestParamInfo obj); - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp deleted file mode 100644 index 66e5d3b0ef485f..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// 
SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -// seed selected using current cloc time -#define USE_CLOCK_TIME 1 -// seed started from default value, and incremented every time using big number like 9999 -#define USE_INCREMENTAL_SEED 2 - -/** - * redefine this seed to reproduce issue with given seed that can be read from gtest logs - */ -#define BASE_SEED 123 -#define NGRAPH_SEED 123 - -namespace LayerTestsDefinitions { - - -typedef std::tuple< - size_t, // fake quantize levels - std::vector, // fake quantize inputs shape - std::vector, // fake quantize (inputLow, inputHigh, outputLow, outputHigh) or empty for random - std::vector, // input generator data (low, high, resolution) or empty for default - ov::op::AutoBroadcastSpec // fake quantize broadcast mode -> fqSpecificParams; -typedef std::tuple< - fqSpecificParams, - InferenceEngine::Precision, // Net precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - InferenceEngine::SizeVector, // Input shapes - LayerTestsUtils::TargetDevice, // Device name - - std::pair> // Additional backend configuration and alis name to it -> fqLayerTestParamsSet; - -class FakeQuantizeLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; -protected: - void SetUp() override; - void UpdateSeed(); - - protected: - float inputDataMin = 0.0; - float inputDataMax = 10.0; - float inputDataResolution = 1.0; - int32_t seed = 1; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather.hpp deleted file mode 100644 index c2f354f39200a6..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather.hpp +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -typedef std::tuple< - std::vector, // Indices - std::vector, // Indices shape - int, // Gather axis - std::vector, // Input shapes - InferenceEngine::Precision, // Network precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - std::string // Device name -> gatherParamsTuple; - -class GatherLayerTestBase : virtual public LayerTestsUtils::LayerTestsCommon { -public: - using ::testing::Test::SetUp; -protected: - void SetUp(const gatherParamsTuple& params); -}; - -class GatherLayerTest : public testing::WithParamInterface, public GatherLayerTestBase { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; - - -typedef std::tuple< - 
std::vector, // Input shapes - std::vector, // Indices shape - std::tuple, // Gather axis and batch - InferenceEngine::Precision, // Network precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - std::string // Device name -> gather7ParamsTuple; - -class Gather7LayerTest : public testing::WithParamInterface<gather7ParamsTuple>, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo<gather7ParamsTuple>& obj); - -protected: - void SetUp() override; -}; - -class Gather8LayerTest : public testing::WithParamInterface<gather7ParamsTuple>, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo<gather7ParamsTuple>& obj); - -protected: - void SetUp() override; -}; - -class Gather8IndiceScalarLayerTest : public testing::WithParamInterface<gather7ParamsTuple>, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo<gather7ParamsTuple>& obj); - -protected: - void SetUp() override; -}; - -typedef std::tuple< - gather7ParamsTuple, - std::vector // indices data -> gather8withIndicesDataParamsTuple; - -class Gather8withIndicesDataLayerTest : public testing::WithParamInterface<gather8withIndicesDataParamsTuple>, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo<gather8withIndicesDataParamsTuple>& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_elements.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_elements.hpp deleted file mode 100644 index 8d5cc5467fa6b3..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_elements.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { - -typedef std::tuple< - std::vector, // Data shapes - std::vector, // Indices shape - int, // Axis - InferenceEngine::Precision, // Data precision - InferenceEngine::Precision, // Indices precision - LayerTestsUtils::TargetDevice // Device name -> GatherElementsParams; - -class GatherElementsLayerTest : public testing::WithParamInterface<GatherElementsParams>, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo<GatherElementsParams>& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_nd.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_nd.hpp deleted file mode 100644 index 9e9705009578af..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_nd.hpp +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { -using Config = std::map; - -typedef std::tuple< - std::vector, // Data shapes -
std::vector, // Indices shape - int // batch dims -> GatherNDParamsSubset; - -typedef std::tuple< - GatherNDParamsSubset, - InferenceEngine::Precision, // Data precision - InferenceEngine::Precision, // Indices precision - LayerTestsUtils::TargetDevice, // Device name - Config // Plugin config -> GatherNDParams; - -class GatherNDLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; - -class GatherND8LayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_tree.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_tree.hpp deleted file mode 100644 index 41925413e75d89..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gather_tree.hpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -using GatherTreeParamsTuple = typename std::tuple< - std::vector, // Input tensors shape - ngraph::helpers::InputLayerType, // Secondary input type - InferenceEngine::Precision, // Network precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - std::string>; // Device name - -class GatherTreeLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/generate_proposals.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/generate_proposals.hpp deleted file mode 100644 index d2c06541eff2b1..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/generate_proposals.hpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/utils/ov_helpers.hpp" -#include "common_test_utils/common_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -typedef std::tuple< - std::vector, // Input shapes - float, // min_size: minimum box width & height - float, // nms_threshold: specifies NMS threshold - int64_t, // post_nms_count: number of top-n proposals after NMS - int64_t, // pre_nms_count: number of top-n proposals after NMS - bool, // normalized: specifies whether box is normalized or not - std::pair>, // input tensors - ElementType, // Network precision - ElementType, // roi_num precision - 
std::string // Device name>; -> GenerateProposalsTestParams; - -class GenerateProposalsLayerTest : - public testing::WithParamInterface, - virtual public SubgraphBaseTest { -protected: - void SetUp() override; - void generate_inputs(const std::vector& targetInputStaticShapes) override; - void compare(const std::vector& expected, const std::vector& actual) override; - -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); -}; -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grid_sample.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grid_sample.hpp deleted file mode 100644 index 19e06b49d5f443..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grid_sample.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { - -using GridSampleParams = std::tuple; // Device name - -class GridSampleLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp deleted file mode 100644 index 0e7cf8de26d7a2..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grn.hpp +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "common_test_utils/common_utils.hpp" -#include "functional_test_utils/blob_utils.hpp" -#include "ie_core.hpp" -#include "ie_precision.hpp" - -#include "functional_test_utils/blob_utils.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "common_test_utils/common_utils.hpp" - -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { -typedef std::tuple< - InferenceEngine::Precision, - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - InferenceEngine::SizeVector, - float, - std::string> grnParams; - -class GrnLayerTest - : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon{ -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - InferenceEngine::SizeVector inputShapes; - float bias; - - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp deleted file mode 100644 index de90196085c575..00000000000000 --- 
a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { -typedef std::tuple< - InferenceEngine::SizeVector, - InferenceEngine::SizeVector, - std::vector, - std::vector, - InferenceEngine::SizeVector, - size_t, - size_t, - ov::op::PadType> groupConvSpecificParams; -typedef std::tuple< - groupConvSpecificParams, - InferenceEngine::Precision, - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - InferenceEngine::SizeVector, - LayerTestsUtils::TargetDevice> groupConvLayerTestParamsSet; - -class GroupConvolutionLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp deleted file mode 100644 index 03017430c08572..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -// DEPRECATED, remove this old API when KMB (#58495) and ARM (#58496) plugins are migrated to new API -using groupConvBackpropDataSpecificParams = std::tuple< - InferenceEngine::SizeVector, // kernels - InferenceEngine::SizeVector, // strides - std::vector, // pad begins - std::vector, // pad ends - InferenceEngine::SizeVector, // dilations - size_t, // num output channels - size_t, // num groups - ov::op::PadType>; // padding type -using groupConvBackpropDataLayerTestParamsSet = std::tuple< - groupConvBackpropDataSpecificParams, - InferenceEngine::Precision, // Network precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - InferenceEngine::SizeVector, // Input shape - LayerTestsUtils::TargetDevice>; // Device name - -class GroupConvBackpropDataLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -using groupConvBackpropSpecificParams = std::tuple< - InferenceEngine::SizeVector, // kernels - InferenceEngine::SizeVector, // strides - std::vector, // pad begins - std::vector, // pad ends - InferenceEngine::SizeVector, // dilations - size_t, // num output channels - size_t, // num groups - ov::op::PadType, // 
padding type - std::vector>; // output padding -using groupConvBackpropLayerTestParamsSet = std::tuple< - groupConvBackpropSpecificParams, - InferenceEngine::Precision, // Network precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - InferenceEngine::SizeVector, // Input shape - InferenceEngine::SizeVector, // Output shapes - LayerTestsUtils::TargetDevice>; // Device name - -class GroupConvBackpropLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(testing::TestParamInfo obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_cell.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_cell.hpp deleted file mode 100644 index ec6c7e61a462b5..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_cell.hpp +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -using GRUCellParams = typename std::tuple< - bool, // using decompose to sub-ops transformation - size_t, // batch - size_t, // hidden size - size_t, // input size - std::vector, // activations - float, // clip - bool, // linear_before_reset - ngraph::helpers::InputLayerType, // W input type (Constant or Parameter) - ngraph::helpers::InputLayerType, // R input type (Constant or Parameter) - ngraph::helpers::InputLayerType, // B input type (Constant or Parameter) - InferenceEngine::Precision, // Network precision - std::string>; // Device name - -class GRUCellTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp deleted file mode 100644 index 02a529a5a9bab4..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "common_test_utils/test_enums.hpp" - -namespace LayerTestsDefinitions { - -using GRUSequenceParams = typename std::tuple< - ngraph::helpers::SequenceTestsMode, // pure Sequence or TensorIterator - size_t, // seq_lengths - size_t, // batch - size_t, // hidden size - // todo: fix. input size hardcoded to 10 due to limitation (10 args) of gtests Combine() func. 
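
(Editorial aside: the `// todo` note above about gtest's Combine() being capped at 10 generators is also what motivates the two-level tuples throughout these headers: a `...SpecificParams` sub-tuple is built by an inner Combine and fed into the outer one as a single element. A sketch against the groupConvLayerTestParamsSet deleted above; all concrete values, the suite name, and the std::vector<ptrdiff_t> pad type are assumptions for illustration.)

    // Sketch: the inner Combine builds the groupConvSpecificParams sub-tuple,
    // the outer Combine treats it as one element, so both stay within the
    // 10-generator ceiling mentioned in the todo above.
    #include <gtest/gtest.h>

    #include "shared_test_classes/single_layer/group_convolution.hpp"

    using LayerTestsDefinitions::GroupConvolutionLayerTest;

    const auto groupConvSpecific = ::testing::Combine(
        ::testing::Values(InferenceEngine::SizeVector{3, 3}),   // kernels
        ::testing::Values(InferenceEngine::SizeVector{1, 1}),   // strides
        ::testing::Values(std::vector<ptrdiff_t>{0, 0}),        // pads begin (assumed type)
        ::testing::Values(std::vector<ptrdiff_t>{0, 0}),        // pads end (assumed type)
        ::testing::Values(InferenceEngine::SizeVector{1, 1}),   // dilations
        ::testing::Values(size_t{8}),                           // num output channels
        ::testing::Values(size_t{2}),                           // num groups
        ::testing::Values(ov::op::PadType::EXPLICIT));          // pad type

    INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_Hypothetical, GroupConvolutionLayerTest,
        ::testing::Combine(
            groupConvSpecific,                                             // sub-tuple
            ::testing::Values(InferenceEngine::Precision::FP32),           // net precision
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),    // input precision
            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),    // output precision
            ::testing::Values(InferenceEngine::Layout::ANY),               // input layout
            ::testing::Values(InferenceEngine::Layout::ANY),               // output layout
            ::testing::Values(InferenceEngine::SizeVector{1, 16, 30, 30}), // input shape
            ::testing::Values(std::string("CPU"))),                        // device name
        GroupConvolutionLayerTest::getTestCaseName);
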
- //size_t, // input size - std::vector, // activations - float, // clip - bool, // linear_before_reset - ov::op::RecurrentSequenceDirection, // direction - ngraph::helpers::InputLayerType, // WRB input type (Constant or Parameter) - InferenceEngine::Precision, // Network precision - std::string>; // Device name - -class GRUSequenceTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; - void GenerateInputs() override; - ngraph::helpers::SequenceTestsMode m_mode; - int64_t m_max_seq_len = 0; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/interpolate.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/interpolate.hpp deleted file mode 100644 index 2a874e11c8cd7f..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/interpolate.hpp +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" - -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -typedef std::tuple< - ov::op::util::InterpolateBase::InterpolateMode, // InterpolateMode - ov::op::util::InterpolateBase::ShapeCalcMode, // ShapeCalculationMode - ov::op::util::InterpolateBase::CoordinateTransformMode, // CoordinateTransformMode - ov::op::util::InterpolateBase::NearestMode, // NearestMode - bool, // AntiAlias - std::vector, // PadBegin - std::vector, // PadEnd - double, // Cube coef - std::vector, // Axes - std::vector // Scales -> InterpolateSpecificParams; - -typedef std::tuple< - InterpolateSpecificParams, - InferenceEngine::Precision, // Net precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - InferenceEngine::SizeVector, // Input shapes - InferenceEngine::SizeVector, // Target shapes - LayerTestsUtils::TargetDevice, // Device name - std::map // Additional network configuration -> InterpolateLayerTestParams; - -class InterpolateLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -namespace v11 { - -class InterpolateLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace v11 - -//Interpolate-1 test -typedef std::tuple, // Pads - LayerTestsUtils::TargetDevice // Device name - > - Interpolate1LayerTestParams; - -class Interpolate1LayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(testing::TestParamInfo obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/is_inf.hpp 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/is_inf.hpp deleted file mode 100644 index 8836886149a2ec..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/is_inf.hpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include "shared_test_classes/base/ov_subgraph.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -using IsInfParams = std::tuple, // Data shape - bool, // Detect negative - bool, // Detect positive - ElementType, // Data precision - std::string, // Device name - ov::AnyMap // Additional config - >; - -class IsInfLayerTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; - void generate_inputs(const std::vector& targetInputStaticShapes) override; -}; - -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/log_softmax.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/log_softmax.hpp deleted file mode 100644 index 7218309e776dc2..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/log_softmax.hpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -using logSoftmaxLayerTestParams = std::tuple< - InferenceEngine::Precision, // netPrecision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - InferenceEngine::SizeVector, // inputShape - int64_t, // axis - std::string, // targetDevice - std::map // config ->; - -class LogSoftmaxLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/logical.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/logical.hpp deleted file mode 100644 index 81c80b8345c64f..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/logical.hpp +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#include -#include - -#include "common_test_utils/common_utils.hpp" -#include "common_test_utils/test_common.hpp" -#include "common_test_utils/test_constants.hpp" -#include "ie_core.hpp" - -namespace LayerTestsDefinitions { -namespace LogicalParams { -using InputShapesTuple = std::pair, std::vector>; -} // LogicalParams - -typedef std::tuple< - LogicalParams::InputShapesTuple, // Input shapes tuple - ngraph::helpers::LogicalTypes, // Logical op type - ngraph::helpers::InputLayerType, // Second input type - 
InferenceEngine::Precision, // Net precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - std::string, // Device name - std::map // Additional network configuration -> LogicalTestParams; - -class LogicalLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -protected: - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; - void SetupParams(); - void SetUp() override; - -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - static std::vector combineShapes(const std::map, std::vector>>& inputShapes); - -protected: - LogicalParams::InputShapesTuple inputShapes; - ngraph::helpers::LogicalTypes logicalOpType; - ngraph::helpers::InputLayerType secondInputType; - InferenceEngine::Precision netPrecision; - std::map additional_config; -}; -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/loop.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/loop.hpp deleted file mode 100644 index adbfd3e5c1a822..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/loop.hpp +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { -enum LOOP_IN_TYPE { - INVARIANT, - MERGED -}; - -using LoopParams = typename std::tuple< - bool, // ExecuteFirstIteration - bool, // BodyCondition is a constant? - bool, // BodyCondition value, if it is a Const - int64_t, // TripCount, -1 means infinity - std::vector, LOOP_IN_TYPE>>, // inputs - InferenceEngine::Precision, // Network precision - std::string>; // Device name - -class LoopTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; - - -using StaticShapeLoopParams = typename std::tuple< - bool, - bool, - std::tuple< - bool, - int64_t, - int64_t, - int64_t - >, - int64_t, - InferenceEngine::SizeVector, - InferenceEngine::Precision, - std::string, - std::map - >; - -/** - * Test case with static SHAPE version of loop operation. - * Total iteration count is dynamic. 
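
(Editorial aside on the INVARIANT/MERGED split in LoopParams above: invariant inputs feed the same value into every iteration, merged inputs are carried between iterations. A minimal sketch in terms of the core ov::op::v5::Loop API; shapes and values are illustrative, and this is not the deleted test's own body.)

    // Sketch: building a Loop with one merged (carried) and one invariant input.
    #include "openvino/core/model.hpp"
    #include "openvino/op/add.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/loop.hpp"
    #include "openvino/op/parameter.hpp"

    std::shared_ptr<ov::Model> make_loop_example() {
        using namespace ov;
        auto trip = op::v0::Constant::create(element::i64, Shape{}, {3});        // trip count
        auto cond = op::v0::Constant::create(element::boolean, Shape{}, {true}); // exec condition

        // Body: x is carried between iterations, y stays constant.
        auto x = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2});
        auto y = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2});
        auto sum = std::make_shared<op::v1::Add>(x, y);
        auto body_cond = op::v0::Constant::create(element::boolean, Shape{}, {true});
        auto body = std::make_shared<Model>(OutputVector{body_cond, sum},
                                            ParameterVector{x, y});

        auto loop = std::make_shared<op::v5::Loop>(trip, cond);
        loop->set_function(body);
        loop->set_special_body_ports({-1, 0});  // no iteration counter; result 0 is the condition

        auto x0 = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2});
        auto y0 = std::make_shared<op::v0::Parameter>(element::f32, Shape{1, 2});
        loop->set_merged_input(x, x0, sum);   // MERGED: initial x0, then fed back from sum
        loop->set_invariant_input(y, y0);     // INVARIANT: same value every iteration

        auto out = loop->get_iter_value(sum, -1);  // value from the last iteration
        return std::make_shared<Model>(OutputVector{out}, ParameterVector{x0, y0});
    }
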
- */ -class StaticShapeLoopTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; - std::vector>> PredefinedRefs(); - -private: - bool unrolling; // unroll Loop - bool static_iter_num; // trip count provided by constant node - bool static_continue_cond; // initial_cond provided by constant node - int64_t max_iter_num; // -1 means infinity loop (expected dynamic exit condition in body) - int64_t dynamic_exit; // -1 means always true - int64_t axis; // -1 means no auto concatenation - int64_t start_value; - InferenceEngine::SizeVector data_shape; - InferenceEngine::Precision data_prc; - - int64_t actual_n_iter(); - -protected: - void SetUp() override; -}; - - -class TrivialLoopTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -protected: - using RefBlobGenerator = std::function; - std::map inputGens, outputGens; - - void CreateSlicedLoop(size_t batch_size, size_t num_iteration, InferenceEngine::Precision iePrc, - InferenceEngine::SizeVector& ieShape); - void CreateSlicedLoopDynCondition(size_t batch_size, size_t num_iteration, InferenceEngine::Precision iePrc, - InferenceEngine::SizeVector& ieShape, size_t trip_count); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override { - auto found = inputGens.find(info.name()); - if (found != inputGens.end()) { - return found->second(info.getTensorDesc()); - } - - found = inputGens.find(""); - if (found != inputGens.end()) { - return found->second(info.getTensorDesc()); - } - - return LayerTestsCommon::GenerateInput(info); - } - - std::vector>> CalculateRefs() override { - if (outputGens.empty()) - return LayerTestsCommon::CalculateRefs(); - - const auto results = function->get_results(); - const auto outs_info = cnnNetwork.getOutputsInfo(); - const auto num_out_blob = results.size(); - - std::vector>> res_collection(num_out_blob); - - for (size_t i = 0; i < num_out_blob; i++) { - // TODO: name of original NG result doesn't match with outs after conversion. 
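
(Editorial aside: the field comments in StaticShapeLoopTest above pin down the trip-count semantics, with -1 as an infinite static trip count and -1 as "exit condition always true", so the declared actual_n_iter() helper plausibly reduces to the following. This body is a hedged reconstruction from those comments, not code from the deleted file.)

    // Hedged reconstruction of actual_n_iter(), derived from the field
    // comments only: the loop stops at whichever comes first, the static
    // trip count or the dynamic exit.
    #include <algorithm>
    #include <cstdint>

    int64_t actual_n_iter(int64_t max_iter_num, int64_t dynamic_exit) {
        if (dynamic_exit == -1)       // body condition stays true
            return max_iter_num;      // -1 propagates as "runs forever"
        // The dynamic exit fires on iteration `dynamic_exit` (0-based), so the
        // body executes dynamic_exit + 1 times, capped by the static trip count.
        if (max_iter_num == -1)
            return dynamic_exit + 1;
        return std::min(max_iter_num, dynamic_exit + 1);
    }
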
-            // Expected : auto name = results[i]->get_friendly_name();
-            auto name = results[i]->get_input_node_ptr(0)->get_friendly_name();
-            auto data = outs_info.at(name);
-            IE_ASSERT(data != nullptr);
-
-            RefBlobGenerator generator;
-            auto found = outputGens.find(name);
-            if (found != outputGens.end()) {
-                generator = found->second;
-            } else {
-                found = outputGens.find("");
-                if (found != outputGens.end()) {
-                    generator = found->second;
-                }
-            }
-
-            IE_ASSERT(generator != nullptr) << "Test output generator is not specified";
-            auto blob = generator(data->getTensorDesc());
-            auto blob_size = blob->byteSize();
-            auto blob_ptr = blob->buffer().as();
-
-            auto &res = res_collection[i];
-            res.second.resize(blob_size);
-            std::copy(blob_ptr, blob_ptr + blob_size, res.second.begin());
-        }
-        return res_collection;
-    }
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/low_precision.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/low_precision.hpp
deleted file mode 100644
index 990394bcab167b..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/low_precision.hpp
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LowPrecisionTestDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::Precision, // Net precision
-    LayerTestsUtils::TargetDevice, // Device name
-    std::pair> // Configuration
-> lowPrecisionTestParamsSet;
-
-class LowPrecisionTest : public testing::WithParamInterface,
-                         virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-// ! [test_low_precision:definition]
-
-} // namespace LowPrecisionTestDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lrn.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lrn.hpp
deleted file mode 100644
index d4a3f69a707374..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lrn.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    double, // Alpha
-    double, // Beta
-    double, // Bias
-    size_t, // Size
-    std::vector, // Reduction axes
-    InferenceEngine::Precision, // Network precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::SizeVector, // Input shapes
-    std::string // Device name
-> lrnLayerTestParamsSet;
-
-class LrnLayerTest
-    : public testing::WithParamInterface,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell.hpp
deleted file mode 100644
index 1145c588794328..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-using LSTMCellParams = typename std::tuple<
-    bool, // using decompose to sub-ops transformation
-    size_t, // batch
-    size_t, // hidden size
-    size_t, // input size
-    std::vector, // activations
-    float, // clip
-    ngraph::helpers::InputLayerType, // W input type (Constant or Parameter)
-    ngraph::helpers::InputLayerType, // R input type (Constant or Parameter)
-    ngraph::helpers::InputLayerType, // B input type (Constant or Parameter)
-    InferenceEngine::Precision, // Network precision
-    std::string>; // Device name
-
-class LSTMCellTest : public testing::WithParamInterface,
-                     virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell_basic.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell_basic.hpp
deleted file mode 100644
index 878da4062d3358..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_cell_basic.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-using LSTMCellBasicParams = typename std::tuple<
-    bool, // using decompose to sub-ops transformation
-    size_t, // batch
-    size_t, // hidden size
-    size_t, // input size
-    std::vector, // activations
-    float, // clip
-    InferenceEngine::Precision, // Network precision
-    std::string, // Device name
-    std::map>; // Config
-
-class LSTMCellBasicTest : public testing::WithParamInterface,
-                          virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp
deleted file mode 100644
index 65ef47607874f5..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-using LSTMSequenceParams = typename std::tuple<
-    ngraph::helpers::SequenceTestsMode, // pure Sequence or TensorIterator
-    size_t, // seq_lengths
-    size_t, // batch
-    size_t, // hidden size
-    size_t, // input size
-    std::vector, // activations
-    float, // clip
-    ov::op::RecurrentSequenceDirection, // direction
-    ngraph::helpers::InputLayerType, // WRB input type (Constant or Parameter)
-    InferenceEngine::Precision, // Network precision
-    std::string>; // Device name
-
-
-class LSTMSequenceTest : public testing::WithParamInterface,
-                         virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-protected:
-    void GenerateInputs() override;
-    void SetUp() override;
-
-    ngraph::helpers::SequenceTestsMode m_mode;
-    int64_t m_max_seq_len = 0;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/mat_mul.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/mat_mul.hpp
deleted file mode 100644
index 1b7f2a0898f710..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/mat_mul.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-struct ShapeRelatedParams {
-    std::pair input1, input2;
-};
-
-typedef std::tuple<
-    ShapeRelatedParams,
-    InferenceEngine::Precision, // Network precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::Layout, // Input layout
-    ngraph::helpers::InputLayerType, // Secondary input type
-    LayerTestsUtils::TargetDevice, // Device name
-    std::map // Additional network configuration
-> MatMulLayerTestParamsSet;
-
-class MatMulTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-    static std::vector combineShapes(const std::vector>& firstInputShapes,
-                                     const std::vector>& secondInputShapes,
-                                     bool transposeA,
-                                     bool transposeB);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp
deleted file mode 100644
index c00c76acaba6dc..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-
-#include "ov_models/utils/ov_helpers.hpp"
-#include "common_test_utils/common_utils.hpp"
-#include "shared_test_classes/base/ov_subgraph.hpp"
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-using InputPrecisions = std::tuple; // iou_threshold, score_threshold,
-
-using TopKParams = std::tuple; // Maximum number of boxes to be selected per batch element
-
-using ThresholdParams = std::tuple; // filter out boxes with low confidence score after decaying
-
-using NmsParams = std::tuple, // Params using to create 1st and 2nd inputs
-                             InputPrecisions, // Input precisions
-                             ov::op::v8::MatrixNms::SortResultType, // Order of output elements
-                             ov::element::Type, // Output type
-                             TopKParams, // Maximum number of boxes topk params
-                             ThresholdParams, // Thresholds: score_threshold, gaussian_sigma, post_threshold
-                             int, // Background class id
-                             bool, // If boxes are normalized
-                             ov::op::v8::MatrixNms::DecayFunction, // Decay function
-                             bool, // make output shape static
-                             std::string>; // Device name
-
-class MatrixNmsLayerTest : public testing::WithParamInterface,
-                           virtual public SubgraphBaseTest {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    void generate_inputs(const std::vector& targetInputStaticShapes) override;
-    void compare(const std::vector &expected, const std::vector &actual) override;
-
-protected:
-    void SetUp() override;
-
-private:
-    void GetOutputParams(size_t& numBatches, size_t& maxOutputBoxesPerBatch);
-    ov::op::v8::MatrixNms::Attributes m_attrs;
-    bool m_outStaticShape;
-};
-
-} // namespace subgraph
-} // namespace test
-} // namespace ov
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp
deleted file mode 100644
index 4d285d6505ea01..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-using MemoryTestParams = std::tuple<
-    ngraph::helpers::MemoryTransformation, // Apply Memory transformation
-    int64_t, // iterationCount
-    InferenceEngine::SizeVector, // inputShape
-    InferenceEngine::Precision, // netPrecision
-    std::string // targetDevice
->;
-
-class MemoryTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-    void Run() override;
-
-protected:
-    std::vector>> CalculateRefs() override;
-    void SetUp() override;
-    void Infer() override;
-    virtual std::shared_ptr CreateReadValueOp(
-        const ov::Output& value, const std::shared_ptr& variable) const {
-        return std::make_shared(value, variable);
-    }
-    virtual std::shared_ptr CreateAssignOp(
-        const ov::Output& value, const std::shared_ptr& variable) const {
-        return std::make_shared(value, variable);
-    }
-
-    virtual void CreateCommonFunc();
-
-    ov::element::Type ngPrc;
-    ov::Shape inputShape;
-
-private:
-    void CreateTIFunc();
-    void ApplyLowLatency();
-
-    InferenceEngine::Precision netPrecision;
-    ov::EvaluationContext eval_context;
-    ngraph::helpers::MemoryTransformation transformation;
-
-    int64_t iteration_count;
-};
-
-class MemoryTestV3 : public MemoryTest {
-protected:
-    std::shared_ptr CreateReadValueOp(
-        const ov::Output& value, const std::shared_ptr& variable) const override {
-        return std::make_shared(value, variable->get_info().variable_id);
-    }
-
-    std::shared_ptr CreateAssignOp(
-        const ov::Output& value, const std::shared_ptr& variable) const override {
-        return std::make_shared(value, variable->get_info().variable_id);
-    }
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/minimum_maximum.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/minimum_maximum.hpp
deleted file mode 100644
index bee0cde0cdfea5..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/minimum_maximum.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "common_test_utils/test_constants.hpp"
-
-namespace LayerTestsDefinitions {
-
-using MaxMinParamsTuple = typename std::tuple<
-    std::vector>, // Input shapes
-    ngraph::helpers::MinMaxOpType, // OperationType
-    InferenceEngine::Precision, // Network precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::Layout, // Input layout
-    InferenceEngine::Layout, // Output layout
-    ngraph::helpers::InputLayerType, // Secondary input type
-    std::string>; // Device name
-
-class MaxMinLayerTest:
-    public testing::WithParamInterface,
-    virtual public LayerTestsUtils::LayerTestsCommon{
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-protected:
-    void SetUp() override;
-};
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp
deleted file mode 100644
index 3c8f9fb246b0f6..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-
-#include "ov_models/utils/ov_helpers.hpp"
-#include "common_test_utils/common_utils.hpp"
-#include "shared_test_classes/base/ov_subgraph.hpp"
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-using InputPrecisions = std::tuple; // iou_threshold, score_threshold,
-                                    // soft_nms_sigma precisions
-
-using InputfloatVar = std::tuple; // nmsEta
-
-using InputboolVar = std::tuple; // normalized
-
-using MulticlassNmsParams = std::tuple, // Params using to create inputs
-                                       InputPrecisions, // Input precisions
-                                       int32_t, // Max output boxes per class
-                                       InputfloatVar, // iouThreshold, scoreThreshold, nmsEta
-                                       int32_t, // background_class
-                                       int32_t, // keep_top_k
-                                       ov::element::Type, // Output type
-                                       ov::op::util::MulticlassNmsBase::SortResultType, // SortResultType
-                                       InputboolVar, // Sort result across batch, normalized
-                                       bool, // make output shape static
-                                       std::string>;
-
-class MulticlassNmsLayerTest : public testing::WithParamInterface,
-                               virtual public SubgraphBaseTest {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    void generate_inputs(const std::vector& targetInputStaticShapes) override;
-    void compare(const std::vector &expected, const std::vector &actual) override;
-
-protected:
-    void SetUp() override;
-    virtual std::shared_ptr CreateNmsOp(const OutputVector& paramOuts) const {
-        std::shared_ptr nms;
-        if (paramOuts.size() > 2) {
-            nms = std::make_shared(paramOuts[0], paramOuts[1], paramOuts[2], m_attrs);
-        } else {
-            nms = std::make_shared(paramOuts[0], paramOuts[1], m_attrs);
-        }
-        return nms;
-    }
-    ov::op::util::MulticlassNmsBase::Attributes m_attrs;
-
-private:
-    void GetOutputParams(size_t& numBatches, size_t& maxOutputBoxesPerBatch);
-    bool m_outStaticShape;
-};
-
-class MulticlassNmsLayerTest8 : public MulticlassNmsLayerTest {
-protected:
-    std::shared_ptr CreateNmsOp(const OutputVector& paramOuts) const override {
-        return std::make_shared(paramOuts[0], paramOuts[1], m_attrs);
-    }
-};
-} // namespace subgraph
-} // namespace test
-} // namespace ov
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multinomial.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multinomial.hpp
deleted file mode 100644
index af42c02638c773..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multinomial.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-#pragma once
-#include "ov_models/builders.hpp"
-#include "common_test_utils/common_utils.hpp"
-#include "shared_test_classes/base/ov_subgraph.hpp"
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-using MultinomialTestParams = std::tuple<
-    ElementType, // netPrecision
-    ElementType, // inPrecision
-    ElementType, // outPrecision
-    InputShape, // Dynamic shape + Target static shapes
-    std::int64_t, // Number of samples
-    element::Type_t, // Output type attribute
-    bool, // With replacement,
-    bool, // Log probs;
-    TargetDevice, // targetDevice
-    Config // config
-    >;
-
-class MultinomialTest : public testing::WithParamInterface,
-                        virtual public ov::test::SubgraphBaseTest {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace subgraph
-} // namespace test
-} // namespace ov
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/mvn.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/mvn.hpp
deleted file mode 100644
index e4fafdc499bf2d..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/mvn.hpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (C) 2018-2024 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple
-    mvn1Params;
-
-class Mvn1LayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-typedef std::tuple<
-    InferenceEngine::SizeVector, // Input shapes
-    InferenceEngine::Precision, // Data precision
-    InferenceEngine::Precision, // Axes precision
-    std::vector, // Axes
-    bool, // Normalize variance
-    float, // Epsilon
-    std::string, // Epsilon mode
-    std::string // Device name
-    > mvn6Params;
-
-class Mvn6LayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nms_rotated.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nms_rotated.hpp
deleted file mode 100644
index 64e9ddc1535d5c..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nms_rotated.hpp
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-
-namespace LayerTestsDefinitions {
-
-using InputShapeParams = std::tuple; // Number of classes
-
-using InputPrecisions =
-    std::tuple; // iou_threshold, score_threshold, soft_nms_sigma precisions
-
-using NmsRotatedParams = std::tuple; // Device name
-
-class NmsRotatedLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    void GenerateInputs() override;
-    void Compare(const std::vector>>& expectedOutputs,
-                 const std::vector& actualOutputs) override;
-
-protected:
-    void SetUp() override;
-    InputShapeParams inShapeParams;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp
deleted file mode 100644
index aeb31ea4ad0d58..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace testing {
-namespace internal {
-
-template <>
-inline void PrintTo(const ::ov::op::v5::NonMaxSuppression::BoxEncodingType& value, ::std::ostream* os) {}
-
-} // namespace internal
-} // namespace testing
-
-namespace LayerTestsDefinitions {
-
-using InputShapeParams = std::tuple; // Number of classes
-
-using InputPrecisions =
-    std::tuple; // iou_threshold, score_threshold, soft_nms_sigma precisions
-
-using NmsParams = std::tuple; // Device name
-
-class NmsLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    void GenerateInputs() override;
-    void Compare(const std::vector>>& expectedOutputs,
-                 const std::vector& actualOutputs) override;
-
-protected:
-    void SetUp() override;
-    InputShapeParams inShapeParams;
-
-private:
-    void CompareBBoxes(const std::vector>>& expectedOutputs,
-                       const std::vector& actualOutputs);
-};
-
-class Nms9LayerTest : public NmsLayerTest {
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nonzero.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nonzero.hpp
deleted file mode 100644
index e1eff18607cd60..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/nonzero.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-#include
-#include
-#include
-#include
-
-namespace LayerTestsDefinitions {
-
-using ConfigMap = typename std::map;
-
-using NonZeroLayerTestParamsSet = typename std::tuple<
-    InferenceEngine::SizeVector, // Input shapes
-    InferenceEngine::Precision, // Input precision
-    LayerTestsUtils::TargetDevice, // Device name
-    ConfigMap>; // Additional network configuration
-
-class NonZeroLayerTest : public testing::WithParamInterface,
-                         virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp
deleted file mode 100644
index 3c46d1c5cf3522..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-using NormalizeL2LayerTestParams = std::tuple<
-    std::vector, // axes
-    float, // eps
-    ov::op::EpsMode, // eps_mode
-    InferenceEngine::SizeVector, // inputShape
-    InferenceEngine::Precision, // netPrecision
-    std::string // targetDevice
->;
-
-class NormalizeL2LayerTest : public testing::WithParamInterface,
-                             virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/one_hot.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/one_hot.hpp
deleted file mode 100644
index f16ed98f9a43b9..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/one_hot.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-namespace LayerTestsDefinitions {
-typedef std::tuple<
-    ov::element::Type, // depth type (any integer type)
-    int64_t, // depth value
-    ov::element::Type, // On & Off values type (any supported type)
-    float, // OnValue
-    float, // OffValue
-    int64_t, // axis
-    InferenceEngine::Precision, // Net precision
-    InferenceEngine::SizeVector, // Input shapes
-    LayerTestsUtils::TargetDevice // Target device name
-> oneHotLayerTestParamsSet;
-
-class OneHotLayerTest : public testing::WithParamInterface,
-                        virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pad.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pad.hpp
deleted file mode 100644
index 2fa4e951e47f52..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pad.hpp
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-typedef std::tuple<
-    std::vector, // padsBegin
-    std::vector, // padsEnd
-    float, // argPadValue
-    ngraph::helpers::PadMode, // padMode
-    InferenceEngine::Precision, // Net precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::Layout, // Input layout
-    InferenceEngine::SizeVector, // Input shapes
-    LayerTestsUtils::TargetDevice // Target device name
-> padLayerTestParamsSet;
-
-class PadLayerTest : public testing::WithParamInterface,
-                     virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-    virtual std::shared_ptr CreatePadOp(const ov::Output& data,
-                                        const std::vector& padsBegin,
-                                        const std::vector& padsEnd,
-                                        float argPadValue,
-                                        ngraph::helpers::PadMode padMode) const {
-        OPENVINO_SUPPRESS_DEPRECATED_START
-        const auto pad = ngraph::builder::makePad(data, padsBegin, padsEnd, argPadValue, padMode, false);
-        OPENVINO_SUPPRESS_DEPRECATED_END
-        return pad;
-    }
-};
-
-class PadLayerTest12 : public PadLayerTest {
-protected:
-    std::shared_ptr CreatePadOp(const ov::Output& data,
-                                const std::vector& padsBegin,
-                                const std::vector& padsEnd,
-                                float argPadValue,
-                                ngraph::helpers::PadMode padMode) const override {
-        OPENVINO_SUPPRESS_DEPRECATED_START
-        const auto pad = ngraph::builder::makePad(data, padsBegin, padsEnd, argPadValue, padMode, true);
-        OPENVINO_SUPPRESS_DEPRECATED_END
-        return pad;
-    }
-};
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp
deleted file mode 100644
index 3cdc51ad43f9b4..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    ngraph::helpers::PoolingTypes, // Pooling type, max or avg
-    std::vector, // Kernel size
-    std::vector, // Stride
-    std::vector, // Pad begin
-    std::vector, // Pad end
-    ov::op::RoundingType, // Rounding type
-    ov::op::PadType, // Pad type
-    bool // Exclude pad
-> poolSpecificParams;
-typedef std::tuple<
-    poolSpecificParams,
-    InferenceEngine::Precision, // Net precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::Layout, // Input layout
-    InferenceEngine::Layout, // Output layout
-    std::vector, // Input shape
-    std::string // Device name
-> poolLayerTestParamsSet;
-
-typedef std::tuple<
-    poolSpecificParams,
-    InferenceEngine::Precision, // Net precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::Layout, // Input layout
-    InferenceEngine::Layout, // Output layout
-    size_t, // Channel number
-    std::string // Device name
-> globalPoolLayerTestParamsSet;
-
-typedef std::tuple<
-    std::vector, // Kernel size
-    std::vector, // Stride
-    std::vector, // Dilation
-    std::vector, // Pad begin
-    std::vector, // Pad end
-    ov::element::Type_t, // Index element type
-    int64_t, // Axis
-    ov::op::RoundingType, // Rounding type
-    ov::op::PadType // Pad type
-> maxPoolV8SpecificParams;
-
-typedef std::tuple<
-    maxPoolV8SpecificParams,
-    InferenceEngine::Precision, // Net precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::Layout, // Input layout
-    InferenceEngine::Layout, // Output layout
-    std::vector, // Input shape
-    std::string // Device name
-> maxPoolV8LayerTestParamsSet;
-
-class PoolingLayerTest : public testing::WithParamInterface,
-                         virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-class GlobalPoolingLayerTest : public testing::WithParamInterface,
-                               virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-class MaxPoolingV8LayerTest : public testing::WithParamInterface,
-                              virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/power.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/power.hpp
deleted file mode 100644
index 1a22a296c8c57e..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/power.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "common_test_utils/test_constants.hpp"
-
-namespace LayerTestsDefinitions {
-
-    using PowerParamsTuple = typename std::tuple<
-    std::vector>, //input shapes
-    InferenceEngine::Precision, //Network precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::Layout, // Input layout
-    InferenceEngine::Layout, // Output layout
-    std::string, //Device name
-    std::vector>; //power
-
-class PowerLayerTest:
-    public testing::WithParamInterface,
-    virtual public LayerTestsUtils::LayerTestsCommon{
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-protected:
-    void SetUp() override;
-};
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp
deleted file mode 100644
index 08761d7110d809..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box.hpp
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "common_test_utils/common_utils.hpp"
-#include "functional_test_utils/blob_utils.hpp"
-#include "ie_core.hpp"
-#include "ie_precision.hpp"
-
-#include "functional_test_utils/blob_utils.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "common_test_utils/common_utils.hpp"
-
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-using priorBoxSpecificParams = std::tuple<
-    std::vector, // min_size
-    std::vector, // max_size
-    std::vector, // aspect_ratio
-    std::vector, // density
-    std::vector, // fixed_ratio
-    std::vector, // fixed_size
-    bool, // clip
-    bool, // flip
-    float, // step
-    float, // offset
-    std::vector, // variance
-    bool, // scale_all_sizes
-    bool>; // min_max_aspect_ratios_order
-
-typedef std::tuple<
-    priorBoxSpecificParams,
-    InferenceEngine::Precision, // net precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::Layout, // Input layout
-    InferenceEngine::Layout, // Output layout
-    InferenceEngine::SizeVector, // input shape
-    InferenceEngine::SizeVector, // image shape
-    std::string> priorBoxLayerParams;
-
-class PriorBoxLayerTest
-    : public testing::WithParamInterface,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-protected:
-    InferenceEngine::SizeVector inputShapes;
-    InferenceEngine::SizeVector imageShapes;
-    InferenceEngine::Precision netPrecision;
-    std::vector min_size;
-    std::vector max_size;
-    std::vector aspect_ratio;
-    std::vector density;
-    std::vector fixed_ratio;
-    std::vector fixed_size;
-    std::vector variance;
-    float step;
-    float offset;
-    bool clip;
-    bool flip;
-    bool scale_all_sizes;
-    bool min_max_aspect_ratios_order;
-
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp
deleted file mode 100644
index 1f35f829f5d61a..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/prior_box_clustered.hpp
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "common_test_utils/common_utils.hpp"
-#include "functional_test_utils/blob_utils.hpp"
-#include "ie_core.hpp"
-#include "ie_precision.hpp"
-
-#include "functional_test_utils/blob_utils.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "common_test_utils/common_utils.hpp"
-
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    std::vector, // widths
-    std::vector, // heights
-    bool, // clip
-    float, // step_width
-    float, // step_height
-    float, // step
-    float, // offset
-    std::vector> priorBoxClusteredSpecificParams;
-
-typedef std::tuple<
-    priorBoxClusteredSpecificParams,
-    InferenceEngine::Precision, // net precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::Layout, // Input layout
-    InferenceEngine::Layout, // Output layout
-    InferenceEngine::SizeVector, // input shape
-    InferenceEngine::SizeVector, // image shape
-    std::string> priorBoxClusteredLayerParams;
-
-class PriorBoxClusteredLayerTest
-    : public testing::WithParamInterface,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    InferenceEngine::SizeVector inputShapes;
-    InferenceEngine::SizeVector imageShapes;
-    InferenceEngine::Precision netPrecision;
-    std::vector widths;
-    std::vector heights;
-    std::vector variances;
-    float step_width;
-    float step_height;
-    float step;
-    float offset;
-    bool clip;
-
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/proposal.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/proposal.hpp
deleted file mode 100644
index 31223f0b26728e..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/proposal.hpp
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-namespace proposalTypes {
-
-typedef size_t base_size_type;
-typedef size_t pre_nms_topn_type;
-typedef size_t post_nms_topn_type;
-typedef float nms_thresh_type;
-typedef size_t min_size_type;
-typedef std::vector ratio_type;
-typedef std::vector scale_type;
-typedef bool clip_before_nms_type;
-typedef bool clip_after_nms_type;
-typedef bool normalize_type;
-typedef size_t feat_stride_type;
-typedef float box_size_scale_type;
-typedef float box_coordinate_scale_type;
-typedef std::string framework_type;
-
-}; // namespace proposalTypes
-
-using namespace proposalTypes;
-
-typedef std::tuple<
-    base_size_type,
-    pre_nms_topn_type,
-    post_nms_topn_type,
-    nms_thresh_type,
-    min_size_type,
-    ratio_type,
-    scale_type,
-    clip_before_nms_type,
-    clip_after_nms_type,
-    framework_type> proposalSpecificParams;
-typedef std::tuple<
-    proposalSpecificParams,
-    std::string> proposalLayerTestParamsSet;
-
-class ProposalLayerTest
-    : public testing::WithParamInterface,
-      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    static std::string SerializeProposalSpecificParams(proposalSpecificParams& params);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-    void Compare(const std::vector>> &expectedOutputs,
-                 const std::vector &actualOutputs) override;
-    template
-    void Compare(const T *expected, const T *actual, std::size_t size,
-                 T threshold, const std::size_t output_index) {
-        for (std::size_t i = 0; i < size; ++i) {
-            const auto &ref = expected[i];
-            const auto &res = actual[i];
-
-            // verify until first -1 appears in the 1st output.
-            if (output_index == 0 &&
-                ov::test::utils::ie_abs(ref - static_cast(-1)) <= threshold) {
-                // output0 shape = {x, 5}
-                // output1 shape = {x}
-                // setting the new_size for output1 verification
-                num_selected_boxes = i / 5;
-                return;
-            }
-
-            const auto absoluteDifference = ov::test::utils::ie_abs(res - ref);
-            if (absoluteDifference <= threshold) {
-                continue;
-            }
-
-            const auto max = std::max(ov::test::utils::ie_abs(res),
-                                      ov::test::utils::ie_abs(ref));
-            float diff =
-                static_cast(absoluteDifference) / static_cast(max);
-            ASSERT_TRUE(max != 0 && (diff <= static_cast(threshold)))
-                << "Relative comparison of values expected: " << ref
-                << " and actual: " << res << " at index " << i
-                << " with threshold " << threshold << " failed";
-        }
-    }
-protected:
-    void SetUp() override;
-
-private:
-    size_t num_selected_boxes;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp
deleted file mode 100644
index 14f35e754b1787..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/psroi_pooling.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-using psroiParams = std::tuple, // input shape
-                               std::vector, // coords shape
-                               size_t, // output_dim
-                               size_t, // group_size
-                               float, // Spatial scale
-                               size_t, // spatial_bins_x
-                               size_t, // spatial_bins_y
-                               std::string, // mode
-                               InferenceEngine::Precision, // Net precision
-                               LayerTestsUtils::TargetDevice>; // Device name
-
-class PSROIPoolingLayerTest : public testing::WithParamInterface,
-                              virtual public LayerTestsUtils::LayerTestsCommon {
-    public:
-        static std::string getTestCaseName(const testing::TestParamInfo& obj);
-        void GenerateInputs() override;
-        static void fillROITensor(float* buffer, int numROIs, int batchSize,
-                                  int height, int width, int groupSize,
-                                  float spatialScale, int spatialBinsX, int spatialBinsY, const std::string& mode);
-
-    protected:
-        void SetUp() override;
-
-    private:
-        size_t groupSize_;
-        float spatialScale_;
-        size_t spatialBinsX_;
-        size_t spatialBinsY_;
-        std::string mode_;
-    };
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/random_uniform.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/random_uniform.hpp
deleted file mode 100644
index 5741908b81287a..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/random_uniform.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "constant.hpp"
-
-namespace LayerTestsDefinitions {
-
-struct RandomUniformTypeSpecificParams {
-    InferenceEngine::Precision precision; // Output data precision
-    double min_value; // min value constant, will be cast to the needed precision
-    double max_value; // max value constant, will be cast to the needed precision
-};
-
-using RandomUniformParamsTuple = typename std::tuple<
-    ov::Shape, // output shape
-    RandomUniformTypeSpecificParams, // parameters which depends on output type
-    int64_t, // global seed
-    int64_t, // operation seed
-    std::string>; // Device name
-
-class RandomUniformLayerTest : public testing::WithParamInterface,
-                               virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo &obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
-
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/range.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/range.hpp
deleted file mode 100644
index b83fcee41fd1ec..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/range.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-typedef std::tuple<
-    float, // start
-    float, // stop
-    float, // step
-    InferenceEngine::Precision, // Net precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::Layout, // Input layout
-    InferenceEngine::Layout, // Output layout
-    std::string // Target device name
-> RangeParams;
-
-class RangeLayerTest : public testing::WithParamInterface,
-                       virtual public LayerTestsUtils::LayerTestsCommon {
-    float start, stop, step;
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    void Infer() override;
-
-protected:
-    void SetUp() override;
-};
-
-class RangeNumpyLayerTest : public testing::WithParamInterface,
-                            virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    void Infer() override;
-protected:
-    void SetUp() override;
-private:
-    float start, stop, step;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rdft.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rdft.hpp
deleted file mode 100644
index aeaf504a117f95..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rdft.hpp
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::SizeVector, // Input shapes
-    InferenceEngine::Precision, // Input precision
-    std::vector, // Axes
-    std::vector, // Signal size
-    ngraph::helpers::DFTOpType,
-    std::string> RDFTParams; // Device name
-
-class RDFTLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-
-protected:
-    void SetUp() override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reduce_ops.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reduce_ops.hpp
deleted file mode 100644
index cb63bffe99a234..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reduce_ops.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    std::vector, // Axis to reduce order
-    ov::test::utils::OpType, // Scalar or vector type axis
-    bool, // Keep dims
-    ngraph::helpers::ReductionType, // Reduce operation type
-    InferenceEngine::Precision, // Net precision
-    InferenceEngine::Precision, // Input precision
-    InferenceEngine::Precision, // Output precision
-    InferenceEngine::Layout, // Input layout
-    std::vector, // Input shapes
-    std::string // Target device name
-> reduceMeanParams;
-
-class ReduceOpsLayerTest : public testing::WithParamInterface,
-                           virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-class ReduceOpsLayerWithSpecificInputTest : public ReduceOpsLayerTest {
-protected:
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-};
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/region_yolo.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/region_yolo.hpp
deleted file mode 100644
index 5a56fc002b2868..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/region_yolo.hpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-#include
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -using regionYoloParamsTuple = std::tuple< - ov::Shape, // Input Shape - size_t, // classes - size_t, // coordinates - size_t, // num regions - bool, // do softmax - std::vector, // mask - int, // start axis - int, // end axis - InferenceEngine::Precision, // Network precision - std::string>; // Device name - -class RegionYoloLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reorg_yolo.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reorg_yolo.hpp deleted file mode 100644 index 2431ece8314db5..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reorg_yolo.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -using ReorgYoloParamsTuple = typename std::tuple< - ov::Shape, // Input Shape - size_t, // stride - InferenceEngine::Precision, // Network precision - std::string>; // Device name - -class ReorgYoloLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reshape.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reshape.hpp deleted file mode 100644 index 814444bff712ed..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reshape.hpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { - -typedef std::tuple, // Input shapes - std::vector, // OutForm Shapes - std::string, // Device name - std::map // Config - > - reshapeParams; -class ReshapeLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/result.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/result.hpp deleted file mode 100644 index 71cdbdae6dad38..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/result.hpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include 
-#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -using ConfigMap = typename std::map; - -using ResultTestParamSet = typename std::tuple< - InferenceEngine::SizeVector, // Input shapes - InferenceEngine::Precision, // Input precision - LayerTestsUtils::TargetDevice, // Device name - ConfigMap>; // Additional network configuration - -class ResultLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reverse.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reverse.hpp deleted file mode 100644 index 31521f05cd2a9e..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reverse.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { -using reverseParams = std::tuple, // input shape - std::vector, // axes - std::string, // mode - InferenceEngine::Precision, // net precision - LayerTestsUtils::TargetDevice>; // device name - -class ReverseLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; -}; -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reverse_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reverse_sequence.hpp deleted file mode 100644 index 77049ac9c20c39..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/reverse_sequence.hpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace LayerTestsDefinitions { - -using ReverseSequenceParamsTuple = typename std::tuple< - int64_t, // Index of the batch dimension - int64_t, // Index of the sequence dimension - std::vector, // Input shapes - std::vector, // Shape of the input vector with sequence lengths to be reversed - ngraph::helpers::InputLayerType, // Secondary input type - InferenceEngine::Precision, // Network precision - std::string>; // Device name - -class ReverseSequenceLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo &obj); - -protected: - void SetUp() override; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_cell.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_cell.hpp deleted file mode 100644 index 9af0ac37ee2877..00000000000000 --- 
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_cell.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-using RNNCellParams = typename std::tuple<
-    bool,                             // using decompose to sub-ops transformation
-    size_t,                           // batch
-    size_t,                           // hidden size
-    size_t,                           // input size
-    std::vector<std::string>,         // activations
-    float,                            // clip
-    ngraph::helpers::InputLayerType,  // W input type (Constant or Parameter)
-    ngraph::helpers::InputLayerType,  // R input type (Constant or Parameter)
-    ngraph::helpers::InputLayerType,  // B input type (Constant or Parameter)
-    InferenceEngine::Precision,       // Network precision
-    std::string>;                     // Device name
-
-class RNNCellTest : public testing::WithParamInterface<RNNCellParams>,
-                    virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<RNNCellParams> &obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp
deleted file mode 100644
index 3292cdd015ffb3..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-using RNNSequenceParams = typename std::tuple<
-    ngraph::helpers::SequenceTestsMode,  // pure Sequence or TensorIterator
-    size_t,                              // seq_lengths
-    size_t,                              // batch
-    size_t,                              // hidden size
-    size_t,                              // input size
-    std::vector<std::string>,            // activations
-    float,                               // clip
-    ov::op::RecurrentSequenceDirection,  // direction
-    ngraph::helpers::InputLayerType,     // WRB input type (Constant or Parameter)
-    InferenceEngine::Precision,          // Network precision
-    std::string>;                        // Device name
-
-class RNNSequenceTest : public testing::WithParamInterface<RNNSequenceParams>,
-                        virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<RNNSequenceParams> &obj);
-
-protected:
-    void SetUp() override;
-    void GenerateInputs() override;
-
-private:
-    ngraph::helpers::SequenceTestsMode m_mode;
-    int64_t m_max_seq_len = 0;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_align.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_align.hpp
deleted file mode 100644
index 5628ef23f0f87f..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_align.hpp
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-using roialignParams = std::tuple<
-    std::vector<size_t>,  // feature map shape
-    std::vector<size_t>,  // proposal coords shape
shape - int, // bin's row count - int, // bin's column count - float, // spatial scale - int, // pooling ratio - std::string, // pooling mode - InferenceEngine::Precision, // net precision - LayerTestsUtils::TargetDevice>; // device name - -class ROIAlignLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - static void fillCoordTensor(std::vector& coords, int height, int width, - float spatialScale, int pooledRatio, int pooledH, int pooledW); - static void fillIdxTensor(std::vector& idx, int batchSize); - -protected: - void SetUp() override; - -private: - int pooledH; - int pooledW; - float spatialScale; - int poolingRatio; - std::string poolingMode; -}; - -using roialignV9Params = std::tuple, // feature map shape - std::vector, // proposal coords shape - int, // bin's row count - int, // bin's column count - float, // spatial scale - int, // pooling ratio - std::string, // pooling mode - std::string, // roi aligned mode - InferenceEngine::Precision, // net precision - LayerTestsUtils::TargetDevice>; // device name - -class ROIAlignV9LayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - -protected: - void SetUp() override; - -private: - int pooledH; - int pooledW; - float spatialScale; - int poolingRatio; - std::string poolingMode; - std::string roiAlignedMode; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_pooling.hpp deleted file mode 100644 index 9ca462fa1f9ad2..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roi_pooling.hpp +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include - -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -#include "shared_test_classes/base/layer_test_utils.hpp" - -namespace LayerTestsDefinitions { - -using roiPoolingParamsTuple = std::tuple< - InferenceEngine::SizeVector, // Input shape - InferenceEngine::SizeVector, // Coords shape - std::vector, // Pooled shape {pooled_h, pooled_w} - float, // Spatial scale - ngraph::helpers::ROIPoolingTypes, // ROIPooling method - InferenceEngine::Precision, // Net precision - LayerTestsUtils::TargetDevice>; // Device name - -class ROIPoolingLayerTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { -public: - static std::string getTestCaseName(const testing::TestParamInfo& obj); - void GenerateInputs() override; - -protected: - void SetUp() override; - -private: - ngraph::helpers::ROIPoolingTypes pool_method; - float spatial_scale; -}; - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roll.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roll.hpp deleted file mode 100644 index da3748424f9fd6..00000000000000 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/roll.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: 
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    InferenceEngine::SizeVector,  // Input shapes
-    InferenceEngine::Precision,   // Input precision
-    std::vector<int64_t>,         // Shift
-    std::vector<int64_t>,         // Axes
-    std::string> rollParams;      // Device name
-
-class RollLayerTest : public testing::WithParamInterface<rollParams>, virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<rollParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_ND_update.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_ND_update.hpp
deleted file mode 100644
index 7aeb0dafbb7817..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_ND_update.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <map>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-using sliceSelectInShape = std::tuple<
-    std::vector<size_t>,   // input shape
-    std::vector<size_t>,   // indices shape
-    std::vector<size_t>,   // indices value
-    std::vector<size_t>>;  // update shape
-
-using scatterNDUpdateParamsTuple = typename std::tuple<
-    sliceSelectInShape,          // Input description
-    InferenceEngine::Precision,  // Network precision
-    InferenceEngine::Precision,  // indices precision
-    std::string>;                // Device name
-
-class ScatterNDUpdateLayerTest : public testing::WithParamInterface<scatterNDUpdateParamsTuple>,
-                                 virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<scatterNDUpdateParamsTuple> &obj);
-    static std::vector<sliceSelectInShape> combineShapes(
-        const std::map<std::vector<size_t>, std::map<std::vector<size_t>, std::vector<size_t>>>& inputShapes);
-
-protected:
-    void SetUp() override;
-};
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_elements_update.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_elements_update.hpp
deleted file mode 100644
index 46c5b0d3c42d51..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_elements_update.hpp
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <map>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-using axisShapeInShape = std::tuple<
-    std::vector<size_t>,  // input shape
-    std::vector<size_t>,  // update shape
-    int>;                 // axis
-
-using scatterElementsUpdateParamsTuple = typename std::tuple<
-    axisShapeInShape,            // shape description
-    std::vector<size_t>,         // indices value
-    InferenceEngine::Precision,  // Network precision
-    InferenceEngine::Precision,  // indices precision
-    std::string>;                // Device name
-
-using scatterElementsUpdate12ParamsTuple = typename std::tuple<
-    axisShapeInShape,                                // shape description
-    std::vector<size_t>,                             // indices value
-    ov::op::v12::ScatterElementsUpdate::Reduction,   // Reduce mode
-    bool,                                            // Use init value
-    InferenceEngine::Precision,                      // Network precision
-    InferenceEngine::Precision,                      // indices precision
-    std::string>;                                    // Device name
-
-class ScatterElementsUpdateLayerTest : public testing::WithParamInterface<scatterElementsUpdateParamsTuple>,
-                                       virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<scatterElementsUpdateParamsTuple> &obj);
-    static std::vector<axisShapeInShape> combineShapes(
-        const std::map<std::vector<size_t>, std::map<std::vector<size_t>, std::vector<size_t>>>& inputShapes);
-
-protected:
-    void SetUp() override;
-};
-
-class ScatterElementsUpdate12LayerTest : public testing::WithParamInterface<scatterElementsUpdate12ParamsTuple>,
-                                         virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<scatterElementsUpdate12ParamsTuple> &obj);
-protected:
-    void SetUp() override;
-};
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_update.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_update.hpp
deleted file mode 100644
index fa5157ccee5b5a..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/scatter_update.hpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <map>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-using axisUpdateShapeInShape = std::tuple<
-    std::vector<size_t>,  // input shape
-    std::vector<size_t>,  // indices shape
-    std::vector<size_t>,  // update shape
-    int64_t>;             // axis
-
-using scatterUpdateParamsTuple = typename std::tuple<
-    axisUpdateShapeInShape,      // shape description
-    std::vector<int64_t>,        // indices value
-    InferenceEngine::Precision,  // input precision
-    InferenceEngine::Precision,  // indices precision
-    std::string>;                // Device name
-
-class ScatterUpdateLayerTest : public testing::WithParamInterface<scatterUpdateParamsTuple>,
-                               virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<scatterUpdateParamsTuple> &obj);
-    static std::vector<axisUpdateShapeInShape> combineShapes(
-        const std::map<std::vector<size_t>, std::map<std::vector<size_t>, std::vector<size_t>>>& inputShapes);
-
-protected:
-    void SetUp() override;
-};
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp
deleted file mode 100644
index 7426dc04a0ca03..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<std::vector<size_t>>,  // mask, then, else shapes
-    InferenceEngine::Precision,        // then, else precision
-    ov::op::AutoBroadcastSpec,         // broadcast
-    std::string> selectTestParams;     // device name
-
-class SelectLayerTest : public testing::WithParamInterface<selectTestParams>, virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<selectTestParams> &obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shape_of.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shape_of.hpp
deleted file mode 100644
index 2bd8ed1d917644..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shape_of.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-typedef std::tuple<
-    InferenceEngine::Precision,  // Network precision
-    std::vector<size_t>,         // Input shapes
-    std::string                  // Device name
-> shapeOfParamsCommon;
-
-typedef std::tuple<
-    InferenceEngine::Precision,  // Network precision
-    InferenceEngine::Precision,  // Output precision
-    std::vector<size_t>,         // Input shapes
-    std::string                  // Device name
-> shapeOfParams;
-
-class ShapeOfLayerTest : public testing::WithParamInterface<shapeOfParams>,
-                         virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(testing::TestParamInfo<shapeOfParams> obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shuffle_channels.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shuffle_channels.hpp
deleted file mode 100644
index 566842c3a0c65a..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/shuffle_channels.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    int,  // axis
-    int   // group
-> shuffleChannelsSpecificParams;
-
-typedef std::tuple<
-    shuffleChannelsSpecificParams,
-    InferenceEngine::Precision,    // Net precision
-    InferenceEngine::Precision,    // Input precision
-    InferenceEngine::Precision,    // Output precision
-    InferenceEngine::Layout,       // Input layout
-    InferenceEngine::Layout,       // Output layout
-    InferenceEngine::SizeVector,   // Input shapes
-    LayerTestsUtils::TargetDevice  // Device name
-> shuffleChannelsLayerTestParamsSet;
-
-class ShuffleChannelsLayerTest : public testing::WithParamInterface<shuffleChannelsLayerTestParamsSet>,
-                                 virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<shuffleChannelsLayerTestParamsSet>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/slice.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/slice.hpp
deleted file mode 100644
index efed23ec5b6a24..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/slice.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <map>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "shared_test_classes/base/ov_subgraph.hpp"
-
-namespace LayerTestsDefinitions {
-
-struct Slice8SpecificParams {
-    std::vector<ov::test::InputShape> shapes;
-    std::vector<int64_t> start;
-    std::vector<int64_t> stop;
-    std::vector<int64_t> step;
-    std::vector<int64_t> axes;
-};
-
-using Slice8Params = std::tuple<
-    Slice8SpecificParams,    // Slice-8 specific parameters
-    ov::test::ElementType,   // Net precision
-    ov::test::ElementType,   // Input precision
-    ov::test::ElementType,              // Output precision
-    InferenceEngine::Layout,            // Input layout
-    InferenceEngine::Layout,            // Output layout
-    std::string,                        // Device name
-    std::map<std::string, std::string>  // Additional network configuration
->;
-
-class Slice8LayerTest : public testing::WithParamInterface<Slice8Params>,
-                        virtual public ov::test::SubgraphBaseTest {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<Slice8Params> &obj);
-
-protected:
-    void SetUp() override;
-};
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_batch.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_batch.hpp
deleted file mode 100644
index b3383f73c3d0f4..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_batch.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-using spaceToBatchParamsTuple = typename std::tuple<
-    std::vector<int64_t>,        // block_shape
-    std::vector<int64_t>,        // pads_begin
-    std::vector<int64_t>,        // pads_end
-    std::vector<size_t>,         // Input shapes
-    InferenceEngine::Precision,  // Network precision
-    InferenceEngine::Precision,  // Input precision
-    InferenceEngine::Precision,  // Output precision
-    InferenceEngine::Layout,     // Input layout
-    InferenceEngine::Layout,     // Output layout
-    std::string>;                // Device name>;
-
-class SpaceToBatchLayerTest : public testing::WithParamInterface<spaceToBatchParamsTuple>,
-                              virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<spaceToBatchParamsTuple> &obj);
-
-protected:
-    void SetUp() override;
-};
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp
deleted file mode 100644
index 361ff9fa41015c..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-using spaceToDepthParamsTuple = typename std::tuple<
-    std::vector<size_t>,                         // Input shape
-    InferenceEngine::Precision,                  // Input precision
-    ov::op::v0::SpaceToDepth::SpaceToDepthMode,  // Mode
-    std::size_t,                                 // Block size
-    std::string>;                                // Device name>
-
-class SpaceToDepthLayerTest : public testing::WithParamInterface<spaceToDepthParamsTuple>,
-                              virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<spaceToDepthParamsTuple> &obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/split.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/split.hpp
deleted file mode 100644
index ccb9a43b144758..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/split.hpp
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    size_t,                      // Num splits
-    int64_t,                     // Axis
-    InferenceEngine::Precision,  // Net precision
-    InferenceEngine::Precision,  // Input precision
-    InferenceEngine::Precision,  // Output precision
-    InferenceEngine::Layout,     // Input layout
-    InferenceEngine::Layout,     // Output layout
-    std::vector<size_t>,         // Input shapes
-    std::vector<size_t>,         // Used outputs indices
-    std::string                  // Target device name
-> splitParams;
-
-class SplitLayerTest : public testing::WithParamInterface<splitParams>,
-                       virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<splitParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/squeeze_unsqueeze.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/squeeze_unsqueeze.hpp
deleted file mode 100644
index 995ca768b15b07..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/squeeze_unsqueeze.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-using ShapeAxesTuple = std::pair<std::vector<size_t>, std::vector<int>>;
-
-typedef std::tuple<
-    ShapeAxesTuple,                  // InputShape (required), Squeeze indexes (if empty treated as non-existent)
-    ngraph::helpers::SqueezeOpType,  // OpType
-    InferenceEngine::Precision,      // Net precision
-    InferenceEngine::Precision,      // Input precision
-    InferenceEngine::Precision,      // Output precision
-    InferenceEngine::Layout,         // Input layout
-    InferenceEngine::Layout,         // Output layout
-    std::string                      // Target device name
-> squeezeParams;
-
-class SqueezeUnsqueezeLayerTest : public testing::WithParamInterface<squeezeParams>,
-                                  virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<squeezeParams>& obj);
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/strided_slice.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/strided_slice.hpp
deleted file mode 100644
index 3bc225c5ae6114..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/strided_slice.hpp
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <map>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsDefinitions {
-
-struct StridedSliceSpecificParams {
-    InferenceEngine::SizeVector inputShape;
-    std::vector<int64_t> begin;
-    std::vector<int64_t> end;
-    std::vector<int64_t> strides;
-    std::vector<int64_t> beginMask;
-    std::vector<int64_t> endMask;
-    std::vector<int64_t> newAxisMask;
-    std::vector<int64_t> shrinkAxisMask;
-    std::vector<int64_t> ellipsisAxisMask;
-};
-
-using StridedSliceParams = std::tuple<
-    StridedSliceSpecificParams,
-    InferenceEngine::Precision,  // Net precision
-    InferenceEngine::Precision,         // Input precision
-    InferenceEngine::Precision,         // Output precision
-    InferenceEngine::Layout,            // Input layout
-    InferenceEngine::Layout,            // Output layout
-    std::string,                        // Device name
-    std::map<std::string, std::string>  // Additional network configuration
->;
-
-class StridedSliceLayerTest : public testing::WithParamInterface<StridedSliceParams>,
-                              virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<StridedSliceParams> &obj);
-
-protected:
-    void SetUp() override;
-};
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp
deleted file mode 100644
index 15342892a43e1a..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-namespace LayerTestsDefinitions {
-
-using TensorIteratorParams = typename std::tuple<
-    bool,                                 // using unroll tensor iterator transformation
-    size_t,                               // seq_lengths
-    size_t,                               // batch
-    size_t,                               // hidden size
-    // todo: fix. input size hardcoded to 10 due to limitation (10 args) of gtests Combine() func.
-    //size_t,                             // input size
-    size_t,                               // sequence axis
-    float,                                // clip
-    ngraph::helpers::TensorIteratorBody,  // body type
-    ov::op::RecurrentSequenceDirection,   // direction
-    InferenceEngine::Precision,           // Network precision
-    std::string>;                         // Device name
-
-class TensorIteratorTest : public testing::WithParamInterface<TensorIteratorParams>,
-                           virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<TensorIteratorParams> &obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tile.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tile.hpp
deleted file mode 100644
index 1ce46e8bf30878..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tile.hpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::vector<int64_t> TileSpecificParams;
-typedef std::tuple<
-    TileSpecificParams,
-    InferenceEngine::Precision,    // Net precision
-    InferenceEngine::Precision,    // Input precision
-    InferenceEngine::Precision,    // Output precision
-    InferenceEngine::Layout,       // Input layout
-    InferenceEngine::Layout,       // Output layout
-    InferenceEngine::SizeVector,   // Input shapes
-    LayerTestsUtils::TargetDevice  // Device name
-> TileLayerTestParamsSet;
-
-class TileLayerTest : public testing::WithParamInterface<TileLayerTestParamsSet>,
-                      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<TileLayerTestParamsSet>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp
deleted file mode 100644
index 3759c0bef6d569..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <vector>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-typedef std::tuple<
-    int64_t,                      // keepK
-    int64_t,                      // axis
-    ov::op::v3::TopK::Mode,       // mode
-    ov::op::v3::TopK::SortType,   // sort
-    InferenceEngine::Precision,   // Net precision
-    InferenceEngine::Precision,   // Input precision
-    InferenceEngine::Precision,   // Output precision
-    InferenceEngine::Layout,      // Input layout
-    InferenceEngine::SizeVector,  // inputShape
-    std::string                   // Target device name
-> TopKParams;
-
-class TopKLayerTest : public testing::WithParamInterface<TopKParams>,
-                      virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<TopKParams>& obj);
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/transpose.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/transpose.hpp
deleted file mode 100644
index b7547ac1aa8e47..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/transpose.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,         // Input order
-    InferenceEngine::Precision,  // Net precision
-    InferenceEngine::Precision,  // Input precision
-    InferenceEngine::Precision,  // Output precision
-    InferenceEngine::Layout,     // Input layout
-    InferenceEngine::Layout,     // Output layout
-    std::vector<size_t>,         // Input shapes
-    std::string                  // Target device name
-> transposeParams;
-
-class TransposeLayerTest : public testing::WithParamInterface<transposeParams>,
-                           virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<transposeParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/variadic_split.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/variadic_split.hpp
deleted file mode 100644
index b041d368e8f073..00000000000000
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/variadic_split.hpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <tuple>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<size_t>,         // Num splits
-    int64_t,                     // Axis
-    InferenceEngine::Precision,  // Net precision
-    InferenceEngine::Precision,  // Input precision
-    InferenceEngine::Precision,  // Output precision
-    InferenceEngine::Layout,     // Input layout
-    InferenceEngine::Layout,     // Output layout
-    std::vector<size_t>,         // Input shapes
-    std::string                  // Target device name
-> VariadicSplitParams;
-
-class VariadicSplitLayerTest : public testing::WithParamInterface<VariadicSplitParams>,
-                               virtual public LayerTestsUtils::LayerTestsCommon {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<VariadicSplitParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather.hpp
index a41a13ce962eb7..29398ba28a336d 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/gather.hpp
@@ -78,5 +78,28 @@ class Gather8withIndicesDataLayerTest : public testing::WithParamInterface<gather8withIndicesDataParamsTuple>,
+typedef std::tuple<std::vector<InputShape>,  // Input shapes
+                   ov::Shape,                // Indices shape
+                   std::tuple<int, int>,     // Gather axis and batch
+                   ov::element::Type,        // Model type
+                   std::string,              // Device name
+                   std::vector<int64_t>,     // indices data
+                   std::vector<std::string>  // String data
+                   >
+    GatherStringParamsTuple;
+
+class GatherStringWithIndicesDataLayerTest : public testing::WithParamInterface<GatherStringParamsTuple>,
+                                             virtual public ov::test::SubgraphBaseTest {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<GatherStringParamsTuple>& obj);
+
+protected:
+    void SetUp() override;
+    void generate_inputs(const std::vector<ov::Shape>& target_shapes) override;
+
+private:
+    std::vector<std::string> string_data;
+};
+
 } // namespace test
 } // namespace ov
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_normalization.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/group_normalization.hpp
similarity index 98%
rename from src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_normalization.hpp
rename to src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/group_normalization.hpp
index 1527c07a635977..f89e75b11f1d86 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_normalization.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/group_normalization.hpp
@@ -8,7 +8,6 @@
 namespace ov {
 namespace test {
-namespace subgraph {
 
 using GroupNormalizationTestParams = std::tuple<
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift.hpp
deleted file mode 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/scaleshift.hpp
+++ /dev/null
-namespace SubgraphTestsDefinitions {
-using ScaleShiftParamsTuple = typename std::tuple<
-    std::vector<std::vector<size_t>>,  //input shapes
-    InferenceEngine::Precision,        //Network precision
-    std::string,                       //Device name
-    std::vector<float>,                //scale
-    std::vector<float>>;               //shift
-
-class ScaleShiftLayerTest:
-        public testing::WithParamInterface<ScaleShiftParamsTuple>,
-        virtual public LayerTestsUtils::LayerTestsCommon{
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ScaleShiftParamsTuple> &obj);
-protected:
-    void SetUp() override;
-};
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp
index d74865a6bb0c6b..a08870dfb68006 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/split_conv_concat.hpp
@@ -30,16 +30,3 @@ class SplitConvConcat : public testing::WithParamInterface<BasicParams>,
-
-namespace SubgraphTestsDefinitions {
-
-class SplitConvConcat : public testing::WithParamInterface<ov::test::BasicParams>,
-                        virtual public ov::test::SplitConvConcatBase {
-public:
-    static std::string getTestCaseName(const testing::TestParamInfo<ov::test::BasicParams>& obj);
-
-protected:
-    void SetUp() override;
-};
-
-}  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp b/src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp
deleted file mode 100644
index 4580a2e695b7ed..00000000000000
--- a/src/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp
+++ /dev/null
@@ -1,448 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <signal.h>
-#ifdef _WIN32
-#include <process.h>
-#endif
-
-#include <thread>
-
-#include "common_test_utils/file_utils.hpp"
-#include "functional_test_utils/core_config.hpp"
-#include "ie_icore.hpp"
-#include "openvino/opsets/opset.hpp"
-#include "openvino/pass/serialize.hpp"
-#include "openvino/runtime/device_id_parser.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-
-namespace LayerTestsUtils {
-
-LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f), abs_threshold(-1.f) {
-    core = PluginCache::get().ie(targetDevice);
-}
-
-void LayerTestsCommon::Run() {
-    bool isCurrentTestDisabled = ov::test::utils::current_test_is_disabled();
-
-    ov::test::utils::PassRate::Statuses status = isCurrentTestDisabled ?
-         ov::test::utils::PassRate::Statuses::SKIPPED :
-         ov::test::utils::PassRate::Statuses::CRASHED;
-
-    auto &s = ov::test::utils::OpSummary::getInstance();
-    s.setDeviceName(targetDevice);
-    s.updateOPsStats(function, status);
-
-    if (isCurrentTestDisabled)
-        GTEST_SKIP() << "Disabled test due to configuration" << std::endl;
-
-    if (functionRefs == nullptr) {
-        functionRefs = function->clone();
-        functionRefs->set_friendly_name("refFunction");
-    }
-
-    // in case of crash jump will be made and work will be continued
-    auto crashHandler = std::unique_ptr<ov::test::utils::CrashHandler>(new ov::test::utils::CrashHandler());
-
-    // place to jump in case of a crash
-    int jmpRes = 0;
-#ifdef _WIN32
-    jmpRes = setjmp(ov::test::utils::env);
-#else
-    jmpRes = sigsetjmp(ov::test::utils::env, 1);
-#endif
-    if (jmpRes == ov::test::utils::JMP_STATUS::ok) {
-        crashHandler->StartTimer();
-        try {
-            LoadNetwork();
-            GenerateInputs();
-            Infer();
-            Validate();
-            s.updateOPsStats(functionRefs, ov::test::utils::PassRate::Statuses::PASSED);
-        }
-        catch (const std::runtime_error &re) {
-            s.updateOPsStats(functionRefs, ov::test::utils::PassRate::Statuses::FAILED);
-            GTEST_FATAL_FAILURE_(re.what());
-        } catch (const std::exception &ex) {
-            s.updateOPsStats(functionRefs, ov::test::utils::PassRate::Statuses::FAILED);
-            GTEST_FATAL_FAILURE_(ex.what());
-        } catch (...) {
-            s.updateOPsStats(functionRefs, ov::test::utils::PassRate::Statuses::FAILED);
-            GTEST_FATAL_FAILURE_("Unknown failure occurred.");
-        }
-    } else if (jmpRes == ov::test::utils::JMP_STATUS::anyError) {
-        IE_THROW() << "Crash happens";
-    } else if (jmpRes == ov::test::utils::JMP_STATUS::alarmErr) {
-        s.updateOPsStats(functionRefs, ov::test::utils::PassRate::Statuses::HANGED);
-        IE_THROW() << "Crash happens";
-    }
-}
-
-InferenceEngine::Blob::Ptr LayerTestsCommon::GenerateInput(const InferenceEngine::InputInfo& info) const {
-    return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
-}
-
-void LayerTestsCommon::Compare(
-    const std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>>& expectedOutputs,
-    const std::vector<InferenceEngine::Blob::Ptr>& actualOutputs,
-    float threshold,
-    float abs_threshold) {
-    for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) {
-        const auto &expected = expectedOutputs[outputIndex];
-        const auto &actual = actualOutputs[outputIndex];
-        Compare(expected, actual, threshold, abs_threshold);
-    }
-}
-
-template <typename T_IE>
-inline void callCompare(const std::pair<ov::element::Type, std::vector<std::uint8_t>>& expected,
-                        const T_IE* actualBuffer,
-                        size_t size,
-                        float threshold,
-                        float abs_threshold) {
-    auto expectedBuffer = expected.second.data();
-    switch (expected.first) {
-    case ov::element::Type_t::boolean:
-    case ov::element::Type_t::u8:
-        LayerTestsCommon::Compare<T_IE, uint8_t>(reinterpret_cast<const uint8_t*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::i8:
-        LayerTestsCommon::Compare<T_IE, int8_t>(reinterpret_cast<const int8_t*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::u16:
-        LayerTestsCommon::Compare<T_IE, uint16_t>(reinterpret_cast<const uint16_t*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::i16:
-        LayerTestsCommon::Compare<T_IE, int16_t>(reinterpret_cast<const int16_t*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::u32:
-        LayerTestsCommon::Compare<T_IE, uint32_t>(reinterpret_cast<const uint32_t*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::i32:
-        LayerTestsCommon::Compare<T_IE, int32_t>(reinterpret_cast<const int32_t*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::u64:
-        LayerTestsCommon::Compare<T_IE, uint64_t>(reinterpret_cast<const uint64_t*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::i64:
-        LayerTestsCommon::Compare<T_IE, int64_t>(reinterpret_cast<const int64_t*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::bf16:
-        LayerTestsCommon::Compare<T_IE, ov::bfloat16>(reinterpret_cast<const ov::bfloat16*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::f16:
-        LayerTestsCommon::Compare<T_IE, ov::float16>(reinterpret_cast<const ov::float16*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::f32:
-        LayerTestsCommon::Compare<T_IE, float>(reinterpret_cast<const float*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::f64:
-        LayerTestsCommon::Compare<T_IE, double>(reinterpret_cast<const double*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    case ov::element::Type_t::i4: {
-        auto expectedOut =
-            ngraph::helpers::convertOutputPrecision(expected.second, expected.first, ov::element::Type_t::i8, size);
-        LayerTestsCommon::Compare<T_IE, int8_t>(reinterpret_cast<const int8_t*>(expectedOut.data()), actualBuffer, size, threshold, abs_threshold);
-        break;
-    }
-    case ov::element::Type_t::u4: {
-        auto expectedOut =
-            ngraph::helpers::convertOutputPrecision(expected.second, expected.first, ov::element::Type_t::u8, size);
-        LayerTestsCommon::Compare<T_IE, uint8_t>(reinterpret_cast<const uint8_t*>(expectedOut.data()), actualBuffer, size, threshold, abs_threshold);
-        break;
-    }
-    case ov::element::Type_t::dynamic:
-    case ov::element::Type_t::undefined:
-        LayerTestsCommon::Compare(reinterpret_cast<const T_IE*>(expectedBuffer), actualBuffer, size, threshold, abs_threshold);
-        break;
-    default:
-        FAIL() << "Comparator for " << expected.first << " precision isn't supported";
-    }
-    return;
-}
-
-void LayerTestsCommon::Compare(const std::pair<ov::element::Type, std::vector<std::uint8_t>>& expected,
-                               const InferenceEngine::Blob::Ptr& actual,
-                               float threshold,
-                               float abs_threshold) {
-    const auto &precision = actual->getTensorDesc().getPrecision();
-    auto k = static_cast<float>(expected.first.size()) / precision.size();
-    // W/A for int4, uint4
-    if (expected.first == ov::element::Type_t::u4 || expected.first == ov::element::Type_t::i4) {
-        k /= 2;
-    } else if (expected.first == ov::element::Type_t::undefined || expected.first == ov::element::Type_t::dynamic) {
-        k = 1;
-    }
-    ASSERT_EQ(expected.second.size(), actual->byteSize() * k);
-
-    auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(actual);
-    IE_ASSERT(memory);
-    const auto lockedMemory = memory->wmap();
-    const auto actualBuffer = lockedMemory.as<const std::uint8_t*>();
-
-    const auto &size = actual->size();
-    switch (precision) {
-    case InferenceEngine::Precision::BOOL:
-    case InferenceEngine::Precision::U8:
-        callCompare<uint8_t>(expected, reinterpret_cast<const uint8_t*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    case InferenceEngine::Precision::I8:
-        callCompare<int8_t>(expected, reinterpret_cast<const int8_t*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    case InferenceEngine::Precision::U16:
-        callCompare<uint16_t>(expected, reinterpret_cast<const uint16_t*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    case InferenceEngine::Precision::I16:
-        callCompare<int16_t>(expected, reinterpret_cast<const int16_t*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    case InferenceEngine::Precision::U32:
-        callCompare<uint32_t>(expected, reinterpret_cast<const uint32_t*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    case InferenceEngine::Precision::I32:
-        callCompare<int32_t>(expected, reinterpret_cast<const int32_t*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    case InferenceEngine::Precision::U64:
-        callCompare<uint64_t>(expected, reinterpret_cast<const uint64_t*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    case InferenceEngine::Precision::I64:
-        callCompare<int64_t>(expected, reinterpret_cast<const int64_t*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    case InferenceEngine::Precision::BF16:
-        callCompare<ov::bfloat16>(expected, reinterpret_cast<const ov::bfloat16*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    case InferenceEngine::Precision::FP16:
-        callCompare<ov::float16>(expected, reinterpret_cast<const ov::float16*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    case InferenceEngine::Precision::FP32:
-        callCompare<float>(expected, reinterpret_cast<const float*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    case InferenceEngine::Precision::FP64:
-        callCompare<double>(expected, reinterpret_cast<const double*>(actualBuffer), size, threshold, abs_threshold);
-        break;
-    default:
-        FAIL() << "Comparator for " << precision << " precision isn't supported";
-    }
-}
-
-void LayerTestsCommon::Compare(const std::pair<ov::element::Type, std::vector<std::uint8_t>>& expected,
-                               const InferenceEngine::Blob::Ptr& actual) {
-    Compare(expected, actual, threshold);
-}
-
-void LayerTestsCommon::Compare(const InferenceEngine::Blob::Ptr &expected, const InferenceEngine::Blob::Ptr &actual) {
-    auto get_raw_buffer = [](const InferenceEngine::Blob::Ptr &blob) {
-        auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
-        IE_ASSERT(memory);
-        const auto lockedMemory = memory->wmap();
-        return lockedMemory.as<const std::uint8_t*>();
-    };
-    const auto expectedBuffer = get_raw_buffer(expected);
-    const auto actualBuffer = get_raw_buffer(actual);
-
-    const auto &precision = actual->getTensorDesc().getPrecision();
-    const auto &size = actual->size();
-    switch (precision) {
-    case InferenceEngine::Precision::FP32:
-        Compare(reinterpret_cast<const float*>(expectedBuffer), reinterpret_cast<const float*>(actualBuffer),
-                size, threshold);
-        break;
-    case InferenceEngine::Precision::I32:
-        Compare(reinterpret_cast<const int32_t*>(expectedBuffer),
-                reinterpret_cast<const int32_t*>(actualBuffer), size, 0);
-        break;
-    case InferenceEngine::Precision::I16:
-        Compare(reinterpret_cast<const int16_t*>(expectedBuffer),
-                reinterpret_cast<const int16_t*>(actualBuffer), size, 0);
-        break;
-    case InferenceEngine::Precision::U8:
-        Compare(reinterpret_cast<const uint8_t*>(expectedBuffer),
-                reinterpret_cast<const uint8_t*>(actualBuffer), size, 0);
-        break;
-    default:
-        FAIL() << "Comparator for " << precision << " precision isn't supported";
-    }
-}
-
-void LayerTestsCommon::Compare(const InferenceEngine::TensorDesc &actualDesc, const InferenceEngine::TensorDesc &expectedDesc) {
-    auto expectedDims = actualDesc.getDims();
-    auto actualDims = expectedDesc.getDims();
-    ASSERT_EQ(actualDims.size(), expectedDims.size());
-    for (size_t j = 0; j < actualDims.size(); ++j) {
-        ASSERT_EQ(actualDims.at(j), expectedDims.at(j));
-    }
-    ASSERT_EQ(actualDesc.getLayout(), expectedDesc.getLayout());
-    ASSERT_EQ(actualDesc.getPrecision(), expectedDesc.getPrecision());
-}
-
-void LayerTestsCommon::ConfigureNetwork() {
-    for (const auto &in : cnnNetwork.getInputsInfo()) {
-        if (inLayout != InferenceEngine::Layout::ANY) {
-            in.second->setLayout(inLayout);
-        }
-        if (inPrc != InferenceEngine::Precision::UNSPECIFIED) {
-            in.second->setPrecision(inPrc);
-        }
-    }
-
-    for (const auto &out : cnnNetwork.getOutputsInfo()) {
-        if (outLayout != InferenceEngine::Layout::ANY) {
-            out.second->setLayout(outLayout);
-        }
-        if (outPrc != InferenceEngine::Precision::UNSPECIFIED) {
-            out.second->setPrecision(outPrc);
-        }
-    }
-}
-
-void LayerTestsCommon::LoadNetwork() {
-    cnnNetwork = InferenceEngine::CNNNetwork{function};
-    CoreConfiguration(this);
-    ConfigureNetwork();
-    executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice, configuration);
-}
-
-void LayerTestsCommon::ExpectLoadNetworkToThrow(const std::string& msg) {
-    std::string what;
-    try {
-        LoadNetwork();
-    } catch (const std::exception& e) {
-        what.assign(e.what());
-    }
-    EXPECT_STR_CONTAINS(what.c_str(), msg.c_str());
-}
-
-void LayerTestsCommon::GenerateInputs() {
-    inputs.clear();
-    const auto& inputsInfo = executableNetwork.GetInputsInfo();
-    const auto& functionParams = function->get_parameters();
-    for (int i = 0; i < functionParams.size(); ++i) {
-        const auto& param = functionParams[i];
-        const auto infoIt = inputsInfo.find(param->get_friendly_name());
-        GTEST_ASSERT_NE(infoIt, inputsInfo.cend());
-        InferenceEngine::InputInfo::CPtr info = infoIt->second;
-        InferenceEngine::Blob::Ptr blob = GenerateInput(*info);
-        inputs.push_back(blob);
-    }
-}
-
-void LayerTestsCommon::ConfigureInferRequest() {
-    const auto& inputsInfo = executableNetwork.GetInputsInfo();
-    const auto& functionParams = function->get_parameters();
-    for (int i = 0; i < functionParams.size(); ++i) {
-        const auto& param = functionParams[i];
-        const auto infoIt = inputsInfo.find(param->get_friendly_name());
-        GTEST_ASSERT_NE(infoIt, inputsInfo.cend());
-
-        const auto& info = infoIt->second;
-        auto blob = inputs[i];
-        inferRequest.SetBlob(info->name(), blob);
-    }
-}
-
-void LayerTestsCommon::Infer() {
-    inferRequest = executableNetwork.CreateInferRequest();
-
-    ConfigureInferRequest();
-
-    inferRequest.Infer();
-}
-
-std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>> LayerTestsCommon::CalculateRefs() {
-    return {};
-}
-
-std::vector<InferenceEngine::Blob::Ptr> LayerTestsCommon::GetOutputs() {
-    auto outputs = std::vector<InferenceEngine::Blob::Ptr>{};
-    for (const auto &output : executableNetwork.GetOutputsInfo()) {
-        const auto &name = output.first;
-        outputs.push_back(inferRequest.GetBlob(name));
-    }
-    return outputs;
-}
-
-void LayerTestsCommon::Compare(
-    const std::vector<std::pair<ov::element::Type, std::vector<std::uint8_t>>>& expectedOutputs,
-    const std::vector<InferenceEngine::Blob::Ptr>& actualOutputs) {
-    Compare(expectedOutputs, actualOutputs, threshold);
-}
-
-void LayerTestsCommon::Validate() {}
-
-std::shared_ptr<ov::Model> LayerTestsCommon::GetFunction() {
-    return function;
-}
-
-std::map<std::string, std::string> &LayerTestsCommon::GetConfiguration() {
-    return configuration;
-}
-
-}  // namespace LayerTestsUtils
diff --git a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp
index 845874a8a6bb49..6ff1d2a6e23a2b 100644
--- a/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp
+++ b/src/tests/functional/shared_test_classes/src/base/low_precision_transformations/layer_transformation.cpp
@@ -7,7 +7,6 @@
 #include
 #include
-#include
 
 #include "functional_test_utils/blob_utils.hpp"
 #include "ov_models/pass/convert_prc.hpp"
diff --git a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp
index c826e5ffcfc9b8..33b00c0a69e3af 100644
--- a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp
+++ b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp
@@ -96,10 +96,10 @@ void SubgraphBaseTest::run() {
             GTEST_FATAL_FAILURE_(errorMessage.c_str());
         }
     } else if (jmpRes == ov::test::utils::JMP_STATUS::anyError) {
-        IE_THROW() << "Crash happens";
+        OPENVINO_THROW("Crash happens");
     } else if (jmpRes == ov::test::utils::JMP_STATUS::alarmErr) {
         summary.updateOPsStats(function, ov::test::utils::PassRate::Statuses::HANGED, rel_influence_coef);
-        IE_THROW() << "Crash happens";
+        OPENVINO_THROW("Crash happens");
     }
 }
 
@@ -170,7 +170,7 @@ void SubgraphBaseTest::query_model() {
             actual.insert(res.first);
         }
         if (expected != actual) {
-            IE_THROW() << "Expected and actual are different";
+            OPENVINO_THROW("Expected and actual are different");
         }
         status = ov::test::utils::PassRate::Statuses::PASSED;
     } catch (const std::exception& ex) {
@@ -185,10 +185,10 @@ void SubgraphBaseTest::query_model() {
             GTEST_FATAL_FAILURE_(errorMessage.c_str());
         }
     } else if (jmpRes == ov::test::utils::JMP_STATUS::anyError) {
-        IE_THROW() << "Crash happens";
+        OPENVINO_THROW("Crash happens");
    } else if (jmpRes == ov::test::utils::JMP_STATUS::alarmErr) {
         summary.updateOPsStats(function, ov::test::utils::PassRate::Statuses::HANGED, rel_influence_coef);
-        IE_THROW() << "Crash happens";
+        OPENVINO_THROW("Crash happens");
     }
 }
 
@@ -247,10 +247,10 @@ void SubgraphBaseTest::import_export() {
             GTEST_FATAL_FAILURE_(errorMessage.c_str());
         }
     } else if (jmpRes == ov::test::utils::JMP_STATUS::anyError) {
-        IE_THROW() << "Crash happens";
happens"; + OPENVINO_THROW("Crash happens"); } } diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp index 6a982171cc6bcd..7514ab753ab100 100644 --- a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -491,7 +491,6 @@ ov::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { - InferenceEngine::Blob::Ptr blobPtr; switch (port) { case 0: { auto data_size = shape_size(targetShape); diff --git a/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp b/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp deleted file mode 100644 index 037978ae1c1cbc..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/activation.hpp" -#include "common_test_utils/node_builders/activation.hpp" - -namespace LayerTestsDefinitions { - -std::string ActivationLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::pair, std::vector> shapes; - std::string targetDevice; - std::pair> activationDecl; - std::tie(activationDecl, netPrecision, inPrc, outPrc, inLayout, outLayout, shapes, targetDevice) = obj.param; - - std::ostringstream result; - const char separator = '_'; - result << activationNames[activationDecl.first] << separator; - result << "IS=" << ov::test::utils::vec2str(shapes.first) << separator; - result << "AS=" << ov::test::utils::vec2str(shapes.second) << separator; - result << "ConstantsValue=" << ov::test::utils::vec2str(activationDecl.second) << separator; - result << "netPRC=" << netPrecision.name() << separator; - result << "inPRC=" << inPrc.name() << separator; - result << "outPRC=" << outPrc.name() << separator; - result << "inL=" << inLayout << separator; - result << "outL=" << outLayout << separator; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void ActivationLayerTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::pair, std::vector> shapes; - std::pair> activationDecl; - std::tie(activationDecl, netPrecision, inPrc, outPrc, inLayout, outLayout, shapes, targetDevice) = GetParam(); - - activationType = activationDecl.first; - auto constantsValue = activationDecl.second; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(shapes.first))}; - params[0]->set_friendly_name("Input"); - - if (activationType == ngraph::helpers::ActivationTypes::PReLu && constantsValue.empty()) { - const auto elemnts_count = ov::shape_size(shapes.second); - constantsValue.resize(elemnts_count); - std::iota(constantsValue.begin(), constantsValue.end(), -10); - } - - auto activation = ov::test::utils::make_activation(params[0], ngPrc, activationType, shapes.second, constantsValue); - - function = std::make_shared(ov::NodeVector{activation}, params); -} - -InferenceEngine::Blob::Ptr ActivationLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - bool inPrcSigned = 
-    int32_t data_start_from;
-    uint32_t data_range;
-    int32_t resolution;
-
-    switch (activationType) {
-    case ngraph::helpers::ActivationTypes::Log: {
-        data_start_from = 1;
-        data_range = 20;
-        resolution = 32768;
-        break;
-    }
-    case ngraph::helpers::ActivationTypes::Sqrt: {
-        data_start_from = 0;
-        data_range = 20;
-        resolution = 32768;
-        break;
-    }
-    case ngraph::helpers::ActivationTypes::Asin: {
-        data_start_from = -1;
-        data_range = 2;
-        resolution = 32768;
-        break;
-    }
-    case ngraph::helpers::ActivationTypes::Acos: {
-        data_start_from = -1;
-        data_range = 2;
-        resolution = 32768;
-        break;
-    }
-    case ngraph::helpers::ActivationTypes::Acosh: {
-        data_start_from = 1;
-        data_range = 200;
-        resolution = 32768;
-        break;
-    }
-    case ngraph::helpers::ActivationTypes::Atanh: {
-        data_start_from = -1;
-        data_range = 2;
-        resolution = 32768;
-        break;
-    }
-    case ngraph::helpers::ActivationTypes::Ceiling: {
-        data_start_from = -1000;
-        data_range = 2000;
-        resolution = 32768;
-        break;
-    }
-    case ngraph::helpers::ActivationTypes::RoundHalfToEven: {
-        data_start_from = -10;
-        data_range = 20;
-        resolution = 4;
-        break;
-    }
-    case ngraph::helpers::ActivationTypes::RoundHalfAwayFromZero: {
-        data_start_from = -10;
-        data_range = 20;
-        resolution = 4;
-        break;
-    }
-    case ngraph::helpers::ActivationTypes::Mish: {
-        data_start_from = -20;
-        data_range = 60;
-        resolution = 32768;
-        break;
-    }
-    case ngraph::helpers::ActivationTypes::SoftPlus: {
-        data_start_from = -100;
-        data_range = 200;
-        resolution = 32768;
-        break;
-    }
-    case ngraph::helpers::ActivationTypes::SoftSign: {
-        data_start_from = -100;
-        data_range = 200;
-        resolution = 32768;
-        break;
-    }
-    default: {
-        data_start_from = -10;
-        data_range = 20;
-        resolution = 32768;
-        break;
-    }
-    }
-    if (!inPrcSigned) {
-        data_range = 15;
-        data_start_from = 0;
-    }
-
-    return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), data_range,
-                                            data_start_from,
-                                            resolution);
-}
-
-ov::ParameterVector ActivationParamLayerTest::createActivationParams(ov::element::Type ngPrc, std::vector<size_t> inShape) {
-    switch (activationType) {
-    case ngraph::helpers::ActivationTypes::PReLu: {
-        ov::ParameterVector negativeSlopeParam {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inShape))};
-        negativeSlopeParam[0]->set_friendly_name("negativeSlope");
-        return negativeSlopeParam;
-    }
-    case ngraph::helpers::ActivationTypes::LeakyRelu: {
-        ov::ParameterVector leakySlopeParam {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inShape))};
-        leakySlopeParam[0]->set_friendly_name("leakySlope");
-        return leakySlopeParam;
-    }
-    case ngraph::helpers::ActivationTypes::HardSigmoid: {
-        ov::ParameterVector hardSigmoidParam {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inShape)),
-                                              std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inShape))};
-        hardSigmoidParam[0]->set_friendly_name("alpha");
-        hardSigmoidParam[1]->set_friendly_name("beta");
-        return hardSigmoidParam;
-    }
-    case ngraph::helpers::ActivationTypes::Selu: {
-        ov::ParameterVector seluParam {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inShape)),
-                                       std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inShape))};
-        seluParam[0]->set_friendly_name("alpha");
-        seluParam[1]->set_friendly_name("lambda");
-        return seluParam;
-    }
-    default:
-        IE_THROW() << "Unsupported activation type for Params test type";
-    }
-}
-
-InferenceEngine::Blob::Ptr ActivationParamLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    InferenceEngine::Blob::Ptr blobPtr;
-    const std::string& name = info.name();
-    if (name == "negativeSlope") {
-        const auto elemnts_count = ov::shape_size(function->get_parameters()[1]->get_shape());
elements_count = ov::shape_size(function->get_parameters()[1]->get_shape()); - std::vector<float> param_data(elements_count); - std::iota(param_data.begin(), param_data.end(), -10); - blobPtr = FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), &param_data[0], elements_count); - } else if (name == "leakySlope") { - const auto elements_count = ov::shape_size(function->get_parameters()[1]->get_shape()); - std::vector<float> param_data(elements_count, constantsValue[0]); - blobPtr = FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), &param_data[0], elements_count); - } else if (name == "alpha") { - blobPtr = FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), &constantsValue[0], 1); - } else if (name == "beta" || name == "lambda") { - blobPtr = FuncTestUtils::createAndFillBlobWithFloatArray(info.getTensorDesc(), &constantsValue[1], 1); - } else { - blobPtr = FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 20, -10, 1); - } - return blobPtr; -} - -void ActivationParamLayerTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::pair<std::vector<size_t>, std::vector<size_t>> shapes; - std::pair<ngraph::helpers::ActivationTypes, std::vector<float>> activationDecl; - std::tie(activationDecl, netPrecision, inPrc, outPrc, inLayout, outLayout, shapes, targetDevice) = GetParam(); - - activationType = activationDecl.first; - constantsValue = activationDecl.second; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(shapes.first))}; - auto activationParams = createActivationParams(ngPrc, shapes.second); - - params[0]->set_friendly_name("Input"); - params.insert(params.end(), activationParams.begin(), activationParams.end()); - - auto activation = ov::test::utils::make_activation(params, ngPrc, activationType); - ov::ResultVector results{std::make_shared<ov::op::v0::Result>(activation)}; - function = std::make_shared<ov::Model>(results, params); -} - -void ActivationDynamicLayerTest::Run() { - const auto& params = function->get_parameters(); - ov::PartialShape output_shape; - - // make each parameter dimension dynamic with range {1 ..
prev_dim * 2} - for (const auto& parameter : params) { - auto& dynamic_pshape = parameter->get_partial_shape(); - OPENVINO_ASSERT(dynamic_pshape.rank().is_static(), - "tests are not prepared to work with dynamically ranked inputs"); - for (size_t i = 0; i < dynamic_pshape.rank().get_length(); ++i) { - if (static_dims.count(i)) - continue; - dynamic_pshape[i] = {1, dynamic_pshape[i].get_max_length() * 2}; - } - parameter->set_partial_shape(dynamic_pshape); - if (parameter->get_friendly_name() == "Input") - output_shape = dynamic_pshape; - } - function->validate_nodes_and_infer_types(); - - const auto& results = function->get_results(); - OPENVINO_ASSERT(results.size() == 1); - ASSERT_EQ(results[0]->get_output_partial_shape(0), output_shape); - // no inference and checks are done here -- just shape check because we miss CNNNetwork functionality - // to handle dynamic inputs-outputs and test functionality to generate blob of a certain shape -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp deleted file mode 100644 index ce8656427aea4b..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/constant.hpp" -#include "shared_test_classes/single_layer/adaptive_pooling.hpp" - -using namespace InferenceEngine; -using namespace FuncTestUtils::PrecisionUtils; - -namespace LayerTestsDefinitions { - -std::string AdaPoolLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector inputShape; - std::vector pooledSpatialShape; - - std::string poolingMode; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(inputShape, pooledSpatialShape, poolingMode, netPrecision, targetDevice) = obj.param; - - std::ostringstream result; - - result << "in_shape=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "pooled_spatial_shape=" << ov::test::utils::vec2str(pooledSpatialShape) << "_"; - result << "mode=" << poolingMode << "_"; - result << "prec=" << netPrecision.name() << "_"; - result << "dev=" << targetDevice; - return result.str(); -} - -void AdaPoolLayerTest::SetUp() { - std::vector inputShape; - std::vector pooledSpatialShape; - std::string poolingMode; - InferenceEngine::Precision netPrecision; - std::tie(inputShape, pooledSpatialShape, poolingMode, netPrecision, targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - ov::Shape pooledShape = {pooledSpatialShape.size() }; - auto pooledParam = ov::test::utils::deprecated::make_constant(ov::element::i32, pooledShape, pooledSpatialShape); - - // we cannot create abstract Op to use polymorphism - auto adapoolMax = std::make_shared(params[0], pooledParam, ov::element::i32); - auto adapoolAvg = std::make_shared(params[0], pooledParam); - - function = (poolingMode == "max" ? 
std::make_shared(adapoolMax->outputs(), params, "AdaPoolMax") : - std::make_shared(adapoolAvg->outputs(), params, "AdaPoolAvg")); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/batch_norm.cpp b/src/tests/functional/shared_test_classes/src/single_layer/batch_norm.cpp deleted file mode 100644 index 2f5e1ad8787010..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/batch_norm.cpp +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/batch_norm.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace LayerTestsDefinitions { -std::string BatchNormLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - double epsilon; - std::string targetDevice; - std::tie(epsilon, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "epsilon=" << epsilon << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -InferenceEngine::Blob::Ptr BatchNormLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - return FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), 3, 0, 1); -} - -void BatchNormLayerTest::SetUp() { - InferenceEngine::Precision netPrecision; - InferenceEngine::SizeVector inputShapes; - double epsilon; - std::tie(epsilon, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShapes))}; - - size_t C = inputShapes.at(1); - bool random = true; - std::vector values(C); - auto gamma = ov::test::utils::deprecated::make_constant(ngPrc, ov::Shape{C}, values, random, 1.f, 0.f); - auto beta = ov::test::utils::deprecated::make_constant(ngPrc, ov::Shape{C}, values, random, 1.f, 0.f); - auto mean = ov::test::utils::deprecated::make_constant(ngPrc, ov::Shape{C}, values, random, 1.f, 0.f); - - // Fill the vector for variance with positive values - std::default_random_engine gen; - std::uniform_real_distribution dis(0.0, 10.0); - std::generate(values.begin(), values.end(), [&dis, &gen]() { - return dis(gen); - }); - auto variance = ov::test::utils::deprecated::make_constant(ngPrc, ov::Shape{C}, values, !random); - auto batchNorm = std::make_shared(params[0], gamma, beta, mean, variance, epsilon); - - ov::ResultVector results{std::make_shared(batchNorm)}; - function = std::make_shared(results, params, "BatchNormInference"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp b/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp deleted file mode 100644 index 3c18b64f4e976b..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp +++ /dev/null @@ -1,46 +0,0 @@ 
-// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include "shared_test_classes/single_layer/batch_to_space.hpp" - -namespace LayerTestsDefinitions { - -std::string BatchToSpaceLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector inShapes; - std::vector blockShape, cropsBegin, cropsEnd; - InferenceEngine::Precision netPrc; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::tie(blockShape, cropsBegin, cropsEnd, inShapes, netPrc, inPrc, outPrc, inLayout, outLayout, targetName) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inShapes) << "_"; - result << "netPRC=" << netPrc.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "BS=" << ov::test::utils::vec2str(blockShape) << "_"; - result << "CB=" << ov::test::utils::vec2str(cropsBegin) << "_"; - result << "CE=" << ov::test::utils::vec2str(cropsEnd) << "_"; - result << "trgDev=" << targetName << "_"; - return result.str(); -} - -void BatchToSpaceLayerTest::SetUp() { - std::vector inputShape; - std::vector blockShape, cropsBegin, cropsEnd; - InferenceEngine::Precision netPrecision; - std::tie(blockShape, cropsBegin, cropsEnd, inputShape, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - OPENVINO_SUPPRESS_DEPRECATED_START - auto b2s = ngraph::builder::makeBatchToSpace(params[0], ngPrc, blockShape, cropsBegin, cropsEnd); - OPENVINO_SUPPRESS_DEPRECATED_END - ov::ResultVector results{std::make_shared(b2s)}; - function = std::make_shared(results, params, "BatchToSpace"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp deleted file mode 100644 index 6c6e00ffddfd81..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/binary_convolution.hpp" - -namespace LayerTestsDefinitions { - -std::string BinaryConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - binConvSpecificParams binConvParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShape; - std::string targetDevice; - - std::tie(binConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = obj.param; - - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd; - size_t convOutChannels; - float padValue; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, padValue) = binConvParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "KS=" << ov::test::utils::vec2str(kernel) << "_"; - result << "S=" << ov::test::utils::vec2str(stride) << "_"; - result << "PB=" << 
ov::test::utils::vec2str(padBegin) << "_"; - result << "PE=" << ov::test::utils::vec2str(padEnd) << "_"; - result << "D=" << ov::test::utils::vec2str(dilation) << "_"; - result << "O=" << convOutChannels << "_"; - result << "AP=" << padType << "_"; - result << "PV=" << padValue << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -InferenceEngine::Blob::Ptr BinaryConvolutionLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - InferenceEngine::Blob::Ptr blobPtr; - const std::string name = info.name(); - // there is no input generation for filters since CPU implementation uses Constant - // TODO: enable filters input generation as Parameter when supported (Issue 50148) - if (name == "a_data_batch") { - blobPtr = FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 1, 0, 1, 7235346); - } - return blobPtr; -} - -void BinaryConvolutionLayerTest::SetUp() { - binConvSpecificParams binConvParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::SizeVector inputShape; - - std::tie(binConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = - this->GetParam(); - - ov::op::PadType padType; - InferenceEngine::SizeVector kernelSize, strides, dilations; - std::vector padsBegin, padsEnd; - size_t numOutChannels; - float padValue; - std::tie(kernelSize, strides, padsBegin, padsEnd, dilations, numOutChannels, padType, padValue) = binConvParams; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - params[0]->set_friendly_name("a_data_batch"); - - // TODO: refactor build BinaryConvolution op to accept filters input as Parameter - auto binConv = ngraph::builder::makeBinaryConvolution(params[0], kernelSize, strides, padsBegin, padsEnd, dilations, padType, numOutChannels, - padValue); - ov::ResultVector results{std::make_shared(binConv)}; - function = std::make_shared(results, params, "BinaryConvolution"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/broadcast.cpp b/src/tests/functional/shared_test_classes/src/single_layer/broadcast.cpp deleted file mode 100644 index 17dc2b6f389ff9..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/broadcast.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/broadcast.hpp" - -#include "ov_models/builders.hpp" - -namespace LayerTestsDefinitions { -std::string BroadcastLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::SizeVector targetShape; - ov::AxisSet axesMapping; - ov::op::BroadcastType mode; - InferenceEngine::SizeVector inputShape; - InferenceEngine::Precision networkPrecision; - std::string deviceName; - std::tie(targetShape, axesMapping, mode, inputShape, networkPrecision, deviceName) = obj.param; - - std::ostringstream result; - result << "targetShape=" << ov::test::utils::vec2str(targetShape) << "_"; - result << "axesMapping=" << ov::test::utils::set2str(axesMapping) << "_"; - result << "mode=" << mode << "_"; - result << "inShape=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "inNPrec=" << 
networkPrecision << "_"; - result << "trgDev=" << deviceName; - return result.str(); -} - -void BroadcastLayerTest::SetUp() { - InferenceEngine::SizeVector targetShape; - ov::AxisSet axesMapping; - ov::op::BroadcastType mode; - InferenceEngine::SizeVector inputShape; - InferenceEngine::Precision networkPrecision; - std::tie(targetShape, axesMapping, mode, inputShape, networkPrecision, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(networkPrecision); - - auto target_shape_const = ov::op::v0::Constant::create(ov::element::i64, {targetShape.size()}, targetShape); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - std::shared_ptr broadcast; - if (mode == ov::op::BroadcastType::NONE) { - auto axisSetConst = ov::op::v0::Constant::create(ov::element::i64, {axesMapping.size()}, axesMapping.to_vector()); - broadcast = std::make_shared(params[0], target_shape_const, axisSetConst, mode); - } else { // numpy/bidirectional modes - broadcast = std::make_shared(params[0], target_shape_const, mode); - } - ov::ResultVector results{std::make_shared(broadcast)}; - function = std::make_shared(results, params, "BroadcastInference"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp b/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp deleted file mode 100644 index 74b0e8ec2c824d..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/bucketize.hpp" - -namespace LayerTestsDefinitions { - std::string BucketizeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::SizeVector dataShape; - InferenceEngine::SizeVector bucketsShape; - bool with_right_bound; - InferenceEngine::Precision inDataPrc; - InferenceEngine::Precision inBucketsPrc; - InferenceEngine::Precision netPrc; - std::string targetDevice; - - std::tie(dataShape, bucketsShape, with_right_bound, inDataPrc, inBucketsPrc, netPrc, targetDevice) = obj.param; - - std::ostringstream result; - result << "DS=" << ov::test::utils::vec2str(dataShape) << "_"; - result << "BS=" << ov::test::utils::vec2str(bucketsShape) << "_"; - if (with_right_bound) - result << "rightIntervalEdge_"; - else - result << "leftIntervalEdge_"; - result << "inDataPrc=" << inDataPrc.name() << "_"; - result << "inBucketsPrc=" << inBucketsPrc.name() << "_"; - result << "netPrc=" << netPrc.name() << "_"; - result << "trgDev=" << targetDevice; - return result.str(); - } - - InferenceEngine::Blob::Ptr BucketizeLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - InferenceEngine::Blob::Ptr blobPtr; - const std::string name = info.name(); - if (name == "a_data") { - auto data_shape = info.getTensorDesc().getDims(); - auto data_size = std::accumulate(begin(data_shape), end(data_shape), 1, std::multiplies()); - blobPtr = FuncTestUtils::createAndFillBlob(info.getTensorDesc(), data_size * 5, 0, 10, 7235346); - } else if (name == "b_buckets") { - blobPtr = FuncTestUtils::createAndFillBlobUniqueSequence(info.getTensorDesc(), 0, 10, 8234231); - } - return blobPtr; - } - - void BucketizeLayerTest::SetUp() { - InferenceEngine::SizeVector dataShape; - InferenceEngine::SizeVector bucketsShape; - bool with_right_bound; - InferenceEngine::Precision inDataPrc; - 
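- // Note: the data tensor and the bucket-boundary tensor carry separate precisions (inDataPrc / inBucketsPrc), so mixed-precision Bucketize configurations are exercised as well.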
InferenceEngine::Precision inBucketsPrc; - InferenceEngine::Precision netPrc; - - std::tie(dataShape, bucketsShape, with_right_bound, inDataPrc, inBucketsPrc, netPrc, targetDevice) = this->GetParam(); - - auto ngInDataPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inDataPrc); - auto ngInBucketsPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inBucketsPrc); - auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc); - auto data = std::make_shared(ngInDataPrc, ov::Shape(dataShape)); - data->set_friendly_name("a_data"); - auto buckets = std::make_shared(ngInBucketsPrc, ov::Shape(bucketsShape)); - buckets->set_friendly_name("b_buckets"); - auto bucketize = std::make_shared(data, buckets, ngNetPrc, with_right_bound); - function = std::make_shared(std::make_shared(bucketize), ov::ParameterVector{data, buckets}, "Bucketize"); - } -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/clamp.cpp b/src/tests/functional/shared_test_classes/src/single_layer/clamp.cpp deleted file mode 100644 index 6088bef35b3bdf..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/clamp.cpp +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/clamp.hpp" - -namespace LayerTestsDefinitions { - -std::string ClampLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::SizeVector inShape; - std::pair interval; - InferenceEngine::Precision netPrc; - std::string targetDevice; - - std::tie(inShape, interval, netPrc, targetDevice) = obj.param; - - std::ostringstream result; - result << "inShape=" << ov::test::utils::vec2str(inShape) << "_"; - result << "min=" << interval.first << "_"; - result << "max=" << interval.second << "_"; - result << "netPrc=" << netPrc.name() << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void ClampLayerTest::SetUp() { - InferenceEngine::SizeVector inShape; - std::pair interval; - InferenceEngine::Precision netPrc; - - std::tie(inShape, interval, netPrc, targetDevice) = this->GetParam(); - - auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc); - auto input = std::make_shared(ngNetPrc, ov::Shape(inShape)); - auto clamp = std::make_shared(input, interval.first, interval.second); - function = std::make_shared(std::make_shared(clamp), ov::ParameterVector{input}); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/comparison.cpp b/src/tests/functional/shared_test_classes/src/single_layer/comparison.cpp deleted file mode 100644 index 97057a6b6e9f0b..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/comparison.cpp +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include "shared_test_classes/single_layer/comparison.hpp" - -using namespace LayerTestsDefinitions::ComparisonParams; -using namespace ngraph::helpers; - -namespace LayerTestsDefinitions { -std::string ComparisonLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - InputShapesTuple inputShapes; - InferenceEngine::Precision ngInputsPrecision; - ComparisonTypes comparisonOpType; - InputLayerType secondInputType; - InferenceEngine::Precision ieInPrecision; - InferenceEngine::Precision ieOutPrecision; - std::string targetName; - 
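- // additional_config carries plugin-specific options; SetUp() forwards it into the device configuration, and it is not serialized into the test-case name below.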
std::map<std::string, std::string> additional_config; - std::tie(inputShapes, - ngInputsPrecision, - comparisonOpType, - secondInputType, - ieInPrecision, - ieOutPrecision, - targetName, - additional_config) = obj.param; - std::ostringstream results; - - results << "IS0=" << ov::test::utils::vec2str(inputShapes.first) << "_"; - results << "IS1=" << ov::test::utils::vec2str(inputShapes.second) << "_"; - results << "inputsPRC=" << ngInputsPrecision.name() << "_"; - results << "comparisonOpType=" << comparisonOpType << "_"; - results << "secondInputType=" << secondInputType << "_"; - if (ieInPrecision != InferenceEngine::Precision::UNSPECIFIED) { - results << "IEInPRC=" << ieInPrecision.name() << "_"; - } - if (ieOutPrecision != InferenceEngine::Precision::UNSPECIFIED) { - results << "IEOutPRC=" << ieOutPrecision.name() << "_"; - } - results << "targetDevice=" << targetName; - return results.str(); -} - -void ComparisonLayerTest::SetUp() { - InputShapesTuple inputShapes; - InferenceEngine::Precision ngInputsPrecision; - InputLayerType secondInputType; - InferenceEngine::Precision ieInPrecision; - InferenceEngine::Precision ieOutPrecision; - std::string targetName; - std::map<std::string, std::string> additional_config; - std::tie(inputShapes, - ngInputsPrecision, - comparisonOpType, - secondInputType, - ieInPrecision, - ieOutPrecision, - targetDevice, - additional_config) = this->GetParam(); - - auto ngInputsPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(ngInputsPrecision); - configuration.insert(additional_config.begin(), additional_config.end()); - - inPrc = ieInPrecision; - outPrc = ieOutPrecision; - - ov::ParameterVector inputs {std::make_shared<ov::op::v0::Parameter>(ngInputsPrc, ov::Shape(inputShapes.first))}; - - OPENVINO_SUPPRESS_DEPRECATED_START - auto secondInput = ngraph::builder::makeInputLayer(ngInputsPrc, secondInputType, inputShapes.second); - OPENVINO_SUPPRESS_DEPRECATED_END - if (secondInputType == InputLayerType::PARAMETER) { - inputs.push_back(std::dynamic_pointer_cast<ov::op::v0::Parameter>(secondInput)); - } - - auto comparisonNode = ngraph::builder::makeComparison(inputs[0], secondInput, comparisonOpType); - function = std::make_shared<ov::Model>(comparisonNode, inputs, "Comparison"); -} - -InferenceEngine::Blob::Ptr ComparisonLayerTest::GenerateInput(const InferenceEngine::InputInfo &inputInfo) const { - InferenceEngine::Blob::Ptr blob; - - if (comparisonOpType == ComparisonTypes::IS_FINITE || comparisonOpType == ComparisonTypes::IS_NAN) { - blob = make_blob_with_precision(inputInfo.getTensorDesc()); - blob->allocate(); - auto dataPtr = blob->buffer().as<float*>(); - auto dataPtrInt = blob->buffer().as<int32_t*>(); - const auto range = blob->size(); - const float start = -static_cast<float>(range) / 2.f; - testing::internal::Random random(1); - - for (size_t i = 0; i < range; i++) { - if (i % 7 == 0) { - dataPtr[i] = std::numeric_limits<float>::infinity(); - } else if (i % 7 == 1) { - dataPtr[i] = -std::numeric_limits<float>::infinity(); - } else if (i % 7 == 2) { - dataPtrInt[i] = 0x7F800000 + random.Generate(range); - } else if (i % 7 == 3) { - dataPtr[i] = std::numeric_limits<float>::quiet_NaN(); - } else if (i % 7 == 5) { - dataPtr[i] = -std::numeric_limits<float>::quiet_NaN(); - } else { - dataPtr[i] = start + static_cast<float>(random.Generate(range)); - } - } - } else { - blob = LayerTestsUtils::LayerTestsCommon::GenerateInput(inputInfo); - } - - return blob; -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/concat.cpp b/src/tests/functional/shared_test_classes/src/single_layer/concat.cpp deleted file mode 100644 index ce237c8b0d7c67..00000000000000 ---
a/src/tests/functional/shared_test_classes/src/single_layer/concat.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/concat.hpp" - -namespace LayerTestsDefinitions { - -std::string ConcatLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - int axis; - std::vector> inputShapes; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::tie(axis, inputShapes, netPrecision, inPrc, outPrc, inLayout, outLayout, targetName) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "axis=" << axis << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetName; - return result.str(); -} - -void ConcatLayerTest::SetUp() { - int axis; - std::vector> inputShape; - InferenceEngine::Precision netPrecision; - std::tie(axis, inputShape, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params; - ov::OutputVector paramsOuts; - for (auto&& shape : inputShape) { - auto param = std::make_shared(ngPrc, ov::Shape(shape)); - params.push_back(param); - paramsOuts.push_back(param); - } - auto concat = std::make_shared(paramsOuts, axis); - ov::ResultVector results{std::make_shared(concat)}; - function = std::make_shared(results, params, "concat"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/constant.cpp b/src/tests/functional/shared_test_classes/src/single_layer/constant.cpp deleted file mode 100644 index 984ce96bddf729..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/constant.cpp +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/constant.hpp" - -namespace LayerTestsDefinitions { -namespace { -template -std::vector getElements(const std::vector& v) { - const auto new_size = std::min(N, v.size()); - return {begin(v), std::next(begin(v), new_size)}; -} -} // namespace - -std::string ConstantLayerTest::getTestCaseName( - const testing::TestParamInfo& obj) { - std::vector data_shape; - InferenceEngine::Precision data_precision; - std::vector data_elements; - std::string targetName; - - std::tie(data_shape, data_precision, data_elements, targetName) = obj.param; - - std::ostringstream result; - result << "S=" << ov::test::utils::vec2str(data_shape) << "_"; - result << "dataPRC=" << data_precision.name() << "_"; - result << "dataValue=" << ov::test::utils::vec2str(getElements<5>(data_elements)) << "_"; - return result.str(); -} - -void ConstantLayerTest::SetUp() { - std::vector data_shape; - InferenceEngine::Precision data_precision; - std::vector data_elements; - - std::tie(data_shape, data_precision, data_elements, targetDevice) = this->GetParam(); - - const auto precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(data_precision); - auto constant = ov::op::v0::Constant::create(precision, data_shape, data_elements); - ov::ResultVector 
results{std::make_shared(constant)}; - - function = std::make_shared(results, ov::ParameterVector{}, "constant"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/conversion.cpp b/src/tests/functional/shared_test_classes/src/single_layer/conversion.cpp deleted file mode 100644 index d8fbdfeb1a58ec..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/conversion.cpp +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/conversion.hpp" - -#include "ov_models/builders.hpp" - -namespace LayerTestsDefinitions { - -std::string ConversionLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - ngraph::helpers::ConversionTypes conversionOpType; - InferenceEngine::Precision inputPrecision, targetPrecision; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::vector> inputShape; - std::tie(conversionOpType, inputShape, inputPrecision, targetPrecision, inLayout, outLayout, targetName) = - obj.param; - std::ostringstream result; - result << "conversionOpType=" << conversionNames[conversionOpType] << "_"; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "inputPRC=" << inputPrecision.name() << "_"; - result << "targetPRC=" << targetPrecision.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetName; - return result.str(); -} - -void ConversionLayerTest::SetUp() { - if (ov::test::utils::current_test_is_disabled()) { - GTEST_SKIP() << "Disabled test due to configuration" << std::endl; - } - ngraph::helpers::ConversionTypes conversionOpType; - InferenceEngine::Precision inputPrecision, targetPrecision; - std::vector> inputShape; - std::tie(conversionOpType, inputShape, inputPrecision, targetPrecision, inLayout, outLayout, targetDevice) = - GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - auto targetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(targetPrecision); - ov::ParameterVector params; - for (auto&& shape : inputShape) { - params.push_back(std::make_shared(ngPrc, ov::Shape(shape))); - } - std::shared_ptr conversion; - if (ov::test::utils::ConversionTypes::CONVERT == conversionOpType) { - conversion = std::make_shared(params.front(), targetPrc); - } else { - const auto like = std::make_shared(targetPrc, ov::Shape{1}); - conversion = std::make_shared(params.front(), like); - } - ov::ResultVector results{std::make_shared(conversion)}; - function = std::make_shared(results, params, "Conversion"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/convert_color_i420.cpp b/src/tests/functional/shared_test_classes/src/single_layer/convert_color_i420.cpp deleted file mode 100644 index a952493b9b4748..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/convert_color_i420.cpp +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/convert_color_i420.hpp" -#include "openvino/op/i420_to_rgb.hpp" -#include "openvino/op/i420_to_bgr.hpp" - -namespace LayerTestsDefinitions { - -std::string ConvertColorI420LayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - ov::Shape inputShape; - ov::element::Type type; - bool 
conversion, singlePlane; - std::string targetName; - std::tie(inputShape, type, conversion, singlePlane, targetName) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "netPRC=" << type.c_type_string() << "_"; - result << "convRGB=" << conversion << "_"; - result << "singlePlane=" << singlePlane << "_"; - result << "targetDevice=" << targetName; - return result.str(); -} - -void ConvertColorI420LayerTest::SetUp() { - ov::Shape inputShape; - ov::element::Type ngPrc; - bool conversionToRGB, singlePlane; - abs_threshold = 1.0f; // I420 conversion can use various algorithms, thus some absolute deviation is allowed - threshold = 1.f; // Ignore relative comparison for I420 convert (allow 100% relative deviation) - std::tie(inputShape, ngPrc, conversionToRGB, singlePlane, targetDevice) = GetParam(); - if (singlePlane) { - inputShape[1] = inputShape[1] * 3 / 2; - auto param = std::make_shared(ngPrc, inputShape); - std::shared_ptr convert_color; - if (conversionToRGB) { - convert_color = std::make_shared(param); - } else { - convert_color = std::make_shared(param); - } - function = std::make_shared(std::make_shared(convert_color), - ov::ParameterVector{param}, "ConvertColorI420"); - } else { - auto uvShape = ov::Shape{inputShape[0], inputShape[1] / 2, inputShape[2] / 2, 1}; - auto param_y = std::make_shared(ngPrc, inputShape); - auto param_u = std::make_shared(ngPrc, uvShape); - auto param_v = std::make_shared(ngPrc, uvShape); - std::shared_ptr convert_color; - if (conversionToRGB) { - convert_color = std::make_shared(param_y, param_u, param_v); - } else { - convert_color = std::make_shared(param_y, param_u, param_v); - } - function = std::make_shared(std::make_shared(convert_color), - ov::ParameterVector{param_y, param_u, param_v}, - "ConvertColorI420"); - } -} - -// -------- Accuracy test (R/G/B combinations) -------- - -void ConvertColorI420AccuracyTest::GenerateInputs() { - inputs.clear(); - const auto& inputsInfo = executableNetwork.GetInputsInfo(); - const auto& functionParams = function->get_parameters(); - for (const auto& param : functionParams) { - const auto infoIt = inputsInfo.find(param->get_friendly_name()); - GTEST_ASSERT_NE(infoIt, inputsInfo.cend()); - InferenceEngine::InputInfo::CPtr info = infoIt->second; - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info->getTensorDesc()); - blob->allocate(); - size_t full_height = param->get_shape()[1]; - size_t full_width = param->get_shape()[2]; - int b_dim = static_cast(full_height * 2 / (3 * full_width)); - ASSERT_GT(b_dim, 1) << "Image height is invalid for I420 Accuracy test"; - ASSERT_EQ(255 % (b_dim - 1), 0) << "Image height is invalid for I420 Accuracy test"; - int b_step = 255 / (b_dim - 1); - auto input_image = I420TestUtils::color_test_image(full_width, full_width, b_step); - auto* rawBlobDataPtr = blob->buffer().as(); - for (size_t j = 0; j < input_image.size(); ++j) { - rawBlobDataPtr[j] = input_image[j]; - } - - inputs.push_back(blob); - } -} - -void ConvertColorI420AccuracyTest::Validate() { - ConvertColorI420LayerTest::Validate(); - - ASSERT_FALSE(expected_output.empty()); - ASSERT_TRUE(actual_output); - auto memory = InferenceEngine::as(actual_output); - const auto lockedMemory = memory->wmap(); - const auto* actualBuffer = lockedMemory.as(); - - // Allow less than 2% of deviations with 1 color step. 
2% is experimental value - // For different calculation methods - 1.4% deviation is observed - I420TestUtils::ValidateColors(expected_output.data(), actualBuffer, expected_output.size(), 0.02); -} - -std::vector>> ConvertColorI420AccuracyTest::CalculateRefs() { - auto refs = ConvertColorI420LayerTest::CalculateRefs(); - if (!refs.empty()) { - auto out = refs[0].second; - expected_output.reserve(out.size()); - for (auto val : out) { - expected_output.push_back(val); - } - } - return refs; -} - -std::vector ConvertColorI420AccuracyTest::GetOutputs() { - auto outputs = ConvertColorI420LayerTest::GetOutputs(); - if (!outputs.empty()) { - actual_output = InferenceEngine::Blob::Ptr(outputs[0]); - } - return outputs; -} - - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/convert_color_nv12.cpp b/src/tests/functional/shared_test_classes/src/single_layer/convert_color_nv12.cpp deleted file mode 100644 index 4155d3777ec79f..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/convert_color_nv12.cpp +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/convert_color_nv12.hpp" -#include "openvino/op/nv12_to_rgb.hpp" -#include "openvino/op/nv12_to_bgr.hpp" - -namespace LayerTestsDefinitions { - -std::string ConvertColorNV12LayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - ov::Shape inputShape; - ov::element::Type type; - bool conversion, singlePlane; - std::string targetName; - std::tie(inputShape, type, conversion, singlePlane, targetName) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "netPRC=" << type.c_type_string() << "_"; - result << "convRGB=" << conversion << "_"; - result << "singlePlane=" << singlePlane << "_"; - result << "targetDevice=" << targetName; - return result.str(); -} - -void ConvertColorNV12LayerTest::SetUp() { - ov::Shape inputShape; - ov::element::Type ngPrc; - bool conversionToRGB, singlePlane; - abs_threshold = 1.0f; // NV12 conversion can use various algorithms, thus some absolute deviation is allowed - threshold = 1.f; // Ignore relative comparison for NV12 convert (allow 100% relative deviation) - std::tie(inputShape, ngPrc, conversionToRGB, singlePlane, targetDevice) = GetParam(); - if (singlePlane) { - inputShape[1] = inputShape[1] * 3 / 2; - auto param = std::make_shared(ngPrc, inputShape); - std::shared_ptr convert_color; - if (conversionToRGB) { - convert_color = std::make_shared(param); - } else { - convert_color = std::make_shared(param); - } - function = std::make_shared(std::make_shared(convert_color), - ov::ParameterVector{param}, "ConvertColorNV12"); - } else { - auto uvShape = ov::Shape{inputShape[0], inputShape[1] / 2, inputShape[2] / 2, 2}; - auto param_y = std::make_shared(ngPrc, inputShape); - auto param_uv = std::make_shared(ngPrc, uvShape); - std::shared_ptr convert_color; - if (conversionToRGB) { - convert_color = std::make_shared(param_y, param_uv); - } else { - convert_color = std::make_shared(param_y, param_uv); - } - function = std::make_shared(std::make_shared(convert_color), - ov::ParameterVector{param_y, param_uv}, "ConvertColorNV12"); - } -} - -// -------- Accuracy test (R/G/B combinations) -------- - -void ConvertColorNV12AccuracyTest::GenerateInputs() { - inputs.clear(); - const auto& inputsInfo = executableNetwork.GetInputsInfo(); - const auto& functionParams 
= function->get_parameters(); - for (const auto& param : functionParams) { - const auto infoIt = inputsInfo.find(param->get_friendly_name()); - GTEST_ASSERT_NE(infoIt, inputsInfo.cend()); - InferenceEngine::InputInfo::CPtr info = infoIt->second; - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info->getTensorDesc()); - blob->allocate(); - size_t full_height = param->get_shape()[1]; - size_t full_width = param->get_shape()[2]; - int b_dim = static_cast(full_height * 2 / (3 * full_width)); - ASSERT_GT(b_dim, 1) << "Image height is invalid for NV12 Accuracy test"; - ASSERT_EQ(255 % (b_dim - 1), 0) << "Image height is invalid for NV12 Accuracy test"; - int b_step = 255 / (b_dim - 1); - auto input_image = NV12TestUtils::color_test_image(full_width, full_width, b_step); - auto* rawBlobDataPtr = blob->buffer().as(); - for (size_t j = 0; j < input_image.size(); ++j) { - rawBlobDataPtr[j] = input_image[j]; - } - - inputs.push_back(blob); - } -} - -void ConvertColorNV12AccuracyTest::Validate() { - ConvertColorNV12LayerTest::Validate(); - - ASSERT_FALSE(expected_output.empty()); - ASSERT_TRUE(actual_output); - auto memory = InferenceEngine::as(actual_output); - const auto lockedMemory = memory->wmap(); - const auto* actualBuffer = lockedMemory.as(); - - // Allow less than 2% of deviations with 1 color step. 2% is experimental value - // For different calculation methods - 1.4% deviation is observed - NV12TestUtils::ValidateColors(expected_output.data(), actualBuffer, expected_output.size(), 0.02); -} - -std::vector>> ConvertColorNV12AccuracyTest::CalculateRefs() { - auto refs = ConvertColorNV12LayerTest::CalculateRefs(); - if (!refs.empty()) { - auto out = refs[0].second; - expected_output.reserve(out.size()); - for (auto val : out) { - expected_output.push_back(val); - } - } - return refs; -} - -std::vector ConvertColorNV12AccuracyTest::GetOutputs() { - auto outputs = ConvertColorNV12LayerTest::GetOutputs(); - if (!outputs.empty()) { - actual_output = InferenceEngine::Blob::Ptr(outputs[0]); - } - return outputs; -} - - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/convolution.cpp deleted file mode 100644 index fe264058f3f7b3..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/convolution.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/convolution.hpp" - -namespace LayerTestsDefinitions { - -std::string ConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - convSpecificParams convParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = - obj.param; - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd; - size_t convOutChannels; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "K" << ov::test::utils::vec2str(kernel) << "_"; - result << "S" << ov::test::utils::vec2str(stride) << "_"; - result << "PB" << 
ov::test::utils::vec2str(padBegin) << "_"; - result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - result << "D=" << ov::test::utils::vec2str(dilation) << "_"; - result << "O=" << convOutChannels << "_"; - result << "AP=" << padType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void ConvolutionLayerTest::SetUp() { - convSpecificParams convParams; - std::vector inputShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = - this->GetParam(); - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd; - size_t convOutChannels; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - std::vector filter_weights; - auto conv = std::dynamic_pointer_cast( - ngraph::builder::makeConvolution(params[0], ngPrc, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, false, filter_weights)); - ov::ResultVector results{std::make_shared(conv)}; - function = std::make_shared(results, params, "convolution"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp b/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp deleted file mode 100644 index 8e0ec8fc3c0d4c..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/convolution_backprop.hpp" - -namespace LayerTestsDefinitions { - -std::string ConvolutionBackpropLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - convBackpropSpecificParams convBackpropDataParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - InferenceEngine::SizeVector outputShapes; - std::string targetDevice; - std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, outputShapes, targetDevice) = obj.param; - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd, outPadding; - size_t convOutChannels; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = convBackpropDataParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "OS=" << ov::test::utils::vec2str(outputShapes) << "_"; - result << "K" << ov::test::utils::vec2str(kernel) << "_"; - result << "S" << ov::test::utils::vec2str(stride) << "_"; - result << "PB" << ov::test::utils::vec2str(padBegin) << "_"; - result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - result << "D=" << ov::test::utils::vec2str(dilation) << "_"; - result << "OP=" << ov::test::utils::vec2str(outPadding) << "_"; - result << "O=" << convOutChannels << "_"; - 
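- // Key legend: K/S/PB/PE/D = kernel, strides, pads_begin, pads_end, dilations; OP = output padding, O = output channels, AP = auto_pad mode.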
result << "AP=" << padType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void ConvolutionBackpropLayerTest::SetUp() { - convBackpropSpecificParams convBackpropDataParams; - std::vector inputShape; - std::vector outputShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, outputShape, targetDevice) = this->GetParam(); - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd, outPadding; - size_t convOutChannels; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = convBackpropDataParams; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto convBackpropData = std::dynamic_pointer_cast( - ngraph::builder::makeConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, false, outPadding)); - if (!outputShape.empty()) { - auto outShape = ov::op::v0::Constant::create(ov::element::i64, {outputShape.size()}, outputShape); - convBackpropData = std::dynamic_pointer_cast( - ngraph::builder::makeConvolutionBackpropData(params[0], outShape, ngPrc, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels)); - } - ov::ResultVector results{std::make_shared(convBackpropData)}; - function = std::make_shared(results, params, "convolutionBackpropData"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp deleted file mode 100644 index 94010e9a26e3c9..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// DEPRECATED, can't be removed currently due to arm and kmb-plugin dependency (#55568) - -#include "shared_test_classes/single_layer/convolution_backprop_data.hpp" - -namespace LayerTestsDefinitions { - -std::string ConvolutionBackpropDataLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - convBackpropDataSpecificParams convBackpropDataParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - InferenceEngine::SizeVector outputShapes; - std::string targetDevice; - std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, outputShapes, targetDevice) = obj.param; - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd, outPadding; - size_t convOutChannels; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = convBackpropDataParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "OS=" << ov::test::utils::vec2str(outputShapes) << "_"; - result << "K" << ov::test::utils::vec2str(kernel) << 
"_"; - result << "S" << ov::test::utils::vec2str(stride) << "_"; - result << "PB" << ov::test::utils::vec2str(padBegin) << "_"; - result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - result << "D=" << ov::test::utils::vec2str(dilation) << "_"; - result << "OP=" << ov::test::utils::vec2str(outPadding) << "_"; - result << "O=" << convOutChannels << "_"; - result << "AP=" << padType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void ConvolutionBackpropDataLayerTest::SetUp() { - convBackpropDataSpecificParams convBackpropDataParams; - std::vector inputShape; - std::vector outputShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, outputShape, targetDevice) = this->GetParam(); - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd, outPadding; - size_t convOutChannels; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = convBackpropDataParams; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto convBackpropData = std::dynamic_pointer_cast( - ngraph::builder::makeConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, false, outPadding)); - if (!outputShape.empty()) { - auto outShape = ov::op::v0::Constant::create(ov::element::i64, {outputShape.size()}, outputShape); - convBackpropData = std::dynamic_pointer_cast( - ngraph::builder::makeConvolutionBackpropData(params[0], outShape, ngPrc, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels)); - } - ov::ResultVector results{std::make_shared(convBackpropData)}; - function = std::make_shared(results, params, "convolutionBackpropData"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp deleted file mode 100644 index fc55ee41d018a8..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/ctc_greedy_decoder.hpp" -#include "ov_models/builders.hpp" - -namespace LayerTestsDefinitions { -std::string CTCGreedyDecoderLayerTest::getTestCaseName( - const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - bool mergeRepeated; - std::tie(netPrecision, - inPrc, outPrc, inLayout, outLayout, - inputShapes, - mergeRepeated, - targetDevice) = obj.param; - - std::ostringstream result; - const char separator = '_'; - - result << "IS=" << ov::test::utils::vec2str(inputShapes) << separator; - result << "netPRC=" << netPrecision.name() << separator; - result << "inPRC=" << inPrc.name() << separator; - result << "outPRC=" << outPrc.name() << separator; - 
result << "inL=" << inLayout << separator; - result << "outL=" << outLayout << separator; - result << "merge_repeated=" << std::boolalpha << mergeRepeated << separator; - result << "trgDev=" << targetDevice; - - return result.str(); -} - -void CTCGreedyDecoderLayerTest::SetUp() { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - bool mergeRepeated; - std::tie(netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, mergeRepeated, targetDevice) = GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector paramsIn {std::make_shared(ngPrc, ov::Shape(inputShapes))}; - - OPENVINO_SUPPRESS_DEPRECATED_START - auto ctcGreedyDecoder = std::dynamic_pointer_cast( - ngraph::builder::makeCTCGreedyDecoder(paramsIn[0], mergeRepeated)); - OPENVINO_SUPPRESS_DEPRECATED_END - - ov::ResultVector results{ std::make_shared(ctcGreedyDecoder) }; - function = std::make_shared(results, paramsIn, "CTCGreedyDecoder"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp deleted file mode 100644 index 82168a7b820f7c..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "shared_test_classes/single_layer/ctc_greedy_decoder_seq_len.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace LayerTestsDefinitions { -std::string CTCGreedyDecoderSeqLenLayerTest::getTestCaseName( - const testing::TestParamInfo& obj) { - InferenceEngine::SizeVector inputShape; - int sequenceLengths; - InferenceEngine::Precision dataPrecision, indicesPrecision; - int blankIndex; - bool mergeRepeated; - std::string targetDevice; - std::tie(inputShape, - sequenceLengths, - dataPrecision, - indicesPrecision, - blankIndex, - mergeRepeated, - targetDevice) = obj.param; - - std::ostringstream result; - - result << "IS=" << ov::test::utils::vec2str(inputShape) << '_'; - result << "seqLen=" << sequenceLengths << '_'; - result << "dataPRC=" << dataPrecision.name() << '_'; - result << "idxPRC=" << indicesPrecision.name() << '_'; - result << "BlankIdx=" << blankIndex << '_'; - result << "mergeRepeated=" << std::boolalpha << mergeRepeated << '_'; - result << "trgDev=" << targetDevice; - - return result.str(); -} - -void CTCGreedyDecoderSeqLenLayerTest::SetUp() { - InferenceEngine::SizeVector inputShape; - int sequenceLengths; - InferenceEngine::Precision dataPrecision, indicesPrecision; - int blankIndex; - bool mergeRepeated; - std::tie(inputShape, - sequenceLengths, - dataPrecision, - indicesPrecision, - blankIndex, - mergeRepeated, - targetDevice) = GetParam(); - - auto ngDataPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(dataPrecision); - auto ngIdxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision); - ov::ParameterVector paramsIn {std::make_shared(ngDataPrc, ov::Shape(inputShape))}; - - const auto sequenceLenNode = [&] { - const size_t B = inputShape[0]; - const size_t T = inputShape[1]; - - // Cap sequence length up to T - const int seqLen = std::min(T, sequenceLengths); - - 
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp b/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp
deleted file mode 100644
index 406991d7d8d61d..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/single_layer/ctc_loss.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string CTCLossLayerTest::getTestCaseName(const testing::TestParamInfo<CTCLossParams>& obj) {
-    InferenceEngine::SizeVector logitsShapes;
-    InferenceEngine::Precision fpPrecision, intPrecision;
-    bool preprocessCollapseRepeated, ctcMergeRepeated, unique;
-    std::vector<int> logitsLength, labelsLength;
-    std::vector<std::vector<int>> labels;
-    int blankIndex;
-    std::string targetDevice;
-    CTCLossParamsSubset ctcLossArgsSubset;
-    std::tie(ctcLossArgsSubset, fpPrecision, intPrecision, targetDevice) = obj.param;
-    std::tie(logitsShapes, logitsLength, labels, labelsLength, blankIndex, preprocessCollapseRepeated,
-             ctcMergeRepeated, unique) = ctcLossArgsSubset;
-
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(logitsShapes) << "_";
-    result << "LL=" << ov::test::utils::vec2str(logitsLength) << "_";
-    result << "A=" << ov::test::utils::vec2str(labels) << "_";
-    result << "AL=" << ov::test::utils::vec2str(labelsLength) << "_";
-    result << "BI=" << blankIndex << "_";
-    result << "PCR=" << preprocessCollapseRepeated << "_";
-    result << "CMR=" << ctcMergeRepeated << "_";
-    result << "U=" << unique << "_";
-    result << "PF=" << fpPrecision.name() << "_";
-    result << "PI=" << intPrecision.name() << "_";
-    result << "targetDevice=" << targetDevice;
-    return result.str();
-}
-
-void CTCLossLayerTest::SetUp() {
-    std::vector<size_t> logitsShapes;
-    InferenceEngine::Precision fpPrecision, intPrecision;
-    bool preprocessCollapseRepeated, ctcMergeRepeated, unique;
-    std::vector<int> logitsLength, labelsLength;
-    std::vector<std::vector<int>> labels;
-    int blankIndex;
-    CTCLossParamsSubset ctcLossArgsSubset;
-    std::tie(ctcLossArgsSubset, fpPrecision, intPrecision, targetDevice) = this->GetParam();
-    std::tie(logitsShapes, logitsLength, labels, labelsLength, blankIndex, preprocessCollapseRepeated,
-             ctcMergeRepeated, unique) = ctcLossArgsSubset;
-
-    auto ngFpPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(fpPrecision);
-    auto ngIntPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(intPrecision);
-
-    ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngFpPrc, ov::Shape(logitsShapes))};
-    OPENVINO_SUPPRESS_DEPRECATED_START
-    auto ctcLoss = std::dynamic_pointer_cast<ov::op::v4::CTCLoss>(
-            ngraph::builder::makeCTCLoss(params[0], logitsLength, labels, labelsLength, blankIndex,
-                                         ngFpPrc, ngIntPrc, preprocessCollapseRepeated, ctcMergeRepeated, unique));
-    OPENVINO_SUPPRESS_DEPRECATED_END
-
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(ctcLoss)};
-    function = std::make_shared<ov::Model>(results, params, "CTCLoss");
-}
-}  // namespace LayerTestsDefinitions
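The deleted builder hides the five-input construction of ov::op::v4::CTCLoss. A hedged sketch of the direct equivalent, assuming labels come pre-padded to [N, T] (the helper name, constant layout, and i32 index type are illustrative assumptions):

    // Sketch: CTCLoss built directly; constants stand in for the builder's inputs.
    #include <memory>
    #include "openvino/core/model.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/ctc_loss.hpp"
    #include "openvino/op/parameter.hpp"

    std::shared_ptr<ov::Model> make_ctc_loss(const ov::Shape& logits_shape,  // [N, T, C]
                                             const std::vector<int>& logit_len,      // [N]
                                             const std::vector<int>& labels_flat,    // [N * T], padded
                                             const std::vector<int>& label_len,      // [N]
                                             int blank_index) {
        auto logits = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, logits_shape);
        const size_t N = logits_shape[0], T = logits_shape[1];
        auto ll  = ov::op::v0::Constant::create(ov::element::i32, {N}, logit_len);
        auto lb  = ov::op::v0::Constant::create(ov::element::i32, {N, T}, labels_flat);
        auto lbl = ov::op::v0::Constant::create(ov::element::i32, {N}, label_len);
        auto bi  = ov::op::v0::Constant::create(ov::element::i32, {}, {blank_index});
        auto loss = std::make_shared<ov::op::v4::CTCLoss>(logits, ll, lb, lbl, bi);
        return std::make_shared<ov::Model>(loss->outputs(), ov::ParameterVector{logits});
    }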
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp
deleted file mode 100644
index dd834b5545d069..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/cum_sum.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string CumSumLayerTest::getTestCaseName(const testing::TestParamInfo<cumSumParams>& obj) {
-    InferenceEngine::SizeVector inputShapes;
-    InferenceEngine::Precision inputPrecision;
-    int64_t axis;
-    bool exclusive, reverse;
-    std::string targetDevice;
-    std::tie(inputShapes, inputPrecision, axis, exclusive, reverse, targetDevice) = obj.param;
-
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_";
-    result << "Precision=" << inputPrecision.name() << "_";
-    result << "Axis=" << axis << "_";
-    result << "Exclusive=" << (exclusive ? "TRUE" : "FALSE") << "_";
-    result << "Reverse=" << (reverse ? "TRUE" : "FALSE") << "_";
-    result << "TargetDevice=" << targetDevice;
-    return result.str();
-}
-
-void CumSumLayerTest::SetUp() {
-    InferenceEngine::SizeVector inputShapes;
-    InferenceEngine::Precision inputPrecision;
-    bool exclusive, reverse;
-    int64_t axis;
-    std::tie(inputShapes, inputPrecision, axis, exclusive, reverse, targetDevice) = this->GetParam();
-    const auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    const auto paramData = std::make_shared<ov::op::v0::Parameter>(inType, ov::Shape(inputShapes));
-    const auto axisNode = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{axis})->output(0);
-    const auto cumSum = std::make_shared<ov::op::v0::CumSum>(paramData, axisNode, exclusive, reverse);
-
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(cumSum)};
-    function = std::make_shared<ov::Model>(results, ov::ParameterVector{paramData}, "cumsum");
-}
-}  // namespace LayerTestsDefinitions
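The exclusive/reverse flags the test sweeps are easy to misread; a tiny reference in plain C++ pins down the semantics: for {1, 2, 3}, inclusive forward gives {1, 3, 6}, exclusive forward {0, 1, 3}, inclusive reverse {6, 5, 3}. The helper below is a sketch, not code from the patch:

    // Sketch: 1-D reference for CumSum's exclusive/reverse flags.
    #include <vector>

    std::vector<int> cumsum_ref(const std::vector<int>& x, bool exclusive, bool reverse) {
        const int n = static_cast<int>(x.size());
        std::vector<int> y(n);
        int acc = 0;
        for (int i = 0; i < n; ++i) {
            const int idx = reverse ? n - 1 - i : i;   // walk from the far end when reversed
            y[idx] = exclusive ? acc : acc + x[idx];   // exclusive drops the current element
            acc += x[idx];
        }
        return y;
    }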
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp
deleted file mode 100644
index bd215a2458f51d..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-#include "shared_test_classes/single_layer/deformable_convolution.hpp"
-
-namespace LayerTestsDefinitions {
-std::string DeformableConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo<deformableConvLayerTestParamsSet>& obj) {
-    deformableConvSpecificParams convParams;
-    InferenceEngine::Precision netPrecision;
-    InferenceEngine::Precision inPrc, outPrc;
-    InferenceEngine::Layout inLayout, outLayout;
-    InferenceEngine::SizeVector inputShapes;
-    std::string targetDevice;
-    std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) =
-        obj.param;
-    ov::op::PadType padType;
-    InferenceEngine::SizeVector offsets, filter, stride, dilation;
-    std::vector<ptrdiff_t> padBegin, padEnd;
-    size_t groups, deformable_groups, convOutChannels;
-    bool with_bilinear_interpolation_pad, with_modulation;
-    std::tie(offsets, filter, stride, padBegin, padEnd, dilation, groups, deformable_groups, convOutChannels, padType,
-             with_bilinear_interpolation_pad, with_modulation) = convParams;
-
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_";
-    result << "DV" << ov::test::utils::vec2str(offsets) << "_";
-    result << "K" << ov::test::utils::vec2str(filter) << "_";
-    result << "S" << ov::test::utils::vec2str(stride) << "_";
-    result << "PB" << ov::test::utils::vec2str(padBegin) << "_";
-    result << "PE" << ov::test::utils::vec2str(padEnd) << "_";
-    result << "D=" << ov::test::utils::vec2str(dilation) << "_";
-    result << "G=" << groups << "_";
-    result << "DG=" << deformable_groups << "_";
-    result << "O=" << convOutChannels << "_";
-    result << "AP=" << padType << "_";
-    result << "BI_PAD=" << with_bilinear_interpolation_pad << "_";
-    result << "MODULATION=" << with_modulation << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "inPRC=" << inPrc.name() << "_";
-    result << "outPRC=" << outPrc.name() << "_";
-    result << "inL=" << inLayout << "_";
-    result << "outL=" << outLayout << "_";
-    result << "trgDev=" << targetDevice;
-    return result.str();
-}
-
-InferenceEngine::Blob::Ptr DeformableConvolutionLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    InferenceEngine::Blob::Ptr blobPtr;
-    const std::string& name = info.name();
-    if (name == "a_data") {
-        blobPtr = LayerTestsUtils::LayerTestsCommon::GenerateInput(info);
-    } else if (name == "b_offset_vals") {
-        blobPtr = FuncTestUtils::createAndFillBlobFloat(info.getTensorDesc(), 2, 0, 10);
-    } else if (name == "c_filter_vals") {
-        blobPtr = LayerTestsUtils::LayerTestsCommon::GenerateInput(info);
-    } else if (name == "c_modulation_scalars") {
-        blobPtr = FuncTestUtils::createAndFillBlobFloat(info.getTensorDesc(), 1, 0, 20);
-    }
-    return blobPtr;
-}
-void DeformableConvolutionLayerTest::SetUp() {
-    deformableConvSpecificParams convParams;
-    std::vector<size_t> inputShape;
-    InferenceEngine::Precision netPrecision;
-    std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) =
-        this->GetParam();
-    ov::op::PadType padType;
-    InferenceEngine::SizeVector offsets, filter, stride, dilation;
-    std::vector<ptrdiff_t> padBegin, padEnd;
-    size_t groups, deformable_groups, convOutChannels;
-    bool with_bilinear_interpolation_pad, with_modulation;
-    std::tie(offsets, filter, stride, padBegin, padEnd, dilation, groups, deformable_groups, convOutChannels, padType,
-             with_bilinear_interpolation_pad, with_modulation) = convParams;
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    ov::ParameterVector params;
-    for (auto&& shape : {inputShape, offsets, filter}) {
-        params.push_back(std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(shape)));
-    }
-    auto data = std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape));
-    data->set_friendly_name("a_data");
-    auto offset_vals = std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(offsets));
-    offset_vals->set_friendly_name("b_offset_vals");
-    auto filter_vals = std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(filter));
-    filter_vals->set_friendly_name("c_filter_vals");
-    ov::ParameterVector parameters{data, offset_vals, filter_vals};
-    std::shared_ptr<ov::Node> deformable_conv;
-    if (with_modulation) {
-        auto modulation_shape = ov::Shape(offsets);
-        modulation_shape[1] = offsets[1] / 2;
-        auto modulation_scalars = std::make_shared<ov::op::v0::Parameter>(ngPrc, modulation_shape);
-        modulation_scalars->set_friendly_name("c_modulation_scalars");
-
-        deformable_conv = std::make_shared<ov::op::v8::DeformableConvolution>(data, offset_vals, filter_vals, modulation_scalars, stride, padBegin,
-                                                                              padEnd, dilation, padType, groups, deformable_groups,
-                                                                              with_bilinear_interpolation_pad);
-        parameters.push_back(modulation_scalars);
-    } else {
-        deformable_conv = std::make_shared<ov::op::v8::DeformableConvolution>(data, offset_vals, filter_vals, stride, padBegin, padEnd, dilation,
-                                                                              padType, groups, deformable_groups, with_bilinear_interpolation_pad);
-    }
-
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(deformable_conv)};
-    function = std::make_shared<ov::Model>(results, parameters, "deformable_convolution");
-}
-}  // namespace LayerTestsDefinitions
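The line `modulation_shape[1] = offsets[1] / 2` encodes a shape rule worth spelling out: the offsets input carries an (x, y) pair per deformable-group kernel point, while the modulation mask carries one scalar per point, so its channel count is exactly half. A sketch of the rule (helper names are illustrative):

    // Sketch: input-shape rule behind DeformableConvolution's offsets vs. mask.
    #include <cstddef>
    #include "openvino/core/shape.hpp"

    ov::Shape offsets_shape(size_t N, size_t deformable_groups, size_t kh, size_t kw,
                            size_t out_h, size_t out_w) {
        return {N, 2 * deformable_groups * kh * kw, out_h, out_w};  // 2 coords per kernel point
    }

    ov::Shape modulation_shape(const ov::Shape& offsets) {
        ov::Shape mask = offsets;
        mask[1] = offsets[1] / 2;  // one scalar weight per kernel point
        return mask;
    }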
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp
deleted file mode 100644
index 0557da19c55540..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/deformable_psroi_pooling.hpp"
-
-
-namespace LayerTestsDefinitions {
-
-    std::string DeformablePSROIPoolingLayerTest::getTestCaseName(const testing::TestParamInfo<deformablePSROILayerTestParams>& obj) {
-        std::vector<size_t> dataShape;
-        std::vector<size_t> roisShape;
-        std::vector<size_t> offsetsShape;
-        int64_t outputDim;
-        int64_t groupSize;
-        float spatialScale;
-        std::vector<int64_t> spatialBinsXY;
-        float trans_std;
-        int64_t part_size;
-        InferenceEngine::Precision netPrecision;
-        std::string targetDevice;
-        deformablePSROISpecificParams opParams;
-
-        std::tie(opParams, netPrecision, targetDevice) = obj.param;
-        std::tie(dataShape, roisShape, offsetsShape, outputDim, groupSize, spatialScale, spatialBinsXY,
-                 trans_std, part_size) = opParams;
-
-        std::ostringstream result;
-
-        result << "data_shape=" << ov::test::utils::vec2str(dataShape) << "_";
-        result << "rois_shape=" << ov::test::utils::vec2str(roisShape) << "_";
-        result << "offsets_shape=" << ov::test::utils::vec2str(offsetsShape) << "_";
-        result << "out_dim=" << outputDim << "_";
-        result << "group_size=" << groupSize << "_";
-        result << "scale=" << spatialScale << "_";
-        result << "bins_x=" << spatialBinsXY[0] << "_";
-        result << "bins_y=" << spatialBinsXY[1] << "_";
-        result << "trans_std=" << trans_std << "_";
-        result << "part_size=" << part_size << "_";
-        result << "prec=" << netPrecision.name() << "_";
-        result << "dev=" << targetDevice;
-        return result.str();
-    }
-
-    void DeformablePSROIPoolingLayerTest::GenerateInputs() {
-        auto data_input_shape = cnnNetwork.getInputShapes().begin()->second;
-        const auto batch_distrib = data_input_shape[0] - 1;
-        const auto height = data_input_shape[2] / spatialScale_;
-        const auto width  = data_input_shape[3] / spatialScale_;
-
-        size_t it = 0;
-        for (const auto &input : cnnNetwork.getInputsInfo()) {
-            const auto &info = input.second;
-            InferenceEngine::Blob::Ptr blob;
-
-            if (it == 0) {
-                blob = GenerateInput(*info);
-            } else if (it == 1) {
-                blob = make_blob_with_precision(info->getTensorDesc());
-                blob->allocate();
-                ov::test::utils::fill_data_roi(blob, batch_distrib,
-                                               height, width, 1.0f, true);
-            } else {
-                blob = make_blob_with_precision(info->getTensorDesc());
-                blob->allocate();
-                std::vector<float> offset_data = ov::test::utils::generate_float_numbers(blob->size(), -0.9, 0.9);
-                ov::test::utils::fill_data_float_array(blob, &offset_data[0], blob->size());
-            }
-            inputs.push_back(blob);
-            it++;
-        }
-    }
-
-    void DeformablePSROIPoolingLayerTest::SetUp() {
-        std::vector<size_t> dataShape;
-        std::vector<size_t> roisShape;
-        std::vector<size_t> offsetsShape;
-        int64_t outputDim;
-        int64_t groupSize;
-        std::string mode = "bilinear_deformable";
-        std::vector<int64_t> spatialBinsXY;
-        float trans_std;
-        int64_t part_size;
-        InferenceEngine::Precision netPrecision;
-        deformablePSROISpecificParams opParams;
-
-        std::tie(opParams, netPrecision, targetDevice) = this->GetParam();
-        std::tie(dataShape, roisShape, offsetsShape, outputDim, groupSize, spatialScale_, spatialBinsXY,
-                 trans_std, part_size) = opParams;
-
-
-        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-        ov::ParameterVector params;
-        ov::OutputVector inputs;
-        std::shared_ptr<ov::Node> defomablePSROIPooling;
-
-        if (offsetsShape.empty()) { // Test without optional third input (offsets)
-            params = ov::ParameterVector{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(dataShape)),
-                                         std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(roisShape))};
-            defomablePSROIPooling = std::make_shared<ov::op::v1::DeformablePSROIPooling>(params[0],
-                                                                                         params[1],
-                                                                                         outputDim,
-                                                                                         spatialScale_,
-                                                                                         groupSize,
-                                                                                         mode,
-                                                                                         spatialBinsXY[0],
-                                                                                         spatialBinsXY[1],
-                                                                                         trans_std,
-                                                                                         part_size);
-        } else {
-            params = ov::ParameterVector{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(dataShape)),
                                         std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(roisShape)),
-                                         std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(offsetsShape))};
-            defomablePSROIPooling = std::make_shared<ov::op::v1::DeformablePSROIPooling>(params[0],
                                                                                          params[1],
-                                                                                         params[2],
-                                                                                         outputDim,
-                                                                                         spatialScale_,
-                                                                                         groupSize,
-                                                                                         mode,
-                                                                                         spatialBinsXY[0],
-                                                                                         spatialBinsXY[1],
-                                                                                         trans_std,
-                                                                                         part_size);
        }
-
-        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(defomablePSROIPooling)};
-        function = std::make_shared<ov::Model>(results, params, "deformable_psroi_pooling");
    }
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp b/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp
deleted file mode 100644
index 3861370f37ebf4..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/single_layer/depth_to_space.hpp"
-
-namespace LayerTestsDefinitions {
-
-static inline std::string DepthToSpaceModeToString(const ov::op::v0::DepthToSpace::DepthToSpaceMode& mode) {
-    static std::map<ov::op::v0::DepthToSpace::DepthToSpaceMode, std::string> names = {
-        {ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, "BLOCKS_FIRST"},
-        {ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, "DEPTH_FIRST"},
-    };
-
-    auto i = names.find(mode);
-    if (i != names.end())
-        return i->second;
-    else
-        throw std::runtime_error("Unsupported DepthToSpaceMode");
-}
-
-std::string DepthToSpaceLayerTest::getTestCaseName(const testing::TestParamInfo<depthToSpaceParamsTuple> &obj) {
-    std::vector<size_t> inShape;
-    ov::op::v0::DepthToSpace::DepthToSpaceMode mode;
-    std::size_t blockSize;
-    InferenceEngine::Precision inputPrecision;
-    std::string targetName;
-    std::tie(inShape, inputPrecision, mode, blockSize, targetName) = obj.param;
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inShape) << "_";
-    result << "inPrc=" << inputPrecision.name() << "_";
-    result << "M=" << DepthToSpaceModeToString(mode) << "_";
-    result << "BS=" << blockSize << "_";
-    result << "targetDevice=" << targetName << "_";
-    return result.str();
-}
-
-void DepthToSpaceLayerTest::SetUp() {
-    std::vector<size_t> inShape;
-    ov::op::v0::DepthToSpace::DepthToSpaceMode mode;
-    std::size_t blockSize;
-    InferenceEngine::Precision inputPrecision;
-    std::tie(inShape, inputPrecision, mode, blockSize, targetDevice) = this->GetParam();
-    auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(inPrc, ov::Shape(inShape))};
-    auto d2s = std::make_shared<ov::op::v0::DepthToSpace>(params[0], mode, blockSize);
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(d2s)};
-    function = std::make_shared<ov::Model>(results, params, "DepthToSpace");
-}
-}  // namespace LayerTestsDefinitions
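Both DepthToSpace modes produce the same output shape; they differ only in how channels are interleaved. The shape rule, as a short sketch (helper name is illustrative): [N, C, H, W] with block size b maps to [N, C/(b*b), H*b, W*b], and C must be divisible by b^2.

    // Sketch: DepthToSpace output-shape rule for 4-D input.
    #include <cassert>
    #include "openvino/core/shape.hpp"

    ov::Shape depth_to_space_shape(const ov::Shape& in, size_t b) {
        assert(in.size() == 4 && in[1] % (b * b) == 0);  // channels must divide by b^2
        return {in[0], in[1] / (b * b), in[2] * b, in[3] * b};
    }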
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp b/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp
deleted file mode 100644
index eb1e0e0320face..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/single_layer/detection_output.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::ostream& operator <<(std::ostream& result, const ov::op::v0::DetectionOutput::Attributes& attrs) {
-    result << "Classes=" << attrs.num_classes << "_";
-    result << "backgrId=" << attrs.background_label_id << "_";
-    result << "topK=" << attrs.top_k << "_";
-    result << "varEnc=" << attrs.variance_encoded_in_target << "_";
-    result << "keepTopK=" << ov::test::utils::vec2str(attrs.keep_top_k) << "_";
-    result << "codeType=" << attrs.code_type << "_";
-    result << "shareLoc=" << attrs.share_location << "_";
-    result << "nmsThr=" << attrs.nms_threshold << "_";
-    result << "confThr=" << attrs.confidence_threshold << "_";
-    result << "clipAfterNms=" << attrs.clip_after_nms << "_";
-    result << "clipBeforeNms=" << attrs.clip_before_nms << "_";
-    result << "decrId=" << attrs.decrease_label_id << "_";
-    result << "norm=" << attrs.normalized << "_";
-    result << "inH=" << attrs.input_height << "_";
-    result << "inW=" << attrs.input_width << "_";
-    result << "OS=" << attrs.objectness_score << "_";
-    return result;
-}
-
-std::string DetectionOutputLayerTest::getTestCaseName(const testing::TestParamInfo<DetectionOutputParams>& obj) {
-    DetectionOutputAttributes commonAttrs;
-    ParamsWhichSizeDepends specificAttrs;
-    ov::op::v0::DetectionOutput::Attributes attrs;
-    size_t batch;
-    std::string targetDevice;
-    std::tie(commonAttrs, specificAttrs, batch, attrs.objectness_score, targetDevice) = obj.param;
-
-    std::tie(attrs.num_classes, attrs.background_label_id, attrs.top_k, attrs.keep_top_k, attrs.code_type, attrs.nms_threshold, attrs.confidence_threshold,
-             attrs.clip_after_nms, attrs.clip_before_nms, attrs.decrease_label_id) = commonAttrs;
-
-    const size_t numInputs = 5;
-    std::vector<InferenceEngine::SizeVector> inShapes(numInputs);
-    std::tie(attrs.variance_encoded_in_target, attrs.share_location, attrs.normalized, attrs.input_height, attrs.input_width,
-             inShapes[idxLocation], inShapes[idxConfidence], inShapes[idxPriors], inShapes[idxArmConfidence], inShapes[idxArmLocation]) = specificAttrs;
-
-    if (inShapes[idxArmConfidence].empty()) {
-        inShapes.resize(3);
-    }
-
-    for (size_t i = 0; i < inShapes.size(); i++) {
-        inShapes[i][0] = batch;
-    }
-
-    std::ostringstream result;
-    result << "IS = { ";
-    result << "LOC=" << ov::test::utils::vec2str(inShapes[0]) << "_";
-    result << "CONF=" << ov::test::utils::vec2str(inShapes[1]) << "_";
-    result << "PRIOR=" << ov::test::utils::vec2str(inShapes[2]);
-    std::string armConf, armLoc;
-    if (inShapes.size() > 3) {
-        armConf = "_ARM_CONF=" + ov::test::utils::vec2str(inShapes[3]) + "_";
-        armLoc = "ARM_LOC=" + ov::test::utils::vec2str(inShapes[4]);
-    }
-    result << armConf;
-    result << armLoc << " }_";
-
-    result << attrs;
-    result << "TargetDevice=" << targetDevice;
-    return result.str();
-}
-
-void DetectionOutputLayerTest::GenerateInputs() {
-    size_t it = 0;
-    for (const auto &input : cnnNetwork.getInputsInfo()) {
-        const auto &info = input.second;
-        InferenceEngine::Blob::Ptr blob;
-        int32_t resolution = 1;
-        uint32_t range = 1;
-        if (it == 2) {
-            if (attrs.normalized) {
-                resolution = 100;
-            } else {
-                range = 10;
-            }
-        } else if (it == 1 || it == 3) {
-            resolution = 1000;
-        } else {
-            resolution = 10;
-        }
-        blob = make_blob_with_precision(info->getTensorDesc());
-        blob->allocate();
-        ov::test::utils::fill_data_random_float(blob, range, 0, resolution);
-        inputs.push_back(blob);
-        it++;
-    }
-}
-
-void DetectionOutputLayerTest::Compare(
-        const std::vector<std::pair<ngraph::element::Type, std::vector<std::uint8_t>>> &expectedOutputs,
-        const std::vector<InferenceEngine::Blob::Ptr> &actualOutputs) {
-    for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) {
-        const auto &expected = expectedOutputs[outputIndex].second;
-        const auto &actual = actualOutputs[outputIndex];
-
-        ASSERT_EQ(expected.size(), actual->byteSize());
-
-        size_t expSize = 0;
-        size_t actSize = 0;
-
-        const auto &expectedBuffer = expected.data();
-        auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(actual);
-        IE_ASSERT(memory);
-        const auto lockedMemory = memory->wmap();
-        const auto actualBuffer = lockedMemory.as<const std::uint8_t*>();
-
-        const float *expBuf = reinterpret_cast<const float*>(expectedBuffer);
-        const float *actBuf = reinterpret_cast<const float*>(actualBuffer);
-        for (size_t i = 0; i < actual->size(); i+=7) {
-            if (expBuf[i] == -1)
-                break;
-            expSize += 7;
-        }
-        for (size_t i = 0; i < actual->size(); i+=7) {
-            if (actBuf[i] == -1)
-                break;
-            actSize += 7;
-        }
-        ASSERT_EQ(expSize, actSize);
-        LayerTestsCommon::Compare(expBuf, actBuf, expSize, 1e-2f);
-    }
-}
-
-void DetectionOutputLayerTest::SetUp() {
-    DetectionOutputAttributes commonAttrs;
-    ParamsWhichSizeDepends specificAttrs;
-    size_t batch;
-    std::tie(commonAttrs, specificAttrs, batch, attrs.objectness_score, targetDevice) = this->GetParam();
-
-    std::tie(attrs.num_classes, attrs.background_label_id, attrs.top_k, attrs.keep_top_k, attrs.code_type, attrs.nms_threshold, attrs.confidence_threshold,
-             attrs.clip_after_nms, attrs.clip_before_nms, attrs.decrease_label_id) = commonAttrs;
-
-    inShapes.resize(numInputs);
-    std::tie(attrs.variance_encoded_in_target, attrs.share_location, attrs.normalized, attrs.input_height, attrs.input_width,
-             inShapes[idxLocation], inShapes[idxConfidence], inShapes[idxPriors], inShapes[idxArmConfidence], inShapes[idxArmLocation]) = specificAttrs;
-
-    if (inShapes[idxArmConfidence].empty()) {
-        inShapes.resize(3);
-    }
-
-    for (size_t i = 0; i < inShapes.size(); i++) {
-        inShapes[i][0] = batch;
-    }
-
-    ov::ParameterVector params;
-    for (auto&& shape : inShapes) {
-        auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape(shape));
-        params.push_back(param);
-    }
-    std::shared_ptr<ov::Node> detOut;
-    if (params.size() == 3)
-        detOut = std::make_shared<ov::op::v0::DetectionOutput>(params[0], params[1], params[2], attrs);
-    else if (params.size() == 5)
-        detOut = std::make_shared<ov::op::v0::DetectionOutput>(params[0], params[1], params[2], params[3], params[4], attrs);
-    else
-        OPENVINO_THROW("DetectionOutput layer supports only 3 or 5 inputs");
-
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(detOut)};
-    function = std::make_shared<ov::Model>(results, params, "DetectionOutput");
-}
-}  // namespace LayerTestsDefinitions
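The deleted Compare() leans on an output convention worth making explicit: DetectionOutput emits rows of 7 floats, [image_id, label, confidence, xmin, ymin, xmax, ymax], and marks the end of the valid region with a row whose image_id is -1. A standalone sketch of the counting step (helper name is illustrative):

    // Sketch: count valid floats in a DetectionOutput result buffer.
    #include <cstddef>
    #include <vector>

    size_t count_valid_floats(const std::vector<float>& out) {
        size_t n = 0;
        for (size_t i = 0; i + 7 <= out.size(); i += 7) {
            if (out[i] == -1.0f)  // sentinel row: stop counting
                break;
            n += 7;
        }
        return n;
    }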
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/dft.cpp b/src/tests/functional/shared_test_classes/src/single_layer/dft.cpp
deleted file mode 100644
index 19cbf193edd39e..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/dft.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/dft.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string DFTLayerTest::getTestCaseName(const testing::TestParamInfo<DFTParams>& obj) {
-    InferenceEngine::SizeVector inputShapes;
-    InferenceEngine::Precision inputPrecision;
-    std::vector<int64_t> axes;
-    std::vector<int64_t> signalSize;
-    ngraph::helpers::DFTOpType opType;
-    std::string targetDevice;
-    std::tie(inputShapes, inputPrecision, axes, signalSize, opType, targetDevice) = obj.param;
-
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_";
-    result << "Precision=" << inputPrecision.name() << "_";
-    result << "Axes=" << ov::test::utils::vec2str(axes) << "_";
-    result << "SignalSize=" << ov::test::utils::vec2str(signalSize) << "_";
-    result << "Inverse=" << (opType == ngraph::helpers::DFTOpType::INVERSE) << "_";
-    result << "TargetDevice=" << targetDevice;
-    return result.str();
-}
-
-void DFTLayerTest::SetUp() {
-    InferenceEngine::SizeVector inputShapes;
-    InferenceEngine::Precision inputPrecision;
-    std::vector<int64_t> axes;
-    std::vector<int64_t> signalSize;
-    ngraph::helpers::DFTOpType opType;
-    std::tie(inputShapes, inputPrecision, axes, signalSize, opType, targetDevice) = this->GetParam();
-    auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    ov::ParameterVector paramVector;
-    auto paramData = std::make_shared<ov::op::v0::Parameter>(inType, ov::Shape(inputShapes));
-    paramVector.push_back(paramData);
-
-    auto dft = ngraph::builder::makeDFT(paramVector[0], axes, signalSize, opType);
-
-
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(dft)};
-    function = std::make_shared<ov::Model>(results, paramVector, "DFT");
-}
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp
deleted file mode 100644
index cfec35565aa8b6..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) 2022 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/einsum.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string EinsumLayerTest::getTestCaseName(const testing::TestParamInfo<EinsumLayerTestParamsSet>& obj) {
-    InferenceEngine::Precision precision;
-    EinsumEquationWithInput equationWithInput;
-    std::string targetDevice;
-    std::tie(precision, equationWithInput, targetDevice) = obj.param;
-    std::string equation;
-    std::vector<std::vector<size_t>> inputShapes;
-    std::tie(equation, inputShapes) = equationWithInput;
-
-    std::ostringstream result;
-    result << "PRC=" << precision.name() << "_";
-    result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_";
-    result << "Eq=" << equation << "_";
-    result << "trgDev=" << targetDevice;
-    return result.str();
-}
-
-void EinsumLayerTest::SetUp() {
-    InferenceEngine::Precision precision;
-    EinsumEquationWithInput equationWithInput;
-    std::tie(precision, equationWithInput, targetDevice) = this->GetParam();
-    std::string equation;
-    std::vector<std::vector<size_t>> inputShapes;
-    std::tie(equation, inputShapes) = equationWithInput;
-
-    const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(precision);
-    ov::ParameterVector params;
-    ov::OutputVector paramsOuts;
-    for (auto&& shape : inputShapes) {
-        auto param = std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(shape));
-        params.push_back(param);
-        paramsOuts.push_back(param);
-    }
-
-    const auto einsum = std::make_shared<ov::op::v7::Einsum>(paramsOuts, equation);
-    const ov::ResultVector results{std::make_shared<ov::op::v0::Result>(einsum)};
-    function = std::make_shared<ov::Model>(results, params, "einsum");
-}
-
-}  // namespace LayerTestsDefinitions
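Einsum takes all of its operands through a single OutputVector plus an equation string, which is why the test collects paramsOuts before constructing the op. A concrete sketch for one matmul-style equation (the helper and shapes are illustrative):

    // Sketch: a two-operand Einsum ("ij,jk->ik" == matrix multiply).
    #include <memory>
    #include "openvino/core/model.hpp"
    #include "openvino/op/einsum.hpp"
    #include "openvino/op/parameter.hpp"

    std::shared_ptr<ov::Model> make_einsum_matmul() {
        auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
        auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3, 4});
        auto einsum = std::make_shared<ov::op::v7::Einsum>(ov::OutputVector{a, b}, "ij,jk->ik");
        return std::make_shared<ov::Model>(einsum->outputs(), ov::ParameterVector{a, b});
    }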
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp b/src/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp
deleted file mode 100644
index de9bea835f46e1..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ov_models/builders.hpp"
-#include "common_test_utils/node_builders/constant.hpp"
-#include <common_test_utils/ov_tensor_utils.hpp>
-#include "shared_test_classes/single_layer/eltwise.hpp"
-#include "common_test_utils/node_builders/eltwise.hpp"
-
-#include "functional_test_utils/plugin_cache.hpp"
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-std::string EltwiseLayerTest::getTestCaseName(const testing::TestParamInfo<EltwiseTestParams>& obj) {
-    std::vector<InputShape> shapes;
-    ElementType netType, inType, outType;
-    ngraph::helpers::InputLayerType secondaryInputType;
-    ov::test::utils::OpType opType;
-    ngraph::helpers::EltwiseTypes eltwiseOpType;
-    std::string targetName;
-    ov::AnyMap additional_config;
-    std::tie(shapes, eltwiseOpType, secondaryInputType, opType, netType, inType, outType, targetName, additional_config) = obj.param;
-    std::ostringstream results;
-
-    results << "IS=(";
-    for (const auto& shape : shapes) {
-        results << ov::test::utils::partialShape2str({shape.first}) << "_";
-    }
-    results << ")_TS=(";
-    for (const auto& shape : shapes) {
-        for (const auto& item : shape.second) {
-            results << ov::test::utils::vec2str(item) << "_";
-        }
-    }
-    results << ")_eltwiseOpType=" << eltwiseOpType << "_";
-    results << "secondaryInputType=" << secondaryInputType << "_";
-    results << "opType=" << opType << "_";
-    results << "NetType=" << netType << "_";
-    results << "InType=" << inType << "_";
-    results << "OutType=" << outType << "_";
-    results << "trgDev=" << targetName;
-    for (auto const& configItem : additional_config) {
-        results << "_configItem=" << configItem.first << "=";
-        configItem.second.print(results);
-    }
-    return results.str();
-}
-
-void EltwiseLayerTest::transformInputShapesAccordingEltwise(const ov::PartialShape& secondInputShape) {
-    // propagate shapes in case 1 shape is defined
-    if (inputDynamicShapes.size() == 1) {
-        inputDynamicShapes.push_back(inputDynamicShapes.front());
-        for (auto& staticShape : targetStaticShapes) {
-            staticShape.push_back(staticShape.front());
-        }
-    }
-    ASSERT_EQ(inputDynamicShapes.size(), 2) << "Incorrect inputs number!";
-    if (!secondInputShape.is_static()) {
-        return;
-    }
-    if (secondInputShape.get_shape() == ov::Shape{1}) {
-        inputDynamicShapes[1] = secondInputShape;
-        for (auto& staticShape : targetStaticShapes) {
-            staticShape[1] = secondInputShape.get_shape();
-        }
-    }
-}
-
-void EltwiseLayerTest::SetUp() {
-    std::vector<InputShape> shapes;
-    ElementType netType;
-    ngraph::helpers::InputLayerType secondaryInputType;
-    ov::test::utils::OpType opType;
-    ngraph::helpers::EltwiseTypes eltwiseType;
-    Config additional_config;
-    std::tie(shapes, eltwiseType, secondaryInputType, opType, netType, inType, outType, targetDevice, configuration) =
-        this->GetParam();
-
-    init_input_shapes(shapes);
-
-    ov::ParameterVector parameters{std::make_shared<ov::op::v0::Parameter>(netType, inputDynamicShapes.front())};
-
-    ov::PartialShape shape_input_secondary;
-    switch (opType) {
-    case ov::test::utils::OpType::SCALAR: {
-        shape_input_secondary = {1};
-        break;
-    }
-    case ov::test::utils::OpType::VECTOR:
-        shape_input_secondary = inputDynamicShapes.back();
-        break;
-    default:
-        FAIL() << "Unsupported Secondary operation type";
-    }
-    // To propagate shape_input_secondary just in static case because all shapes are defined in dynamic scenarion
-    if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) {
-        transformInputShapesAccordingEltwise(shape_input_secondary);
-    }
-
-    std::shared_ptr<ov::Node> secondaryInput;
-    if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) {
-        auto param = std::make_shared<ov::op::v0::Parameter>(netType, shape_input_secondary);
-        secondaryInput = param;
-        parameters.push_back(param);
-    } else {
-        ov::Shape shape = inputDynamicShapes.back().get_max_shape();
-        switch (eltwiseType) {
-        case ngraph::helpers::EltwiseTypes::DIVIDE:
-        case ngraph::helpers::EltwiseTypes::MOD:
-        case ngraph::helpers::EltwiseTypes::FLOOR_MOD: {
-            std::vector<float> data = NGraphFunctions::Utils::generateVector<ov::element::Type_t::f32>(ov::shape_size(shape), 10, 2);
-            secondaryInput = ov::test::utils::deprecated::make_constant(netType, shape, data);
-            break;
-        }
-        case ngraph::helpers::EltwiseTypes::POWER:
-            secondaryInput = ov::test::utils::deprecated::make_constant(netType, shape, {}, true, 3);
-            break;
-        default:
-            secondaryInput = ov::test::utils::deprecated::make_constant(netType, shape, {}, true);
-        }
-    }
-
-    parameters[0]->set_friendly_name("param0");
-    secondaryInput->set_friendly_name("param1");
-
-    auto eltwise = ov::test::utils::make_eltwise(parameters[0], secondaryInput, eltwiseType);
-    function = std::make_shared<ov::Model>(eltwise, parameters, "Eltwise");
-}
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
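The DIVIDE/MOD/FLOOR_MOD special case above exists so the second operand is drawn from [2, 10] and can never be zero; an unconstrained random constant could trigger division by zero. A sketch of that idea in isolation (helper name and seed are illustrative):

    // Sketch: a guaranteed non-zero second operand for divide-like eltwise ops.
    #include <random>
    #include <vector>

    std::vector<float> non_zero_operand(size_t n, float lo = 2.0f, float hi = 10.0f) {
        std::mt19937 gen{0};  // fixed seed keeps the test reproducible
        std::uniform_real_distribution<float> dist(lo, hi);
        std::vector<float> v(n);
        for (auto& x : v) x = dist(gen);
        return v;
    }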
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp
deleted file mode 100644
index b351df09dbc84b..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/embedding_bag_offsets_sum.hpp"
-#include "ov_models/builders.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string EmbeddingBagOffsetsSumLayerTest::getTestCaseName(const testing::TestParamInfo<embeddingBagOffsetsSumLayerTestParamsSet>& obj) {
-    embeddingBagOffsetsSumParams params;
-    InferenceEngine::Precision netPrecision, indPrecision;
-    std::string targetDevice;
-    std::tie(params, netPrecision, indPrecision, targetDevice) = obj.param;
-    std::vector<size_t> embTableShape, indices, offsets;
-    size_t defaultIndex;
-    bool withWeights, withDefIndex;
-    std::tie(embTableShape, indices, offsets, defaultIndex, withWeights, withDefIndex) = params;
-
-    std::ostringstream result;
-    result << "ETS=" << ov::test::utils::vec2str(embTableShape) << "_";
-    result << "I" << ov::test::utils::vec2str(indices) << "_";
-    result << "O" << ov::test::utils::vec2str(offsets) << "_";
-    result << "DI" << defaultIndex << "_";
-    result << "WW" << withWeights << "_";
-    result << "WDI" << withDefIndex << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "indPRC=" << indPrecision.name() << "_";
-    result << "targetDevice=" << targetDevice;
-    return result.str();
-}
-
-void EmbeddingBagOffsetsSumLayerTest::SetUp() {
-    embeddingBagOffsetsSumParams embParams;
-    auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
-    auto indPrecision = netPrecision;
-    std::tie(embParams, netPrecision, indPrecision, targetDevice) = this->GetParam();
-    std::vector<size_t> embTableShape, indices, offsets;
-    bool withWeights, withDefIndex;
-    size_t defaultIndex;
-    std::tie(embTableShape, indices, offsets, defaultIndex, withWeights, withDefIndex) = embParams;
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    auto ngIdxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indPrecision);
-
-    auto emb_table_node = std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(embTableShape));
-    ov::ParameterVector params = {emb_table_node};
-
-    auto embBag = std::dynamic_pointer_cast<ov::op::v3::EmbeddingBagOffsetsSum>(
-            ngraph::builder::makeEmbeddingBagOffsetsSum(
-                    ngPrc, ngIdxPrc, emb_table_node, indices, offsets, defaultIndex, withWeights, withDefIndex));
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(embBag)};
-    function = std::make_shared<ov::Model>(results, params, "embeddingBagOffsetsSum");
-}
-}  // namespace LayerTestsDefinitions
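The offsets parameter this test sweeps partitions the flat indices array into bags: offsets[i] is where bag i starts, and each bag's embedding-table rows are summed. A plain-C++ reference of that semantics (names are illustrative, weights and default_index omitted for brevity):

    // Sketch: reference semantics of EmbeddingBagOffsetsSum over a [V][D] table.
    #include <cstddef>
    #include <vector>

    std::vector<std::vector<float>> embedding_bag_offsets_sum_ref(
            const std::vector<std::vector<float>>& table,   // [V][D]
            const std::vector<size_t>& indices,
            const std::vector<size_t>& offsets) {
        const size_t D = table.front().size();
        std::vector<std::vector<float>> bags(offsets.size(), std::vector<float>(D, 0.0f));
        for (size_t b = 0; b < offsets.size(); ++b) {
            const size_t end = (b + 1 < offsets.size()) ? offsets[b + 1] : indices.size();
            for (size_t i = offsets[b]; i < end; ++i)        // rows belonging to bag b
                for (size_t d = 0; d < D; ++d)
                    bags[b][d] += table[indices[i]][d];
        }
        return bags;
    }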
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp
deleted file mode 100644
index 8bd6d0ac1b5947..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/embedding_bag_packed_sum.hpp"
-#include "common_test_utils/node_builders/embedding_bag_packed_sum.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string EmbeddingBagPackedSumLayerTest::getTestCaseName(const testing::TestParamInfo<embeddingBagPackedSumLayerTestParamsSet>& obj) {
-    embeddingBagPackedSumParams params;
-    InferenceEngine::Precision netPrecision, indPrecision;
-    std::string targetDevice;
-    std::tie(params, netPrecision, indPrecision, targetDevice) = obj.param;
-    std::vector<size_t> embTableShape;
-    std::vector<std::vector<size_t>> indices;
-    bool withWeights;
-    std::tie(embTableShape, indices, withWeights) = params;
-
-    std::ostringstream result;
-    result << "ETS=" << ov::test::utils::vec2str(embTableShape) << "_";
-    result << "I" << ov::test::utils::vec2str(indices) << "_";
-    result << "WW" << withWeights << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "indPRC=" << indPrecision.name() << "_";
-    result << "targetDevice=" << targetDevice;
-    return result.str();
-}
-
-void EmbeddingBagPackedSumLayerTest::SetUp() {
-    embeddingBagPackedSumParams embParams;
-    auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
-    auto indPrecision = netPrecision;
-    std::tie(embParams, netPrecision, indPrecision, targetDevice) = this->GetParam();
-    std::vector<size_t> embTableShape;
-    std::vector<std::vector<size_t>> indices;
-    bool withWeights;
-    std::tie(embTableShape, indices, withWeights) = embParams;
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    auto ngIdxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indPrecision);
-
-    auto emb_table_node = std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(embTableShape));
-    ov::ParameterVector params = {emb_table_node};
-
-    auto embBag = ov::test::utils::make_embedding_bag_packed_sum(ngPrc, ngIdxPrc, emb_table_node, indices, withWeights);
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(embBag)};
-    function = std::make_shared<ov::Model>(results, params, "embeddingBagPackedSum");
-}
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp
deleted file mode 100644
index 808f37ff33be8e..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/embedding_segments_sum.hpp"
-#include "ov_models/builders.hpp"
-
-
-namespace LayerTestsDefinitions {
-
-std::string EmbeddingSegmentsSumLayerTest::getTestCaseName(const testing::TestParamInfo<embeddingSegmentsSumLayerTestParamsSet>& obj) {
-    embeddingSegmentsSumParams params;
-    InferenceEngine::Precision netPrecision, indPrecision;
-    std::string targetDevice;
-    std::tie(params, netPrecision, indPrecision, targetDevice) = obj.param;
-    std::vector<size_t> embTableShape, indices, segmentIds;
-    size_t numSegments, defaultIndex;
-    bool withWeights, withDefIndex;
-    std::tie(embTableShape, indices, segmentIds, numSegments, defaultIndex, withWeights, withDefIndex) = params;
-
-    std::ostringstream result;
-    result << "ETS=" << ov::test::utils::vec2str(embTableShape) << "_";
-    result << "I" << ov::test::utils::vec2str(indices) << "_";
-    result << "SI" << ov::test::utils::vec2str(segmentIds) << "_";
-    result << "NS" << numSegments << "_";
-    result << "DI" << defaultIndex << "_";
-    result << "WW" << withWeights << "_";
-    result << "WDI" << withDefIndex << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "indPRC=" << indPrecision.name() << "_";
-    result << "targetDevice=" << targetDevice;
-    return result.str();
-}
-
-void EmbeddingSegmentsSumLayerTest::SetUp() {
-    embeddingSegmentsSumParams embParams;
-    auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
-    auto indPrecision = netPrecision;
-    std::tie(embParams, netPrecision, indPrecision, targetDevice) = this->GetParam();
-    std::vector<size_t> embTableShape, indices, segmentIds;
-    bool withWeights, withDefIndex;
-    size_t numSegments, defaultIndex;
-    std::tie(embTableShape, indices, segmentIds, numSegments, defaultIndex, withWeights, withDefIndex) = embParams;
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    auto ngIdxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indPrecision);
-
-    auto emb_table_node = std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(embTableShape));
-    ov::ParameterVector params = {emb_table_node};
-
-    auto embBag = std::dynamic_pointer_cast<ov::op::v3::EmbeddingSegmentsSum>(
-            ngraph::builder::makeEmbeddingSegmentsSum(
-                    ngPrc, ngIdxPrc, emb_table_node, indices, segmentIds, numSegments, defaultIndex, withWeights, withDefIndex));
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(embBag)};
-    function = std::make_shared<ov::Model>(results, params, "embeddingSegmentsSum");
-}
-}  // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp
deleted file mode 100644
index c17f1bfe0ae573..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/experimental_detectron_detection_output.hpp"
-#include "ov_models/builders.hpp"
-#include "common_test_utils/data_utils.hpp"
-#include <common_test_utils/ov_tensor_utils.hpp>
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-namespace {
-    std::ostream& operator <<(std::ostream& ss, const ov::op::v6::ExperimentalDetectronDetectionOutput::Attributes& attributes) {
-        ss << "score_threshold=" << attributes.score_threshold << "_";
-        ss << "nms_threshold=" << attributes.nms_threshold << "_";
-        ss << "max_delta_log_wh=" << attributes.max_delta_log_wh << "_";
-        ss << "num_classes=" << attributes.num_classes << "_";
-        ss << "post_nms_count=" << attributes.post_nms_count << "_";
-        ss << "max_detections_per_image=" << attributes.max_detections_per_image << "_";
-        ss << "class_agnostic_box_regression=" << (attributes.class_agnostic_box_regression ? "true" : "false") << "_";
-        ss << "deltas_weights=" << ov::test::utils::vec2str(attributes.deltas_weights);
-        return ss;
-}
-}  // namespace
-
-std::string ExperimentalDetectronDetectionOutputLayerTest::getTestCaseName(
-        const testing::TestParamInfo<ExperimentalDetectronDetectionOutputTestParams>& obj) {
-    std::vector<InputShape> inputShapes;
-    ov::op::v6::ExperimentalDetectronDetectionOutput::Attributes attributes;
-    ElementType netPrecision;
-    std::string targetName;
-    std::tie(
-        inputShapes,
-        attributes.score_threshold,
-        attributes.nms_threshold,
-        attributes.max_delta_log_wh,
-        attributes.num_classes,
-        attributes.post_nms_count,
-        attributes.max_detections_per_image,
-        attributes.class_agnostic_box_regression,
-        attributes.deltas_weights,
-        netPrecision,
-        targetName) = obj.param;
-
-    std::ostringstream result;
-
-    using ov::test::operator<<;
-    result << "input_rois=" << inputShapes[0] << "_";
-    result << "input_deltas=" << inputShapes[1] << "_";
-    result << "input_scores=" << inputShapes[2] << "_";
-    result << "input_im_info=" << inputShapes[3] << "_";
-
-    using ov::test::subgraph::operator<<;
-    result << "attributes={" << attributes << "}_";
-    result << "netPRC=" << netPrecision << "_";
-    result << "trgDev=" << targetName;
-    return result.str();
-}
-
-void ExperimentalDetectronDetectionOutputLayerTest::SetUp() {
-    std::vector<InputShape> inputShapes;
-    ov::op::v6::ExperimentalDetectronDetectionOutput::Attributes attributes;
-
-    ElementType netPrecision;
-    std::string targetName;
-    std::tie(
-        inputShapes,
-        attributes.score_threshold,
-        attributes.nms_threshold,
-        attributes.max_delta_log_wh,
-        attributes.num_classes,
-        attributes.post_nms_count,
-        attributes.max_detections_per_image,
-        attributes.class_agnostic_box_regression,
-        attributes.deltas_weights,
-        netPrecision,
-        targetName) = this->GetParam();
-
-    if (netPrecision == element::f16)
-        abs_threshold = 0.01;
-
-    inType = outType = netPrecision;
-    targetDevice = targetName;
-
-    init_input_shapes(inputShapes);
-
-    ov::ParameterVector params;
-    for (auto&& shape : inputDynamicShapes)
-        params.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, shape));
-
-    auto experimentalDetectron = std::make_shared<ov::op::v6::ExperimentalDetectronDetectionOutput>(
-        params[0],  // input_rois
-        params[1],  // input_deltas
-        params[2],  // input_scores
-        params[3],  // input_im_info
-        attributes);
-    function = std::make_shared<ov::Model>(
-        ov::OutputVector{experimentalDetectron->output(0), experimentalDetectron->output(1)},
-        "ExperimentalDetectronDetectionOutput");
-}
-
-namespace {
-
-template <typename T>
-std::vector<T> getValues(const std::vector<float>& values) {
-    std::vector<T> result(values.begin(), values.end());
-    return result;
-}
-
-template <typename T>
-std::vector<ov::Tensor> generateInputTensors() {
-    const auto netPrecision = ov::element::from<T>();
-    std::vector<ov::Tensor> inputTensors = {
-        // 16 x 4 = 64
-        ov::test::utils::create_tensor<T>(
-            netPrecision,
-            Shape{16, 4},
-            getValues<T>({
-                1.0f, 1.0f, 10.0f, 10.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 1.0f,  4.0f,  1.0f, 8.0f, 5.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 1.0f,  1.0f,  1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 1.0f,  1.0f,  1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 1.0f,  1.0f,  1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f
-            })),
-        // 16 x 8
-        ov::test::utils::create_tensor<T>(
-            netPrecision,
-            Shape{16, 8},
-            getValues<T>({
-                5.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 1.0f, 1.0f, 4.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 8.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-
-                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f
-            })),
-        // 16 x 2 = 32
-        ov::test::utils::create_tensor<T>(
-            netPrecision,
-            Shape{16, 2},
-            getValues<T>({
-                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
-                1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f
-            })),
-        // 1 x 3 = 3
-        ov::test::utils::create_tensor<T>(netPrecision, Shape{1, 3}, getValues<T>({1.0f, 1.0f, 1.0f}))};
-
-    return inputTensors;
-}
-}  // namespace
-
-void ExperimentalDetectronDetectionOutputLayerTest::generate_inputs(
-    const std::vector<ov::Shape>& targetInputStaticShapes) {
-    const auto netPrecision = std::get<9>(GetParam());
-
-    const std::vector<ov::Tensor> inputTensors =
-        (netPrecision == element::f16) ? generateInputTensors<ov::float16>() : generateInputTensors<float>();
-
-    inputs.clear();
-    const auto& funcInputs = function->inputs();
-    for (auto i = 0ul; i < funcInputs.size(); ++i) {
-        if (targetInputStaticShapes[i] != inputTensors[i].get_shape()) {
-            OPENVINO_THROW("input shape is different from tensor shape");
-        }
-
-        inputs.insert({funcInputs[i].get_node_shared_ptr(), inputTensors[i]});
    }
-}
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_generate_proposals_single_image.cpp b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_generate_proposals_single_image.cpp
deleted file mode 100644
index 04bde87024d440..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_generate_proposals_single_image.cpp
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/experimental_detectron_generate_proposals_single_image.hpp"
-#include "ov_models/builders.hpp"
-#include <common_test_utils/ov_tensor_utils.hpp>
-
-namespace ov {
-namespace test {
-namespace subgraph {
-
-namespace {
-std::ostream& operator <<(
-        std::ostream& ss,
-        const ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage::Attributes& attributes) {
-    ss << "score_threshold=" << attributes.min_size << "_";
-    ss << "nms_threshold=" << attributes.nms_threshold << "_";
-    ss << "max_delta_log_wh=" << attributes.post_nms_count << "_";
-    ss << "num_classes=" << attributes.pre_nms_count;
-    return ss;
-}
-}  // namespace
-
-std::string ExperimentalDetectronGenerateProposalsSingleImageLayerTest::getTestCaseName(
-        const testing::TestParamInfo<ExperimentalDetectronGenerateProposalsSingleImageTestParams>& obj) {
-    std::vector<InputShape> inputShapes;
-    ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage::Attributes attributes;
-    std::pair<std::string, std::vector<ov::Tensor>> inputTensors;
-    ElementType netPrecision;
-    std::string targetName;
-    std::tie(
-        inputShapes,
-        attributes.min_size,
-        attributes.nms_threshold,
-        attributes.post_nms_count,
-        attributes.pre_nms_count,
-        inputTensors,
-        netPrecision,
-        targetName) = obj.param;
-
-    std::ostringstream result;
-    using ov::test::operator<<;
-    result << "im_info=" << inputShapes[0] << "_";
-    result << "anchors=" << inputShapes[1] << "_";
-    result << "deltas=" << inputShapes[2] << "_";
-    result << "scores=" << inputShapes[3] << "_";
-
-    using ov::test::subgraph::operator<<;
-    result << "attributes={" << attributes << "}_";
-    result << "inputTensors=" << inputTensors.first << "_";
-    result << "netPRC=" << netPrecision << "_";
-    result << "trgDev=" << targetName;
-    return result.str();
-}
-
-void ExperimentalDetectronGenerateProposalsSingleImageLayerTest::SetUp() {
-    std::vector<InputShape> inputShapes;
-    ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage::Attributes attributes;
-    std::pair<std::string, std::vector<ov::Tensor>> inputTensors;
-    ElementType netPrecision;
-    std::string targetName;
-    std::tie(
-        inputShapes,
-        attributes.min_size,
-        attributes.nms_threshold,
-        attributes.post_nms_count,
-        attributes.pre_nms_count,
-        inputTensors,
-        netPrecision,
-        targetName) = this->GetParam();
-
-    inType = outType = netPrecision;
-    targetDevice = targetName;
-
-    init_input_shapes(inputShapes);
-
-    ov::ParameterVector params;
-    for (auto&& shape : inputDynamicShapes)
-        params.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, shape));
-
-    auto experimentalDetectron = std::make_shared<ov::op::v6::ExperimentalDetectronGenerateProposalsSingleImage>(
-        params[0],  // im_info
-        params[1],  // anchors
-        params[2],  // deltas
-        params[3],  // scores
-        attributes);
-    function = std::make_shared<ov::Model>(
-        ov::OutputVector{experimentalDetectron->output(0), experimentalDetectron->output(1)},
-        "ExperimentalDetectronGenerateProposalsSingleImage");
-}
-
-void ExperimentalDetectronGenerateProposalsSingleImageLayerTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) {
-    auto inputTensors = std::get<5>(GetParam());
-
-    inputs.clear();
-    const auto& funcInputs = function->inputs();
-    for (auto i = 0ul; i < funcInputs.size(); ++i) {
-        if (targetInputStaticShapes[i] != inputTensors.second[i].get_shape()) {
-            OPENVINO_THROW("input shape is different from tensor shape");
-        }
-
-        inputs.insert({funcInputs[i].get_node_shared_ptr(), inputTensors.second[i]});
    }
-}
-
-}  // namespace subgraph
-}  // namespace test
-}  // namespace ov
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_prior_grid_generator.cpp b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_prior_grid_generator.cpp
index b11f0b530b1b4d..8e7ad1a168c5b0 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_prior_grid_generator.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_prior_grid_generator.cpp
@@ -109,4 +109,4 @@ void ExperimentalDetectronPriorGridGeneratorLayerTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) {
 }  // namespace subgraph
 }  // namespace test
-}  // namespace ov
+}  // namespace ov
\ No newline at end of file
"true" : "false"; - result << "aligned=" << alig << "_"; - result << "netPRC=" << netPrecision << "_"; - result << "trgDev=" << targetName; - return result.str(); -} - -void ExperimentalDetectronROIFeatureExtractorLayerTest::SetUp() { - std::vector inputShapes; - int64_t outputSize, samplingRatio; - std::vector pyramidScales; - bool aligned; - ElementType netPrecision; - std::string targetName; - std::tie(inputShapes, outputSize, samplingRatio, pyramidScales, aligned, netPrecision, targetName) = this->GetParam(); - - inType = outType = netPrecision; - targetDevice = targetName; - - init_input_shapes(inputShapes); - - Attrs attrs; - attrs.aligned = aligned; - attrs.output_size = outputSize; - attrs.sampling_ratio = samplingRatio; - attrs.pyramid_scales = pyramidScales; - - ov::ParameterVector params; - ov::OutputVector paramsOuts; - for (auto&& shape : inputDynamicShapes) { - auto param = std::make_shared(netPrecision, shape); - params.push_back(param); - paramsOuts.push_back(param); - } - auto experimentalDetectronROIFeatureExtractor = std::make_shared(paramsOuts, attrs); - function = std::make_shared(ov::OutputVector{experimentalDetectronROIFeatureExtractor->output(0), - experimentalDetectronROIFeatureExtractor->output(1)}, - "ExperimentalDetectronROIFeatureExtractor"); -} -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_topkrois.cpp b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_topkrois.cpp deleted file mode 100644 index c067bc4e3b1722..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_topkrois.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include -#include "shared_test_classes/single_layer/experimental_detectron_topkrois.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -std::string ExperimentalDetectronTopKROIsLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector inputShapes; - int64_t maxRois; - ElementType netPrecision; - std::string targetName; - std::tie(inputShapes, maxRois, netPrecision, targetName) = obj.param; - - std::ostringstream result; - if (inputShapes.front().first.size() != 0) { - result << "IS=("; - for (const auto &shape : inputShapes) { - result << ov::test::utils::partialShape2str({shape.first}) << "_"; - } - result.seekp(-1, result.cur); - result << ")_"; - } - result << "TS="; - for (const auto& shape : inputShapes) { - for (const auto& item : shape.second) { - result << ov::test::utils::vec2str(item) << "_"; - } - } - result << "maxRois=" << maxRois << "_"; - result << "netPRC=" << netPrecision << "_"; - result << "trgDev=" << targetName; - return result.str(); -} - -void ExperimentalDetectronTopKROIsLayerTest::SetUp() { - std::vector inputShapes; - int64_t maxRois; - ElementType netPrecision; - std::string targetName; - std::tie(inputShapes, maxRois, netPrecision, targetName) = this->GetParam(); - - inType = outType = netPrecision; - targetDevice = targetName; - - init_input_shapes(inputShapes); - - ov::ParameterVector params; - for (auto&& shape : inputDynamicShapes) - params.push_back(std::make_shared(netPrecision, shape)); - - auto experimentalDetectronTopKROIs = std::make_shared(params[0], params[1], maxRois); - function = std::make_shared(ov::OutputVector {experimentalDetectronTopKROIs->output(0)}, 
"ExperimentalDetectronTopKROIs"); -} -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp b/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp deleted file mode 100644 index e59aa21c4d38eb..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/extract_image_patches.hpp" -#include "ov_models/builders.hpp" - - -namespace LayerTestsDefinitions { - -std::string ExtractImagePatchesTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector inputShape, kernel, strides, rates; - ov::op::PadType pad_type; - InferenceEngine::Precision netPrc; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout; - std::string targetName; - std::tie(inputShape, kernel, strides, rates, pad_type, netPrc, inPrc, outPrc, inLayout, targetName) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "netPRC=" << netPrc.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "K=" << ov::test::utils::vec2str(kernel) << "_"; - result << "S=" << ov::test::utils::vec2str(strides) << "_"; - result << "R=" << ov::test::utils::vec2str(rates) << "_"; - result << "P=" << pad_type << "_"; - result << "trgDev=" << targetName; - return result.str(); -} - -void ExtractImagePatchesTest::SetUp() { - std::vector inputShape, kernel, strides, rates; - ov::op::PadType pad_type; - InferenceEngine::Precision netPrecision; - std::tie(inputShape, kernel, strides, rates, pad_type, netPrecision, inPrc, outPrc, inLayout, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - auto inputNode = std::make_shared(ngPrc, ov::Shape(inputShape)); - ov::ParameterVector params = {inputNode}; - - auto extImgPatches = std::make_shared(inputNode, - ov::Shape(kernel), - ov::Strides(strides), - ov::Shape(rates), - pad_type); - ov::ResultVector results{std::make_shared(extImgPatches)}; - function = std::make_shared(results, params, "ExtractImagePatches"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp deleted file mode 100644 index cf5a13baa14f3d..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include -#include -#include - -#include "ov_models/builders.hpp" -#include "shared_test_classes/single_layer/eye.hpp" - -namespace LayerTestsDefinitions { - -std::string EyeLayerTest::getTestCaseName(testing::TestParamInfo obj) { - EyeLayerTestParams params = obj.param; - - std::string td; - std::vector input_shapes; - ElementType net_precision; - std::vector out_batch_shape; - std::vector eye_par; - std::tie(input_shapes, out_batch_shape, eye_par, net_precision, td) = params; - std::ostringstream result; - result << "EyeTest_"; - result << "IS=("; - for (const auto& shape : input_shapes) { - result << ov::test::utils::partialShape2str({shape}) << "_"; - } - result 
<< ")"; - result << "rowNum=" << eye_par[0] << "_"; - result << "colNum=" << eye_par[1] << "_"; - result << "diagShift=" << eye_par[2] << "_"; - result << "batchShape=" << ov::test::utils::vec2str(out_batch_shape) << "_"; - result << net_precision << "_"; - result << std::to_string(obj.index); - return result.str(); -} - -void EyeLayerTest::SetUp() { - std::vector input_shapes; - LocalElementType row_num, col_num; - LocalElementType shift; - std::vector out_batch_shape; - ElementType net_precision; - EyeLayerTestParams basicParamsSet = this->GetParam(); - - std::vector eye_par; - std::tie(input_shapes, out_batch_shape, eye_par, net_precision, targetDevice) = basicParamsSet; - row_num = eye_par[0]; - col_num = eye_par[1]; - shift = eye_par[2]; - - std::shared_ptr eye_operation; - - auto rows_const = std::make_shared(ov::element::i32, input_shapes[0], &row_num); - rows_const->set_friendly_name("rows"); - auto cols_const = std::make_shared(ov::element::i32, input_shapes[1], &col_num); - cols_const->set_friendly_name("cols"); - auto diag_const = std::make_shared(ov::element::i32, input_shapes[2], &shift); - diag_const->set_friendly_name("diagInd"); - - if (!out_batch_shape.empty() && out_batch_shape[0] != 0) { - auto batch_shape_par = std::make_shared(ov::element::i32, - ov::Shape{out_batch_shape.size()}, - out_batch_shape.data()); - batch_shape_par->set_friendly_name("batchShape"); - eye_operation = - std::make_shared(rows_const, cols_const, diag_const, batch_shape_par, net_precision); - } else { - eye_operation = std::make_shared(rows_const, cols_const, diag_const, net_precision); - } - // Without this call the eye operation will be calculated by CPU and substituted by Constant operator - ov::pass::disable_constant_folding(eye_operation); - ov::ResultVector results{std::make_shared(eye_operation)}; - function = std::make_shared(results, ov::ParameterVector{}, "eye"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/fake_quantize.cpp b/src/tests/functional/shared_test_classes/src/single_layer/fake_quantize.cpp deleted file mode 100644 index 1ede979e6b242f..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/fake_quantize.cpp +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/fake_quantize.hpp" - -#include "common_test_utils/node_builders/fake_quantize.hpp" - -namespace LayerTestsDefinitions { - - -std::string FakeQuantizeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - fqSpecificParams fqParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::pair> config; - std::tie(fqParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice, config) = obj.param; - size_t levels; - std::vector constShape; - std::vector fqDirectArgs; - std::vector inputArg; - ov::op::AutoBroadcastSpec broadcast; - std::tie(levels, constShape, fqDirectArgs, inputArg, broadcast) = fqParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "CS=" << ov::test::utils::vec2str(constShape) << "_"; - result << "LEVELS=" << levels << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << 
outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - if (!config.first.empty()) { - result << "_targetConfig=" << config.first; - } - if (!fqDirectArgs.empty()) { - result << "_fqArgs=" << fqDirectArgs[0] << "_" << fqDirectArgs[1] << "_" << fqDirectArgs[2] << "_" << fqDirectArgs[3]; - } - if (inputArg.size() == 3) { - result << "_inputArg=" << inputArg[0] << "_" << inputArg[1] << "_" << inputArg[2]; - } - result << "_" << broadcast.m_type; - return result.str(); -} - -void FakeQuantizeLayerTest::SetUp() { - fqSpecificParams fqParams; - std::vector inputShape; - std::pair> config; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(fqParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice, config) = this->GetParam(); - InferenceEngine::SizeVector kernel, stride, dilation; - size_t levels; - std::vector constShape; - std::vector fqDirectArg; - std::vector inputArg; - ov::op::AutoBroadcastSpec broadcast; - std::tie(levels, constShape, fqDirectArg, inputArg, broadcast) = fqParams; - if (inputArg.size() == 3) { - inputDataMin = inputArg[0]; - inputDataMax = inputArg[1]; - inputDataResolution = inputArg[2]; - } - if (fqDirectArg.size() != 0) { - threshold = (fqDirectArg[3] - fqDirectArg[2]) / levels; - } - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - UpdateSeed(); - - std::shared_ptr fakeQNode; - if (fqDirectArg.empty()) { - int32_t ngraphSeed = seed; - if (NGRAPH_SEED != USE_CLOCK_TIME) { - ngraphSeed = NGRAPH_SEED; - } - std::cout << "\033[0;32m" << "[ ] " << "\033[0;0m" - << "ngraphSeed = " << ngraphSeed << std::endl; - fakeQNode = ov::test::utils::make_fake_quantize(params[0], ngPrc, levels, constShape, ngraphSeed); - } else { - fakeQNode = ov::test::utils::make_fake_quantize( - params[0], - ngPrc, - levels, - constShape, - {fqDirectArg[0]}, - {fqDirectArg[1]}, - {fqDirectArg[2]}, - {fqDirectArg[3]}); - } - auto fq = std::dynamic_pointer_cast(fakeQNode); - - ov::ResultVector results{std::make_shared(fq)}; - function = std::make_shared(results, params, "fakeQuantize"); - configuration = config.second; -} - -InferenceEngine::Blob::Ptr FakeQuantizeLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin, 1 / inputDataResolution, - seed); -} - -void FakeQuantizeLayerTest::UpdateSeed() { - if (BASE_SEED == USE_CLOCK_TIME) { - seed = std::chrono::system_clock::now().time_since_epoch().count(); - } else if (BASE_SEED == USE_INCREMENTAL_SEED) { - seed += 9999; - } else { - seed = BASE_SEED; - } - std::cout << "\033[0;32m" << "[ ] " << "\033[0;0m" - << "seed = " << seed << std::endl; -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp deleted file mode 100644 index 25d9dfcfcd4249..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/gather.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace LayerTestsDefinitions { - -void GatherLayerTestBase::SetUp(const 
gatherParamsTuple& params) { - int axis; - std::vector indices; - std::vector indicesShape; - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::tie(indices, indicesShape, axis, inputShape, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = params; - ASSERT_EQ(ov::shape_size(indicesShape), indices.size()) << "Indices vector size and provided indices shape doesn't fit each other"; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto indicesNode = ov::op::v0::Constant::create(ov::element::i64, ov::Shape(indicesShape), indices); - auto axisNode = ov::op::v0::Constant::create(ov::element::i64, ov::Shape({}), {axis}); - auto gather = std::make_shared(functionParams[0], indicesNode, axisNode); - ov::ResultVector results{std::make_shared(gather)}; - function = std::make_shared(results, functionParams, "gather"); -} - -std::string GatherLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - int axis; - std::vector indices; - std::vector indicesShape, inputShape; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::tie(indices, indicesShape, axis, inputShape, netPrecision, inPrc, outPrc, inLayout, outLayout, targetName) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "axis=" << axis << "_"; - result << "indices=" << ov::test::utils::vec2str(indices) << "_"; - result << "indicesShape=" << ov::test::utils::vec2str(indicesShape) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetName << "_"; - return result.str(); -} - -void GatherLayerTest::SetUp() { - GatherLayerTestBase::SetUp(GetParam()); -} - -std::string Gather7LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::tuple axis_batchIdx; - std::vector indices; - std::vector indicesShape, inputShape; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::tie(inputShape, indicesShape, axis_batchIdx, netPrecision, inPrc, outPrc, inLayout, outLayout, targetName) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "axis=" << std::get<0>(axis_batchIdx) << "_"; - result << "batchIdx=" << std::get<1>(axis_batchIdx) << "_"; - result << "indicesShape=" << ov::test::utils::vec2str(indicesShape) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetName << "_"; - return result.str(); -} - -void Gather7LayerTest::SetUp() { - std::tuple axis_batchIdx; - std::vector indicesShape; - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::tie(inputShape, indicesShape, axis_batchIdx, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = GetParam(); - int axis = std::get<0>(axis_batchIdx); - int batchIdx = std::get<1>(axis_batchIdx); - auto ngPrc = 
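// v7::Gather, constructed in this SetUp, adds the batch_dims argument the test
// parameterizes: with batch_dims=1 each batch row selects only from its own slice.
// Illustrative sketch (same API assumptions as above):
auto data7 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 5, 6});
auto idx7  = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2, 3}, {0, 1, 2, 4, 3, 2});
auto axis7 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1});
auto gather7 = std::make_shared<ov::op::v7::Gather>(data7, idx7, axis7, /*batch_dims=*/1);
// output shape {2, 3, 6}: indices {2, 3} consumed along axis 1, batch dim shared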
FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto indicesNode = ov::test::utils::deprecated::make_constant(ov::element::i64, indicesShape, {}, true, - inputShape[axis < 0 ? axis + inputShape.size() : axis] - 1, 0); - auto axisNode = ov::op::v0::Constant::create(ov::element::i64, ov::Shape({}), { axis }); - auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); - ov::ResultVector results{ std::make_shared(gather) }; - function = std::make_shared(results, functionParams, "gather"); -} - -std::string Gather8LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::tuple axis_batchIdx; - std::vector indices; - std::vector indicesShape, inputShape; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::tie(inputShape, indicesShape, axis_batchIdx, netPrecision, inPrc, outPrc, inLayout, outLayout, targetName) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "indicesShape=" << ov::test::utils::vec2str(indicesShape) << "_"; - result << "axis=" << std::get<0>(axis_batchIdx) << "_"; - result << "batchIdx=" << std::get<1>(axis_batchIdx) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetName << "_"; - return result.str(); -} - -void Gather8LayerTest::SetUp() { - std::tuple axis_batchIdx; - std::vector indicesShape; - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::tie(inputShape, indicesShape, axis_batchIdx, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = GetParam(); - int axis = std::get<0>(axis_batchIdx); - int batchIdx = std::get<1>(axis_batchIdx); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto indicesNode = ov::test::utils::deprecated::make_constant(ov::element::i64, indicesShape, {}, true, - inputShape[axis < 0 ? axis + inputShape.size() : axis] - 1, - -static_cast(inputShape[axis < 0 ? 
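// The lower bound being computed here is -dim because v8::Gather, unlike v7, accepts
// negative indices counted from the end of the axis. Illustrative sketch (same API
// assumptions as above):
auto data8 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{5, 4});
auto idx8  = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {-1, -5});  // rows 4 and 0
auto axis8 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
auto gather8 = std::make_shared<ov::op::v8::Gather>(data8, idx8, axis8, /*batch_dims=*/0);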
axis + inputShape.size() : axis])); - auto axisNode = ov::op::v0::Constant::create(ov::element::i64, ov::Shape({}), { axis }); - auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); - ov::ResultVector results{ std::make_shared(gather) }; - function = std::make_shared(results, functionParams, "gather"); -} - -std::string Gather8IndiceScalarLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::tuple axis_batchIdx; - std::vector indices; - std::vector indicesShape, inputShape; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::tie(inputShape, indicesShape, axis_batchIdx, netPrecision, inPrc, outPrc, inLayout, outLayout, targetName) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "indicesShape=" << ov::test::utils::vec2str(indicesShape) << "_"; - result << "axis=" << std::get<0>(axis_batchIdx) << "_"; - result << "batchIdx=" << std::get<1>(axis_batchIdx) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetName << "_"; - return result.str(); -} - -void Gather8IndiceScalarLayerTest::SetUp() { - std::tuple axis_batchIdx; - std::vector indicesShape; - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::tie(inputShape, indicesShape, axis_batchIdx, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = GetParam(); - int axis = std::get<0>(axis_batchIdx); - int batchIdx = std::get<1>(axis_batchIdx); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto indicesNode = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {inputShape[axis] - 1})->output(0); - - auto axisNode = ov::op::v0::Constant::create(ov::element::i64, ov::Shape({}), { axis }); - auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); - ov::ResultVector results{ std::make_shared(gather) }; - function = std::make_shared(results, functionParams, "gather"); -} - -std::string Gather8withIndicesDataLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - gather7ParamsTuple basicParams; - std::vector indicesData; - std::tie(basicParams, indicesData) = obj.param; - - std::tuple axis_batchIdx; - std::vector indices; - std::vector indicesShape, inputShape; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::tie(inputShape, indicesShape, axis_batchIdx, netPrecision, inPrc, outPrc, inLayout, outLayout, targetName) = basicParams; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "indicesShape=" << ov::test::utils::vec2str(indicesShape) << "_"; - result << "axis=" << std::get<0>(axis_batchIdx) << "_"; - result << "batchIdx=" << std::get<1>(axis_batchIdx) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetName << "_"; - - result << 
"indicesData=" << ov::test::utils::vec2str(indicesData) << "_"; - - return result.str(); -} - -void Gather8withIndicesDataLayerTest::SetUp() { - gather7ParamsTuple basicParams; - std::vector indicesData; - std::tie(basicParams, indicesData) = GetParam(); - - std::tuple axis_batchIdx; - std::vector indicesShape; - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::tie(inputShape, indicesShape, axis_batchIdx, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = basicParams; - int axis = std::get<0>(axis_batchIdx); - int batchIdx = std::get<1>(axis_batchIdx); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto indicesNode = ov::test::utils::deprecated::make_constant(ov::element::i64, indicesShape, indicesData); - auto axisNode = ov::op::v0::Constant::create(ov::element::i64, ov::Shape({}), { axis }); - auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx); - ov::ResultVector results{ std::make_shared(gather) }; - function = std::make_shared(results, functionParams, "gather"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp deleted file mode 100644 index ab3be417769f8e..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include -#include - -#include "ov_models/builders.hpp" -#include "shared_test_classes/single_layer/gather_elements.hpp" -#include "common_test_utils/ov_tensor_utils.hpp" - -namespace LayerTestsDefinitions { - -std::string GatherElementsLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::SizeVector dataShape, indicesShape; - InferenceEngine::Precision dPrecision, iPrecision; - int axis; - std::string device; - std::tie(dataShape, indicesShape, axis, dPrecision, iPrecision, device) = obj.param; - - std::ostringstream result; - result << "DS=" << ov::test::utils::vec2str(dataShape) << "_"; - result << "IS=" << ov::test::utils::vec2str(indicesShape) << "_"; - result << "Ax=" << axis << "_"; - result << "DP=" << dPrecision.name() << "_"; - result << "IP=" << iPrecision.name() << "_"; - result << "device=" << device; - - return result.str(); -} - -void GatherElementsLayerTest::SetUp() { - InferenceEngine::SizeVector dataShape, indicesShape; - InferenceEngine::Precision dPrecision, iPrecision; - int axis; - std::tie(dataShape, indicesShape, axis, dPrecision, iPrecision, targetDevice) = this->GetParam(); - outPrc = dPrecision; - - auto ngDPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(dPrecision); - auto ngIPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(iPrecision); - - ov::ParameterVector params {std::make_shared(ngDPrc, ov::Shape(dataShape))}; - - int posAxis = axis; - if (posAxis < 0) - posAxis += dataShape.size(); - const auto axisDim = dataShape[posAxis]; - - ov::test::utils::InputGenerateData in_data; - in_data.start_from = 0; - in_data.range = axisDim - 1; - auto indicesValues = ov::test::utils::create_and_fill_tensor(ov::element::i32, indicesShape, in_data); - auto indicesNode = std::make_shared(indicesValues); - - auto gather = std::make_shared(params[0], indicesNode, axis); - - 
ov::ResultVector results{std::make_shared(gather)}; - function = std::make_shared(results, params, "gatherEl"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather_nd.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather_nd.cpp deleted file mode 100644 index 29a63fc6787005..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/gather_nd.cpp +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/gather_nd.hpp" - -namespace LayerTestsDefinitions { - -std::string GatherNDLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::SizeVector dataShape, indicesShape; - InferenceEngine::Precision dPrecision, iPrecision; - int batchDims; - std::string device; - Config config; - GatherNDParamsSubset gatherArgsSubset; - std::tie(gatherArgsSubset, dPrecision, iPrecision, device, config) = obj.param; - std::tie(dataShape, indicesShape, batchDims) = gatherArgsSubset; - - std::ostringstream result; - result << "DS=" << ov::test::utils::vec2str(dataShape) << "_"; - result << "IS=" << ov::test::utils::vec2str(indicesShape) << "_"; - result << "BD=" << batchDims << "_"; - result << "DP=" << dPrecision.name() << "_"; - result << "IP=" << iPrecision.name() << "_"; - result << "device=" << device; - if (!config.empty()) { - result << "_config="; - for (const auto& cfg : config) { - result << "{" << cfg.first << ": " << cfg.second << "}"; - } - } - - return result.str(); -} - -void GatherNDLayerTest::SetUp() { - InferenceEngine::SizeVector dataShape, indicesShape; - InferenceEngine::Precision dPrecision, iPrecision; - int batchDims; - GatherNDParamsSubset gatherArgsSubset; - std::tie(gatherArgsSubset, dPrecision, iPrecision, targetDevice, configuration) = this->GetParam(); - std::tie(dataShape, indicesShape, batchDims) = gatherArgsSubset; - - auto ngDPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(dPrecision); - auto ngIPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(iPrecision); - - ov::ParameterVector params {std::make_shared(ngDPrc, ov::Shape(dataShape))}; - auto dataNode = params[0]; - auto gather = std::dynamic_pointer_cast( - ngraph::builder::makeGatherND(dataNode, indicesShape, ngIPrc, batchDims)); - ov::ResultVector results{std::make_shared(gather)}; - function = std::make_shared(results, params, "gatherND"); -} - - -std::string GatherND8LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - return GatherNDLayerTest::getTestCaseName(obj); -} - -void GatherND8LayerTest::SetUp() { - InferenceEngine::SizeVector dataShape, indicesShape; - InferenceEngine::Precision dPrecision, iPrecision; - int batchDims; - GatherNDParamsSubset gatherArgsSubset; - std::tie(gatherArgsSubset, dPrecision, iPrecision, targetDevice, configuration) = this->GetParam(); - std::tie(dataShape, indicesShape, batchDims) = gatherArgsSubset; - - auto ngDPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(dPrecision); - auto ngIPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(iPrecision); - - ov::ParameterVector params {std::make_shared(ngDPrc, ov::Shape(dataShape))}; - auto dataNode = params[0]; - auto gather = std::dynamic_pointer_cast( - ngraph::builder::makeGatherND8(dataNode, indicesShape, ngIPrc, batchDims)); - ov::ResultVector results{ std::make_shared(gather) }; - function = std::make_shared(results, params, "gatherND"); -} - -} // namespace 
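// GatherND (v8), used by the tests above, gathers slices addressed by full coordinate
// tuples; batch_dims leading dimensions are shared between data and indices. Sketch
// (same API assumptions as above, shapes illustrative):
#include "openvino/op/gather_nd.hpp"

auto nd_data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{3, 4, 5});
auto nd_idx  = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2, 2}, {0, 1, 2, 3});
auto gather_nd = std::make_shared<ov::op::v8::GatherND>(nd_data, nd_idx, /*batch_dims=*/0);
// each indices row {i, j} selects data[i][j], a vector of 5 values, so output is {2, 5}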
LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather_tree.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather_tree.cpp deleted file mode 100644 index d19b8d1fd29733..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/gather_tree.cpp +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/gather_tree.hpp" - -#include "common_test_utils/node_builders/constant.hpp" - -namespace LayerTestsDefinitions { -std::string GatherTreeLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector inputShape; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - ngraph::helpers::InputLayerType secondaryInputType; - std::string targetName; - - std::tie(inputShape, secondaryInputType, netPrecision, inPrc, outPrc, inLayout, outLayout, targetName) = obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "secondaryInputType=" << secondaryInputType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetName; - return result.str(); -} - -void GatherTreeLayerTest::SetUp() { - std::vector inputShape; - InferenceEngine::Precision netPrecision; - ngraph::helpers::InputLayerType secondaryInputType; - - std::tie(inputShape, secondaryInputType, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - std::shared_ptr inp2; - std::shared_ptr inp3; - std::shared_ptr inp4; - - ov::ParameterVector paramsIn {std::make_shared(ngPrc, ov::Shape(inputShape))}; - if (ngraph::helpers::InputLayerType::PARAMETER == secondaryInputType) { - ov::ParameterVector paramsSecond{std::make_shared(ngPrc, ov::Shape(inputShape)), - std::make_shared(ngPrc, ov::Shape{inputShape.at(1)}), - std::make_shared(ngPrc, ov::Shape())}; - paramsIn.insert(paramsIn.end(), paramsSecond.begin(), paramsSecond.end()); - - inp2 = paramsIn.at(1); - inp3 = paramsIn.at(2); - inp4 = paramsIn.at(3); - } else if (ngraph::helpers::InputLayerType::CONSTANT == secondaryInputType) { - auto maxBeamIndex = inputShape.at(2) - 1; - - inp2 = ov::test::utils::deprecated::make_constant(ngPrc, inputShape, {}, true, maxBeamIndex); - inp3 = ov::test::utils::deprecated::make_constant(ngPrc, {inputShape.at(1)}, {}, true, maxBeamIndex); - inp4 = ov::test::utils::deprecated::make_constant(ngPrc, {}, {}, true, maxBeamIndex); - } else { - throw std::runtime_error("Unsupported inputType"); - } - - auto operationResult = std::make_shared(paramsIn.front(), inp2, inp3, inp4); - - ov::ResultVector results{std::make_shared(operationResult)}; - function = std::make_shared(results, paramsIn, "GatherTree"); -} - -InferenceEngine::Blob::Ptr GatherTreeLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - auto& shape = function->get_parameters()[0]->get_output_shape(0); - auto& vecDims = info.getTensorDesc().getDims(); - - auto maxBeamIndx = shape.at(2) - 1; - - if (vecDims.size() == 1 || vecDims.size() == 0) { //max_seq_len vector || end_token - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 
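// v1::GatherTree, built in the SetUp above, finalizes beam search: it walks parent_idx
// backwards from the last step to reconstruct complete beams. Sketch with hypothetical
// [max_time, batch, beam_width] shapes (OpenVINO 2023.x C++ API assumed; the deleted
// test likewise fed all four inputs in one precision):
#include "openvino/op/gather_tree.hpp"

auto step_ids    = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{10, 1, 4});
auto parent_idx  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{10, 1, 4});
auto max_seq_len = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1});
auto end_token   = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{});
auto tree = std::make_shared<ov::op::v1::GatherTree>(step_ids, parent_idx, max_seq_len, end_token);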
maxBeamIndx, maxBeamIndx / 2); - } - - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), maxBeamIndx); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/generate_proposals.cpp b/src/tests/functional/shared_test_classes/src/single_layer/generate_proposals.cpp deleted file mode 100644 index 9b1c7fafa3a4fa..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/generate_proposals.cpp +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/generate_proposals.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/ov_tensor_utils.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -namespace { -std::ostream& operator <<( - std::ostream& ss, - const ov::op::v9::GenerateProposals::Attributes& attributes) { - ss << "score_threshold=" << attributes.min_size << "_"; - ss << "nms_threshold=" << attributes.nms_threshold << "_"; - ss << "post_nms_count=" << attributes.post_nms_count << "_"; - ss << "pre_nms_count=" << attributes.pre_nms_count; - ss << "normalized=" << attributes.normalized; - ss << "nms_eta=" << attributes.nms_eta; - return ss; -} -} // namespace - -std::string GenerateProposalsLayerTest::getTestCaseName( - const testing::TestParamInfo& obj) { - std::vector inputShapes; - ov::op::v9::GenerateProposals::Attributes attributes; - std::pair> inputTensors; - ElementType netPrecision; - ElementType roiNumPrecision; - std::string targetName; - std::tie( - inputShapes, - attributes.min_size, - attributes.nms_threshold, - attributes.post_nms_count, - attributes.pre_nms_count, - attributes.normalized, - inputTensors, - netPrecision, - roiNumPrecision, - targetName) = obj.param; - - std::ostringstream result; - using ov::test::operator<<; - result << "im_info=" << inputShapes[0] << "_"; - result << "anchors=" << inputShapes[1] << "_"; - result << "deltas=" << inputShapes[2] << "_"; - result << "scores=" << inputShapes[3] << "_"; - - using ov::test::subgraph::operator<<; - result << "attributes={" << attributes << "}_"; - result << "inputTensors=" << inputTensors.first << "_"; - result << "netPRC=" << netPrecision << "_"; - result << "roiNumPRC=" << roiNumPrecision << "_"; - result << "trgDev=" << targetName; - return result.str(); -} - -void GenerateProposalsLayerTest::SetUp() { - std::vector inputShapes; - ov::op::v9::GenerateProposals::Attributes attributes; - std::pair> inputTensors; - ElementType netPrecision; - ElementType roiNumPrecision; - std::string targetName; - std::tie( - inputShapes, - attributes.min_size, - attributes.nms_threshold, - attributes.post_nms_count, - attributes.pre_nms_count, - attributes.normalized, - inputTensors, - netPrecision, - roiNumPrecision, - targetName) = this->GetParam(); - - inType = outType = netPrecision; - targetDevice = targetName; - if (targetDevice == ov::test::utils::DEVICE_GPU) { - if (netPrecision == element::Type_t::f16) { - abs_threshold = 0.2; - } else { - abs_threshold = 0.00009; - } - } - - init_input_shapes(inputShapes); - - ov::ParameterVector params; - for (auto&& shape : inputDynamicShapes) - params.push_back(std::make_shared(netPrecision, shape)); - - auto generateProposals = std::make_shared( - params[0], // im_info - params[1], // anchors - params[2], // deltas - params[3], // scores - attributes, - roiNumPrecision); - function = std::make_shared( 
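// v9::GenerateProposals, being wired up here, consumes im_info, anchors, deltas and
// scores and emits three outputs: rois, roi scores and per-image roi counts (the
// roi-count element type is the ctor's last argument). Illustrative sketch for one
// image with 3 anchors per cell on a 2x6 feature map (OpenVINO 2023.x C++ API assumed):
#include "openvino/op/generate_proposals.hpp"

ov::op::v9::GenerateProposals::Attributes gp_attrs;
gp_attrs.min_size = 0.0f;
gp_attrs.nms_threshold = 0.7f;
gp_attrs.pre_nms_count = 1000;
gp_attrs.post_nms_count = 100;
gp_attrs.normalized = true;
auto im_info = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
auto anchors = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 6, 3, 4});
auto deltas  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 12, 2, 6});
auto scores  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 2, 6});
auto gp = std::make_shared<ov::op::v9::GenerateProposals>(im_info, anchors, deltas, scores,
                                                          gp_attrs, ov::element::i32);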
- ov::OutputVector{generateProposals->output(0), - generateProposals->output(1), - generateProposals->output(2)}, - "GenerateProposals"); -} - -void GenerateProposalsLayerTest::generate_inputs(const std::vector& targetInputStaticShapes) { - auto inputTensors = std::get<6>(GetParam()); - - inputs.clear(); - const auto& funcInputs = function->inputs(); - for (auto i = 0ul; i < funcInputs.size(); ++i) { - if (targetInputStaticShapes[i] != inputTensors.second[i].get_shape()) { - OPENVINO_THROW("input shape is different from tensor shape"); - } - - inputs.insert({funcInputs[i].get_node_shared_ptr(), inputTensors.second[i]}); - } -} - -void GenerateProposalsLayerTest::compare(const std::vector& expected, - const std::vector& actual) { - if (targetDevice != ov::test::utils::DEVICE_GPU) { - SubgraphBaseTest::compare(expected, actual); - return; - } - - const auto outputsNum = expected.size(); - ASSERT_EQ(outputsNum, 3); - ASSERT_EQ(outputsNum, actual.size()); - ASSERT_EQ(outputsNum, function->get_results().size()); - - // actual outputs 0 (rois) and 1 (roi_scores) may be padded with zeros - for (size_t i = 0; i < 2; ++i) { - const auto expectedNumRois = expected[i].get_shape()[0]; - const auto actualNumRois = actual[i].get_shape()[0]; - ASSERT_LE(expectedNumRois, actualNumRois); - - const auto actualBuffer = static_cast(actual[i].data()); - const auto expectedBuffer = static_cast(expected[i].data()); - const auto outputSize = i == 0 ? 4 : 1; - - if (outType == element::Type_t::f32) { - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer), - reinterpret_cast(actualBuffer), - expectedNumRois * outputSize, - rel_threshold, - abs_threshold); - } else { - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer), - reinterpret_cast(actualBuffer), - expectedNumRois * outputSize, - rel_threshold, - abs_threshold); - } - - if (expectedNumRois < actualNumRois) { - if (outType == element::Type_t::f32) { - const auto fBuffer = static_cast(actual[i].data()); - for (size_t j = expectedNumRois * outputSize; j < actualNumRois * outputSize; ++j) { - ASSERT_TRUE(fBuffer[j] == 0.0f) - << "Expected 0.0, actual: " << fBuffer[j] << " at index: " << j << ", output: " << i; - } - } else { - const float16 zero{0}; - const auto fBuffer = static_cast(actual[i].data()); - for (size_t j = expectedNumRois * outputSize; j < actualNumRois * outputSize; ++j) { - ASSERT_TRUE(fBuffer[j] == zero) - << "Expected 0.0, actual: " << fBuffer[j] << " at index: " << j << ", output: " << i; - } - } - } - } - - // output 2 - rois_num - ov::test::utils::compare(expected[2], actual[2], abs_threshold, rel_threshold); -} -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_layer/grid_sample.cpp b/src/tests/functional/shared_test_classes/src/single_layer/grid_sample.cpp deleted file mode 100644 index baf9b7255dfaa5..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/grid_sample.cpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/grid_sample.hpp" - -namespace LayerTestsDefinitions { - -std::string GridSampleLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::SizeVector dataShape; - InferenceEngine::SizeVector gridShape; - decltype(ov::op::v9::GridSample::Attributes::align_corners) alignCorners; - decltype(ov::op::v9::GridSample::Attributes::mode) mode; - 
decltype(ov::op::v9::GridSample::Attributes::padding_mode) paddingMode; - InferenceEngine::Precision inDataPrc; - InferenceEngine::Precision inGridPrc; - std::string targetDevice; - - std::tie(dataShape, gridShape, alignCorners, mode, paddingMode, inDataPrc, inGridPrc, targetDevice) = obj.param; - - std::ostringstream result; - result << "DS=" << ov::test::utils::vec2str(dataShape) << "_"; - result << "GS=" << ov::test::utils::vec2str(gridShape) << "_"; - result << "AlignCorners=" << alignCorners << "_"; - result << "Mode=" << ov::as_string(mode) << "_"; - result << "PaddingMode=" << ov::as_string(paddingMode) << "_"; - result << "inDataPrc=" << inDataPrc.name() << "_"; - result << "inGridPrc=" << inGridPrc.name() << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void GridSampleLayerTest::SetUp() { - InferenceEngine::SizeVector dataShape; - InferenceEngine::SizeVector gridShape; - decltype(ov::op::v9::GridSample::Attributes::align_corners) alignCorners; - decltype(ov::op::v9::GridSample::Attributes::mode) mode; - decltype(ov::op::v9::GridSample::Attributes::padding_mode) paddingMode; - InferenceEngine::Precision inDataPrc; - InferenceEngine::Precision inGridPrc; - - std::tie(dataShape, gridShape, alignCorners, mode, paddingMode, inDataPrc, inGridPrc, targetDevice) = - this->GetParam(); - - auto ngInDataPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inDataPrc); - auto ngInGridPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inGridPrc); - auto data = std::make_shared(ngInDataPrc, ov::Shape(dataShape)); - auto grid = std::make_shared(ngInGridPrc, ov::Shape(gridShape)); - auto gridSample = std::make_shared( - data, - grid, - ov::op::v9::GridSample::Attributes(alignCorners, mode, paddingMode)); - function = std::make_shared(std::make_shared(gridSample), - ov::ParameterVector{data, grid}, - "GridSample"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/grn.cpp b/src/tests/functional/shared_test_classes/src/single_layer/grn.cpp deleted file mode 100644 index 32e880b4d5438f..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/grn.cpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/grn.hpp" - -namespace LayerTestsDefinitions { -std::string GrnLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - float bias; - std::tie(netPrecision, - inPrc, outPrc, inLayout, outLayout, - inputShapes, - bias, - targetDevice) = obj.param; - - std::ostringstream result; - const char separator = '_'; - - result << "IS=" << ov::test::utils::vec2str(inputShapes) << separator; - result << "netPRC=" << netPrecision.name() << separator; - result << "inPRC=" << inPrc.name() << separator; - result << "outPRC=" << outPrc.name() << separator; - result << "inL=" << inLayout << separator; - result << "outL=" << outLayout << separator; - result << "bias=" << bias << separator; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void GrnLayerTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::tie(netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, bias, targetDevice) = GetParam(); - auto ngPrc = 
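// v9::GridSample, set up in the section above, samples data at normalized [-1, 1] grid
// coordinates, analogous to torch.nn.functional.grid_sample. Minimal sketch (OpenVINO
// 2023.x C++ API assumed; shapes illustrative):
#include "openvino/op/grid_sample.hpp"

auto gs_data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
auto gs_grid = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4, 4, 2});
ov::op::v9::GridSample::Attributes gs_attrs{/*align_corners=*/false,
                                            ov::op::v9::GridSample::InterpolationMode::BILINEAR,
                                            ov::op::v9::GridSample::PaddingMode::ZEROS};
auto gs = std::make_shared<ov::op::v9::GridSample>(gs_data, gs_grid, gs_attrs);  // output: {1, 3, 4, 4}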
FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector paramsIn {std::make_shared(ngPrc, ov::Shape(inputShapes))}; - auto grn = std::make_shared(paramsIn[0], bias); - ov::ResultVector results{ std::make_shared(grn) }; - function = std::make_shared(results, paramsIn, "Grn"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp deleted file mode 100644 index 34a1145899e2bb..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/group_convolution.hpp" - -namespace LayerTestsDefinitions { - -std::string GroupConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - groupConvSpecificParams groupConvParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::tie(groupConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd; - size_t convOutChannels, numGroups; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "K" << ov::test::utils::vec2str(kernel) << "_"; - result << "S" << ov::test::utils::vec2str(stride) << "_"; - result << "PB" << ov::test::utils::vec2str(padBegin) << "_"; - result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - result << "D=" << ov::test::utils::vec2str(dilation) << "_"; - result << "O=" << convOutChannels << "_"; - result << "G=" << numGroups << "_"; - result << "AP=" << padType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void GroupConvolutionLayerTest::SetUp() { - groupConvSpecificParams groupConvParams; - std::vector inputShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(groupConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd; - size_t convOutChannels, numGroups; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvParams; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto groupConv = std::dynamic_pointer_cast( - ngraph::builder::makeGroupConvolution(params[0], ngPrc, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, numGroups)); - ov::ResultVector results{std::make_shared(groupConv)}; - function = std::make_shared(results, params, "groupConvolution"); -} -} // namespace LayerTestsDefinitions diff --git 
a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp deleted file mode 100644 index f62796ea09773b..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/group_convolution_backprop_data.hpp" - -namespace LayerTestsDefinitions { - -// DEPRECATED, remove this old API when KMB (#58495) and ARM (#58496) plugins are migrated to new API - -std::string GroupConvBackpropDataLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - groupConvBackpropDataSpecificParams groupConvBackpropDataParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::tie(groupConvBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd; - size_t convOutChannels, numGroups; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvBackpropDataParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "K" << ov::test::utils::vec2str(kernel) << "_"; - result << "S" << ov::test::utils::vec2str(stride) << "_"; - result << "PB" << ov::test::utils::vec2str(padBegin) << "_"; - result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - result << "D=" << ov::test::utils::vec2str(dilation) << "_"; - result << "O=" << convOutChannels << "_"; - result << "G=" << numGroups << "_"; - result << "AP=" << padType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void GroupConvBackpropDataLayerTest::SetUp() { - groupConvBackpropDataSpecificParams groupConvBackpropDataParams; - std::vector inputShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(groupConvBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd; - size_t convOutChannels, numGroups; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvBackpropDataParams; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto groupConvBackpropData = std::dynamic_pointer_cast( - ngraph::builder::makeGroupConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, numGroups)); - ov::ResultVector results{std::make_shared(groupConvBackpropData)}; - function = std::make_shared(results, params, "GroupConvolutionBackpropData"); -} - -std::string GroupConvBackpropLayerTest::getTestCaseName(testing::TestParamInfo obj) { - 
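// The builder call in the SetUp above creates a v1::GroupConvolutionBackpropData, i.e.
// a transposed group convolution. Constructed directly, with the filter layout I
// believe this op expects, [GROUPS, C_IN/GROUPS, C_OUT/GROUPS, kH, kW] (OpenVINO
// 2023.x C++ API assumed; sizes illustrative):
#include "openvino/op/group_conv.hpp"

auto in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4, 10, 10});
auto filters = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{2, 2, 2, 3, 3},
                                            std::vector<float>(2 * 2 * 2 * 3 * 3, 0.1f));
auto deconv = std::make_shared<ov::op::v1::GroupConvolutionBackpropData>(
    in, filters, ov::Strides{1, 1}, ov::CoordinateDiff{0, 0}, ov::CoordinateDiff{0, 0},
    ov::Strides{1, 1}, ov::op::PadType::EXPLICIT);  // output: {1, 4, 12, 12}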
groupConvBackpropSpecificParams groupConvBackpropDataParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes, outputShapes; - std::string targetDevice; - std::tie(groupConvBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, outputShapes, targetDevice) = obj.param; - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd, outPadding; - size_t convOutChannels, numGroups; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType, outPadding) = groupConvBackpropDataParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "OS=" << ov::test::utils::vec2str(outputShapes) << "_"; - result << "K" << ov::test::utils::vec2str(kernel) << "_"; - result << "S" << ov::test::utils::vec2str(stride) << "_"; - result << "PB" << ov::test::utils::vec2str(padBegin) << "_"; - result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - result << "D=" << ov::test::utils::vec2str(dilation) << "_"; - result << "OP=" << ov::test::utils::vec2str(outPadding) << "_"; - result << "O=" << convOutChannels << "_"; - result << "G=" << numGroups << "_"; - result << "AP=" << padType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void GroupConvBackpropLayerTest::SetUp() { - groupConvBackpropSpecificParams groupConvBackpropDataParams; - std::vector inputShape, outputShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(groupConvBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, outputShape, targetDevice) = this->GetParam(); - ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector padBegin, padEnd, outPadding; - size_t convOutChannels, numGroups; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType, outPadding) = groupConvBackpropDataParams; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - std::shared_ptr groupConvBackpropData; - if (!outputShape.empty()) { - auto outShape = ov::op::v0::Constant::create(ov::element::i64, {outputShape.size()}, outputShape); - groupConvBackpropData = std::dynamic_pointer_cast( - ngraph::builder::makeGroupConvolutionBackpropData(params[0], outShape, ngPrc, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, numGroups, false, outPadding)); - } else { - groupConvBackpropData = std::dynamic_pointer_cast( - ngraph::builder::makeGroupConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, numGroups, false, outPadding)); - } - ov::ResultVector results{std::make_shared(groupConvBackpropData)}; - function = std::make_shared(results, params, "GroupConvolutionBackpropData"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gru_cell.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gru_cell.cpp deleted file mode 100644 index 22984892495c17..00000000000000 --- 
a/src/tests/functional/shared_test_classes/src/single_layer/gru_cell.cpp +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include "shared_test_classes/single_layer/gru_cell.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace LayerTestsDefinitions { - -using ngraph::helpers::InputLayerType; - -std::string GRUCellTest::getTestCaseName(const testing::TestParamInfo &obj) { - bool should_decompose; - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - bool linear_before_reset; - std::vector> inputShapes; - InputLayerType WType; - InputLayerType RType; - InputLayerType BType; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(should_decompose, batch, hidden_size, input_size, activations, clip, - linear_before_reset, WType, RType, BType, netPrecision, targetDevice) = obj.param; - inputShapes = { - {{batch, input_size}, {batch, hidden_size}, {3 * hidden_size, input_size}, - {3 * hidden_size, hidden_size}, {(linear_before_reset? 4 : 3) * hidden_size}}, - }; - std::ostringstream result; - result << "decomposition" << should_decompose << "_"; - result << "batch=" << batch << "_"; - result << "hidden_size=" << hidden_size << "_"; - result << "input_size=" << input_size << "_"; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "activations=" << ov::test::utils::vec2str(activations) << "_"; - result << "clip=" << clip << "_"; - result << "linear_before_reset=" << linear_before_reset << "_"; - result << "WType=" << WType << "_"; - result << "RType=" << RType << "_"; - result << "BType=" << BType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice << "_"; - return result.str(); -} - -void GRUCellTest::SetUp() { - bool should_decompose; - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - bool linear_before_reset; - InputLayerType WType; - InputLayerType RType; - InputLayerType BType; - InferenceEngine::Precision netPrecision; - std::tie(should_decompose, batch, hidden_size, input_size, activations, clip, linear_before_reset, - WType, RType, BType, netPrecision, targetDevice) = this->GetParam(); - - std::vector> inputShapes = { - {{batch, input_size}, {batch, hidden_size}, {3 * hidden_size, input_size}, - {3 * hidden_size, hidden_size}, {(linear_before_reset? 
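// The ternary being evaluated here sizes the bias: GRU fuses 3 gates, so W is [3*H, I]
// and R is [3*H, H], while B grows to [4*H] when linear_before_reset adds a separate
// bias for the hidden-state contribution of the reset gate. Direct construction
// (OpenVINO 2023.x C++ API assumed; zero weights purely illustrative):
#include "openvino/op/gru_cell.hpp"

const size_t I = 10, H = 32, N = 2;  // input size, hidden size, batch
auto X   = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{N, I});
auto H_t = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{N, H});
auto W  = ov::op::v0::Constant::create(ov::element::f32, {3 * H, I}, std::vector<float>(3 * H * I, 0.f));
auto R  = ov::op::v0::Constant::create(ov::element::f32, {3 * H, H}, std::vector<float>(3 * H * H, 0.f));
auto Bb = ov::op::v0::Constant::create(ov::element::f32, {4 * H}, std::vector<float>(4 * H, 0.f));
auto cell = std::make_shared<ov::op::v3::GRUCell>(X, H_t, W, R, Bb, H,
    std::vector<std::string>{"sigmoid", "tanh"}, std::vector<float>{}, std::vector<float>{},
    /*clip=*/0.f, /*linear_before_reset=*/true);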
4 : 3) * hidden_size}}, - }; - std::vector WRB = {inputShapes[2], inputShapes[3], inputShapes[4]}; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; - - std::shared_ptr W; - if (WType == InputLayerType::PARAMETER) { - const auto param = std::make_shared(ngPrc, WRB[0]); - W = param; - params.push_back(param); - } else { - W = ov::test::utils::deprecated::make_constant(ngPrc, WRB[0], {}, true); - } - - std::shared_ptr R; - if (RType == InputLayerType::PARAMETER) { - const auto param = std::make_shared(ngPrc, WRB[1]); - R = param; - params.push_back(param); - } else { - R = ov::test::utils::deprecated::make_constant(ngPrc, WRB[1], {}, true); - } - - std::shared_ptr B; - if (BType == InputLayerType::PARAMETER) { - const auto param = std::make_shared(ngPrc, WRB[2]); - B = param; - params.push_back(param); - } else { - B = ov::test::utils::deprecated::make_constant(ngPrc, WRB[2], {}, true); - } - - auto gru_cell = std::make_shared(params[0], params[1], W, R, B, hidden_size, activations, - activations_alpha, activations_beta, clip, - linear_before_reset); - ov::ResultVector results{std::make_shared(gru_cell->output(0))}; - function = std::make_shared(results, params, "gru_cell"); - if (should_decompose) { - ov::pass::Manager m; - m.register_pass(); - m.run_passes(function); - } -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp deleted file mode 100644 index 42360e1ec06465..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/gru_sequence.hpp" -#include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" -#include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" -#include "common_test_utils/ov_tensor_utils.hpp" -#include "common_test_utils/test_enums.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace LayerTestsDefinitions { - - using ngraph::helpers::InputLayerType; - - std::string GRUSequenceTest::getTestCaseName(const testing::TestParamInfo &obj) { - ngraph::helpers::SequenceTestsMode mode; - size_t seq_lengths; - size_t batch; - size_t hidden_size; - size_t input_size = 10; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - bool linear_before_reset; - ov::op::RecurrentSequenceDirection direction; - InputLayerType WRBType; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(mode, seq_lengths, batch, hidden_size, activations, clip, linear_before_reset, direction, WRBType, - netPrecision, targetDevice) = obj.param; - std::vector> inputShapes = { - {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {3 * hidden_size, input_size}, - {3 * hidden_size, hidden_size}, {(linear_before_reset ? 
4 : 3) * hidden_size}}, - }; - std::ostringstream result; - result << "mode=" << mode << "_"; - result << "seq_lengths=" << seq_lengths << "_"; - result << "batch=" << batch << "_"; - result << "hidden_size=" << hidden_size << "_"; - result << "input_size=" << input_size << "_"; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "activations=" << ov::test::utils::vec2str(activations) << "_"; - result << "direction=" << direction << "_"; - result << "clip=" << clip << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice << "_"; - return result.str(); - } - - void GRUSequenceTest::SetUp() { - using namespace ngraph::helpers; - size_t seq_lengths; - size_t batch; - size_t hidden_size; - size_t input_size = 10; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - bool linear_before_reset; - ov::op::RecurrentSequenceDirection direction; - InputLayerType WRBType; - InferenceEngine::Precision netPrecision; - std::tie(m_mode, seq_lengths, batch, hidden_size, activations, clip, linear_before_reset, direction, WRBType, - netPrecision, targetDevice) = this->GetParam(); - size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; - std::vector inputShapes = { - {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch}, - {num_directions, 3 * hidden_size, input_size}, {num_directions, 3 * hidden_size, hidden_size}, - {num_directions, (linear_before_reset ? 4 : 3) * hidden_size}}, - }; - m_max_seq_len = seq_lengths; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; - - const auto& W_shape = inputShapes[3]; - const auto& R_shape = inputShapes[4]; - const auto& B_shape = inputShapes[5]; - - std::shared_ptr seq_lengths_node; - if (m_mode == SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM || - m_mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM || - m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM) { - auto param = std::make_shared(ov::element::i64, ov::Shape(inputShapes[2])); - param->set_friendly_name("seq_lengths"); - params.push_back(param); - seq_lengths_node = param; - } else if (m_mode == ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST || - m_mode == ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST) { - seq_lengths_node = ov::test::utils::deprecated::make_constant(ov::element::i64, inputShapes[2], {}, true, - static_cast(seq_lengths), 0.f); - } else { - std::vector lengths(batch, seq_lengths); - seq_lengths_node = ov::test::utils::deprecated::make_constant(ov::element::i64, inputShapes[2], lengths, false); - } - - std::shared_ptr W, R, B; - if (WRBType == InputLayerType::PARAMETER) { - const auto W_param = std::make_shared(ngPrc, W_shape); - const auto R_param = std::make_shared(ngPrc, R_shape); - const auto B_param = std::make_shared(ngPrc, B_shape); - W = W_param; - R = R_param; - B = B_param; - params.push_back(W_param); - params.push_back(R_param); - params.push_back(B_param); - } else { - W = ov::test::utils::deprecated::make_constant(ngPrc, W_shape, {}, true); - R = ov::test::utils::deprecated::make_constant(ngPrc, R_shape, {}, true); - B = ov::test::utils::deprecated::make_constant(ngPrc, B_shape, {}, true); - } - - auto gru_sequence = std::make_shared(params[0], params[1], 
seq_lengths_node, W, R, B, hidden_size, direction, - activations, activations_alpha, activations_beta, clip, linear_before_reset); - ov::ResultVector results{std::make_shared(gru_sequence->output(0)), - std::make_shared(gru_sequence->output(1))}; - function = std::make_shared(results, params, "gru_sequence"); - bool is_pure_sequence = (m_mode == SequenceTestsMode::PURE_SEQ || - m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM || - m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST); - if (!is_pure_sequence) { - ov::pass::Manager manager; - if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL) - manager.register_pass(); - manager.register_pass(); - manager.run_passes(function); - bool ti_found = is_tensor_iterator_exist(function); - EXPECT_EQ(ti_found, true); - } else { - bool ti_found = is_tensor_iterator_exist(function); - EXPECT_EQ(ti_found, false); - } - } - - void GRUSequenceTest::GenerateInputs() { - inputs.clear(); - for (const auto &input : executableNetwork.GetInputsInfo()) { - const auto &info = input.second; - auto blob = GenerateInput(*info); - if (input.first == "seq_lengths") { - blob = FuncTestUtils::createAndFillBlob(info->getTensorDesc(), m_max_seq_len, 0); - } - inputs.push_back(blob); - } - } -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp b/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp deleted file mode 100644 index 284357ba8b8f0e..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/interpolate.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -using ngraph::helpers::operator<<; - -namespace LayerTestsDefinitions { - -std::string InterpolateLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InterpolateSpecificParams interpolateParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes, targetShapes; - std::string targetDevice; - std::map additional_config; - std::tie(interpolateParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetShapes, targetDevice, additional_config) = obj.param; - std::vector padBegin, padEnd; - std::vector axes; - std::vector scales; - bool antialias; - ov::op::v4::Interpolate::InterpolateMode mode; - ov::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; - ov::op::v4::Interpolate::CoordinateTransformMode coordinateTransformMode; - ov::op::v4::Interpolate::NearestMode nearestMode; - double cubeCoef; - std::tie(mode, shapeCalcMode, coordinateTransformMode, nearestMode, antialias, padBegin, padEnd, cubeCoef, axes, scales) = interpolateParams; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "TS=" << ov::test::utils::vec2str(targetShapes) << "_"; - result << "InterpolateMode=" << mode << "_"; - result << "ShapeCalcMode=" << shapeCalcMode << "_"; - result << "CoordinateTransformMode=" << coordinateTransformMode << "_"; - result << "NearestMode=" << nearestMode << "_"; - result << "CubeCoef=" << cubeCoef << "_"; - result << "Antialias=" << antialias << "_"; - result << "PB=" << ov::test::utils::vec2str(padBegin) << "_"; - result << "PE=" << 
ov::test::utils::vec2str(padEnd) << "_"; - result << "Axes=" << ov::test::utils::vec2str(axes) << "_"; - result << "Scales=" << ov::test::utils::vec2str(scales) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void InterpolateLayerTest::SetUp() { - InterpolateSpecificParams interpolateParams; - std::vector inputShape, targetShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::map additional_config; - std::tie(interpolateParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetShape, targetDevice, additional_config) = this->GetParam(); - std::vector padBegin, padEnd; - std::vector axes; - std::vector scales; - bool antialias; - ov::op::v4::Interpolate::InterpolateMode mode; - ov::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; - ov::op::v4::Interpolate::CoordinateTransformMode coordinateTransformMode; - ov::op::v4::Interpolate::NearestMode nearestMode; - - configuration.insert(additional_config.begin(), additional_config.end()); - - double cubeCoef; - std::tie(mode, shapeCalcMode, coordinateTransformMode, nearestMode, antialias, padBegin, padEnd, cubeCoef, axes, scales) = interpolateParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto sizesConst = ov::op::v0::Constant(ov::element::Type_t::i64, {targetShape.size()}, targetShape); - auto sizesInput = std::make_shared(sizesConst); - - auto scales_const = ov::op::v0::Constant(ov::element::Type_t::f32, {scales.size()}, scales); - auto scalesInput = std::make_shared(scales_const); - - ov::op::v4::Interpolate::InterpolateAttrs interpolateAttributes{mode, shapeCalcMode, padBegin, - padEnd, coordinateTransformMode, nearestMode, antialias, cubeCoef}; - - std::shared_ptr interpolate; - if (axes.empty()) { - interpolate = std::make_shared(params[0], - sizesInput, - scalesInput, - interpolateAttributes); - } else { - auto axesConst = ov::op::v0::Constant(ov::element::Type_t::i64, {axes.size()}, axes); - auto axesInput = std::make_shared(axesConst); - - interpolate = std::make_shared(params[0], - sizesInput, - scalesInput, - axesInput, - interpolateAttributes); - } - const ov::ResultVector results{std::make_shared(interpolate)}; - function = std::make_shared(results, params, "interpolate"); -} - -//Interpolate-1 - -std::string Interpolate1LayerTest::getTestCaseName(testing::TestParamInfo obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision dataPrecision; - InferenceEngine::Layout dataLayout; - InferenceEngine::SizeVector inputShapes, targetShapes; - std::string mode; - ov::AxisSet axes; - bool antialias; - std::vector pads; - std::string targetDevice; - std::tie(netPrecision, dataPrecision, dataLayout, inputShapes, targetShapes, - mode, axes, antialias, pads, targetDevice) = obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "TS=" << ov::test::utils::vec2str(targetShapes) << "_"; - result << "InterpolateMode=" << mode << "_"; - result << "Antialias=" << antialias << "_"; - result << "PB=" << ov::test::utils::vec2str(pads) << "_"; - result << "PE=" << ov::test::utils::vec2str(pads) << "_"; - result << "Axes=" << 
ov::test::utils::vec2str(axes.to_vector()) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "PRC=" << dataPrecision.name() << "_"; - result << "Layout=" << dataLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void Interpolate1LayerTest::SetUp() { - std::vector inputShape, targetShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - InferenceEngine::Precision dataPrecision; - InferenceEngine::Layout dataLayout; - std::string mode; - ov::AxisSet axes; - bool antialias; - std::vector pads; - std::tie(netPrecision, dataPrecision, dataLayout, inputShape, targetShape, - mode, axes, antialias, pads, targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto sizesConst = ov::op::v0::Constant(ov::element::Type_t::i64, {targetShape.size()}, targetShape); - auto sizesInput = std::make_shared(sizesConst); - - bool align_corners = true; - - ov::op::v0::Interpolate::Attributes interpolateAttributes; - interpolateAttributes.axes = axes; - interpolateAttributes.mode = mode; - interpolateAttributes.align_corners = align_corners; - interpolateAttributes.antialias = antialias; - interpolateAttributes.pads_begin = pads; - interpolateAttributes.pads_end = pads; - - auto interpolate = std::make_shared(params[0], sizesInput, interpolateAttributes); - - const ov::ResultVector results{std::make_shared(interpolate)}; - function = std::make_shared(results, params, "interpolate"); -} - -namespace v11 { - -std::string InterpolateLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InterpolateSpecificParams interpolateParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes, targetShapes; - std::string targetDevice; - std::map additional_config; - std::tie(interpolateParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetShapes, targetDevice, additional_config) = obj.param; - std::vector padBegin, padEnd; - std::vector axes; - std::vector scales; - bool antialias; - ov::op::util::InterpolateBase::InterpolateMode mode; - ov::op::util::InterpolateBase::ShapeCalcMode shapeCalcMode; - ov::op::util::InterpolateBase::CoordinateTransformMode coordinateTransformMode; - ov::op::util::InterpolateBase::NearestMode nearestMode; - double cubeCoef; - std::tie(mode, shapeCalcMode, coordinateTransformMode, nearestMode, antialias, padBegin, padEnd, cubeCoef, axes, scales) = interpolateParams; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "TS=" << ov::test::utils::vec2str(targetShapes) << "_"; - result << "InterpolateMode=" << mode << "_"; - result << "ShapeCalcMode=" << shapeCalcMode << "_"; - result << "CoordinateTransformMode=" << coordinateTransformMode << "_"; - result << "NearestMode=" << nearestMode << "_"; - result << "CubeCoef=" << cubeCoef << "_"; - result << "Antialias=" << antialias << "_"; - result << "PB=" << ov::test::utils::vec2str(padBegin) << "_"; - result << "PE=" << ov::test::utils::vec2str(padEnd) << "_"; - result << "Axes=" << ov::test::utils::vec2str(axes) << "_"; - result << "Scales=" << ov::test::utils::vec2str(scales) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - 
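The three Interpolate variants above differ mainly in how the attribute struct and the sizes/scales/axes inputs are assembled. As a point of reference, a minimal standalone construction of an Interpolate-4 node looks roughly like this; the shapes, modes, and values are illustrative assumptions, not taken from the deleted tests:

    #include <openvino/core/model.hpp>
    #include <openvino/op/constant.hpp>
    #include <openvino/op/interpolate.hpp>
    #include <openvino/op/parameter.hpp>
    #include <openvino/op/result.hpp>

    std::shared_ptr<ov::Model> make_interpolate_example() {
        using I4 = ov::op::v4::Interpolate;
        auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 10, 10});
        // Resize the two spatial axes to 20x20; scales are ignored in SIZES mode but must still be supplied.
        auto sizes  = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, std::vector<int64_t>{20, 20});
        auto scales = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{2}, std::vector<float>{2.0f, 2.0f});
        auto axes   = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, std::vector<int64_t>{2, 3});
        I4::InterpolateAttrs attrs{I4::InterpolateMode::NEAREST,
                                   I4::ShapeCalcMode::SIZES,
                                   {0, 0, 0, 0},   // pads_begin
                                   {0, 0, 0, 0},   // pads_end
                                   I4::CoordinateTransformMode::HALF_PIXEL,
                                   I4::NearestMode::ROUND_PREFER_FLOOR,
                                   false,          // antialias
                                   -0.75};         // cube_coeff
        auto interp = std::make_shared<I4>(data, sizes, scales, axes, attrs);
        return std::make_shared<ov::Model>(ov::ResultVector{std::make_shared<ov::op::v0::Result>(interp)},
                                           ov::ParameterVector{data});
    }

The attribute order mirrors the brace-initialization the tests use: mode, shape-calculation mode, pads, coordinate transform, nearest mode, antialias, cube coefficient.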
result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -static std::shared_ptr makeScalesOrSizesInput(ov::op::util::InterpolateBase::ShapeCalcMode shapeCalcMode, - const std::vector& sizes, - const std::vector& scales) { - if (shapeCalcMode == ov::op::util::InterpolateBase::ShapeCalcMode::SIZES) - return std::make_shared(ov::element::Type_t::i64, ov::Shape{sizes.size()}, sizes); - else - return std::make_shared(ov::element::Type_t::f32, ov::Shape{scales.size()}, scales); -} - -void InterpolateLayerTest::SetUp() { - InterpolateSpecificParams interpolateParams; - std::vector inputShape, targetShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::map additional_config; - std::tie(interpolateParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetShape, targetDevice, additional_config) = this->GetParam(); - std::vector padBegin, padEnd; - std::vector axes; - std::vector scales; - bool antialias; - ov::op::util::InterpolateBase::InterpolateMode mode; - ov::op::util::InterpolateBase::ShapeCalcMode shapeCalcMode; - ov::op::util::InterpolateBase::CoordinateTransformMode coordinateTransformMode; - ov::op::util::InterpolateBase::NearestMode nearestMode; - - configuration.insert(additional_config.begin(), additional_config.end()); - - double cubeCoef; - std::tie(mode, shapeCalcMode, coordinateTransformMode, nearestMode, antialias, padBegin, padEnd, cubeCoef, axes, scales) = interpolateParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto scalesOrSizesInput = makeScalesOrSizesInput(shapeCalcMode, targetShape, scales); - - ov::op::util::InterpolateBase::InterpolateAttrs interpolateAttributes{mode, shapeCalcMode, padBegin, - padEnd, coordinateTransformMode, nearestMode, antialias, cubeCoef}; - - std::shared_ptr interpolate{}; - if (axes.empty()) { - interpolate = std::make_shared(params[0], - scalesOrSizesInput, - interpolateAttributes); - } else { - auto axesInput = std::make_shared(ov::element::Type_t::i64, ov::Shape{axes.size()}, axes); - - interpolate = std::make_shared(params[0], - scalesOrSizesInput, - axesInput, - interpolateAttributes); - } - const ov::ResultVector results{std::make_shared(interpolate)}; - function = std::make_shared(results, params, "interpolate"); -} - -} // namespace v11 - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/is_inf.cpp b/src/tests/functional/shared_test_classes/src/single_layer/is_inf.cpp deleted file mode 100644 index f2b099e0dec333..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/is_inf.cpp +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/is_inf.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/ov_tensor_utils.hpp" - -using namespace ov::test::subgraph; - -std::string IsInfLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector inputShapes; - ElementType dataPrc; - bool detectNegative, detectPositive; - std::string targetName; - ov::AnyMap additionalConfig; - std::tie(inputShapes, detectNegative, detectPositive, dataPrc, targetName, additionalConfig) = obj.param; - std::ostringstream result; - - result << "IS=("; - for (size_t i = 0lu; i < inputShapes.size(); 
i++) { - result << ov::test::utils::partialShape2str({inputShapes[i].first}) << (i < inputShapes.size() - 1lu ? "_" : ""); - } - result << ")_TS="; - for (size_t i = 0lu; i < inputShapes.front().second.size(); i++) { - result << "{"; - for (size_t j = 0lu; j < inputShapes.size(); j++) { - result << ov::test::utils::vec2str(inputShapes[j].second[i]) << (j < inputShapes.size() - 1lu ? "_" : ""); - } - result << "}_"; - } - result << ")_detectNegative=" << (detectNegative ? "True" : "False") << "_"; - result << "detectPositive=" << (detectPositive ? "True" : "False") << "_"; - result << "dataPrc=" << dataPrc << "_"; - result << "trgDev=" << targetName; - - if (!additionalConfig.empty()) { - result << "_PluginConf"; - for (auto &item : additionalConfig) { - result << "_" << item.first << "=" << item.second.as(); - } - } - - return result.str(); -} - -void IsInfLayerTest::SetUp() { - std::vector shapes; - ElementType dataPrc; - bool detectNegative, detectPositive; - std::string targetName; - ov::AnyMap additionalConfig; - std::tie(shapes, detectNegative, detectPositive, dataPrc, targetDevice, additionalConfig) = this->GetParam(); - - init_input_shapes(shapes); - configuration.insert(additionalConfig.begin(), additionalConfig.end()); - - ov::ParameterVector parameters; - for (auto&& shape : inputDynamicShapes) { - parameters.push_back(std::make_shared(dataPrc, shape)); - } - parameters[0]->set_friendly_name("Data"); - - ov::op::v10::IsInf::Attributes attributes {detectNegative, detectPositive}; - auto isInf = std::make_shared(parameters[0], attributes); - ov::ResultVector results; - for (int i = 0; i < isInf->get_output_size(); i++) { - results.push_back(std::make_shared(isInf->output(i))); - } - - function = std::make_shared(results, parameters, "IsInf"); -} - -namespace { - -template -void fill_tensor(ov::Tensor& tensor, int32_t range, T startFrom) { - auto pointer = tensor.data(); - testing::internal::Random random(1); - for (size_t i = 0; i < range; i++) { - if (i % 7 == 0) { - pointer[i] = std::numeric_limits::infinity(); - } else if (i % 7 == 1) { - pointer[i] = std::numeric_limits::quiet_NaN(); - } else if (i % 7 == 3) { - pointer[i] = -std::numeric_limits::infinity(); - } else if (i % 7 == 5) { - pointer[i] = -std::numeric_limits::quiet_NaN(); - } else { - pointer[i] = startFrom + static_cast(random.Generate(range)); - } - } -} - -} // namespace - -void IsInfLayerTest::generate_inputs(const std::vector& targetInputStaticShapes) { - inputs.clear(); - const auto& funcInputs = function->inputs(); - const auto& input = funcInputs[0]; - - int32_t range = std::accumulate(targetInputStaticShapes[0].begin(), targetInputStaticShapes[0].end(), 1, std::multiplies()); - float startFrom = -static_cast(range) / 2.f; - auto tensor = ov::Tensor{ input.get_element_type(), targetInputStaticShapes[0]}; - - if (input.get_element_type() == ov::element::f16) { - fill_tensor(tensor, range, startFrom); - } else { - fill_tensor(tensor, range, startFrom); - } - - inputs.insert({input.get_node_shared_ptr(), tensor}); -} diff --git a/src/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp b/src/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp deleted file mode 100644 index fbb74f9baa323b..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/log_softmax.hpp" - -namespace 
LayerTestsDefinitions { - -std::string LogSoftmaxLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShape; - int64_t axis; - std::string targetDevice; - std::map config; - std::tie(netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, axis, targetDevice, config) = obj.param; - - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "axis=" << axis << "_"; - result << "trgDev=" << targetDevice; - - return result.str(); -} - -void LogSoftmaxLayerTest::SetUp() { - InferenceEngine::SizeVector inputShape; - InferenceEngine::Precision netPrecision; - int64_t axis; - - std::tie(netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, axis, targetDevice, configuration) = GetParam(); - outLayout = inLayout; - - const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - const ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - const auto logSoftmax = std::make_shared(params.at(0), axis); - - const ov::ResultVector results {std::make_shared(logSoftmax)}; - - function = std::make_shared(results, params, "logSoftmax"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp b/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp deleted file mode 100644 index db3760f067599d..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include "shared_test_classes/single_layer/logical.hpp" - -using namespace LayerTestsDefinitions::LogicalParams; - -namespace LayerTestsDefinitions { -std::string LogicalLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InputShapesTuple inputShapes; - ngraph::helpers::LogicalTypes comparisonOpType; - ngraph::helpers::InputLayerType secondInputType; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::map additional_config; - std::tie(inputShapes, comparisonOpType, secondInputType, netPrecision, inPrc, outPrc, inLayout, outLayout, targetName, additional_config) - = obj.param; - std::ostringstream results; - - results << "IS0=" << ov::test::utils::vec2str(inputShapes.first) << "_"; - results << "IS1=" << ov::test::utils::vec2str(inputShapes.second) << "_"; - results << "comparisonOpType=" << comparisonOpType << "_"; - results << "secondInputType=" << secondInputType << "_"; - results << "netPRC=" << netPrecision.name() << "_"; - results << "inPRC=" << inPrc.name() << "_"; - results << "outPRC=" << outPrc.name() << "_"; - results << "inL=" << inLayout << "_"; - results << "outL=" << outLayout << "_"; - results << "trgDev=" << targetName; - return results.str(); -} - -std::vector LogicalLayerTest::combineShapes(const std::map, std::vector>>& inputShapes) { - std::vector resVec; - for (auto& inputShape : inputShapes) { - for (auto& item : inputShape.second) { - 
resVec.push_back({inputShape.first, item}); - } - - if (inputShape.second.empty()) { - resVec.push_back({inputShape.first, {}}); - } - } - return resVec; -} - -InferenceEngine::Blob::Ptr LogicalLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 0); -} - -void LogicalLayerTest::SetupParams() { - std::tie(inputShapes, logicalOpType, secondInputType, netPrecision, - inPrc, outPrc, inLayout, outLayout, targetDevice, additional_config) = - this->GetParam(); - - configuration.insert(additional_config.begin(), additional_config.end()); -} - -void LogicalLayerTest::SetUp() { - SetupParams(); - - auto ngInputsPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc); - ov::ParameterVector inputs {std::make_shared(ngInputsPrc, ov::Shape(inputShapes.first))}; - - std::shared_ptr logicalNode; - if (logicalOpType != ngraph::helpers::LogicalTypes::LOGICAL_NOT) { - OPENVINO_SUPPRESS_DEPRECATED_START - auto secondInput = ngraph::builder::makeInputLayer(ngInputsPrc, secondInputType, inputShapes.second); - OPENVINO_SUPPRESS_DEPRECATED_END - if (secondInputType == ngraph::helpers::InputLayerType::PARAMETER) { - inputs.push_back(std::dynamic_pointer_cast(secondInput)); - } - logicalNode = ngraph::builder::makeLogical(inputs[0], secondInput, logicalOpType); - } else { - logicalNode = ngraph::builder::makeLogical(inputs[0], ov::Output(), logicalOpType); - } - - function = std::make_shared(logicalNode, inputs, "Logical"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/loop.cpp b/src/tests/functional/shared_test_classes/src/single_layer/loop.cpp deleted file mode 100644 index 21aaa59b82b942..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/loop.cpp +++ /dev/null @@ -1,399 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/loop.hpp" -#include - -namespace LayerTestsDefinitions { - - std::string LoopTest::getTestCaseName(const testing::TestParamInfo &obj) { - bool execute_first_iteration; - bool is_body_condition_const; - bool body_condition; // works only if is_body_condition_const == - int64_t trip_count; - std::vector, LOOP_IN_TYPE>> inputs; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(execute_first_iteration, is_body_condition_const, body_condition, trip_count, inputs, netPrecision, - targetDevice) = obj.param; - - std::vector> inputs_separate; - std::vector types_separate; - for (auto &el : inputs) { - inputs_separate.push_back(el.first); - types_separate.push_back(el.second); - } - std::ostringstream result; - result << "execute_first_iteration" << execute_first_iteration << "_"; - result << "is_body_condition_const=" << is_body_condition_const << "_"; - result << "body_condition=" << body_condition << "_"; - result << "trip_count=" << trip_count << "_"; - result << "IS=" << ov::test::utils::vec2str(inputs_separate) << "_"; - result << "types=" << ov::test::utils::vec2str(types_separate) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice << "_"; - auto res_str = result.str(); - std::replace(res_str.begin(), res_str.end(), '-', '_'); - return res_str; - } - - void LoopTest::SetUp() { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - bool execute_first_iteration; - bool is_body_condition_const; - bool body_condition; // works only if 
is_body_condition_const == - int64_t trip_count; - std::vector, LOOP_IN_TYPE>> inputs; - InferenceEngine::Precision netPrecision; - std::tie(execute_first_iteration, is_body_condition_const, body_condition, trip_count, inputs, netPrecision, - targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - // That which we iterate over - std::vector> inputs_separate; - std::vector types_separate; - for (auto &el : inputs) { - inputs_separate.push_back(el.first); - types_separate.push_back(el.second); - } - // Example: - /* auto X = std::make_shared(ov::element::f32, ov::Shape{32, 1, 10}); - auto Y = std::make_shared(ov::element::f32, ov::Shape{32, 1, 10}); - auto M = std::make_shared(ov::element::f32, ov::Shape{32, 1, 10});*/ - ov::ParameterVector params; - for (auto&& shape : inputs_separate) { - params.push_back(std::make_shared(ngPrc, ov::Shape(shape))); - } - - // Set up the cell body, a function from (Xi, Yi) -> (Zo) - // Body parameters - const std::vector body_params_shapes(inputs_separate.size(), ov::PartialShape::dynamic()); - auto current_iteration = std::make_shared(ov::element::i64, ov::Shape{1}); - - //Example: -/* auto Xi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto Yi = std::make_shared(ov::element::f32, ov::PartialShape::dynamic()); - auto M_body = std::make_shared(ov::element::f32, ov::PartialShape::dynamic());*/ - - ov::ParameterVector body_params; - for (const auto &pshape : body_params_shapes) { - auto paramNode = std::make_shared(ngPrc, pshape); - body_params.push_back(paramNode); - } - - std::shared_ptr body_condition_const; - if (is_body_condition_const) { - if (body_condition) { - body_condition_const = std::make_shared( - ov::element::boolean, ov::Shape{1}, true); - } else { - body_condition_const = std::make_shared( - ov::element::boolean, ov::Shape{1}, false); - } - } - - auto trip_count_const = - std::make_shared(ov::element::i64, ov::Shape{1}, trip_count); - - std::shared_ptr exec_condition; - if (execute_first_iteration) { - exec_condition = std::make_shared( - ov::element::boolean, ov::Shape{1}, true); - } else { - exec_condition = std::make_shared( - ov::element::boolean, ov::Shape{1}, false); - } - - // Body - std::shared_ptr Zo = body_params[0]; - for (int i = 1; i < body_params.size(); ++i) { - Zo = std::make_shared(body_params[i], Zo); - } - - // body_params.insert(body_params.begin(), current_iteration); - auto body = std::make_shared(ov::OutputVector{body_condition_const, Zo}, - body_params); - - auto loop = std::make_shared(trip_count_const, exec_condition); - loop->set_function(body); - loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); - - for (int i = 0; i < body_params.size(); ++i) { - if (types_separate[i] == LOOP_IN_TYPE::INVARIANT) { - loop->set_invariant_input(body_params[i], params[i]); - } else if (types_separate[i] == LOOP_IN_TYPE::MERGED) { - // todo: support several merged inputs - // now supported only one in this sample - loop->set_merged_input(body_params[i], params[i], Zo); - } - } - - // Output 0 is last Zo - auto out0 = loop->get_iter_value(body_condition_const, -1); - auto out1 = loop->get_iter_value(Zo, -1); - // Output 1 is concat of Zos - // start=0, stride=1, part_size=1, end=-1, axis=1 - auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); - - auto result0 = std::make_shared(out0); - auto result1 = std::make_shared(out1); - auto result2 = std::make_shared(out2); - function = 
std::make_shared<ov::Model>(ov::ResultVector{result0, result1, result2}, params, "loop"); - } - - std::string StaticShapeLoopTest::getTestCaseName(const testing::TestParamInfo<StaticShapeLoopParams> &obj) { - bool unrolling; - bool static_iter_num; - bool static_continue_cond; - int64_t max_iter_num; - int64_t dynamic_exit; - int64_t axis; - int64_t start_value; - InferenceEngine::SizeVector data_shape; - InferenceEngine::Precision data_prc; - std::string targetDevice; - auto args_papck = std::tie(static_iter_num, max_iter_num, dynamic_exit, axis); - std::map<std::string, std::string> configuration; - std::tie( - unrolling, - static_continue_cond, - args_papck, - start_value, - data_shape, - data_prc, - targetDevice, - configuration) = obj.param; - - std::ostringstream result; - result << "unrolling=" << std::to_string(unrolling) << "_"; - result << "static_iter_num=" << std::to_string(static_iter_num) << "_"; - result << "static_continue_cond=" << std::to_string(static_continue_cond) << "_"; - result << "max_iter_num=" << std::to_string(max_iter_num) << "_"; - result << "dynamic_exit=" << std::to_string(dynamic_exit) << "_"; - result << "axis=" << std::to_string(axis) << "_"; - result << "start_value=" << std::to_string(start_value) << "_"; - result << "max_iter_num=" << std::to_string(max_iter_num) << "_"; - result << "IS=" << ov::test::utils::vec2str(data_shape) << "_"; - result << "netPRC=" << std::to_string(data_prc) << "_"; - result << "targetDevice=" << targetDevice << "_"; - - auto res_str = result.str(); - std::replace(res_str.begin(), res_str.end(), '-', '_'); - return res_str; - } - - void StaticShapeLoopTest::SetUp() { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - auto args_papck = std::tie(static_iter_num, max_iter_num, dynamic_exit, axis); - std::tie( - unrolling, - static_continue_cond, - args_papck, - start_value, - data_shape, - data_prc, - targetDevice, - configuration) = GetParam(); - - const auto prc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(data_prc); - const auto ngShape = ov::Shape{data_shape}; - const auto scalarShape = ov::Shape{}; - - ov::ParameterVector params{}; - auto cond_input_create = [&params] (ov::element::Type prc, const ov::Shape &shape, int value = 0, bool is_static = false) - -> std::shared_ptr<ov::Node> { - if (is_static) - return std::make_shared<ov::op::v0::Constant>(prc, shape, value); - - auto input = std::make_shared<ov::op::v0::Parameter>(prc, shape); - params.push_back(input); - return input; - }; - - auto start = cond_input_create(prc, ngShape); - auto count = cond_input_create(ov::element::i64, scalarShape, max_iter_num, static_iter_num); - auto skip = cond_input_create(ov::element::boolean, scalarShape, true, static_continue_cond); - - // ASCII diagram (layout lost in extraction): count/skip/start fed two body variants. "Full loop": idx and data enter the body, - // add produces out, the condition cnd is fixed true, so n_iter = count. "Dynamic exit loop": additionally less(idx, ex_val) drives cnd, - // so n_iter = ex_val. Iteration trace from the diagram: idx/data/out = 0/7/7, 1/7/8, 2/8/10, 3/10/13, ... - auto b_indx = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{}); - auto b_data = std::make_shared<ov::op::v0::Parameter>(prc, ngShape); - auto b_indx_cast = std::make_shared<ov::op::v0::Convert>(b_indx, prc); - auto b_add = std::make_shared<ov::op::v1::Add>(b_data, b_indx_cast); - - std::shared_ptr<ov::Node> b_cond; - if (dynamic_exit == -1) { - b_cond = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{}, true); - } else { - auto b_exit_value = std::make_shared<ov::op::v0::Constant>(ov::element::i64, scalarShape, dynamic_exit); - b_cond =
std::make_shared(b_indx, b_exit_value); - } - - auto body = std::make_shared( - ov::OutputVector {b_cond, b_add}, // TODO: check with reverse - ov::ParameterVector {b_indx, b_data}); // TODO: check with reverse - - auto loop = std::make_shared(count, skip); - loop->set_function(body); - loop->set_special_body_ports({0, 0}); - loop->set_merged_input(b_data, start, b_add); - if (axis == -1) - loop->get_iter_value(b_add, -1); - else - loop->get_concatenated_slices(b_add, 0, 1, 1, -1, axis); - - function = std::make_shared( - ov::OutputVector {loop}, - params); - if (unrolling) { - ov::pass::Manager manager; - manager.register_pass(); - manager.run_passes(function); - } - } - - InferenceEngine::Blob::Ptr StaticShapeLoopTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - auto tdesc = info.getTensorDesc(); - auto blob = make_blob_with_precision(tdesc); - blob->allocate(); - - if (tdesc.getLayout() == InferenceEngine::SCALAR) { - auto scalar_1d = ov::test::utils::make_reshape_view(blob, {1}); - ov::test::utils::fill_data_with_broadcast(scalar_1d, 0, {static_cast(max_iter_num)}); - } else { - ov::test::utils::fill_data_with_broadcast(blob, 0, {static_cast(start_value)}); - } - - return blob; - } - - int64_t StaticShapeLoopTest::actual_n_iter() { - constexpr auto INF_N_ITER = std::numeric_limits::max(); - IE_ASSERT(dynamic_exit != -1 || max_iter_num != -1); - - // dynamic_exit + 1 - because loop body looks like do-while loop with post condition check. - return std::min(dynamic_exit == -1 ? INF_N_ITER : dynamic_exit + 1, - max_iter_num == -1 ? INF_N_ITER : max_iter_num); - } - - // Predefined ref output - std::vector>> StaticShapeLoopTest::PredefinedRefs() { - bool auto_concat_out = (axis != -1); - const auto n_iter = actual_n_iter(); - - auto ref_shape = data_shape; - if (auto_concat_out) - ref_shape[axis] *= n_iter; - - using namespace ov::test::utils; - InferenceEngine::TensorDesc tdesc {data_prc, ref_shape, InferenceEngine::TensorDesc::getLayoutByDims(ref_shape)}; - std::pair> res; - res.first = function->get_result()->get_element_type(); - res.second = std::vector(byte_size(tdesc)); - auto out = make_blob_with_precision(tdesc, res.second.data()); - - std::vector vals(n_iter); - float val = start_value; - for (int i = 0; i < n_iter; i++) { - val += i; - vals[i] = val; - } - - if (auto_concat_out) - fill_data_with_broadcast(out, axis, vals); - else - fill_data_with_broadcast(out, 0, {val}); // broadcast scalar data - - return {res}; - } - - void TrivialLoopTest::CreateSlicedLoop(size_t batch_size, size_t num_iteration, InferenceEngine::Precision iePrc, - InferenceEngine::SizeVector& ieShape) { - const auto prc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(iePrc); - const auto scalarShape = ov::Shape{}; - - auto shape = ov::Shape{ieShape}; - auto to_slice_shape = ov::Shape{ieShape}; - to_slice_shape[0] = batch_size; - - auto to_slice = std::make_shared(prc, to_slice_shape); - auto start = std::make_shared(prc, shape, 0); - auto count = std::make_shared(ov::element::i64, scalarShape, num_iteration); - auto icond = std::make_shared(ov::element::boolean, scalarShape, true); - - // Loop body - auto b_data = std::make_shared(prc, shape); - auto b_recu = std::make_shared(prc, shape); - auto b_add = std::make_shared(b_data, b_recu); - auto b_cond = std::make_shared(ov::element::boolean, scalarShape, true); - - auto body = std::make_shared( - ov::OutputVector {b_cond, b_add}, - ov::ParameterVector {b_data, b_recu}); - - auto loop = std::make_shared(count, icond); - 
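Before the wiring calls that follow, it may help to see the same v5::Loop API in a compact, self-contained form. A minimal sketch (illustrative shapes and trip count, not the test's own values): the body adds an invariant input to an accumulator, the condition stays true, and the trip count alone decides when to stop.

    #include <openvino/core/model.hpp>
    #include <openvino/op/add.hpp>
    #include <openvino/op/constant.hpp>
    #include <openvino/op/loop.hpp>
    #include <openvino/op/parameter.hpp>

    std::shared_ptr<ov::Model> make_trivial_loop() {
        auto x    = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2});
        auto trip = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{1}, 5);
        auto exec = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{1}, true);
        // Body: acc' = acc + x; the body condition is constant true, so the trip count rules.
        auto b_x    = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2});
        auto b_acc  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2});
        auto b_sum  = std::make_shared<ov::op::v1::Add>(b_x, b_acc);
        auto b_cond = std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{1}, true);
        auto body = std::make_shared<ov::Model>(ov::OutputVector{b_cond, b_sum},
                                                ov::ParameterVector{b_x, b_acc});
        auto loop = std::make_shared<ov::op::v5::Loop>(trip, exec);
        loop->set_function(body);
        loop->set_special_body_ports({-1, 0});    // no current-iteration input; body output 0 is the condition
        loop->set_invariant_input(b_x, x);        // the same x is seen on every iteration
        loop->set_merged_input(b_acc, x, b_sum);  // acc starts as x, then feeds back from b_sum
        auto out = loop->get_iter_value(b_sum, -1);  // value produced by the last iteration
        return std::make_shared<ov::Model>(ov::OutputVector{out}, ov::ParameterVector{x});
    }

The special body ports are the one non-obvious knob: {current_iteration_input_idx, body_condition_output_idx}, which is why the tests pass {-1, 0} when the body has no iteration counter and {0, 0} or {2, 0} when it does.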
loop->set_function(body); - loop->set_special_body_ports({-1, 0}); - loop->set_sliced_input(b_data, to_slice, 0, 1, 1, -1, 0); - loop->set_merged_input(b_recu, start, b_add); - loop->get_iter_value(b_add, -1); - - function = std::make_shared( - ov::OutputVector {loop}, - ov::ParameterVector {to_slice}); - } - - void TrivialLoopTest::CreateSlicedLoopDynCondition(size_t batch_size, size_t num_iteration, InferenceEngine::Precision iePrc, - InferenceEngine::SizeVector& ieShape, size_t trip_count) { - auto shape = ov::Shape{ieShape}; - auto to_slice_shape = ov::Shape{ieShape}; - to_slice_shape[0] = batch_size; - - const auto prc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(iePrc); - const auto scalarShape = ov::Shape{}; - - auto to_slice = std::make_shared(prc, to_slice_shape); - auto start = std::make_shared(prc, shape, 0); - auto exit_on = std::make_shared(ov::element::i64, scalarShape, num_iteration); - auto count = std::make_shared(ov::element::i64, scalarShape, trip_count); - auto icond = std::make_shared(ov::element::boolean, scalarShape, true); - - // Loop body - auto b_data = std::make_shared(prc, shape); - auto b_recu = std::make_shared(prc, shape); - auto b_add = std::make_shared(b_data, b_recu); - auto b_iter = std::make_shared(ov::element::i64, scalarShape); - auto b_exit_on = std::make_shared(ov::element::i64, scalarShape); - auto b_cond = std::make_shared(b_iter, b_exit_on); - - auto body = std::make_shared( - ov::OutputVector {b_cond, b_add}, - ov::ParameterVector {b_data, b_recu, b_iter, b_exit_on}); - - auto loop = std::make_shared(count, icond); - loop->set_function(body); - loop->set_special_body_ports({2, 0}); - loop->set_sliced_input(b_data, to_slice, 0, 1, 1, -1, 0); - loop->set_invariant_input(b_exit_on, exit_on); - loop->set_merged_input(b_recu, start, b_add); - loop->get_iter_value(b_add, -1); - - function = std::make_shared( - ov::OutputVector {loop}, - ov::ParameterVector {to_slice}); - } -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp b/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp deleted file mode 100644 index 046c985744ddd1..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) 2019-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/low_precision.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace LowPrecisionTestDefinitions { - -std::string LowPrecisionTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::pair> config; - std::tie(netPrecision, targetDevice, config) = obj.param; - - std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetDevice; - if (!config.first.empty()) { - result << "_targetConfig=" << config.first; - } - return result.str(); -} - -void LowPrecisionTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::pair> config; - std::tie(netPrecision, targetDevice, config) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto inputShape = ov::Shape{ 1, 16 }; - auto weights1Shape = ov::Shape{ 16, 16 }; - auto weights2Shape = ov::Shape{ 128, 32 }; - - // fully connected 1 - auto input = std::make_shared(ngPrc, inputShape); - 
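The weight fill in the loop just below writes weights1Data[i * 17]. For a row-major 16x16 matrix stored flat, index i * 17 (that is, i * (N + 1)) addresses element (i, i), so the first FullyConnected receives a purely diagonal weight matrix with values 10..25. The general pattern:

    #include <cstddef>
    #include <vector>

    // Diagonal of a flat, row-major N x N matrix: element (i, i) lives at index i * (N + 1).
    std::vector<float> diagonal_weights(std::size_t n, float first) {
        std::vector<float> w(n * n, 0.0f);
        for (std::size_t i = 0; i < n; ++i)
            w[i * (n + 1)] = first + static_cast<float>(i);  // w[i * 17] when n == 16
        return w;
    }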
std::vector weights1Data(ov::shape_size(weights1Shape), 0.0f); - - for (size_t i = 0; i < 16; i++) { - weights1Data[i * 17] = 10.0f + i; - } - - auto weights1 = ov::test::utils::deprecated::make_constant(ngPrc, weights1Shape, weights1Data); - auto fc1 = std::make_shared(input, weights1); - fc1->set_friendly_name("FullyConnected_1"); - - // bias 1 - std::vector bias1Data(ov::shape_size(inputShape), 0.0f); - auto bias1 = ov::test::utils::deprecated::make_constant(ngPrc, inputShape, bias1Data); - auto add1 = std::make_shared(fc1, bias1); - add1->set_friendly_name("Add_1"); -#if 0 - // ReLU 1 - auto relu1 = std::make_shared(add1); - relu1->set_friendly_name("Relu_1"); - - //// fully connected 2 - std::vector weights2Data(ov::shape_size(weights2Shape), 0.0f); - std::fill(weights2Data.begin(), weights2Data.end(), 0.0001f); - auto weights2 = ov::test::utils::deprecated::make_constant(ngPrc, weights2Shape, weights2Data); - auto fc2 = std::make_shared(relu1, weights2); - fc2->set_friendly_name("FullyConnected_2"); - - //// bias 2 - std::vector bias2Data(ov::shape_size(weights2Shape), 0.0f); - auto bias2 = ov::test::utils::deprecated::make_constant(ngPrc, weights2Shape, bias2Data); - auto add2 = std::make_shared(fc2, bias2); - add2->set_friendly_name("Add_2"); - - //// ReLU 2 - auto relu2 = std::make_shared(add2); - relu2->set_friendly_name("Relu_2"); -#endif - configuration = config.second; - function = std::make_shared(ov::ResultVector{std::make_shared(add1)}, - ov::ParameterVector{input}, - "LowPrecisionTest"); -} - -} // namespace LowPrecisionTestDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lrn.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lrn.cpp deleted file mode 100644 index f11ab7c420ef62..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/lrn.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/lrn.hpp" - -namespace LayerTestsDefinitions { - -std::string LrnLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - double alpha, beta, bias; - size_t size; - std::vector axes; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - std::vector inputShapes; - std::string targetDevice; - std::tie(alpha, beta, bias, size, axes, netPrecision, inPrc, outPrc, inputShapes, targetDevice) = obj.param; - - std::ostringstream result; - const char separator = '_'; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << separator; - result << "Alpha=" << alpha << separator; - result << "Beta=" << beta << separator; - result << "Bias=" << bias << separator; - result << "Size=" << size << separator; - result << "Axes=" << ov::test::utils::vec2str(axes) << separator; - result << "netPRC=" << netPrecision.name() << separator; - result << "inPRC=" << inPrc.name() << separator; - result << "outPRC=" << outPrc.name() << separator; - result << "trgDev=" << targetDevice; - - return result.str(); -} - -void LrnLayerTest::SetUp() { - std::vector inputShapes; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - double alpha, beta, bias; - size_t size; - std::vector axes; - std::tie(alpha, beta, bias, size, axes, netPrecision, inPrc, outPrc, inputShapes, targetDevice) = GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShapes))}; - - auto axes_node = 
std::make_shared(ov::element::i64, ov::Shape{axes.size()}, axes.data()); - auto lrn = std::make_shared(params[0], axes_node, alpha, beta, bias, size); - ov::ResultVector results {std::make_shared(lrn)}; - function = std::make_shared(results, params, "lrn"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell.cpp deleted file mode 100644 index e1ef6c5bc75ef0..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell.cpp +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "transformations/op_conversions/lstm_cell_decomposition.hpp" -#include "shared_test_classes/single_layer/lstm_cell.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace LayerTestsDefinitions { - -using ngraph::helpers::InputLayerType; - -std::string LSTMCellTest::getTestCaseName(const testing::TestParamInfo &obj) { - bool should_decompose; - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - InputLayerType WType; - InputLayerType RType; - InputLayerType BType; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(should_decompose, batch, hidden_size, input_size, activations, clip, WType, RType, BType, - netPrecision, targetDevice) = obj.param; - std::vector> inputShapes = { - {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, - {4 * hidden_size, hidden_size}, {4 * hidden_size}}, - }; - std::ostringstream result; - result << "decomposition" << should_decompose << "_"; - result << "batch=" << batch << "_"; - result << "hidden_size=" << hidden_size << "_"; - result << "input_size=" << input_size << "_"; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "activations=" << ov::test::utils::vec2str(activations) << "_"; - result << "clip=" << clip << "_"; - result << "WType=" << WType << "_"; - result << "RType=" << RType << "_"; - result << "BType=" << BType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice << "_"; - return result.str(); -} - -void LSTMCellTest::SetUp() { - bool should_decompose; - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - InputLayerType WType; - InputLayerType RType; - InputLayerType BType; - InferenceEngine::Precision netPrecision; - std::tie(should_decompose, batch, hidden_size, input_size, activations, clip, WType, RType, BType, - netPrecision, targetDevice) = this->GetParam(); - std::vector> inputShapes = { - {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, - {4 * hidden_size, hidden_size}, {4 * hidden_size}}, - }; - std::vector WRB = {inputShapes[3], inputShapes[4], inputShapes[5]}; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1])), - std::make_shared(ngPrc, ov::Shape(inputShapes[2]))}; - - std::shared_ptr W; - if (WType == InputLayerType::PARAMETER) { - const auto param = std::make_shared(ngPrc, WRB[0]); - W = param; - 
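This PARAMETER/CONSTANT branch for W, repeated for R and B below and mirrored in the GRU and RNN variants, reduces to one recurring pattern. A condensed sketch, with placeholder names (as_parameter, w_shape, w_data) that are not from the original file:

    #include <openvino/op/constant.hpp>
    #include <openvino/op/parameter.hpp>

    // Feed a weight tensor either as a runtime input (Parameter) or baked into the graph (Constant).
    std::shared_ptr<ov::Node> make_weight_input(bool as_parameter,
                                                const ov::Shape& w_shape,
                                                const std::vector<float>& w_data,
                                                ov::ParameterVector& params) {
        if (as_parameter) {
            auto p = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, w_shape);
            params.push_back(p);  // the weight becomes an extra model input, filled by the test harness
            return p;
        }
        return ov::op::v0::Constant::create(ov::element::f32, w_shape, w_data);
    }

Exercising both paths is the point of the WType/RType/BType parameters: constant weights let plugins fold and fuse, parameter weights force the runtime path.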
params.push_back(param); - } else { - W = ov::test::utils::deprecated::make_constant(ngPrc, WRB[0], {}, true); - } - - std::shared_ptr R; - if (RType == InputLayerType::PARAMETER) { - const auto param = std::make_shared(ngPrc, WRB[1]); - R = param; - params.push_back(param); - } else { - R = ov::test::utils::deprecated::make_constant(ngPrc, WRB[1], {}, true); - } - - std::shared_ptr B; - if (BType == InputLayerType::PARAMETER) { - const auto param = std::make_shared(ngPrc, WRB[2]); - B = param; - params.push_back(param); - } else { - B = ov::test::utils::deprecated::make_constant(ngPrc, WRB[2], {}, true); - } - - auto lstm_cell = std::make_shared(params[0], params[1], params[2], W, R, B, hidden_size, activations, - activations_alpha, activations_beta, clip); - ov::ResultVector results{std::make_shared(lstm_cell->output(0)), - std::make_shared(lstm_cell->output(1))}; - function = std::make_shared(results, params, "lstm_cell"); - if (should_decompose) { - ov::pass::Manager m; - m.register_pass(); - m.run_passes(function); - } -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell_basic.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell_basic.cpp deleted file mode 100644 index 786880fd7bf960..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell_basic.cpp +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) 2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "transformations/op_conversions/lstm_cell_decomposition.hpp" -#include "shared_test_classes/single_layer/lstm_cell_basic.hpp" - -namespace LayerTestsDefinitions { - -std::string LSTMCellBasicTest::getTestCaseName(const testing::TestParamInfo &obj) { - bool should_decompose; - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::map additionalConfig; - std::tie(should_decompose, batch, hidden_size, input_size, activations, clip, netPrecision, - targetDevice, additionalConfig) = obj.param; - std::vector> inputShapes = { - {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, - {4 * hidden_size, hidden_size}, {4 * hidden_size}}, - }; - std::ostringstream result; - result << "decomposition" << should_decompose << "_"; - result << "batch=" << batch << "_"; - result << "hidden_size=" << hidden_size << "_"; - result << "input_size=" << input_size << "_"; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "activations=" << ov::test::utils::vec2str(activations) << "_"; - result << "clip=" << clip << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice << "_"; - for (const auto& configEntry : additionalConfig) { - result << configEntry.first << ", " << configEntry.second << ";"; - } - return result.str(); -} - -void LSTMCellBasicTest::SetUp() { - bool should_decompose; - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - InferenceEngine::Precision netPrecision; - std::map additionalConfig; - std::tie(should_decompose, batch, hidden_size, input_size, activations, clip, netPrecision, - targetDevice, additionalConfig) = this->GetParam(); - - configuration.insert(additionalConfig.begin(), 
additionalConfig.end()); - std::vector> inputShapes = { - {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, - {4 * hidden_size, hidden_size}, {4 * hidden_size}}, - }; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1])), - std::make_shared(ngPrc, ov::Shape(inputShapes[2]))}; - ov::OutputVector paramsOuts; - for (auto&& param : params) - paramsOuts.push_back(param); - - std::vector WRB = {inputShapes[3], inputShapes[4], inputShapes[5]}; - auto lstm_cell = ngraph::builder::makeLSTM(paramsOuts, WRB, hidden_size, activations, {}, {}, clip); - ov::ResultVector results{std::make_shared(lstm_cell->output(0)), - std::make_shared(lstm_cell->output(1))}; - function = std::make_shared(results, params, "lstm_cell"); - if (should_decompose) { - ov::pass::Manager m; - m.register_pass(); - m.run_passes(function); - } -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp deleted file mode 100644 index 2757b90df0cf3f..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/lstm_sequence.hpp" -#include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" -#include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace LayerTestsDefinitions { - - using ngraph::helpers::InputLayerType; - - std::string LSTMSequenceTest::getTestCaseName(const testing::TestParamInfo &obj) { - ngraph::helpers::SequenceTestsMode mode; - size_t seq_lengths; - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - ov::op::RecurrentSequenceDirection direction; - InputLayerType WRBType; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, - WRBType, netPrecision, targetDevice) = obj.param; - std::vector> inputShapes = { - {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, - {4 * hidden_size, hidden_size}, {4 * hidden_size}}, - }; - std::ostringstream result; - result << "mode=" << mode << "_"; - result << "seq_lengths=" << seq_lengths << "_"; - result << "batch=" << batch << "_"; - result << "hidden_size=" << hidden_size << "_"; - result << "input_size=" << input_size << "_"; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "activations=" << ov::test::utils::vec2str(activations) << "_"; - result << "direction=" << direction << "_"; - result << "clip=" << clip << "_"; - result << "WRBType=" << WRBType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice << "_"; - return result.str(); - } - - void LSTMSequenceTest::SetUp() { - using namespace ngraph::helpers; - using namespace ngraph::builder; - size_t seq_lengths; - - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - std::vector 
activations_alpha; - std::vector activations_beta; - float clip; - ov::op::RecurrentSequenceDirection direction; - InputLayerType WRBType; - InferenceEngine::Precision netPrecision; - std::tie(m_mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, - WRBType, netPrecision, targetDevice) = this->GetParam(); - size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; - m_max_seq_len = seq_lengths; - std::vector inputShapes = { - {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch, num_directions, hidden_size}, - {batch}, {num_directions, 4 * hidden_size, input_size}, {num_directions, 4 * hidden_size, hidden_size}, {num_directions, 4 * hidden_size}}, - }; - - const auto& W_shape = inputShapes[4]; - const auto& R_shape = inputShapes[5]; - const auto& B_shape = inputShapes[6]; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, inputShapes[0]), - std::make_shared(ngPrc, inputShapes[1]), - std::make_shared(ngPrc, inputShapes[2])}; - - std::shared_ptr seq_lengths_node; - if (m_mode == SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM || - m_mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM || - m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM) { - auto param = std::make_shared(ov::element::i64, inputShapes[3]); - seq_lengths_node = param; - seq_lengths_node->set_friendly_name("seq_lengths"); - params.push_back(param); - } else if (m_mode == ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST || - m_mode == ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST) { - seq_lengths_node = ov::test::utils::deprecated::make_constant(ov::element::i64, inputShapes[3], {}, true, - static_cast(seq_lengths), 0.f); - } else { - std::vector lengths(inputShapes[3][0], seq_lengths); - seq_lengths_node = ov::test::utils::deprecated::make_constant(ov::element::i64, inputShapes[3], lengths, false); - } - - std::shared_ptr W, R, B; - if (WRBType == InputLayerType::PARAMETER) { - const auto W_param = std::make_shared(ngPrc, W_shape); - const auto R_param = std::make_shared(ngPrc, R_shape); - const auto B_param = std::make_shared(ngPrc, B_shape); - W = W_param; - R = R_param; - B = B_param; - params.push_back(W_param); - params.push_back(R_param); - params.push_back(B_param); - } else { - W = ov::test::utils::deprecated::make_constant(ngPrc, W_shape, {}, true); - R = ov::test::utils::deprecated::make_constant(ngPrc, R_shape, {}, true); - B = ov::test::utils::deprecated::make_constant(ngPrc, B_shape, {}, true); - } - - auto lstm_sequence = std::make_shared(params[0], params[1], params[2], seq_lengths_node, W, R, B, hidden_size, direction, - std::vector{}, std::vector{}, activations, clip); - ov::ResultVector results{std::make_shared(lstm_sequence->output(0)), - std::make_shared(lstm_sequence->output(1)), - std::make_shared(lstm_sequence->output(2))}; - function = std::make_shared(results, params, "lstm_sequence"); - bool is_pure_sequence = (m_mode == SequenceTestsMode::PURE_SEQ || - m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM || - m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST); - if (!is_pure_sequence) { - ov::pass::Manager manager; - if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL) - manager.register_pass(); - manager.register_pass(); - manager.run_passes(function); - bool ti_found = is_tensor_iterator_exist(function); - EXPECT_EQ(ti_found, true); - } else { 
- bool ti_found = is_tensor_iterator_exist(function); - EXPECT_EQ(ti_found, false); - } - } - - void LSTMSequenceTest::GenerateInputs() { - for (const auto &input : executableNetwork.GetInputsInfo()) { - const auto &info = input.second; - auto blob = GenerateInput(*info); - if (input.first == "seq_lengths") { - blob = FuncTestUtils::createAndFillBlob(info->getTensorDesc(), m_max_seq_len, 0); - } - - inputs.push_back(blob); - } - } -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp b/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp deleted file mode 100644 index ccc86749ca4852..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include "shared_test_classes/single_layer/mat_mul.hpp" - -namespace LayerTestsDefinitions { - -std::vector MatMulTest::combineShapes(const std::vector>& firstInputShapes, - const std::vector>& secondInputShapes, - bool transposeA, - bool transposeB) { - std::vector resVec; - for (const auto& firstInputShape : firstInputShapes) { - for (const auto& secondInputShape : secondInputShapes) { - resVec.push_back(ShapeRelatedParams{ {firstInputShape, transposeA}, {secondInputShape, transposeB } }); - } - } - return resVec; -} - -std::string MatMulTest::getTestCaseName(const testing::TestParamInfo &obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout; - ShapeRelatedParams shapeRelatedParams; - ngraph::helpers::InputLayerType secondaryInputType; - std::string targetDevice; - std::map additionalConfig; - std::tie(shapeRelatedParams, netPrecision, inPrc, outPrc, inLayout, secondaryInputType, targetDevice, additionalConfig) = - obj.param; - - std::ostringstream result; - result << "IS0=" << ov::test::utils::vec2str(shapeRelatedParams.input1.first) << "_"; - result << "IS1=" << ov::test::utils::vec2str(shapeRelatedParams.input2.first) << "_"; - result << "transpose_a=" << shapeRelatedParams.input1.second << "_"; - result << "transpose_b=" << shapeRelatedParams.input2.second << "_"; - result << "secondaryInputType=" << secondaryInputType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "trgDev=" << targetDevice; - result << "config=("; - for (const auto& configEntry : additionalConfig) { - result << configEntry.first << ", " << configEntry.second << ";"; - } - result << ")"; - return result.str(); -} - -void MatMulTest::SetUp() { - ShapeRelatedParams shapeRelatedParams; - ngraph::helpers::InputLayerType secondaryInputType; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::map additionalConfig; - std::tie(shapeRelatedParams, netPrecision, inPrc, outPrc, inLayout, secondaryInputType, targetDevice, additionalConfig) = - this->GetParam(); - - configuration.insert(additionalConfig.begin(), additionalConfig.end()); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(shapeRelatedParams.input1.first))}; - - OPENVINO_SUPPRESS_DEPRECATED_START - auto secondaryInput = ngraph::builder::makeInputLayer(ngPrc, secondaryInputType, shapeRelatedParams.input2.first); - 
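
(Editor's aside: the combineShapes helper deleted above is simply a Cartesian product over the two shape lists. A minimal standalone equivalent, with local types standing in for the test's SizeVector/ShapeRelatedParams:)

#include <cstddef>
#include <utility>
#include <vector>

using SizeVector = std::vector<size_t>;
struct ShapeRelatedParams {
    std::pair<SizeVector, bool> input1;  // shape + transpose_a
    std::pair<SizeVector, bool> input2;  // shape + transpose_b
};

std::vector<ShapeRelatedParams> combine_shapes(const std::vector<SizeVector>& first,
                                               const std::vector<SizeVector>& second,
                                               bool transpose_a, bool transpose_b) {
    std::vector<ShapeRelatedParams> res;
    for (const auto& a : first)
        for (const auto& b : second)
            res.push_back({{a, transpose_a}, {b, transpose_b}});
    return res;
}
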
OPENVINO_SUPPRESS_DEPRECATED_END - if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { - params.push_back(std::dynamic_pointer_cast(secondaryInput)); - } - auto MatMul = std::make_shared(params[0], secondaryInput, shapeRelatedParams.input1.second, shapeRelatedParams.input2.second); - ov::ResultVector results{std::make_shared(MatMul)}; - function = std::make_shared(results, params, "MatMul"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp b/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp deleted file mode 100644 index cc9621e27cd5fe..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include -#include "shared_test_classes/single_layer/matrix_nms.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" - -#include "functional_test_utils/plugin_cache.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -using namespace ngraph; -using namespace InferenceEngine; -using ngraph::helpers::operator<<; - -std::string MatrixNmsLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector shapes; - InputPrecisions inPrecisions; - op::v8::MatrixNms::SortResultType sortResultType; - element::Type outType; - int backgroudClass; - op::v8::MatrixNms::DecayFunction decayFunction; - TopKParams topKParams; - ThresholdParams thresholdParams; - bool normalized; - bool outStaticShape; - std::string targetDevice; - std::tie(shapes, inPrecisions, sortResultType, outType, topKParams, thresholdParams, - backgroudClass, normalized, decayFunction, outStaticShape, targetDevice) = obj.param; - - ElementType paramsPrec, maxBoxPrec, thrPrec; - std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions; - - int nmsTopK, keepTopK; - std::tie(nmsTopK, keepTopK) = topKParams; - - float score_threshold, gaussian_sigma, post_threshold; - std::tie(score_threshold, gaussian_sigma, post_threshold) = thresholdParams; - - std::ostringstream result; - result << "IS=("; - for (const auto& shape : shapes) { - result << ov::test::utils::partialShape2str({shape.first}) << "_"; - } - result << ")_TS=("; - for (const auto& shape : shapes) { - for (const auto& item : shape.second) { - result << ov::test::utils::vec2str(item) << "_"; - } - } - - result << ")_paramsPrec=" << paramsPrec << "_maxBoxPrec=" << maxBoxPrec << "_thrPrec=" << thrPrec << "_"; - result << "sortResultType=" << sortResultType << "_normalized=" << normalized << "_"; - result << "outType=" << outType << "_nmsTopK=" << nmsTopK << "_keepTopK=" << keepTopK << "_"; - result << "backgroudClass=" << backgroudClass << "_decayFunction=" << decayFunction << "_"; - result << "score_threshold=" << score_threshold << "_gaussian_sigma=" << gaussian_sigma << "_"; - result << "post_threshold=" << post_threshold << "_outStaticShape=" << outStaticShape <<"_TargetDevice=" << targetDevice; - return result.str(); -} - -void MatrixNmsLayerTest::generate_inputs(const std::vector& targetInputStaticShapes) { - inputs.clear(); - - const auto& funcInputs = function->inputs(); - for (int i = 0; i < funcInputs.size(); ++i) { - const auto& funcInput = funcInputs[i]; - ov::Tensor tensor; - - if (i == 1) { - tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); - - const size_t range = 1; - const size_t startFrom = 0; - 
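
(Editor's aside: the RNG setup that these declarations begin, and the loop that finishes it a few lines below, form a simple deterministic score filler. Condensed into one function it reads as follows; startFrom = 0 and range = 1 as in this test, so scores land in [0, 1].)

#include <random>
#include <vector>

std::vector<float> fill_scores(size_t n) {
    const int k = 1000;
    std::default_random_engine random(1);                    // fixed seed keeps runs reproducible
    std::uniform_int_distribution<int> distribution(0, k);   // k*startFrom .. k*(startFrom+range)
    std::vector<float> scores(n);
    for (auto& s : scores)
        s = static_cast<float>(distribution(random)) / static_cast<float>(k);
    return scores;
}
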
const size_t k = 1000; - const int seed = 1; - std::default_random_engine random(seed); - std::uniform_int_distribution distribution(k * startFrom, k * (startFrom + range)); - - auto *dataPtr = tensor.data(); - for (size_t i = 0; i < tensor.get_size(); i++) { - auto value = static_cast(distribution(random)); - dataPtr[i] = value / static_cast(k); - } - } else { - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); - } - - inputs.insert({funcInput.get_node_shared_ptr(), tensor}); - } -} - -void MatrixNmsLayerTest::GetOutputParams(size_t& numBatches, size_t& maxOutputBoxesPerBatch) { - size_t it = 0; - size_t numBoxes = 0, numClasses = 0; - const auto& funcInputs = function->inputs(); - for (int i = 0; i < funcInputs.size(); ++i) { - const auto& funcInput = funcInputs[i]; - const auto& dims = inputs[funcInput.get_node_shared_ptr()].get_shape(); - - if (it == 1) { - numClasses = dims[1]; - } else { - numBatches = dims[0]; - numBoxes = dims[1]; - } - it++; - } - - ASSERT_TRUE(numBatches > 0 && numBoxes > 0 && numClasses > 0) - << "Expected numBatches, numBoxes, numClasses > 0, got:" << numBatches << ", " << numBoxes << ", " << numClasses; - - auto realClasses = numClasses; - if (m_attrs.background_class >= 0 && m_attrs.background_class < numClasses) { - realClasses = realClasses - 1; - } - - size_t maxOutputBoxesPerClass = 0; - if (m_attrs.nms_top_k >= 0) - maxOutputBoxesPerClass = std::min(numBoxes, static_cast(m_attrs.nms_top_k)); - else - maxOutputBoxesPerClass = numBoxes; - - maxOutputBoxesPerBatch = maxOutputBoxesPerClass * realClasses; - if (m_attrs.keep_top_k >= 0) - maxOutputBoxesPerBatch = - std::min(maxOutputBoxesPerBatch, static_cast(m_attrs.keep_top_k)); -} - -void MatrixNmsLayerTest::compare(const std::vector &expectedOutputs, - const std::vector &actualOutputs) { - auto batchIndex = -1; - size_t numBatches(0), maxOutputBoxesPerBatch(0); - GetOutputParams(numBatches, maxOutputBoxesPerBatch); - std::vector numPerBatch(numBatches); - for (int outputIndex = static_cast(expectedOutputs.size()) - 1; outputIndex >= 0 ; outputIndex--) { - const auto& actual = actualOutputs[outputIndex]; - const auto _dims = actual.get_shape(); - if (_dims.size() == 1 && _dims[0] == numBatches) { - batchIndex = outputIndex; - if (actual.get_element_type() == ov::element::i32) { - auto buffer = actual.data(); - std::copy_n(buffer, numBatches, numPerBatch.begin()); - } else { - auto buffer = actual.data(); - std::copy_n(buffer, numBatches, numPerBatch.begin()); - } - } - } - - for (int outputIndex = static_cast(expectedOutputs.size()) - 1; outputIndex >= 0 ; outputIndex--) { - const auto& expected = expectedOutputs[outputIndex]; - const auto& actual = actualOutputs[outputIndex]; - const auto actualBuffer = static_cast(actual.data()); - const auto expectedBuffer = static_cast(expected.data()); - - //Compare Selected Outputs & Selected Indices - if (outputIndex != batchIndex) { - if (outputIndex == 2) { - if (expected.get_size() != actual.get_size()) - throw std::runtime_error("Expected and actual size 3rd output have different size"); - } - - const auto& precision = actual.get_element_type(); - auto expected_offset = 0; - auto actual_offset = 0; - for (size_t i = 0; i < numPerBatch.size(); i++) { - auto validNums = numPerBatch[i]; - switch (precision) { - case ov::element::f32: { - switch (expected.get_element_type()) { - case ov::element::f32: - LayerTestsUtils::LayerTestsCommon::Compare( - reinterpret_cast(expectedBuffer) + expected_offset * 6, - 
reinterpret_cast(actualBuffer) + actual_offset * 6, validNums * 6, 1e-5f); - break; - case ov::element::f64: - LayerTestsUtils::LayerTestsCommon::Compare( - reinterpret_cast(expectedBuffer) + expected_offset * 6, - reinterpret_cast(actualBuffer) + actual_offset * 6, validNums *6, 1e-5f); - break; - default: - break; - } - if (m_outStaticShape) { - const auto fBuffer = static_cast(actual.data()); - for (size_t tailing = validNums * 6; tailing < maxOutputBoxesPerBatch * 6; tailing++) { - ASSERT_TRUE(std::abs(fBuffer[(actual_offset * 6 + tailing)] - -1.f) < 1e-5) - << "Invalid default value: " << fBuffer[i] << " at index: " << i; - } - } - break; - } - case ov::element::i32: { - switch (expected.get_element_type()) { - case ov::element::i32: - LayerTestsUtils::LayerTestsCommon::Compare( - reinterpret_cast(expectedBuffer) + expected_offset, - reinterpret_cast(actualBuffer) + actual_offset, validNums, 0); - break; - case ov::element::i64: - LayerTestsUtils::LayerTestsCommon::Compare( - reinterpret_cast(expectedBuffer) + expected_offset, - reinterpret_cast(actualBuffer) + actual_offset, validNums, 0); - break; - default: - break; - } - if (m_outStaticShape) { - const auto iBuffer = actual.data(); - for (size_t tailing = validNums; tailing < maxOutputBoxesPerBatch; tailing++) { - ASSERT_TRUE(iBuffer[actual_offset + tailing] == -1) << "Invalid default value: " << iBuffer[i] << " at index: " << i; - } - } - break; - } - case ov::element::i64: { - switch (expected.get_element_type()) { - case ov::element::i32: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer) + expected_offset, - reinterpret_cast(actualBuffer) + actual_offset, validNums, 0); - break; - case ov::element::i64: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer) + expected_offset, - reinterpret_cast(actualBuffer) + actual_offset, validNums, 0); - break; - default: - break; - } - if (m_outStaticShape) { - const auto iBuffer = actual.data(); - for (size_t tailing = validNums; tailing < maxOutputBoxesPerBatch; tailing++) { - ASSERT_TRUE(iBuffer[actual_offset + tailing] == -1) << "Invalid default value: " << iBuffer[i] << " at index: " << i; - } - } - break; - } - default: - FAIL() << "Comparator for " << precision << " precision isn't supported"; - } - if (!m_outStaticShape) { - expected_offset += validNums; - actual_offset += validNums; - } else { - expected_offset += validNums; - actual_offset += maxOutputBoxesPerBatch; - } - } - } else { - if (outputIndex == 2) { - if (expected.get_size() != actual.get_size()) - throw std::runtime_error("Expected and actual size 3rd output have different size"); - } - - const auto& precision = actual.get_element_type(); - size_t size = expected.get_size(); - switch (precision) { - case ov::element::i32: { - switch (expected.get_element_type()) { - case ov::element::i32: - LayerTestsUtils::LayerTestsCommon::Compare( - reinterpret_cast(expectedBuffer), - reinterpret_cast(actualBuffer), size, 0); - break; - case ov::element::i64: - LayerTestsUtils::LayerTestsCommon::Compare( - reinterpret_cast(expectedBuffer), - reinterpret_cast(actualBuffer), size, 0); - break; - default: - break; - } - break; - } - case ov::element::i64: { - switch (expected.get_element_type()) { - case ov::element::i32: - LayerTestsUtils::LayerTestsCommon::Compare( - reinterpret_cast(expectedBuffer), - reinterpret_cast(actualBuffer), size, 0); - break; - case ov::element::i64: - LayerTestsUtils::LayerTestsCommon::Compare( - reinterpret_cast(expectedBuffer), - 
reinterpret_cast(actualBuffer), size, 0); - break; - default: - break; - } - break; - } - default: - FAIL() << "Comparator for " << precision << " precision isn't supported"; - } - } - } -} - -void MatrixNmsLayerTest::SetUp() { - std::vector shapes; - InputPrecisions inPrecisions; - TopKParams topKParams; - ThresholdParams thresholdParams; - - std::tie(shapes, inPrecisions, m_attrs.sort_result_type, m_attrs.output_type, topKParams, thresholdParams, - m_attrs.background_class, m_attrs.normalized, m_attrs.decay_function, m_outStaticShape, targetDevice) = this->GetParam(); - - std::tie(m_attrs.nms_top_k, m_attrs.keep_top_k) = topKParams; - std::tie(m_attrs.score_threshold, m_attrs.gaussian_sigma, m_attrs.post_threshold) = thresholdParams; - - init_input_shapes(shapes); - - ElementType paramsPrec, maxBoxPrec, thrPrec; - std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions; - ov::ParameterVector params; - for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(paramsPrec, shape)); - } - auto nms = std::make_shared(params[0], params[1], m_attrs); - - function = std::make_shared(nms, params, "MatrixNMS"); -} - -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp b/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp deleted file mode 100644 index 8c629ca73b6196..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/memory.cpp +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include -#include - -#include "openvino/pass/low_latency.hpp" -#include "openvino/op/util/variable.hpp" -#include "openvino/op/util/variable_context.hpp" -#include "openvino/opsets/opset7.hpp" -#include "ov_models/builders.hpp" -#include "shared_test_classes/single_layer/memory.hpp" - -using namespace ngraph; -using ov::op::v1::Add; -using ov::op::v0::TensorIterator; -using ov::op::v0::Result; - -namespace LayerTestsDefinitions { - -std::string MemoryTest::getTestCaseName(const testing::TestParamInfo& obj) { - int64_t iteration_count; - InferenceEngine::Precision netPrecision; - InferenceEngine::SizeVector inputShape; - std::string targetDevice; - ngraph::helpers::MemoryTransformation transformation; - std::tie(transformation, iteration_count, inputShape, netPrecision, targetDevice) = obj.param; - - std::ostringstream result; - result << "transformation=" << transformation << "_"; - result << "iteration_count=" << iteration_count << "_"; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetDevice; - result << ")"; - return result.str(); -} - -void MemoryTest::SetUp() { - std::tie(transformation, iteration_count, inputShape, netPrecision, targetDevice) = this->GetParam(); - ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - if (transformation == ngraph::helpers::MemoryTransformation::NONE) { - CreateCommonFunc(); - } else { - CreateTIFunc(); - ApplyLowLatency(); - } - - auto tensor = ov::Tensor(ngPrc, inputShape); - auto variable_context = ov::op::util::VariableContext(); - auto variable_value = std::make_shared(tensor); - variable_context.set_variable_value(function->get_variable_by_id("v0"), variable_value); - eval_context["VariableContext"] = variable_context; -} - -void MemoryTest::Run() { - functionRefs = function->clone(); - 
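
(Editor's aside: the Run() body that follows installs a SIGSEGV handler so the per-op summary still reaches the CI report when a plugin crashes. Stripped to its core, the pattern is sketched below; save_summary_report stands in for OpSummary::getInstance().saveReport(), and note that iostream calls are not strictly async-signal-safe, a caveat the deleted code accepted as well.)

#include <csignal>
#include <cstdlib>
#include <iostream>

void save_summary_report();  // stand-in for the test framework's report flush

void install_crash_reporter() {
    std::signal(SIGSEGV, [](int) {
        save_summary_report();  // persist statistics before the process dies
        std::cout << "Unexpected application crash!" << std::endl;
        std::abort();
    });
}
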
SKIP_IF_CURRENT_TEST_IS_DISABLED() - using namespace LayerTestsUtils; - auto crashHandler = [](int errCode) { - auto& s = ov::test::utils::OpSummary::getInstance(); - s.saveReport(); - std::cout << "Unexpected application crash!" << std::endl; - std::abort(); - }; - signal(SIGSEGV, crashHandler); - - auto& s = ov::test::utils::OpSummary::getInstance(); - s.setDeviceName(targetDevice); - if (ov::test::utils::current_test_is_disabled()) { - s.updateOPsStats(function, ov::test::utils::PassRate::Statuses::SKIPPED); - GTEST_SKIP() << "Disabled test due to configuration" << std::endl; - } else { - s.updateOPsStats(function, ov::test::utils::PassRate::Statuses::CRASHED); - } - - try { - if (transformation != ngraph::helpers::MemoryTransformation::LOW_LATENCY_V2_REGULAR_API) { - LoadNetwork(); - } else { - CoreConfiguration(this); - ConfigureNetwork(); - executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice, configuration); - } - inferRequest = executableNetwork.CreateInferRequest(); - GenerateInputs(); - for (int64_t i = 0; i < iteration_count; ++i) { - Infer(); - Validate(); - } - s.updateOPsStats(functionRefs, ov::test::utils::PassRate::Statuses::PASSED); - } catch (const std::runtime_error& re) { - s.updateOPsStats(functionRefs, ov::test::utils::PassRate::Statuses::FAILED); - GTEST_FATAL_FAILURE_(re.what()); - } catch (const std::exception& ex) { - s.updateOPsStats(functionRefs, ov::test::utils::PassRate::Statuses::FAILED); - GTEST_FATAL_FAILURE_(ex.what()); - } catch (...) { - s.updateOPsStats(functionRefs, ov::test::utils::PassRate::Statuses::FAILED); - GTEST_FATAL_FAILURE_("Unknown failure occurred."); - } -} - -void MemoryTest::Infer() { - ConfigureInferRequest(); - inferRequest.Infer(); -} - -std::vector>> MemoryTest::CalculateRefs() { - using namespace ngraph; - function->validate_nodes_and_infer_types(); - - auto referenceInputs = std::vector>(inputs.size()); - auto refInputsTypes = std::vector(inputs.size()); - ov::TensorVector inputTensors; - for (auto& input : inputs) { - const auto& dataSize = input->byteSize(); - const auto& tensorDesc = input->getTensorDesc(); - - auto memory = InferenceEngine::as(input); - OPENVINO_ASSERT(memory); - const auto lockedMemory = memory->wmap(); - const auto buffer = lockedMemory.as(); - - inputTensors.emplace_back(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(tensorDesc.getPrecision()), - tensorDesc.getDims()); - std::memcpy(inputTensors.back().data(), buffer, dataSize); - } - - // evaluate method is not implemented for TI op. 
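
(Editor's aside: because Model::evaluate has no TensorIterator implementation, the reference path unrolls the iterator via the pass manager on the next lines and then evaluates with explicitly seeded state, seeded earlier in SetUp. Seeding a variable for evaluate looks roughly like this; the "v0" id follows the deleted SetUp, everything else is illustrative.)

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/util/variable_context.hpp"
#include "openvino/op/util/variable_value.hpp"

ov::TensorVector evaluate_with_state(const std::shared_ptr<ov::Model>& model,
                                     const ov::TensorVector& inputs,
                                     const ov::Tensor& initial_state) {
    auto variable_context = ov::op::util::VariableContext();
    variable_context.set_variable_value(model->get_variable_by_id("v0"),
                                        std::make_shared<ov::op::util::VariableValue>(initial_state));
    ov::EvaluationContext eval_context;
    eval_context["VariableContext"] = variable_context;

    ov::TensorVector outputs(model->get_output_size());
    model->evaluate(outputs, inputs, eval_context);
    return outputs;
}
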
- ov::pass::Manager manager; - manager.register_pass(); - manager.run_passes(function); - - const auto& outInfo = executableNetwork.GetOutputsInfo(); - ov::TensorVector outputTensors(outInfo.size()); - function->evaluate(outputTensors, inputTensors, eval_context); - - std::vector>> outputs(outInfo.size()); - for (size_t idx = 0; idx < outInfo.size(); ++idx) { - outputs[idx].first = outputTensors[idx].get_element_type(); - outputs[idx].second.resize(outputTensors[idx].get_byte_size()); - std::memcpy(outputs[idx].second.data(), outputTensors[idx].data(), outputTensors[idx].get_byte_size()); - } - return outputs; -} - -void MemoryTest::CreateTIFunc() { - auto param = std::make_shared(ngPrc, ov::Shape(inputShape)); - std::vector> shape = {{static_cast(iteration_count), 1}}; - auto iter_count = - std::make_shared(ngPrc, ov::Shape{static_cast(iteration_count), 1}); - - // Body - auto X = std::make_shared(ngPrc, ov::Shape(inputShape)); - auto Y = std::make_shared(ngPrc, ov::Shape(inputShape)); - auto Iter = std::make_shared(ngPrc, ov::Shape{1, 1}); - auto add = std::make_shared(X, Y); - auto res = std::make_shared(add); - auto Iter_res = std::make_shared(Iter); - auto body = std::make_shared(ov::OutputVector{res, Iter_res}, ov::ParameterVector{X, Y, Iter}); - - // TI construction - auto tensor_iterator = std::make_shared(); - tensor_iterator->set_body(body); - - tensor_iterator->set_merged_input(X, param, res); - tensor_iterator->set_invariant_input(Y, param); - tensor_iterator->set_sliced_input(Iter, iter_count, 0, 1, 1, -1, 0); - - auto output = tensor_iterator->get_iter_value(res, -1); - auto output_iter = tensor_iterator->get_concatenated_slices(Iter_res, 0, 1, 1, -1, 0); - function = - std::make_shared(ov::OutputVector{output, output_iter}, ov::ParameterVector{param, iter_count}, "PureTI"); -} - -void MemoryTest::CreateCommonFunc() { - ov::ParameterVector param{std::make_shared(ngPrc, ov::Shape(inputShape))}; - const auto variable_info = targetDevice == ov::test::utils::DEVICE_GPU - ? 
ov::op::util::VariableInfo{ov::Shape{inputShape}, ngPrc, "v0"} - : ov::op::util::VariableInfo{inputShape, ngPrc, "v0"}; - auto variable = std::make_shared(variable_info); - auto read_value = CreateReadValueOp(param.at(0), variable); - auto add = std::make_shared(read_value, param.at(0)); - auto assign = CreateAssignOp(add, variable); - auto res = std::make_shared(add); - function = std::make_shared(ov::ResultVector{res}, ov::SinkVector{assign}, param, "TestMemory"); -} - -void MemoryTest::ApplyLowLatency() { - if (transformation == ngraph::helpers::MemoryTransformation::LOW_LATENCY_V2) { - function->validate_nodes_and_infer_types(); - ov::pass::Manager manager; - manager.register_pass(); - manager.run_passes(function); - } else if (transformation == ngraph::helpers::MemoryTransformation::LOW_LATENCY_V2_ORIGINAL_INIT) { - function->validate_nodes_and_infer_types(); - ov::pass::Manager manager; - manager.register_pass(false); - manager.run_passes(function); - } -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/minimum_maximum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/minimum_maximum.cpp deleted file mode 100644 index 9873fe73416b82..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/minimum_maximum.cpp +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/minimum_maximum.hpp" - -namespace LayerTestsDefinitions { - std::string MaxMinLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector> inputShapes; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - ngraph::helpers::InputLayerType inputType; - ngraph::helpers::MinMaxOpType opType; - std::tie(inputShapes, opType, netPrecision, inPrc, outPrc, inLayout, outLayout, inputType, targetName) = obj.param; - std::ostringstream results; - - results << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - results << "OpType=" << opType << "_"; - results << "SecondaryInputType=" << inputType << "_"; - results << "netPRC=" << netPrecision.name() << "_"; - results << "inPRC=" << inPrc.name() << "_"; - results << "outPRC=" << outPrc.name() << "_"; - results << "inL=" << inLayout << "_"; - results << "outL=" << outLayout << "_"; - results << "trgDev=" << targetName << "_"; - return results.str(); - } - - void MaxMinLayerTest::SetUp() { - std::vector> inputShapes; - InferenceEngine::Precision netPrecision; - ngraph::helpers::InputLayerType inputType; - ngraph::helpers::MinMaxOpType opType; - std::tie(inputShapes, opType, netPrecision, inPrc, outPrc, inLayout, outLayout, inputType, targetDevice) = this->GetParam(); - if (inputShapes.size() != 2) { - IE_THROW() << "Unsupported inputs number for Minimum/Maximum operaton"; - } - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape(inputShapes[0]))}; - OPENVINO_SUPPRESS_DEPRECATED_START - auto secondaryInput = ngraph::builder::makeInputLayer(ngPrc, inputType, {inputShapes[1]}); - OPENVINO_SUPPRESS_DEPRECATED_END - if (inputType == ngraph::helpers::InputLayerType::PARAMETER) { - input.push_back(std::dynamic_pointer_cast(secondaryInput)); - } - - OPENVINO_SUPPRESS_DEPRECATED_START - auto op = ngraph::builder::makeMinMax(input[0], secondaryInput, opType); - 
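
(Editor's aside: the deprecated makeMinMax builder used above reduces to choosing between two elementwise ops. A drop-in sketch, with a bool in place of the helper's MinMaxOpType enum:)

#include <memory>

#include "openvino/op/maximum.hpp"
#include "openvino/op/minimum.hpp"

std::shared_ptr<ov::Node> make_min_max(const ov::Output<ov::Node>& a,
                                       const ov::Output<ov::Node>& b,
                                       bool is_minimum) {
    if (is_minimum)
        return std::make_shared<ov::op::v1::Minimum>(a, b);
    return std::make_shared<ov::op::v1::Maximum>(a, b);
}
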
OPENVINO_SUPPRESS_DEPRECATED_END - function = std::make_shared(op, input, "MinMax"); - } -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/multiclass_nms.cpp b/src/tests/functional/shared_test_classes/src/single_layer/multiclass_nms.cpp deleted file mode 100644 index b26258f22d179b..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/multiclass_nms.cpp +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include -#include "shared_test_classes/single_layer/multiclass_nms.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" - -#include "functional_test_utils/plugin_cache.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -using namespace ngraph; -using namespace InferenceEngine; -using ngraph::helpers::operator<<; - -std::string MulticlassNmsLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector shapes; - InputPrecisions inPrecisions; - int32_t nmsTopK, backgroundClass, keepTopK; - element::Type outType; - - op::util::MulticlassNmsBase::SortResultType sortResultType; - - InputfloatVar inFloatVar; - InputboolVar inboolVar; - - bool outputStatic; - - std::string targetDevice; - - std::tie(shapes, inPrecisions, nmsTopK, inFloatVar, backgroundClass, keepTopK, outType, sortResultType, inboolVar, outputStatic, targetDevice) = obj.param; - - ElementType paramsPrec, roisnumPrec, maxBoxPrec, thrPrec; - std::tie(paramsPrec, roisnumPrec, maxBoxPrec, thrPrec) = inPrecisions; - - float iouThr, scoreThr, nmsEta; - std::tie(iouThr, scoreThr, nmsEta) = inFloatVar; - - bool sortResCB, normalized; - std::tie(sortResCB, normalized) = inboolVar; - - std::ostringstream result; - result << "IS=("; - for (const auto& shape : shapes) { - result << ov::test::utils::partialShape2str({shape.first}) << "_"; - } - result << ")_TS=("; - for (const auto& shape : shapes) { - for (const auto& item : shape.second) { - result << ov::test::utils::vec2str(item) << "_"; - } - } - - result << ")_paramsPrec=" << paramsPrec << "_roisnumPrec=" << roisnumPrec << "_maxBoxPrec=" << maxBoxPrec << "_thrPrec=" << thrPrec << "_"; - result << "nmsTopK=" << nmsTopK << "_"; - result << "iouThr=" << iouThr << "_scoreThr=" << scoreThr << "_backgroundClass=" << backgroundClass << "_"; - result << "keepTopK=" << keepTopK << "_outType=" << outType << "_"; - result << "sortResultType=" << sortResultType << "_sortResCrossBatch=" << sortResCB << "_nmsEta=" << nmsEta << "_normalized=" << normalized << "_"; - result << "outputStatic=" << outputStatic; - result << "TargetDevice=" << targetDevice; - return result.str(); -} - -void MulticlassNmsLayerTest::generate_inputs(const std::vector& targetInputStaticShapes) { - inputs.clear(); - - const auto& funcInputs = function->inputs(); - ASSERT_TRUE(funcInputs.size() == 2 || funcInputs.size() == 3) << "Expected 3 inputs or 2 inputs."; - for (int i = 0; i < funcInputs.size(); ++i) { - const auto& funcInput = funcInputs[i]; - ov::Tensor tensor; - - if (i == 1) { // scores - tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); - - const size_t range = 1; - const size_t startFrom = 0; - const size_t k = 1000; - const int seed = 1; - std::default_random_engine random(seed); - std::uniform_int_distribution distribution(k * startFrom, k * (startFrom + range)); - - auto *dataPtr = tensor.data(); - for (size_t i = 0; i < tensor.get_size(); i++) { - auto 
value = static_cast<float>(distribution(random));
-                dataPtr[i] = value / static_cast<float>(k);
-            }
-        } else if (i == 0) {  // bboxes
-            tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
-        } else {  // roisnum
-            /* sum of rois is no larger than num_bboxes. */
-            ASSERT_TRUE(targetInputStaticShapes[i].size() == 1) << "Expected shape size 1 for input roisnum, got: " << targetInputStaticShapes[i];
-
-            // returns num random values whose sum is max_num.
-            auto _generate_roisnum = [](int num, int max_num) {
-                std::vector<int> array;
-                std::vector<int> results(num);
-
-                array.push_back(0);
-                for (auto i = 0; i < num - 1; i++) {
-                    array.push_back(std::rand() % max_num);
-                }
-                array.push_back(max_num);
-
-                std::sort(array.begin(), array.end());
-
-                for (auto i = 0; i < num; i++) {
-                    results[i] = array[i + 1] - array[i];
-                }
-
-                return results;
-            };
-            auto roisnum = _generate_roisnum(targetInputStaticShapes[i][0], targetInputStaticShapes[0][1] /*num_bboxes*/);
-
-            tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
-            if (tensor.get_element_type() == ov::element::i32) {
-                auto dataPtr = tensor.data<int32_t>();
-                for (size_t i = 0; i < roisnum.size(); i++) {
-                    dataPtr[i] = static_cast<int32_t>(roisnum[i]);
-                }
-            } else {
-                auto dataPtr = tensor.data<int64_t>();
-                for (size_t i = 0; i < roisnum.size(); i++) {
-                    dataPtr[i] = static_cast<int64_t>(roisnum[i]);
-                }
-            }
-        }
-
-        inputs.insert({funcInput.get_node_shared_ptr(), tensor});
-    }
-}
-
-void MulticlassNmsLayerTest::GetOutputParams(size_t& numBatches, size_t& maxOutputBoxesPerBatch) {
-    const auto& funcInputs = function->inputs();
-    const auto& boxes_dims = inputs[funcInputs[0].get_node_shared_ptr()].get_shape();
-    const auto& scores_dims = inputs[funcInputs[1].get_node_shared_ptr()].get_shape();
-
-    const auto shared = (scores_dims.size() == 3);
-    if (!shared)
-        ASSERT_TRUE(funcInputs.size() > 2) << "Expected 3 inputs when input 'score' is 2D.";
-
-    const auto& roisnum_dims = funcInputs.size() > 2 ? inputs[funcInputs[2].get_node_shared_ptr()].get_shape() : Shape();
-
-    const auto numBoxes = shared ? boxes_dims[1] : boxes_dims[1];
-    auto numClasses = shared ? scores_dims[1] : boxes_dims[0];
-    numBatches = shared ? scores_dims[0] : roisnum_dims[0];
-
-    ASSERT_TRUE(numBatches > 0 && numBoxes > 0 && numClasses > 0)
-        << "Expected numBatches, numBoxes, numClasses > 0, got:" << numBatches << ", " << numBoxes << ", " << numClasses;
-
-    auto realClasses = numClasses;
-    if (m_attrs.background_class >= 0 && m_attrs.background_class < numClasses) {
-        realClasses = realClasses - 1;
-    }
-
-    size_t maxOutputBoxesPerClass = 0;
-    if (m_attrs.nms_top_k >= 0)
-        maxOutputBoxesPerClass = std::min(numBoxes, static_cast<size_t>(m_attrs.nms_top_k));
-    else
-        maxOutputBoxesPerClass = numBoxes;
-
-    maxOutputBoxesPerBatch = maxOutputBoxesPerClass * realClasses;
-    if (m_attrs.keep_top_k >= 0)
-        maxOutputBoxesPerBatch =
-            std::min(maxOutputBoxesPerBatch, static_cast<size_t>(m_attrs.keep_top_k));
-}
-
-void MulticlassNmsLayerTest::compare(const std::vector<ov::Tensor>& expectedOutputs,
-                                     const std::vector<ov::Tensor>& actualOutputs) {
-    auto batchIndex = -1;  // output index for output 'selected_num'
-    size_t numBatches(0), maxOutputBoxesPerBatch(0);
-    GetOutputParams(numBatches, maxOutputBoxesPerBatch);
-    std::vector<size_t> numPerBatch(numBatches);
-
-    ASSERT_TRUE(expectedOutputs.size() == 3) << "Expect 3 outputs, got: " << expectedOutputs.size();
-
-    for (int outputIndex = static_cast<int>(expectedOutputs.size()) - 1; outputIndex >= 0; outputIndex--) {
-        const auto& actual = actualOutputs[outputIndex];
-        const auto _dims = actual.get_shape();
-        if (_dims.size() == 1) {  // 'selected_num'
-            ASSERT_TRUE(_dims[0] == numBatches) << "Expect output 'selected_num' has shape of " << numBatches << ", got: " << _dims[0];
-            batchIndex = outputIndex;
-            if (actual.get_element_type() == ov::element::i32) {
-                auto buffer = actual.data<int32_t>();
-                std::copy_n(buffer, numBatches, numPerBatch.begin());
-            } else {
-                auto buffer = actual.data<int64_t>();
-                std::copy_n(buffer, numBatches, numPerBatch.begin());
-            }
-
-            break;
-        }
-    }
-    ASSERT_TRUE(batchIndex > -1) << "Expect to get output index for 'selected_num'";
-
-    // reverse order could make sure output 'selected_num' gets checked first.
- for (int outputIndex = static_cast(expectedOutputs.size()) - 1; outputIndex >= 0; outputIndex--) { - const auto& expected = expectedOutputs[outputIndex]; - const auto& actual = actualOutputs[outputIndex]; - const auto actualBuffer = static_cast(actual.data()); - const auto expectedBuffer = static_cast(expected.data()); - - const auto expected_shape = expected.get_shape(); - const auto actual_shape = actual.get_shape(); - - // Compare Selected Outputs & Selected Indices - if (outputIndex != batchIndex) { - if (m_outStaticShape) { - ASSERT_TRUE(expected_shape[0] <= actual_shape[0]) << "Expected the compatible shape, got: " << expected_shape << " and " << actual_shape; - } else { - ASSERT_TRUE(expected_shape == actual_shape) << "Expected the same shape, got: " << expected_shape << " and " << actual_shape; - } - - const auto& precision = actual.get_element_type(); - auto expected_offset = 0; - auto actual_offset = 0; - for (size_t i = 0; i < numPerBatch.size(); i++) { - auto validNums = numPerBatch[i]; - switch (precision) { - case ov::element::f32: { - switch (expected.get_element_type()) { - case ov::element::f32: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer) + expected_offset * 6, - reinterpret_cast(actualBuffer) + actual_offset * 6, validNums * 6, 1e-5f); - break; - case ov::element::f64: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer) + expected_offset * 6, - reinterpret_cast(actualBuffer) + actual_offset * 6, validNums * 6, 1e-5f); - break; - default: - break; - } - - if (m_outStaticShape) { - const auto fBuffer = static_cast(actual.data()); - for (size_t tailing = validNums * 6; tailing < maxOutputBoxesPerBatch * 6; tailing++) { - ASSERT_TRUE(std::abs(fBuffer[(actual_offset * 6 + tailing)] - -1.f) < 1e-5) - << "Invalid default value: " << fBuffer[i] << " at index: " << i; - } - } - break; - } - case ov::element::i32: { - switch (expected.get_element_type()) { - case ov::element::i32: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer) + expected_offset, - reinterpret_cast(actualBuffer) + actual_offset, validNums, 0); - break; - case ov::element::i64: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer) + expected_offset, - reinterpret_cast(actualBuffer) + actual_offset, validNums, 0); - break; - default: - break; - } - if (m_outStaticShape) { - const auto iBuffer = actual.data(); - for (size_t tailing = validNums; tailing < maxOutputBoxesPerBatch; tailing++) { - ASSERT_TRUE(iBuffer[actual_offset + tailing] == -1) << "Invalid default value: " << iBuffer[i] << " at index: " << i; - } - } - break; - } - case ov::element::i64: { - switch (expected.get_element_type()) { - case ov::element::i32: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer) + expected_offset, - reinterpret_cast(actualBuffer) + actual_offset, validNums, 0); - break; - case ov::element::i64: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer) + expected_offset, - reinterpret_cast(actualBuffer) + actual_offset, validNums, 0); - break; - default: - break; - } - if (m_outStaticShape) { - const auto iBuffer = actual.data(); - for (size_t tailing = validNums; tailing < maxOutputBoxesPerBatch; tailing++) { - ASSERT_TRUE(iBuffer[actual_offset + tailing] == -1) << "Invalid default value: " << iBuffer[i] << " at index: " << i; - } - } - break; - } - default: - FAIL() << "Comparator for " << precision << " precision isn't supported"; - } - if (!m_outStaticShape) { - 
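
(Editor's aside: GetOutputParams above and the offset arithmetic just shown follow one rule: with dynamic output shapes each batch contributes exactly its valid boxes, while with static shapes every batch is padded to a worst-case stride and the tail rows hold -1. A condensed sketch of both pieces, with attribute fields as in ov::op::util::MulticlassNmsBase::Attributes and the walker being purely illustrative:)

#include <algorithm>
#include <cstddef>
#include <vector>

#include "openvino/op/util/multiclass_nms_base.hpp"

// worst-case rows a single batch can occupy in the padded output
size_t max_boxes_per_batch(const ov::op::util::MulticlassNmsBase::Attributes& a,
                           size_t num_boxes, size_t num_classes) {
    const size_t real_classes =
        (a.background_class >= 0 && static_cast<size_t>(a.background_class) < num_classes)
            ? num_classes - 1 : num_classes;
    size_t per_class = a.nms_top_k >= 0 ? std::min(num_boxes, static_cast<size_t>(a.nms_top_k)) : num_boxes;
    size_t per_batch = per_class * real_classes;
    if (a.keep_top_k >= 0)
        per_batch = std::min(per_batch, static_cast<size_t>(a.keep_top_k));
    return per_batch;
}

// offset bookkeeping while walking per-batch results
void walk_batches(const std::vector<size_t>& num_per_batch, bool out_static_shape, size_t padded_stride) {
    size_t expected_offset = 0, actual_offset = 0;
    for (size_t valid : num_per_batch) {
        // compare `valid` rows at expected_offset/actual_offset here;
        // static-shape outputs carry -1 in rows [valid, padded_stride)
        expected_offset += valid;
        actual_offset += out_static_shape ? padded_stride : valid;
    }
}
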
expected_offset += validNums; - actual_offset += validNums; - } else { - expected_offset += validNums; - actual_offset += maxOutputBoxesPerBatch; - } - } - } else { - ASSERT_TRUE(expected_shape == actual_shape) << "Expected the same shape, got: " << expected_shape << " and " << actual_shape; - - const auto& precision = actual.get_element_type(); - size_t size = expected.get_size(); - switch (precision) { - case ov::element::i32: { - switch (expected.get_element_type()) { - case ov::element::i32: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer), reinterpret_cast(actualBuffer), - size, 0); - break; - case ov::element::i64: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer), reinterpret_cast(actualBuffer), - size, 0); - break; - default: - break; - } - break; - } - case ov::element::i64: { - switch (expected.get_element_type()) { - case ov::element::i32: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer), reinterpret_cast(actualBuffer), - size, 0); - break; - case ov::element::i64: - LayerTestsUtils::LayerTestsCommon::Compare(reinterpret_cast(expectedBuffer), reinterpret_cast(actualBuffer), - size, 0); - break; - default: - break; - } - break; - } - default: - FAIL() << "Comparator for " << precision << " precision isn't supported"; - } - } - } -} - -void MulticlassNmsLayerTest::SetUp() { - std::vector shapes; - InputPrecisions inPrecisions; - size_t maxOutBoxesPerClass, backgroundClass, keepTopK; - element::Type outType; - - op::util::MulticlassNmsBase::SortResultType sortResultType; - - InputfloatVar inFloatVar; - InputboolVar inboolVar; - - std::tie(shapes, inPrecisions, maxOutBoxesPerClass, inFloatVar, backgroundClass, keepTopK, outType, sortResultType, inboolVar, - m_outStaticShape, targetDevice) = this->GetParam(); - - init_input_shapes(shapes); - - ElementType paramsPrec, roisnumPrec, maxBoxPrec, thrPrec; - std::tie(paramsPrec, roisnumPrec, maxBoxPrec, thrPrec) = inPrecisions; - - float iouThr, scoreThr, nmsEta; - std::tie(iouThr, scoreThr, nmsEta) = inFloatVar; - - bool sortResCB, normalized; - std::tie(sortResCB, normalized) = inboolVar; - - ParameterVector params; - if (inputDynamicShapes.size() > 2) { - std::vector types {paramsPrec, paramsPrec, roisnumPrec}; - OPENVINO_ASSERT(types.size() == inputDynamicShapes.size()); - for (size_t i = 0; i < types.size(); i++) { - auto param_node = std::make_shared(types[i], inputDynamicShapes[i]); - params.push_back(param_node); - } - } else { - for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(paramsPrec, shape)); - } - } - - m_attrs.iou_threshold = iouThr; - m_attrs.score_threshold = scoreThr; - m_attrs.nms_eta = nmsEta; - m_attrs.sort_result_type = sortResultType; - m_attrs.sort_result_across_batch = sortResCB; - m_attrs.output_type = outType; - m_attrs.nms_top_k = maxOutBoxesPerClass; - m_attrs.keep_top_k = keepTopK; - m_attrs.background_class = backgroundClass; - m_attrs.normalized = normalized; - - std::shared_ptr nms; - if (params.size() > 2) { - nms = std::make_shared(params[0], params[1], params[2], m_attrs); - } else { - nms = std::make_shared(params[0], params[1], m_attrs); - } - - function = std::make_shared(nms, params, "MulticlassNMS"); -} - -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_layer/multinomial.cpp b/src/tests/functional/shared_test_classes/src/single_layer/multinomial.cpp deleted file mode 100644 index 
efdc6b39a0ebd5..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/multinomial.cpp +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (C) 2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#include "shared_test_classes/single_layer/multinomial.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" - -namespace ov { -namespace test { -namespace subgraph { - -std::string MultinomialTest::getTestCaseName(const testing::TestParamInfo &obj) { - ElementType netType, inType, outType; - InputShape shape; - std::int64_t numSamples; - element::Type_t outputType; - bool withReplacement; - bool logProbs; - TargetDevice targetDevice; - Config config; - std::tie(netType, inType, outType, shape, numSamples, outputType, withReplacement, logProbs, targetDevice, config) = obj.param; - - std::ostringstream result; - result << "NetType=" << netType << "_"; - result << "InType=" << inType << "_"; - result << "OutType=" << outType << "_"; - result << "IS=" << ov::test::utils::partialShape2str({shape.first}) << "_"; - result << "TS="; - for (const auto& item : shape.second) { - result << ov::test::utils::vec2str(item) << "_"; - } - result << "NumSamples=" << numSamples << "_"; - result << "OutputType=" << outputType << "_"; - result << "WithReplacement=" << withReplacement << "_"; - result << "LogProbs=" << logProbs << "_"; - result << "Device=" << targetDevice; - - return result.str(); -} - -void MultinomialTest::SetUp() { - InputShape shape; - ElementType ngPrc; - std::int64_t numSamples; - element::Type_t outputType; - bool withReplacement; - bool logProbs; - - std::tie(ngPrc, inType, outType, shape, numSamples, outputType, withReplacement, logProbs, targetDevice, configuration) = this->GetParam(); - init_input_shapes({shape}); - ov::ParameterVector params; - for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(ngPrc, shape)); - } - - auto numSamplesConstant = std::make_shared( - ov::element::Type_t::i64, ov::Shape{1}, numSamples); - const auto paramOuts = - ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(params)); - - const auto multinomial = std::make_shared( - paramOuts.at(0), - numSamplesConstant, - outputType, - withReplacement, - logProbs, - 0, - 2); - - function = std::make_shared(multinomial, params, "Multinomial"); -} - -} // namespace subgraph -} // namespace test -} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp b/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp deleted file mode 100644 index 01caab433850c3..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/mvn.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace LayerTestsDefinitions { - -std::string Mvn1LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::SizeVector inputShapes; - InferenceEngine::Precision inputPrecision; - ov::AxisSet axes; - bool acrossChannels, normalizeVariance; - double eps; - std::string targetDevice; - std::tie(inputShapes, inputPrecision, axes, acrossChannels, normalizeVariance, eps, targetDevice) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "Precision=" << inputPrecision.name() << "_"; 
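
(Editor's aside: backing up to the multinomial SetUp deleted above, the op wiring reduces to a parameter plus a num_samples constant feeding ov::op::v13::Multinomial. A self-contained sketch follows; the i64 output type and seeds 0/2 mirror the deleted SetUp, while the 1x8 shape and sample count are placeholders.)

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/multinomial.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Model> make_multinomial() {
    auto probs = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8});
    auto num_samples = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {16});
    auto multinomial = std::make_shared<ov::op::v13::Multinomial>(
        probs, num_samples, ov::element::i64,
        /*with_replacement=*/true, /*log_probs=*/false,
        /*global_seed=*/0, /*op_seed=*/2);
    return std::make_shared<ov::Model>(multinomial->outputs(), ov::ParameterVector{probs}, "Multinomial");
}
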
- if (!axes.empty()) { - result << "ReductionAxes=" << ov::test::utils::vec2str(axes.to_vector()) << "_"; - } else { - result << "AcrossChannels=" << (acrossChannels ? "TRUE" : "FALSE") << "_"; - } - result << "NormalizeVariance=" << (normalizeVariance ? "TRUE" : "FALSE") << "_"; - result << "Epsilon=" << eps << "_"; - result << "TargetDevice=" << targetDevice; - return result.str(); -} - -void Mvn1LayerTest::SetUp() { - InferenceEngine::SizeVector inputShapes; - InferenceEngine::Precision inputPrecision; - ov::AxisSet axes; - bool acrossChanels, normalizeVariance; - double eps; - std::tie(inputShapes, inputPrecision, axes, acrossChanels, normalizeVariance, eps, targetDevice) = this->GetParam(); - auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - ov::ParameterVector param {std::make_shared(inType, ov::Shape(inputShapes))}; - OPENVINO_SUPPRESS_DEPRECATED_START - auto mvn = std::dynamic_pointer_cast(ngraph::builder::makeMVN(param[0], acrossChanels, normalizeVariance, eps)); - if (!axes.empty()) { - mvn = std::dynamic_pointer_cast(ngraph::builder::makeMVN(param[0], axes, normalizeVariance, eps)); - } - OPENVINO_SUPPRESS_DEPRECATED_END - ov::ResultVector results{std::make_shared(mvn)}; - function = std::make_shared(results, param, "MVN1"); -} - - -std::string Mvn6LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::SizeVector inputShapes; - InferenceEngine::Precision dataPrecision, axesPrecision; - std::vector axes; - bool normalizeVariance; - float eps; - std::string epsMode; - std::string targetDevice; - std::tie(inputShapes, dataPrecision, axesPrecision, axes, normalizeVariance, eps, epsMode, targetDevice) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "DataPrc=" << dataPrecision.name() << "_"; - result << "AxPrc=" << axesPrecision.name() << "_"; - result << "Ax=" << ov::test::utils::vec2str(axes) << "_"; - result << "NormVariance=" << (normalizeVariance ? 
"TRUE" : "FALSE") << "_"; - result << "Eps=" << eps << "_"; - result << "EM=" << epsMode << "_"; - result << "TargetDevice=" << targetDevice; - return result.str(); -} - -void Mvn6LayerTest::SetUp() { - InferenceEngine::SizeVector inputShapes; - InferenceEngine::Precision dataPrecision, axesPrecision; - std::vector axes; - bool normalizeVariance; - float eps; - std::string epsMode; - std::tie(inputShapes, dataPrecision, axesPrecision, axes, normalizeVariance, eps, epsMode, targetDevice) = this->GetParam(); - - auto dataType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(dataPrecision); - auto axesType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(axesPrecision); - - ov::ParameterVector param {std::make_shared(dataType, ov::Shape(inputShapes))}; - auto axesNode = ov::test::utils::deprecated::make_constant(axesType, ov::Shape{axes.size()}, axes); - OPENVINO_SUPPRESS_DEPRECATED_START - auto mvn = ngraph::builder::makeMVN6(param[0], axesNode, normalizeVariance, eps, epsMode); - OPENVINO_SUPPRESS_DEPRECATED_END - ov::ResultVector results{std::make_shared(mvn)}; - function = std::make_shared(results, param, "MVN6"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/nms_rotated.cpp b/src/tests/functional/shared_test_classes/src/single_layer/nms_rotated.cpp deleted file mode 100644 index 80e6cc98db203f..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/nms_rotated.cpp +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright (C) 2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/nms_rotated.hpp" -#include "openvino/op/nms_rotated.hpp" - -#include - -namespace LayerTestsDefinitions { - -using namespace InferenceEngine; -using namespace FuncTestUtils::PrecisionUtils; - -std::string NmsRotatedLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InputShapeParams inShapeParams; - InputPrecisions inPrecisions; - int32_t maxOutBoxesPerClass; - float iouThr, scoreThr; - bool sortResDescend, clockwise; - ov::element::Type outType; - std::string targetDevice; - std::tie(inShapeParams, - inPrecisions, - maxOutBoxesPerClass, - iouThr, - scoreThr, - sortResDescend, - outType, - clockwise, - targetDevice) = obj.param; - - size_t numBatches, numBoxes, numClasses; - std::tie(numBatches, numBoxes, numClasses) = inShapeParams; - - Precision inputPrec, maxBoxPrec, thrPrec; - std::tie(inputPrec, maxBoxPrec, thrPrec) = inPrecisions; - - std::ostringstream result; - result << "numBatches=" << numBatches << "_numBoxes=" << numBoxes << "_numClasses=" << numClasses << "_"; - result << "inputPrec=" << inputPrec << "_maxBoxPrec=" << maxBoxPrec << "_thrPrec=" << thrPrec << "_"; - result << "maxOutBoxesPerClass=" << maxOutBoxesPerClass << "_"; - result << "iouThr=" << iouThr << "_scoreThr=" << scoreThr << "_"; - result << "sortResDescend=" << sortResDescend << "_outType=" << outType << "_"; - result << "clockwise=" << clockwise << "_"; - result << "TargetDevice=" << targetDevice; - return result.str(); -} - -void NmsRotatedLayerTest::GenerateInputs() { - size_t it = 0; - for (const auto& input : cnnNetwork.getInputsInfo()) { - const auto& info = input.second; - Blob::Ptr blob; - - if (it == 1) { - blob = make_blob_with_precision(info->getTensorDesc()); - blob->allocate(); - if (info->getTensorDesc().getPrecision() == Precision::FP32) { - ov::test::utils::fill_data_random_float(blob, 1, 0, 1000); - } else { - ov::test::utils::fill_data_random_float(blob, 1, 0, 
1000); - } - } else { - blob = GenerateInput(*info); - } - inputs.push_back(blob); - it++; - } -} - -void NmsRotatedLayerTest::Compare( - const std::vector>>& expectedOutputs, - const std::vector& actualOutputs) { - size_t num_batches, num_boxes, num_classes; - std::tie(num_batches, num_boxes, num_classes) = inShapeParams; - - struct OutBox { - OutBox() = default; - - OutBox(int32_t batchId, int32_t classId, int32_t boxId, float score) { - this->batchId = batchId; - this->classId = classId; - this->boxId = boxId; - this->score = score; - } - - bool operator==(const OutBox& rhs) const { - return batchId == rhs.batchId && classId == rhs.classId && boxId == rhs.boxId; - } - - int32_t batchId; - int32_t classId; - int32_t boxId; - float score; - }; - - std::vector expected; - { - const auto selected_indices_size = expectedOutputs[0].second.size() / expectedOutputs[0].first.size(); - const auto selected_scores_size = expectedOutputs[1].second.size() / expectedOutputs[1].first.size(); - - ASSERT_EQ(selected_indices_size, selected_scores_size); - - const auto boxes_count = selected_indices_size / 3; - expected.resize(boxes_count); - - if (expectedOutputs[0].first.size() == 4) { - auto selected_indices_data = reinterpret_cast(expectedOutputs[0].second.data()); - - for (size_t i = 0; i < selected_indices_size; i += 3) { - expected[i / 3].batchId = selected_indices_data[i + 0]; - expected[i / 3].classId = selected_indices_data[i + 1]; - expected[i / 3].boxId = selected_indices_data[i + 2]; - } - } else { - auto selected_indices_data = reinterpret_cast(expectedOutputs[0].second.data()); - - for (size_t i = 0; i < selected_indices_size; i += 3) { - expected[i / 3].batchId = static_cast(selected_indices_data[i + 0]); - expected[i / 3].classId = static_cast(selected_indices_data[i + 1]); - expected[i / 3].boxId = static_cast(selected_indices_data[i + 2]); - } - } - - if (expectedOutputs[1].first.size() == 4) { - auto selected_scores_data = reinterpret_cast(expectedOutputs[1].second.data()); - for (size_t i = 0; i < selected_scores_size; i += 3) { - expected[i / 3].score = selected_scores_data[i + 2]; - } - } else { - auto selected_scores_data = reinterpret_cast(expectedOutputs[1].second.data()); - for (size_t i = 0; i < selected_scores_size; i += 3) { - expected[i / 3].score = static_cast(selected_scores_data[i + 2]); - } - } - } - - std::vector actual; - { - const auto selected_indices_size = actualOutputs[0]->byteSize() / sizeof(float); - const auto selected_indices_memory = as(actualOutputs[0]); - IE_ASSERT(selected_indices_memory); - const auto selected_indices_lockedMemory = selected_indices_memory->rmap(); - const auto selected_indices_data = selected_indices_lockedMemory.as(); - - const auto selected_scores_memory = as(actualOutputs[1]); - IE_ASSERT(selected_scores_memory); - const auto selected_scores_lockedMemory = selected_scores_memory->rmap(); - const auto selected_scores_data = selected_scores_lockedMemory.as(); - - for (size_t i = 0; i < selected_indices_size; i += 3) { - const int32_t batchId = selected_indices_data[i + 0]; - const int32_t classId = selected_indices_data[i + 1]; - const int32_t boxId = selected_indices_data[i + 2]; - const float score = selected_scores_data[i + 2]; - if (batchId == -1 || classId == -1 || boxId == -1) - break; - - actual.emplace_back(batchId, classId, boxId, score); - } - } - - ASSERT_EQ(expected.size(), actual.size()); - for (size_t i = 0; i < expected.size(); ++i) { - ASSERT_EQ(expected[i], actual[i]) << ", i=" << i; - ASSERT_NEAR(expected[i].score, 
actual[i].score, abs_threshold) << ", i=" << i; - } -} - -void NmsRotatedLayerTest::SetUp() { - InputPrecisions inPrecisions; - size_t maxOutBoxesPerClass; - float iouThr, scoreThr; - bool sortResDescend, clockwise; - ov::element::Type outType; - std::tie(inShapeParams, - inPrecisions, - maxOutBoxesPerClass, - iouThr, - scoreThr, - sortResDescend, - outType, - clockwise, - targetDevice) = this->GetParam(); - - size_t numBatches, numBoxes, numClasses; - std::tie(numBatches, numBoxes, numClasses) = inShapeParams; - - Precision inputPrec, maxBoxPrec, thrPrec; - std::tie(inputPrec, maxBoxPrec, thrPrec) = inPrecisions; - - if (inputPrec == Precision::FP16) { - abs_threshold = 0.1; - } else { - abs_threshold = std::numeric_limits::epsilon(); - } - - ov::ParameterVector params; - - const std::vector boxesShape{numBatches, numBoxes, 5}, scoresShape{numBatches, numClasses, numBoxes}; - const auto ngPrc = convertIE2nGraphPrc(inputPrec); - - const auto boxesNode = std::make_shared(ngPrc, ov::Shape(boxesShape)); - params.push_back(boxesNode); - const auto scoresNode = std::make_shared(ngPrc, ov::Shape(scoresShape)); - params.push_back(scoresNode); - - const auto maxOutputBoxesPerClassNode = std::make_shared(ov::element::Type_t::u32, - ov::Shape{}, - std::vector{maxOutBoxesPerClass}); - const auto iouThresholdNode = std::make_shared(ov::element::Type_t::f32, - ov::Shape{}, - std::vector{iouThr}); - const auto scoreTresholdNode = std::make_shared(ov::element::Type_t::f32, - ov::Shape{}, - std::vector{scoreThr}); - - const auto nmsNode = std::make_shared(params[0], - params[1], - maxOutputBoxesPerClassNode, - iouThresholdNode, - scoreTresholdNode, - sortResDescend, - outType, - clockwise); - - function = std::make_shared(nmsNode, params, "NMS"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp b/src/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp deleted file mode 100644 index 13c49d694b6624..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/non_max_suppression.hpp" - -#include - -namespace LayerTestsDefinitions { - -using namespace ngraph; -using namespace InferenceEngine; -using namespace FuncTestUtils::PrecisionUtils; - -std::string NmsLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InputShapeParams inShapeParams; - InputPrecisions inPrecisions; - int32_t maxOutBoxesPerClass; - float iouThr, scoreThr, softNmsSigma; - ov::op::v5::NonMaxSuppression::BoxEncodingType boxEncoding; - bool sortResDescend; - ov::element::Type outType; - std::string targetDevice; - std::tie(inShapeParams, - inPrecisions, - maxOutBoxesPerClass, - iouThr, - scoreThr, - softNmsSigma, - boxEncoding, - sortResDescend, - outType, - targetDevice) = obj.param; - - size_t numBatches, numBoxes, numClasses; - std::tie(numBatches, numBoxes, numClasses) = inShapeParams; - - Precision paramsPrec, maxBoxPrec, thrPrec; - std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions; - - std::ostringstream result; - result << "numBatches=" << numBatches << "_numBoxes=" << numBoxes << "_numClasses=" << numClasses << "_"; - result << "paramsPrec=" << paramsPrec << "_maxBoxPrec=" << maxBoxPrec << "_thrPrec=" << thrPrec << "_"; - result << "maxOutBoxesPerClass=" << maxOutBoxesPerClass << "_"; - result 
<< "iouThr=" << iouThr << "_scoreThr=" << scoreThr << "_softNmsSigma=" << softNmsSigma << "_"; - auto boxEncodingStr = (boxEncoding == ov::op::v5::NonMaxSuppression::BoxEncodingType::CENTER) ? "CENTER" : "CORNER"; - result << "boxEncoding=" << boxEncodingStr << "_sortResDescend=" << sortResDescend << "_outType=" << outType << "_"; - result << "TargetDevice=" << targetDevice; - return result.str(); -} - -void NmsLayerTest::GenerateInputs() { - size_t it = 0; - for (const auto& input : cnnNetwork.getInputsInfo()) { - const auto& info = input.second; - Blob::Ptr blob; - - if (it == 1) { - blob = make_blob_with_precision(info->getTensorDesc()); - blob->allocate(); - if (info->getTensorDesc().getPrecision() == Precision::FP32) { - ov::test::utils::fill_data_random_float(blob, 1, 0, 1000); - } else { - ov::test::utils::fill_data_random_float(blob, 1, 0, 1000); - } - } else { - blob = GenerateInput(*info); - } - inputs.push_back(blob); - it++; - } -} - -void NmsLayerTest::Compare( - const std::vector>>& expectedOutputs, - const std::vector& actualOutputs) { - CompareBBoxes(expectedOutputs, actualOutputs); -} - -typedef struct Rect { - int32_t x1; - int32_t y1; - int32_t x2; - int32_t y2; -} Rect; - -class Box { -public: - Box() = default; - - Box(int32_t batchId, int32_t classId, int32_t boxId, Rect rect, float score) { - this->batchId = batchId; - this->classId = classId; - this->boxId = boxId; - this->rect = rect; - this->score = score; - } - - int32_t batchId; - int32_t classId; - int32_t boxId; - Rect rect; - float score; -}; - -/* - * 1: selected_indices - tensor of type T_IND and shape [number of selected boxes, 3] containing information about - * selected boxes as triplets [batch_index, class_index, box_index]. 2: selected_scores - tensor of type T_THRESHOLDS - * and shape [number of selected boxes, 3] containing information about scores for each selected box as triplets - * [batch_index, class_index, box_score]. - * 3: valid_outputs - 1D tensor with 1 element of type T_IND representing the total number of selected boxes. 
- */ -void NmsLayerTest::CompareBBoxes( - const std::vector>>& expectedOutputs, - const std::vector& actualOutputs) { - size_t numBatches, numBoxes, numClasses; - std::tie(numBatches, numBoxes, numClasses) = inShapeParams; - - auto iouFunc = [](const Box& boxI, const Box& boxJ) { - const Rect& rectI = boxI.rect; - const Rect& rectJ = boxJ.rect; - - float areaI = (rectI.y2 - rectI.y1) * (rectI.x2 - rectI.x1); - float areaJ = (rectJ.y2 - rectJ.y1) * (rectJ.x2 - rectJ.x1); - - if (areaI <= 0.0f || areaJ <= 0.0f) { - return 0.0f; - } - - float intersection_ymin = std::max(rectI.y1, rectJ.y1); - float intersection_xmin = std::max(rectI.x1, rectJ.x1); - float intersection_ymax = std::min(rectI.y2, rectJ.y2); - float intersection_xmax = std::min(rectI.x2, rectJ.x2); - - float intersection_area = std::max(intersection_ymax - intersection_ymin, 0.0f) * - std::max(intersection_xmax - intersection_xmin, 0.0f); - - return intersection_area / (areaI + areaJ - intersection_area); - }; - - // Get input bboxes' coords - std::vector> coordList(numBatches, std::vector(numBoxes)); - { - const auto& input = inputs[0]; - auto memory = InferenceEngine::as(input); - IE_ASSERT(memory); - const auto lockedMemory = memory->rmap(); - const auto buffer = lockedMemory.as(); - for (size_t i = 0; i < numBatches; ++i) { - for (size_t j = 0; j < numBoxes; ++j) { - const int32_t y1 = static_cast(buffer[(i * numBoxes + j) * 4 + 0]); - const int32_t x1 = static_cast(buffer[(i * numBoxes + j) * 4 + 1]); - const int32_t y2 = static_cast(buffer[(i * numBoxes + j) * 4 + 2]); - const int32_t x2 = static_cast(buffer[(i * numBoxes + j) * 4 + 3]); - - coordList[i][j] = {std::min(y1, y2), std::min(x1, x2), std::max(y1, y2), std::max(x1, x2)}; - } - } - } - - auto compareBox = [](const Box& boxA, const Box& boxB) { - return (boxA.batchId < boxB.batchId) || (boxA.batchId == boxB.batchId && boxA.classId < boxB.classId) || - (boxA.batchId == boxB.batchId && boxA.classId == boxB.classId && boxA.boxId < boxB.boxId); - }; - - // Get expected bboxes' index/score - std::vector expectedList; - { - size_t selected_indices_size = expectedOutputs[0].second.size() / expectedOutputs[0].first.size(); - size_t selected_scores_size = expectedOutputs[1].second.size() / expectedOutputs[1].first.size(); - ASSERT_TRUE(selected_indices_size == selected_scores_size); - - expectedList.resize(selected_indices_size); - - if (expectedOutputs[0].first.size() == 4) { - auto selected_indices_data = reinterpret_cast(expectedOutputs[0].second.data()); - - for (size_t i = 0; i < selected_indices_size; i += 3) { - expectedList[i / 3].batchId = selected_indices_data[i + 0]; - expectedList[i / 3].classId = selected_indices_data[i + 1]; - expectedList[i / 3].boxId = selected_indices_data[i + 2]; - expectedList[i / 3].rect = coordList[expectedList[i / 3].batchId][expectedList[i / 3].boxId]; - } - } else { - auto selected_indices_data = reinterpret_cast(expectedOutputs[0].second.data()); - - for (size_t i = 0; i < selected_indices_size; i += 3) { - expectedList[i / 3].batchId = static_cast(selected_indices_data[i + 0]); - expectedList[i / 3].classId = static_cast(selected_indices_data[i + 1]); - expectedList[i / 3].boxId = static_cast(selected_indices_data[i + 2]); - expectedList[i / 3].rect = coordList[expectedList[i / 3].batchId][expectedList[i / 3].boxId]; - } - } - - if (expectedOutputs[1].first.size() == 4) { - auto selected_scores_data = reinterpret_cast(expectedOutputs[0].second.data()); - for (size_t i = 0; i < selected_scores_size; i += 3) { - expectedList[i / 
3].score = selected_scores_data[i + 2]; - } - } else { - auto selected_scores_data = reinterpret_cast(expectedOutputs[0].second.data()); - for (size_t i = 0; i < selected_scores_size; i += 3) { - expectedList[i / 3].score = static_cast(selected_scores_data[i + 2]); - } - } - - std::sort(expectedList.begin(), expectedList.end(), compareBox); - } - - // Get actual bboxes' index/score - std::vector actualList; - { - size_t selected_indices_size = actualOutputs[0]->byteSize() / sizeof(float); - auto selected_indices_memory = as(actualOutputs[0]); - IE_ASSERT(selected_indices_memory); - const auto selected_indices_lockedMemory = selected_indices_memory->rmap(); - const auto selected_indices_data = selected_indices_lockedMemory.as(); - - auto selected_scores_memory = as(actualOutputs[1]); - IE_ASSERT(selected_scores_memory); - const auto selected_scores_lockedMemory = selected_scores_memory->rmap(); - const auto selected_scores_data = selected_scores_lockedMemory.as(); - - for (size_t i = 0; i < selected_indices_size; i += 3) { - const int32_t batchId = selected_indices_data[i + 0]; - const int32_t classId = selected_indices_data[i + 1]; - const int32_t boxId = selected_indices_data[i + 2]; - const float score = selected_scores_data[i + 2]; - if (batchId == -1 || classId == -1 || boxId == -1) - break; - - actualList.emplace_back(batchId, classId, boxId, coordList[batchId][boxId], score); - } - std::sort(actualList.begin(), actualList.end(), compareBox); - } - - std::vector intersectionList; - std::vector differenceList; - { - std::list tempExpectedList(expectedList.size()), tempActualList(actualList.size()); - std::copy(expectedList.begin(), expectedList.end(), tempExpectedList.begin()); - std::copy(actualList.begin(), actualList.end(), tempActualList.begin()); - auto sameBox = [](const Box& boxA, const Box& boxB) { - return (boxA.batchId == boxB.batchId) && (boxA.classId == boxB.classId) && (boxA.boxId == boxB.boxId); - }; - - for (auto itA = tempActualList.begin(); itA != tempActualList.end(); ++itA) { - bool found = false; - for (auto itB = tempExpectedList.begin(); itB != tempExpectedList.end(); ++itB) { - if (sameBox(*itA, *itB)) { - intersectionList.emplace_back(*itB); - tempExpectedList.erase(itB); - found = true; - break; - } - } - - if (!found) { - differenceList.emplace_back(*itA); - } - } - differenceList.insert(differenceList.end(), tempExpectedList.begin(), tempExpectedList.end()); - - for (auto& item : differenceList) { - if ((item.rect.x1 == item.rect.x2) || (item.rect.y1 == item.rect.y2)) - continue; - - float maxIou = 0.f; - for (auto& refItem : intersectionList) { - maxIou = std::max(maxIou, iouFunc(item, refItem)); - - if (maxIou > 0.3f) - break; - } - - ASSERT_TRUE(maxIou > 0.3f) << "MaxIOU: " << maxIou << ", expectedList.size(): " << expectedList.size() - << ", actualList.size(): " << actualList.size() - << ", intersectionList.size(): " << intersectionList.size() - << ", diffList.size(): " << differenceList.size() - << ", batchId: " << item.batchId << ", classId: " << item.classId - << ", boxId: " << item.boxId << ", score: " << item.score - << ", coord: " << item.rect.x1 << ", " << item.rect.y1 << ", " << item.rect.x2 - << ", " << item.rect.y2; - } - } -} - -void NmsLayerTest::SetUp() { - InputPrecisions inPrecisions; - size_t maxOutBoxesPerClass; - float iouThr, scoreThr, softNmsSigma; - ov::op::v5::NonMaxSuppression::BoxEncodingType boxEncoding; - bool sortResDescend; - ov::element::Type outType; - std::tie(inShapeParams, - inPrecisions, - maxOutBoxesPerClass, - iouThr, - 
scoreThr, - softNmsSigma, - boxEncoding, - sortResDescend, - outType, - targetDevice) = this->GetParam(); - - size_t numBatches, numBoxes, numClasses; - std::tie(numBatches, numBoxes, numClasses) = inShapeParams; - - Precision paramsPrec, maxBoxPrec, thrPrec; - std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions; - - const std::vector boxesShape{numBatches, numBoxes, 4}, scoresShape{numBatches, numClasses, numBoxes}; - auto ngPrc = convertIE2nGraphPrc(paramsPrec); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(boxesShape)), - std::make_shared(ngPrc, ov::Shape(scoresShape))}; - auto paramOuts = helpers::convert2OutputVector(helpers::castOps2Nodes(params)); - - OPENVINO_SUPPRESS_DEPRECATED_START - auto nms = builder::makeNms(paramOuts[0], - paramOuts[1], - convertIE2nGraphPrc(maxBoxPrec), - convertIE2nGraphPrc(thrPrec), - maxOutBoxesPerClass, - iouThr, - scoreThr, - softNmsSigma, - boxEncoding == ov::op::v5::NonMaxSuppression::BoxEncodingType::CENTER, - sortResDescend, - outType); - OPENVINO_SUPPRESS_DEPRECATED_END - - if (targetDevice == ov::test::utils::DEVICE_CPU) { - function = std::make_shared(nms, params, "NMS"); - } else { - auto nms_0_identity = - std::make_shared(nms->output(0), - ov::op::v0::Constant::create(outType, ov::Shape{1}, {1})); - auto nms_1_identity = - std::make_shared(nms->output(1), - ov::op::v0::Constant::create(ngPrc, ov::Shape{1}, {1})); - auto nms_2_identity = - std::make_shared(nms->output(2), - ov::op::v0::Constant::create(outType, ov::Shape{1}, {1})); - nms_0_identity->set_friendly_name("Multiply_0"); - nms_1_identity->set_friendly_name("Multiply_1"); - nms_2_identity->set_friendly_name("Multiply_2"); - function = - std::make_shared(ov::OutputVector{nms_0_identity, nms_1_identity, nms_2_identity}, params, "NMS"); - } -} - -void Nms9LayerTest::SetUp() { - InputPrecisions inPrecisions; - size_t maxOutBoxesPerClass; - float iouThr, scoreThr, softNmsSigma; - ov::op::v5::NonMaxSuppression::BoxEncodingType boxEncoding; - bool sortResDescend; - ov::element::Type outType; - std::tie(inShapeParams, - inPrecisions, - maxOutBoxesPerClass, - iouThr, - scoreThr, - softNmsSigma, - boxEncoding, - sortResDescend, - outType, - targetDevice) = this->GetParam(); - - size_t numBatches, numBoxes, numClasses; - std::tie(numBatches, numBoxes, numClasses) = inShapeParams; - - Precision paramsPrec, maxBoxPrec, thrPrec; - std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions; - - const std::vector boxesShape{numBatches, numBoxes, 4}, scoresShape{numBatches, numClasses, numBoxes}; - auto ngPrc = convertIE2nGraphPrc(paramsPrec); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(boxesShape)), - std::make_shared(ngPrc, ov::Shape(scoresShape))}; - auto paramOuts = helpers::convert2OutputVector(helpers::castOps2Nodes(params)); - - OPENVINO_SUPPRESS_DEPRECATED_START - auto nms = builder::makeNms(paramOuts[0], - paramOuts[1], - convertIE2nGraphPrc(maxBoxPrec), - convertIE2nGraphPrc(thrPrec), - maxOutBoxesPerClass, - iouThr, - scoreThr, - softNmsSigma, - boxEncoding == ov::op::v5::NonMaxSuppression::BoxEncodingType::CENTER, - sortResDescend, - outType, - ngraph::builder::NmsVersion::NmsVersion9); - OPENVINO_SUPPRESS_DEPRECATED_END - - function = std::make_shared(nms, params, "NMS"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp b/src/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp deleted file mode 100644 index c5606694162f73..00000000000000 --- 
a/src/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/nonzero.hpp" - -namespace LayerTestsDefinitions { - -std::string NonZeroLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector inputShape; - InferenceEngine::Precision inputPrecision; - std::string targetDevice; - ConfigMap additionalConfig; - std::tie(inputShape, inputPrecision, targetDevice, additionalConfig) = obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "inPRC=" << inputPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - return result.str(); -} - -void NonZeroLayerTest::SetUp() { - auto inputShape = std::vector{}; - auto inputPrecision = InferenceEngine::Precision::UNSPECIFIED; - ConfigMap additionalConfig; - std::tie(inputShape, inputPrecision, targetDevice, additionalConfig) = GetParam(); - - configuration.insert(additionalConfig.cbegin(), additionalConfig.cend()); - - const auto& precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - const auto& paramNode = std::make_shared(precision, ov::Shape(inputShape)); - - auto nonZeroOp = std::make_shared(paramNode->output(0)); - - ov::ResultVector results{std::make_shared(nonZeroOp)}; - function = std::make_shared(results, ov::ParameterVector{paramNode}, "non_zero"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp b/src/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp deleted file mode 100644 index 0d855787c92fc3..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/normalize_l2.hpp" - -namespace LayerTestsDefinitions { - -std::string NormalizeL2LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector axes; - float eps; - ov::op::EpsMode epsMode; - InferenceEngine::SizeVector inputShape; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(axes, eps, epsMode, inputShape, netPrecision, targetDevice) = obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "axes=" << ov::test::utils::vec2str(axes) << "_"; - result << "eps=" << eps << "_"; - result << "epsMode=" << epsMode << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice; - return result.str(); -} - -InferenceEngine::Blob::Ptr NormalizeL2LayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - InferenceEngine::Blob::Ptr blobPtr; - const std::string& name = info.name(); - if (name == "data") { - blobPtr = FuncTestUtils::createAndFillBlobFloat(info.getTensorDesc(), 10, -5, 7, 222); - } else { - blobPtr = LayerTestsUtils::LayerTestsCommon::GenerateInput(info); - } - return blobPtr; -} - -void NormalizeL2LayerTest::SetUp() { - InferenceEngine::SizeVector inputShape; - std::vector axes; - float eps; - ov::op::EpsMode epsMode; - InferenceEngine::Precision netPrecision; - std::tie(axes, eps, epsMode, inputShape, netPrecision, targetDevice) = this->GetParam(); - auto ngPrc = 
FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto data_input = params[0]; - data_input->set_friendly_name("data"); - - auto normAxes = std::make_shared(ov::element::i64, ov::Shape{axes.size()}, axes); - auto norm = std::make_shared(data_input, normAxes, eps, epsMode); - - ov::ResultVector results{std::make_shared(norm)}; - function = std::make_shared(results, params, "NormalizeL2"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp b/src/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp deleted file mode 100644 index 040743cc082b56..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/one_hot.hpp" - -namespace LayerTestsDefinitions { - -std::string OneHotLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - int64_t axis; - ov::element::Type depth_type, set_type; - int64_t depth_val; - float on_val, off_val; - InferenceEngine::Precision netPrecision; - InferenceEngine::SizeVector inputShape; - LayerTestsUtils::TargetDevice targetDevice; - - std::tie(depth_type, depth_val, set_type, on_val, off_val, axis, netPrecision, inputShape, targetDevice) = obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "depthType=" << depth_type << "_"; - result << "depth=" << depth_val << "_"; - result << "SetValueType=" << set_type << "_"; - result << "onValue=" << on_val << "_"; - result << "offValue=" << off_val << "_"; - result << "axis=" << axis << "_"; - - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void OneHotLayerTest::SetUp() { - InferenceEngine::SizeVector inputShape; - int64_t axis; - ov::element::Type depth_type, set_type; - int64_t depth_val; - float on_val, off_val; - InferenceEngine::Precision netPrecision; - std::tie(depth_type, depth_val, set_type, on_val, off_val, axis, netPrecision, inputShape, targetDevice) = - this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto depth_const = std::make_shared(depth_type, ov::Shape{}, depth_val); - auto on_value_const = std::make_shared(set_type, ov::Shape{}, on_val); - auto off_value_const = std::make_shared(set_type, ov::Shape{}, off_val); - auto onehot = std::make_shared(params[0], depth_const, on_value_const, off_value_const, axis); - - ov::ResultVector results{std::make_shared(onehot)}; - function = std::make_shared(results, params, "OneHot"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/pad.cpp b/src/tests/functional/shared_test_classes/src/single_layer/pad.cpp deleted file mode 100644 index d0b492ce5b34db..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/pad.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/pad.hpp" - -namespace LayerTestsDefinitions { - -std::string PadLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision 
netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout; - InferenceEngine::SizeVector inputShapes; - std::vector padsBegin, padsEnd; - ngraph::helpers::PadMode padMode; - float argPadValue; - std::string targetDevice; - std::tie(padsBegin, padsEnd, argPadValue, padMode, netPrecision, inPrc, outPrc, inLayout, inputShapes, targetDevice) = - obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "padsBegin=" << ov::test::utils::vec2str(padsBegin) << "_"; - result << "padsEnd=" << ov::test::utils::vec2str(padsEnd) << "_"; - if (padMode == ngraph::helpers::PadMode::CONSTANT) { - result << "Value=" << argPadValue << "_"; - } - result << "PadMode=" << padMode << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void PadLayerTest::SetUp() { - InferenceEngine::SizeVector inputShape; - std::vector padsBegin, padsEnd; - float argPadValue; - ngraph::helpers::PadMode padMode; - InferenceEngine::Precision netPrecision; - std::tie(padsBegin, padsEnd, argPadValue, padMode, netPrecision, inPrc, outPrc, inLayout, inputShape, targetDevice) = - this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto pad = CreatePadOp(params[0], padsBegin, padsEnd, argPadValue, padMode); - ov::ResultVector results{std::make_shared(pad)}; - function = std::make_shared(results, params, "pad"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/pooling.cpp deleted file mode 100644 index b05cf229a8ca69..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/pooling.cpp +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/pooling.hpp" - -namespace LayerTestsDefinitions { - -std::string PoolingLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - poolSpecificParams poolParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::vector inputShapes; - std::string targetDevice; - std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - ngraph::helpers::PoolingTypes poolType; - std::vector kernel, stride; - std::vector padBegin, padEnd; - ov::op::PadType padType; - ov::op::RoundingType roundingType; - bool excludePad; - std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - switch (poolType) { - case ngraph::helpers::PoolingTypes::MAX: - result << "MaxPool_"; - break; - case ngraph::helpers::PoolingTypes::AVG: - result << "AvgPool_"; - result << "ExcludePad=" << excludePad << "_"; - break; - } - result << "K" << ov::test::utils::vec2str(kernel) << "_"; - result << "S" << ov::test::utils::vec2str(stride) << "_"; - result << "PB" << ov::test::utils::vec2str(padBegin) << "_"; - result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - result 
<< "Rounding=" << roundingType << "_"; - result << "AutoPad=" << padType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -std::string GlobalPoolingLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - poolSpecificParams poolParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetDevice; - size_t channels; - std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, channels, targetDevice) = obj.param; - ngraph::helpers::PoolingTypes poolType; - std::vector kernel, stride; - std::vector padBegin, padEnd; - ov::op::PadType padType; - ov::op::RoundingType roundingType; - bool excludePad; - std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams; - - std::vector inputShapes = {1, channels, kernel[0], kernel[1]}; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - switch (poolType) { - case ngraph::helpers::PoolingTypes::MAX: - result << "MaxPool_"; - break; - case ngraph::helpers::PoolingTypes::AVG: - result << "AvgPool_"; - result << "ExcludePad=" << excludePad << "_"; - break; - } - result << "K" << ov::test::utils::vec2str(kernel) << "_"; - result << "S" << ov::test::utils::vec2str(stride) << "_"; - result << "PB" << ov::test::utils::vec2str(padBegin) << "_"; - result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - if (padType == ov::op::PadType::EXPLICIT) { - result << "Rounding=" << roundingType << "_"; - } - result << "AutoPad=" << padType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -std::string MaxPoolingV8LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - maxPoolV8SpecificParams poolParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::vector inputShapes; - std::string targetDevice; - std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - std::vector kernel, stride, dilation; - std::vector padBegin, padEnd; - ov::op::PadType padType; - ov::op::RoundingType roundingType; - ov::element::Type indexElementType; - int64_t axis; - std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = poolParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "K" << ov::test::utils::vec2str(kernel) << "_"; - result << "S" << ov::test::utils::vec2str(stride) << "_"; - result << "D" << ov::test::utils::vec2str(dilation) << "_"; - result << "PB" << ov::test::utils::vec2str(padBegin) << "_"; - result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - result << "IET" << indexElementType << "_"; - result << "A" << axis << "_"; - result << "Rounding=" << roundingType << "_"; - result << "AutoPad=" << padType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - 
result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void PoolingLayerTest::SetUp() { - poolSpecificParams poolParams; - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - ngraph::helpers::PoolingTypes poolType; - std::vector kernel, stride; - std::vector padBegin, padEnd; - ov::op::PadType padType; - ov::op::RoundingType roundingType; - bool excludePad; - std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - OPENVINO_SUPPRESS_DEPRECATED_START - std::shared_ptr pooling = ngraph::builder::makePooling(params[0], - stride, - padBegin, - padEnd, - kernel, - roundingType, - padType, - excludePad, - poolType); - OPENVINO_SUPPRESS_DEPRECATED_END - - ov::ResultVector results{std::make_shared(pooling)}; - function = std::make_shared(results, params, "pooling"); -} - -void GlobalPoolingLayerTest::SetUp() { - poolSpecificParams poolParams; - InferenceEngine::Precision netPrecision; - size_t channels; - std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, channels, targetDevice) = this->GetParam(); - ngraph::helpers::PoolingTypes poolType; - std::vector kernel, stride; - std::vector padBegin, padEnd; - ov::op::PadType padType; - ov::op::RoundingType roundingType; - bool excludePad; - std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams; - - std::vector inputShape = {1, channels, kernel[1], kernel[0]}; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - OPENVINO_SUPPRESS_DEPRECATED_START - std::shared_ptr pooling = ngraph::builder::makePooling(params[0], - stride, - padBegin, - padEnd, - kernel, - roundingType, - padType, - excludePad, - poolType); - OPENVINO_SUPPRESS_DEPRECATED_END - - ov::ResultVector results{std::make_shared(pooling)}; - function = std::make_shared(results, params, "pooling"); -} - -void MaxPoolingV8LayerTest::SetUp() { - maxPoolV8SpecificParams poolParams; - std::vector inputShape; - InferenceEngine::Precision netPrecision; - std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - std::vector kernel, stride, dilation; - std::vector padBegin, padEnd; - ov::op::PadType padType; - ov::op::RoundingType roundingType; - ov::element::Type indexElementType; - int64_t axis; - std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = poolParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto maxPool = std::make_shared(params[0], stride, dilation, padBegin, padEnd, - kernel, roundingType, padType, - indexElementType, axis); - - const auto maxPoolV8_second_output_is_supported = targetDevice == ov::test::utils::DEVICE_GPU; - ov::ResultVector results; - if (maxPoolV8_second_output_is_supported) { - results = {std::make_shared(maxPool->output(0)), - std::make_shared(maxPool->output(1))}; - } else { - results = { 
std::make_shared(maxPool->output(0)) }; - } - function = std::make_shared(results, params, "MaxPoolV8"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/power.cpp b/src/tests/functional/shared_test_classes/src/single_layer/power.cpp deleted file mode 100644 index a866f929cf02cf..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/power.cpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/power.hpp" - -namespace LayerTestsDefinitions { - std::string PowerLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector> inputShapes; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::vector power; - std::tie(inputShapes, netPrecision, inPrc, outPrc, inLayout, outLayout, targetName, power) = obj.param; - std::ostringstream results; - - results << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - results << "Power=" << ov::test::utils::vec2str(power) << "_"; - results << "netPRC=" << netPrecision.name() << "_"; - results << "inPRC=" << inPrc.name() << "_"; - results << "outPRC=" << outPrc.name() << "_"; - results << "inL=" << inLayout << "_"; - results << "outL=" << outLayout << "_"; - results << "trgDev=" << targetName << "_"; - return results.str(); - } - - void PowerLayerTest::SetUp() { - threshold = 0.04f; - - std::vector> inputShapes; - InferenceEngine::Precision netPrecision; - std::vector power; - std::tie(inputShapes, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice, power) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector paramsIn{std::make_shared(ngPrc, ov::Shape(inputShapes[0]))}; - - auto power_const = std::make_shared(ngPrc, ov::Shape{ 1 }, power); - auto pow = std::make_shared(paramsIn[0], power_const); - - function = std::make_shared(pow, paramsIn, "power"); - } -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/prior_box.cpp b/src/tests/functional/shared_test_classes/src/single_layer/prior_box.cpp deleted file mode 100644 index ad263b33f73450..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/prior_box.cpp +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/prior_box.hpp" -#include - -namespace LayerTestsDefinitions { -std::string PriorBoxLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes, imageShapes; - std::string targetDevice; - priorBoxSpecificParams specParams; - std::tie(specParams, - netPrecision, - inPrc, outPrc, inLayout, outLayout, - inputShapes, - imageShapes, - targetDevice) = obj.param; - - std::vector min_size, max_size, aspect_ratio, density, fixed_ratio, fixed_size, variance; - float step, offset; - bool clip, flip, scale_all_sizes, min_max_aspect_ratios_order; - std::tie(min_size, max_size, aspect_ratio, - density, fixed_ratio, fixed_size, clip, - flip, step, offset, variance, scale_all_sizes, min_max_aspect_ratios_order) = specParams; - - 
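// Note (added annotation): every getTestCaseName() in these files serializes the full
// parameter tuple into the gtest case name, so a single configuration can be re-run in
// isolation with --gtest_filter. A hypothetical PriorBox name produced by the block
// below would look like:
//   IS=(1.3.30.30)_imageS=(1.3.300.300)_netPRC=FP32_..._step=64_off=0.5_clip=0_flip=1_trgDev=CPU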
std::ostringstream result; - const char separator = '_'; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << separator; - result << "imageS=" << ov::test::utils::vec2str(imageShapes) << separator; - result << "netPRC=" << netPrecision.name() << separator; - result << "inPRC=" << inPrc.name() << separator; - result << "outPRC=" << outPrc.name() << separator; - result << "inL=" << inLayout << separator; - result << "outL=" << outLayout << separator; - result << "min_s=" << ov::test::utils::vec2str(min_size) << separator; - result << "max_s=" << ov::test::utils::vec2str(max_size)<< separator; - result << "asp_r=" << ov::test::utils::vec2str(aspect_ratio)<< separator; - result << "dens=" << ov::test::utils::vec2str(density)<< separator; - result << "fix_r=" << ov::test::utils::vec2str(fixed_ratio)<< separator; - result << "fix_s=" << ov::test::utils::vec2str(fixed_size)<< separator; - result << "var=" << ov::test::utils::vec2str(variance)<< separator; - result << "step=" << step << separator; - result << "off=" << offset << separator; - result << "clip=" << clip << separator; - result << "flip=" << flip<< separator; - result << "scale_all=" << scale_all_sizes << separator; - result << "min_max_aspect_ratios_order=" << min_max_aspect_ratios_order << separator; - result << "trgDev=" << targetDevice; - - return result.str(); -} - -void PriorBoxLayerTest::SetUp() { - priorBoxSpecificParams specParams; - std::tie(specParams, netPrecision, - inPrc, outPrc, inLayout, outLayout, - inputShapes, imageShapes, targetDevice) = GetParam(); - - std::tie(min_size, max_size, aspect_ratio, - density, fixed_ratio, fixed_size, clip, - flip, step, offset, variance, scale_all_sizes, - min_max_aspect_ratios_order) = specParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShapes)), - std::make_shared(ngPrc, ov::Shape(imageShapes))}; - - ov::op::v8::PriorBox::Attributes attributes; - attributes.min_size = min_size; - attributes.max_size = max_size; - attributes.aspect_ratio = aspect_ratio; - attributes.density = density; - attributes.fixed_ratio = fixed_ratio; - attributes.fixed_size = fixed_size; - attributes.variance = variance; - attributes.step = step; - attributes.offset = offset; - attributes.clip = clip; - attributes.flip = flip; - attributes.scale_all_sizes = scale_all_sizes; - attributes.min_max_aspect_ratios_order = min_max_aspect_ratios_order; - - auto shape_of_1 = std::make_shared(params[0]); - auto shape_of_2 = std::make_shared(params[1]); - auto priorBox = std::make_shared( - shape_of_1, - shape_of_2, - attributes); - - ov::pass::disable_constant_folding(priorBox); - - ov::ResultVector results{std::make_shared(priorBox)}; - function = std::make_shared (results, params, "PriorBoxFunction"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/prior_box_clustered.cpp b/src/tests/functional/shared_test_classes/src/single_layer/prior_box_clustered.cpp deleted file mode 100644 index 86b1b3ff4fea70..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/prior_box_clustered.cpp +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/prior_box_clustered.hpp" - -namespace LayerTestsDefinitions { -std::string PriorBoxClusteredLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - 
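// Sketch of the pattern used in PriorBoxLayerTest::SetUp() above (assumed OpenVINO 2.0
// API; the helper name is illustrative): PriorBox consumes only the *shapes* of the
// feature and image inputs, so the test routes them through ShapeOf and then disables
// constant folding -- otherwise the whole PriorBox subgraph would be folded into a
// Constant before the tested plugin ever executes it.
#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/prior_box.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/pass/constant_folding.hpp"

std::shared_ptr<ov::Model> makePriorBoxModel(const ov::Shape& featShape, const ov::Shape& imgShape,
                                             const ov::op::v8::PriorBox::Attributes& attrs) {
    auto feat = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, featShape);
    auto img = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, imgShape);
    auto priorBox = std::make_shared<ov::op::v8::PriorBox>(std::make_shared<ov::op::v3::ShapeOf>(feat),
                                                           std::make_shared<ov::op::v3::ShapeOf>(img),
                                                           attrs);
    ov::pass::disable_constant_folding(priorBox);  // keep PriorBox in the compiled graph
    return std::make_shared<ov::Model>(ov::OutputVector{priorBox->output(0)},
                                       ov::ParameterVector{feat, img}, "PriorBoxFunction");
}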
InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes, imageShapes; - std::string targetDevice; - priorBoxClusteredSpecificParams specParams; - std::tie(specParams, - netPrecision, - inPrc, outPrc, inLayout, outLayout, - inputShapes, - imageShapes, - targetDevice) = obj.param; - - std::vector widths, heights, variances; - float step_width, step_height, step, offset; - bool clip; - std::tie(widths, - heights, - clip, - step_width, - step_height, - step, - offset, - variances) = specParams; - - std::ostringstream result; - const char separator = '_'; - - result << "IS=" << ov::test::utils::vec2str(inputShapes) << separator; - result << "imageS=" << ov::test::utils::vec2str(imageShapes) << separator; - result << "netPRC=" << netPrecision.name() << separator; - result << "inPRC=" << inPrc.name() << separator; - result << "outPRC=" << outPrc.name() << separator; - result << "inL=" << inLayout << separator; - result << "outL=" << outLayout << separator; - result << "widths=" << ov::test::utils::vec2str(widths) << separator; - result << "heights=" << ov::test::utils::vec2str(heights) << separator; - result << "variances="; - if (variances.empty()) - result << "()" << separator; - else - result << ov::test::utils::vec2str(variances) << separator; - result << "stepWidth=" << step_width << separator; - result << "stepHeight=" << step_height << separator; - result << "step=" << step << separator; - result << "offset=" << offset << separator; - result << "clip=" << std::boolalpha << clip << separator; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void PriorBoxClusteredLayerTest::SetUp() { - priorBoxClusteredSpecificParams specParams; - std::tie(specParams, netPrecision, - inPrc, outPrc, inLayout, outLayout, - inputShapes, imageShapes, targetDevice) = GetParam(); - - std::tie(widths, - heights, - clip, - step_width, - step_height, - step, - offset, - variances) = specParams; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShapes)), - std::make_shared(ngPrc, ov::Shape(inputShapes))}; - - ov::op::v0::PriorBoxClustered::Attributes attributes; - attributes.widths = widths; - attributes.heights = heights; - attributes.clip = clip; - attributes.step_widths = step_width; - attributes.step_heights = step_height; - attributes.step = step; - attributes.offset = offset; - attributes.variances = variances; - - auto shape_of_1 = std::make_shared(params[0]); - auto shape_of_2 = std::make_shared(params[1]); - auto priorBoxClustered = std::make_shared( - shape_of_1, - shape_of_2, - attributes); - - ov::ResultVector results{ std::make_shared(priorBoxClustered) }; - function = std::make_shared(results, params, "PB_Clustered"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/proposal.cpp b/src/tests/functional/shared_test_classes/src/single_layer/proposal.cpp deleted file mode 100644 index 7ec1c8dedfe22a..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/proposal.cpp +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/proposal.hpp" - -namespace LayerTestsDefinitions { - -const normalize_type normalize = true; -const feat_stride_type feat_stride = 1; -const box_size_scale_type 
box_size_scale = 2.0f; -const box_coordinate_scale_type box_coordinate_scale = 2.0f; - -std::string ProposalLayerTest::SerializeProposalSpecificParams(proposalSpecificParams& params) { - base_size_type base_size; - pre_nms_topn_type pre_nms_topn; - post_nms_topn_type post_nms_topn; - nms_thresh_type nms_thresh; - min_size_type min_size; - ratio_type ratio; - scale_type scale; - clip_before_nms_type clip_before_nms; - clip_after_nms_type clip_after_nms; - framework_type framework; - std::tie(base_size, pre_nms_topn, - post_nms_topn, - nms_thresh, - min_size, - ratio, - scale, - clip_before_nms, - clip_after_nms, - framework) = params; - - std::ostringstream result; - result << "base_size=" << base_size << "_"; - result << "pre_nms_topn=" << pre_nms_topn << "_"; - result << "post_nms_topn=" << post_nms_topn << "_"; - result << "nms_thresh=" << nms_thresh << "_"; - result << "feat_stride=" << feat_stride << "_"; - result << "min_size=" << min_size << "_"; - result << "ratio = " << ov::test::utils::vec2str(ratio) << "_"; - result << "scale = " << ov::test::utils::vec2str(scale) << "_"; - result << "clip_before_nms=" << clip_before_nms << "_"; - result << "clip_after_nms=" << clip_after_nms << "_"; - result << "normalize=" << normalize << "_"; - result << "box_size_scale=" << box_size_scale << "_"; - result << "box_coordinate_scale=" << box_coordinate_scale << "_"; - result << "framework=" << framework << "_"; - - return result.str(); -} - -std::string ProposalLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - proposalSpecificParams proposalParams; - std::string targetDevice; - std::tie(proposalParams, targetDevice) = obj.param; - auto proposalPramString = SerializeProposalSpecificParams(proposalParams); - - std::ostringstream result; - result << "targetDevice=" << targetDevice; - - return proposalPramString + result.str(); -} - -void ProposalLayerTest::Compare( - const std::vector>> &expectedOutputs, - const std::vector &actualOutputs) { - num_selected_boxes = 0; - for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) { - const auto &expected = expectedOutputs[outputIndex].second; - const auto &actual = actualOutputs[outputIndex]; - ASSERT_EQ(expected.size(), actual->byteSize()); - const auto &expectedBuffer = expected.data(); - - auto memory = InferenceEngine::as(actual); - IE_ASSERT(memory); - const auto lockedMemory = memory->rmap(); - const auto actualBuffer = lockedMemory.as(); - - const auto &precision = actual->getTensorDesc().getPrecision(); - auto size = actual->size(); - - // verifying the first output if there was less proposals than space - // provided, - // num_selected_boxes was set, take this into consideration while verifying the 2nd - // output - if (outputIndex == 1 && num_selected_boxes) { - size = num_selected_boxes; - } - - switch (precision) { - case InferenceEngine::Precision::BF16: - Compare(reinterpret_cast(expectedBuffer), - reinterpret_cast(actualBuffer), size, - ov::bfloat16(threshold), outputIndex); - break; - case InferenceEngine::Precision::FP16: - Compare(reinterpret_cast(expectedBuffer), - reinterpret_cast(actualBuffer), - size, - ov::float16(threshold), - outputIndex); - break; - case InferenceEngine::Precision::FP32: - Compare(reinterpret_cast(expectedBuffer), - reinterpret_cast(actualBuffer), size, - threshold, outputIndex); - break; - default: - FAIL() << "Comparator for " << precision << " precision isn't supported"; - } - } -} - -void ProposalLayerTest::SetUp() { - proposalSpecificParams proposalParams; - 
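// Annotation (added): Proposal takes three inputs -- class scores shaped
// {1, 2 * num_anchors, H, W} (two scores per anchor), box deltas shaped
// {1, 4 * num_anchors, H, W}, and an image-info vector, here {225, 225, 1}, which is
// assumed to be (height, width, scale). num_anchors = ratio.size() * scale.size(),
// as computed below.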
std::vector img_info = {225.0f, 225.0f, 1.0f}; - - std::tie(proposalParams, targetDevice) = this->GetParam(); - base_size_type base_size; - pre_nms_topn_type pre_nms_topn; - post_nms_topn_type post_nms_topn; - nms_thresh_type nms_thresh; - min_size_type min_size; - ratio_type ratio; - scale_type scale; - clip_before_nms_type clip_before_nms; - clip_after_nms_type clip_after_nms; - framework_type framework; - - std::tie(base_size, pre_nms_topn, - post_nms_topn, - nms_thresh, - min_size, - ratio, - scale, - clip_before_nms, - clip_after_nms, - framework) = proposalParams; - - size_t bottom_w = base_size; - size_t bottom_h = base_size; - size_t num_anchors = ratio.size() * scale.size(); - - std::vector scoresShape = {1, 2 * num_anchors, bottom_h, bottom_w}; - std::vector boxesShape = {1, 4 * num_anchors, bottom_h, bottom_w}; - std::vector imageInfoShape = {3}; - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(InferenceEngine::Precision::FP16); - // a_ and b_ are a workaround to solve alphabetic param sorting that destroys ordering - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(scoresShape)), - std::make_shared(ngPrc, ov::Shape(boxesShape))}; - params[0]->set_friendly_name("a_scores"); - params[1]->set_friendly_name("b_boxes"); - - OPENVINO_SUPPRESS_DEPRECATED_START - auto proposal = std::dynamic_pointer_cast( - ngraph::builder::makeProposal(params[0], params[1], img_info, ngPrc, - base_size, - pre_nms_topn, - post_nms_topn, - nms_thresh, - feat_stride, - min_size, - ratio, - scale, - clip_before_nms, - clip_after_nms, - normalize, - box_size_scale, - box_coordinate_scale, - framework)); - OPENVINO_SUPPRESS_DEPRECATED_END - - ov::ResultVector results{ - std::make_shared(proposal->output(0)), - std::make_shared(proposal->output(1))}; - function = std::make_shared(results, params, "proposal"); -} - -InferenceEngine::Blob::Ptr ProposalLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - InferenceEngine::Blob::Ptr blobPtr; - - const std::string name = info.name(); - if (name == "a_scores") { - blobPtr = FuncTestUtils::createAndFillBlobFloat(info.getTensorDesc(), 1, 0, 1000, 8234231); - } else if (name == "b_boxes") { - blobPtr = FuncTestUtils::createAndFillBlobFloatNormalDistribution(info.getTensorDesc(), 0.0f, 0.2f, 7235346); - } - - return blobPtr; -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp deleted file mode 100644 index 5c270921d52e91..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/psroi_pooling.hpp" - -namespace LayerTestsDefinitions { - -std::string PSROIPoolingLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector inputShape; - std::vector coordsShape; - size_t outputDim; - size_t groupSize; - float spatialScale; - size_t spatialBinsX; - size_t spatialBinsY; - std::string mode; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(inputShape, coordsShape, outputDim, groupSize, spatialScale, spatialBinsX, spatialBinsY, mode, netPrecision, targetDevice) = obj.param; - - std::ostringstream result; - - result << "in_shape=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "coord_shape=" << 
ov::test::utils::vec2str(coordsShape) << "_"; - result << "out_dim=" << outputDim << "_"; - result << "group_size=" << groupSize << "_"; - result << "scale=" << spatialScale << "_"; - result << "bins_x=" << spatialBinsX << "_"; - result << "bins_y=" << spatialBinsY << "_"; - result << "mode=" << mode << "_"; - result << "prec=" << netPrecision.name() << "_"; - result << "dev=" << targetDevice; - return result.str(); -} - -static int randInt(int low, int high) { - std::random_device rd; - std::mt19937 gen(rd()); - std::uniform_int_distribution dis(low, high); - return dis(gen); -} - - void PSROIPoolingLayerTest::fillROITensor(float* buffer, int numROIs, int batchSize, - int height, int width, int groupSize, - float spatialScale, int spatialBinsX, int spatialBinsY, const std::string& mode) { - int minRoiWidth = groupSize; - int maxRoiWidth = width / groupSize * groupSize; - int minRoiHeight = groupSize; - int maxRoiHeight = height / groupSize * groupSize; - float scaleX = spatialScale; - float scaleY = spatialScale; - if (mode == "bilinear") { - minRoiWidth = spatialBinsX; - maxRoiWidth = width / spatialBinsX * spatialBinsX; - minRoiHeight = spatialBinsY; - maxRoiHeight = height / spatialBinsY * spatialBinsY; - scaleX *= width; - scaleY *= height; - } - int batchId = 0; - for (int i = 0; i < numROIs; i++) { - int sizeX = std::min(width, randInt(std::min(minRoiWidth, maxRoiWidth), std::max(minRoiWidth, maxRoiWidth))); - int sizeY = std::min(height, randInt(std::min(minRoiWidth, maxRoiWidth), std::max(minRoiWidth, maxRoiWidth))); - int startX = randInt(0, std::max(1, width - sizeX - 1)); - int startY = randInt(0, std::max(1, height - sizeY - 1)); - - float* roi = buffer + i * 5; - roi[0] = batchId; - roi[1] = startX / scaleX; - roi[2] = startY / scaleY; - roi[3] = (startX + sizeX - 1) / scaleX; - roi[4] = (startY + sizeY - 1) / scaleY; - - batchId = (batchId + 1) % batchSize; - } -} - -void PSROIPoolingLayerTest::GenerateInputs() { - auto inputShape = cnnNetwork.getInputShapes().begin()->second; - - size_t it = 0; - for (const auto &input : cnnNetwork.getInputsInfo()) { - const auto &info = input.second; - InferenceEngine::Blob::Ptr blob; - - if (it == 1) { - blob = make_blob_with_precision(info->getTensorDesc()); - blob->allocate(); - fillROITensor(blob->buffer(), blob->size() / 5, - inputShape[0], inputShape[2], inputShape[3], groupSize_, - spatialScale_, spatialBinsX_, spatialBinsY_, mode_); - } else { - blob = GenerateInput(*info); - } - inputs.push_back(blob); - it++; - } -} - -void PSROIPoolingLayerTest::SetUp() { - std::vector inputShape; - std::vector coordsShape; - size_t outputDim; - InferenceEngine::Precision netPrecision; - std::tie(inputShape, coordsShape, outputDim, groupSize_, spatialScale_, - spatialBinsX_, spatialBinsY_, mode_, netPrecision, targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape)), - std::make_shared(ngPrc, ov::Shape(coordsShape))}; - std::shared_ptr psroiPooling = std::make_shared(params[0], - params[1], - outputDim, - groupSize_, - spatialScale_, - spatialBinsX_, - spatialBinsY_, - mode_); - ov::ResultVector results{std::make_shared(psroiPooling)}; - function = std::make_shared(results, params, "psroi_pooling"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp b/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp 
deleted file mode 100644 index caa6dfef8a3c06..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "openvino/op/parameter.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "shared_test_classes/single_layer/random_uniform.hpp" - -namespace LayerTestsDefinitions { - - -std::string RandomUniformLayerTest::getTestCaseName( - const testing::TestParamInfo &obj) { - RandomUniformTypeSpecificParams randomUniformTypeSpecificParams; - ov::Shape output_shape; - int64_t global_seed; - int64_t op_seed; - std::string targetName; - std::tie(output_shape, randomUniformTypeSpecificParams, global_seed, op_seed, targetName) = obj.param; - - std::ostringstream result; - result << "outputShape=" << ov::test::utils::vec2str(output_shape) << "_"; - result << "global_seed=" << global_seed << "_"; - result << "op_seed=" << op_seed << "_"; - result << "outputType=" << randomUniformTypeSpecificParams.precision.name() << "_"; - result << "min_val=" << randomUniformTypeSpecificParams.min_value << "_"; - result << "max_val=" << randomUniformTypeSpecificParams.max_value; - return result.str(); -} - -namespace { - -template <InferenceEngine::Precision::ePrecision p> -std::shared_ptr<ov::op::v0::Constant> -createRangeConst(const typename InferenceEngine::PrecisionTrait<p>::value_type &value) { - return std::make_shared<ov::op::v0::Constant>(FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(p), ov::Shape{1}, - std::vector<typename InferenceEngine::PrecisionTrait<p>::value_type>{ - value}); -} - -std::shared_ptr<ov::op::v0::Constant> createConstant(InferenceEngine::Precision p, double value) { - using namespace InferenceEngine; - switch (p) { - case Precision::FP32: - return createRangeConst<Precision::FP32>( - static_cast<PrecisionTrait<Precision::FP32>::value_type>(value)); - case Precision::FP16: - return createRangeConst<Precision::FP16>( - static_cast<PrecisionTrait<Precision::FP16>::value_type>(value)); - default: - return createRangeConst<Precision::I32>( - static_cast<PrecisionTrait<Precision::I32>::value_type>(value)); - } -} - -} // unnamed namespace - -void RandomUniformLayerTest::SetUp() { - RandomUniformTypeSpecificParams randomUniformParams; - int64_t global_seed; - int64_t op_seed; - ov::Shape output_shape; - std::string targetName; - std::tie(output_shape, randomUniformParams, global_seed, op_seed, targetDevice) = this->GetParam(); - const auto precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(randomUniformParams.precision); - - // Use Parameter as input with desired precision to properly configure execution configuration - // in CoreConfiguration() function - auto input = std::make_shared<ov::op::v0::Parameter>(precision, output_shape); - auto shape_of = std::make_shared<ov::op::v3::ShapeOf>(input); - - auto min_value = createConstant(randomUniformParams.precision, randomUniformParams.min_value); - auto max_value = createConstant(randomUniformParams.precision, randomUniformParams.max_value); - auto random_uniform = std::make_shared<ov::op::v8::RandomUniform>(shape_of, - min_value, - max_value, - precision, - global_seed, - op_seed); - ov::ResultVector results{std::make_shared<ov::op::v0::Result>(random_uniform)}; - - function = std::make_shared<ov::Model>(results, ov::ParameterVector{input}, "random_uniform"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/range.cpp b/src/tests/functional/shared_test_classes/src/single_layer/range.cpp deleted file mode 100644 index ec8ef181c4afd7..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/range.cpp +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/range.hpp" - -namespace LayerTestsDefinitions { - -std::string RangeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - float start, stop, step; - std::string targetDevice; - std::tie(start, stop, step, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = obj.param; - - std::ostringstream result; - const char separator = '_'; - result << "Start=" << start << separator; - result << "Stop=" << stop << separator; - result << "Step=" << step << separator; - result << "netPRC=" << netPrecision.name() << separator; - result << "inPRC=" << inPrc.name() << separator; - result << "outPRC=" << outPrc.name() << separator; - result << "inL=" << inLayout << separator; - result << "outL=" << outLayout << separator; - result << "trgDev=" << targetDevice; - auto str = result.str(); - replace(str.begin(), str.end(), '.', '_'); - return str; -} - -void RangeLayerTest::Infer() { - inferRequest = executableNetwork.CreateInferRequest(); - inputs.clear(); - - auto blobStart = inferRequest.GetBlob("start"); - blobStart = FuncTestUtils::createAndFillBlobWithFloatArray(blobStart->getTensorDesc(), &start, 1); - - auto blobStop = inferRequest.GetBlob("stop"); - blobStop =
FuncTestUtils::createAndFillBlobWithFloatArray(blobStop->getTensorDesc(), &stop, 1); - - auto blobStep = inferRequest.GetBlob("step"); - blobStep = FuncTestUtils::createAndFillBlobWithFloatArray(blobStep->getTensorDesc(), &step, 1); - - inferRequest.Infer(); -} - -void RangeLayerTest::SetUp() { - InferenceEngine::Precision netPrecision; - tie(start, stop, step, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - std::vector>> inputs {{"start", {}}, {"stop", {}}, {"step", {}}}; - ov::ParameterVector params; - for (auto&& shape : inputs) { - auto param = std::make_shared(ngPrc, ov::Shape(shape.second)); - param->set_friendly_name(shape.first); - params.push_back(param); - } - auto range = std::make_shared(params[0], params[1], params[2], ngPrc); - - function = std::make_shared( - std::make_shared(range), - params, - "Range"); -} - -std::string RangeNumpyLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrc; - InferenceEngine::Precision paramPrc; - InferenceEngine::Precision outPrc; - InferenceEngine::Layout inLayout, outLayout; - float start, stop, step; - std::string targetDevice; - std::tie(start, stop, step, paramPrc, netPrc, outPrc, inLayout, outLayout, targetDevice) = obj.param; - - std::ostringstream result; - const char separator = '_'; - result << "Start=" << start << separator; - result << "Stop=" << stop << separator; - result << "Step=" << step << separator; - result << "paramPRC=" << paramPrc.name() << separator; - result << "netPRC=" << netPrc.name() << separator; - result << "inL=" << inLayout << separator; - result << "outL=" << outLayout << separator; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void RangeNumpyLayerTest::Infer() { - inferRequest = executableNetwork.CreateInferRequest(); - inputs.clear(); - - auto blobStart = inferRequest.GetBlob("start"); - blobStart = FuncTestUtils::createAndFillBlobWithFloatArray(blobStart->getTensorDesc(), &start, 1); - - auto blobStop = inferRequest.GetBlob("stop"); - blobStop = FuncTestUtils::createAndFillBlobWithFloatArray(blobStop->getTensorDesc(), &stop, 1); - - auto blobStep = inferRequest.GetBlob("step"); - blobStep = FuncTestUtils::createAndFillBlobWithFloatArray(blobStep->getTensorDesc(), &step, 1); - - inferRequest.Infer(); -} - -void RangeNumpyLayerTest::SetUp() { - InferenceEngine::Precision netPrc; - InferenceEngine::Precision paramPrc; - std::tie(start, stop, step, paramPrc, netPrc, outPrc, inLayout, outLayout, targetDevice) = GetParam(); - auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc); - auto ngParamPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(paramPrc); - - ov::ParameterVector params{std::make_shared(ngParamPrc, ov::Shape()), - std::make_shared(ngParamPrc, ov::Shape()), - std::make_shared(ngParamPrc, ov::Shape())}; - params[0]->set_friendly_name("start"); - params[1]->set_friendly_name("stop"); - params[2]->set_friendly_name("step"); - - auto range = std::make_shared(params[0], params[1], params[2], ngNetPrc); - const ov::ResultVector results{std::make_shared(range)}; - function = std::make_shared(results, params, "Range"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp b/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp deleted file mode 100644 index 432bd72ba8dcbb..00000000000000 --- 
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp b/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp
deleted file mode 100644
index 432bd72ba8dcbb..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/rdft.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string RDFTLayerTest::getTestCaseName(const testing::TestParamInfo<RDFTParams>& obj) {
-    InferenceEngine::SizeVector inputShapes;
-    InferenceEngine::Precision inputPrecision;
-    std::vector<int64_t> axes;
-    std::vector<int64_t> signalSize;
-    ngraph::helpers::DFTOpType opType;
-    std::string targetDevice;
-    std::tie(inputShapes, inputPrecision, axes, signalSize, opType, targetDevice) = obj.param;
-
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_";
-    result << "Precision=" << inputPrecision.name() << "_";
-    result << "Axes=" << ov::test::utils::vec2str(axes) << "_";
-    result << "SignalSize=" << ov::test::utils::vec2str(signalSize) << "_";
-    result << "Inverse=" << (opType == ngraph::helpers::DFTOpType::INVERSE) << "_";
-    result << "TargetDevice=" << targetDevice;
-    return result.str();
-}
-
-void RDFTLayerTest::SetUp() {
-    InferenceEngine::SizeVector inputShapes;
-    InferenceEngine::Precision inputPrecision;
-    std::vector<int64_t> axes;
-    std::vector<int64_t> signalSize;
-    ngraph::helpers::DFTOpType opType;
-    std::tie(inputShapes, inputPrecision, axes, signalSize, opType, targetDevice) = this->GetParam();
-    auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    ov::ParameterVector paramVector;
-    auto paramData = std::make_shared<ov::op::v0::Parameter>(inType, ov::Shape(inputShapes));
-    paramVector.push_back(paramData);
-
-    auto rdft = ngraph::builder::makeRDFT(paramVector[0], axes, signalSize, opType);
-
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(rdft)};
-    function = std::make_shared<ov::Model>(results, paramVector, "RDFT");
-}
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp
deleted file mode 100644
index 648e8383b4058e..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/reduce_ops.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string ReduceOpsLayerTest::getTestCaseName(const testing::TestParamInfo<reduceMeanParams>& obj) {
-    InferenceEngine::Precision netPrecision;
-    InferenceEngine::Precision inPrc, outPrc;
-    InferenceEngine::Layout inLayout;
-    bool keepDims;
-    ngraph::helpers::ReductionType reductionType;
-    std::vector<size_t> inputShape;
-    std::vector<int> axes;
-    ov::test::utils::OpType opType;
-    std::string targetDevice;
-    std::tie(axes, opType, keepDims, reductionType, netPrecision, inPrc, outPrc, inLayout, inputShape, targetDevice) = obj.param;
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShape) << "_";
-    result << "axes=" << ov::test::utils::vec2str(axes) << "_";
-    result << "opType=" << opType << "_";
-    result << "type=" << reductionType << "_";
-    if (keepDims) result << "KeepDims_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "inPRC=" << inPrc.name() << "_";
-    result << "outPRC=" << outPrc.name() << "_";
-    result << "inL=" << inLayout << "_";
-    result << "trgDev=" << targetDevice;
-    return result.str();
-}
-
-void ReduceOpsLayerTest::SetUp() {
-    InferenceEngine::Precision netPrecision;
-    bool keepDims;
-    ngraph::helpers::ReductionType reductionType;
-    std::vector<size_t> inputShape;
-    std::vector<int> axes;
-    ov::test::utils::OpType opType;
-    std::tie(axes, opType, keepDims, reductionType, netPrecision, inPrc, outPrc, inLayout, inputShape, targetDevice) = GetParam();
-
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-
-    std::vector<size_t> shapeAxes;
-    switch (opType) {
-        case ov::test::utils::OpType::SCALAR: {
-            if (axes.size() > 1)
-                FAIL() << "In reduce op if op type is scalar, 'axis' input's must contain 1 element";
-            break;
-        }
-        case ov::test::utils::OpType::VECTOR: {
-            shapeAxes.push_back(axes.size());
-            break;
-        }
-        default:
-            FAIL() << "Reduce op doesn't support operation type: " << opType;
-    }
-    auto reductionAxesNode = std::dynamic_pointer_cast<ov::Node>(
-            std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape(shapeAxes), axes));
-
-    const auto reduce = ngraph::builder::makeReduce(params[0], reductionAxesNode, keepDims, reductionType);
-    const ov::ResultVector results{std::make_shared<ov::op::v0::Result>(reduce)};
-    function = std::make_shared<ov::Model>(results, params, "Reduce");
-}
-InferenceEngine::Blob::Ptr ReduceOpsLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    ngraph::helpers::ReductionType reductionType = std::get<3>(GetParam());
-    InferenceEngine::Precision netPrecision = std::get<4>(GetParam());
-    if (reductionType == ngraph::helpers::ReductionType::LogicalOr ||
-        reductionType == ngraph::helpers::ReductionType::LogicalAnd) {
-        return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 2, 0);
-    } else if (!netPrecision.is_float()) {
-        return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 5, 0);
-    }
-    auto td = info.getTensorDesc();
-    auto blob = make_blob_with_precision(td);
-    blob->allocate();
-    if (reductionType == ngraph::helpers::ReductionType::Max) {
-        ov::test::utils::fill_data_random_float<InferenceEngine::Precision::FP32>(blob, 5, -5, 1000);
-    } else {
-        ov::test::utils::fill_data_random_float<InferenceEngine::Precision::FP32>(blob, 5, 0, 1000);
-    }
-    return blob;
-}
-
-InferenceEngine::Blob::Ptr ReduceOpsLayerWithSpecificInputTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
-    auto axis_vec = std::get<0>(GetParam());
-    IE_ASSERT(axis_vec.size() == 1);
-
-    auto axis = axis_vec[0];
-    auto td = info.getTensorDesc();
-    auto dims = td.getDims();
-
-    // Slice of tensor through axis is {1, 0, 0, ....}, the mean value is 1/slice_size
-    auto raw_values = std::vector<float>(dims[axis], 0);
-    raw_values[0] = 1;
-
-    auto blob = make_blob_with_precision(td);
-    blob->allocate();
-    ov::test::utils::fill_data_with_broadcast(blob, axis, raw_values);
-    return blob;
-}
-
-} // namespace LayerTestsDefinitions
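For reference, the OpType::SCALAR / OpType::VECTOR switch in ReduceOpsLayerTest::SetUp above only changes the shape of the reduction-axes constant: an empty shapeAxes yields a rank-0 constant, while the VECTOR branch yields a 1-D constant of length axes.size(). A minimal sketch of the two layouts; helper names and values are illustrative, not from the repository:

#include <cstdint>
#include <memory>
#include <vector>

#include "openvino/op/constant.hpp"

// OpType::SCALAR -> rank-0 axes constant (shapeAxes stays empty).
std::shared_ptr<ov::op::v0::Constant> make_axes_scalar() {
    return std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{},
                                                  std::vector<int64_t>{1});
}

// OpType::VECTOR -> 1-D axes constant whose length equals the number of axes.
std::shared_ptr<ov::op::v0::Constant> make_axes_vector() {
    return std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{2},
                                                  std::vector<int64_t>{1, 2});
}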
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/region_yolo.cpp b/src/tests/functional/shared_test_classes/src/single_layer/region_yolo.cpp
deleted file mode 100644
index 59561f79df6e74..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/region_yolo.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/region_yolo.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string RegionYoloLayerTest::getTestCaseName(const testing::TestParamInfo<regionYoloParamsTuple> &obj) {
-    ov::Shape inputShape;
-    size_t classes;
-    size_t coords;
-    size_t num_regions;
-    bool do_softmax;
-    std::vector<int64_t> mask;
-    int start_axis;
-    int end_axis;
-    InferenceEngine::Precision netPrecision;
-    std::string targetName;
-    std::tie(inputShape, classes, coords, num_regions, do_softmax, mask, start_axis, end_axis, netPrecision, targetName) = obj.param;
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShape) << "_";
-    result << "classes=" << classes << "_";
-    result << "coords=" << coords << "_";
-    result << "num=" << num_regions << "_";
-    result << "doSoftmax=" << do_softmax << "_";
-    result << "axis=" << start_axis << "_";
-    result << "endAxis=" << end_axis << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "targetDevice=" << targetName << "_";
-    return result.str();
-}
-
-void RegionYoloLayerTest::SetUp() {
-    ov::Shape inputShape;
-    size_t classes;
-    size_t coords;
-    size_t num_regions;
-    bool do_softmax;
-    std::vector<int64_t> mask;
-    int start_axis;
-    int end_axis;
-    InferenceEngine::Precision netPrecision;
-    std::tie(inputShape, classes, coords, num_regions, do_softmax, mask, start_axis, end_axis, netPrecision, targetDevice) = this->GetParam();
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    auto param = std::make_shared<ov::op::v0::Parameter>(ngPrc, inputShape);
-    auto region_yolo = std::make_shared<ov::op::v0::RegionYolo>(param, coords, classes, num_regions, do_softmax, mask, start_axis, end_axis);
-    function = std::make_shared<ov::Model>(std::make_shared<ov::op::v0::Result>(region_yolo), ov::ParameterVector{param}, "RegionYolo");
-}
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp
deleted file mode 100644
index 3801a534e2e65c..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/reorg_yolo.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string ReorgYoloLayerTest::getTestCaseName(const testing::TestParamInfo<ReorgYoloParamsTuple> &obj) {
-    ov::Shape inputShape;
-    size_t stride;
-    InferenceEngine::Precision netPrecision;
-    std::string targetName;
-    std::tie(inputShape, stride, netPrecision, targetName) = obj.param;
-    std::ostringstream result;
-    result << "IS=" << inputShape << "_";
-    result << "stride=" << stride << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "targetDevice=" << targetName << "_";
-    return result.str();
-}
-
-void ReorgYoloLayerTest::SetUp() {
-    ov::Shape inputShape;
-    size_t stride;
-    InferenceEngine::Precision netPrecision;
-    std::tie(inputShape, stride, netPrecision, targetDevice) = this->GetParam();
-    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, inputShape);
-    auto reorg_yolo = std::make_shared<ov::op::v0::ReorgYolo>(param, stride);
-    function = std::make_shared<ov::Model>(std::make_shared<ov::op::v0::Result>(reorg_yolo), ov::ParameterVector{param}, "ReorgYolo");
-}
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reshape.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reshape.cpp
deleted file mode 100644
index 41c6ec80bb462c..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/reshape.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/reshape.hpp"
-
-namespace LayerTestsDefinitions {
-std::string ReshapeLayerTest::getTestCaseName(const testing::TestParamInfo<reshapeParams>& obj) {
-    InferenceEngine::Precision netPrecision;
-    InferenceEngine::Precision inPrc, outPrc;
-    InferenceEngine::Layout inLayout, outLayout;
-    InferenceEngine::SizeVector inputShapes;
-    std::vector<int64_t> outFormShapes;
-    std::string targetDevice;
-    std::map<std::string, std::string> config;
-    bool specialZero;
-    std::tie(specialZero, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, outFormShapes, targetDevice, config) = obj.param;
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_";
-    result << "OS=" << ov::test::utils::vec2str(outFormShapes) << "_";
-    result << "specialZero=" << specialZero << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "inPRC=" << inPrc.name() << "_";
-    result << "outPRC=" << outPrc.name() << "_";
-    result << "inL=" << inLayout << "_";
-    result << "outL=" << outLayout << "_";
-    result << "trgDev=" << targetDevice;
-    return result.str();
-}
-
-void ReshapeLayerTest::SetUp() {
-    InferenceEngine::SizeVector inputShapes;
-    std::vector<int64_t> outFormShapes;
-    bool specialZero;
-    InferenceEngine::Precision netPrecision;
-    std::tie(specialZero, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, outFormShapes, targetDevice, configuration) =
-        this->GetParam();
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    ov::ParameterVector paramsIn {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShapes))};
-    auto constNode = std::make_shared<ov::op::v0::Constant>(
-        ov::element::Type_t::i64, ov::Shape{outFormShapes.size()}, outFormShapes);
-    auto reshape = std::dynamic_pointer_cast<ov::op::v1::Reshape>(
-        std::make_shared<ov::op::v1::Reshape>(paramsIn[0], constNode, specialZero));
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(reshape)};
-    function = std::make_shared<ov::Model>(results, paramsIn, "Reshape");
-}
-
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/result.cpp b/src/tests/functional/shared_test_classes/src/single_layer/result.cpp
deleted file mode 100644
index 7e09567d526fb3..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/result.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/result.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string ResultLayerTest::getTestCaseName(const testing::TestParamInfo<ResultTestParamSet>& obj) {
-    std::vector<size_t> inputShape;
-    InferenceEngine::Precision inputPrecision;
-    std::string targetDevice;
-    ConfigMap additionalConfig;
-    std::tie(inputShape, inputPrecision, targetDevice, additionalConfig) = obj.param;
-
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShape) << "_";
-    result << "inPRC=" << inputPrecision.name() << "_";
-    result << "targetDevice=" << targetDevice;
-    return result.str();
-}
-
-void ResultLayerTest::SetUp() {
-    std::vector<size_t> inputShape;
-    InferenceEngine::Precision inputPrecision;
-    std::string targetDevice;
-    ConfigMap additionalConfig;
-    std::tie(inputShape, inputPrecision, targetDevice, additionalConfig) = GetParam();
-
-    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-    const ov::ResultVector results{std::make_shared<ov::op::v0::Result>(params[0])};
-    function = std::make_shared<ov::Model>(results, params, "result");
-}
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp
deleted file mode 100644
index 44ce5a46449223..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/reverse.hpp"
-
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/single_layer/reverse.hpp"
-
-using namespace InferenceEngine;
-using namespace FuncTestUtils::PrecisionUtils;
-
-namespace LayerTestsDefinitions {
-
-std::string ReverseLayerTest::getTestCaseName(const testing::TestParamInfo<reverseParams>& obj) {
-    std::vector<size_t> inputShape;
-    std::vector<int> axes;
-    std::string mode;
-    InferenceEngine::Precision netPrecision;
-    std::string targetDevice;
-    std::tie(inputShape, axes, mode, netPrecision, targetDevice) = obj.param;
-
-    std::ostringstream result;
-
-    result << "in_shape=" << ov::test::utils::vec2str(inputShape) << "_";
-    result << "axes=" << ov::test::utils::vec2str(axes) << "_";
-    result << "mode=" << mode << "_";
-    result << "prec=" << netPrecision.name() << "_";
-    result << "dev=" << targetDevice;
-    return result.str();
-}
-
-void ReverseLayerTest::SetUp() {
-    std::vector<size_t> inputShape;
-    std::vector<int> axes;
-    std::string mode;
-    InferenceEngine::Precision netPrecision;
-    std::tie(inputShape, axes, mode, netPrecision, targetDevice) = GetParam();
-
-    const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    ov::ParameterVector paramsVector;
-    const ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-    std::shared_ptr<ov::op::v0::Constant> axes_constant;
-    if (mode == "index") {
-        axes_constant = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{axes.size()}, axes);
-    } else {
-        std::vector<bool> axesMask(inputShape.size(), false);
-        for (auto axe : axes) {
-            axesMask[axe] = true;
-        }
-        axes_constant =
-            std::make_shared<ov::op::v0::Constant>(ov::element::boolean, ov::Shape{axesMask.size()}, axesMask);
-    }
-    const auto reverse = std::make_shared<ov::op::v1::Reverse>(params[0], axes_constant, mode);
-    function = std::make_shared<ov::Model>(reverse->outputs(), params, "reverse");
-}
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reverse_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reverse_sequence.cpp
deleted file mode 100644
index efb9a3519d14c4..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/reverse_sequence.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/reverse_sequence.hpp"
-
-namespace LayerTestsDefinitions {
-std::string ReverseSequenceLayerTest::getTestCaseName(const testing::TestParamInfo<ReverseSequenceParamsTuple> &obj) {
-    int64_t batchAxisIndx;
-    int64_t seqAxisIndx;
-    InferenceEngine::Precision netPrecision;
-    std::string targetName;
-    std::vector<size_t> inputShape;
-    std::vector<size_t> secondInputShape;
-    ngraph::helpers::InputLayerType secondaryInputType;
-
-    std::tie(batchAxisIndx, seqAxisIndx, inputShape, secondInputShape, secondaryInputType, netPrecision, targetName) = obj.param;
-
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShape) << "_";
-    result << "seqLengthsShape" << ov::test::utils::vec2str(secondInputShape) << "_";
-    result << "secondaryInputType=" << secondaryInputType << "_";
-    result << "batchAxis=" << batchAxisIndx << "_";
-    result << "seqAxis=" << seqAxisIndx << "_";
-    result << "netPRC=" << netPrecision.name() << "_";
-    result << "targetDevice=" << targetName;
-    return result.str();
-}
-
-void
ReverseSequenceLayerTest::SetUp() { - InferenceEngine::Precision netPrecision; - int64_t batchAxisIndx; - int64_t seqAxisIndx; - std::vector inputShape; - std::vector secondInputShape; - ngraph::helpers::InputLayerType secondaryInputType; - - std::tie(batchAxisIndx, seqAxisIndx, inputShape, secondInputShape, secondaryInputType, netPrecision, targetDevice) = GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector paramsIn {std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto secondPrc = ov::element::Type_t::i32; //according to the specification - OPENVINO_SUPPRESS_DEPRECATED_START - auto secondaryInput = ngraph::builder::makeInputLayer(secondPrc, secondaryInputType, secondInputShape); - OPENVINO_SUPPRESS_DEPRECATED_END - if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { - paramsIn.push_back(std::dynamic_pointer_cast(secondaryInput)); - } - - auto reverse = std::make_shared(paramsIn[0], secondaryInput, batchAxisIndx, seqAxisIndx); - ov::ResultVector results{std::make_shared(reverse)}; - function = std::make_shared(results, paramsIn, "ReverseSequence"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/rnn_cell.cpp b/src/tests/functional/shared_test_classes/src/single_layer/rnn_cell.cpp deleted file mode 100644 index 25255df0fd6575..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/rnn_cell.cpp +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "transformations/op_conversions/rnn_cell_decomposition.hpp" -#include "shared_test_classes/single_layer/rnn_cell.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace LayerTestsDefinitions { - -using ngraph::helpers::InputLayerType; - -std::string RNNCellTest::getTestCaseName(const testing::TestParamInfo &obj) { - bool should_decompose; - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - float clip; - InputLayerType WType; - InputLayerType RType; - InputLayerType BType; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(should_decompose, batch, hidden_size, input_size, activations, clip, WType, RType, BType, - netPrecision, targetDevice) = obj.param; - std::vector> inputShapes = {{batch, input_size}, {batch, hidden_size}, - {hidden_size, input_size}, {hidden_size, hidden_size}, {hidden_size}}; - std::ostringstream result; - result << "decomposition" << should_decompose << "_"; - result << "batch=" << batch << "_"; - result << "hidden_size=" << hidden_size << "_"; - result << "input_size=" << input_size << "_"; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "activations=" << ov::test::utils::vec2str(activations) << "_"; - result << "clip=" << clip << "_"; - result << "WType=" << WType << "_"; - result << "RType=" << RType << "_"; - result << "BType=" << BType << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice << "_"; - return result.str(); -} - -void RNNCellTest::SetUp() { - bool should_decompose; - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - InputLayerType WType; - InputLayerType RType; - InputLayerType BType; - InferenceEngine::Precision netPrecision; - std::tie(should_decompose, batch, hidden_size, 
input_size, activations, clip, WType, RType, BType, - netPrecision, targetDevice) = this->GetParam(); - std::vector> inputShapes = {{batch, input_size}, {batch, hidden_size}, - {hidden_size, input_size}, {hidden_size, hidden_size}, {hidden_size}}; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; - std::vector WRB = {inputShapes[2], inputShapes[3], inputShapes[4]}; - - std::shared_ptr W; - if (WType == InputLayerType::PARAMETER) { - const auto param = std::make_shared(ngPrc, WRB[0]); - W = param; - params.push_back(param); - } else { - W = ov::test::utils::deprecated::make_constant(ngPrc, WRB[0], {}, true); - } - - std::shared_ptr R; - if (RType == InputLayerType::PARAMETER) { - const auto param = std::make_shared(ngPrc, WRB[1]); - R = param; - params.push_back(param); - } else { - R = ov::test::utils::deprecated::make_constant(ngPrc, WRB[1], {}, true); - } - - std::shared_ptr B; - if (BType == InputLayerType::PARAMETER) { - const auto param = std::make_shared(ngPrc, WRB[2]); - B = param; - params.push_back(param); - } else { - B = ov::test::utils::deprecated::make_constant(ngPrc, WRB[2], {}, true); - } - - auto rnn_cell = std::make_shared(params[0], params[1], W, R, B, hidden_size, activations, - activations_alpha, activations_beta, clip); - ov::ResultVector results{std::make_shared(rnn_cell)}; - function = std::make_shared(results, params, "rnn_cell"); - if (should_decompose) { - ov::pass::Manager m; - m.register_pass(); - m.run_passes(function); - } -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp deleted file mode 100644 index c604cebc8ec816..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" -#include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" -#include "shared_test_classes/single_layer/rnn_sequence.hpp" -#include "common_test_utils/node_builders/constant.hpp" - -namespace LayerTestsDefinitions { - - using ngraph::helpers::InputLayerType; - - std::string RNNSequenceTest::getTestCaseName(const testing::TestParamInfo &obj) { - ngraph::helpers::SequenceTestsMode mode; - size_t seq_lengths; - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - ov::op::RecurrentSequenceDirection direction; - InferenceEngine::Precision netPrecision; - InputLayerType WRBType; - std::string targetDevice; - std::tie(mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, WRBType, - netPrecision, targetDevice) = obj.param; - std::vector> inputShapes = { - {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {hidden_size, input_size}, - {hidden_size, hidden_size}, {hidden_size}}, - }; - std::ostringstream result; - result << "mode=" << mode << "_"; - result << "seq_lengths=" << seq_lengths << "_"; - result << "batch=" << batch << "_"; - result << "hidden_size=" << hidden_size << "_"; - result << "input_size=" << input_size << "_"; - result << "IS=" << 
ov::test::utils::vec2str(inputShapes) << "_"; - result << "activations=" << ov::test::utils::vec2str(activations) << "_"; - result << "direction=" << direction << "_"; - result << "clip=" << clip << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice << "_"; - return result.str(); - } - - void RNNSequenceTest::SetUp() { - using namespace ngraph::helpers; - size_t seq_lengths; - size_t batch; - size_t hidden_size; - size_t input_size; - std::vector activations; - std::vector activations_alpha; - std::vector activations_beta; - float clip; - ov::op::RecurrentSequenceDirection direction; - InputLayerType WRBType; - InferenceEngine::Precision netPrecision; - std::tie(m_mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, WRBType, - netPrecision, targetDevice) = this->GetParam(); - size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; - std::vector inputShapes = { - {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch}, - {num_directions, hidden_size, input_size}, {num_directions, hidden_size, hidden_size}, - {num_directions, hidden_size}}, - }; - m_max_seq_len = seq_lengths; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; - std::shared_ptr seq_lengths_node; - if (m_mode == SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM || - m_mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM || - m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM) { - auto param = std::make_shared(ov::element::i64, inputShapes[2]); - param->set_friendly_name("seq_lengths"); - params.push_back(param); - seq_lengths_node = param; - } else if (m_mode == ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST || - m_mode == ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST) { - seq_lengths_node = ov::test::utils::deprecated::make_constant(ov::element::i64, inputShapes[2], {}, true, - static_cast(seq_lengths), 0.f); - } else { - std::vector lengths(batch, seq_lengths); - seq_lengths_node = ov::test::utils::deprecated::make_constant(ov::element::i64, inputShapes[2], lengths, false); - } - - const auto& W_shape = inputShapes[3]; - const auto& R_shape = inputShapes[4]; - const auto& B_shape = inputShapes[5]; - - std::shared_ptr W, R, B; - if (WRBType == InputLayerType::PARAMETER) { - const auto W_param = std::make_shared(ngPrc, W_shape); - const auto R_param = std::make_shared(ngPrc, R_shape); - const auto B_param = std::make_shared(ngPrc, B_shape); - W = W_param; - R = R_param; - B = B_param; - params.push_back(W_param); - params.push_back(R_param); - params.push_back(B_param); - } else { - W = ov::test::utils::deprecated::make_constant(ngPrc, W_shape, {}, true); - R = ov::test::utils::deprecated::make_constant(ngPrc, R_shape, {}, true); - B = ov::test::utils::deprecated::make_constant(ngPrc, B_shape, {}, true); - } - - auto rnn_sequence = std::make_shared(params[0], params[1], seq_lengths_node, W, R, B, hidden_size, direction, - activations, activations_alpha, activations_beta, clip); - ov::ResultVector results{std::make_shared(rnn_sequence->output(0)), - std::make_shared(rnn_sequence->output(1))}; - function = std::make_shared(results, params, "rnn_sequence"); - bool is_pure_sequence = (m_mode == SequenceTestsMode::PURE_SEQ || - m_mode == 
SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM || - m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST); - if (!is_pure_sequence) { - ov::pass::Manager manager; - if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL) - manager.register_pass(); - manager.register_pass(); - manager.run_passes(function); - bool ti_found = ngraph::helpers::is_tensor_iterator_exist(function); - EXPECT_EQ(ti_found, true); - } else { - bool ti_found = ngraph::helpers::is_tensor_iterator_exist(function); - EXPECT_EQ(ti_found, false); - } - } - - void RNNSequenceTest::GenerateInputs() { - for (const auto &input : executableNetwork.GetInputsInfo()) { - const auto &info = input.second; - auto blob = GenerateInput(*info); - if (input.first == "seq_lengths") { - blob = FuncTestUtils::createAndFillBlob(info->getTensorDesc(), m_max_seq_len, 0); - } - - inputs.push_back(blob); - } - } -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp deleted file mode 100644 index 40af92f261c0d3..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/roi_align.hpp" - -#include "ov_models/builders.hpp" -#include "openvino/core/enum_names.hpp" -#include "openvino/opsets/opset3.hpp" -#include "ov_models/builders.hpp" -#include "shared_test_classes/single_layer/roi_align.hpp" - -using namespace InferenceEngine; -using namespace FuncTestUtils::PrecisionUtils; - -namespace LayerTestsDefinitions { - -std::string ROIAlignLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector inputShape; - std::vector coordsShape; - - int pooledH; - int pooledW; - float spatialScale; - int poolingRatio; - std::string poolingMode; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(inputShape, coordsShape, pooledH, pooledW, spatialScale, poolingRatio, poolingMode, netPrecision, targetDevice) = obj.param; - - std::ostringstream result; - - result << "in_shape=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "coord_shape=" << ov::test::utils::vec2str(coordsShape) << "_"; - result << "pooled_h=" << pooledH << "_"; - result << "pooled_w=" << pooledW << "_"; - result << "spatial_scale=" << spatialScale << "_"; - result << "pooling_ratio=" << poolingRatio << "_"; - result << "mode=" << poolingMode << "_"; - result << "prec=" << netPrecision.name() << "_"; - result << "dev=" << targetDevice; - return result.str(); -} - -static int randInt(int low, int high) { - std::random_device rd; - std::mt19937 gen(rd()); - std::uniform_int_distribution dis(low, high); - return dis(gen); -} - -void ROIAlignLayerTest::fillCoordTensor(std::vector& coords, int height, int width, - float spatialScale, int pooledRatio, int pooledH, int pooledW) { - int minRoiWidth = pooledW; - int maxRoiWidth = width / pooledRatio; - int minRoiHeight = pooledH; - int maxRoiHeight = height / pooledRatio; - - for (int i = 0; i < coords.size() / 4; i++) { - int sizeX = std::min(width, randInt(minRoiWidth, maxRoiWidth)); - int sizeY = std::min(height, randInt(minRoiHeight, maxRoiHeight)); - int startX = randInt(0, std::max(1, width - sizeX - 1)); - int startY = randInt(0, std::max(1, height - sizeY - 1)); - - coords[i * 4] = startX / spatialScale; - coords[i * 4 + 1] = startY / 
spatialScale; - coords[i * 4 + 2] = (startX + sizeX - 1) / spatialScale; - coords[i * 4 + 3] = (startY + sizeY - 1) / spatialScale; - } -} -void ROIAlignLayerTest::fillIdxTensor(std::vector& idx, int batchSize) { - int batchId = 0; - for (int i = 0; i < idx.size(); i++) { - idx[i] = batchId; - batchId = (batchId + 1) % batchSize; - } -} - -void ROIAlignLayerTest::SetUp() { - std::vector inputShape; - std::vector coordsShape; - InferenceEngine::Precision netPrecision; - std::tie(inputShape, - coordsShape, - pooledH, - pooledW, - spatialScale, - poolingRatio, - poolingMode, - netPrecision, - targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - std::vector proposalVector; - std::vector roiIdxVector; - proposalVector.resize(coordsShape[0] * 4); - roiIdxVector.resize(coordsShape[0]); - - fillCoordTensor(proposalVector, inputShape[2], inputShape[3], spatialScale, poolingRatio, pooledH, pooledW); - fillIdxTensor(roiIdxVector, inputShape[0]); - ov::Shape idxShape = {coordsShape[0]}; - - auto coords = std::make_shared(ngPrc, coordsShape, proposalVector.data()); - auto roisIdx = std::make_shared(ov::element::i32, idxShape, roiIdxVector.data()); - - std::shared_ptr roiAlign = std::make_shared(params[0], - coords, - roisIdx, - pooledH, - pooledW, - poolingRatio, - spatialScale, - poolingMode); - ov::ResultVector results{std::make_shared(roiAlign)}; - function = std::make_shared(results, params, "roi_align"); -} - -std::string ROIAlignV9LayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector inputShape; - std::vector coordsShape; - - int pooledH; - int pooledW; - float spatialScale; - int poolingRatio; - std::string poolingMode; - std::string roiAlignedMode; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(inputShape, - coordsShape, - pooledH, - pooledW, - spatialScale, - poolingRatio, - poolingMode, - roiAlignedMode, - netPrecision, - targetDevice) = obj.param; - - std::ostringstream result; - - result << "in_shape=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "coord_shape=" << ov::test::utils::vec2str(coordsShape) << "_"; - result << "pooled_h=" << pooledH << "_"; - result << "pooled_w=" << pooledW << "_"; - result << "spatial_scale=" << spatialScale << "_"; - result << "pooling_ratio=" << poolingRatio << "_"; - result << "mode=" << poolingMode << "_"; - result << "mode=" << roiAlignedMode << "_"; - result << "prec=" << netPrecision.name() << "_"; - result << "dev=" << targetDevice; - return result.str(); -} - -void ROIAlignV9LayerTest::SetUp() { - std::vector inputShape; - std::vector coordsShape; - InferenceEngine::Precision netPrecision; - std::tie(inputShape, - coordsShape, - pooledH, - pooledW, - spatialScale, - poolingRatio, - poolingMode, - roiAlignedMode, - netPrecision, - targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - std::vector proposalVector; - std::vector roiIdxVector; - proposalVector.resize(coordsShape[0] * 4); - roiIdxVector.resize(coordsShape[0]); - - ROIAlignLayerTest::fillCoordTensor(proposalVector, - inputShape[2], - inputShape[3], - spatialScale, - poolingRatio, - pooledH, - pooledW); - ROIAlignLayerTest::fillIdxTensor(roiIdxVector, inputShape[0]); - ov::Shape idxShape = {coordsShape[0]}; - - auto coords = 
std::make_shared(ngPrc, coordsShape, proposalVector.data()); - auto roisIdx = std::make_shared(ov::element::i32, idxShape, roiIdxVector.data()); - - std::shared_ptr roiAlign = std::make_shared( - params[0], - coords, - roisIdx, - pooledH, - pooledW, - poolingRatio, - spatialScale, - ov::EnumNames::as_enum(poolingMode), - ov::EnumNames::as_enum(roiAlignedMode)); - - ov::ResultVector results{std::make_shared(roiAlign)}; - function = std::make_shared(results, params, "roi_align"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp deleted file mode 100644 index d1f87c04c80be8..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/roi_pooling.hpp" - -namespace LayerTestsDefinitions { - - std::string ROIPoolingLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - std::vector inputShape; - std::vector coordsShape; - std::vector poolShape; - float spatial_scale; - ngraph::helpers::ROIPoolingTypes pool_method; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(inputShape, coordsShape, poolShape, spatial_scale, pool_method, netPrecision, targetDevice) = obj.param; - - std::ostringstream result; - - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "CS=" << ov::test::utils::vec2str(coordsShape) << "_"; - result << "PS=" << ov::test::utils::vec2str(poolShape) << "_"; - result << "Scale=" << spatial_scale << "_"; - switch (pool_method) { - case ngraph::helpers::ROIPoolingTypes::ROI_MAX: - result << "Max_"; - break; - case ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR: - result << "Bilinear_"; - break; - } - result << "netPRC=" << netPrecision.name() << "_"; - result << "trgDev=" << targetDevice; - return result.str(); - } - - void ROIPoolingLayerTest::GenerateInputs() { - auto feat_map_shape = cnnNetwork.getInputShapes().begin()->second; - - const auto is_roi_max_mode = (pool_method == ngraph::helpers::ROIPoolingTypes::ROI_MAX); - - const int height = is_roi_max_mode ? feat_map_shape[2] / spatial_scale : 1; - const int width = is_roi_max_mode ? 
feat_map_shape[3] / spatial_scale : 1;
-
-        size_t it = 0;
-        for (const auto &input : cnnNetwork.getInputsInfo()) {
-            const auto &info = input.second;
-            InferenceEngine::Blob::Ptr blob;
-
-            if (it == 1) {
-                blob = make_blob_with_precision(info->getTensorDesc());
-                blob->allocate();
-                ov::test::utils::fill_data_roi(blob, feat_map_shape[0] - 1,
-                                               height, width, 1.0f, is_roi_max_mode);
-            } else {
-                blob = GenerateInput(*info);
-            }
-            inputs.push_back(blob);
-            it++;
-        }
-    }
-
-    void ROIPoolingLayerTest::SetUp() {
-        InferenceEngine::SizeVector inputShape;
-        InferenceEngine::SizeVector coordsShape;
-        InferenceEngine::SizeVector poolShape;
-        InferenceEngine::Precision netPrecision;
-
-        threshold = 0.08f;
-
-        std::tie(inputShape, coordsShape, poolShape, spatial_scale, pool_method, netPrecision, targetDevice) = this->GetParam();
-
-        auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-        ov::ParameterVector params {std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape)),
-                                    std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(coordsShape))};
-        std::shared_ptr<ov::Node> roi_pooling;
-        if (ov::test::utils::ROIPoolingTypes::ROI_MAX == pool_method) {
-            roi_pooling = std::make_shared<ov::op::v0::ROIPooling>(params[0], params[1], poolShape, spatial_scale, "max");
-        } else {
-            roi_pooling = std::make_shared<ov::op::v0::ROIPooling>(params[0], params[1], poolShape, spatial_scale, "bilinear");
-        }
-        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(roi_pooling)};
-        function = std::make_shared<ov::Model>(results, params, "roi_pooling");
-    }
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roll.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roll.cpp
deleted file mode 100644
index 396b103ac3b9e5..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/roll.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "shared_test_classes/single_layer/roll.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string RollLayerTest::getTestCaseName(const testing::TestParamInfo<rollParams>& obj) {
-    InferenceEngine::SizeVector inputShapes;
-    InferenceEngine::Precision inputPrecision;
-    std::vector<int64_t> shift;
-    std::vector<int64_t> axes;
-    std::string targetDevice;
-    std::tie(inputShapes, inputPrecision, shift, axes, targetDevice) = obj.param;
-
-    std::ostringstream result;
-    result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_";
-    result << "Precision=" << inputPrecision.name() << "_";
-    result << "Shift=" << ov::test::utils::vec2str(shift) << "_";
-    result << "Axes=" << ov::test::utils::vec2str(axes) << "_";
-    result << "TargetDevice=" << targetDevice;
-    return result.str();
-}
-
-void RollLayerTest::SetUp() {
-    InferenceEngine::SizeVector inputShapes;
-    InferenceEngine::Precision inputPrecision;
-    std::vector<int64_t> shift;
-    std::vector<int64_t> axes;
-    std::tie(inputShapes, inputPrecision, shift, axes, targetDevice) = this->GetParam();
-    auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    ov::ParameterVector paramVector;
-    auto paramData = std::make_shared<ov::op::v0::Parameter>(inType, ov::Shape(inputShapes));
-    paramVector.push_back(paramData);
-
-    auto shiftNode = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{shift.size()}, shift)->output(0);
-    auto axesNode = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{axes.size()}, axes)->output(0);
-
-    auto roll = std::make_shared<ov::op::v7::Roll>(paramVector[0], shiftNode, axesNode);
-
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(roll)};
-    function = std::make_shared<ov::Model>(results, paramVector, "roll");
-}
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp
deleted file mode 100644
index 1f0723e359f7b8..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/single_layer/scatter_ND_update.hpp"
-
-namespace LayerTestsDefinitions {
-
-std::string ScatterNDUpdateLayerTest::getTestCaseName(const testing::TestParamInfo<scatterNDUpdateParamsTuple> &obj) {
-    sliceSelectInShape shapeDescript;
-    std::vector<size_t> inShape;
-    std::vector<size_t> indicesShape;
-    std::vector<size_t> indicesValue;
-    std::vector<size_t> updateShape;
-    InferenceEngine::Precision inputPrecision;
-    InferenceEngine::Precision indicesPrecision;
-    std::string targetName;
-    std::tie(shapeDescript, inputPrecision, indicesPrecision, targetName) = obj.param;
-    std::tie(inShape, indicesShape, indicesValue, updateShape) = shapeDescript;
-    std::ostringstream result;
-    result << "InputShape=" << ov::test::utils::vec2str(inShape) << "_";
-    result << "IndicesShape=" << ov::test::utils::vec2str(indicesShape) << "_";
-    result << "UpdateShape=" << ov::test::utils::vec2str(updateShape) << "_";
-    result << "inPrc=" << inputPrecision.name() << "_";
-    result << "idxPrc=" << indicesPrecision.name() << "_";
-    result << "targetDevice=" << targetName << "_";
-    return result.str();
-}
-
-std::vector<sliceSelectInShape> ScatterNDUpdateLayerTest::combineShapes(
-    const std::map<std::vector<size_t>, std::map<std::vector<size_t>, std::vector<size_t>>>& inputShapes) {
-    std::vector<sliceSelectInShape> resVec;
-    for (auto& inputShape : inputShapes) {
-        for (auto& item : inputShape.second) {
-            auto indiceShape = item.first;
-            size_t indicesRank = indiceShape.size();
-            std::vector<size_t> updateShape;
-            for (size_t i = 0; i < indicesRank - 1; i++) {
-                updateShape.push_back(indiceShape[i]);
-            }
-            auto srcShape = inputShape.first;
-            for (size_t j = indiceShape[indicesRank - 1]; j < srcShape.size(); j++) {
-                updateShape.push_back(srcShape[j]);
-            }
-            resVec.push_back(std::make_tuple(srcShape, indiceShape, item.second, updateShape));
-        }
-    }
-    return resVec;
-}
-
-void ScatterNDUpdateLayerTest::SetUp() {
-    sliceSelectInShape shapeDescript;
-    InferenceEngine::SizeVector inShape;
-    InferenceEngine::SizeVector indicesShape;
-    InferenceEngine::SizeVector indicesValue;
-    InferenceEngine::SizeVector updateShape;
-    InferenceEngine::Precision inputPrecision;
-    InferenceEngine::Precision indicesPrecision;
-    std::tie(shapeDescript, inputPrecision, indicesPrecision, targetDevice) = this->GetParam();
-    std::tie(inShape, indicesShape, indicesValue, updateShape) = shapeDescript;
-    auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    auto idxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision);
-    ov::ParameterVector paramVector;
-    auto inputParams = std::make_shared<ov::op::v0::Parameter>(inPrc, ov::Shape(inShape));
-    paramVector.push_back(inputParams);
-    auto updateParams = std::make_shared<ov::op::v0::Parameter>(inPrc, ov::Shape(updateShape));
-    paramVector.push_back(updateParams);
-
-    auto indicesNode = std::make_shared<ov::op::v0::Constant>(idxPrc, ov::Shape(indicesShape), indicesValue);
-    auto s2d = std::make_shared<ov::op::v3::ScatterNDUpdate>(paramVector[0], indicesNode, paramVector[1]);
-
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(s2d)};
-    function = std::make_shared<ov::Model>(results, paramVector, "ScatterNDUpdate");
-}
-} // namespace LayerTestsDefinitions
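For reference, combineShapes in the deleted file above implements the ScatterNDUpdate shape rule: update.shape = indices.shape[:-1] + data.shape[indices.shape.back():]. A standalone sketch of that rule; the function name is illustrative, not from the repository:

#include <cstddef>
#include <vector>

// ScatterNDUpdate update-shape rule: the leading indices dimensions select
// slices, and the last indices dimension says how many leading data axes are
// indexed away. Requires indices.size() >= 1 and indices.back() <= data.size().
std::vector<size_t> scatter_nd_update_shape(const std::vector<size_t>& data,
                                            const std::vector<size_t>& indices) {
    std::vector<size_t> update(indices.begin(), indices.end() - 1);
    for (size_t d = indices.back(); d < data.size(); ++d)
        update.push_back(data[d]);
    return update;  // e.g. data {10, 9, 8}, indices {5, 3, 2} -> update {5, 3, 8}
}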
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp
deleted file mode 100644
index 0f09a8fcf8d492..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/single_layer/scatter_elements_update.hpp"
-
-#include "openvino/op/scatter_elements_update.hpp"
-using ov::op::operator<<;
-
-namespace LayerTestsDefinitions {
-
-std::string ScatterElementsUpdateLayerTest::getTestCaseName(const testing::TestParamInfo<scatterElementsUpdateParamsTuple> &obj) {
-    axisShapeInShape shapeDescript;
-    InferenceEngine::SizeVector indicesValue;
-    InferenceEngine::Precision inputPrecision;
-    InferenceEngine::Precision indicesPrecision;
-    std::string targetName;
-    std::tie(shapeDescript, indicesValue, inputPrecision, indicesPrecision, targetName) = obj.param;
-    std::ostringstream result;
-    result << "InputShape=" << ov::test::utils::vec2str(std::get<0>(shapeDescript)) << "_";
-    result << "IndicesShape=" << ov::test::utils::vec2str(std::get<1>(shapeDescript)) << "_";
-    result << "Axis=" << std::get<2>(shapeDescript) << "_";
-    result << "inPrc=" << inputPrecision.name() << "_";
-    result << "idxPrc=" << indicesPrecision.name() << "_";
-    result << "targetDevice=" << targetName << "_";
-    return result.str();
-}
-
-std::vector<axisShapeInShape> ScatterElementsUpdateLayerTest::combineShapes(
-    const std::map<std::vector<size_t>, std::map<std::vector<size_t>, std::vector<int>>>& inputShapes) {
-    std::vector<axisShapeInShape> resVec;
-    for (auto& inputShape : inputShapes) {
-        for (auto& item : inputShape.second) {
-            for (auto& elt : item.second) {
-                resVec.push_back(std::make_tuple(inputShape.first, item.first, elt));
-            }
-        }
-    }
-    return resVec;
-}
-
-void ScatterElementsUpdateLayerTest::SetUp() {
-    InferenceEngine::SizeVector inShape;
-    InferenceEngine::SizeVector indicesShape;
-    int axis;
-    axisShapeInShape shapeDescript;
-    InferenceEngine::SizeVector indicesValue;
-    InferenceEngine::Precision inputPrecision;
-    InferenceEngine::Precision indicesPrecision;
-    std::tie(shapeDescript, indicesValue, inputPrecision, indicesPrecision, targetDevice) = this->GetParam();
-    std::tie(inShape, indicesShape, axis) = shapeDescript;
-    auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    auto idxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision);
-    ov::ParameterVector paramVector;
-    auto inputParams = std::make_shared<ov::op::v0::Parameter>(inPrc, ov::Shape(inShape));
-    paramVector.push_back(inputParams);
-    auto updateParams = std::make_shared<ov::op::v0::Parameter>(inPrc, ov::Shape(indicesShape));
-    paramVector.push_back(updateParams);
-
-    auto indicesNode = std::make_shared<ov::op::v0::Constant>(idxPrc, ov::Shape(indicesShape), indicesValue);
-    auto axis_node = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, std::vector<int>{axis});
-    auto s2d = std::make_shared<ov::op::v3::ScatterElementsUpdate>(paramVector[0], indicesNode, paramVector[1], axis_node);
-
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(s2d)};
-    function = std::make_shared<ov::Model>(results, paramVector, "ScatterElementsUpdate");
-}
-
-std::string ScatterElementsUpdate12LayerTest::getTestCaseName(const testing::TestParamInfo<scatterElementsUpdate12ParamsTuple> &obj) {
-    axisShapeInShape shapeDescript;
-    std::vector<int64_t> indicesValue;
-    ov::op::v12::ScatterElementsUpdate::Reduction reduceMode;
-    bool useInitVal;
-    InferenceEngine::Precision inputPrecision;
-    InferenceEngine::Precision indicesPrecision;
-    std::string targetName;
-    std::tie(shapeDescript, indicesValue, reduceMode, useInitVal, inputPrecision, indicesPrecision, targetName) = obj.param;
-    std::ostringstream result;
-    result << "InputShape=" << ov::test::utils::vec2str(std::get<0>(shapeDescript)) << "_";
-    result << "IndicesShape=" << ov::test::utils::vec2str(std::get<1>(shapeDescript)) << "_";
-    result << "Axis=" << std::get<2>(shapeDescript) << "_";
-    result << "ReduceMode=" << reduceMode << "_";
-    result << "UseInitVal=" << useInitVal << "_";
-    result << "Indices=" << ov::test::utils::vec2str(indicesValue) << "_";
-    result << "inPrc=" << inputPrecision.name() << "_";
-    result << "idxPrc=" << indicesPrecision.name() << "_";
-    result << "targetDevice=" << targetName << "_";
-    return result.str();
-}
-
-void ScatterElementsUpdate12LayerTest::SetUp() {
-    InferenceEngine::SizeVector inShape;
-    InferenceEngine::SizeVector indicesShape;
-    int axis;
-    ov::op::v12::ScatterElementsUpdate::Reduction reduceMode;
-    bool useInitVal;
-    axisShapeInShape shapeDescript;
-    std::vector<int64_t> indicesValue;
-    InferenceEngine::Precision inputPrecision;
-    InferenceEngine::Precision indicesPrecision;
-    std::tie(shapeDescript, indicesValue, reduceMode, useInitVal, inputPrecision, indicesPrecision, targetDevice) = this->GetParam();
-    std::tie(inShape, indicesShape, axis) = shapeDescript;
-    const auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    const auto idxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision);
-    ov::ParameterVector paramVector;
-    const auto inputParams = std::make_shared<ov::op::v0::Parameter>(inPrc, ov::Shape(inShape));
-    paramVector.push_back(inputParams);
-    const auto updateParams = std::make_shared<ov::op::v0::Parameter>(inPrc, ov::Shape(indicesShape));
-    paramVector.push_back(updateParams);
-
-    const auto indicesNode = std::make_shared<ov::op::v0::Constant>(idxPrc, indicesShape, indicesValue);
-    const auto axisNode = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i32, ov::Shape{},
-                                                                 std::vector<int>{axis});
-    const auto seuNode = std::make_shared<ov::op::v12::ScatterElementsUpdate>(paramVector[0], indicesNode,
-                                                                              paramVector[1], axisNode, reduceMode, useInitVal);
-
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(seuNode)};
-    function = std::make_shared<ov::Model>(results, paramVector, "ScatterElementsUpdate");
-}
-} // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp
deleted file mode 100644
index fe36bdfcaa8906..00000000000000
--- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/single_layer/scatter_update.hpp"
-
-namespace LayerTestsDefinitions {
-std::string ScatterUpdateLayerTest::getTestCaseName(const testing::TestParamInfo<scatterUpdateParamsTuple> &obj) {
-    axisUpdateShapeInShape shapeDescript;
-    std::vector<size_t> inShape;
-    std::vector<size_t> indicesShape;
-    std::vector<size_t> updateShape;
-    int64_t axis;
-    std::vector<int64_t> indicesValue;
-    InferenceEngine::Precision inputPrecision;
-    InferenceEngine::Precision indicesPrecision;
-    std::string targetName;
-    std::tie(shapeDescript, indicesValue, inputPrecision, indicesPrecision, targetName) = obj.param;
-    std::tie(inShape, indicesShape, updateShape, axis) = shapeDescript;
-    std::ostringstream result;
-    result << "InputShape=" << ov::test::utils::vec2str(inShape) << "_";
-    result << "IndicesShape=" << ov::test::utils::vec2str(indicesShape) << "_";
-    result << "IndicesValue=" << ov::test::utils::vec2str(indicesValue) << "_";
-    result << "UpdateShape=" << ov::test::utils::vec2str(updateShape) << "_";
-    result << "Axis=" << axis << "_";
-    result << "inPrc=" << inputPrecision.name() << "_";
-    result << "idxPrc=" << indicesPrecision.name() << "_";
-    result << "targetDevice=" << targetName << "_";
-    return result.str();
-}
-
-std::vector<axisUpdateShapeInShape> ScatterUpdateLayerTest::combineShapes(
-    const std::map<std::vector<size_t>, std::map<std::vector<size_t>, std::vector<int64_t>>>& inputShapes) {
-    std::vector<axisUpdateShapeInShape> resVec;
-    for (auto& inputShape : inputShapes) {
-        auto srcShape = inputShape.first;
-        auto srcRank = srcShape.size();
-        for (auto& item : inputShape.second) {
-            auto indicesShape = item.first;
-            auto indicesRank = indicesShape.size();
-            for (auto& axis : item.second) {
-                auto axisP = axis < 0 ? axis + srcRank : axis;
-                std::vector<size_t> updateShape;
-                for (size_t rs = 0; rs < srcRank; rs++) {
-                    if (rs != axisP) {
-                        updateShape.push_back(srcShape[rs]);
-                    } else {
-                        for (size_t ri = 0; ri < indicesRank; ri++) {
-                            updateShape.push_back(indicesShape[ri]);
-                        }
-                    }
-                }
-                resVec.push_back(std::make_tuple(srcShape, indicesShape, updateShape, axis));
-            }
-        }
-    }
-    return resVec;
-}
-
-void ScatterUpdateLayerTest::SetUp() {
-    axisUpdateShapeInShape shapeDescript;
-    InferenceEngine::SizeVector inShape;
-    InferenceEngine::SizeVector indicesShape;
-    InferenceEngine::SizeVector updateShape;
-    int64_t axis;
-    std::vector<int64_t> indicesValue;
-    InferenceEngine::Precision inputPrecision;
-    InferenceEngine::Precision indicesPrecision;
-    std::tie(shapeDescript, indicesValue, inputPrecision, indicesPrecision, targetDevice) = this->GetParam();
-    std::tie(inShape, indicesShape, updateShape, axis) = shapeDescript;
-    auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    auto idxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision);
-    ov::ParameterVector paramVector;
-    auto inputParams = std::make_shared<ov::op::v0::Parameter>(inPrc, ov::Shape(inShape));
-    paramVector.push_back(inputParams);
-    auto updateParams = std::make_shared<ov::op::v0::Parameter>(inPrc, ov::Shape(updateShape));
-    paramVector.push_back(updateParams);
-
-    auto indicesNode = std::make_shared<ov::op::v0::Constant>(idxPrc, ov::Shape(indicesShape), indicesValue);
-    auto axis_node = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{axis});
-    auto s2d = std::make_shared<ov::op::v3::ScatterUpdate>(paramVector[0], indicesNode, paramVector[1], axis_node);
-
-    ov::ResultVector results{std::make_shared<ov::op::v0::Result>(s2d)};
-    function = std::make_shared<ov::Model>(results, paramVector, "ScatterUpdate");
-}
-} // namespace LayerTestsDefinitions
ov::test::utils::vec2str(dataShapes[THEN]); - result << "_ELSE=" << dataType.name() << "_" << ov::test::utils::vec2str(dataShapes[ELSE]); - result << "_" << broadcast.m_type; - result << "_targetDevice=" << targetDevice; - return result.str(); - } - - void SelectLayerTest::SetUp() { - std::vector> inputShapes(numOfInputs); - InferenceEngine::Precision inputPrecision; - ov::op::AutoBroadcastSpec broadcast; - std::tie(inputShapes, inputPrecision, broadcast, targetDevice) = this->GetParam(); - - ov::ParameterVector paramNodesVector; - auto paramNode = std::make_shared(ov::element::Type_t::boolean, ov::Shape(inputShapes[CONDITION])); - paramNodesVector.push_back(paramNode); - auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - for (size_t i = 1; i < inputShapes.size(); i++) { - paramNode = std::make_shared(inType, ov::Shape(inputShapes[i])); - paramNodesVector.push_back(paramNode); - } - auto select = std::make_shared(paramNodesVector[0], paramNodesVector[1], paramNodesVector[2], broadcast); - ov::ResultVector results{std::make_shared(select)}; - function = std::make_shared(results, paramNodesVector, "select"); - } -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp b/src/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp deleted file mode 100644 index 18f82ac6e33092..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/shape_of.hpp" - -namespace LayerTestsDefinitions { - - std::string ShapeOfLayerTest::getTestCaseName(testing::TestParamInfo obj) { - InferenceEngine::SizeVector inputShapes; - InferenceEngine::Precision inputPrecision; - InferenceEngine::Precision outputPrecision; - std::string targetDevice; - std::tie(inputPrecision, outputPrecision, inputShapes, targetDevice) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "Precision=" << inputPrecision.name() << "_"; - result << "Output Precision=" << outputPrecision.name() << "_"; - result << "TargetDevice=" << targetDevice; - return result.str(); - } - - void ShapeOfLayerTest::SetUp() { - InferenceEngine::SizeVector inputShapes; - InferenceEngine::Precision inputPrecision; - std::tie(inputPrecision, outPrc, inputShapes, targetDevice) = this->GetParam(); - auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - auto outType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc); - ov::ParameterVector param {std::make_shared(inType, ov::Shape(inputShapes))}; - auto shapeOf = std::make_shared(param[0], outType); - ov::ResultVector results{std::make_shared(shapeOf)}; - function = std::make_shared(results, param, "shapeOf"); - } - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp b/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp deleted file mode 100644 index 17e49dfe7484e6..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include "shared_test_classes/single_layer/shuffle_channels.hpp" - -namespace 
LayerTestsDefinitions { - -std::string ShuffleChannelsLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - shuffleChannelsSpecificParams shuffleChannelsParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::tie(shuffleChannelsParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - int axis, group; - std::tie(axis, group) = shuffleChannelsParams; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "Axis=" << std::to_string(axis) << "_"; - result << "Group=" << std::to_string(group) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void ShuffleChannelsLayerTest::SetUp() { - shuffleChannelsSpecificParams shuffleChannelsParams; - std::vector inputShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(shuffleChannelsParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - int axis, group; - std::tie(axis, group) = shuffleChannelsParams; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto shuffleChannels = std::make_shared(params[0], axis, group); - ov::ResultVector results{std::make_shared(shuffleChannels)}; - function = std::make_shared(results, params, "shuffleChannels"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/slice.cpp b/src/tests/functional/shared_test_classes/src/single_layer/slice.cpp deleted file mode 100644 index 869e52bf7af36b..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/slice.cpp +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" - -#include "shared_test_classes/single_layer/slice.hpp" - -using namespace ngraph; - -namespace LayerTestsDefinitions { - -std::string Slice8LayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector shapes; - Slice8SpecificParams params; - ov::element::Type_t netPrecision, inPrecision, outPrecision; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::map additionalConfig; - std::tie(params, netPrecision, inPrecision, outPrecision, inLayout, outLayout, targetName, additionalConfig) = obj.param; - std::ostringstream result; - result << "IS=("; - for (const auto& shape : params.shapes) { - result << ov::test::utils::partialShape2str({shape.first}) << "_"; - } - result << ")_TS=("; - for (const auto& shape : params.shapes) { - for (const auto& item : shape.second) { - result << ov::test::utils::vec2str(item) << "_"; - } - } - result << "start=" << ov::test::utils::vec2str(params.start) << "_"; - result << "stop=" << ov::test::utils::vec2str(params.stop) << "_"; - result << "step=" << ov::test::utils::vec2str(params.step) << "_"; - result << "axes=" << ov::test::utils::vec2str(params.axes) << "_"; - result << "netPRC=" << netPrecision << "_"; - result << "trgDev=" << targetName; - return 
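Note: the ShuffleChannels graph removed above is a single op over one parameter; a minimal sketch with illustrative axis/group values (group must divide the dimension at the chosen axis):

#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/shuffle_channels.hpp"

// v0::ShuffleChannels over a single parameter, as in the deleted SetUp.
int main() {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 12, 5, 5});
    auto shuffle = std::make_shared<ov::op::v0::ShuffleChannels>(param, /*axis=*/1, /*group=*/3);
    auto model = std::make_shared<ov::Model>(ov::OutputVector{shuffle},
                                             ov::ParameterVector{param}, "shuffleChannels");
}
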
result.str(); -} - -void Slice8LayerTest::SetUp() { - Slice8SpecificParams sliceParams; - ov::test::ElementType netPrecision, inPrecision, outPrecision; - InferenceEngine::Layout inLayout, outLayout; - std::map additionalConfig; - std::tie(sliceParams, netPrecision, inPrecision, outPrecision, inLayout, outLayout, targetDevice, additionalConfig) = this->GetParam(); - - configuration.insert(additionalConfig.begin(), additionalConfig.end()); - init_input_shapes(sliceParams.shapes); - ov::ParameterVector params; - for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(netPrecision, shape)); - } - OPENVINO_SUPPRESS_DEPRECATED_START - auto sliceOp = ngraph::builder::makeSlice(params[0], sliceParams.start, sliceParams.stop, sliceParams.step, sliceParams.axes, netPrecision); - OPENVINO_SUPPRESS_DEPRECATED_END - - ov::ResultVector results; - for (int i = 0; i < sliceOp->get_output_size(); i++) - results.push_back(std::make_shared(sliceOp->output(i))); - function = std::make_shared(results, params, "Slice-8"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp b/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp deleted file mode 100644 index 78416211185467..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include "shared_test_classes/single_layer/space_to_batch.hpp" - -namespace LayerTestsDefinitions { - -std::string SpaceToBatchLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector inShapes; - std::vector blockShape, padsBegin, padsEnd; - InferenceEngine::Precision netPrc; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::tie(blockShape, padsBegin, padsEnd, inShapes, netPrc, inPrc, outPrc, inLayout, outLayout, targetName) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inShapes) << "_"; - result << "netPRC=" << netPrc.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "BS=" << ov::test::utils::vec2str(blockShape) << "_"; - result << "PB=" << ov::test::utils::vec2str(padsBegin) << "_"; - result << "PE=" << ov::test::utils::vec2str(padsEnd) << "_"; - result << "trgDev=" << targetName << "_"; - return result.str(); -} - -void SpaceToBatchLayerTest::SetUp() { - std::vector inputShape; - std::vector blockShape, padsBegin, padsEnd; - InferenceEngine::Precision netPrecision; - std::tie(blockShape, padsBegin, padsEnd, inputShape, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - OPENVINO_SUPPRESS_DEPRECATED_START - auto s2b = ngraph::builder::makeSpaceToBatch(params[0], ngPrc, blockShape, padsBegin, padsEnd); - OPENVINO_SUPPRESS_DEPRECATED_END - ov::ResultVector results{std::make_shared(s2b)}; - function = std::make_shared(results, params, "SpaceToBatch"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp 
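Note: Slice8LayerTest::SetUp above still routes through the deprecated ngraph::builder::makeSlice. A hedged drop-in sketch that builds op::v8::Slice directly from i64 Constants instead (helper name hypothetical):

#include "openvino/op/constant.hpp"
#include "openvino/op/slice.hpp"

// Direct construction of Slice-8; the axes input is optional.
std::shared_ptr<ov::Node> make_slice(const ov::Output<ov::Node>& data,
                                     const std::vector<int64_t>& start,
                                     const std::vector<int64_t>& stop,
                                     const std::vector<int64_t>& step,
                                     const std::vector<int64_t>& axes = {}) {
    auto c = [](const std::vector<int64_t>& v) {
        return ov::op::v0::Constant::create(ov::element::i64, ov::Shape{v.size()}, v);
    };
    if (axes.empty())
        return std::make_shared<ov::op::v8::Slice>(data, c(start), c(stop), c(step));
    return std::make_shared<ov::op::v8::Slice>(data, c(start), c(stop), c(step), c(axes));
}
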
b/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp deleted file mode 100644 index 1579cf0f1408e1..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" -#include "shared_test_classes/single_layer/space_to_depth.hpp" - -namespace LayerTestsDefinitions { - -static inline std::string SpaceToDepthModeToString(const ov::op::v0::SpaceToDepth::SpaceToDepthMode& mode) { - static std::map names = { - {ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, - {ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, "DEPTH_FIRST"}, - }; - - auto i = names.find(mode); - if (i != names.end()) - return i->second; - else - throw std::runtime_error("Unsupported SpaceToDepthMode"); -} - -std::string SpaceToDepthLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector inShape; - ov::op::v0::SpaceToDepth::SpaceToDepthMode mode; - std::size_t blockSize; - InferenceEngine::Precision inputPrecision; - std::string targetName; - std::tie(inShape, inputPrecision, mode, blockSize, targetName) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inShape) << "_"; - result << "inPrc=" << inputPrecision.name() << "_"; - result << "M=" << SpaceToDepthModeToString(mode) << "_"; - result << "BS=" << blockSize << "_"; - result << "targetDevice=" << targetName << "_"; - return result.str(); -} - -void SpaceToDepthLayerTest::SetUp() { - std::vector inShape; - ov::op::v0::SpaceToDepth::SpaceToDepthMode mode; - std::size_t blockSize; - InferenceEngine::Precision inputPrecision; - std::tie(inShape, inputPrecision, mode, blockSize, targetDevice) = this->GetParam(); - auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); - ov::ParameterVector params {std::make_shared(inPrc, ov::Shape(inShape))}; - auto s2d = std::make_shared(params[0], mode, blockSize); - ov::ResultVector results{std::make_shared(s2d)}; - function = std::make_shared(results, params, "SpaceToDepth"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/split.cpp b/src/tests/functional/shared_test_classes/src/single_layer/split.cpp deleted file mode 100644 index 18686875ba1954..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/split.cpp +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/split.hpp" - -namespace LayerTestsDefinitions { - -std::string SplitLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - size_t numSplits; - int64_t axis; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes, outIndices; - std::string targetDevice; - std::tie(numSplits, axis, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, outIndices, targetDevice) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "numSplits=" << numSplits << "_"; - result << "axis=" << axis << "_"; - if (!outIndices.empty()) { - result << "outIndices" << ov::test::utils::vec2str(outIndices) << "_"; - } - result << "IS"; - result << "netPRC=" << netPrecision.name() << 
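Note: for the SpaceToDepth test deleted above, the shape effect of the op is worth spelling out; a minimal sketch with illustrative values:

#include "openvino/op/parameter.hpp"
#include "openvino/op/space_to_depth.hpp"

// v0::SpaceToDepth with BLOCKS_FIRST and block_size = 2: an {N, C, H, W}
// input becomes {N, C*block^2, H/block, W/block}.
int main() {
    auto p = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 4, 4});
    auto s2d = std::make_shared<ov::op::v0::SpaceToDepth>(
        p, ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, 2);
    // s2d->get_output_shape(0) == ov::Shape{1, 12, 2, 2}
}
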
"_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void SplitLayerTest::SetUp() { - size_t axis, numSplits; - std::vector inputShape, outIndices; - InferenceEngine::Precision netPrecision; - std::tie(numSplits, axis, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, outIndices, targetDevice) = this->GetParam(); - if (outIndices.empty()) { - for (int i = 0; i < numSplits; ++i) { - outIndices.push_back(i); - } - } - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - OPENVINO_SUPPRESS_DEPRECATED_START - auto split = std::dynamic_pointer_cast(ngraph::builder::makeSplit(params[0], - ngPrc, numSplits, axis)); - OPENVINO_SUPPRESS_DEPRECATED_END - ov::ResultVector results; - for (int i = 0; i < outIndices.size(); i++) { - results.push_back(std::make_shared(split->output(outIndices[i]))); - } - function = std::make_shared(results, params, "split"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp b/src/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp deleted file mode 100644 index d7e8ebee7bb6c7..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/squeeze_unsqueeze.hpp" - -namespace LayerTestsDefinitions { -std::string SqueezeUnsqueezeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - ShapeAxesTuple shapeItem; - std::string targetDevice; - ngraph::helpers::SqueezeOpType opType; - std::tie(shapeItem, opType, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = obj.param; - - std::ostringstream result; - const char separator = '_'; - result << "OpType=" << opType << separator; - result << "IS=" << ov::test::utils::vec2str(shapeItem.first) << separator; - result << "Axes=" << (shapeItem.second.empty() ? 
"default" : ov::test::utils::vec2str(shapeItem.second)) << separator; - result << "netPRC=" << netPrecision.name() << separator; - result << "inPRC=" << inPrc.name() << separator; - result << "outPRC=" << outPrc.name() << separator; - result << "inL=" << inLayout << separator; - result << "outL=" << outLayout << separator; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void SqueezeUnsqueezeLayerTest::SetUp() { - InferenceEngine::Precision netPrecision; - std::vector inputShapes; - std::vector axesVector; - ShapeAxesTuple shapeItem; - ngraph::helpers::SqueezeOpType opType; - std::tie(shapeItem, opType, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = GetParam(); - std::tie(inputShapes, axesVector) = shapeItem; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShapes))}; - std::shared_ptr op; - - if (axesVector.empty() && opType == ngraph::helpers::SqueezeOpType::SQUEEZE) { - op = std::make_shared(params.front()); - } else { - OPENVINO_SUPPRESS_DEPRECATED_START - op = ngraph::builder::makeSqueezeUnsqueeze(params.front(), ov::element::i64, axesVector, opType); - OPENVINO_SUPPRESS_DEPRECATED_END - } - - const ov::ResultVector results{std::make_shared(op)}; - function = std::make_shared(results, params, "Squeeze"); -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp b/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp deleted file mode 100644 index 95a7aa4044aec4..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/builders.hpp" - -#include "shared_test_classes/single_layer/strided_slice.hpp" - -namespace LayerTestsDefinitions { - -std::string StridedSliceLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - StridedSliceSpecificParams params; - InferenceEngine::Precision netPrc; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::string targetName; - std::map additionalConfig; - std::tie(params, netPrc, inPrc, outPrc, inLayout, outLayout, targetName, additionalConfig) = obj.param; - std::ostringstream result; - result << "inShape=" << ov::test::utils::vec2str(params.inputShape) << "_"; - result << "netPRC=" << netPrc.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "begin=" << ov::test::utils::vec2str(params.begin) << "_"; - result << "end=" << ov::test::utils::vec2str(params.end) << "_"; - result << "stride=" << ov::test::utils::vec2str(params.strides) << "_"; - result << "begin_m=" << ov::test::utils::vec2str(params.beginMask) << "_"; - result << "end_m=" << ov::test::utils::vec2str(params.endMask) << "_"; - result << "new_axis_m=" << (params.newAxisMask.empty() ? "def" : ov::test::utils::vec2str(params.newAxisMask)) << "_"; - result << "shrink_m=" << (params.shrinkAxisMask.empty() ? "def" : ov::test::utils::vec2str(params.shrinkAxisMask)) << "_"; - result << "ellipsis_m=" << (params.ellipsisAxisMask.empty() ? 
"def" : ov::test::utils::vec2str(params.ellipsisAxisMask)) << "_"; - result << "trgDev=" << targetName; - return result.str(); -} - -void StridedSliceLayerTest::SetUp() { - StridedSliceSpecificParams ssParams; - InferenceEngine::Precision netPrecision; - std::map additionalConfig; - std::tie(ssParams, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice, additionalConfig) = this->GetParam(); - configuration.insert(additionalConfig.begin(), additionalConfig.end()); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(ssParams.inputShape))}; - - ov::Shape constShape = {ssParams.begin.size()}; - auto beginNode = std::make_shared(ov::element::i64, constShape, ssParams.begin.data()); - auto endNode = std::make_shared(ov::element::i64, constShape, ssParams.end.data()); - auto strideNode = std::make_shared(ov::element::i64, constShape, ssParams.strides.data()); - auto ss = std::make_shared(params[0], - beginNode, - endNode, - strideNode, - ssParams.beginMask, - ssParams.endMask, - ssParams.newAxisMask, - ssParams.shrinkAxisMask, - ssParams.ellipsisAxisMask); - - ov::ResultVector results{std::make_shared(ss)}; - function = std::make_shared(results, params, "StridedSlice"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp b/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp deleted file mode 100644 index 7a9a0cc34c9d51..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include "shared_test_classes/single_layer/tensor_iterator.hpp" - -namespace LayerTestsDefinitions { - - std::string TensorIteratorTest::getTestCaseName(const testing::TestParamInfo &obj) { - bool should_decompose; - size_t seq_lengths; - size_t batch; - size_t hidden_size; - size_t input_size = 10; - size_t sequence_axis; - ngraph::helpers::TensorIteratorBody ti_body; - float clip; - ov::op::RecurrentSequenceDirection direction; - InferenceEngine::Precision netPrecision; - std::string targetDevice; - std::tie(should_decompose, seq_lengths, batch, hidden_size, sequence_axis, clip, ti_body, direction, netPrecision, - targetDevice) = obj.param; - std::vector> inputShapes = {}; - - switch (ti_body) { - case ngraph::helpers::TensorIteratorBody::LSTM: - inputShapes = { - {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, - {4 * hidden_size, hidden_size}, {4 * hidden_size}}, - }; - break; - case ngraph::helpers::TensorIteratorBody::GRU: - inputShapes = { - {{batch, input_size}, {batch, hidden_size}, {3 * hidden_size, input_size}, - {3 * hidden_size, hidden_size}, {3 * hidden_size}}, - }; - break; - case ngraph::helpers::TensorIteratorBody::RNN: - inputShapes = {{batch, input_size}, {batch, hidden_size}, - {hidden_size, input_size}, {hidden_size, hidden_size}, {hidden_size}}; - break; - } - - std::ostringstream result; - result << "unrolling=" << should_decompose << "_"; - result << "seq_len=" << seq_lengths << "_"; - result << "seq_len_axis=" << sequence_axis << "_"; - result << "batch=" << batch << "_"; - result << "hidden_size=" << hidden_size << "_"; - result << "input_size=" << input_size << "_"; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "TensorIteratorBody=" << 
ti_body << "_"; - result << "direction=" << direction << "_"; - result << "clip=" << clip << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "targetDevice=" << targetDevice << "_"; - return result.str(); - } - - void TensorIteratorTest::SetUp() { - size_t seq_lengths; - bool should_decompose; - size_t batch; - size_t hidden_size; - size_t input_size = 10; - size_t sequence_axis; - ngraph::helpers::TensorIteratorBody ti_body; - float clip; - ov::op::RecurrentSequenceDirection direction; - InferenceEngine::Precision netPrecision; - std::tie(should_decompose, seq_lengths, batch, hidden_size, sequence_axis, clip, ti_body, direction, netPrecision, - targetDevice) = this->GetParam(); - std::vector> inputShapes; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto tensor_iterator = std::make_shared(); - - // Each case consist of 3 steps: - // 1. Create TensorIterator body. - // 2. Set PortMap - // 3. Create outer function - auto axis = std::make_shared(ov::element::i64, ov::Shape{1}, - std::vector{static_cast(sequence_axis)}); - switch (ti_body) { - case ngraph::helpers::TensorIteratorBody::LSTM: { - inputShapes = { - {{batch, seq_lengths, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size}, - {4 * hidden_size, hidden_size}, {4 * hidden_size}}, - }; - if (sequence_axis == 0) { - // swap batch and seq_lengths - std::swap(inputShapes[0][0], inputShapes[0][1]); - } - ov::ParameterVector outer_params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1])), - std::make_shared(ngPrc, ov::Shape(inputShapes[2]))}; - - // 1. Create TensorIterator body. - inputShapes[0][sequence_axis] = 1; // sliced dimension - ov::ParameterVector body_params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1])), - std::make_shared(ngPrc, ov::Shape(inputShapes[2]))}; - - auto squeeze = std::make_shared(body_params[0], axis); - std::vector WRB = {inputShapes[3], inputShapes[4], inputShapes[5]}; - ov::OutputVector out_vector = {squeeze, body_params[1], body_params[2]}; - auto lstm_cell = ngraph::builder::makeLSTM(out_vector, WRB, hidden_size, {"sigmoid", "tanh", "tanh"}, {}, {}, clip); - auto unsqueeze = std::make_shared(lstm_cell->output(0), axis); - ov::ResultVector results{std::make_shared(unsqueeze), - std::make_shared(lstm_cell->output(0)), - std::make_shared(lstm_cell->output(1))}; - auto body = std::make_shared(results, body_params, "lstm_cell"); - tensor_iterator->set_function(body); - - // 2. Set PortMap - if (direction == ov::op::RecurrentSequenceDirection::FORWARD) { - tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis); - tensor_iterator->get_concatenated_slices(results[0], 0, 1, 1, -1, sequence_axis); - } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) { - tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); - tensor_iterator->get_concatenated_slices(results[0], -1, -1, 1, 0, sequence_axis); - } else { - OPENVINO_ASSERT(false, "Bidirectional case is not supported."); - } - - tensor_iterator->set_merged_input(body_params[1], outer_params[1], results[1]); - tensor_iterator->set_merged_input(body_params[2], outer_params[2], results[2]); - tensor_iterator->get_iter_value(results[1]); - tensor_iterator->get_iter_value(results[2]); - - // 3. 
Outer function - function = std::make_shared(ov::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1), - tensor_iterator->output(2)}, outer_params); - break; - } - case ngraph::helpers::TensorIteratorBody::GRU: { - inputShapes = { - {{batch, seq_lengths, input_size}, {batch, hidden_size}, {3 * hidden_size, input_size}, - {3 * hidden_size, hidden_size}, {3 * hidden_size}}, - }; - if (sequence_axis == 0) { - // swap batch and seq_lengths - std::swap(inputShapes[0][0], inputShapes[0][1]); - } - ov::ParameterVector outer_params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; - - // 1. Create TensorIterator body. - inputShapes[0][sequence_axis] = 1; // sliced dimension - ov::ParameterVector body_params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; - - std::vector WRB = {inputShapes[2], inputShapes[3], inputShapes[4]}; - auto squeeze = std::make_shared(body_params[0], axis); - ov::OutputVector out_vector = {squeeze, body_params[1]}; - auto gru_cell = ngraph::builder::makeGRU(out_vector, WRB, hidden_size, {"sigmoid", "tanh"}, - {}, {}, clip, false); - auto unsqueeze = std::make_shared(gru_cell->output(0), axis); - ov::ResultVector results{std::make_shared(gru_cell->output(0)), - std::make_shared(unsqueeze)}; - auto body = std::make_shared(results, body_params, "gru_cell"); - tensor_iterator->set_function(body); - - // 2. Set PortMap - if (direction == ov::op::RecurrentSequenceDirection::FORWARD) { - tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis); - tensor_iterator->get_concatenated_slices(results[1], 0, 1, 1, -1, sequence_axis); - } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) { - tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); - tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, sequence_axis); - } else { - OPENVINO_ASSERT(false, "Bidirectional case is not supported."); - } - - tensor_iterator->set_merged_input(body_params[1], outer_params[1], results[0]); - tensor_iterator->get_iter_value(results[0]); - - // 3. Outer function - function = std::make_shared(ov::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1)}, outer_params); - break; - } - case ngraph::helpers::TensorIteratorBody::RNN: { - inputShapes = {{batch, seq_lengths, input_size}, - {batch, hidden_size}, - {hidden_size, input_size}, - {hidden_size, hidden_size}, - {hidden_size}}; - if (sequence_axis == 0) { - // swap batch and seq_lengths - std::swap(inputShapes[0][0], inputShapes[0][1]); - } - ov::ParameterVector outer_params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; - - // 1. Create TensorIterator body. 
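Note: the PortMap wiring used in all three branches here (set_sliced_input, set_merged_input, get_concatenated_slices, get_iter_value) is easier to see with a trivial body; a hedged sketch with out = in + state instead of a recurrent cell, shapes illustrative:

#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/tensor_iterator.hpp"

std::shared_ptr<ov::Model> make_ti(size_t seq_len = 5, size_t batch = 2, size_t feat = 3) {
    auto outer_x = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{batch, seq_len, feat});
    auto outer_h = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{batch, 1, feat});

    // Body: consumes one slice per iteration plus the carried state.
    auto body_x = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{batch, 1, feat});
    auto body_h = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{batch, 1, feat});
    auto sum = std::make_shared<ov::op::v1::Add>(body_x, body_h);
    auto body = std::make_shared<ov::Model>(ov::OutputVector{sum}, ov::ParameterVector{body_x, body_h});

    auto ti = std::make_shared<ov::op::v0::TensorIterator>();
    ti->set_function(body);
    // Slice axis 1 forward: start 0, stride 1, part_size 1, end -1.
    ti->set_sliced_input(body_x, outer_x, 0, 1, 1, -1, 1);
    // Back-edge: body_h starts from outer_h, then is fed by sum each iteration.
    ti->set_merged_input(body_h, outer_h, sum);
    auto out = ti->get_concatenated_slices(sum, 0, 1, 1, -1, 1);
    return std::make_shared<ov::Model>(ov::OutputVector{out}, ov::ParameterVector{outer_x, outer_h});
}
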
- inputShapes[0][sequence_axis] = 1; // sliced dimension - ov::ParameterVector body_params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), - std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; - std::vector WRB = {inputShapes[2], inputShapes[3], inputShapes[4]}; - auto squeeze = std::make_shared(body_params[0], axis); - ov::OutputVector out_vector = {squeeze, body_params[1]}; - auto rnn_cell = ngraph::builder::makeRNN(out_vector, WRB, hidden_size, {"tanh"}, {}, {}, clip); - auto unsqueeze = std::make_shared(rnn_cell->output(0), axis); - ov::ResultVector results{std::make_shared(rnn_cell), - std::make_shared(unsqueeze)}; - auto body = std::make_shared(results, body_params, "rnn_cell"); - tensor_iterator->set_function(body); - - // 2. Set PortMap - if (direction == ov::op::RecurrentSequenceDirection::FORWARD) { - tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis); - tensor_iterator->get_concatenated_slices(results[1], 0, 1, 1, -1, sequence_axis); - } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) { - tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); - tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, sequence_axis); - } else { - OPENVINO_ASSERT(false, "Bidirectional case is not supported."); - } - - tensor_iterator->set_merged_input(body_params[1], outer_params[1], results[0]); - tensor_iterator->get_iter_value(results[0]); - - // 3. Outer function - function = std::make_shared(ov::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1)}, outer_params); - break; - } - } - if (should_decompose) { - ov::pass::Manager m; - m.register_pass(); - m.run_passes(function); - } - } -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/tile.cpp b/src/tests/functional/shared_test_classes/src/single_layer/tile.cpp deleted file mode 100644 index 74f61406e76596..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/tile.cpp +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/tile.hpp" - -namespace LayerTestsDefinitions { - -std::string TileLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - TileSpecificParams tileParams; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::tie(tileParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "Repeats=" << ov::test::utils::vec2str(tileParams) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void TileLayerTest::SetUp() { - TileSpecificParams tileParams; - std::vector inputShape; - auto netPrecision = InferenceEngine::Precision::UNSPECIFIED; - std::tie(tileParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector 
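Note: the register_pass() call in the should_decompose branch above lost its template argument in rendering; in the original sources that branch unrolls the TensorIterator. A hedged reconstruction, assuming the argument was ov::pass::UnrollTensorIterator:

#include "openvino/pass/manager.hpp"
#include "transformations/control_flow/unroll_tensor_iterator.hpp"

// Presumed original form of the decompose branch: unroll the TensorIterator
// into an explicit per-iteration subgraph before running the test.
void decompose(const std::shared_ptr<ov::Model>& function) {
    ov::pass::Manager m;
    m.register_pass<ov::pass::UnrollTensorIterator>();
    m.run_passes(function);
}
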
params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto repeatsNode = std::make_shared(ov::element::i64, std::vector{tileParams.size()}, tileParams); - auto tile = std::make_shared(params[0], repeatsNode); - - ov::ResultVector results{std::make_shared(tile)}; - function = std::make_shared(results, params, "tile"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/topk.cpp b/src/tests/functional/shared_test_classes/src/single_layer/topk.cpp deleted file mode 100644 index 0cac7708c685ff..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/topk.cpp +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/topk.hpp" - -namespace LayerTestsDefinitions { - std::string TopKLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout; - InferenceEngine::SizeVector inputShape; - std::string targetDevice; - int64_t keepK, axis; - ov::op::v3::TopK::Mode mode; - ov::op::v3::TopK::SortType sort; - std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inLayout, inputShape, targetDevice) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; - result << "k=" << keepK << "_"; - result << "axis=" << axis << "_"; - result << "mode=" << mode << "_"; - result << "sort=" << sort << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void TopKLayerTest::SetUp() { - InferenceEngine::SizeVector inputShape; - InferenceEngine::Precision netPrecision; - int64_t keepK, axis; - ov::op::v3::TopK::Mode mode; - ov::op::v3::TopK::SortType sort; - std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inLayout, inputShape, targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto k = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, &keepK); - auto topk = std::dynamic_pointer_cast( - std::make_shared(params[0], k, axis, mode, sort)); - - ov::ResultVector results; - for (size_t i = 0; i < topk->get_output_size(); i++) { - results.push_back(std::make_shared(topk->output(i))); - } - function = std::make_shared(results, params, "TopK"); -} - -InferenceEngine::Blob::Ptr TopKLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { - IE_ASSERT(InferenceEngine::Precision::FP32 == info.getTensorDesc().getPrecision() - || InferenceEngine::Precision::BF16 == info.getTensorDesc().getPrecision() - || InferenceEngine::Precision::FP16 == info.getTensorDesc().getPrecision()); - - InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc()); - blob->allocate(); - - size_t size = blob->size(); - int start = - static_cast(size / 2); - std::vector data(size); - std::iota(data.begin(), data.end(), start); - std::mt19937 gen(0); - std::shuffle(data.begin(), data.end(), gen); - - float divisor = size / 10.0; - if (InferenceEngine::Precision::FP32 == info.getTensorDesc().getPrecision()) { - auto *rawBlobDataPtr = blob->buffer().as(); - for (size_t i = 0; i < size; 
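Note: the TopK GenerateInput override above (continued below) produces unique, shuffled, scaled values so that top-k selection is tie-free and deterministic across runs. The core recipe as a standalone helper:

#include <algorithm>
#include <cstddef>
#include <numeric>
#include <random>
#include <vector>

// Unique ints centered on zero, shuffled with a fixed seed, then scaled:
// no duplicate values means TopK has a single valid answer to compare against.
std::vector<float> topk_input(size_t size) {
    std::vector<int> data(size);
    std::iota(data.begin(), data.end(), -static_cast<int>(size / 2));
    std::mt19937 gen(0);
    std::shuffle(data.begin(), data.end(), gen);
    std::vector<float> out(size);
    const float divisor = size / 10.0f;
    std::transform(data.begin(), data.end(), out.begin(),
                   [divisor](int v) { return v / divisor; });
    return out;
}
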
i++) { - rawBlobDataPtr[i] = static_cast(data[i] / divisor); - } - } else if (InferenceEngine::Precision::BF16 == info.getTensorDesc().getPrecision()) { - auto *rawBlobDataPtr = blob->buffer().as(); - for (size_t i = 0; i < size; i++) { - rawBlobDataPtr[i] = static_cast(data[i] / divisor); - } - } else if (InferenceEngine::Precision::FP16 == info.getTensorDesc().getPrecision()) { - auto* rawBlobDataPtr = blob->buffer().as(); - for (size_t i = 0; i < size; i++) { - rawBlobDataPtr[i] = static_cast(data[i] / divisor); - } - } - - return blob; -} -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/transpose.cpp b/src/tests/functional/shared_test_classes/src/single_layer/transpose.cpp deleted file mode 100644 index 47347234e8c38a..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/transpose.cpp +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/transpose.hpp" - -namespace LayerTestsDefinitions { - -std::string TransposeLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - std::vector inputShapes, inputOrder; - std::string targetDevice; - std::tie(inputOrder, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "inputOrder=" << ov::test::utils::vec2str(inputOrder) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); -} - -void TransposeLayerTest::SetUp() { - std::vector inputShape, inputOrder; - InferenceEngine::Precision netPrecision; - std::tie(inputOrder, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - const auto inOrderShape = inputOrder.empty() ? 
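Note: the ternary in the Transpose SetUp (continued below) encodes the op's convention that an order tensor with zero elements means "reverse all axes". In isolation:

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/transpose.hpp"

// An empty i64 order Constant (shape {0}) makes v1::Transpose reverse the
// axis order, i.e. {2, 3, 4} -> {4, 3, 2}.
int main() {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3, 4});
    auto order = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{0}, std::vector<int64_t>{});
    auto transpose = std::make_shared<ov::op::v1::Transpose>(param, order);
    // transpose->get_output_shape(0) == ov::Shape{4, 3, 2}
}
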
ov::Shape({0}) : ov::Shape({inputShape.size()}); - const auto inputOrderOp = std::make_shared(ov::element::i64, - inOrderShape, - inputOrder); - const auto transpose = std::make_shared(params.at(0), inputOrderOp); - const ov::ResultVector results{std::make_shared(transpose)}; - function = std::make_shared(results, params, "Transpose"); -} - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp b/src/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp deleted file mode 100644 index 4ae365569e4895..00000000000000 --- a/src/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shared_test_classes/single_layer/variadic_split.hpp" - -namespace LayerTestsDefinitions { - - std::string VariadicSplitLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { - int64_t axis; - std::vector numSplits; - InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::tie(numSplits, axis, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param; - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "numSplits=" << ov::test::utils::vec2str(numSplits) << "_"; - result << "axis=" << axis << "_"; - result << "IS"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrc.name() << "_"; - result << "outPRC=" << outPrc.name() << "_"; - result << "inL=" << inLayout << "_"; - result << "outL=" << outLayout << "_"; - result << "trgDev=" << targetDevice; - return result.str(); - } - - void VariadicSplitLayerTest::SetUp() { - int64_t axis; - std::vector inputShape, numSplits; - InferenceEngine::Precision netPrecision; - std::tie(numSplits, axis, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - - auto split_axis_op = std::make_shared(ov::element::i64, ov::Shape{}, axis); - auto num_split = std::make_shared(ov::element::u64, ov::Shape{numSplits.size()}, numSplits); - auto VariadicSplit = std::make_shared(params[0], split_axis_op, num_split); - - ov::ResultVector results; - for (int i = 0; i < numSplits.size(); i++) { - results.push_back(std::make_shared(VariadicSplit->output(i))); - } - function = std::make_shared(results, params, "VariadicSplit"); - } - -} // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_op/gather.cpp b/src/tests/functional/shared_test_classes/src/single_op/gather.cpp index d320aa16806b23..d88d6ca64d512c 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/gather.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/gather.cpp @@ -244,5 +244,77 @@ void Gather8withIndicesDataLayerTest::SetUp() { function = std::make_shared(result, ov::ParameterVector{param}, "gather"); } +// Gather String support +std::string GatherStringWithIndicesDataLayerTest::getTestCaseName(const testing::TestParamInfo& obj) { + const GatherStringParamsTuple& basicParams = obj.param; + std::vector indicesData; + std::vector str_data; + + std::tuple 
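Note: the VariadicSplit SetUp above uses u64 split lengths; with signed i64 lengths the op also accepts a single -1 meaning "whatever remains along the axis". A minimal sketch with illustrative shapes:

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/variadic_split.hpp"

// Split a {2, 8} tensor along axis 1 into lengths {2, -1, 3}:
// the -1 resolves to 3, so the outputs are {2, 2}, {2, 3}, {2, 3}.
int main() {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 8});
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1});
    auto lens = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {2, -1, 3});
    auto vsplit = std::make_shared<ov::op::v1::VariadicSplit>(param, axis, lens);
}
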
axis_batch_idx; + std::vector indices; + ov::Shape indices_shape; + std::vector shapes; + ov::element::Type model_type; + std::string device_name; + std::tie(shapes, indices_shape, axis_batch_idx, model_type, device_name, indicesData, str_data) = basicParams; + + std::ostringstream result; + result << "IS=("; + for (size_t i = 0lu; i < shapes.size(); i++) { + result << ov::test::utils::partialShape2str({shapes[i].first}) << (i < shapes.size() - 1lu ? "_" : ""); + } + result << ")_TS="; + for (size_t i = 0lu; i < shapes.front().second.size(); i++) { + result << "{"; + for (size_t j = 0lu; j < shapes.size(); j++) { + result << ov::test::utils::vec2str(shapes[j].second[i]) << (j < shapes.size() - 1lu ? "_" : ""); + } + result << "}_"; + } + result << "axis=" << std::get<0>(axis_batch_idx) << "_"; + result << "batch_idx=" << std::get<1>(axis_batch_idx) << "_"; + result << "indices_shape=" << ov::test::utils::vec2str(indices_shape) << "_"; + result << "netPRC=" << model_type.get_type_name() << "_"; + result << "trgDev=" << device_name << "_"; + result << "indicesData=" << ov::test::utils::vec2str(indicesData) << "_"; + + return result.str(); +} + +void GatherStringWithIndicesDataLayerTest::SetUp() { + const GatherStringParamsTuple& basicParams = GetParam(); + std::vector indicesData; + std::tuple axis_batch_idx; + ov::Shape indices_shape; + std::vector shapes; + ov::element::Type model_type; + std::tie(shapes, indices_shape, axis_batch_idx, model_type, targetDevice, indicesData, string_data) = basicParams; + init_input_shapes(shapes); + + int axis = std::get<0>(axis_batch_idx); + int batch_idx = std::get<1>(axis_batch_idx); + auto param = std::make_shared(model_type, inputDynamicShapes.front()); + + // create indices tensor and fill data + ov::Tensor indices_node_tensor{ov::element::i64, indices_shape}; + auto indices_tensor_data = indices_node_tensor.data(); + for (size_t i = 0; i < shape_size(indices_shape); ++i) { + indices_tensor_data[i] = indicesData[i]; + } + + auto indices_node = std::make_shared(indices_node_tensor); + auto axis_node = ov::op::v0::Constant::create(ov::element::i64, ov::Shape(), {axis}); + auto gather = std::make_shared(param, indices_node, axis_node, batch_idx); + auto result = std::make_shared(gather); + function = std::make_shared(result, ov::ParameterVector{param}, "gather"); +} + +void GatherStringWithIndicesDataLayerTest::generate_inputs(const std::vector& target_shapes) { + inputs.clear(); + const auto& func_inputs = function->inputs(); + auto& data_input = func_inputs[0]; + inputs.insert({data_input.get_node_shared_ptr(), ov::Tensor(element::string, data_input.get_shape(), string_data.data())}); +} + } // namespace test } // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp index 55ba0da2d18bd0..6533b799add5ed 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp @@ -6,15 +6,12 @@ #include "common_test_utils/node_builders/constant.hpp" #include "common_test_utils/node_builders/eltwise.hpp" -#include "functional_test_utils/core_config.hpp" #include "common_test_utils/data_utils.hpp" #include "functional_test_utils/skip_tests_config.hpp" #include "openvino/op/util/variable.hpp" #include "openvino/pass/low_latency.hpp" #include "openvino/pass/manager.hpp" -using namespace ngraph; - namespace ov { namespace test { diff --git 
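Note: the new GatherString test above feeds its data as a string tensor rather than a numeric one. Filling such a tensor by hand looks roughly like this (values illustrative):

#include <string>
#include "openvino/runtime/tensor.hpp"

// ov::element::string tensors hold std::string elements, exposed directly
// through data<std::string>().
int main() {
    ov::Tensor t(ov::element::string, ov::Shape{3});
    auto* s = t.data<std::string>();
    s[0] = "alpha";
    s[1] = "beta";
    s[2] = "gamma";
}
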
a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp index 0d3593478e38c0..c300689016458d 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/node_builders/convolution_backprop_data.hpp" #include "common_test_utils/node_builders/constant.hpp" #include "ov_models/utils/ov_helpers.hpp" -#include "ie_common.h" #include "common_test_utils/node_builders/fake_quantize.hpp" namespace ov { @@ -48,7 +47,7 @@ void QuantConvBackpropDataLayerTest::SetUp() { ov::element::Type element_type = ov::element::undefined; std::tie(groupConvBackpropDataParams, element_type, inputShape, targetDevice) = this->GetParam(); ov::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; size_t quantLevels; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp b/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp index 37482b17c5aaea..f90231a4c0bfb7 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp @@ -46,39 +46,3 @@ void ScaleShiftLayerTest::SetUp() { } } // namespace test } // namespace ov - -// legacy impl for npu repo -namespace SubgraphTestsDefinitions { - std::string ScaleShiftLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - std::vector> inputShapes; - InferenceEngine::Precision netPrecision; - std::string targetName; - std::vector scale, shift; - std::tie(inputShapes, netPrecision, targetName, scale, shift) = obj.param; - std::ostringstream results; - - results << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - results << "Scale=" << ov::test::utils::vec2str(scale) << "_"; - results << "Shift=" << ov::test::utils::vec2str(shift) << "_"; - results << "netPRC=" << netPrecision.name() << "_"; - results << "targetDevice=" << targetName << "_"; - return results.str(); - } - - void ScaleShiftLayerTest::SetUp() { - std::vector> inputShapes; - InferenceEngine::Precision netPrecision; - std::vector scale, shift; - std::tie(inputShapes, netPrecision, targetDevice, scale, shift) = this->GetParam(); - auto paramsShape = ov::Shape{1}; - if (inputShapes.size() > 1) - paramsShape = ov::Shape(inputShapes[1]); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector paramsIn{std::make_shared(ngPrc, ov::Shape(inputShapes[0]))}; - auto mul_const = std::make_shared(ngPrc, paramsShape, scale); - auto mul = std::make_shared(paramsIn[0], mul_const); - auto add_const = std::make_shared(ngPrc, paramsShape, shift); - auto add = std::make_shared(mul, add_const); - function = std::make_shared(add, paramsIn, "scale_shift"); - } -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp index 43c939dbc0e3f0..3fecea21d1208a 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_conv_concat.cpp @@ -5,7 +5,6 @@ #include 
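Note: the legacy ScaleShift subgraph dropped below is nothing more than x * scale + shift with broadcastable constants; its OpenVINO 2.0 equivalent, sketched with illustrative values:

#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/parameter.hpp"

// Multiply by a scale constant, then add a shift constant; {1}-shaped
// constants broadcast over the whole input.
std::shared_ptr<ov::Model> make_scale_shift(const ov::Shape& shape) {
    auto in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
    auto scale = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {2.0f});
    auto shift = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {0.5f});
    auto mul = std::make_shared<ov::op::v1::Multiply>(in, scale);
    auto add = std::make_shared<ov::op::v1::Add>(mul, shift);
    return std::make_shared<ov::Model>(ov::OutputVector{add}, ov::ParameterVector{in}, "scale_shift");
}
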
"shared_test_classes/subgraph/split_conv_concat.hpp" #include "common_test_utils/data_utils.hpp" -#include "ie_common.h" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "common_test_utils/node_builders/convolution.hpp" @@ -75,32 +74,3 @@ void SplitConvConcatBase::configure_test(const ov::test::BasicParams& param) { } // namespace test } // namespace ov - -namespace SubgraphTestsDefinitions { - -std::string SplitConvConcat::getTestCaseName(const testing::TestParamInfo& obj) { - InferenceEngine::Precision precision; - InferenceEngine::SizeVector inputShapes; - std::string targetDevice; - std::tie(precision, inputShapes, targetDevice) = obj.param; - auto element_type = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(precision); - - std::ostringstream result; - result << "IS=" << ov::test::utils::vec2str(inputShapes) << "_"; - result << "ET=" << element_type << "_"; - result << "targetDevice=" << targetDevice; - return result.str(); -} - -void SplitConvConcat::SetUp() { - InferenceEngine::Precision precision; - InferenceEngine::SizeVector inputShapes; - std::tie(precision, inputShapes, targetDevice) = this->GetParam(); - auto element_type = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(precision); - ov::Shape shape = inputShapes; - - ov::test::BasicParams param(element_type, shape, targetDevice); - configure_test(param); -} - -} // namespace SubgraphTestsDefinitions diff --git a/src/tests/ov_helpers/ov_models/CMakeLists.txt b/src/tests/ov_helpers/ov_models/CMakeLists.txt index 8203855b2bde43..69631bd82ba2a0 100644 --- a/src/tests/ov_helpers/ov_models/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_models/CMakeLists.txt @@ -23,8 +23,6 @@ ov_add_target( openvino::runtime::dev common_test_utils ADD_CLANG_FORMAT - EXCLUDED_SOURCE_PATHS - "${CMAKE_CURRENT_SOURCE_DIR}/ov_builders" ) ov_build_target_faster(${TARGET_NAME} @@ -36,5 +34,3 @@ ov_build_target_faster(${TARGET_NAME} ov_developer_package_export_targets(TARGET ${TARGET_NAME} INSTALL_INCLUDE_DIRECTORIES "${PUBLIC_HEADERS_DIR}/") - -add_subdirectory(ov_builders) \ No newline at end of file diff --git a/src/tests/ov_helpers/ov_models/ov_builders/CMakeLists.txt b/src/tests/ov_helpers/ov_models/ov_builders/CMakeLists.txt deleted file mode 100644 index fa65f42d554a15..00000000000000 --- a/src/tests/ov_helpers/ov_models/ov_builders/CMakeLists.txt +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -set(TARGET_NAME ov_builders) - -set(BUILDER_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include/) - -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) -file(GLOB_RECURSE PUBLIC_HEADERS ${BUILDER_INCLUDE_DIR}/*.hpp) - -# Create named folders for the sources within the .vcproj -# Empty name lists them directly under the .vcproj - -source_group("src" FILES ${LIBRARY_SRC}) -source_group("include" FILES ${PUBLIC_HEADERS}) - -# Create static library -add_library(${TARGET_NAME} STATIC ${LIBRARY_SRC} ${PUBLIC_HEADERS}) - -add_library(openvino::builders ALIAS ${TARGET_NAME}) -set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME builders) - -ov_build_target_faster(${TARGET_NAME} UNITY) - -target_include_directories(${TARGET_NAME} PUBLIC - $ - $) - -if(NOT BUILD_SHARED_LIBS) - target_compile_definitions(${TARGET_NAME} PUBLIC OPENVINO_STATIC_LIBRARY) -endif() - -target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime) - -ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) - 
-# install & export - -ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) - -ov_developer_package_export_targets(TARGET ${TARGET_NAME} - INSTALL_INCLUDE_DIRECTORIES "${BUILDER_INCLUDE_DIR}/") \ No newline at end of file diff --git a/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/broadcast.hpp b/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/broadcast.hpp deleted file mode 100644 index 364198387fc982..00000000000000 --- a/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/broadcast.hpp +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/core/node.hpp" - -namespace ov { -namespace op { -namespace util { -Output make_broadcast(const Output& node, const Shape& target_shape, const AxisSet& broadcast_axes); - -Output make_broadcast(const Output& node, const Shape& target_shape, std::size_t start_match_axis); -} // namespace util -} // namespace op -} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/reshape.hpp b/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/reshape.hpp deleted file mode 100644 index 0e1be4a7c761df..00000000000000 --- a/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/reshape.hpp +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/core/node.hpp" - -namespace ov { -namespace op { -namespace util { -/// \brief Change shape of a value -/// -/// \param[in] value The value to be reshaped. -/// \param[in] shape The new shape. -/// -/// \return Reshape:v1 op. -std::shared_ptr reshape(const Output& value, const Shape& shape); - -/// \brief Permute axes according to specified axes_order parameter. -/// -/// \param The vlaue whose axes we want to permute. -/// \param axes_order The permutation of axes. -/// -/// \return Transpose:v1 op. -std::shared_ptr reorder_axes(const Output& value, std::vector axes_order = {}); - -/// \brief Return transposed value (with axes in reversed order). -/// -/// \param Value to transpose. -/// -/// \return Transpose:v1 op. -std::shared_ptr transpose(const Output& value); - -/// \brief Flatten a value into a 2D matrix, with a static dividing axis. -/// -/// \param The tensor to be flattened. -/// \param The axis dividing shape. -/// -/// \return The new value will be a 2D matrix representing the flattened input -/// node. 
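Note: the reshape() helper documented above (its implementation is deleted further down) special-cases a scalar target shape by squeezing every axis instead of emitting a Reshape. That branch in isolation, assuming every input dimension is size 1 (helper name hypothetical):

#include <numeric>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/op/squeeze.hpp"

// Scalar-target branch of the removed reshape() builder: squeeze all axes.
std::shared_ptr<ov::Node> to_scalar(const ov::Output<ov::Node>& value) {
    const size_t rank = value.get_shape().size();
    std::vector<int64_t> all_axes(rank);
    std::iota(all_axes.begin(), all_axes.end(), 0);
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{rank}, all_axes);
    return std::make_shared<ov::op::v0::Squeeze>(value, axes);
}
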
-std::shared_ptr flatten(const Output& value, int axis); -} // namespace util -} // namespace op -} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/ov_builders/src/reshape.cpp b/src/tests/ov_helpers/ov_models/ov_builders/src/reshape.cpp deleted file mode 100644 index 64886c9f1a4a90..00000000000000 --- a/src/tests/ov_helpers/ov_models/ov_builders/src/reshape.cpp +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ov_models/ov_builders/reshape.hpp" - -#include "openvino/op/add.hpp" -#include "openvino/op/broadcast.hpp" -#include "openvino/op/concat.hpp" -#include "openvino/op/constant.hpp" -#include "openvino/op/convert.hpp" -#include "openvino/op/range.hpp" -#include "openvino/op/reduce_prod.hpp" -#include "openvino/op/reshape.hpp" -#include "openvino/op/shape_of.hpp" -#include "openvino/op/squeeze.hpp" -#include "openvino/op/strided_slice.hpp" -#include "openvino/op/subtract.hpp" -#include "openvino/op/transpose.hpp" -#include "openvino/op/variadic_split.hpp" - -namespace ov { -namespace op { -namespace util { -std::shared_ptr reshape(const Output& value, const Shape& shape) { - if (value.get_partial_shape().same_scheme(shape)) { - return value.get_node_shared_ptr(); - } else if (is_scalar(shape)) { - auto value_rank = value.get_shape().size(); - AxisVector axes_vector(value_rank); - std::iota(axes_vector.begin(), axes_vector.end(), 0); - auto axes = ov::op::v0::Constant::create(ov::element::i64, Shape{value_rank}, axes_vector); - return std::make_shared(value, axes); - } else { - auto out_pattern = ov::op::v0::Constant::create(ov::element::i64, - Shape{shape.size()}, - std::vector(shape.begin(), shape.end())); - - return std::make_shared(value, out_pattern, false); - } -} - -std::shared_ptr reorder_axes(const Output& value, std::vector axes_order) { - const auto axes_order_const = - ov::op::v0::Constant::create(ov::element::i64, - Shape{axes_order.size()}, - std::vector(axes_order.begin(), axes_order.end())); - return std::make_shared(value, axes_order_const); -} - -std::shared_ptr transpose(const Output& value) { - // This part is left to preserve backward compatibility and ensure passing ONNX tests. - if (value.get_partial_shape().is_static()) { - std::vector axes_order(value.get_shape().size()); - std::iota(begin(axes_order), end(axes_order), 0); - std::reverse(begin(axes_order), end(axes_order)); - return reorder_axes(value, axes_order); - } - - const auto input_rank = std::make_shared(std::make_shared(value)); - const auto neg_one = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {-1}); - const auto start_node = std::make_shared(input_rank, neg_one); - const auto reverse_axes_order = std::make_shared(reshape(start_node, Shape{}), // start - neg_one, // stop (exclusive) - neg_one); // step - return std::make_shared(value, reverse_axes_order); -} - -namespace { -/// -/// \brief Return the node representing normalized axis with respect to -/// provided rank. -/// -/// \param[in] node_rank The node representing rank used for normalization. -/// \param[in] axis The axis value to be normalized. -/// -/// \return The new Constant node representing normalized axis value. -/// -std::shared_ptr get_normalized_axis_node(const std::shared_ptr node_rank, int64_t axis) { - auto axis_node = ov::op::v0::Constant::create(ov::element::i64, Shape{1}, {axis}); - // shortcut for already positive value - if (axis >= 0) { - return axis_node; - } - - // TODO: What if axis value is beyond acceptable values? 
[-node_rank, - // node_rank-1] - return std::make_shared(node_rank, axis_node); -} -} // namespace - -std::shared_ptr flatten(const Output& value, int axis) { - // First dimension of output tensor is the product of [d_0, ... d_{axis-1}] dimensions of - // input tensor. The last dimension is the product of the rest of input tensor dimensions: - // [d_{axis}, ..., d_n] - std::shared_ptr output_shape; - if (axis == 0) { - output_shape = ov::op::v0::Constant::create(ov::element::i64, Shape{2}, {1, -1}); - } else if (axis == 1) { - output_shape = ov::op::v0::Constant::create(ov::element::i64, Shape{2}, {0, -1}); - } else { - const auto value_shape = std::make_shared(value); - const auto value_rank = std::make_shared(value_shape); - const auto axis_node = get_normalized_axis_node(value_rank, axis); - - const auto first_part_dims = - std::make_shared(value_shape, - ov::op::v0::Constant::create(ov::element::i64, {1}, {0}), - axis_node, - std::vector{0}, - std::vector{0}); - const auto first_part_dims_length = - std::make_shared(first_part_dims, - ov::op::v0::Constant::create(ov::element::i64, {}, {0}), - true); - - const auto remaining_part_length = ov::op::v0::Constant::create(ov::element::i64, {1}, {-1}); - - output_shape = - std::make_shared(ov::OutputVector{first_part_dims_length, remaining_part_length}, 0); - } - return std::make_shared(value, output_shape, true); -} -} // namespace util -} // namespace op -} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/src/utils/ov_helpers.cpp b/src/tests/ov_helpers/ov_models/src/utils/ov_helpers.cpp index a4734f24180739..2b92b45c1521c8 100644 --- a/src/tests/ov_helpers/ov_models/src/utils/ov_helpers.cpp +++ b/src/tests/ov_helpers/ov_models/src/utils/ov_helpers.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -10,8 +10,8 @@ #include #include "backend.hpp" +#include "common_test_utils/specialize_function.hpp" #include "common_test_utils/test_enums.hpp" -#include "ngraph/specialize_function.hpp" #include "openvino/core/node.hpp" #include "openvino/op/tensor_iterator.hpp" #include "openvino/op/util/attr_types.hpp" @@ -226,9 +226,7 @@ std::shared_ptr foldFunction(const std::shared_ptr& functi } } - OPENVINO_SUPPRESS_DEPRECATED_START; - const auto& foldedFunc = ngraph::specialize_function(function, paramElementTypes, paramShapes, inBuffers); - OPENVINO_SUPPRESS_DEPRECATED_END; + const auto& foldedFunc = ov::test::utils::specialize_function(function, paramElementTypes, paramShapes, inBuffers); ov::pass::ConstantFolding().run_on_model(foldedFunc); for (const auto& op : foldedFunc->get_ops()) { OPENVINO_ASSERT(ov::op::util::is_constant(op) || ov::op::util::is_output(op) || ov::op::util::is_parameter(op), diff --git a/src/tests/test_utils/common_test_utils/CMakeLists.txt b/src/tests/test_utils/common_test_utils/CMakeLists.txt index 10caf4fb836540..0eca7bc46b9bd0 100644 --- a/src/tests/test_utils/common_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/common_test_utils/CMakeLists.txt @@ -31,7 +31,6 @@ function(add_common_utils ADD_TARGET_NAME) ov_models openvino::runtime openvino::runtime::dev - openvino::builders PRIVATE openvino::util openvino::shape_inference diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/broadcast.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/broadcast.hpp new file mode 100644 index 00000000000000..c3e9cb4ae2cd07 --- /dev/null +++ 
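Note: callers migrate from ngraph::specialize_function to the relocated helper one-for-one, as the foldFunction hunk above shows. The call shape, sketched with illustrative types and shapes:

#include "common_test_utils/specialize_function.hpp"

// One entry per model parameter: element type, (possibly refined) shape,
// and an optional raw value buffer; nullptr leaves the parameter in place.
std::shared_ptr<ov::Model> fold(const std::shared_ptr<ov::Model>& model) {
    return ov::test::utils::specialize_function(model,
                                                {ov::element::f32},
                                                {ov::PartialShape{1, 3, 224, 224}},
                                                {nullptr});
}
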
b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/broadcast.hpp @@ -0,0 +1,19 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/node.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_broadcast(const Output& node, + const Shape& target_shape, + const AxisSet& broadcast_axes); + +std::shared_ptr make_broadcast(const Output& node, const Shape& target_shape, std::size_t start_match_axis); +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reshape.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reshape.hpp new file mode 100644 index 00000000000000..3c13af77d110ca --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reshape.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/node.hpp" + +namespace ov { +namespace test { +namespace utils { +/// \brief Change shape of a value +/// +/// \param[in] value The value to be reshaped. +/// \param[in] shape The new shape. +/// +/// \return Reshape:v1 op. +std::shared_ptr reshape(const Output& value, const Shape& shape); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp index 0bd6140e2133ad..ade6aac7aee97a 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp @@ -81,6 +81,8 @@ void compare(const ov::Tensor& expected, const ov::Tensor& actual, const double abs_threshold = std::numeric_limits::max(), const double rel_threshold = std::numeric_limits::max()); + +void compare_str(const ov::Tensor& expected, const ov::Tensor& actual); } // namespace utils } // namespace test } // namespace ov diff --git a/src/core/include/ngraph/specialize_function.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/specialize_function.hpp similarity index 78% rename from src/core/include/ngraph/specialize_function.hpp rename to src/tests/test_utils/common_test_utils/include/common_test_utils/specialize_function.hpp index 97945a93e84016..15686cc4492526 100644 --- a/src/core/include/ngraph/specialize_function.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/specialize_function.hpp @@ -1,27 +1,16 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once -#if !defined(IN_OV_COMPONENT) && !defined(NGRAPH_LEGACY_HEADER_INCLUDED) -# define NGRAPH_LEGACY_HEADER_INCLUDED -# ifdef _MSC_VER -# pragma message( \ - "The nGraph API is deprecated and will be removed in the 2024.0 release. For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# else -# warning("The nGraph API is deprecated and will be removed in the 2024.0 release. 
For instructions on transitioning to the new API, please refer to https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -# endif -#endif - -#include - -#include "openvino/core/deprecated.hpp" #include "openvino/core/model.hpp" #include "openvino/core/shape.hpp" #include "openvino/core/type.hpp" -namespace ngraph { +namespace ov { +namespace test { +namespace utils { /// \brief Creates a "specialized" clone of a function. The partial shapes and element types of /// the function's parameters may be narrowed to more specific shapes and element type /// and constant values may optionally be substituted for any or all of the parameters. @@ -48,7 +37,7 @@ namespace ngraph { /// param2: {?,?,4} /// ``` /// -/// Shape specialization would allow us to create a clone of f where the shapes are (for +/// ov::Shape specialization would allow us to create a clone of f where the shapes are (for /// example): /// /// ``` @@ -95,7 +84,7 @@ namespace ngraph { /// same as the number of f's parameters. /// 2. Each shape in parameter_shapes is a refinement of the shape of the corresponding /// parameter of f. Roughly speaking, a shape s1 is said to "refine" s2 if s1 can be -/// obtained from s2 by filling in s2's question marks. See PartialShape::refines for +/// obtained from s2 by filling in s2's question marks. See ov::PartialShape::refines for /// more details. /// 3. For all i, either the element type of fp_i is dynamic, or fp_i is the same as /// parameter_element_types[i]. (Here fp_i is the ith parameter of f.) @@ -104,12 +93,10 @@ namespace ngraph { /// which a Constant node with element type parameter_element_types[i] and shape /// parameter_shapes[i] can be created. /// -OPENVINO_DEPRECATED("The nGraph API is deprecated and will be removed in the 2024.0 release. 
" - "For instructions on transitioning to the new API, please refer to " - "https://docs.openvino.ai/latest/openvino_2_0_transition_guide.html") -OPENVINO_API -std::shared_ptr specialize_function(std::shared_ptr f, +std::shared_ptr specialize_function(std::shared_ptr model, const std::vector& parameter_element_types, const std::vector& parameter_shapes, const std::vector& parameter_values); -} // namespace ngraph +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp index 5b4fa456fb7e5c..b5e4c2d116b528 100644 --- a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp +++ b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/ov_tensor_utils.hpp" #include "gtest/gtest.h" -#include "ie_common.h" #include "openvino/op/constant.hpp" #include "openvino/op/loop.hpp" #include "openvino/op/result.hpp" @@ -1024,7 +1023,7 @@ AccuracyCheckResult accuracy_check(const std::shared_ptr& ref_functio return AccuracyCheckResult{true, ""}; } try { - IE_ASSERT(ref_function->get_parameters().size() == cur_function->get_parameters().size()); + OPENVINO_ASSERT(ref_function->get_parameters().size() == cur_function->get_parameters().size()); std::map, ov::Tensor> ref_input_data; std::map, ov::Tensor> cur_input_data; @@ -1038,7 +1037,7 @@ AccuracyCheckResult accuracy_check(const std::shared_ptr& ref_functio auto ref_outputs = ngraph::helpers::interpretFunction(ref_function, ref_input_data); auto outputs = ngraph::helpers::interpretFunction(cur_function, cur_input_data); - IE_ASSERT(ref_outputs.size() == outputs.size()); + OPENVINO_ASSERT(ref_outputs.size() == outputs.size()); for (int i = 0; i < ref_outputs.size(); i++) { ov::test::utils::compare(ref_outputs[i], outputs[i], abs_threshold, rel_threshold); diff --git a/src/tests/ov_helpers/ov_models/ov_builders/src/broadcast.cpp b/src/tests/test_utils/common_test_utils/src/node_builders/broadcast.cpp similarity index 74% rename from src/tests/ov_helpers/ov_models/ov_builders/src/broadcast.cpp rename to src/tests/test_utils/common_test_utils/src/node_builders/broadcast.cpp index ef141f1b25988e..e282821c19fedf 100644 --- a/src/tests/ov_helpers/ov_models/ov_builders/src/broadcast.cpp +++ b/src/tests/test_utils/common_test_utils/src/node_builders/broadcast.cpp @@ -2,8 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/ov_builders/broadcast.hpp" +#include "common_test_utils/node_builders/broadcast.hpp" +#include "common_test_utils/node_builders/reshape.hpp" #include "openvino/op/add.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/constant.hpp" @@ -11,11 +12,10 @@ #include "openvino/op/range.hpp" #include "openvino/op/reshape.hpp" #include "openvino/op/shape_of.hpp" -#include "ov_models/ov_builders/reshape.hpp" namespace ov { -namespace op { -namespace util { +namespace test { +namespace utils { namespace { /// /// \brief Reconstructs axes mapping vector for Broadcast:v1 operation. 
@@ -48,14 +48,14 @@ Output<Node> get_axes_mapping_output(const Shape& output_shape, const AxisSe
     return ov::op::v0::Constant::create(ov::element::i64, Shape{axes_mapping.size()}, axes_mapping);
 }
 
-static Output<Node> get_axes_mapping_output(const PartialShape& output_shape,
-                                            const Output<Node>& input_shape,
-                                            std::size_t start_match_axis) {
+Output<Node> get_axes_mapping_output(const PartialShape& output_shape,
+                                     const Output<Node>& input_shape,
+                                     std::size_t start_match_axis) {
     const auto one_node = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {1});
     const auto zero_node = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {0});
     const auto start_match_axis_node = ov::op::v0::Constant::create(ov::element::i64, Shape{}, {start_match_axis});
     const auto target_shape_rank_node =
-        ov::op::util::reshape(std::make_shared<ov::op::v0::ShapeOf>(input_shape), Shape{});
+        ov::test::utils::reshape(std::make_shared<ov::op::v0::ShapeOf>(input_shape), Shape{});
     const auto range_node =
         std::make_shared<ov::op::v4::Range>(zero_node, target_shape_rank_node, one_node, element::i64);
@@ -65,27 +65,28 @@
         std::make_shared<ov::op::v0::Convert>(range_node, start_match_axis_node->get_element_type());
     // end of workaround
 
-    const auto result = std::make_shared<ov::op::v1::Add>(range_node_converted, start_match_axis_node);
-    return result;
+    return std::make_shared<ov::op::v1::Add>(range_node_converted, start_match_axis_node);
 }
 }  // namespace
 
-Output<Node> make_broadcast(const Output<Node>& node,
-                            const Shape& target_shape,
-                            const AxisSet& broadcast_axes) {
+std::shared_ptr<ov::Node> make_broadcast(const Output<Node>& node,
+                                         const Shape& target_shape,
+                                         const AxisSet& broadcast_axes) {
     return std::make_shared<ov::op::v1::Broadcast>(
         node,
         ov::op::v0::Constant::create(ov::element::i64, Shape{target_shape.size()}, target_shape),
         get_axes_mapping_output(target_shape, broadcast_axes));
 }
 
-Output<Node> make_broadcast(const Output<Node>& node, const Shape& target_shape, size_t start_match_axis) {
+std::shared_ptr<ov::Node> make_broadcast(const Output<Node>& node,
+                                         const Shape& target_shape,
+                                         size_t start_match_axis) {
     const auto node_shape = std::make_shared<ov::op::v0::ShapeOf>(node);
     return std::make_shared<ov::op::v1::Broadcast>(
         node,
         ov::op::v0::Constant::create(ov::element::i64, Shape{target_shape.size()}, target_shape),
         get_axes_mapping_output(target_shape, node_shape, start_match_axis));
 }
-}  // namespace util
-}  // namespace op
+}  // namespace utils
+}  // namespace test
 }  // namespace ov
diff --git a/src/tests/test_utils/common_test_utils/src/node_builders/reshape.cpp b/src/tests/test_utils/common_test_utils/src/node_builders/reshape.cpp
new file mode 100644
index 00000000000000..7ea8196f39eaf0
--- /dev/null
+++ b/src/tests/test_utils/common_test_utils/src/node_builders/reshape.cpp
@@ -0,0 +1,44 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "common_test_utils/node_builders/reshape.hpp"
+
+#include "openvino/op/add.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/concat.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/range.hpp"
+#include "openvino/op/reduce_prod.hpp"
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/squeeze.hpp"
+#include "openvino/op/strided_slice.hpp"
+#include "openvino/op/subtract.hpp"
+#include "openvino/op/transpose.hpp"
+#include "openvino/op/variadic_split.hpp"
+
+namespace ov {
+namespace test {
+namespace utils {
+std::shared_ptr<ov::Node> reshape(const Output<Node>& value, const Shape& shape) {
+    if (value.get_partial_shape().same_scheme(shape)) {
+        return value.get_node_shared_ptr();
+    } else if (is_scalar(shape)) {
+        auto value_rank = value.get_shape().size();
+        AxisVector axes_vector(value_rank);
+        std::iota(axes_vector.begin(), axes_vector.end(), 0);
+        auto axes = ov::op::v0::Constant::create(ov::element::i64, Shape{value_rank}, axes_vector);
+        return std::make_shared<ov::op::v0::Squeeze>(value, axes);
+    } else {
+        auto out_pattern = ov::op::v0::Constant::create(ov::element::i64,
+                                                        Shape{shape.size()},
+                                                        std::vector<int64_t>(shape.begin(), shape.end()));
+
+        return std::make_shared<ov::op::v1::Reshape>(value, out_pattern, false);
+    }
+}
+}  // namespace utils
+}  // namespace test
+}  // namespace ov
diff --git a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp
index ee85e0c88cebad..832bb6b618f5f7 100644
--- a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp
+++ b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp
@@ -6,6 +6,7 @@
 #include "common_test_utils/data_utils.hpp"
 #include "openvino/core/type/element_type_traits.hpp"
+#include "openvino/op/constant.hpp"
 #include "precomp.hpp"
 
 namespace ov {
@@ -470,6 +471,16 @@
     }
 }
 
+void compare_str(const ov::Tensor& expected, const ov::Tensor& actual) {
+    ASSERT_EQ(expected.get_element_type(), ov::element::string);
+    ASSERT_EQ(actual.get_element_type(), ov::element::string);
+    EXPECT_EQ(expected.get_shape(), actual.get_shape());
+
+    const auto expected_const = ov::op::v0::Constant(expected);
+    const auto result_const = ov::op::v0::Constant(actual);
+    EXPECT_EQ(expected_const.get_value_strings(), result_const.get_value_strings());
+}
+
 void compare(const ov::Tensor& expected,
              const ov::Tensor& actual,
              const double abs_threshold,
@@ -527,6 +538,9 @@
     CASE(ov::element::Type_t::u16)
     CASE(ov::element::Type_t::u32)
     CASE(ov::element::Type_t::u64)
+    case ov::element::Type_t::string:
+        compare_str(expected, actual);
+        break;
     default:
         OPENVINO_THROW("Unsupported element type: ", expected.get_element_type());
     }
diff --git a/src/tests/test_utils/common_test_utils/src/specialize_function.cpp b/src/tests/test_utils/common_test_utils/src/specialize_function.cpp
new file mode 100644
index 00000000000000..b27c724c5398af
--- /dev/null
+++ b/src/tests/test_utils/common_test_utils/src/specialize_function.cpp
@@ -0,0 +1,96 @@
+// Copyright (C) 2018-2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "common_test_utils/specialize_function.hpp"
+
+#include "openvino/op/constant.hpp"
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/util/op_types.hpp"
+
+namespace ov {
+namespace test {
+namespace utils {
+std::shared_ptr<ov::Model> specialize_function(std::shared_ptr<ov::Model> model,
+                                               const std::vector<ov::element::Type>& parameter_element_types,
+                                               const std::vector<ov::PartialShape>& parameter_shapes,
+                                               const std::vector<void*>& parameter_values) {
+    OPENVINO_ASSERT(model->get_parameters().size() == parameter_shapes.size());
+    OPENVINO_ASSERT(model->get_parameters().size() == parameter_element_types.size());
+    OPENVINO_ASSERT(model->get_parameters().size() == parameter_values.size());
+
+    std::unordered_map<ov::Node*, std::shared_ptr<ov::Node>> nodes;
+
+    for (size_t i = 0; i < parameter_shapes.size(); i++) {
+        OPENVINO_ASSERT(model->get_parameters()[i]->get_element_type().is_dynamic() ||
+                        parameter_element_types[i] == model->get_parameters()[i]->get_element_type());
+
+        if (parameter_values[i] != nullptr && parameter_shapes[i].is_static() &&
+            parameter_element_types[i].is_static()) {
+            nodes[model->get_parameters()[i].get()] = std::make_shared<ov::op::v0::Constant>(parameter_element_types[i],
+                                                                                             parameter_shapes[i].to_shape(),
+                                                                                             parameter_values[i]);
+        } else {
+            nodes[model->get_parameters()[i].get()] =
+                std::make_shared<ov::op::v0::Parameter>(parameter_element_types[i], parameter_shapes[i]);
+        }
+        auto rt_info = model->get_parameters()[i]->get_rt_info();
+        nodes[model->get_parameters()[i].get()]->get_rt_info() = rt_info;
+    }
+
+    for (auto old_node : model->get_ordered_ops()) {
+        if (op::util::is_parameter(old_node)) {
+            continue;
+        }
+
+        OutputVector new_args;
+        for (auto input : old_node->inputs()) {
+            auto output = input.get_source_output();
+            new_args.push_back(output.for_node(nodes[output.get_node()]));
+        }
+
+        NodeVector cloned_dependencies;
+        for (auto& dependency : old_node->get_control_dependencies()) {
+            std::shared_ptr<ov::Node> dependent = nodes.at(dependency.get());
+            if (find(cloned_dependencies.begin(), cloned_dependencies.end(), dependent) == cloned_dependencies.end()) {
+                cloned_dependencies.push_back(dependent);
+            }
+        }
+        nodes[old_node.get()] = old_node->copy_with_new_inputs(new_args, cloned_dependencies);
+
+        auto rt_info = old_node->get_rt_info();
+        nodes[old_node.get()]->get_rt_info() = rt_info;
+
+        nodes[old_node.get()]->set_friendly_name(old_node->get_friendly_name());
+    }
+
+    ParameterVector new_parameters = model->get_parameters();
+    for (size_t i = 0; i < new_parameters.size(); i++) {
+        auto name = new_parameters[i]->get_friendly_name();
+        new_parameters[i] = as_type_ptr<op::v0::Parameter>(nodes[new_parameters[i].get()]);
+
+        // If the replacement for a Parameter is not itself a Parameter, we must have replaced it
+        // with a constant. We will insert a dead Parameter into the clone's parameters, in order
+        // to maintain the arity of the original function.
+        if (new_parameters[i] == nullptr) {
+            new_parameters[i] = std::make_shared<ov::op::v0::Parameter>(parameter_element_types[i], parameter_shapes[i]);
+        }
+        new_parameters[i]->set_friendly_name(name);
+    }
+
+    ResultVector new_results = model->get_results();
+    for (size_t i = 0; i < new_results.size(); i++) {
+        auto name = new_results[i]->get_friendly_name();
+        new_results[i] = std::static_pointer_cast<ov::op::v0::Result>(nodes[new_results[i].get()]);
+        new_results[i]->set_friendly_name(name);
+    }
+    auto new_sinks = model->get_sinks();
+    for (size_t i = 0; i < new_sinks.size(); i++) {
+        new_sinks[i] = std::static_pointer_cast<ov::op::Sink>(nodes[new_sinks[i].get()]);
+    }
+
+    return std::make_shared<ov::Model>(new_results, new_sinks, new_parameters);
+}
+}  // namespace utils
+}  // namespace test
+}  // namespace ov
diff --git a/src/core/tests/specialize_function.cpp b/src/tests/test_utils/common_test_utils/tests/specialize_function.cpp
similarity index 99%
rename from src/core/tests/specialize_function.cpp
rename to src/tests/test_utils/common_test_utils/tests/specialize_function.cpp
index a17ed00cee7cf4..e8eb4bc4f565c7 100644
--- a/src/core/tests/specialize_function.cpp
+++ b/src/tests/test_utils/common_test_utils/tests/specialize_function.cpp
@@ -2,17 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "ngraph/specialize_function.hpp"
+#include "common_test_utils/specialize_function.hpp"
 
 #include "gtest/gtest.h"
 #include "openvino/op/add.hpp"
 #include "openvino/op/constant.hpp"
 #include "openvino/op/convert.hpp"
 
-using namespace ngraph;
 using namespace ov;
-OPENVINO_SUPPRESS_DEPRECATED_START;
+using namespace ov::test::utils;
 
+using ov::Shape;
 using ov::op::v0::Constant;
 using ov::op::v0::Convert;
 using ov::op::v0::Parameter;
diff --git a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp
index 2bf8fa2285d649..ed45a216537072 100644
--- a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp
+++ b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp
@@ -15,7 +15,6 @@
 #include "blob_factory.hpp"
 #include "common_test_utils/data_utils.hpp"
 #include "common_test_utils/test_constants.hpp"
-#include "ie_ngraph_utils.hpp"
 #include "openvino/runtime/common.hpp"
 
 namespace FuncTestUtils {
diff --git a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp
index 1585517b3a7fd4..38ba8b8ea515f3 100644
--- a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp
+++ b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/precision_utils.hpp
@@ -59,4 +59,4 @@ inline ::ov::element::Type convertIE2nGraphPrc(const InferenceEngine::Precision&
 }
 
 }  // namespace PrecisionUtils
-}  // namespace FuncTestUtils
+}  // namespace FuncTestUtils
\ No newline at end of file
diff --git a/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp b/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp
index 6ca6a9e5caab65..a528596a468884 100644
--- a/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp
+++ b/src/tests/test_utils/unit_test_utils/mocks/mock_engine/mock_plugin.cpp
@@ -14,7 +14,6 @@
 #include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
 #include "description_buffer.hpp"
-#include "ie_icore.hpp"
 #include "openvino/core/except.hpp"
 #include "openvino/runtime/common.hpp"
 #include "openvino/runtime/icore.hpp"
diff --git a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp
index 013fb91ae791b5..7c73a9f4bd017b 100644
--- a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp
+++ b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp
@@ -6,8 +6,8 @@
 #include
 
-#include "ie_icore.hpp"
 #include "openvino/runtime/icompiled_model.hpp"
+#include "openvino/runtime/icore.hpp"
 
 namespace ov {
diff --git a/tests/model_hub_tests/models_hub_common/test_convert_model.py b/tests/model_hub_tests/models_hub_common/test_convert_model.py
index 40b8a56a997ee3..f2456805d2b68e 100644
--- a/tests/model_hub_tests/models_hub_common/test_convert_model.py
+++ b/tests/model_hub_tests/models_hub_common/test_convert_model.py
@@ -3,18 +3,13 @@
 import gc
 
 import numpy as np
+# noinspection PyUnresolvedReferences
+import openvino_tokenizers  # do not delete, needed for text models
 from models_hub_common.multiprocessing_utils import multiprocessing_run
 from models_hub_common.utils import compare_two_tensors
 from openvino import convert_model
 from openvino.runtime import Core
 
-try:
-    # 129480 - remove try-except when openvino-tokenizers wheel is built in OpenVINO GHA Workflow
-    # noinspection PyUnresolvedReferences
-    import openvino_tokenizers  # do not delete, needed for text models
-except:
-    pass
-
 # set seed to have deterministic input data generation
 # to avoid sporadic issues in inference results
 rng = np.random.default_rng(seed=56190)
diff --git a/tests/model_hub_tests/tf_hub_tests/precommit_models b/tests/model_hub_tests/tf_hub_tests/precommit_models
index 4a5ab7f5a2ec70..e59d678d605e48 100644
--- a/tests/model_hub_tests/tf_hub_tests/precommit_models
+++ b/tests/model_hub_tests/tf_hub_tests/precommit_models
@@ -24,4 +24,4 @@ planet/vision/classifier/planet_v2,https://www.kaggle.com/models/google/planet-v
 # TF1 models in .pb format
 i3d-rgb,https://storage.openvinotoolkit.org/repositories/open_model_zoo/public/2022.1/i3d-rgb-tf/rgb.frozen.pb
 # Model with SentencePiece tokenizer, use openvino-tokenizers package
-universal-sentence-encoder-multilingual,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual/versions/2,skip, 129480 - Add openvino-tokenizers wheel build to OpenVINO GHA Workflow
\ No newline at end of file
+universal-sentence-encoder-multilingual,https://www.kaggle.com/models/google/universal-sentence-encoder/frameworks/tensorFlow2/variations/multilingual/versions/2,skip,131659 - UnicodeDecodeError utf-8 codec cannot decode byte for SentencePieceOp attribute sp_model
\ No newline at end of file
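
A minimal usage sketch of the relocated test utilities, for reference only (not part of the patch; the test name, shapes, and axis values below are illustrative assumptions). It shows the calling convention after this change: the builders resolve from ov::test::utils, make_broadcast returns std::shared_ptr<ov::Node>, and specialize_function replaces the deprecated ngraph::specialize_function.

// Illustrative only: exercises the moved helpers under the new namespaces.
#include <gtest/gtest.h>

#include "common_test_utils/node_builders/broadcast.hpp"
#include "common_test_utils/node_builders/reshape.hpp"
#include "common_test_utils/specialize_function.hpp"
#include "openvino/core/model.hpp"
#include "openvino/op/parameter.hpp"

TEST(RelocatedTestUtils, BuildAndSpecialize) {
    // Builders now live in ov::test::utils and return std::shared_ptr<ov::Node>.
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic(2));
    auto reshaped = ov::test::utils::reshape(param, ov::Shape{2, 3});
    auto broadcasted = ov::test::utils::make_broadcast(reshaped, ov::Shape{4, 2, 3}, ov::AxisSet{0});
    auto model = std::make_shared<ov::Model>(ov::OutputVector{broadcasted}, ov::ParameterVector{param});

    // specialize_function (moved from ngraph:: to ov::test::utils::) narrows the
    // dynamic parameter to a static shape; a nullptr value keeps the Parameter
    // instead of substituting a Constant for it.
    auto specialized = ov::test::utils::specialize_function(model,
                                                            {ov::element::f32},
                                                            {ov::PartialShape{2, 3}},
                                                            {nullptr});
    EXPECT_EQ(specialized->get_parameters().size(), 1u);
}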