diff --git a/.github/workflows/android_arm64.yml b/.github/workflows/android_arm64.yml index 3b52ba7e72cc11..6e60051cd0c0ab 100644 --- a/.github/workflows/android_arm64.yml +++ b/.github/workflows/android_arm64.yml @@ -5,7 +5,7 @@ on: merge_group: push: branches: - - master + # - master - 'releases/**' concurrency: diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml index 53d34680b62b5b..de7d53265ca2e3 100644 --- a/.github/workflows/coverity.yml +++ b/.github/workflows/coverity.yml @@ -146,3 +146,15 @@ jobs: --form description="https://github.com/openvinotoolkit/openvino/runs/${{ github.run_number }}" \ https://scan.coverity.com/builds?project=openvino popd + + - name: Show Coverity configure logs + continue-on-error: true + run: cov-configure -c ${COVERITY_TOOL_DIR}/cov-analysis-linux64-2023.6.2/config/coverity_config.xml -lscc text + + - name: Upload Coverity logs + uses: actions/upload-artifact@v3 + if: always() + with: + name: coverity_logs + path: ${{ env.BUILD_DIR }}/cov-int/build-log.txt + if-no-files-found: 'error' diff --git a/.github/workflows/fedora.yml b/.github/workflows/fedora.yml index 0a8298e5a17497..04e7e36fd63b86 100644 --- a/.github/workflows/fedora.yml +++ b/.github/workflows/fedora.yml @@ -5,7 +5,7 @@ on: merge_group: push: branches: - - master + # - master - 'releases/**' concurrency: diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index 362c36a50504e3..7f6ec8a70f7590 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -5,7 +5,7 @@ on: merge_group: push: branches: - - master + # - master - 'releases/**' concurrency: diff --git a/.github/workflows/webassembly.yml b/.github/workflows/webassembly.yml index f7ba021b2472d8..250b9f549a6ec2 100644 --- a/.github/workflows/webassembly.yml +++ b/.github/workflows/webassembly.yml @@ -5,7 +5,7 @@ on: merge_group: push: branches: - - master + # - master - 'releases/**' concurrency: diff --git 
a/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake b/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake index da543c910736dd..6ead2897e09843 100644 --- a/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake +++ b/cmake/developer_package/cross_compile/cross_compiled_disp_gen.cmake @@ -7,7 +7,7 @@ # Generates cpp file with dispatcher for cross compiled function # Parameters: # XARCH_API_HEADER -- path to header with function declaration -# XARCH_FUNC_NAME -- name of function to dispatch +# XARCH_FUNC_NAMES -- names of functions to dispatch # XARCH_NAMESPACES -- full namespace used to keep ODR # XARCH_DISP_FILE -- dispatcher file name to generate # XARCH_SET -- set of ARCH supported by dispatcher. semicolon-delimited @@ -21,9 +21,6 @@ set(_CPU_CHECK_AVX2 "with_cpu_x86_avx2()") set(_CPU_CHECK_AVX512F "with_cpu_x86_avx512f()") function(_generate_dispatcher) - _find_signature_in_file(${XARCH_API_HEADER} ${XARCH_FUNC_NAME} SIGNATURE) - _generate_call_line_from_signature("${SIGNATURE}" CALL_LINE) - string(REPLACE "::" ";" XARCH_NAMESPACES "${XARCH_NAMESPACES}") list(GET XARCH_NAMESPACES -1 XARCH_CURRENT_NAMESPACE) @@ -46,20 +43,25 @@ function(_generate_dispatcher) "namespace ${_namespace} {\n") endforeach() - foreach(_arch ${XARCH_SET}) - string(APPEND DISP_CONTENT - "namespace ${_arch} {\n ${SIGNATURE}\; \n}\n") - endforeach() + foreach(_func_name ${XARCH_FUNC_NAMES}) + _find_signature_in_file(${XARCH_API_HEADER} ${_func_name} SIGNATURE) + _generate_call_line_from_signature("${SIGNATURE}" CALL_LINE) - string(APPEND DISP_CONTENT - "namespace ${XARCH_CURRENT_NAMESPACE} {\n\n${SIGNATURE} {\n") + foreach(_arch ${XARCH_SET}) + string(APPEND DISP_CONTENT + "namespace ${_arch} {\n ${SIGNATURE}\; \n}\n") + endforeach() - foreach(_arch ${XARCH_SET}) string(APPEND DISP_CONTENT - " if (${_CPU_CHECK_${_arch}}) {\n return ${_arch}::${CALL_LINE}\;\n }\n") - endforeach() + "namespace ${XARCH_CURRENT_NAMESPACE} {\n\n${SIGNATURE} {\n") + 
+ foreach(_arch ${XARCH_SET}) + string(APPEND DISP_CONTENT + " if (${_CPU_CHECK_${_arch}}) {\n return ${_arch}::${CALL_LINE}\;\n }\n") + endforeach() - string(APPEND DISP_CONTENT "}\n\n}\n") + string(APPEND DISP_CONTENT "}\n\n}\n") + endforeach() foreach(_namespace ${PARENT_NAMESPACES}) string(APPEND DISP_CONTENT "} // namespace ${_namespace}\n") diff --git a/cmake/developer_package/cross_compile/cross_compiled_func.cmake b/cmake/developer_package/cross_compile/cross_compiled_func.cmake index c36cbe6762d9a0..51aa62014cc310 100644 --- a/cmake/developer_package/cross_compile/cross_compiled_func.cmake +++ b/cmake/developer_package/cross_compile/cross_compiled_func.cmake @@ -53,14 +53,15 @@ set(DISPATCHER_GEN_OPTIONS_HOLDER ${CMAKE_CURRENT_LIST_DIR}/cross_compiled_disp_ # AVX512F # API # NAMESPACE # like "IE::Ext::CPU::XARCH" -# NAME # like "my_fun" +# NAME # like "my_fun1 my_fun2" # ) # function(cross_compiled_file TARGET) set(oneValueArgs API ## Header with declaration of cross compiled function NAMESPACE ## The namespace where cross compiled function was declared - NAME) ## String with function signature to make cross compiled - set(multiValueArgs ARCH) ## List of architecture described in _ARCH_LIST + ) + set(multiValueArgs NAME ## String with function signatures to make cross compiled + ARCH) ## List of architecture described in _ARCH_LIST cmake_parse_arguments(X "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) ## verification @@ -92,7 +93,7 @@ function(cross_compiled_file TARGET) endif() endforeach() - _add_dispatcher_to_target(${TARGET} ${X_API} ${X_NAME} "${X_NAMESPACE}" "${_FULL_ARCH_SET}") + _add_dispatcher_to_target(${TARGET} ${X_API} "${X_NAME}" "${X_NAMESPACE}" "${_FULL_ARCH_SET}") endfunction() @@ -155,7 +156,7 @@ function(_add_dispatcher_to_target TARGET HEADER FUNC_NAME NAMESPACE ARCH_SET) add_custom_command( OUTPUT ${DISPATCHER_SOURCE} COMMAND ${CMAKE_COMMAND} - -D "XARCH_FUNC_NAME=${X_NAME}" + -D "XARCH_FUNC_NAMES=${X_NAME}" -D 
"XARCH_NAMESPACES=${NAMESPACE}" -D "XARCH_API_HEADER=${CMAKE_CURRENT_SOURCE_DIR}/${HEADER}" -D "XARCH_DISP_FILE=${CMAKE_CURRENT_BINARY_DIR}/${DISPATCHER_SOURCE}" diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Swish_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Swish_4.rst index bcbf32234356ac..51c7537399c0bd 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Swish_4.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/activation/Swish_4.rst @@ -5,7 +5,7 @@ Swish .. meta:: - :description: Learn about Swish-4 - an element-wise, activation operation, which + :description: Learn about Swish-4 - an element-wise, activation operation, which can be performed on a single tensor in OpenVINO. **Versioned name**: *Swish-4* @@ -55,7 +55,7 @@ Example: Second input ``beta`` provided 256 56 - < !-- beta value: 2.0 --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/CumSum_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/CumSum_3.rst index a6270e77ad8f49..5a1626d8db60c6 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/CumSum_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/CumSum_3.rst @@ -5,7 +5,7 @@ CumSum .. meta:: - :description: Learn about CumSum-3 - an element-wise, arithmetic operation, which + :description: Learn about CumSum-3 - an element-wise, arithmetic operation, which can be performed on a single tensor in OpenVINO. 
**Versioned name**: *CumSum-3* @@ -24,7 +24,7 @@ To perform the summation in the opposite direction of the axis, set reverse attr * **Description**: If the attribute is set to ``true``, then exclusive sums are returned, the ``j-th`` element is not included in the ``j-th`` sum. Otherwise, the inclusive sum of the first ``j`` elements for the ``j-th`` element is calculated. * **Range of values**: - + * ``false`` - include the top element * ``true`` - do not include the top element * **Type**: ``boolean`` @@ -35,7 +35,7 @@ To perform the summation in the opposite direction of the axis, set reverse attr * **Description**: If set to ``true`` will perform the sums in reverse direction. * **Range of values**: - + * ``false`` - do not perform sums in reverse direction * ``true`` - perform sums in reverse direction * **Type**: ``boolean`` @@ -63,16 +63,16 @@ To perform the summation in the opposite direction of the axis, set reverse attr .. code-block:: xml :force: - + - < !-- input value is: [1., 2., 3., 4., 5.] --> + 5 - < !-- axis value is: 0 --> + - < !-- output value is: [1., 3., 6., 10., 15.] --> + 5 @@ -82,16 +82,16 @@ To perform the summation in the opposite direction of the axis, set reverse attr .. code-block:: xml :force: - + - < !-- input value is: [1., 2., 3., 4., 5.] --> + 5 - < !-- axis value is: 0 --> + - < !-- output value is: [0., 1., 3., 6., 10.] --> + 5 @@ -101,16 +101,16 @@ To perform the summation in the opposite direction of the axis, set reverse attr .. code-block:: xml :force: - + - < !-- input value is: [1., 2., 3., 4., 5.] --> + 5 - < !-- axis value is: 0 --> + - < !-- output value is: [15., 14., 12., 9., 5.] --> + 5 @@ -120,7 +120,7 @@ To perform the summation in the opposite direction of the axis, set reverse attr .. code-block:: xml :force: - + < -- input value is: [1., 2., 3., 4., 5.] 
--> diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sqrt_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sqrt_1.rst index c90bacf4443d2a..b6d380638db5c2 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sqrt_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/arithmetic/Sqrt_1.rst @@ -5,7 +5,7 @@ Sqrt .. meta:: - :description: Learn about Sqrt-1 - an element-wise, arithmetic operation, which + :description: Learn about Sqrt-1 - an element-wise, arithmetic operation, which can be performed on a single tensor in OpenVINO. **Versioned name**: *Sqrt-1* @@ -48,12 +48,12 @@ Sqrt - 4 < !-- float input values: [4.0, 7.0, 9.0, 10.0] --> + 4 - 4 < !-- float output values: [2.0, 2.6457512, 3.0, 3.1622777] --> + 4 @@ -66,12 +66,12 @@ Sqrt - 4 < !-- int input values: [4, 7, 9, 10] --> + 4 - 4 < !-- int output values: [2, 3, 3, 3] --> + 4 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsFinite_10.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsFinite_10.rst index a72896382765e2..d38e91d1328d2c 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsFinite_10.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/comparison/IsFinite_10.rst @@ -5,7 +5,7 @@ IsFinite .. meta:: - :description: Learn about IsFinite-10 - an element-wise, comparison operation, which + :description: Learn about IsFinite-10 - an element-wise, comparison operation, which can be performed on a single tensor in OpenVINO. 
**Versioned name**: *IsFinite-10* @@ -64,12 +64,12 @@ IsFinite - 4 < !-- Input value is: [NaN, 2.1, 3.7, Inf] --> + 4  - 4 < !-- Output value is: [False, True, True, False] --> + 4  diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Select_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Select_1.rst index 272b29acfd7f47..2d093126a11c1d 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Select_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/condition/Select_1.rst @@ -5,7 +5,7 @@ Select .. meta:: - :description: Learn about Select-1 - an element-wise, condition operation, which + :description: Learn about Select-1 - an element-wise, condition operation, which can be performed on three given tensors in OpenVINO. **Versioned name**: *Select-1* @@ -58,21 +58,21 @@ Select - < !-- cond value is: [[false, false], [true, false], [true, true]] --> + 3 2 - < !-- then value is: [[-1, 0], [1, 2], [3, 4]] --> + 3 2 - < !-- else value is: [[11, 10], [9, 8], [7, 6]] --> + 3 2 - < !-- output value is: [[11, 10], [1, 8], [3, 4]] --> + 3 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/ConvolutionBackpropData_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/ConvolutionBackpropData_1.rst index cc0a265754a6af..b39df77e94b5de 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/ConvolutionBackpropData_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/convolution/ConvolutionBackpropData_1.rst @@ -5,7 +5,7 @@ ConvolutionBackpropData .. 
meta:: - :description: Learn about ConvolutionBackpropData-1 - a 1D, 2D or 3D convolution operation, which + :description: Learn about ConvolutionBackpropData-1 - a 1D, 2D or 3D convolution operation, which can be performed on input and kernel tensors in OpenVINO. **Versioned name**: *ConvolutionBackpropData-1* @@ -24,11 +24,11 @@ When output shape is specified as an input tensor ``output_shape`` then it speci .. code-block:: xml :force: - + if auto_pads != None: pads_begin[i] = 0 pads_end[i] = 0 - + Y_i = stride[i] * (X_i - 1) + ((K_i - 1) * dilations[i] + 1) - pads_begin[i] - pads_end[i] + output_padding[i] where ``K_i`` filter kernel dimension along spatial axis ``i``. @@ -37,7 +37,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a .. code-block:: xml :force: - + total_padding[i] = stride[i] * (X_i - 1) + ((K_i - 1) * dilations[i] + 1) - output_shape[i] + output_padding[i] if auto_pads != SAME_UPPER: pads_begin[i] = total_padding[i] // 2 @@ -81,7 +81,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a * *auto_pad* * **Description**: *auto_pad* has the same definition as *auto_pad* for a regular Convolution but applied in the backward way, for the output tensor. - + * *explicit*: use explicit padding values from ``pads_begin`` and ``pads_end``. * *same_upper* the input is padded to match the output size. In case of odd padding value an extra padding is added at the end. * *same_lower* the input is padded to match the output size. In case of odd padding value an extra padding is added at the beginning. @@ -105,7 +105,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a * **2**: Convolution kernel tensor of type *T1* and rank 3, 4 or 5. Layout is ``[C_INPUT, C_OUTPUT, Z, Y, X]`` (number of input channels, number of output channels, spatial axes Z, Y, X). Spatial size of the kernel is derived from the shape of this input and aren't specified by any attribute. 
**Required.** * **3**: ``output_shape`` is 1D tensor of type *T2* that specifies spatial shape of the output. If specified, *padding amount* is deduced from relation of input and output spatial shapes according to formulas in the description. If not specified, *output shape* is calculated based on the ``pads_begin`` and ``pads_end`` or completely according to ``auto_pad``. **Optional.** * **Note**: Type of the convolution (1D, 2D or 3D) is derived from the rank of the input tensors and not specified by any attribute: - + * 1D convolution (input tensors rank 3) means that there is only one spatial axis X, * 2D convolution (input tensors rank 4) means that there are two spatial axes Y, X, * 3D convolution (input tensors rank 5) means that there are three spatial axes Z, Y, X. @@ -125,7 +125,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a .. code-block:: xml :force: - + @@ -156,7 +156,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a .. code-block:: xml :force: - + @@ -187,7 +187,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a .. code-block:: xml :force: - + @@ -204,7 +204,7 @@ If ``output_shape`` is specified, ``pads_begin`` and ``pads_end`` are ignored, a 3 - 2 < !-- output_shape value is: [450, 450]--> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBoxClustered_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBoxClustered_1.rst index 9c44b148909d46..ef7ba693c1e8d0 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBoxClustered_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBoxClustered_1.rst @@ -5,7 +5,7 @@ PriorBoxClustered .. 
meta:: - :description: Learn about PriorBoxClustered-1 - an object detection operation, + :description: Learn about PriorBoxClustered-1 - an object detection operation, which can be performed on two 1D input tensors. **Versioned name**: *PriorBoxClustered-1* @@ -94,7 +94,7 @@ If *clip* is defined, the coordinates of prior boxes are recalculated with the f * *step (step_w, step_h)* - * **Description**: *step (step_w, step_h)* is a distance between box centers. For example, *step* equal 85 means that the distance between neighborhood prior boxes centers is 85. If both *step_h* and *step_w* are 0 then they are updated with value of *step*. If after that they are still 0 then they are calculated as input image width(height) divided with first input width(height). + * **Description**: *step (step_w, step_h)* is a distance between box centers. For example, *step* equal 85 means that the distance between neighborhood prior boxes centers is 85. If both *step_h* and *step_w* are 0 then they are updated with value of *step*. If after that they are still 0 then they are calculated as input image width(height) divided with first input width(height). * **Range of values**: floating-point positive number * **Type**: ``float`` * **Default value**: 0.0 @@ -139,10 +139,10 @@ If *clip* is defined, the coordinates of prior boxes are recalculated with the f - 2 < !-- [10, 19] --> + 2 - 2 < !-- [180, 320] --> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_1.rst index 6c5bb401ee1039..05955f73889aee 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_1.rst @@ -6,7 +6,7 @@ PriorBox .. 
meta:: - :description: Learn about PriorBox-1 - an object detection operation, + :description: Learn about PriorBox-1 - an object detection operation, which can be performed on two required input tensors. **Versioned name**: *PriorBox-1* @@ -22,44 +22,44 @@ PriorBox 1. First calculates *center_x* and *center_y* of prior box: .. math:: - + W \equiv Width \quad Of \quad Image \\ H \equiv Height \quad Of \quad Image - - + + * If step equals 0: - + .. math:: - + center_x=(w+0.5) \\ center_y=(h+0.5) - + * else: - + .. math:: - + center_x=(w+offset)*step \\ center_y=(h+offset)*step \\ w \subset \left( 0, W \right ) \\ h \subset \left( 0, H \right ) 2. Then, for each :math:`s \subset \left( 0, min\_sizes \right )` calculates coordinates of prior boxes: .. math:: - + xmin = \frac{\frac{center_x - s}{2}}{W} - - - + + + .. math:: - + ymin = \frac{\frac{center_y - s}{2}}{H} - - - + + + .. math:: - + xmax = \frac{\frac{center_x + s}{2}}{W} - - - + + + .. math:: - + ymin = \frac{\frac{center_y + s}{2}}{H} 3. If *clip* attribute is set to true, each output value is clipped between :math:`\left< 0, 1 \right>`. @@ -186,10 +186,10 @@ PriorBox - 2 < !-- values: [24, 42] --> + 2 - 2 < !-- values: [384, 672] --> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_8.rst index 4535ad02ca962c..e1b9e1e71ac084 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/PriorBox_8.rst @@ -5,7 +5,7 @@ PriorBox .. meta:: - :description: Learn about PriorBox-8 - an object detection operation, + :description: Learn about PriorBox-8 - an object detection operation, which can be performed on two required input tensors. 
**Versioned name**: *PriorBox-8* @@ -21,41 +21,41 @@ PriorBox 1. First, it calculates *center_x* and *center_y* of a prior box: .. math:: - + W \equiv Width \quad Of \quad Image \\ H \equiv Height \quad Of \quad Image * If step equals 0: .. math:: - + center_x=(w+0.5) \\ center_y=(h+0.5) * else: .. math:: - + center_x=(w+offset)*step \\ center_y=(h+offset)*step \\ w \subset \left( 0, W \right ) \\ h \subset \left( 0, H \right ) 2. Then, it calculates coordinates of prior boxes for each :math:`s \subset \left( 0, min\_sizes \right )` : .. math:: - + xmin = \frac{\frac{center_x - s}{2}}{W} - - - + + + .. math:: - + ymin = \frac{\frac{center_y - s}{2}}{H} - - + + .. math:: - + xmax = \frac{\frac{center_x + s}{2}}{W} - - + + .. math:: - + ymin = \frac{\frac{center_y + s}{2}}{H} 3. If *clip* attribute is set to true, each output value is clipped between :math:`\left< 0, 1 \right>`. @@ -82,7 +82,7 @@ PriorBox * **Description**: *flip* is a flag that denotes that each *aspect_ratio* is duplicated and flipped. For example, *flip* equals 1 and *aspect_ratio* equals ``[4.0,2.0]``, meaning that the aspect_ratio is equal to ``[4.0,2.0,0.25,0.5]``. * **Range of values**: - + * false or 0 - each *aspect_ratio* is flipped * true or 1 - each *aspect_ratio* is not flipped * **Type**: ``boolean`` @@ -193,10 +193,10 @@ PriorBox - 2 < !-- values: [24, 42] --> + 2 - 2 < !-- values: [384, 672] --> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/RegionYolo_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/RegionYolo_1.rst index 0466aca91977d9..f1c235ab3ef8c1 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/RegionYolo_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/detection/RegionYolo_1.rst @@ -5,7 +5,7 @@ RegionYolo .. 
meta:: - :description: Learn about RegionYolo-1 - an object detection operation, + :description: Learn about RegionYolo-1 - an object detection operation, which can be performed on a 4D input tensor. **Versioned name**: *RegionYolo-1* @@ -65,7 +65,7 @@ RegionYolo * **Description**: *do_softmax* is a flag that specifies the inference method and affects how the number of regions is determined. It also affects output shape. If it is 0, then output shape is 4D, and 2D otherwise. * **Range of values**: - + * *false* - do not perform softmax * *true* - perform softmax * **Type**: ``boolean`` @@ -100,7 +100,7 @@ RegionYolo .. code-block:: xml :force: - < !-- YOLO V3 example --> + @@ -120,8 +120,8 @@ RegionYolo - - < !-- YOLO V2 Example --> + + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Eye_9.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Eye_9.rst index aabb8ab101c212..411c54364258f8 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Eye_9.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Eye_9.rst @@ -5,7 +5,7 @@ Eye .. meta:: - :description: Learn about Eye-9 - a generation operation, which can be + :description: Learn about Eye-9 - a generation operation, which can be performed on three required and one optional input tensors. **Versioned name**: *Eye-9* @@ -23,13 +23,13 @@ Example 1. *Eye* output with ``output_type`` = ``i32``: .. code-block:: xml :force: - + num_rows = 3 - + num_columns = 4 - + diagonal_index = 2 - + output = [[0 0 1 0] [0 0 0 1] [0 0 0 0]] @@ -38,13 +38,13 @@ Example 2. *Eye* output with ``output_type`` = ``i32``: .. code-block:: xml :force: - + num_rows = 3 - + num_columns = 4 - + diagonal_index = -1 - + output = [[0 0 0 0] [1 0 0 0] [0 1 0 0]] @@ -53,13 +53,13 @@ Example 3. *Eye* output with ``output_type`` = ``f16``: .. 
code-block:: xml :force: - + num_rows = 2 - + diagonal_index = 5 - + batch_shape = [1, 2] - + output = [[[[0. 0.] [0. 0.]] [[0. 0.] @@ -97,13 +97,13 @@ Example 3. *Eye* output with ``output_type`` = ``f16``: .. code-block:: xml :force: - + - < !-- num rows: 5 --> - < !-- num columns: 5 --> - < !-- diagonal index --> + + + @@ -117,14 +117,14 @@ Example 3. *Eye* output with ``output_type`` = ``f16``: .. code-block:: xml :force: - + - < !-- num rows --> - < !-- num columns --> - < !-- diagonal index --> - < !-- batch_shape : [2, 3] --> + + + + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Multinomial_13.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Multinomial_13.rst index 34f355612232f1..46d2d66213594f 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Multinomial_13.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Multinomial_13.rst @@ -91,10 +91,10 @@ Example 3 - 2D tensor, without replacement * **Description**: controls whether to sample with replacement (classes can be sampled multiple times). * **Range of values**: `true`, `false` - + * ``true`` - class indices can be sampled multiple times. * ``false`` - class indices will not repeat in the output and the size of ``probs``' ``class_size`` dimension is required to be larger or equal to *num_samples* value. Might affect performance. 
- + * **Type**: `bool` * **Required**: *Yes* @@ -149,16 +149,16 @@ Example 3 - 2D tensor, without replacement - < !-- probs value: [[0.1, 0.5, 0.4]] --> - 1 < !-- batch size of 2 --> + + 1 3 - < !-- num_samples value: 5 --> + - 1 < !--dimension depends on input batch size --> - 5 < !--dimension depends on num_samples --> + 1 + 5 @@ -171,16 +171,16 @@ Example 3 - 2D tensor, without replacement - < !-- probs value: [[-1, 1, 2], [50, 1, 21]] --> - 2 < !-- batch size of 2 --> + + 2 3 - < !-- num_samples value: 10 --> + - 2 < !--dimension depends on input batch size --> - 10 < !--dimension depends on num_samples --> + 2 + 10 @@ -193,16 +193,16 @@ Example 3 - 2D tensor, without replacement - < !-- probs value: [[0.1, 0.5, 0.4]] --> - 2 < !-- batch size of 2 --> + + 2 3 - < !-- num_samples value: 2 --> + - 2 < !-- batch size of 2 --> - 2 < !-- 2 unique samples of classes --> + 2 + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/RandomUniform_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/RandomUniform_8.rst index d7b63d49b83d54..526d13d594afdb 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/RandomUniform_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/RandomUniform_8.rst @@ -5,7 +5,7 @@ RandomUniform .. meta:: - :description: Learn about RandomUniform-8 - a generation operation, which can be + :description: Learn about RandomUniform-8 - a generation operation, which can be performed on three required input tensors. **Versioned name**: *RandomUniform-8* @@ -16,10 +16,10 @@ RandomUniform **Detailed description**: -*RandomUniform* operation generates random numbers from a uniform distribution in the range ``[minval, maxval)``. -The generation algorithm is based on underlying random integer generator that uses Philox algorithm. 
Philox algorithm -is a counter-based pseudo-random generator, which produces uint32 values. Single invocation of Philox algorithm returns -four result random values, depending on the given *key* and *counter* values. *Key* and *counter* are initialized +*RandomUniform* operation generates random numbers from a uniform distribution in the range ``[minval, maxval)``. +The generation algorithm is based on underlying random integer generator that uses Philox algorithm. Philox algorithm +is a counter-based pseudo-random generator, which produces uint32 values. Single invocation of Philox algorithm returns +four result random values, depending on the given *key* and *counter* values. *Key* and *counter* are initialized with *global_seed* and *op_seed* attributes respectively. If both seed values equal to zero, RandomUniform generates non-deterministic sequence. @@ -32,7 +32,7 @@ If both seed values equal to zero, RandomUniform generates non-deterministic seq Link to the original paper `Parallel Random Numbers: As Easy as 1, 2, 3 `__. -The result of Philox is calculated by applying a fixed number of *key* and *counter* updating so-called "rounds". +The result of Philox is calculated by applying a fixed number of *key* and *counter* updating so-called "rounds". This implementation uses 4x32_10 version of Philox algorithm, where number of rounds = 10. Suppose we have *n* which determines *n*-th 4 elements of random sequence. @@ -43,7 +43,7 @@ In each round *key*, *counter* and *n* are splitted to pairs of uint32 values: R = cast\_to\_uint32(value)\\ L = cast\_to\_uint32(value >> 32), -where *cast\_to\_uint32* - static cast to uint32, *value* - uint64 input value, *L*, *R* - uint32 +where *cast\_to\_uint32* - static cast to uint32, *value* - uint64 input value, *L*, *R* - uint32 result values, >> - bitwise right shift. 
Then *n* and *counter* are updated with the following formula: @@ -68,7 +68,7 @@ Values :math:`L'_{n}, R'_{n}, L'_{counter}, R'_{counter}` are resulting four ran Float values between [0..1) are obtained from 32-bit integers by the following rules. -Float16 is formatted as follows: *sign* (1 bit) *exponent* (5 bits) *mantissa* (10 bits). The value is interpreted +Float16 is formatted as follows: *sign* (1 bit) *exponent* (5 bits) *mantissa* (10 bits). The value is interpreted using following formula: .. math:: @@ -99,7 +99,7 @@ where x is uint32 generated random value. Float32 is formatted as follows: *sign* (1 bit) *exponent* (8 bits) *mantissa* (23 bits). The value is interpreted using following formula: .. math:: - + (-1)^{sign} * 1, mantissa * 2 ^{exponent - 127} @@ -117,7 +117,7 @@ So the resulting float value is: .. code-block:: xml :force: - + val = ((exponent << 23) | x & 0x7fffffu) - 1.0, where x is uint32 generated random value. @@ -125,7 +125,7 @@ where x is uint32 generated random value. Double is formatted as follows: *sign* (1 bit) *exponent* (11 bits) *mantissa* (52 bits). The value is interpreted using following formula: .. math:: - + (-1)^{sign} * 1, mantissa * 2 ^{exponent - 1023} @@ -133,7 +133,7 @@ so to obtain double values *sign*, *exponent* and *mantissa* are set as follows: .. code-block:: xml :force: - + sign = 0 exponent = 1023 - representation of a zero exponent. mantissa = 52 right bits from two concatinated uint32 values from random integer generator. @@ -143,7 +143,7 @@ So the resulting double is obtained as follows: .. code-block:: xml :force: - + mantissa_h = x0 & 0xfffffu; // upper 20 bits of mantissa mantissa_l = x1; // lower 32 bits of mantissa mantissa = (mantissa_h << 32) | mantissa_l; @@ -156,7 +156,7 @@ To obtain a value in a specified range each value is processed with the followin For float values: .. math:: - + result = x * (maxval - minval) + minval, where *x* is random float or double value between [0..1). 
@@ -174,7 +174,7 @@ Example 1. *RandomUniform* output with ``global_seed`` = 150, ``op_seed`` = 10, .. code-block:: xml :force: - + input_shape = [ 3, 3 ] output = [[0.7011236 0.30539632 0.93931055] [0.9456035 0.11694777 0.50770056] @@ -185,7 +185,7 @@ Example 2. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100, .. code-block:: xml :force: - + input_shape = [ 2, 2 ] minval = 2 @@ -200,7 +200,7 @@ Example 3. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100, .. code-block:: xml :force: - + input_shape = [ 2, 3 ] minval = 50 @@ -261,11 +261,11 @@ Example 3. *RandomUniform* output with ``global_seed`` = 80, ``op_seed`` = 100, - < !-- shape value: [2, 3, 10] --> + 3 - < !-- min value --> - < !-- max value --> + + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_1.rst index fa0b5fe6bb1dee..689f8ae0e617e1 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_1.rst @@ -5,7 +5,7 @@ Range .. meta:: - :description: Learn about Range-1 - a generation operation, which can be + :description: Learn about Range-1 - a generation operation, which can be performed on three required input tensors. **Versioned name**: *Range-1* @@ -46,7 +46,7 @@ For a positive ``step``: for a negative ``step``: .. 
math:: - + start>=val[i]>stop, @@ -66,16 +66,16 @@ where - < !-- start value: 2 --> + - < !-- stop value: 23 --> + - < !-- step value: 3 --> + - 7 < !-- [ 2, 5, 8, 11, 14, 17, 20] --> + 7 @@ -88,16 +88,16 @@ where - < !-- start value: 23 --> + - < !-- stop value: 2 --> + - < !-- step value: -3 --> + - 7 < !-- [23, 20, 17, 14, 11, 8, 5] --> + 7 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_4.rst index 0a0418124d75f1..471eeef22482ab 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_4.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/generation/Range_4.rst @@ -5,7 +5,7 @@ Range .. meta:: - :description: Learn about Range-4 - a generation operation, which can be + :description: Learn about Range-4 - a generation operation, which can be performed on three required input tensors. 
**Versioned name**: *Range-4* @@ -81,16 +81,16 @@ This is aligned with PyTorch's operation ``torch.arange``, to align with tensorf - < !-- start value: 2 --> + - < !-- stop value: 23 --> + - < !-- step value: 3 --> + - 7 < !-- [ 2, 5, 8, 11, 14, 17, 20] --> + 7 @@ -104,16 +104,16 @@ This is aligned with PyTorch's operation ``torch.arange``, to align with tensorf - < !-- start value: 23 --> + - < !-- stop value: 2 --> + - < !-- step value: -3 --> + - 7 < !-- [23, 20, 17, 14, 11, 8, 5] --> + 7 @@ -127,16 +127,16 @@ This is aligned with PyTorch's operation ``torch.arange``, to align with tensorf - < !-- start value: 1 --> + - < !-- stop value: 2.5 --> + - < !-- step value: 0.5 --> + - 3 < !-- [ 1.0, 1.5, 2.0] --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toBGR_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toBGR_8.rst index 8b5e46eeb6ca98..a3df192c305726 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toBGR_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toBGR_8.rst @@ -5,7 +5,7 @@ I420toBGR .. meta:: - :description: Learn about I420toBGR-8 - an image processing operation, which + :description: Learn about I420toBGR-8 - an image processing operation, which can be performed to convert image from I420 to BGR format. 
**Versioned name**: *I420toBGR-8* @@ -70,19 +70,19 @@ Same as specified for :doc:`I420toRGB ` ope -  < !-- Y plane --> +   1 480 640 1 -  < !-- U plane --> +   1 240 320 1 -  < !-- V plane --> +   1 240 320 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toRGB_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toRGB_8.rst index a0d4a3d0532e28..8d37f583503ec1 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toRGB_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/I420toRGB_8.rst @@ -5,7 +5,7 @@ I420toRGB .. meta:: - :description: Learn about I420toRGB-8 - an image processing operation, which + :description: Learn about I420toRGB-8 - an image processing operation, which can be performed to convert image from I420 to RGB format. **Versioned name**: *I420toRGB-8* @@ -113,19 +113,19 @@ Input I420 image tensor shall have ``NHWC (also known as NYXC)`` layout and can -  < !-- Y plane --> +   1 480 640 1 -  < !-- U plane --> +   1 240 320 1 -  < !-- V plane --> +   1 240 320 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_1.rst index 0bed435759eb60..470fa5b7099006 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_1.rst @@ -5,7 +5,7 @@ Interpolate .. meta:: - :description: Learn about I420toRGB-8 - an image processing operation, which + :description: Learn about I420toRGB-8 - an image processing operation, which can be performed on two required tensors. 
**Versioned name**: *Interpolate-1* @@ -91,7 +91,7 @@ This is a scalar that specifies padding for each spatial dimension. 80 - 2  < !--The values in this input are [50, 60] --> + 2   diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_11.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_11.rst index b497cd42d297f9..281607f2504e62 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_11.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_11.rst @@ -5,7 +5,7 @@ Interpolate .. meta:: - :description: Learn about Interpolate-11 - an image processing operation, which + :description: Learn about Interpolate-11 - an image processing operation, which can be performed on two required and one optional tensor. **Versioned name**: *Interpolate-11* @@ -129,13 +129,13 @@ Interpolate 80 - 2 < !--The values in this input are [24, 160] --> + 2  - 2 < !--The values in this input are [0.5, 2.0] --> + 2  - 2 < !--The values in this input are [2, 3] (axes). --> + 2  diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_4.rst index c81ccff8eac943..7572f7c1bc97ac 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_4.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/Interpolate_4.rst @@ -5,7 +5,7 @@ Interpolate .. meta:: - :description: Learn about Interpolate-4 - an image processing operation, which + :description: Learn about Interpolate-4 - an image processing operation, which can be performed on three required and one optional tensor. 
**Versioned name**: *Interpolate-4* @@ -128,7 +128,7 @@ Calculations are performed according to the following rules. import math import numpy as np from enum import Enum, unique - + class GetNearestPixel: def __init__(self, mode: str): self.func = { @@ -138,37 +138,37 @@ Calculations are performed according to the following rules. 'ceil': GetNearestPixel.ceil_func, 'simple': GetNearestPixel.simple_func }[mode] - + def __call__(self, x_original, is_downsample): return self.func(x_original, is_downsample) - + @staticmethod def prefer_floor_func(x_original, is_downsample): if x_original == int(x_original) + 0.5: return int(math.floor(x_original)) else: return int(round(x_original)) - + @staticmethod def prefer_ceil_func(x_original, is_downsample): return int(round(x_original)) - + @staticmethod def floor_func(x_original, is_downsample): return int(math.floor(x_original)) - + @staticmethod def ceil_func(x_original, is_downsample): return int(math.ceil(x_original)) - + @staticmethod def simple_func(x_original, is_downsample): if is_downsample: return int(math.ceil(x_original)) else: return int(x_original) - - + + class GetOriginalCoordinate: def __init__(self, mode: str): self.func = { @@ -178,31 +178,31 @@ Calculations are performed according to the following rules. 
'tf_half_pixel_for_nn': GetOriginalCoordinate.tf_half_pixel_for_nn_func, 'align_corners': GetOriginalCoordinate.align_corners_func }[mode] - + def __call__(self, x_resized, x_scale, length_resized, length_original): return self.func(x_resized, x_scale, length_resized, length_original) - + @staticmethod def half_pixel_func(x_resized, x_scale, length_resized, length_original): return ((x_resized + 0.5) / x_scale) - 0.5 - + @staticmethod def pytorch_half_pixel_func(x_resized, x_scale, length_resized, length_original): return (x_resized + 0.5) / x_scale - 0.5 if length_resized > 1 else 0.0 - + @staticmethod def asymmetric_func(x_resized, x_scale, length_resized, length_original): return x_resized / x_scale - + @staticmethod def tf_half_pixel_for_nn_func(x_resized, x_scale, length_resized, length_original): return (x_resized + 0.5) / x_scale - + @staticmethod def align_corners_func(x_resized, x_scale, length_resized, length_original): return 0 if length_resized == 1 else x_resized * (length_original - 1) / (length_resized - 1) - - + + def get_cubic_coeff(s, a): abs_s = abs(s) coeff = np.zeros(4) @@ -211,18 +211,18 @@ Calculations are performed according to the following rules. coeff[2] = (((-a -2.0) * abs_s+ (2.0 * a + 3.0)) * abs_s - a) * abs_s coeff[3] = - a * abs_s * abs_s * (abs_s - 1.0) return coeff - - + + def triangle_coeffs(dz): return np.maximum(0.0, 1.0 - np.abs(dz)) - - + + @unique class ShapeCalculationMode(Enum): SIZES = 0 SCALES = 1 - - + + class InterpolateCalculation: def __init__(self, attrs: dict): self.mode = attrs['mode'] @@ -233,38 +233,38 @@ Calculations are performed according to the following rules. 
'linear_onnx': self.onnx_linear_interpolation }[self.mode] self.attrs = attrs - + self.pads_begin = attrs.get('pads_begin', [0]) self.pads_end = attrs.get('pads_end', [0]) self.coordinate_transformation_mode = attrs.get('coordinate_transformation_mode', 'half_pixel') self.nearest_mode = attrs.get('nearest_mode', 'round_prefer_floor') self.cube_coeff = attrs.get('cube_coeff', -0.75) self.antialias = attrs.get('antialias', False) - + self.shape_calculation_mode = { 'sizes': ShapeCalculationMode.SIZES, 'scales': ShapeCalculationMode.SCALES }[attrs['shape_calculation_mode']] - + self.get_original_coordinate = self.get_coordinate_transformation_mode() self.get_nearest_pixel = GetNearestPixel(self.nearest_mode) - - + + def get_coordinate_transformation_mode(self): return GetOriginalCoordinate(self.coordinate_transformation_mode) - + def shape_infer(self, input_data, sizes, scales): result = input_data.shape + self.pads_begin + self.pads_end - + if self.shape_calculation_mode == ShapeCalculationMode.SIZES: for i, axis in enumerate(self.axes): result[axis] = sizes[i] else: for i, axis in enumerate(self.axes): result[axis] = math.floor(scales[i] * result[axis]) - + return result - + @staticmethod def correct_pad(pad, rank): pad_len = len(pad) @@ -274,17 +274,17 @@ Calculations are performed according to the following rules. 
return np.array(pad[: rank - 1]).astype(np.int64) else: return np.array(pad, dtype=np.int64) - + def __call__(self, input_data, sizes, scales, axes): rank = input_data.ndim self.pads_begin = InterpolateCalculation.correct_pad(self.pads_begin, rank) self.pads_end = InterpolateCalculation.correct_pad(self.pads_end, rank) self.pads = list(zip(self.pads_begin, self.pads_end)) self.axes = np.array(axes).astype(np.int64) - + self.output_shape = self.shape_infer(input_data, sizes, scales) padded_data = np.pad(input_data, self.pads, 'constant') - + if self.shape_calculation_mode == ShapeCalculationMode.SIZES: num_of_axes = len(self.axes) self.scales = np.zeros(num_of_axes) @@ -292,18 +292,18 @@ Calculations are performed according to the following rules. self.scales[i] = self.output_shape[axis] / padded_data.shape[axis] else: self.scales = scales - + if self.mode == 'nearest': self.all_scales = np.ones(rank).astype(np.float) for i, axis in enumerate(self.axes): self.all_scales[axis] = self.scales[i] - + self.input_shape = padded_data.shape return self.func(padded_data) - + def clip_coord(self, coord, axis): return max(0, min(coord, self.input_shape[axis] - 1)) - + def cubic_interpolation(self, input_data): rank = len(self.input_shape) result = np.zeros(self.output_shape) @@ -328,28 +328,28 @@ Calculations are performed according to the following rules. 
summa += coeffs_prod * input_data[tuple(coords_for_sum)] result[coordinates] = summa return result - + def linear_interpolation(self, input_data): result = np.zeros(self.output_shape) num_of_axes = len(self.axes) is_downsample = False - + for scale in self.scales: is_downsample = is_downsample or (scale < 1) - + antialias = is_downsample and self.antialias - + a = np.zeros(num_of_axes) for i, _ in enumerate(self.axes): a[i] = self.scales[i] if antialias else 1.0 - + prod_of_a = np.prod(a) r = np.zeros(num_of_axes).astype(np.int64) for i, _ in enumerate(self.axes): r[i] = 2 if self.scales[i] > 1.0 else int(math.ceil(2.0/a[i])) - + indices = [tuple(np.array(ind).astype(np.int64) - r) for ind in np.ndindex(tuple(2 * r + 1))] - + for coordinates in np.ndindex(tuple(self.output_shape)): icoords = np.array(coordinates).astype(np.float64) icoords_r = np.array(coordinates).astype(np.float64) @@ -357,51 +357,51 @@ Calculations are performed according to the following rules. in_coord = self.get_original_coordinate(coordinates[axis], self.scales[i], self.output_shape[axis], self.input_shape[axis]) icoords[axis] = in_coord icoords_r[axis] = round(in_coord) - + summa = 0.0 wsum = 0.0 - + for index in indices: inner_coords = np.array(coordinates) for i, axis in enumerate(self.axes): inner_coords[axis] = index[i] + icoords_r[axis] - + conditions = [inner_coords[axis] >= 0 and inner_coords[axis] < self.input_shape[axis] for axis in self.axes] if not all(conditions): continue - + dz = np.zeros(num_of_axes) for i, axis in enumerate(self.axes): dz[i] = icoords[axis] - inner_coords[axis] - + w = prod_of_a * np.prod(triangle_coeffs(a * dz)) wsum += w summa += w * input_data[tuple(inner_coords)] - + if wsum == 0: result[coordinates] = 0.0 else: result[coordinates] = summa / wsum - + return result - + def onnx_linear_interpolation5D(self, input_data): rank = len(self.input_shape) assert rank in [3, 5], "mode 'linear_onnx' supports only 3D or 5D tensors" assert set(self.axes) == {2, 3, 4} 
or set(self.axes) == {0, 1, 2}, \ "mode 'linear_onnx' supports only case when axes = {2, 3, 4} or axes = {0, 1, 2}" - + result = np.zeros(self.output_shape) - + if rank == 3: reshaped_data = np.reshape(input_data, (1, 1, self.input_shape[0], self.input_shape[1], self.input_shape[2])) result = np.reshape(result, (1, 1, self.output_shape[0], self.output_shape[1], self.output_shape[2])) else: reshaped_data = input_data - + input_shape = np.array(reshaped_data.shape).astype(np.int64) output_shape = np.array(result.shape).astype(np.int64) - + batch_size = input_shape[0]; num_channels = input_shape[1]; input_depth = input_shape[2]; @@ -410,31 +410,31 @@ Calculations are performed according to the following rules. output_depth = output_shape[2]; output_height = output_shape[3]; output_width = output_shape[4]; - + depth_scale = self.scales[0]; height_scale = self.scales[1]; width_scale = self.scales[2]; - + z_original = np.zeros(output_depth).astype(np.float) y_original = np.zeros(output_height).astype(np.float) x_original = np.zeros(output_width).astype(np.float) - + in_z1 = np.zeros(output_depth).astype(np.int64) in_z2 = np.zeros(output_depth).astype(np.int64) in_y1 = np.zeros(output_height).astype(np.int64) in_y2 = np.zeros(output_height).astype(np.int64) in_x1 = np.zeros(output_width).astype(np.int64) in_x2 = np.zeros(output_width).astype(np.int64) - + dz1 = np.zeros(output_depth).astype(np.float) dz2 = np.zeros(output_depth).astype(np.float) - + dy1 = np.zeros(output_height).astype(np.float) dy2 = np.zeros(output_height).astype(np.float) - + dx1 = np.zeros(output_width).astype(np.float) dx2 = np.zeros(output_width).astype(np.float) - + for z in range(0, output_depth): in_z = self.get_original_coordinate(z, depth_scale, output_depth, input_depth) z_original[z] = in_z @@ -443,11 +443,11 @@ Calculations are performed according to the following rules. 
in_z2[z] = min(in_z1[z] + 1, input_depth - 1) dz1[z] = abs(in_z - in_z1[z]) dz2[z] = abs(in_z - in_z2[z]) - + if in_z1[z] == in_z2[z]: dz1[z] = 0.5 dz2[z] = 0.5 - + for y in range(0, output_height): in_y = self.get_original_coordinate(y, height_scale, output_height, input_height) y_original[y] = in_y @@ -456,19 +456,19 @@ Calculations are performed according to the following rules. in_y2[y] = min(in_y1[y] + 1, input_height - 1) dy1[y] = abs(in_y - in_y1[y]) dy2[y] = abs(in_y - in_y2[y]) - + if in_y1[y] == in_y2[y]: dy1[y] = 0.5 dy2[y] = 0.5 - + for x in range(0, output_width): in_x = self.get_original_coordinate(x, width_scale, output_width, input_width); x_original[x] = in_x in_x = max(0.0, min(in_x, input_width - 1)); - + in_x1[x] = min(in_x, input_width - 1); in_x2[x] = min(in_x1[x] + 1, input_width - 1); - + dx1[x] = abs(in_x - in_x1[x]); dx2[x] = abs(in_x - in_x2[x]); if in_x1[x] == in_x2[x]: @@ -487,33 +487,33 @@ Calculations are performed according to the following rules. x212 = reshaped_data[n, c, in_z2[z], in_y1[y], in_x2[x]] x122 = reshaped_data[n, c, in_z2[z], in_y2[y], in_x1[x]] x222 = reshaped_data[n, c, in_z2[z], in_y2[y], in_x2[x]] - + temp = dx2[x] * dy2[y] * dz2[z] * x111 + dx1[x] * dy2[y] * dz2[z] * x211 temp += dx2[x] * dy1[y] * dz2[z] * x121 + dx1[x] * dy1[y] * dz2[z] * x221 temp += dx2[x] * dy2[y] * dz1[z] * x112 + dx1[x] * dy2[y] * dz1[z] * x212 temp += dx2[x] * dy1[y] * dz1[z] * x122 + dx1[x] * dy1[y] * dz1[z] * x222 - + result[n, c, z, y, x] = temp - + return np.reshape(result, self.output_shape) - + def onnx_linear_interpolation4D(self, input_data): rank = len(self.input_shape) assert rank in [2, 4], "mode 'linear_onnx' supports only 2D or 4D tensors" assert set(self.axes) == {2, 3} or set(self.axes) == {0, 1}, \ "mode 'linear_onnx' supports only case when axes = {2, 3} or axes = {0, 1}" - + result = np.zeros(self.output_shape) - + if rank == 2: reshaped_data = np.reshape(input_data, (1, 1, self.input_shape[0], self.input_shape[1])) result 
= np.reshape(result, (1, 1, self.output_shape[0], self.output_shape[1])) else: reshaped_data = input_data - + input_shape = np.array(reshaped_data.shape).astype(np.int64) output_shape = np.array(result.shape).astype(np.int64) - + output_height = output_shape[2] output_width = output_shape[3] input_height = input_shape[2] @@ -522,21 +522,21 @@ Calculations are performed according to the following rules. width_scale = self.scales[1] batch_size = input_shape[0] num_channels = input_shape[1] - + y_original = np.zeros(output_height).astype(np.float) x_original = np.zeros(output_width).astype(np.float) - + in_y1 = np.zeros(output_height).astype(np.int64) in_y2 = np.zeros(output_height).astype(np.int64) in_x1 = np.zeros(output_width).astype(np.int64) in_x2 = np.zeros(output_width).astype(np.int64) - + dy1 = np.zeros(output_height).astype(np.float) dy2 = np.zeros(output_height).astype(np.float) - + dx1 = np.zeros(output_width).astype(np.float) dx2 = np.zeros(output_width).astype(np.float) - + for y in range(0, output_height): in_y = self.get_original_coordinate(y, height_scale, output_height, input_height) y_original[y] = in_y @@ -545,25 +545,25 @@ Calculations are performed according to the following rules. in_y2[y] = min(in_y1[y] + 1, input_height - 1) dy1[y] = abs(in_y - in_y1[y]) dy2[y] = abs(in_y - in_y2[y]) - + if in_y1[y] == in_y2[y]: dy1[y] = 0.5 dy2[y] = 0.5 - + for x in range(0, output_width): in_x = self.get_original_coordinate(x, width_scale, output_width, input_width); x_original[x] = in_x in_x = max(0.0, min(in_x, input_width - 1)); - + in_x1[x] = min(in_x, input_width - 1); in_x2[x] = min(in_x1[x] + 1, input_width - 1); - + dx1[x] = abs(in_x - in_x1[x]); dx2[x] = abs(in_x - in_x2[x]); if in_x1[x] == in_x2[x]: dx1[x] = 0.5 dx2[x] = 0.5 - + for n in range(0, batch_size): for c in range(0, num_channels): for y in range(0, output_height): @@ -574,21 +574,21 @@ Calculations are performed according to the following rules. 
x22 = reshaped_data[n, c, in_y2[y], in_x2[x]] temp = dx2[x] * dy2[y] * x11 + dx1[x] * dy2[y] * x21 + dx2[x] * dy1[y] * x12 + dx1[x] * dy1[y] * x22 result[n, c, y, x] = temp - + return np.reshape(result, self.output_shape) - + def onnx_linear_interpolation(self, input_data): rank = len(self.input_shape) assert rank in [2, 3, 4, 5], "mode 'linear_onnx' supports only 2D, 3D, 4D, or 5D tensors" - + if rank in [2, 4]: self.onnx_linear_interpolation4D(input_data) else: self.onnx_linear_interpolation5D(input_data) - + def nearest_interpolation(self, input_data): result = np.zeros(self.output_shape) - + num_of_axes = len(self.axes) for coordinates in np.ndindex(tuple(self.output_shape)): input_coords = np.array(coordinates, dtype=np.int64) @@ -597,7 +597,7 @@ Calculations are performed according to the following rules. nearest_pixel = self.get_nearest_pixel(in_coord, scale < 1) input_coords[axis] = max(0, min(nearest_pixel, self.input_shape[axis] - 1)) result[coordinates] = input_data[tuple(input_coords)] - + return result @@ -617,13 +617,13 @@ Calculations are performed according to the following rules. 80 - 2  < !--The values in this input are [24, 160] --> + 2   - 2  < !--The values in this input are [0.5, 2.0] --> + 2   - 2  < !--The values in this input are [2, 3] (axes). --> + 2   diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toBGR_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toBGR_8.rst index 777d132f7a9e7e..5320ecd4fe4317 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toBGR_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toBGR_8.rst @@ -5,7 +5,7 @@ NV12toBGR .. 
meta:: - :description: Learn about NV12toBGR-8 - an image processing operation, which + :description: Learn about NV12toBGR-8 - an image processing operation, which can be performed to convert an image from NV12 to BGR format. **Versioned name**: *NV12toBGR-8* @@ -70,13 +70,13 @@ Same as specified for :doc:`NV12toRGB ` ope - < !-- Y plane --> + 1 480 640 1 - < !-- UV plane --> + 1 240 320 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toRGB_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toRGB_8.rst index 1044e6b18916c5..2012d9f3d0c642 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toRGB_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/image/NV12toRGB_8.rst @@ -5,7 +5,7 @@ NV12toRGB .. meta:: - :description: Learn about NV12toRGB-8 - an image processing operation, which + :description: Learn about NV12toRGB-8 - an image processing operation, which can be performed to convert an image from NV12 to RGB format. **Versioned name**: *NV12toRGB-8* @@ -102,13 +102,13 @@ Input NV12 image tensor shall have ``NHWC (also known as NYXC)`` layout and can - < !-- Y plane --> + 1 480 640 1 - < !-- UV plane --> + 1 240 320 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/BatchToSpace_2.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/BatchToSpace_2.rst index e7a52a05faf540..051cababe93065 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/BatchToSpace_2.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/BatchToSpace_2.rst @@ -5,7 +5,7 @@ BatchToSpace .. 
meta:: - :description: Learn about BatchToSpace-2 - a data movement operation, + :description: Learn about BatchToSpace-2 - a data movement operation, which can be performed on four required input tensors. **Versioned name**: *BatchToSpace-2* @@ -21,25 +21,25 @@ BatchToSpace 1. Reshape ``data`` input to produce a tensor of shape :math:`[B_1, \dots, B_{N - 1}, \frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1, D_2, \dots, D_{N - 1}]` .. math:: - + x^{\prime} = reshape(data, [B_1, \dots, B_{N - 1}, \frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1, D_2, \dots, D_{N - 1}]) 2. Permute dimensions of :math:`x^{\prime}` to produce a tensor of shape :math:`[\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1, B_1, D_2, B_2, \dots, D_{N-1}, B_{N - 1}]` .. math:: - + x^{\prime\prime} = transpose(x', [N, N + 1, 0, N + 2, 1, \dots, N + N - 1, N - 1]) 3. Reshape :math:`x^{\prime\prime}` to produce a tensor of shape :math:`[\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1 \times B_1, D_2 \times B_2, \dots, D_{N - 1} \times B_{N - 1}]` .. math:: - + x^{\prime\prime\prime} = reshape(x^{\prime\prime}, [\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, D_1 \times B_1, D_2 \times B_2, \dots, D_{N - 1} \times B_{N - 1}]) 4. Crop the start and end of spatial dimensions of :math:`x^{\prime\prime\prime}` according to ``crops_begin`` and ``crops_end`` inputs to produce the output :math:`y` of shape: .. math:: - + \left[\frac{batch}{\left(B_1 \times \dots \times B_{N - 1}\right)}, crop(D_1 \times B_1, CB_1, CE_1), crop(D_2 \times B_2, CB_2, CE_2), \dots , crop(D_{N - 1} \times B_{N - 1}, CB_{N - 1}, CE_{N - 1})\right] Where @@ -80,27 +80,27 @@ Example: 2D input tensor ``data`` .. 
code-block:: xml :force: - + - < !-- data --> - 10 < !-- batch --> - 2 < !-- spatial dimension 1 --> + + 10 + 2 - < !-- block_shape value: [1, 5] --> + 2 - < !-- crops_begin value: [0, 2] --> + 2 - < !-- crops_end value: [0, 0] --> + 2 - 2 < !-- data.shape[0] / (block_shape.shape[0] * block_shape.shape[1]) --> - 8 < !-- data.shape[1] * block_shape.shape[1] - crops_begin[1] - crops_end[1]--> + 2 + 8 @@ -109,33 +109,33 @@ Example: 5D input tensor ``data`` .. code-block:: xml :force: - + - < !-- data --> - 48 < !-- batch --> - 3 < !-- spatial dimension 1 --> - 3 < !-- spatial dimension 2 --> - 1 < !-- spatial dimension 3 --> - 3 < !-- spatial dimension 4 --> + + 48 + 3 + 3 + 1 + 3 - < !-- block_shape value: [1, 2, 4, 3, 1] --> + 5 - < !-- crops_begin value: [0, 0, 1, 0, 0] --> + 5 - < !-- crops_end value: [0, 0, 1, 0, 0] --> + 5 - 2 < !-- data.shape[0] / (block_shape.shape[0] * block_shape.shape[1] * ... * block_shape.shape[4]) --> - 6 < !-- data.shape[1] * block_shape.shape[1] - crops_begin[1] - crops_end[1]--> - 10 < !-- data.shape[2] * block_shape.shape[2] - crops_begin[2] - crops_end[2] --> - 3 < !-- data.shape[3] * block_shape.shape[3] - crops_begin[3] - crops_end[3] --> - 3 < !-- data.shape[4] * block_shape.shape[4] - crops_begin[4] - crops_end[4] --> + 2 + 6 + 10 + 3 + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_1.rst index 37f7c4e3f101ff..583a182609403e 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_1.rst @@ -5,7 +5,7 @@ Broadcast .. 
meta:: - :description: Learn about Broadcast-1 - a data movement operation, + :description: Learn about Broadcast-1 - a data movement operation, which can be performed on two required and one optional input tensor. **Versioned name**: *Broadcast-1* @@ -53,7 +53,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape .. code-block:: xml :force: - + @@ -63,9 +63,9 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 1 - 4 < !--The tensor contains 4 elements: [1, 16, 50, 50] --> + 4 - < !-- the 3rd input shouldn't be provided with mode="numpy" --> + @@ -76,7 +76,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape - + @@ -84,10 +84,10 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 16 - 4 < !--The tensor contains 4 elements: [1, 16, 50, 50] --> + 4 - 1 < !--The tensor contains 1 elements: [1] --> + 1 @@ -99,7 +99,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape - + @@ -108,10 +108,10 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 50 - 4 < !--The tensor contains 4 elements: [1, 50, 50, 16] --> + 4 - 2 < !--The tensor contains 2 elements: [1, 2] --> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_3.rst index e7b3f3a0d3d1fd..e13946f4780518 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Broadcast_3.rst @@ -5,7 +5,7 @@ Broadcast .. 
meta:: - :description: Learn about Broadcast-3 - a data movement operation, + :description: Learn about Broadcast-3 - a data movement operation, which can be performed on two required and one optional input tensor. **Versioned name**: *Broadcast-3* @@ -61,7 +61,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape .. code-block:: xml :force: - + @@ -71,9 +71,9 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 1 - 4 < !--The tensor contains 4 elements: [1, 16, 50, 50] --> + 4 - < !-- the 3rd input shouldn't be provided with mode="numpy" --> + @@ -84,7 +84,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape - + @@ -92,10 +92,10 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 16 - 4 < !--The tensor contains 4 elements: [1, 16, 50, 50] --> + 4 - 1 < !--The tensor contains 1 elements: [1] --> + 1 @@ -107,7 +107,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape - + @@ -116,10 +116,10 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 50 - 4 < !--The tensor contains 4 elements: [1, 50, 50, 16] --> + 4 - 2 < !--The tensor contains 2 elements: [1, 2] --> + 2 @@ -131,7 +131,7 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape - + @@ -141,9 +141,9 @@ For example, ``axes_mapping = [1]`` enables broadcasting of a tensor with shape 1 - 4 < !--The tensor contains 4 elements: [1, 1, 50, 50] --> + 4 - < !-- the 3rd input shouldn't be provided with mode="bidirectional" --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Concat_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Concat_1.rst index 6c07321e08be99..7c3c11131f49e4 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Concat_1.rst +++ 
b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Concat_1.rst @@ -5,7 +5,7 @@ Concat .. meta:: - :description: Learn about Concat-1 - a data movement operation, + :description: Learn about Concat-1 - a data movement operation, which can be performed on arbitrary number of input tensors. **Versioned name**: *Concat-1* @@ -39,25 +39,25 @@ Concat .. code-block:: xml :force: - + 1 - 8 < !-- axis for concatenation --> + 8 50 50 1 - 16 < !-- axis for concatenation --> + 16 50 50 1 - 32 < !-- axis for concatenation --> + 32 50 50 @@ -65,7 +65,7 @@ Concat 1 - 56 < !-- concatenated axis: 8 + 16 + 32 = 48 --> + 56 50 50 @@ -75,25 +75,25 @@ Concat .. code-block:: xml :force: - + 1 - 8 < !-- axis for concatenation --> + 8 50 50 1 - 16 < !-- axis for concatenation --> + 16 50 50 1 - 32 < !-- axis for concatenation --> + 32 50 50 @@ -101,7 +101,7 @@ Concat 1 - 56 < !-- concatenated axis: 8 + 16 + 32 = 48 --> + 56 50 50 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/DepthToSpace_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/DepthToSpace_1.rst index de7af7a597b276..1df751ac0c5f68 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/DepthToSpace_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/DepthToSpace_1.rst @@ -5,7 +5,7 @@ DepthToSpace .. meta:: - :description: Learn about DepthToSpace-1 - a data movement operation, + :description: Learn about DepthToSpace-1 - a data movement operation, which can be performed on a single input tensor. **Versioned name**: *DepthToSpace-1* @@ -21,7 +21,7 @@ DepthToSpace The operation is equivalent to the following transformation of the input tensor ``data`` with ``K`` spatial dimensions of shape ``[N, C, D1, D2, ..., DK]`` to *Y* output tensor. If ``mode = blocks_first``: .. 
code-block:: cpp - + x' = reshape(data, [N, block_size, block_size, ..., block_size, C / (block_size ^ K), D1, D2, ..., DK]) x'' = transpose(x', [0, K + 1, K + 2, 1, K + 3, 2, K + 4, 3, ..., K + (K + 1), K]) y = reshape(x'', [N, C / (block_size ^ K), D1 * block_size, D2 * block_size, D3 * block_size, ..., DK * block_size]) @@ -29,7 +29,7 @@ The operation is equivalent to the following transformation of the input tensor If ``mode = depth_first``: .. code-block:: cpp - + x' = reshape(data, [N, C / (block_size ^ K), block_size, block_size, ..., block_size, D1, D2, ..., DK]) x'' = transpose(x', [0, 1, K + 2, 2, K + 3, 3, K + 4, 4, ..., K + (K + 1), K + 1]) y = reshape(x'', [N, C / (block_size ^ K), D1 * block_size, D2 * block_size, D3 * block_size, ..., DK * block_size]) @@ -70,7 +70,7 @@ If ``mode = depth_first``: .. code-block:: xml :force: - + @@ -83,10 +83,10 @@ If ``mode = depth_first``: - 5 < !-- data.shape[0] --> - 7 < !-- data.shape[1] / (block_size ^ 2) --> - 4 < !-- data.shape[2] * block_size --> - 6 < !-- data.shape[3] * block_size --> + 5 + 7 + 4 + 6 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_1.rst index a2d18a3c4c65a1..35036ddf555950 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_1.rst @@ -5,14 +5,14 @@ Gather .. meta:: - :description: Learn about Gather-1 - a data movement operation, + :description: Learn about Gather-1 - a data movement operation, which can be performed on three required input tensors. 
**Versioned name:** *Gather-1* **Category:** *Data movement* -**Short description:** *Gather* operation takes slices of data in the first input tensor according +**Short description:** *Gather* operation takes slices of data in the first input tensor according to the indices specified in the second input tensor and axis from the third input. **Detailed description** @@ -30,13 +30,13 @@ Where ``axis`` is the value from the third input. * **1**: Tensor with arbitrary data. **Required.** * **2**: Tensor with indices to gather. The values for indices are in the range ``[0, input1[axis] - 1]``. **Required.** -* **3**: Scalar or 1D tensor *axis* is a dimension index to gather data from. For example, *axis* equal - to 1 means that gathering is performed over the first dimension. Negative value means reverse indexing. +* **3**: Scalar or 1D tensor *axis* is a dimension index to gather data from. For example, *axis* equal + to 1 means that gathering is performed over the first dimension. Negative value means reverse indexing. Allowed values are from ``[-len(input1.shape), len(input1.shape) - 1]``. **Required.** **Outputs** -* **1**: The resulting tensor that consists of elements from the first input tensor gathered by indices +* **1**: The resulting tensor that consists of elements from the first input tensor gathered by indices from the second input tensor. Shape of the tensor is ``[input1.shape[:axis], input2.shape, input1.shape[axis + 1:]]`` **Example** @@ -58,17 +58,17 @@ Where ``axis`` is the value from the third input. 
20 28 - < !-- axis = 1 --> + - 6 < !-- embedded dimension from the 1st input --> - 15 < !-- embedded dimension from the 2nd input --> - 4 < !-- embedded dimension from the 2nd input --> - 20 < !-- embedded dimension from the 2nd input --> - 28 < !-- embedded dimension from the 2nd input --> - 10 < !-- embedded dimension from the 1st input --> - 24 < !-- embedded dimension from the 1st input --> + 6 + 15 + 4 + 20 + 28 + 10 + 24 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_7.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_7.rst index d2cee5ffd926c4..ebe248309a122d 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_7.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_7.rst @@ -5,7 +5,7 @@ Gather .. meta:: - :description: Learn about Gather-7 - a data movement operation, + :description: Learn about Gather-7 - a data movement operation, which can be performed on three required input tensors. **Versioned name**: *Gather-7* @@ -29,12 +29,12 @@ the number of batch dimensions. ``N`` and ``M`` are numbers of dimensions of ``d **Attributes**: * *batch_dims* - - * **Description**: *batch_dims* (also denoted as ``b``) is a leading number of dimensions of ``data`` - tensor and ``indices`` representing the batches, and *Gather* starts to gather from the ``b`` - dimension. It requires the first ``b`` dimensions in `data` and `indices` tensors to be equal. + + * **Description**: *batch_dims* (also denoted as ``b``) is a leading number of dimensions of ``data`` + tensor and ``indices`` representing the batches, and *Gather* starts to gather from the ``b`` + dimension. It requires the first ``b`` dimensions in `data` and `indices` tensors to be equal. 
If ``batch_dims`` is less than zero, the normalized value is used ``batch_dims = indices.rank + batch_dims``. - * **Range of values**: ``[-min(data.rank, indices.rank); min(data.rank, indices.rank)]`` and + * **Range of values**: ``[-min(data.rank, indices.rank); min(data.rank, indices.rank)]`` and ``batch_dims' <= axis'``. Where ``batch_dims'`` and ``axis'`` stand for normalized ``batch_dims`` and ``axis`` values. * **Type**: *T_AXIS* * **Default value**: 0 @@ -46,7 +46,7 @@ Example 1 with default *batch_dims* value: batch_dims = 0 axis = 0 - + indices = [0, 0, 4] data = [1, 2, 3, 4, 5] output = [1, 1, 5] @@ -58,15 +58,15 @@ Example 2 with non-default *batch_dims* value: batch_dims = 1 axis = 1 - + indices = [[0, 0, 4], <-- this is applied to the first batch [4, 0, 0]] <-- this is applied to the second batch indices_shape = (2, 3) - + data = [[1, 2, 3, 4, 5], <-- the first batch [6, 7, 8, 9, 10]] <-- the second batch data_shape = (2, 5) - + output = [[ 1, 1, 5], [10, 6, 6]] output_shape = (2, 3) @@ -78,24 +78,24 @@ Example 3 with non-default *batch_dims* value: batch_dims = 2 axis = 2 - + indices = [[[0, 0, 4], <-- this is applied to the first batch, index = (0, 0) [4, 0, 0]], <-- this is applied to the second batch, index = (0, 1) - + [[1, 2, 4], <-- this is applied to the third batch, index = (1, 0) [4, 3, 2]]] <-- this is applied to the fourth batch, index = (1, 1) indices_shape = (2, 2, 3) - + data = [[[1, 2, 3, 4, 5], <-- the first batch, index = (0, 0) [6, 7, 8, 9, 10]], <-- the second batch, index = (0, 1) - + [[11, 12, 13, 14, 15], <-- the third batch, index = (1, 0) [16, 17, 18, 19, 20]]] <-- the fourth batch, index = (1, 1) data_shape = (2, 2, 5) - + output = [[[ 1, 1, 5], [10, 6, 6]], - + [[12, 13, 15], [20, 19, 18]]] output_shape = (2, 2, 3) @@ -106,28 +106,28 @@ Example 4 with *axis* > *batch_dims*: batch_dims = 1 axis = 2 - + indices = [[1, 2, 4], <-- this is applied to the first batch [4, 3, 2]] <-- this is applied to the second batch indices_shape 
= (2, 3) - + data = [[[[ 1, 2, 3, 4], <-- first batch [ 5, 6, 7, 8], [ 9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]]], - + [[[21, 22, 23, 24], <-- second batch [25, 26, 27, 28], [29, 30, 31, 32], [33, 34, 35, 36], [37, 38, 39, 40]]]] data_shape = (2, 1, 5, 4) - + output = [[[[ 5, 6, 7, 8], [ 9, 10, 11, 12], [17, 18, 19, 20]]], - + [[[37, 38, 39, 40], [33, 34, 35, 36], [29, 30, 31, 32]]]] @@ -140,15 +140,15 @@ Example 5 with negative *batch_dims* value: batch_dims = -1 <-- normalized value will be indices.rank + batch_dims = 2 - 1 = 1 axis = 1 - + indices = [[0, 0, 4], <-- this is applied to the first batch [4, 0, 0]] <-- this is applied to the second batch indices_shape = (2, 3) - + data = [[1, 2, 3, 4, 5], <-- the first batch [6, 7, 8, 9, 10]] <-- the second batch data_shape = (2, 5) - + output = [[ 1, 1, 5], [10, 6, 6]] output_shape = (2, 3) @@ -167,7 +167,7 @@ Example 5 with negative *batch_dims* value: **Outputs** -* **1**: The resulting tensor of type *T* that consists of elements from ``data`` tensor gathered by ``indices``. +* **1**: The resulting tensor of type *T* that consists of elements from ``data`` tensor gathered by ``indices``. The shape of the output tensor is ``data.shape[:axis] + indices.shape[batch_dims:] + data.shape[axis + 1:]`` **Types** @@ -193,7 +193,7 @@ Example 5 with negative *batch_dims* value: 32 21 - < !-- axis = 1 --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_8.rst index c4df65f49e1be8..b2bb5bf0235c60 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Gather_8.rst @@ -6,7 +6,7 @@ Gather .. 
meta:: - :description: Learn about Gather-8 - a data movement operation, + :description: Learn about Gather-8 - a data movement operation, which can be performed on three required input tensors. **Versioned name**: *Gather-8* @@ -33,10 +33,10 @@ range output data for corresponding index will be filled with zeros (Example 7). **Attributes**: * *batch_dims* - - * **Description**: *batch_dims* (also denoted as ``b``) is a leading number of dimensions of ``data`` tensor - and ``indices`` representing the batches, and *Gather* starts to gather from the ``b`` dimension. - It requires the first ``b`` dimensions in ``data`` and ``indices`` tensors to be equal. + + * **Description**: *batch_dims* (also denoted as ``b``) is a leading number of dimensions of ``data`` tensor + and ``indices`` representing the batches, and *Gather* starts to gather from the ``b`` dimension. + It requires the first ``b`` dimensions in ``data`` and ``indices`` tensors to be equal. If ``batch_dims`` is less than zero, normalized value is used ``batch_dims = indices.rank + batch_dims``. * **Range of values**: ``[-min(data.rank, indices.rank); min(data.rank, indices.rank)]`` and ``batch_dims' <= axis'``. Where ``batch_dims'`` and ``axis'`` stand for normalized ``batch_dims`` and ``axis`` values. 
@@ -50,7 +50,7 @@ Example 1 with default *batch_dims* value: batch_dims = 0 axis = 0 - + indices = [0, 0, 4] data = [1, 2, 3, 4, 5] output = [1, 1, 5] @@ -61,15 +61,15 @@ Example 2 with non-default *batch_dims* value: batch_dims = 1 axis = 1 - + indices = [[0, 0, 4], <-- this is applied to the first batch [4, 0, 0]] <-- this is applied to the second batch indices_shape = (2, 3) - + data = [[1, 2, 3, 4, 5], <-- the first batch [6, 7, 8, 9, 10]] <-- the second batch data_shape = (2, 5) - + output = [[ 1, 1, 5], [10, 6, 6]] output_shape = (2, 3) @@ -81,24 +81,24 @@ Example 3 with non-default *batch_dims* value: batch_dims = 2 axis = 2 - + indices = [[[0, 0, 4], <-- this is applied to the first batch, index = (0, 0) [4, 0, 0]], <-- this is applied to the second batch, index = (0, 1) - + [[1, 2, 4], <-- this is applied to the third batch, index = (1, 0) [4, 3, 2]]] <-- this is applied to the fourth batch, index = (1, 1) indices_shape = (2, 2, 3) - + data = [[[1, 2, 3, 4, 5], <-- the first batch, index = (0, 0) [6, 7, 8, 9, 10]], <-- the second batch, index = (0, 1) - + [[11, 12, 13, 14, 15], <-- the third batch, index = (1, 0) [16, 17, 18, 19, 20]]] <-- the fourth batch, index = (1, 1) data_shape = (2, 2, 5) - + output = [[[ 1, 1, 5], [10, 6, 6]], - + [[12, 13, 15], [20, 19, 18]]] output_shape = (2, 2, 3) @@ -109,28 +109,28 @@ Example 4 with *axis* > *batch_dims*: batch_dims = 1 axis = 2 - + indices = [[1, 2, 4], <-- this is applied to the first batch [4, 3, 2]] <-- this is applied to the second batch indices_shape = (2, 3) - + data = [[[[ 1, 2, 3, 4], <-- first batch [ 5, 6, 7, 8], [ 9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]]], - + [[[21, 22, 23, 24], <-- second batch [25, 26, 27, 28], [29, 30, 31, 32], [33, 34, 35, 36], [37, 38, 39, 40]]]] data_shape = (2, 1, 5, 4) - + output = [[[[ 5, 6, 7, 8], [ 9, 10, 11, 12], [17, 18, 19, 20]]], - + [[[37, 38, 39, 40], [33, 34, 35, 36], [29, 30, 31, 32]]]] @@ -143,15 +143,15 @@ Example 5 with negative *batch_dims* value: 
batch_dims = -1 <-- normalized value will be indices.rank + batch_dims = 2 - 1 = 1 axis = 1 - + indices = [[0, 0, 4], <-- this is applied to the first batch [4, 0, 0]] <-- this is applied to the second batch indices_shape = (2, 3) - + data = [[1, 2, 3, 4, 5], <-- the first batch [6, 7, 8, 9, 10]] <-- the second batch data_shape = (2, 5) - + output = [[ 1, 1, 5], [10, 6, 6]] output_shape = (2, 3) @@ -163,7 +163,7 @@ Example 6 with negative indices: batch_dims = 0 axis = 0 - + indices = [0, -2, -1] data = [1, 2, 3, 4, 5] output = [1, 4, 5] @@ -175,8 +175,8 @@ Example 7 with indices out of the range: batch_dims = 0 axis = 0 - - indices = [3, 10, -20] + + indices = [3, 10, -20] data = [1, 2, 3, 4, 5] output = [4, 0, 0] @@ -221,7 +221,7 @@ of the output tensor is ``data.shape[:axis] + indices.shape[batch_dims:] + data. 32 21 - < !-- axis = 1 --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_1.rst index 3c44d8b8188c76..41ef471065e158 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_1.rst @@ -5,7 +5,7 @@ Pad .. meta:: - :description: Learn about Pad-1 - a data movement operation, + :description: Learn about Pad-1 - a data movement operation, which can be performed on three required and one optional input tensor. **Versioned name**: *Pad-1* @@ -26,7 +26,7 @@ The following examples illustrate how output tensor is generated for the *Pad* l [ 9 10 11 12 ]] -with the following attributes: +with the following attributes: .. code-block:: cpp @@ -36,7 +36,7 @@ with the following attributes: depending on the *pad_mode*. -* ``pad_mode = "constant"``: +* ``pad_mode = "constant"``: .. code-block:: cpp @@ -48,7 +48,7 @@ depending on the *pad_mode*. 
[ 0 0 0 0 0 0 0 0 ]] -* ``pad_mode = "edge"``: +* ``pad_mode = "edge"``: .. code-block:: cpp @@ -121,7 +121,7 @@ depending on the *pad_mode*. **Example**: constant mode .. code-block:: xml - :force: + :force: @@ -133,22 +133,22 @@ depending on the *pad_mode*. 40 - 4 < !-- pads_begin = [0, 5, 2, 1] --> + 4 - 4 < !-- pads_end = [1, 0, 3, 7] --> + 4 - < !-- pad_value = 15.0 --> + - 2 < !-- 2 = 0 + 1 + 1 = pads_begin[0] + input.shape[0] + pads_end[0] --> - 8 < !-- 8 = 5 + 3 + 0 = pads_begin[1] + input.shape[1] + pads_end[1] --> - 37 < !-- 37 = 2 + 32 + 3 = pads_begin[2] + input.shape[2] + pads_end[2] --> - 48 < !-- 48 = 1 + 40 + 7 = pads_begin[3] + input.shape[3] + pads_end[3] --> - < !-- all new elements are filled with 15.0 value --> + 2 + 8 + 37 + 48 + @@ -169,18 +169,18 @@ depending on the *pad_mode*. 40 - 4 < !-- pads_begin = [0, 5, 2, 1] --> + 4 - 4 < !-- pads_end = [1, 0, 3, 7] --> + 4 - 2 < !-- 2 = 0 + 1 + 1 = pads_begin[0] + input.shape[0] + pads_end[0] --> - 8 < !-- 8 = 5 + 3 + 0 = pads_begin[1] + input.shape[1] + pads_end[1] --> - 37 < !-- 37 = 2 + 32 + 3 = pads_begin[2] + input.shape[2] + pads_end[2] --> - 48 < !-- 48 = 1 + 40 + 7 = pads_begin[3] + input.shape[3] + pads_end[3] --> + 2 + 8 + 37 + 48 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_12.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_12.rst index eea353f0934bbb..1c214393d6cc4e 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_12.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Pad_12.rst @@ -5,7 +5,7 @@ Pad .. meta:: - :description: Learn about Pad-12 - a data movement operation, + :description: Learn about Pad-12 - a data movement operation, which can be performed on three required and one optional input tensor. 
**Versioned name**: *Pad-12* @@ -19,7 +19,7 @@ Pad The following examples illustrate how output tensor is generated for the *Pad* layer for a given inputs: Positive pads example: -######################## +######################## .. code-block:: cpp @@ -82,7 +82,7 @@ depending on the *pad_mode* attribute: Negative pads example: -######################### +######################### .. code-block:: cpp @@ -111,7 +111,7 @@ for all of the *pad_mode* attribute options: Mixed pads example: -######################## +######################## .. code-block:: cpp @@ -178,7 +178,7 @@ Mixed pads example: * **Description**: *pad_mode* specifies the method used to generate the padding values. * **Range of values**: Name of the method in string format: - + * ``constant`` - padded values are taken from the *pad_value* input. If the input is not provided, the padding elements are equal to zero. * ``edge`` - padded values are copied from the respective edge of the input ``data`` tensor. * ``reflect`` - padded values are a reflection of the input `data` tensor. Values on the edges are not duplicated, ``pads_begin[D]`` and ``pads_end[D]`` must be not greater than ``data.shape[D] – 1`` for any valid ``D``. @@ -223,22 +223,22 @@ Mixed pads example: 40 - 4 < !-- pads_begin = [0, 5, 2, 1] --> + 4 - 4 < !-- pads_end = [1, 0, 3, 7] --> + 4 - < !-- pad_value = 15.0 --> + - 2 < !-- 2 = 0 + 1 + 1 = pads_begin[0] + input.shape[0] + pads_end[0] --> - 8 < !-- 8 = 5 + 3 + 0 = pads_begin[1] + input.shape[1] + pads_end[1] --> - 37 < !-- 37 = 2 + 32 + 3 = pads_begin[2] + input.shape[2] + pads_end[2] --> - 48 < !-- 48 = 1 + 40 + 7 = pads_begin[3] + input.shape[3] + pads_end[3] --> - < !-- all new elements are filled with 15.0 value --> + 2 + 8 + 37 + 48 + @@ -247,7 +247,7 @@ Mixed pads example: **Example**: constant mode (positive and negative pads) .. 
code-block:: xml - :force: + :force: @@ -259,22 +259,22 @@ Mixed pads example: 40 - 4 < !-- pads_begin = [0, -2, -8, 1] --> + 4 - 4 < !-- pads_end = [-1, 4, -6, 7] --> + 4 - < !-- pad_value = 15.0 --> + - 1 < !-- 2 = 0 + 2 + (-1) = pads_begin[0] + input.shape[0] + pads_end[0] --> - 5 < !-- 5 = (-2) + 3 + 4 = pads_begin[1] + input.shape[1] + pads_end[1] --> - 18 < !-- 18 = (-8) + 32 (-6) = pads_begin[2] + input.shape[2] + pads_end[2] --> - 48 < !-- 48 = 1 + 40 + 7 = pads_begin[3] + input.shape[3] + pads_end[3] --> - < !-- all new elements are filled with 15.0 value --> + 1 + 5 + 18 + 48 + @@ -283,7 +283,7 @@ Mixed pads example: **Example**: edge mode .. code-block:: xml - :force: + :force: @@ -295,18 +295,18 @@ Mixed pads example: 40 - 4 < !-- pads_begin = [0, 5, 2, 1] --> + 4 - 4 < !-- pads_end = [1, 0, 3, 7] --> + 4 - 2 < !-- 2 = 0 + 1 + 1 = pads_begin[0] + input.shape[0] + pads_end[0] --> - 8 < !-- 8 = 5 + 3 + 0 = pads_begin[1] + input.shape[1] + pads_end[1] --> - 37 < !-- 37 = 2 + 32 + 3 = pads_begin[2] + input.shape[2] + pads_end[2] --> - 48 < !-- 48 = 1 + 40 + 7 = pads_begin[3] + input.shape[3] + pads_end[3] --> + 2 + 8 + 37 + 48 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ReverseSequence_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ReverseSequence_1.rst index 5b6e7909f67b10..9497c6a1a1fc94 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ReverseSequence_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ReverseSequence_1.rst @@ -5,7 +5,7 @@ ReverseSequence .. meta:: - :description: Learn about ReverseSequence-1 - a data movement operation, + :description: Learn about ReverseSequence-1 - a data movement operation, which can be performed on two required input tensors. 
**Versioned name**: *ReverseSequence-1* @@ -58,14 +58,14 @@ ReverseSequence - < !-- data --> - 4 < !-- batch_axis --> - 10 < !-- seq_axis --> + + 4 + 10 100 200 - 4 < !-- seq_lengths value: [2, 4, 8, 10] --> + 4 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Reverse_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Reverse_1.rst index e9325266d00294..fc22ffc74dd410 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Reverse_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Reverse_1.rst @@ -5,7 +5,7 @@ Reverse .. meta:: - :description: Learn about Reverse-1 - a data movement operation, + :description: Learn about Reverse-1 - a data movement operation, which can be performed on one required and one optional input tensor. **Versioned name**: *Reverse-1* @@ -62,7 +62,7 @@ If no axis specified, that means either the second input is empty if ``index`` m 200 - 1 < !-- reverting along single axis --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Roll_7.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Roll_7.rst index a8c9df7c993a74..20b086f93e225d 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Roll_7.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Roll_7.rst @@ -5,7 +5,7 @@ Roll .. meta:: - :description: Learn about Roll-7 - a data movement operation, which can be + :description: Learn about Roll-7 - a data movement operation, which can be performed on three required input tensors. **Versioned name**: *Roll-7* @@ -100,7 +100,7 @@ No attributes available. 
2 - 2 < !-- shifting along specified axes with the corresponding shift values --> + 2 @@ -131,7 +131,7 @@ No attributes available. 1 - 2 < !-- shifting along specified axes with the same shift value --> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_12.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_12.rst index d0e810326634f6..aa5b2809e71219 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_12.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_12.rst @@ -130,22 +130,22 @@ Accordingly for 3D tensor case, the update of the element corresponding to the ` - > < !-- data --> - 4 < !-- values: [2, 3, 4, 6] --> + > + 4 - < !-- indices (negative values allowed) --> - 6 < !-- values: [1, 0, 0, -2, -1, 2] --> + + 6 - > < !-- updates --> - 6 < !-- values: [10, 20, 30, 40, 70, 60] --> + > + 6 - < !-- values: [0] --> + 1 - 4 < !-- values: [52, 13, 104, 76] --> + 4 @@ -157,22 +157,22 @@ Accordingly for 3D tensor case, the update of the element corresponding to the ` - > < !-- data --> - 4 < !-- values: [2, 3, 4, 6] --> + > + 4 - < !-- indices --> - 6 < !-- values: [1, 0, 0, 2, 3, 2] --> + + 6 - > < !-- updates --> - 6 < !-- values: [10, 20, 30, 40, 70, 60] --> + > + 6 - < !-- values: [0] --> + 1 - 4 < !-- values: [50, 10, 100, 70] --> + 4 @@ -184,30 +184,30 @@ Accordingly for 3D tensor case, the update of the element corresponding to the ` - > < !-- data --> + > 3 - 4 < !-- values: [[0, 0, 0, 0], + 4 - < !-- indices --> + 2 - 2 < !-- values: [[1, 2], + 2 - > < !-- updates --> + > 2 - 2 < !-- values: [[11, 12], + 2 - < !-- values: [1] --> + 1 3 - 4 < !-- values: [[ 0, 11, 12, 0], + 4 @@ -221,30 +221,30 @@ Accordingly for 3D tensor case, the update of the element 
corresponding to the ` - > < !-- data --> + > 3 - 4 < !-- values: [[1, 1, 1, 1], + 4 - < !-- indices --> + 2 - 2 < !-- values: [[1, 1], + 2 - > < !-- updates --> + > 2 - 2 < !-- values: [[11, 12], + 2 - < !-- values: [1] --> + 1 3 - 4 < !-- values: [[ 1, 24, 1, 1], + 4 @@ -258,30 +258,30 @@ Accordingly for 3D tensor case, the update of the element corresponding to the ` - > < !-- data --> + > 3 - 4 < !-- values: [[2, 2, 2, 2], + 4 - < !-- indices --> + 2 - 2 < !-- values: [[1, 1], + 2 - > < !-- updates --> + > 2 - 2 < !-- values: [[11, 12], + 2 - < !-- values: [1] --> + 1 3 - 4 < !-- values: [[ 2, 264, 2, 2], + 4 @@ -313,7 +313,7 @@ Accordingly for 3D tensor case, the update of the element corresponding to the ` 7 6 - < !-- values: [0] --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_3.rst index fe927abc9b9e95..69eafbb10f7c26 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterElementsUpdate_3.rst @@ -5,7 +5,7 @@ ScatterElementsUpdate .. meta:: - :description: Learn about ScatterElementsUpdate-3 - a data movement operation, which can be + :description: Learn about ScatterElementsUpdate-3 - a data movement operation, which can be performed on four required input tensors. **Versioned name**: *ScatterElementsUpdate-3* @@ -81,7 +81,7 @@ The value can be in range ``[-r, r - 1]`` where ``r`` is the rank of ``data``. 
* 7 6 - < !-- value [0] --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterUpdate_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterUpdate_3.rst index f6fba7c4427115..86bbacc8b1a7cc 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterUpdate_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/ScatterUpdate_3.rst @@ -5,7 +5,7 @@ ScatterUpdate .. meta:: - :description: Learn about ScatterUpdate-3 - a data movement operation, which can be + :description: Learn about ScatterUpdate-3 - a data movement operation, which can be performed on four required input tensors. **Versioned name**: *ScatterUpdate-3* @@ -35,14 +35,14 @@ Several examples for case when `axis = 0`: * **1**: ``data`` tensor of arbitrary rank ``r`` and type *T_NUMERIC*. **Required.** -* **2**: ``indices`` tensor with indices of type *T_IND*. All index values are expected to be within bounds ``[0, s - 1]`` along the axis of size ``s``. If multiple indices point to the -same output location, the order of updating the values is undefined. If an index points to a non-existing output -tensor element or is negative, then an exception is raised. **Required.** +* **2**: ``indices`` tensor with indices of type *T_IND*. All index values are expected to be within bounds ``[0, s - 1]`` along the axis + of size ``s``. If multiple indices point to the same output location, the order of updating the values is undefined. + If an index points to a non-existing output tensor element or is negative, then an exception is raised. **Required.** * **3**: ``updates`` tensor of type *T_NUMERIC* and rank equal to ``rank(indices) + rank(data) - 1`` **Required.** * **4**: ``axis`` tensor with scalar or 1D tensor with one element of type *T_AXIS* specifying axis for scatter. 
-The value can be in the range ``[ -r, r - 1]``, where ``r`` is the rank of ``data``. **Required.** + The value can be in the range ``[ -r, r - 1]``, where ``r`` is the rank of ``data``. **Required.** **Outputs**: @@ -65,29 +65,29 @@ The value can be in the range ``[ -r, r - 1]``, where ``r`` is the rank of ``dat - < !-- data --> + 1000 256 10 15 - < !-- indices --> + 125 20 - < !-- updates --> + 1000 125 20 10 15 - < !-- axis --> - 1 < !-- value [1] --> + + 1 - < !-- output --> + 1000 256 10 @@ -103,26 +103,26 @@ The value can be in the range ``[ -r, r - 1]``, where ``r`` is the rank of ``dat - < !-- data --> - 3 < !-- {{-1.0f, 1.0f, -1.0f, 3.0f, 4.0f}, --> - 5 < !-- {-1.0f, 6.0f, -1.0f, 8.0f, 9.0f}, --> - < !-- {-1.0f, 11.0f, 1.0f, 13.0f, 14.0f}} --> - < !-- indices --> - 2 < !-- {0, 2} --> + + 3 + 5 + + + 2 - < !-- updates --> - 3 < !-- {1.0f, 1.0f} --> - 2 < !-- {1.0f, 1.0f} --> - < !-- {1.0f, 2.0f} --> - < !-- axis --> - 1 < !-- {1} --> + + 3 + 2 + + + 1 - < !-- output --> - 3 < !-- {{1.0f, 1.0f, 1.0f, 3.0f, 4.0f}, --> - 5 < !-- {1.0f, 6.0f, 1.0f, 8.0f, 9.0f}, --> - < !-- {1.0f, 11.0f, 2.0f, 13.0f, 14.0f}} --> + + 3 + 5 + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Slice_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Slice_8.rst index 22b0a7bfbd72af..207ddda0721436 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Slice_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Slice_8.rst @@ -5,7 +5,7 @@ Slice .. meta:: - :description: Learn about Slice-8 - a data movement operation, + :description: Learn about Slice-8 - a data movement operation, which can be performed on four required and one optional input tensor. 
**Versioned name**: *Slice-8* @@ -82,24 +82,24 @@ Example 1: basic slicing - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [1] --> + 1 - < !-- stop: [8] --> + 1 - < !-- step: [1] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [1, 2, 3, 4, 5, 6, 7] --> + 7 @@ -113,21 +113,21 @@ Example 2: basic slicing, ``axes`` default - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [1] --> + 1 - < !-- stop: [8] --> + 1 - < !-- step: [1] --> + 1 - < !-- output: [1, 2, 3, 4, 5, 6, 7] --> + 7 @@ -141,24 +141,24 @@ Example 3: basic slicing, ``step: [2]`` - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [1] --> + 1 - < !-- stop: [8] --> + 1 - < !-- step: [2] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [1, 3, 5, 7] --> + 4 @@ -171,24 +171,24 @@ Example 4: ``start`` and ``stop`` out of the dimension size, ``step: [1]`` - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [-100] --> + 1 - < !-- stop: [100] --> + 1 - < !-- step: [1] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 @@ -202,24 +202,24 @@ Example 5: slicing backward all elements, ``step: [-1]``, ``stop: [-11]`` - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [9] --> + 1 - < !-- stop: [-11] --> + 1 - < !-- step: [-1] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] --> + 10 @@ -233,29 +233,29 @@ Example 6: slicing backward, ``step: [-1]``, ``stop: [0]`` - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [9] --> + 1 - < !-- stop: [0] --> + 1 - < !-- step: [-1] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [9, 8, 7, 6, 5, 4, 3, 2, 1] --> + 9 - + Example 7: slicing backward, ``step: [-1]``, ``stop: [-10]`` @@ -264,24 +264,24 @@ Example 7: slicing backward, ``step: [-1]``, ``stop: [-10]`` - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [9] --> + 1 - < !-- stop: [-10] --> + 1 - < !-- step: [-1] --> + 1 - < !-- 
axes: [0] --> + 1 - < !-- output: [9, 8, 7, 6, 5, 4, 3, 2, 1] --> + 9 @@ -295,24 +295,24 @@ Example 8: slicing backward, ``step: [-2]`` - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [9] --> + 1 - < !-- stop: [-11] --> + 1 - < !-- step: [-2] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [9, 7, 5, 3, 1] --> + 5 @@ -326,24 +326,24 @@ Example 9: ``start`` and ``stop`` out of the dimension size, slicing backward - < !-- data: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] --> + 10 - < !-- start: [100] --> + 1 - < !-- stop: [-100] --> + 1 - < !-- step: [-1] --> + 1 - < !-- axes: [0] --> + 1 - < !-- output: [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] --> + 10 @@ -357,31 +357,31 @@ Example 10: slicing 2D tensor, all axes specified - < !-- data: data: [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] --> + 2 5 - < !-- start: [0, 1] --> + 2 - < !-- stop: [2, 4] --> + 2 - < !-- step: [1, 2] --> + 2 - < !-- axes: [0, 1] --> + 2 - < !-- output: [1, 3, 6, 8] --> + 2 2 - + Example 11: slicing 3D tensor, all axes specified @@ -390,26 +390,26 @@ Example 11: slicing 3D tensor, all axes specified - < !-- data --> + 20 10 5 - < !-- start: [0, 0, 0] --> + 2 - < !-- stop: [4, 10, 5] --> + 2 - < !-- step: [1, 1, 1] --> + 2 - < !-- axes: [0, 1, 2] --> + 2 - < !-- output --> + 4 10 5 @@ -424,26 +424,26 @@ Example 12: slicing 3D tensor, last axes default - < !-- data --> + 20 10 5 - < !-- start: [0, 0] --> + 2 - < !-- stop: [4, 10] --> + 2 - < !-- step: [1, 1] --> + 2 - < !-- axes: [0, 1] --> + 2 - < !-- output --> + 4 10 5 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToBatch_2.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToBatch_2.rst index 5b1f060e7bc7cb..6b7ddb69c2ef53 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToBatch_2.rst +++ 
b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/SpaceToBatch_2.rst @@ -5,7 +5,7 @@ SpaceToBatch .. meta:: - :description: Learn about SpaceToBatch-2 - a data movement operation, + :description: Learn about SpaceToBatch-2 - a data movement operation, which can be performed on four required input tensors. **Versioned name**: *SpaceToBatch-2* @@ -79,30 +79,30 @@ No attributes available. - < !-- data --> - 2 < !-- batch --> - 6 < !-- spatial dimension 1 --> - 10 < !-- spatial dimension 2 --> - 3 < !-- spatial dimension 3 --> - 3 < !-- spatial dimension 4 --> + + 2 + 6 + 10 + 3 + 3 - < !-- block_shape value: [1, 2, 4, 3, 1] --> + 5 - < !-- pads_begin value: [0, 0, 1, 0, 0] --> + 5 - < !-- pads_end value: [0, 0, 1, 0, 0] --> + 5 - 48 < !-- data.shape[0] * block_shape.shape[0] * block_shape.shape[1] *... * block_shape.shape[4] --> - 3 < !-- (data.shape[1] + pads_begin[1] + pads_end[1]) / block_shape.shape[1] --> - 3 < !-- (data.shape[2] + pads_begin[2] + pads_end[2]) / block_shape.shape[2] --> - 1 < !-- (data.shape[3] + pads_begin[3] + pads_end[3]) / block_shape.shape[3] --> - 3 < !-- (data.shape[4] + pads_begin[4] + pads_end[4]) / block_shape.shape[4] --> + 48 + 3 + 3 + 1 + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Split_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Split_1.rst index bf955d0c8b6d58..0731920bd6db48 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Split_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Split_1.rst @@ -5,7 +5,7 @@ Split .. meta:: - :description: Learn about Split-1 - a data movement operation, + :description: Learn about Split-1 - a data movement operation, which can be performed on two required input tensors. 
**Versioned name**: *Split-1* @@ -58,13 +58,13 @@ Where D is the rank of input tensor ``data``. The axis being split must be evenl - < !-- some data --> + 6 12 10 24 - < !-- axis: 1 --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/StridedSlice_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/StridedSlice_1.rst index b282848e4af0e1..a4025de9a9f924 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/StridedSlice_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/StridedSlice_1.rst @@ -5,7 +5,7 @@ StridedSlice .. meta:: - :description: Learn about StridedSlice-1 - a data movement operation, + :description: Learn about StridedSlice-1 - a data movement operation, which can be performed on three required and one optional input tensor. **Versioned name**: *StridedSlice-1* @@ -88,13 +88,13 @@ Example of ``begin_mask`` & ``end_mask`` usage. 4 - 2 < !-- begin: [1, 0, 0] --> + 2 - 2 < !-- end: [0, 0, 2] --> + 2 - 2 < !-- stride: [1, 1, 1] --> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Tile_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Tile_1.rst index 6c734cc3bba114..f788e136c8fa62 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Tile_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Tile_1.rst @@ -5,7 +5,7 @@ Tile .. meta:: - :description: Learn about Tile-1 - a data movement operation, which can be + :description: Learn about Tile-1 - a data movement operation, which can be performed on two required input tensors. **Versioned name**: *Tile-1* @@ -39,10 +39,10 @@ No attributes available. 
*Tile* operation extends input tensor and filling in output tensor by the following rules: -.. math:: +.. math:: out_i=input_i[inner_dim*t] - + .. math:: t \in \left ( 0, \quad tiles \right ) @@ -62,7 +62,7 @@ No attributes available. 4 - 3 < !-- [1, 2, 3] --> + 3 @@ -81,13 +81,13 @@ No attributes available. - < !-- will be promoted to shape (1, 2, 3, 4) --> + 2 3 4 - 4 < !-- [5, 1, 2, 3] --> + 4 @@ -114,7 +114,7 @@ No attributes available. 4 - 3 < !-- [1, 2, 3] will be promoted to [1, 1, 2, 3] --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Transpose_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Transpose_1.rst index 28cd04c2767e18..54dbdb1d13d1f3 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Transpose_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/Transpose_1.rst @@ -5,7 +5,7 @@ Transpose .. meta:: - :description: Learn about Transpose-1 - a data movement operation, which can be + :description: Learn about Transpose-1 - a data movement operation, which can be performed on two required input tensors. **Versioned name**: *Transpose-1* @@ -53,7 +53,7 @@ Transpose 4 - 3 < !-- [2, 0, 1] --> + 3 @@ -79,7 +79,7 @@ Transpose 4 - 0 < !-- input_order is an empty 1D tensor --> + 0 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/VariadicSplit_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/VariadicSplit_1.rst index d87c037d44a9f0..19b64c2711d347 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/VariadicSplit_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/movement/VariadicSplit_1.rst @@ -5,7 +5,7 @@ VariadicSplit .. 
meta:: - :description: Learn about VariadicSplit-1 - a data movement operation, which can be + :description: Learn about VariadicSplit-1 - a data movement operation, which can be performed on three required input tensors. **Versioned name**: *VariadicSplit-1* @@ -20,7 +20,7 @@ VariadicSplit The i-th output tensor shape is equal to the input tensor `data` shape, except for dimension along `axis` which is ``split_lengths[i]``. .. math:: - + shape\_output\_tensor = [data.shape[0], data.shape[1], \dotsc , split\_lengths[i], \dotsc , data.shape[D-1]] Where D is the rank of input tensor `data`. The sum of elements in ``split_lengths`` must match ``data.shape[axis]``. @@ -49,16 +49,16 @@ Where D is the rank of input tensor `data`. The sum of elements in ``split_lengt - < !-- some data --> + 6 12 10 24 - < !-- axis: 0 --> + - 3 < !-- split_lengths: [1, 2, 3] --> + 3 @@ -89,21 +89,21 @@ Where D is the rank of input tensor `data`. The sum of elements in ``split_lengt - < !-- some data --> + 6 12 10 24 - < !-- axis: 0 --> + - 2 < !-- split_lengths: [-1, 2] --> + 2 - 4 < !-- 4 = 6 - 2 --> + 4 12 10 24 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_1.rst index e5233fb3d3eb7a..91b803f757d519 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_1.rst @@ -5,7 +5,7 @@ BatchNormInference .. meta:: - :description: Learn about BatchNormInference-5 - a normalization operation, which can be + :description: Learn about BatchNormInference-5 - a normalization operation, which can be performed on five required input tensors. 
**Versioned name**: *BatchNormInference-5* @@ -19,19 +19,19 @@ BatchNormInference *BatchNormInference* performs the following operations on a given data batch input tensor ``data``: * Normalizes each activation :math:`x^{(k)}` by the mean and variance. - + .. math:: - + \hat{x}^{(k)}=\frac{x^{(k)} - E[x^{(k)}]}{\sqrt{Var(x^{(k)}) + \epsilon}} where :math:`E[x^{(k)}]` and :math:`Var(x^{(k)})` are the mean and variance, calculated per channel axis of ``data`` input, and correspond to ``mean`` and ``variance`` inputs, respectively. Additionally, :math:`\epsilon` is a value added to the variance for numerical stability and corresponds to ``epsilon`` attribute. * Performs linear transformation of each normalized activation based on ``gamma`` and ``beta`` input, representing the scaling factor and shift, respectively. - + .. math:: - + \hat{y}^{(k)}=\gamma^{(k)}\hat{x}^{(k)} + \beta^{(k)} - + where :math:`\gamma^{(k)}` and :math:`\beta^{(k)}` are learnable parameters, calculated per channel axis, and correspond to ``gamma`` and ``beta`` inputs. **Mathematical Formulation** @@ -41,46 +41,46 @@ Let ``x`` be a *d*-dimensional input, :math:`x=(x_{1}\dotsc x_{d})`. Since norma For a particular activation, consider a mini-batch :math:`\mathcal{B}` of m values. *BatchNormInference* performs Batch Normalization algorithm as follows: * **Input**: Values of :math:`x` over a mini-batch: - + .. math:: - + \mathcal{B} = {x_{1...m}} * **Parameters to learn**: :math:`\gamma, \beta` * **Output**: - + .. math:: - + {o_{i} = BN_{\gamma, \beta} ( b_{i} )} * **Mini-batch mean**: - + .. math:: - + \mu_{\mathcal{B}} \leftarrow \frac{1}{m}\sum_{i=1}^{m}b_{i} * **Mini-batch variance**: - + .. math:: - + \sigma_{\mathcal{B}}^{2}\leftarrow \frac{1}{m}\sum_{i=1}^{m} ( b_{i} - \mu_{\mathcal{B}})^{2} * **Normalize**: - + .. math:: - + \hat{b_{i}} \leftarrow \frac{b_{i} - \mu_{\mathcal{B}}}{\sqrt{\sigma_{\mathcal{B}}^{2} + \epsilon }} * **Scale and shift**: - + .. 
math:: - + o_{i} \leftarrow \gamma\hat{b_{i}} + \beta = BN_{\gamma ,\beta } ( b_{i} ) **Attributes**: * *epsilon* - + * **Description**: *epsilon* is a constant added to the variance for numerical stability. * **Range of values**: a floating-point number greater than or equal to zero * **Type**: ``float`` @@ -104,28 +104,28 @@ For a particular activation, consider a mini-batch :math:`\mathcal{B}` of m valu **Examples** -Example: 2D input tensor ``data`` +Example: 2D input tensor ``data`` .. code-block:: xml :force: - + - < !-- input --> + 10 128 - < !-- gamma --> + 128 - < !-- beta --> + 128 - < !-- mean --> + 128 - < !-- variance --> + 128 @@ -141,26 +141,26 @@ Example: 4D input tensor ``data`` .. code-block:: xml :force: - + - < !-- input --> + 1 3 224 224 - < !-- gamma --> + 3 - < !-- beta --> + 3 - < !-- mean --> + 3 - < !-- variance --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_5.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_5.rst index 5c8bb387c4a116..d5a11a0db718c8 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_5.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/BatchNormInference_5.rst @@ -5,7 +5,7 @@ BatchNormInference .. meta:: - :description: Learn about BatchNormInference-5 - a normalization operation, which can be + :description: Learn about BatchNormInference-5 - a normalization operation, which can be performed on five required input tensors. **Versioned name**: *BatchNormInference-5* @@ -21,17 +21,17 @@ BatchNormInference * Normalizes each activation :math:`x^{(k)}` by the mean and variance. .. 
math:: - + \hat{x}^{(k)}=\frac{x^{(k)} - E[x^{(k)}]}{\sqrt{Var(x^{(k)}) + \epsilon}} - + where :math:`E[x^{(k)}]` and :math:`Var(x^{(k)})` are the mean and variance, calculated per channel axis of ``data`` input, and correspond to ``mean`` and ``variance`` inputs, respectively. Additionally, :math:`\epsilon` is a value added to the variance for numerical stability and corresponds to ``epsilon`` attribute. * Performs linear transformation of each normalized activation based on ``gamma`` and ``beta`` input, representing the scaling factor and shift, respectively. .. math:: - + \hat{y}^{(k)}=\gamma^{(k)}\hat{x}^{(k)} + \beta^{(k)} - + where :math:`\gamma^{(k)}` and :math:`\beta^{(k)}` are learnable parameters, calculated per channel axis, and correspond to ``gamma`` and ``beta`` inputs. **Mathematical Formulation** @@ -41,47 +41,47 @@ Let ``x`` be a *d*-dimensional input, :math:`x=(x_{1}\dotsc x_{d})`. Since norma For a particular activation, consider a mini-batch :math:`\mathcal{B}` of m values. *BatchNormInference* performs Batch Normalization algorithm as follows: * **Input**: Values of :math:`x` over a mini-batch: - + .. math:: - + \mathcal{B} = {x_{1...m}} - + * **Parameters to learn**: :math:`\gamma, \beta` * **Output**: - + .. math:: - + {o_{i} = BN_{\gamma, \beta} ( b_{i} )} - + * **Mini-batch mean**: - + .. math:: - + \mu_{\mathcal{B}} \leftarrow \frac{1}{m}\sum_{i=1}^{m}b_{i} * **Mini-batch variance**: - + .. math:: - + \sigma_{\mathcal{B}}^{2}\leftarrow \frac{1}{m}\sum_{i=1}^{m} ( b_{i} - \mu_{\mathcal{B}})^{2} * **Normalize**: - + .. math:: - + \hat{b_{i}} \leftarrow \frac{b_{i} - \mu_{\mathcal{B}}}{\sqrt{\sigma_{\mathcal{B}}^{2} + \epsilon }} * **Scale and shift**: - + .. math:: - + o_{i} \leftarrow \gamma\hat{b_{i}} + \beta = BN_{\gamma ,\beta } ( b_{i} ) **Attributes**: * *epsilon* - + * **Description**: *epsilon* is a constant added to the variance for numerical stability. 
* **Range of values**: a floating-point number greater than or equal to zero * **Type**: ``float`` @@ -109,24 +109,24 @@ Example: 2D input tensor ``data`` .. code-block:: xml :force: - + - < !-- input --> + 10 128 - < !-- gamma --> + 128 - < !-- beta --> + 128 - < !-- mean --> + 128 - < !-- variance --> + 128 @@ -142,26 +142,26 @@ Example: 4D input tensor ``data`` .. code-block:: xml :force: - + - < !-- input --> + 1 3 224 224 - < !-- gamma --> + 3 - < !-- beta --> + 3 - < !-- mean --> + 3 - < !-- variance --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/LRN_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/LRN_1.rst index c1fb7927f3ddf5..2231e3bc7fa7ed 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/LRN_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/LRN_1.rst @@ -5,7 +5,7 @@ LRN .. meta:: - :description: Learn about LRN-1 - a normalization operation, which can be + :description: Learn about LRN-1 - a normalization operation, which can be performed on two required input tensors. **Versioned name**: *LRN-1* @@ -105,7 +105,7 @@ Example for 4D ``data`` input tensor and ``axes = [2, 3]``: 24 - 1 < !-- value is [1] that means independent normalization for each pixel along channels --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_6.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_6.rst index ba04fb3b8cec33..444762f7ef0b57 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_6.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/MVN_6.rst @@ -5,7 +5,7 @@ MVN .. 
meta:: - :description: Learn about MVN-6 - a normalization operation, which can be + :description: Learn about MVN-6 - a normalization operation, which can be performed on two required input tensors. **Versioned name**: *MVN-6* @@ -100,7 +100,7 @@ If *normalize_variance* is set to ``true``, the output blob is divided by varian 24 - 3 < !-- value of [0,2,3] means independent normalization per channels --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/NormalizeL2_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/NormalizeL2_1.rst index 8bd1da903fdcc9..61b3d439a2ef7f 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/NormalizeL2_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/normalization/NormalizeL2_1.rst @@ -5,7 +5,7 @@ NormalizeL2 .. meta:: - :description: Learn about MVN-1 - a normalization operation, which can be + :description: Learn about MVN-1 - a normalization operation, which can be performed on two required input tensors. 
**Versioned name**: *NormalizeL2-1* @@ -79,7 +79,7 @@ Example: Normalization over channel dimension for ``NCHW`` layout 24 - 1 < !-- axes list [1] means normalization over channel dimension --> + 1 @@ -108,7 +108,7 @@ Example: Normalization over channel and spatial dimensions for ``NCHW`` layout 24 - 3 < !-- axes list [1, 2, 3] means normalization over channel and spatial dimensions --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL1_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL1_4.rst index e2cd7c83c2feb3..6acdb9e5786943 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL1_4.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL1_4.rst @@ -5,7 +5,7 @@ ReduceL1 .. meta:: - :description: Learn about ReduceL1-4 - a reduction operation, which can be + :description: Learn about ReduceL1-4 - a reduction operation, which can be performed on two required input tensors. 
**Versioned name**: *ReduceL1-4* @@ -68,7 +68,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -96,7 +96,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -122,7 +122,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -149,7 +149,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 @@ -160,4 +160,4 @@ Particular cases: - + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL2_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL2_4.rst index 0556516d5bfbd3..aa908c97f6c0c4 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL2_4.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceL2_4.rst @@ -5,7 +5,7 @@ ReduceL2 .. meta:: - :description: Learn about ReduceL2-4 - a reduction operation, which can be + :description: Learn about ReduceL2-4 - a reduction operation, which can be performed on two required input tensors. 
**Versioned name**: *ReduceL2-4* @@ -68,7 +68,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -95,7 +95,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -120,7 +120,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -146,7 +146,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalAnd_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalAnd_1.rst index e92b9153a12d12..01dbfc47902f05 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalAnd_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalAnd_1.rst @@ -5,7 +5,7 @@ ReduceLogicalAnd .. meta:: - :description: Learn about ReduceLogicalAnd-1 - a reduction operation, which can be + :description: Learn about ReduceLogicalAnd-1 - a reduction operation, which can be performed on two required input tensors. 
**Versioned name**: *ReduceLogicalAnd-1* @@ -70,7 +70,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -97,7 +97,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -122,7 +122,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -148,7 +148,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalOr_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalOr_1.rst index 7cfdbc95eea5a0..e033e136f5b0e2 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalOr_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceLogicalOr_1.rst @@ -5,7 +5,7 @@ ReduceLogicalOr .. meta:: - :description: Learn about ReduceLogicalOr-1 - a reduction operation, which can be + :description: Learn about ReduceLogicalOr-1 - a reduction operation, which can be performed on two required input tensors. 
**Versioned name**: *ReduceLogicalOr-1* @@ -70,7 +70,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -97,7 +97,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -121,7 +121,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -147,7 +147,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst index 5037372de4cbce..4e22be42d2636d 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst @@ -5,7 +5,7 @@ ReduceMax .. meta:: - :description: Learn about ReduceMax-1 - a reduction operation, which can be + :description: Learn about ReduceMax-1 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceMax-1* @@ -72,7 +72,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -99,7 +99,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -124,7 +124,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -150,7 +150,7 @@ Reducing empty tensor results in an undefined behavior. 
24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMean_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMean_1.rst index c3b71fa89c95e1..9aef4e981cf46f 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMean_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMean_1.rst @@ -5,7 +5,7 @@ ReduceMean .. meta:: - :description: Learn about ReduceMean-1 - a reduction operation, which can be + :description: Learn about ReduceMean-1 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceMean-1* @@ -70,7 +70,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -97,7 +97,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -122,7 +122,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -147,7 +147,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst index 4986ddc474606f..f20a0cfda064d4 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst @@ -5,7 +5,7 @@ 
ReduceMin .. meta:: - :description: Learn about ReduceMin-1 - a reduction operation, which can be + :description: Learn about ReduceMin-1 - a reduction operation, which can be performed on two required input tensors. **Versioned name**: *ReduceMin-1* @@ -72,7 +72,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -99,7 +99,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -124,7 +124,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -150,7 +150,7 @@ Reducing empty tensor results in an undefined behavior. 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceProd_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceProd_1.rst index 0c75cb833c6a43..04af9115fb93c5 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceProd_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceProd_1.rst @@ -5,7 +5,7 @@ ReduceProd .. meta:: - :description: Learn about ReduceProd-1 - a reduction operation, which can be + :description: Learn about ReduceProd-1 - a reduction operation, which can be performed on two required input tensors. 
**Versioned name**: *ReduceProd-1* @@ -70,7 +70,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -97,7 +97,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -122,7 +122,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -148,7 +148,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceSum_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceSum_1.rst index d2a4858eae201e..b42536dc0baba7 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceSum_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceSum_1.rst @@ -5,7 +5,7 @@ ReduceSum .. meta:: - :description: Learn about ReduceSum-1 - a reduction operation, which can be + :description: Learn about ReduceSum-1 - a reduction operation, which can be performed on two required input tensors. 
**Versioned name**: *ReduceSum-1* @@ -70,7 +70,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -97,7 +97,7 @@ Particular cases: 24 - 2 < !-- value is [2, 3] that means independent reduction in each channel and batch --> + 2 @@ -122,7 +122,7 @@ Particular cases: 24 - 1 < !-- value is [1] that means independent reduction in each channel and spatial dimensions --> + 1 @@ -148,7 +148,7 @@ Particular cases: 24 - 1 < !-- value is [-2] that means independent reduction in each channel, batch and second spatial dimension --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoderSeqLen_6.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoderSeqLen_6.rst index e64cd14263bebe..6ff19be0776a80 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoderSeqLen_6.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCGreedyDecoderSeqLen_6.rst @@ -5,7 +5,7 @@ CTCGreedyDecoderSeqLen .. meta:: - :description: Learn about CTCGreedyDecoderSeqLen-6 - a sequence processing + :description: Learn about CTCGreedyDecoderSeqLen-6 - a sequence processing operation, which can be performed on two required input tensors. 
**Versioned name**: *CTCGreedyDecoderSeqLen-6* @@ -77,7 +77,7 @@ The main difference between :doc:`CTCGreedyDecoder @@ -89,7 +89,7 @@ The main difference between :doc:`CTCGreedyDecoder 8 - < !-- blank_index = 120 --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCLoss_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCLoss_4.rst index 8f43fc62309fda..49191ec889b5fe 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCLoss_4.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/CTCLoss_4.rst @@ -5,7 +5,7 @@ CTCLoss .. meta:: - :description: Learn about CTCLoss-4 - a sequence processing operation, which + :description: Learn about CTCLoss-4 - a sequence processing operation, which can be performed on four required and one optional input tensor. **Versioned name**: *CTCLoss-4* @@ -29,19 +29,19 @@ Otherwise, the operation behaviour is undefined. 1. Compute probability of ``j``-th character at time step ``t`` for ``i``-th input sequence from ``logits`` using softmax formula: .. math:: - + p_{i,t,j} = \frac{\exp(logits[i,t,j])}{\sum^{K}_{k=0}{\exp(logits[i,t,k])}} 2. For a given ``i``-th target from ``labels[i,:]`` find all aligned paths. A path ``S = (c1,c2,...,cT)`` is aligned with a target ``G=(g1,g2,...,gT)`` if both chains are equal after decoding. The decoding extracts substring of length ``label_length[i]`` from a target ``G``, merges repeated characters in ``G`` in case *preprocess_collapse_repeated* equal to true and finds unique elements in the order of character occurrence in case *unique* equal to true. The decoding merges repeated characters in ``S`` in case *ctc_merge_repeated* equal to true and removes blank characters represented by ``blank_index``. 
By default, ``blank_index`` is equal to ``C-1``, where ``C`` is a number of classes including the blank. For example, in case default *ctc_merge_repeated*, *preprocess_collapse_repeated*, *unique* and ``blank_index`` a target sequence ``G=(0,3,2,2,2,2,2,4,3)`` of a length ``label_length[i]=4`` is processed to ``(0,3,2,2)`` and a path ``S=(0,0,4,3,2,2,4,2,4)`` of a length ``logit_length[i]=9`` is also processed to ``(0,3,2,2)``, where ``C=5``. There exist other paths that are also aligned with ``G``, for instance, ``0,4,3,3,2,4,2,2,2``. Paths checked for alignment with a target ``label[:,i]`` must be of length ``logit_length[i] = L_i``. Compute probabilities of these aligned paths (alignments) as follows: .. math:: - + p(S) = \prod_{t=1}^{L_i} p_{i,t,ct} 3. Finally, compute negative log of summed up probabilities of all found alignments: .. math:: - + CTCLoss = - \ln \sum_{S} p(S) **Note 1**: This calculation scheme does not provide steps for optimal implementation and primarily serves for better explanation. @@ -50,7 +50,7 @@ Otherwise, the operation behaviour is undefined. Having log-probabilities for aligned paths, log of summed up probabilities for these paths can be computed as follows: .. math:: - + \ln(a + b) = \ln(a) + \ln(1 + \exp(\ln(b) - \ln(a))) **Attributes** @@ -100,7 +100,7 @@ Having log-probabilities for aligned paths, log of summed up probabilities for t .. 
code-block:: xml :force: - + @@ -118,7 +118,7 @@ Having log-probabilities for aligned paths, log of summed up probabilities for t 8 - < !-- blank_index value is: 120 --> + diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/OneHot_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/OneHot_1.rst index 77631b7d1fa3f5..8e085896d9b05f 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/OneHot_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sequence/OneHot_1.rst @@ -5,7 +5,7 @@ OneHot .. meta:: - :description: Learn about OneHot-1 - a sequence processing operation, which + :description: Learn about OneHot-1 - a sequence processing operation, which can be performed on four required input tensors. **Versioned name**: *OneHot-1* @@ -64,18 +64,18 @@ The types of input scalars ``on_value`` and ``off_value`` should match and be eq - < !-- indices value: [0, 3, 1, 2] --> + 4 - < !-- depth value: 3 --> + - < !-- on_value 1 --> + - < !-- off_value 2 --> + - < !-- output value # [[1, 2, 2], [2, 2, 2], [2, 1, 2], [2, 2, 1]] --> + 4 3 @@ -90,20 +90,20 @@ The types of input scalars ``on_value`` and ``off_value`` should match and be eq - < !-- indices value: [[0, 3, 1], [1, 2, 4]] --> + 2 3 - < !-- depth value: 3 --> + - < !-- on_value 1 --> + - < !-- off_value 0 --> + - < !-- output value: [[[1, 0, 0], [0, 0, 1], [0, 0, 0]], --> - 2 < !-- [[0, 0, 0], [1, 0, 0], [0, 1, 0]]] --> + + 2 3 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Reshape_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Reshape_1.rst index 6700be5536c1d6..064fcb92a9b95e 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Reshape_1.rst +++ 
b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Reshape_1.rst @@ -5,7 +5,7 @@ Reshape .. meta:: - :description: Learn about Reshape-1 - a shape manipulation operation, which + :description: Learn about Reshape-1 - a shape manipulation operation, which can be performed on two required input tensors. **Versioned name**: *Reshape-1* @@ -65,7 +65,7 @@ If ``special_zero`` is set to ``true`` index of ``0`` cannot be larger than the 0 - 2 < !--The tensor contains 2 elements: 0, 4 --> + 2 @@ -92,7 +92,7 @@ If ``special_zero`` is set to ``true`` index of ``0`` cannot be larger than the 24 - 3 < !--The tensor contains 3 elements: 0, -1, 4 --> + 3 @@ -119,7 +119,7 @@ If ``special_zero`` is set to ``true`` index of ``0`` cannot be larger than the 3 - 4 < !--The tensor contains 4 elements: 0, 0, 1, -1 --> + 4 @@ -147,7 +147,7 @@ If ``special_zero`` is set to ``true`` index of ``0`` cannot be larger than the 1 - 2 < !--The tensor contains 2 elements: -1, 0 --> + 2 @@ -173,7 +173,7 @@ If ``special_zero`` is set to ``true`` index of ``0`` cannot be larger than the 1 - 2 < !--The tensor contains 2 elements: 0, -1 --> + 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_1.rst index 3b98d8362d181f..176e79a927c0bd 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_1.rst @@ -5,7 +5,7 @@ ShapeOf .. meta:: - :description: Learn about ShapeOf-1 - a shape manipulation operation, which + :description: Learn about ShapeOf-1 - a shape manipulation operation, which can be performed on an arbitrary input tensor. 
**Versioned name**: *ShapeOf-1* @@ -39,7 +39,7 @@ ShapeOf - < !-- output value is: [2,3,224,224]--> + 4 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_3.rst index 7e68b4447beeaf..bb64e7517f60de 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/ShapeOf_3.rst @@ -5,7 +5,7 @@ ShapeOf .. meta:: - :description: Learn about ShapeOf-3 - a shape manipulation operation, which + :description: Learn about ShapeOf-3 - a shape manipulation operation, which can be performed on an arbitrary input tensor. **Versioned name**: *ShapeOf-3* @@ -54,7 +54,7 @@ ShapeOf - < !-- output value is: [2,3,224,224]--> + 4 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Squeeze_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Squeeze_1.rst index e8933f2aceb603..9d426f88e5ff83 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Squeeze_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Squeeze_1.rst @@ -5,7 +5,7 @@ Squeeze .. meta:: - :description: Learn about Squeeze-1 - a shape manipulation operation, which + :description: Learn about Squeeze-1 - a shape manipulation operation, which can be performed on one required and one optional input tensor. 
**Versioned name**: *Squeeze-1* @@ -55,7 +55,7 @@ Squeeze - 2 < !-- value [0, 2] --> + 2 @@ -79,7 +79,7 @@ Squeeze - 1 < !-- value is [0] --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Unsqueeze_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Unsqueeze_1.rst index 62a908b5296dac..5bab816f35f0bc 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Unsqueeze_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/shape/Unsqueeze_1.rst @@ -5,7 +5,7 @@ Unsqueeze .. meta:: - :description: Learn about Unsqueeze-1 - a shape manipulation operation, which + :description: Learn about Unsqueeze-1 - a shape manipulation operation, which can be performed on two required input tensors. **Versioned name**: *Unsqueeze-1* @@ -48,7 +48,7 @@ Unsqueeze - 2 < !-- value is [0, 3] --> + 2 @@ -74,7 +74,7 @@ Unsqueeze - 1 < !-- value is [0] --> + 1 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/DFT_7.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/DFT_7.rst index 90a42a0ac1fdbb..b32b1eb4c23729 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/DFT_7.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/DFT_7.rst @@ -5,7 +5,7 @@ DFT .. meta:: - :description: Learn about DFT-7 - a signal processing operation, which can be + :description: Learn about DFT-7 - a signal processing operation, which can be performed on two required and one optional input tensor. **Versioned name**: *DFT-7* @@ -23,14 +23,14 @@ No attributes available. * **1**: ``data`` - Input tensor of type *T* with data for the DFT transformation. Type of elements is any supported floating-point type. 
The last dimension of the input tensor must be equal to 2, that is the input tensor shape must have the form ``[D_0, D_1, ..., D_{N-1}, 2]``, representing the real and imaginary components of complex numbers in ``[:, ..., :, 0]`` and in ``[:, ..., :, 1]`` correspondingly. **Required.** * **2**: ``axes`` - 1D tensor of type *T_IND* specifying dimension indices where DFT is applied, and ``axes`` is any unordered list of indices of different dimensions of input tensor, for example, ``[0, 4]``, ``[4, 0]``, ``[4, 2, 1]``, ``[1, 2, 3]``, ``[-3, 0, -2]``. These indices should be integers from ``-(r - 1)`` to ``(r - 2)`` inclusively, where ``r = rank(data)``. A negative axis ``a`` is interpreted as an axis ``r - 1 + a``. Other dimensions do not change. The order of elements in ``axes`` attribute matters, and is mapped directly to elements in the third input ``signal_size``. **Required.** - .. note:: - + .. note:: + The following constraint must be satisfied: ``rank(data) >= len(axes) + 1 and input_shape[-1] == 2 and (rank(data) - 1) not in axes and (-1) not in axes``. * **3**: ``signal_size`` - 1D tensor of type *T_SIZE* describing signal size with respect to axes from the input ``axes``. If ``signal_size[i] == -1``, then DFT is calculated for full size of the axis ``axes[i]``. If ``signal_size[i] > input_shape[: r - 1][axes[i]]``, then input data are zero-padded with respect to the axis ``axes[i]`` at the end. Finally, ``signal_size[i] < input_shape[: r - 1][axes[i]]``, then input data are trimmed with respect to the axis ``axes[i]``. More precisely, if ``signal_size[i] < input_shape[: r - 1][axes[i]]``, the slice ``0: signal_size[i]`` of the axis ``axes[i]`` is considered. Optional, with default value ```[input_shape[: r - 1][a] for a in axes]```. - .. note:: - + .. note:: + If the input ``signal_size`` is specified, the size of ``signal_size`` must be the same as the size of ``axes``. 
**Outputs** @@ -52,7 +52,7 @@ Let ``D`` be an input tensor ``A``, taking into account the ``signal_size``, and Next, put .. math:: - + X[j_0,\dots,j_{k-1},j_k,\dots,j_{k+r-1}]=D[j_0,\dots,j_{k-1},j_k,\dots,j_{k+r-1},0]+iD[j_0,\dots,j_{k-1},j_k,\dots,j_{k+r-1},1] for all indices ``j_0,...,j_{k+r-1}``, where ``i`` is an imaginary unit, that is ``X`` is a complex tensor. @@ -60,17 +60,17 @@ for all indices ``j_0,...,j_{k+r-1}``, where ``i`` is an imaginary unit, that is Then the discrete Fourier transform is the tensor :math:`Y` of the same shape as the tensor :math:`X`, such that .. math:: - + Y[n_0,\dots,n_{k-1},m_0,\dots,m_{r-1}]=\sum\limits_{j_0=0}^{S_0-1}\cdots\sum\limits_{j_{r-1}=0}^{S_{r-1}-1}X[n_0,\dots,n_{k-1},j_0,\dots,j_{r-1}]\exp\left(-2\pi i\sum\limits_{q=0}^{r-1}\frac{m_qj_q}{S_q}\right) for all indices ``n_0,...,n_{k-1}``, ``m_0,...,m_{r-1}``, and the result of the operation is the real tensor ``Z`` with the shape ``[B_0, ..., B_{k-1}, S_0, ..., S_{r-1}, 2]`` and such that .. math:: - + Z[n_0,\dots,n_{k-1},m_0,\dots,m_{r-1}, 0]=Re Y[n_0,\dots,n_{k-1},m_0,\dots,m_{r-1}], .. math:: - + Z[n_0,\dots,n_{k-1},m_0,\dots,m_{r-1}, 1]=Im Y[n_0,\dots,n_{k-1},m_0,\dots,m_{r-1}]. Calculations for the generic case of axes and signal sizes are similar. @@ -81,7 +81,7 @@ There is no ``signal_size`` input (4D input tensor): .. code-block:: xml :force: - + @@ -91,7 +91,7 @@ There is no ``signal_size`` input (4D input tensor): 2 - 2 < !-- axes input contains [1, 2] --> + 2 @@ -107,7 +107,7 @@ There is no ``signal_size`` input (3D input tensor): .. code-block:: xml :force: - + @@ -116,7 +116,7 @@ There is no ``signal_size`` input (3D input tensor): 2 - 2 < !-- axes input contains [0, 1] --> + 2 @@ -131,7 +131,7 @@ There is ``signal_size`` input (4D input tensor): .. 
code-block:: xml :force: - + @@ -141,10 +141,10 @@ There is ``signal_size`` input (4D input tensor): 2 - 2 < !-- axes input contains [1, 2] --> + 2 - 2 < !-- signal_size input contains [512, 100] --> + 2 @@ -160,7 +160,7 @@ There is ``signal_size`` input (3D input tensor): .. code-block:: xml :force: - + @@ -169,10 +169,10 @@ There is ``signal_size`` input (3D input tensor): 2 - 2 < !-- axes input contains [0, 1] --> + 2 - 2 < !-- signal_size input contains [512, 100] --> + 2 @@ -187,7 +187,7 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso .. code-block:: xml :force: - + @@ -198,10 +198,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 1, 2] --> + 3 - 3 < !-- signal_size input contains [170, -1, 1024] --> + 3 @@ -218,7 +218,7 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso .. code-block:: xml :force: - + @@ -229,10 +229,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 0, 2] --> + 3 - 3 < !-- signal_size input contains [258, -1, 2056] --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IDFT_7.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IDFT_7.rst index e3651b2c44dc58..0621a323428543 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IDFT_7.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IDFT_7.rst @@ -5,7 +5,7 @@ Inverse Discrete Fourier Transformation (IDFT) .. meta:: - :description: Learn about IDFT-7 - a signal processing operation, which can be + :description: Learn about IDFT-7 - a signal processing operation, which can be performed on two required and one optional input tensor. 
**Versioned name**: *IDFT-7* @@ -22,16 +22,16 @@ No attributes available. * **1**: ``data`` - Input tensor of type *T* with data for the IDFT transformation. Type of elements is any supported floating-point type. The last dimension of the input tensor must be equal to 2, that is the input tensor shape must have the form ``[D_0, D_1, ..., D_{N-1}, 2]``, representing the real and imaginary components of complex numbers in ``[:, ..., :, 0]`` and in ``[:, ..., :, 1]`` correspondingly. **Required.** * **2**: ``axes`` - 1D tensor of type *T_IND* specifying dimension indices where IDFT is applied, and ``axes`` is any unordered list of indices of different dimensions of input tensor, for example, ``[0, 4]``, ``[4, 0]``, ``[4, 2, 1]``, ``[1, 2, 3]``, ``[-3, 0, -2]``. These indices should be integers from ``-(r - 1)`` to ``(r - 2)`` inclusively, where ``r = rank(data)``. A negative axis ``a`` is interpreted as an axis ``r - 1 + a``. Other dimensions do not change. The order of elements in ``axes`` attribute matters, and is mapped directly to elements in the third input ``signal_size``. **Required.** -* +* .. note:: - + The following constraint must be satisfied: ``rank(data) >= len(axes) + 1 and input_shape[-1] == 2 and (rank(data) - 1) not in axes and (-1) not in axes``. * **3**: ``signal_size`` - 1D tensor of type *T_SIZE* describing signal size with respect to axes from the input ``axes``. If ``signal_size[i] == -1``, then IDFT is calculated for full size of the axis ``axes[i]``. If ``signal_size[i] > input_shape[: r - 1][axes[i]]``, then input data are zero-padded with respect to the axis ``axes[i]`` at the end. Finally, if ``signal_size[i] < input_shape[: r - 1][axes[i]]``, then input data are trimmed with respect to the axis ``axes[i]``. More precisely, if ``signal_size[i] < input_shape[: r - 1][axes[i]]``, the slice ``0: signal_size[i]`` of the axis ``axes[i]`` is considered. Optional, with default value ``[input_shape[: r - 1][a] for a in axes]``. -* +* .. 
note:: - + If the input ``signal_size`` is specified, then the size of ``signal_size`` must be the same as the size of ``axes``. **Outputs** @@ -52,7 +52,7 @@ For simplicity, assume that an input tensor ``A`` has the shape ``[B_0, ..., B_{ Let ``D`` be an input tensor ``A``, taking into account the ``signal_size``, and, hence, ``D`` has the shape ``[B_0, ..., B_{k-1}, S_0, ..., S_{r-1}, 2]``. -Next, put +Next, put .. math:: @@ -94,7 +94,7 @@ There is no ``signal_size`` input (4D input tensor): 2 - 2 < !-- [1, 2] --> + 2  @@ -120,7 +120,7 @@ There is no ``signal_size`` input (3D input tensor): 2 - 2 < !-- [0, 1] --> + 2  @@ -147,10 +147,10 @@ There is ``signal_size`` input (4D input tensor): 2 - 2 < !-- [1, 2] --> + 2  - 2 < !-- [512, 100] --> + 2  @@ -177,10 +177,10 @@ There is ``signal_size`` input (3D input tensor): 2 - 2 < !-- [0, 1] --> + 2  - 2 < !-- [512, 100] --> + 2  @@ -208,10 +208,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 1, 2] --> + 3  - 3 < !-- signal_size input contains [170, -1, 1024] --> + 3  @@ -241,10 +241,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 0, 2] --> + 3  - 3 < !-- signal_size input contains [258, -1, 2056] --> + 3  diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IRDFT_9.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IRDFT_9.rst index 7b8804cd841871..d067654709da10 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IRDFT_9.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/IRDFT_9.rst @@ -5,7 +5,7 @@ Inverse Discrete complex-to-real Fourier Transformation (IRDFT) .. 
meta:: - :description: Learn about IRDFT-9 - a signal processing operation, which can be + :description: Learn about IRDFT-9 - a signal processing operation, which can be performed on two required and one optional input tensor. **Versioned name**: *IRDFT-9* @@ -22,18 +22,18 @@ No attributes available. * **1**: ``data`` - Input tensor of type *T* with data for the IRDFT transformation. The last dimension of the input tensor must be equal to 2, that is the input tensor shape must have the form ``[D_0, D_1, ..., D_{N-1}, 2]``, representing the real and imaginary components of complex numbers in ``[:, ..., :, 0]`` and in ``[:, ..., :, 1]`` correspondingly. **Required.** * **2**: ``axes`` - 1D tensor of type *T_IND* specifying dimension indices where IRDFT is applied, and ``axes`` is any unordered list of indices of different dimensions of the input tensor, for example, ``[0, 4]``, ``[4, 0]``, ``[4, 2, 1]``, ``[1, 2, 3]``, ``[-3, 0, -2]``. These indices should be integers from ``-(r - 1)`` to ``(r - 2)`` inclusively, where ``r = rank(data)``. A negative axis ``a`` is interpreted as an axis ``r - 1 + a``. Other dimensions do not change. The order of elements in the ``axes`` attribute matters, and is mapped directly to elements in the third input ``signal_size``. **Required.** -* +* .. note:: - + The following constraint must be satisfied: ``rank(data) >= len(axes) + 1 and (rank(data) - 1) not in axes and (-1) not in axes``. * **3**: ``signal_size`` - 1D tensor of type *T_SIZE* describing signal size with respect to axes from the input ``axes``. If ``signal_size[i] == -1``, then IRDFT is calculated for full size of the axis ``axes[i]``. If ``signal_size[i] > data_shape[: r - 1][axes[i]]``, then input data is zero-padded with respect to the axis ``axes[i]`` at the end. Finally, if ``signal_size[i] < data_shape[: r - 1][axes[i]]``, then input data is trimmed with respect to the axis ``axes[i]``. 
More precisely, if ``signal_size[i] < data_shape[: r - 1][axes[i]]``, the slice ``0: signal_size[i]`` of the axis ``axes[i]`` is considered. Optionally, with default value ``[data_shape[: r - 1][a] for a in axes]``. -* +* .. note:: - + If the input ``signal_size`` is specified, then the size of ``signal_size`` must be the same as the size of ``axes``. @@ -110,7 +110,7 @@ There is no ``signal_size`` input (4D input tensor): 2 - 2 < !-- [1, 2] --> + 2  @@ -135,7 +135,7 @@ There is no ``signal_size`` input (3D input tensor): 2 - 2 < !-- [0, 1] --> + 2  @@ -160,10 +160,10 @@ There is ``signal_size`` input (4D input tensor): 2 - 2 < !-- [1, 2] --> + 2  - 2 < !-- [512, 100] --> + 2  @@ -189,10 +189,10 @@ There is ``signal_size`` input (3D input tensor): 2 - 2 < !-- [0, 1] --> + 2  - 2 < !-- [512, 100] --> + 2  @@ -219,10 +219,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 1, 2] --> + 3  - 3 < !-- signal_size input contains [170, -1, 1024] --> + 3  @@ -250,10 +250,10 @@ There is ``signal_size`` input (5D input tensor, ``-1`` in ``signal_size``, unso 2 - 3 < !-- axes input contains [3, 0, 2] --> + 3  - 3 < !-- signal_size input contains [258, -1, 2056] --> + 3  diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/RDFT_9.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/RDFT_9.rst index 325a34bcdb55b5..14270fa42458ca 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/RDFT_9.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/signals/RDFT_9.rst @@ -5,7 +5,7 @@ Discrete Fourier Transformation for real-valued input (RDFT) .. 
meta:: - :description: Learn about RDFT-9 - a signal processing operation, which can be + :description: Learn about RDFT-9 - a signal processing operation, which can be performed on two required and one optional input tensor. **Versioned name**: *RDFT-9* @@ -85,7 +85,7 @@ There is no ``signal_size`` input (3D input tensor): 320 - 2 < !-- axes input contains [1, 2] --> + 2 @@ -110,7 +110,7 @@ There is no ``signal_size`` input (2D input tensor): 320 - 2 < !-- axes input contains [0, 1] --> + 2 @@ -136,10 +136,10 @@ There is ``signal_size`` input (3D input tensor): 320 - 2 < !-- axes input contains [1, 2] --> + 2 - 2 < !-- signal_size input contains [512, 100] --> + 2 @@ -163,10 +163,10 @@ There is ``signal_size`` input (2D input tensor): 320 - 2 < !-- axes input contains [0, 1] --> + 2 - 2 < !-- signal_size input contains [512, 100] --> + 2 @@ -192,10 +192,10 @@ There is ``signal_size`` input (4D input tensor, ``-1`` in ``signal_size``, unso 320 - 3 < !-- axes input contains [3, 1, 2] --> + 3 - 3 < !-- signal_size input contains [170, -1, 1024] --> + 3 @@ -222,10 +222,10 @@ There is ``signal_size`` input (4D input tensor, ``-1`` in ``signal_size``, unso 320 - 3 < !-- axes input contains [3, 0, 2] --> + 3 - 3 < !-- signal_size input contains [258, -1, 2056] --> + 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MatrixNMS_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MatrixNMS_8.rst index c4d7cefdd3687c..881003047efe38 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MatrixNMS_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MatrixNMS_8.rst @@ -5,8 +5,8 @@ MatrixNonMaxSuppression .. 
meta:: - :description: Learn about MatrixNonMaxSuppression-8 - a sorting and - maximization operation, which can be performed on two required + :description: Learn about MatrixNonMaxSuppression-8 - a sorting and + maximization operation, which can be performed on two required input tensors. **Versioned name**: *MatrixNonMaxSuppression-8* @@ -176,7 +176,7 @@ When there is no box selected, ``selected_num`` is filled with ``0``. ``selected - -1 < !-- "-1" means a undefined dimension calculated during the model inference --> + -1 6 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_8.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_8.rst index cd992042f1f534..5d0fe3c3c3a518 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_8.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_8.rst @@ -5,8 +5,8 @@ MulticlassNonMaxSuppression .. meta:: - :description: Learn about MulticlassNonMaxSuppression-8 - a sorting and - maximization operation, which can be performed on two required + :description: Learn about MulticlassNonMaxSuppression-8 - a sorting and + maximization operation, which can be performed on two required input tensors. **Versioned name**: *MulticlassNonMaxSuppression-8* @@ -168,7 +168,7 @@ When there is no box selected, ``selected_num`` is filled with ``0``. 
``selected - -1 < !-- "-1" means a undefined dimension calculated during the model inference --> + -1 6 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_9.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_9.rst index 7caccb99ac3e6d..ae8187d60598b0 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_9.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/MulticlassNonMaxSuppression_9.rst @@ -2,8 +2,8 @@ .. meta:: - :description: Learn about MulticlassNonMaxSuppression-8 - a sorting and - maximization operation, which can be performed on two or three + :description: Learn about MulticlassNonMaxSuppression-8 - a sorting and + maximization operation, which can be performed on two or three required input tensors. **Versioned name**: *MulticlassNonMaxSuppression-9* @@ -174,7 +174,7 @@ When there is no box selected, ``selected_num`` is filled with ``0``. 
``selected - -1 < !-- "-1" means a undefined dimension calculated during the model inference --> + -1 6 @@ -211,7 +211,7 @@ Another possible example with 3 inputs could be like: - -1 < !-- "-1" means a undefined dimension calculated during the model inference --> + -1 6 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NMSRotated_13.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NMSRotated_13.rst index 0400d62c414a6f..256e3ad76f637f 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NMSRotated_13.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NMSRotated_13.rst @@ -131,11 +131,11 @@ Plugins that do not support dynamic output tensors produce ``selected_indices`` - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_4.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_4.rst index 34102251ddfefc..161a68b255b1c9 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_4.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_4.rst @@ -5,8 +5,8 @@ NonMaxSuppression .. meta:: - :description: Learn about NonMaxSuppression-4 - a sorting and maximization - operation, which can be performed on two required and three + :description: Learn about NonMaxSuppression-4 - a sorting and maximization + operation, which can be performed on two required and three optional input tensors. 
**Versioned name**: *NonMaxSuppression-4* @@ -108,7 +108,7 @@ The output tensor is filled with -1s for output tensor elements if the total num - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_5.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_5.rst index 21089de7445b14..f0756ca40d0b2a 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_5.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_5.rst @@ -5,8 +5,8 @@ NonMaxSuppression .. meta:: - :description: Learn about NonMaxSuppression-5 - a sorting and maximization - operation, which can be performed on two required and four + :description: Learn about NonMaxSuppression-5 - a sorting and maximization + operation, which can be performed on two required and four optional input tensors. **Versioned name**: *NonMaxSuppression-5* @@ -120,11 +120,11 @@ Plugins which do not support dynamic output tensors produce ``selected_indices`` - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_9.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_9.rst index 6dece225f34b95..54386e7fb41529 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_9.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sort/NonMaxSuppression_9.rst @@ -5,8 +5,8 @@ NonMaxSuppression .. 
meta:: - :description: Learn about NonMaxSuppression-9 - a sorting and maximization - operation, which can be performed on two required and four + :description: Learn about NonMaxSuppression-9 - a sorting and maximization + operation, which can be performed on two required and four optional input tensors. **Versioned name**: *NonMaxSuppression-9* @@ -120,11 +120,11 @@ Plugins which do not support dynamic output tensors produce ``selected_indices`` - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 - 150 < !-- min(100, 10) * 3 * 5 --> + 150 3 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagOffsetsSum_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagOffsetsSum_3.rst index 722da5f3f8c4cc..0a0cb67afb0f06 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagOffsetsSum_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagOffsetsSum_3.rst @@ -5,7 +5,7 @@ EmbeddingBagOffsetsSum .. meta:: - :description: Learn about EmbeddingBagOffsetsSum-3 - a sparse operation, which + :description: Learn about EmbeddingBagOffsetsSum-3 - a sparse operation, which can be performed on three required and two optional input tensors. **Versioned name**: *EmbeddingBagOffsetsSum-3* @@ -38,26 +38,26 @@ EmbeddingBagOffsetsSum **Example** .. 
code-block:: cpp - + - < !-- emb_table value is: [[-0.2, -0.6], [-0.1, -0.4], [-1.9, -1.8], [-1., 1.5], [ 0.8, -0.7]] --> + 5 2 - < !-- indices value is: [0, 2, 3, 4] --> + 4 - < !-- offsets value is: [0, 2, 2] - 3 "bags" containing [2,0,4-2] elements, second "bag" is empty --> + 3 - < !-- default_index value is: 0 --> - < !-- per_sample_weigths value is: [0.5, 0.5, 0.5, 0.5] --> + + 4 - < !-- output value is: [[-1.05, -1.2], [-0.2, -0.6], [-0.1, 0.4]] --> + 3 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagPackedSum_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagPackedSum_3.rst index d389446505409d..9ef623ca7755eb 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagPackedSum_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingBagPackedSum_3.rst @@ -5,7 +5,7 @@ EmbeddingBagPackedSum .. meta:: - :description: Learn about EmbeddingBagPackedSum-3 - a sparse operation, which + :description: Learn about EmbeddingBagPackedSum-3 - a sparse operation, which can be performed on two required and one optional input tensor. **Versioned name**: *EmbeddingBagPackedSum-3* @@ -36,24 +36,24 @@ EmbeddingBagPackedSum **Example** .. 
code-block:: cpp - + - < !-- emb_table value is: [[-0.2, -0.6], [-0.1, -0.4], [-1.9, -1.8], [-1., 1.5], [ 0.8, -0.7]] --> + 5 2 - < !-- indices value is: [[0, 2], [1, 2], [3, 4]] --> + 3 2 - < !-- per_sample_weigths value is: [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]] --> + 3 2 - < !-- output value is: [[-1.05, -1.2], [-1., -1.1], [-0.1, 0.4]] --> + 3 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingSegmentsSum_3.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingSegmentsSum_3.rst index 583477506df52c..20ae7b30675361 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingSegmentsSum_3.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/sparse/EmbeddingSegmentsSum_3.rst @@ -5,7 +5,7 @@ EmbeddingSegmentsSum .. meta:: - :description: Learn about EmbeddingSegmentsSum-3 - a sparse operation, which + :description: Learn about EmbeddingSegmentsSum-3 - a sparse operation, which can be performed on four required and two optional input tensors. **Versioned name**: *EmbeddingSegmentsSum-3* @@ -39,27 +39,27 @@ EmbeddingSegmentsSum **Example** .. 
code-block:: cpp - + - < !-- emb_table value is: [[-0.2, -0.6], [-0.1, -0.4], [-1.9, -1.8], [-1., 1.5], [ 0.8, -0.7]] --> + 5 2 - < !-- indices value is: [0, 2, 3, 4] --> + 4 - < !-- segment_ids value is: [0, 0, 2, 2] - second segment is empty --> + 4 - < !-- num_segments value is: 3 --> - < !-- default_index value is: 0 --> - < !-- per_sample_weigths value is: [0.5, 0.5, 0.5, 0.5] --> + + + 4 - < !-- output value is: [[-1.05, -1.2], [-0.2, -0.6], [-0.1, 0.4]] --> + 3 2 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/ConvertLike_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/ConvertLike_1.rst index e93fe6cd59878a..3f2cf1d356de5a 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/ConvertLike_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/ConvertLike_1.rst @@ -5,7 +5,7 @@ ConvertLike .. meta:: - :description: Learn about ConvertLike-1 - an element-wise, type conversion + :description: Learn about ConvertLike-1 - an element-wise, type conversion operation, which can be performed two required input tensors. **Versioned name**: *ConvertLike-1* @@ -45,19 +45,19 @@ where ``a`` and ``b`` correspond to ``data`` and ``like`` input tensors, respect **Example** .. 
code-block:: cpp - + - < !-- type: int32 --> + 256 56 - < !-- type: float32 --> - 3 < !-- any data --> + + 3 - < !-- result type: float32 --> + 256 56 diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/Convert_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/Convert_1.rst index 3f209cc5168377..50c99c14d0b878 100644 --- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/Convert_1.rst +++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/type/Convert_1.rst @@ -5,7 +5,7 @@ Convert .. meta:: - :description: Learn about Convert-1 - an element-wise, type conversion + :description: Learn about Convert-1 - an element-wise, type conversion operation, which can be performed on a single input tensor. **Versioned name**: *Convert-1* @@ -23,7 +23,7 @@ Conversion of negative signed integer to unsigned integer value happens in accor The result of unsupported conversions is undefined. Output elements are represented as follows: .. math:: - + o_{i} = Convert(a_{i}) where ``a`` corresponds to the input tensor. @@ -52,17 +52,17 @@ where ``a`` corresponds to the input tensor. **Example** .. 
code-block:: cpp - + - < !-- type: i32 --> + 256 56 - < !-- result type: f32 --> + 256 56 diff --git a/docs/snippets/CMakeLists.txt b/docs/snippets/CMakeLists.txt index bb71809a9e2e87..757d95b8771ded 100644 --- a/docs/snippets/CMakeLists.txt +++ b/docs/snippets/CMakeLists.txt @@ -48,8 +48,7 @@ list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/dldt_optimization_guide2.c # create a static library add_library(${TARGET_NAME} STATIC ${SOURCES}) -target_include_directories(${TARGET_NAME} PRIVATE "${OpenVINO_SOURCE_DIR}/src/inference/include/ie" - "${OpenVINO_SOURCE_DIR}/src/inference/include" +target_include_directories(${TARGET_NAME} PRIVATE "${OpenVINO_SOURCE_DIR}/src/inference/include" "${OpenVINO_SOURCE_DIR}/src/inference/dev_api" "${OpenVINO_SOURCE_DIR}/src/core/include" "${OpenVINO_SOURCE_DIR}/src/bindings/c/include" diff --git a/src/bindings/python/wheel/setup.py b/src/bindings/python/wheel/setup.py index cc24316539123e..eae764ddb88b40 100644 --- a/src/bindings/python/wheel/setup.py +++ b/src/bindings/python/wheel/setup.py @@ -85,6 +85,13 @@ "rpath": LIBS_RPATH, "binary_dir": OPENVINO_BINARY_DIR, }, + "npu_plugin": { + "name": "npu", + "prefix": f"{BUILD_BASE}/libs.npu", + "install_dir": OV_RUNTIME_LIBS_DIR, + "rpath": LIBS_RPATH, + "binary_dir": OPENVINO_BINARY_DIR, + }, "multi_plugin": { "name": "multi", "prefix": f"{BUILD_BASE}/libs.multi", diff --git a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp index 87ce9fec59b8f3..b5062d5b761bcf 100644 --- a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp @@ -352,14 +352,13 @@ class LP_TRANSFORMATIONS_API LayerTransformation : public ov::pass::MatcherPass TransformationContext &context, const std::shared_ptr& operation, const FakeQuantizeDequantization& 
dequantization, - const bool updatePrecision, + const bool updateOutputPrecision = true, const bool moveSubtract = true) const; std::shared_ptr moveDequantizationBefore( TransformationContext& context, const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, - const bool updatePrecision, const bool moveSubtract = true) const; bool updateOutput( diff --git a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp index 5ca510079124c1..df5be5c8a22418 100644 --- a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp +++ b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp @@ -168,14 +168,13 @@ class LP_TRANSFORMATIONS_API NetworkHelper { static InsertDequantizationResult moveDequantizationAfter( const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, - const bool updatePrecision, + const bool updateOutputPrecision, const bool moveSubtract, const std::vector& defaultPrecisions = precision_set::get_int8_support()); static InsertDequantizationResult moveDequantizationBefore( const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, - const bool updatePrecision, const bool moveSubtract); static std::vector>> splitConstantsBeforeConcat( diff --git a/src/common/low_precision_transformations/src/batch_to_space.cpp b/src/common/low_precision_transformations/src/batch_to_space.cpp index cc80f95707eb70..0551ac9dcf2972 100644 --- a/src/common/low_precision_transformations/src/batch_to_space.cpp +++ b/src/common/low_precision_transformations/src/batch_to_space.cpp @@ -50,7 +50,7 @@ bool BatchToSpaceTransformation::transform(TransformationContext& context, ov::p } const std::shared_ptr op = NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); - moveDequantizationAfter(context, op, 
NetworkHelper::getDequantization(op, defaultPrecisions), false); + moveDequantizationAfter(context, op, NetworkHelper::getDequantization(op, defaultPrecisions)); return true; } diff --git a/src/common/low_precision_transformations/src/gather.cpp b/src/common/low_precision_transformations/src/gather.cpp index 59fbf5d7f78cba..86e699030fda4d 100644 --- a/src/common/low_precision_transformations/src/gather.cpp +++ b/src/common/low_precision_transformations/src/gather.cpp @@ -119,7 +119,7 @@ bool GatherTransformation::transform(TransformationContext& context, ov::pass::p replace_node(dequantization.subtractConstant, newConstant); } - moveDequantizationAfter(context, gather, NetworkHelper::getDequantization(gather, defaultPrecisions), false); + moveDequantizationAfter(context, gather, NetworkHelper::getDequantization(gather, defaultPrecisions)); return true; } diff --git a/src/common/low_precision_transformations/src/interpolate.cpp b/src/common/low_precision_transformations/src/interpolate.cpp index 24855aac822ddd..56d47e0511cb20 100644 --- a/src/common/low_precision_transformations/src/interpolate.cpp +++ b/src/common/low_precision_transformations/src/interpolate.cpp @@ -60,7 +60,7 @@ bool InterpolateTransformation::transform(TransformationContext &context, ov::pa return false; } interpolate = NetworkHelper::separateInStandaloneBranch(interpolate, defaultPrecisions); - moveDequantizationAfter(context, interpolate, NetworkHelper::getDequantization(interpolate, defaultPrecisions), true); + moveDequantizationAfter(context, interpolate, NetworkHelper::getDequantization(interpolate, defaultPrecisions)); return true; } diff --git a/src/common/low_precision_transformations/src/layer_transformation.cpp b/src/common/low_precision_transformations/src/layer_transformation.cpp index 86c2ba9e7df65c..b5c8c9e7f1da14 100644 --- a/src/common/low_precision_transformations/src/layer_transformation.cpp +++ b/src/common/low_precision_transformations/src/layer_transformation.cpp @@ -127,11 
+127,13 @@ bool LayerTransformation::canBeTransformedStatic(const std::shared_ptr& la bool LayerTransformation::canBeTransformedSpatialDimension(const TransformationContext& context, std::shared_ptr layer) const { if (!isQuantized(layer, defaultPrecisions)) { + OPENVINO_DEBUG << "LPT: early exit: not quantized"; return false; } const auto outputs = layer->outputs(); if (std::any_of(outputs.begin(), outputs.end(), [](const Output& out) { return out.get_partial_shape().rank().is_dynamic(); })) { + OPENVINO_DEBUG << "LPT: early exit: rank is dynamic"; return false; } return true; @@ -397,11 +399,11 @@ std::shared_ptr LayerTransformation::moveDequantizationAfter( TransformationContext &context, const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, - const bool updatePrecision, + const bool updateOutputPrecision, const bool moveSubtract) const { const auto result = ov::pass::low_precision::NetworkHelper::moveDequantizationAfter(operation, dequantization, - updatePrecision, + updateOutputPrecision, moveSubtract, defaultPrecisions); updateOutput(context, result.lastDequantization, result.newOperation); @@ -412,11 +414,9 @@ std::shared_ptr LayerTransformation::moveDequantizationBefore( TransformationContext& context, const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, - const bool updatePrecision, const bool moveSubtract) const { const auto result = ov::pass::low_precision::NetworkHelper::moveDequantizationBefore(operation, dequantization, - updatePrecision, moveSubtract); updateOutput(context, result.newOperation, result.lastDequantization); return result.newOperation; diff --git a/src/common/low_precision_transformations/src/mat_mul.cpp b/src/common/low_precision_transformations/src/mat_mul.cpp index a7244ed75578c6..557375d8e4906b 100644 --- a/src/common/low_precision_transformations/src/mat_mul.cpp +++ b/src/common/low_precision_transformations/src/mat_mul.cpp @@ -13,6 +13,7 @@ #include 
"openvino/pass/pattern/op/wrap_type.hpp" #include "low_precision/network_helper.hpp" +#include "openvino/util/log.hpp" #include "itt.hpp" using namespace ov; @@ -175,6 +176,7 @@ bool MatMulTransformation::transform(TransformationContext &context, ov::pass::p updateOutput(context, newMultiply, newMatMul); + OPENVINO_DEBUG << "LPT: done: " << newMatMul; return true; } @@ -189,12 +191,14 @@ bool MatMulTransformation::canBeTransformed(const TransformationContext& context std::shared_ptr matMul = ov::as_type_ptr(layer); if (matMul == nullptr) { + OPENVINO_DEBUG << "LPT: early exit: not MatMul"; return false; } const auto dequantization1 = NetworkHelper::getDequantization(layer, defaultPrecisions, 0); if (!dequantization1.empty()) { if (updatePrecisions && !dequantization1.isLowPrecision()) { + OPENVINO_DEBUG << "LPT: early exit: dequantization before is not in low precision"; return false; } diff --git a/src/common/low_precision_transformations/src/max_pool.cpp b/src/common/low_precision_transformations/src/max_pool.cpp index a2f4f70b0649de..8eb39f8a98603a 100644 --- a/src/common/low_precision_transformations/src/max_pool.cpp +++ b/src/common/low_precision_transformations/src/max_pool.cpp @@ -57,7 +57,7 @@ bool MaxPoolTransformation::transform(TransformationContext& context, ov::pass:: } const std::shared_ptr pooling = NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); - moveDequantizationAfter(context, pooling, NetworkHelper::getDequantization(pooling, defaultPrecisions), false); + moveDequantizationAfter(context, pooling, NetworkHelper::getDequantization(pooling, defaultPrecisions)); return true; } diff --git a/src/common/low_precision_transformations/src/move_fake_quantize.cpp b/src/common/low_precision_transformations/src/move_fake_quantize.cpp index 3c43c501efcb29..c2e550d447fd8d 100644 --- a/src/common/low_precision_transformations/src/move_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/move_fake_quantize.cpp @@ 
-148,7 +148,7 @@ bool MoveFakeQuantize::transform(TransformationContext& context, ov::pass::patte newConcat->set_friendly_name(concat->get_friendly_name()); NetworkHelper::copyInfo(concat, newConcat); if (!dequantization.empty()) { - moveDequantizationBefore(context, newConcat, dequantization, false); + moveDequantizationBefore(context, newConcat, dequantization); return true; } replace_node(fq, newConcat); diff --git a/src/common/low_precision_transformations/src/network_helper.cpp b/src/common/low_precision_transformations/src/network_helper.cpp index 327188c7003d50..be575495ff1052 100644 --- a/src/common/low_precision_transformations/src/network_helper.cpp +++ b/src/common/low_precision_transformations/src/network_helper.cpp @@ -1450,7 +1450,7 @@ std::shared_ptr NetworkHelper::optimizeSubtract(std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, - const bool updatePrecision, + const bool updateOutputPrecision, const bool moveSubtract, const std::vector& defaultPrecisions) { assert( @@ -1466,6 +1466,10 @@ NetworkHelper::InsertDequantizationResult NetworkHelper::moveDequantizationAfter // we must have dequantization multiply assert(dequantization.multiply != nullptr); + OPENVINO_ASSERT(operation->get_output_size() == 1, + "moveDequantizationAfter doesn't support dequantization propagation for layers with several outputs. (", + operation, " has ", operation->get_output_size(), " outputs)"); + OutputVector inputs = operation->input_values(); const size_t dequantizationIndex = getChildInputIndex(dequantization.multiply, operation); inputs[dequantizationIndex] = (!moveSubtract && dequantization.subtract != nullptr) ? @@ -1477,10 +1481,12 @@ NetworkHelper::InsertDequantizationResult NetworkHelper::moveDequantizationAfter ov::copy_runtime_info(operation, newOperation); if (const auto op = std::dynamic_pointer_cast(newOperation)) { - op->set_overridden_output_type(updatePrecision ? + op->set_overridden_output_type(updateOutputPrecision ?
newOperation->get_input_element_type(0) : dequantization.multiplyConstant->get_element_type()); newOperation->validate_and_infer_types(); + } else { + OPENVINO_ASSERT(updateOutputPrecision, "moveDequantizationAfter can't save old output precision since layer is not TypeRelaxed: ", newOperation); } std::shared_ptr parent = newOperation; @@ -1545,7 +1551,6 @@ NetworkHelper::InsertDequantizationResult NetworkHelper::moveDequantizationAfter NetworkHelper::InsertDequantizationResult NetworkHelper::moveDequantizationBefore( const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, - const bool updatePrecision, const bool moveSubtract) { assert( (NetworkHelper::getDequantizationBelow(operation).subtractConstant == nullptr) || @@ -1642,11 +1647,8 @@ NetworkHelper::InsertDequantizationResult NetworkHelper::moveDequantizationBefor THROW_TRANSFORMATION_EXCEPTION << "dequantization operations must end with multiply"; } replace_node(dequantization.multiply, newOperation); - if (const auto op = std::dynamic_pointer_cast(newOperation)) { - op->set_overridden_output_type(updatePrecision ? 
- newOperation->get_input_element_type(0) : - dequantization.multiplyConstant->get_element_type()); + op->set_overridden_output_type(dequantization.multiplyConstant->get_element_type()); newOperation->validate_and_infer_types(); } diff --git a/src/common/low_precision_transformations/src/pad.cpp b/src/common/low_precision_transformations/src/pad.cpp index 859e5d9488f6cc..9119ebef34d0f5 100644 --- a/src/common/low_precision_transformations/src/pad.cpp +++ b/src/common/low_precision_transformations/src/pad.cpp @@ -163,7 +163,7 @@ bool PadTransformation::transform(TransformationContext& context, ov::pass::patt const auto convertedZero = ov::opset1::Constant::create(dequantization.data.get_element_type(), Shape{}, { padConstantValue }); pad->set_argument(3, convertedZero); - moveDequantizationAfter(context, pad, dequantization, true); + moveDequantizationAfter(context, pad, dequantization); return true; } diff --git a/src/common/low_precision_transformations/src/relu.cpp b/src/common/low_precision_transformations/src/relu.cpp index 4cc0f26ce0cc12..9e72d90a497179 100644 --- a/src/common/low_precision_transformations/src/relu.cpp +++ b/src/common/low_precision_transformations/src/relu.cpp @@ -42,7 +42,7 @@ bool ReluTransformation::transform(TransformationContext& context, ov::pass::pat relu = NetworkHelper::separateInStandaloneBranch(relu, defaultPrecisions); const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(relu, defaultPrecisions, 0); - moveDequantizationAfter(context, relu, dequantization, false, false); + moveDequantizationAfter(context, relu, dequantization); return true; } diff --git a/src/common/low_precision_transformations/src/reshape.cpp b/src/common/low_precision_transformations/src/reshape.cpp index 0c5f83502df4e8..f8dfbfa8e00065 100644 --- a/src/common/low_precision_transformations/src/reshape.cpp +++ b/src/common/low_precision_transformations/src/reshape.cpp @@ -157,7 +157,7 @@ bool 
ReshapeTransformation::transform(TransformationContext& context, ov::pass:: reshape = ov::as_type_ptr(NetworkHelper::separateInStandaloneBranch(reshape, defaultPrecisions)); reshapeDequantizationConstant(reshape, defaultPrecisions); - moveDequantizationAfter(context, reshape, NetworkHelper::getDequantization(reshape, defaultPrecisions, 0), false); + moveDequantizationAfter(context, reshape, NetworkHelper::getDequantization(reshape, defaultPrecisions, 0)); return true; } diff --git a/src/common/low_precision_transformations/src/shuffle_channels.cpp b/src/common/low_precision_transformations/src/shuffle_channels.cpp index 9587d47939b9a6..332ddd0a8eb235 100644 --- a/src/common/low_precision_transformations/src/shuffle_channels.cpp +++ b/src/common/low_precision_transformations/src/shuffle_channels.cpp @@ -71,7 +71,7 @@ bool ShuffleChannelsTransformation::transform(TransformationContext& context, ov replace_node(dequantization.multiplyConstant, shuffledMulConst); dequantization.multiplyConstant = shuffledMulConst; - moveDequantizationAfter(context, shuffleChannels, dequantization, false); + moveDequantizationAfter(context, shuffleChannels, dequantization); return true; } diff --git a/src/common/low_precision_transformations/src/space_to_batch.cpp b/src/common/low_precision_transformations/src/space_to_batch.cpp index 75bf0f9dbbc559..0783f5e182dcc1 100644 --- a/src/common/low_precision_transformations/src/space_to_batch.cpp +++ b/src/common/low_precision_transformations/src/space_to_batch.cpp @@ -50,7 +50,7 @@ bool SpaceToBatchTransformation::transform(TransformationContext& context, ov::p } const std::shared_ptr op = NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); - moveDequantizationAfter(context, op, NetworkHelper::getDequantization(op, defaultPrecisions), false); + moveDequantizationAfter(context, op, NetworkHelper::getDequantization(op, defaultPrecisions)); return true; } diff --git 
a/src/common/low_precision_transformations/src/squeeze.cpp b/src/common/low_precision_transformations/src/squeeze.cpp index cdffa6e41f2edf..9570c74d12237b 100644 --- a/src/common/low_precision_transformations/src/squeeze.cpp +++ b/src/common/low_precision_transformations/src/squeeze.cpp @@ -66,7 +66,7 @@ bool SqueezeTransformation::transform(TransformationContext& context, ov::pass:: replace_node(dequantization.subtractConstant, newConstant); } - moveDequantizationAfter(context, squeeze, NetworkHelper::getDequantization(squeeze, defaultPrecisions), false); + moveDequantizationAfter(context, squeeze, NetworkHelper::getDequantization(squeeze, defaultPrecisions)); return true; } diff --git a/src/common/low_precision_transformations/src/strided_slice.cpp b/src/common/low_precision_transformations/src/strided_slice.cpp index dbe9b852f875a2..3a067d2ee8265a 100644 --- a/src/common/low_precision_transformations/src/strided_slice.cpp +++ b/src/common/low_precision_transformations/src/strided_slice.cpp @@ -131,7 +131,7 @@ bool StridedSliceTransformation::transform(TransformationContext& context, ov::p replace_node(dequantization.multiplyConstant, new_mul_const); dequantization.multiplyConstant = new_mul_const; - moveDequantizationAfter(context, strided_slice, NetworkHelper::getDequantization(strided_slice, defaultPrecisions), false); + moveDequantizationAfter(context, strided_slice, NetworkHelper::getDequantization(strided_slice, defaultPrecisions)); return true; } diff --git a/src/common/low_precision_transformations/src/transparent_base_transformation.cpp b/src/common/low_precision_transformations/src/transparent_base_transformation.cpp index 8630dfc932422f..014303f1b8cfab 100644 --- a/src/common/low_precision_transformations/src/transparent_base_transformation.cpp +++ b/src/common/low_precision_transformations/src/transparent_base_transformation.cpp @@ -20,7 +20,7 @@ bool TransparentBaseTransformation::transform(TransformationContext& context, ov } op = 
NetworkHelper::separateInStandaloneBranch(op, defaultPrecisions); - moveDequantizationAfter(context, op, NetworkHelper::getDequantization(op, defaultPrecisions), true); + moveDequantizationAfter(context, op, NetworkHelper::getDequantization(op, defaultPrecisions)); return true; } diff --git a/src/common/low_precision_transformations/src/transpose.cpp b/src/common/low_precision_transformations/src/transpose.cpp index eb40b4e183abaa..7b4c68ddc780d6 100644 --- a/src/common/low_precision_transformations/src/transpose.cpp +++ b/src/common/low_precision_transformations/src/transpose.cpp @@ -90,7 +90,7 @@ bool TransposeTransformation::transform(TransformationContext& context, ov::pass transpose = NetworkHelper::separateInStandaloneBranch(transpose, defaultPrecisions); transposeDequantizationConstant(transpose, defaultPrecisions); - moveDequantizationAfter(context, transpose, NetworkHelper::getDequantization(transpose, defaultPrecisions, 0), false); + moveDequantizationAfter(context, transpose, NetworkHelper::getDequantization(transpose, defaultPrecisions, 0)); return true; } diff --git a/src/common/low_precision_transformations/src/unsqueeze.cpp b/src/common/low_precision_transformations/src/unsqueeze.cpp index 76967e2cca4de8..4b9e8b684e1f4e 100644 --- a/src/common/low_precision_transformations/src/unsqueeze.cpp +++ b/src/common/low_precision_transformations/src/unsqueeze.cpp @@ -68,7 +68,7 @@ bool UnsqueezeTransformation::transform(TransformationContext& context, ov::pass replace_node(dequantization.subtractConstant, newConstant); } - moveDequantizationAfter(context, unsqueeze, NetworkHelper::getDequantization(unsqueeze, defaultPrecisions), false); + moveDequantizationAfter(context, unsqueeze, NetworkHelper::getDequantization(unsqueeze, defaultPrecisions)); return true; } diff --git a/src/common/low_precision_transformations/tests/low_precision_transformations_test.cpp b/src/common/low_precision_transformations/tests/low_precision_transformations_test.cpp deleted file 
mode 100644 index 8179503e732d31..00000000000000 --- a/src/common/low_precision_transformations/tests/low_precision_transformations_test.cpp +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "low_precision/concat.hpp" -#include "low_precision/convolution.hpp" -#include "low_precision/mat_mul.hpp" -#include "low_precision/fuse_convert.hpp" - -using namespace ::testing; -using namespace ov::pass::low_precision; - -class smoke_LPT_LowPrecisionTransformationsTests : public Test {}; - -// TODO: LPT: not implemented -TEST_F(smoke_LPT_LowPrecisionTransformationsTests, DISABLED_removeAll) { - //TODO: FIXME - ASSERT_EQ(1, 0); - //LowPrecisionTransformations transformations = LowPrecisionTransformer::getAllTransformations(LayerTransformation::Params()); - //auto transformation = transformations.find("Convolution"); - //ASSERT_NE(0, transformation.size()); - - //transformations.removeAll(); - //transformation = transformations.find("Convolution"); - //ASSERT_EQ(0, transformation.size()); -} -// -//TEST_F(LowPrecisionTransformationsTests, removeBranchSpecific) { -// LowPrecisionTransformations transformations = LowPrecisionTransformer::getAllTransformations(LayerTransformation::Params()); -// auto transformation = transformations.find("Concat"); -// ASSERT_NE(0, transformation.size()); -// -// transformations.removeBranchSpecific(); -// transformation = transformations.find("Concat"); -// ASSERT_EQ(0, transformation.size()); -//} -// -//TEST_F(LowPrecisionTransformationsTests, remove) { -// LowPrecisionTransformations transformations = LowPrecisionTransformer::getAllTransformations(LayerTransformation::Params()); -// auto transformation = transformations.find("MatMul"); -// ASSERT_NE(0, transformation.size()); -// -// transformations.remove(); -// transformation = transformations.find("MatMul"); -// ASSERT_EQ(0, transformation.size()); -//} -// 
-//TEST_F(LowPrecisionTransformationsTests, removeCleanup) { -// LowPrecisionTransformations transformations = LowPrecisionTransformer::getAllTransformations(LayerTransformation::Params()); -// auto transformation = transformations.find("Multiply"); -// ASSERT_NE(0, transformation.size()); -// const size_t originalSize = transformation.size(); -// -// transformations.removeCleanup(); -// transformation = transformations.find("Multiply"); -// ASSERT_EQ(originalSize - 1, transformation.size()); -//} -// -//TEST_F(LowPrecisionTransformationsTests, removeStandaloneCleanup) { -// LowPrecisionTransformations transformations = LowPrecisionTransformer::getAllTransformations(LayerTransformation::Params()); -// auto transformation = transformations.find("Multiply"); -// ASSERT_NE(0, transformation.size()); -// const size_t originalSize = transformation.size(); -// -// transformations.removeStandaloneCleanup(); -// transformation = transformations.find("Multiply"); -// ASSERT_EQ(originalSize - 1, transformation.size()); -//} diff --git a/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp b/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp index f8e2ddb16e28db..44f6e884a766bc 100644 --- a/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp +++ b/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp @@ -20,6 +20,7 @@ #include "low_precision/fake_quantize_decomposition.hpp" #include "low_precision/group_convolution.hpp" #include "low_precision/interpolate.hpp" +#include "low_precision/low_precision.hpp" #include "low_precision/mat_mul.hpp" #include "low_precision/max_pool.hpp" #include "low_precision/multiply_partial.hpp" @@ -86,6 +87,7 @@ TEST(LPT, AvoidDequantizationToShapeOfPropagationAvgPoolTransformation) { auto f = std::make_shared(ResultVector{result1, result2}, ParameterVector{input}); pass::Manager m; + m.register_pass(); 
m.register_pass(); m.run_passes(f); @@ -106,6 +108,7 @@ TEST(LPT, AvoidDequantizationToShapeOfPropagationClampTransformation) { auto f = std::make_shared(ResultVector{result1, result2}, ParameterVector{input}); pass::Manager m; + m.register_pass(); m.register_pass(); m.run_passes(f); @@ -445,6 +448,7 @@ TEST(LPT, AvoidDequantizationToShapeOfPropagationPReluTransformation) { auto f = std::make_shared(ResultVector{result1, result2}, ParameterVector{input}); pass::Manager m; + m.register_pass(); m.register_pass(); m.run_passes(f); @@ -487,6 +491,7 @@ TEST(LPT, AvoidDequantizationToShapeOfPropagationReduceMeanTransformation) { auto f = std::make_shared(ResultVector{result1, result2}, ParameterVector{input}); pass::Manager m; + m.register_pass(); m.register_pass(); m.run_passes(f); @@ -529,6 +534,7 @@ TEST(LPT, AvoidDequantizationToShapeOfPropagationReduceSumTransformation) { auto f = std::make_shared(ResultVector{result1, result2}, ParameterVector{input}); pass::Manager m; + m.register_pass(); m.register_pass(); m.run_passes(f); diff --git a/src/common/low_precision_transformations/tests/move_dequantization_after_transformation.cpp b/src/common/low_precision_transformations/tests/move_dequantization_after_transformation.cpp index 5e69a03dd24021..b775b4c9865a65 100644 --- a/src/common/low_precision_transformations/tests/move_dequantization_after_transformation.cpp +++ b/src/common/low_precision_transformations/tests/move_dequantization_after_transformation.cpp @@ -36,13 +36,28 @@ class MoveDequantizationAfterTransformationParams { ov::element::Type precisionAfterOperation; ov::builder::subgraph::DequantizationOperations dequantizationAfter; }; + MoveDequantizationAfterTransformationParams(ov::element::Type originalPrecision, + TestTransformationParams params, + bool updateOutputPrecision, + bool moveSubtract, + Actual actual, + Expected expected, + bool typeRelaxed = true) + : originalPrecision(originalPrecision), + params(params), + 
updateOutputPrecision(updateOutputPrecision), + moveSubtract(moveSubtract), + actual(std::move(actual)), + expected(std::move(expected)), + typeRelaxed(typeRelaxed) {} ov::element::Type originalPrecision; TestTransformationParams params; - bool updatePrecision; + bool updateOutputPrecision; bool moveSubtract; Actual actual; Expected expected; + bool typeRelaxed; }; typedef std::tuple< @@ -59,14 +74,15 @@ class MoveDequantizationAfterTransformation : actualFunction = ov::builder::subgraph::MoveDequantizationAfterFunction::getOriginal( testValues.originalPrecision, inputShape, - testValues.actual.dequantization); + testValues.actual.dequantization, + testValues.typeRelaxed); const auto targetNode = actualFunction->get_output_op(0)->get_input_node_shared_ptr(0); const auto dequantization = ov::pass::low_precision::NetworkHelper::getDequantization(targetNode); ov::pass::low_precision::NetworkHelper::moveDequantizationAfter( targetNode, dequantization, - testValues.updatePrecision, + testValues.updateOutputPrecision, testValues.moveSubtract); referenceFunction = ov::builder::subgraph::MoveDequantizationAfterFunction::getReference( @@ -74,7 +90,8 @@ class MoveDequantizationAfterTransformation : inputShape, testValues.expected.dequantizationBefore, testValues.expected.precisionAfterOperation, - testValues.expected.dequantizationAfter); + testValues.expected.dequantizationAfter, + testValues.typeRelaxed); } static std::string getTestCaseName(testing::TestParamInfo obj) { @@ -87,7 +104,8 @@ class MoveDequantizationAfterTransformation : inputShape << "_" << testValues.actual.dequantization << "_" << (testValues.moveSubtract ? "move_subtract_" : "don't_move_subtract_") << - (testValues.updatePrecision ? "updatePrecision" : "don't_update_precision"); + (testValues.updateOutputPrecision ? "updateOutputPrecision_" : "don't_update_precision_") << + (testValues.typeRelaxed ? 
"typeRelaxed" : "not_typeRelaxed"); return result.str(); } }; @@ -100,6 +118,7 @@ TEST_P(MoveDequantizationAfterTransformation, CompareFunctions) { ASSERT_TRUE(LayerTransformation::allNamesAreUnique(actualFunction)) << "Not all names are unique"; } +namespace { const std::vector inputShapes = { { 1, 3, 16, 16 }, { 4, 3, 16, 16 } @@ -136,7 +155,7 @@ const std::vector testValues = { { {}, {}, { 10.f } }, }, }, - // updatePrecision = false + // updateOutputPrecision = false { ov::element::u8, LayerTransformation::createParamsU8I8(), @@ -151,7 +170,7 @@ const std::vector testValues = { { {}, { 7.f }, { 10.f } }, }, }, - // moveSubtract = false & updatePrecision = false + // moveSubtract = false & updateOutputPrecision = false { ov::element::u8, LayerTransformation::createParamsU8I8(), @@ -196,7 +215,7 @@ const std::vector testValues = { { {}, {}, { 10.f } }, }, }, - // updatePrecision = false + // updateOutputPrecision = false { ov::element::i8, LayerTransformation::createParamsI8I8(), @@ -211,7 +230,7 @@ const std::vector testValues = { { {}, { 7.f }, { 10.f } }, }, }, - // moveSubtract = false & updatePrecision = false + // moveSubtract = false & updateOutputPrecision = false { ov::element::i8, LayerTransformation::createParamsI8I8(), @@ -256,6 +275,22 @@ const std::vector testValues = { { {}, {}, { { 10.f, 12.f, 16.f } } }, }, }, + // updateOutputPrecision = true & typeRelaxed = false + { + ov::element::u8, + LayerTransformation::createParamsU8I8(), + true, + true, + { + { {ov::element::f32}, { 7.f }, { 10.f } }, + }, + { + { {}, {}, {} }, + ov::element::u8, + { {ov::element::f32}, { 7.f }, { 10.f } }, + }, + false + }, }; INSTANTIATE_TEST_SUITE_P( @@ -265,3 +300,24 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(inputShapes), ::testing::ValuesIn(testValues)), MoveDequantizationAfterTransformation::getTestCaseName); +} // namespace + +TEST(LPT, MoveDequantizationAfterTransformationNegative) { + const bool typeRelaxed = false; + const bool updateOutputPrecision = 
false; + + auto model = ov::builder::subgraph::MoveDequantizationAfterFunction::getOriginal( + ov::element::u8, + ov::Shape{1, 3, 16, 16}, + ov::builder::subgraph::DequantizationOperations{{ov::element::f32}, {7.f}, {10.f}}, + typeRelaxed); + + const auto targetNode = model->get_output_op(0)->get_input_node_shared_ptr(0); + const auto dequantization = ov::pass::low_precision::NetworkHelper::getDequantization(targetNode); + + // updateOutputPrecision is supported only for type relaxed nodes + EXPECT_ANY_THROW(ov::pass::low_precision::NetworkHelper::moveDequantizationAfter(targetNode, + dequantization, + updateOutputPrecision, + true)); +} diff --git a/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp b/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp index 81ec7e98a53193..172dcdf657d950 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp @@ -18,6 +18,11 @@ class TRANSFORMATIONS_API ConvertTensorIteratorToRNNSequence; class TRANSFORMATIONS_API ConvertTensorIteratorToGRUSequence; class TRANSFORMATIONS_API ConvertTensorIteratorToSequence; +class TRANSFORMATIONS_API ConvertLoopToLSTMSequence; +class TRANSFORMATIONS_API FuseReverseLSTMSequence; + +class TRANSFORMATIONS_API FuseLSTMSequencesToBidirectionalLSTMSequence; + } // namespace pass } // namespace ov @@ -62,3 +67,33 @@ class ov::pass::ConvertTensorIteratorToSequence : public GraphRewrite { OPENVINO_RTTI("ConvertTensorIteratorToSequence", "0"); ConvertTensorIteratorToSequence(); }; + +/** + * @ingroup ie_transformation_common_api + * @brief Replaces Loop with LSTMCell inside to LSTMSequence + */ +class ov::pass::ConvertLoopToLSTMSequence : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("ConvertLoopToLSTMSequence", "0"); + ConvertLoopToLSTMSequence(); +}; + +/** + * 
@ingroup ie_transformation_common_api + * @brief Fuses ReverseSequence->LSTM->ReverseSequence to LSTMSequence with REVERSE direction flag + */ +class ov::pass::FuseReverseLSTMSequence : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("FuseReverseLSTMSequence", "0"); + FuseReverseLSTMSequence(); +}; + +/** + * @ingroup ie_transformation_common_api + * @brief Replaces two LSTMSequences to one bidirectional LSTMSequence + */ +class ov::pass::FuseLSTMSequencesToBidirectionalLSTMSequence : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("FuseLSTMSequencesToBidirectionalLSTMSequence", "0"); + FuseLSTMSequencesToBidirectionalLSTMSequence(); +}; diff --git a/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp b/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp index c7c1a000e2cc4c..ad850281c1b051 100644 --- a/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp +++ b/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp @@ -36,7 +36,6 @@ #include "openvino/op/transpose.hpp" #include "openvino/op/unsqueeze.hpp" #include "openvino/op/util/broadcast_base.hpp" -#include "openvino/op/util/gather_base.hpp" #include "openvino/op/util/pad_base.hpp" #include "openvino/op/variadic_split.hpp" #include "openvino/pass/manager.hpp" @@ -97,7 +96,6 @@ const std::shared_ptr propagate_through_ops = ov::op::v8::Slice, ov::op::v1::VariadicSplit, ov::op::v1::Split, - op::util::GatherBase, ov::op::v0::Concat, ov::op::v0::Convert, // through Convert can go only to Constants ov::op::v0::Constant, @@ -389,7 +387,6 @@ class PropagateDownDisableSensitivityForQuantized : public pass::MatcherPass { ov::op::v8::Slice, ov::op::v1::VariadicSplit, ov::op::v1::Split, - op::util::GatherBase, ov::op::v0::Concat, ov::op::v0::Tile>(); diff --git 
a/src/common/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp b/src/common/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp index ced9d28880f272..669f0e6072f1f8 100644 --- a/src/common/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_ti_to_sequences.cpp @@ -11,11 +11,15 @@ #include "openvino/core/graph_util.hpp" #include "openvino/core/node.hpp" #include "openvino/core/rt_info.hpp" +#include "openvino/op/add.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/gru_cell.hpp" #include "openvino/op/gru_sequence.hpp" +#include "openvino/op/less.hpp" +#include "openvino/op/logical_and.hpp" +#include "openvino/op/loop.hpp" #include "openvino/op/lstm_cell.hpp" #include "openvino/op/lstm_sequence.hpp" #include "openvino/op/parameter.hpp" @@ -23,14 +27,16 @@ #include "openvino/op/result.hpp" #include "openvino/op/rnn_cell.hpp" #include "openvino/op/rnn_sequence.hpp" +#include "openvino/op/scatter_nd_update.hpp" #include "openvino/op/shape_of.hpp" -#include "openvino/op/squeeze.hpp" #include "openvino/op/tensor_iterator.hpp" #include "openvino/op/transpose.hpp" #include "openvino/op/unsqueeze.hpp" #include "openvino/pass/manager.hpp" +#include "openvino/pass/pattern/op/or.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" #include "transformations/utils/utils.hpp" +#include "validation_util.hpp" namespace { bool convertTensorIteratorToSequence(const std::shared_ptr& ti, @@ -412,8 +418,842 @@ ov::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequence register_matcher(m, callback); } +static bool get_scalar_constant_value(const ov::Output& node, int64_t& output_value) { + auto constant = ov::as_type(node.get_node()); + if (!constant) + return false; + if (ov::shape_size(constant->get_shape()) != 1) + return 
false; + const auto& type = constant->get_output_element_type(0); + if (type != ov::element::i32 && type != ov::element::i64) + return false; + output_value = constant->cast_vector()[0]; + return true; +} + +// clang-format off +/* + + Following subgraph in Loop is fused into LSTMSequence + + + +------------------------------+ + | X | +----------------+ +------+ + | (invariant) | | sequence index | | axis | + | [seq_len, batch, input_size] | | [] | | {0} | + +--------------+---------------+ +--------+-------+ +--+---+ + | | | + | +----------------- + | + +---+ | | + | | +---------------------------+ + | | | + | | | + v v v +----------------------+ +----------------------+ + +---+------+------+---+ | H | | C | + | Gather | | (merged with H_out) | | (merged with C_out) | +-----+ +-----+ +-----+ + | [batch, input_size] | | [batch, hidden_size] | | [batch, hidden_size] | | W | | R | | B | + +----------+----------+ +----------+-----------+ +----------+-----------+ +--+--+ +--+--+ +--+--+ + | | | | | | + | | | | | | + | | | | | | + | | | | | | + | | | | | | + | | | | | | + | | +---------------+ | | | + | | | | | | + | | | | | | + | | | +------------------------------+ | | + | | | | | | + | | | | | | + | +------+ | | +------------------------------------+ | + | | | | | | + +----------------------------+ | | | | +------------------------------------------+ + | | | | | | + +---+ v v v v v v + | Y | +---+----+----+----+----+----+---+ + +---+ | LSTMCell | + | +--------+-------------------+---+ + | | | + v | | + +-----+-----+ +----------+---------------+ | + | Broadcast | | | +---------------------+ + +-----+-----+ | | | + | v v v + | +----------------+ +------------+------------+ +---------+------------+ +--------+--------+ + | | sequence index | | Unsqueeze | | H_out | | C_out | + | +--------+-------+ | [batch, 1, hidden_size] | | (merged with H) | | (merged with C) | + | | +------------+------------+ | [batch, hidden_size] | +-----------------+ + | | | 
+----------------------+ + | | | + | | | + | | | +------+ + | | | | axis | + | | | | {0} | + | | +----------------+ +--+---+ + | | | | + | | | | + | +---+ | +---------------------+ + | | | | + | | | | + +---------------+ | | | + | | | | + v v v v + +---+----+----+----+---+ + | ScatterUpdate | + | (loop body output) | + +----------------------+ + +*/ +// clang-format on + +ov::pass::ConvertLoopToLSTMSequence::ConvertLoopToLSTMSequence() { + MATCHER_SCOPE(ConvertLoopToLSTMSequence); + auto input_label = pattern::any_input(pattern::rank_equals(3)); + auto input_transpose_const_label = pattern::wrap_type(); + auto input_transpose_label = + pattern::wrap_type({input_label, input_transpose_const_label}, + pattern::rank_equals(3)); + auto scatter_indexes_label = pattern::wrap_type(); + auto scatter_update_label = std::make_shared(OutputVector{input_transpose_label, input_label}); + auto scatter_label = pattern::wrap_type( + {pattern::any_input(), scatter_indexes_label, scatter_update_label}); + auto trip_count_label = pattern::wrap_type(); + auto cond_label = pattern::wrap_type(); + auto loop_label = pattern::wrap_type({trip_count_label, + cond_label, + pattern::any_input(), + pattern::any_input(), + pattern::any_input(), + pattern::any_input(), + pattern::any_input(), + scatter_label}); + auto output_transpose_const_label = pattern::wrap_type(); + auto output_transpose_label = pattern::wrap_type({loop_label, output_transpose_const_label}); + + // Loop body pattern: + auto sequence_index_label = pattern::any_input(pattern::rank_equals(0)); + auto iteration_counter_label = pattern::any_input(); + auto iteration_counter_step_label = pattern::wrap_type(); + auto iteration_counter_incremented_label = + pattern::wrap_type({iteration_counter_label, iteration_counter_step_label}); + auto iteration_counter_limit_label = pattern::wrap_type(); + auto iteration_counter_less_than_limit_label = + pattern::wrap_type({iteration_counter_incremented_label, iteration_counter_limit_label}); 
+ auto sequence_index_step_label = pattern::wrap_type(); + auto sequence_index_incremented_label = + pattern::wrap_type({sequence_index_label, sequence_index_step_label}); + auto sequence_index_limit_label = pattern::wrap_type(); + auto sequence_index_less_than_limit_label = + pattern::wrap_type({sequence_index_incremented_label, sequence_index_limit_label}); + auto and_label = pattern::wrap_type( + {iteration_counter_less_than_limit_label, sequence_index_less_than_limit_label}); + auto loop_condition_label = pattern::wrap_type({and_label}); + + auto X_body_label = pattern::any_input(pattern::rank_equals(3)); + auto C_body_label = pattern::any_input(pattern::rank_equals(2)); + auto H_body_label = pattern::any_input(pattern::rank_equals(2)); + auto gather_axis_label = pattern::wrap_type(); + auto sequence_index_new_shape_label = pattern::wrap_type(); + auto sequence_index_reshaped_label = + pattern::wrap_type({sequence_index_label, sequence_index_new_shape_label}); + auto sequence_index_or_label = + std::make_shared(OutputVector{sequence_index_label, sequence_index_reshaped_label}); + auto gather_body_label = + pattern::wrap_type({X_body_label, sequence_index_or_label, gather_axis_label}, + pattern::rank_equals(2)); + auto W_label = pattern::any_input(); + auto R_label = pattern::any_input(); + auto B_label = pattern::wrap_type(); + auto lstm_cell_label = pattern::wrap_type( + {gather_body_label, H_body_label, C_body_label, W_label, R_label, B_label}); + auto scatter_index_new_shape_label = pattern::wrap_type(); + auto scatter_index_body_label = + pattern::wrap_type({sequence_index_label, scatter_index_new_shape_label}); + auto updates_label = pattern::wrap_type( + {lstm_cell_label, pattern::wrap_type()}); + auto scatter_axis_label = pattern::wrap_type(); + auto scatter_body_label = pattern::wrap_type( + {pattern::any_input(), scatter_index_body_label, updates_label, scatter_axis_label}, + pattern::rank_equals(3)); + auto loop_output_label = 
pattern::wrap_type({scatter_body_label}); + + matcher_pass_callback callback = [=](pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + auto match_root = m.get_match_root(); + + const auto loop = ov::as_type_ptr(pattern_map.at(loop_label).get_node_shared_ptr()); + const auto& output_descs = loop->get_output_descriptions(); + if (output_descs.size() != 1) + return false; + const auto body_output_desc = + std::dynamic_pointer_cast(output_descs[0]); + if (!body_output_desc || body_output_desc->m_iteration != -1) + return false; + + ov::pass::pattern::Matcher loop_condition_matcher(loop_condition_label); + ov::pass::pattern::Matcher loop_output_matcher(loop_output_label); + + auto body = loop->get_function(); + const auto& body_parameters = body->get_parameters(); + const auto& body_results = body->get_results(); + const auto special_body_ports = loop->get_special_body_ports(); + + if (!loop_condition_matcher.match(body_results[special_body_ports.body_condition_output_idx]->output(0))) + return false; + if (!loop_output_matcher.match(body_results[body_output_desc->m_body_value_index]->output(0))) + return false; + + const auto& loop_condition_map = loop_condition_matcher.get_pattern_value_map(); + const auto& loop_output_map = loop_output_matcher.get_pattern_value_map(); + + int64_t iteration_counter_step = -1; + if (!get_scalar_constant_value(loop_condition_map.at(iteration_counter_step_label), iteration_counter_step) || + iteration_counter_step != 1) + return false; + int64_t sequence_index_step = -1; + if (!get_scalar_constant_value(loop_condition_map.at(sequence_index_step_label), sequence_index_step) || + sequence_index_step != 1) + return false; + + int64_t iteration_counter_limit = -1; + if (!get_scalar_constant_value(loop_condition_map.at(iteration_counter_limit_label), iteration_counter_limit)) + return false; + int64_t sequence_index_limit = -1; + if (!get_scalar_constant_value(loop_condition_map.at(sequence_index_limit_label), 
sequence_index_limit)) + return false; + if (iteration_counter_limit != sequence_index_limit) + return false; + + int64_t gather_axis = -1; + if (!get_scalar_constant_value(loop_output_map.at(gather_axis_label), gather_axis) || gather_axis != 0) + return false; + int64_t scatter_axis = -1; + if (!get_scalar_constant_value(loop_output_map.at(scatter_axis_label), scatter_axis) || scatter_axis != 0) + return false; + + const auto& sequence_index = loop_condition_map.at(sequence_index_label).get_node_shared_ptr(); + const auto& iteration_counter = loop_condition_map.at(iteration_counter_label).get_node_shared_ptr(); + + const auto& X_body = loop_output_map.at(X_body_label).get_node_shared_ptr(); + const auto& H_body = loop_output_map.at(H_body_label).get_node_shared_ptr(); + const auto& C_body = loop_output_map.at(C_body_label).get_node_shared_ptr(); + auto W = loop_output_map.at(W_label).get_node_shared_ptr(); + auto R = loop_output_map.at(R_label).get_node_shared_ptr(); + auto B = loop_output_map.at(B_label).get_node_shared_ptr(); + const auto lstm_cell = + ov::as_type_ptr(loop_output_map.at(lstm_cell_label).get_node_shared_ptr()); + const auto H_unsqueeze = loop_output_map.at(updates_label).get_node_shared_ptr(); + if (H_unsqueeze->input_value(0) != lstm_cell->output(0)) + return false; + + Output X = pattern_map.at(input_label); + Output H; + Output C; + + const auto& input_descs = loop->get_input_descriptions(); + for (const auto& desc : input_descs) { + if (body_parameters[desc->m_body_parameter_index] == X_body) { + if (!std::dynamic_pointer_cast(desc)) { + return false; + } + if (loop->input_value(desc->m_input_index) != pattern_map.at(scatter_label)) { + return false; + } + } + if (body_parameters[desc->m_body_parameter_index] == H_body) { + auto merged_desc = std::dynamic_pointer_cast(desc); + if (!merged_desc) { + return false; + } + H = loop->input_value(desc->m_input_index); + const auto& result = body_results[merged_desc->m_body_value_index]; + if 
(result->input_value(0) != lstm_cell->output(0)) { + return false; + } + } + if (body_parameters[desc->m_body_parameter_index] == C_body) { + auto merged_desc = std::dynamic_pointer_cast(desc); + if (!merged_desc) { + return false; + } + C = loop->input_value(desc->m_input_index); + const auto& result = body_results[merged_desc->m_body_value_index]; + if (result->input_value(0) != lstm_cell->output(1)) { + return false; + } + } + if (body_parameters[desc->m_body_parameter_index] == sequence_index) { + auto merged_desc = std::dynamic_pointer_cast(desc); + if (!merged_desc) { + return false; + } + } + if (body_parameters[desc->m_body_parameter_index] == iteration_counter) { + auto merged_desc = std::dynamic_pointer_cast(desc); + if (!merged_desc) { + return false; + } + } + } + + auto constant_is_zero = [](const Output& node) -> bool { + auto constant = ov::as_type_ptr(node.get_node_shared_ptr()); + if (!constant) { + return false; + } + float value = -1.0f; + return ov::op::util::get_single_value(constant, value) && value == 0.0f; + }; + + if (!constant_is_zero(H)) + return false; + if (!constant_is_zero(C)) + return false; + + const auto& scatter = pattern_map.at(scatter_label); + const auto& scatter_shape = scatter.get_partial_shape(); // scatter shape [sequence length, batch, input size] + const auto& sequence_length_dimension = scatter_shape[0]; + const auto& batch_size_dimension = scatter_shape[1]; + const auto& input_size_dimension = scatter_shape[2]; + + std::vector batch_first_perm{1, 0, 2}; + std::vector new_input_perm_values; + + if (pattern_map.count(input_transpose_label) > 0) { + const auto& input_transpose = pattern_map.at(input_transpose_label).get_node(); + if (ov::is_type(input_transpose)) { + auto input_perm = ov::as_type(input_transpose->get_input_node_ptr(1)); + if (!input_perm) + return false; + auto input_perm_values = input_perm->cast_vector(); + for (size_t i = 0; i < input_perm_values.size(); i++) { + 
new_input_perm_values.push_back(input_perm_values[batch_first_perm[i]]); + } + } else if (ov::is_type(input_transpose)) { + const auto& input_shape = input_transpose->get_input_partial_shape(0); + const auto& output_shape = input_transpose->get_output_partial_shape(0); + if (input_shape.size() != output_shape.size()) + return false; + for (size_t i = 0; i < output_shape.size(); i++) { + const auto& dim = output_shape[i]; + for (size_t j = 0; j < input_shape.size(); j++) { + if (input_shape[j] == dim) { + new_input_perm_values.push_back(batch_first_perm[j]); + break; + } + } + } + } + } else { + new_input_perm_values = batch_first_perm; + } + + NodeRegistry node_registry; + + if (new_input_perm_values != std::vector{0, 1, 2}) { + auto new_input_perm = node_registry.make(element::i32, + Shape{new_input_perm_values.size()}, + new_input_perm_values); + X = node_registry.make(X, new_input_perm); + } + + const auto& X_shape = X.get_partial_shape(); + if (!X_shape[0].compatible(batch_size_dimension) || !X_shape[1].compatible(sequence_length_dimension) || + !X_shape[2].compatible(input_size_dimension)) { + return false; + } + + // Finally create LSTMSequence + auto zero = node_registry.make(element::i32, Shape{1}, 0); + auto max_sequence_length = node_registry.make(element::i32, Shape{1}, sequence_index_limit); + auto shapeof_X = node_registry.make(X); + auto batch_size = node_registry.make(shapeof_X, zero, zero); + auto shapeof_H = node_registry.make(H); + auto new_H_shape = node_registry.make(OutputVector{batch_size, shapeof_H}, 0); + auto new_H = node_registry.make(H, new_H_shape); + auto shapeof_C = node_registry.make(C); + auto new_C_shape = node_registry.make(OutputVector{batch_size, shapeof_C}, 0); + auto new_C = node_registry.make(C, new_C_shape); + auto new_W = node_registry.make(W, zero); + auto new_R = node_registry.make(R, zero); + auto new_B = node_registry.make(B, zero); + std::shared_ptr sequence_lengths = std::make_shared(max_sequence_length, batch_size); + 
if (auto constant = ov::util::constantfold_subgraph(sequence_lengths)) { + sequence_lengths = constant; + } + node_registry.add(sequence_lengths); + auto lstm = node_registry.make(X, + new_H, + new_C, + sequence_lengths, + new_W, + new_R, + new_B, + lstm_cell->get_hidden_size(), + op::v5::LSTMSequence::direction::FORWARD, + lstm_cell->get_activations_alpha(), + lstm_cell->get_activations_beta(), + lstm_cell->get_activations(), + lstm_cell->get_clip()); + if (transformation_callback(lstm)) + return false; + + const auto one = node_registry.make(element::i32, Shape{1}, 1); + auto H_squeezed = node_registry.make(lstm->output(0), one); + H_squeezed->set_friendly_name(match_root->get_friendly_name()); + + copy_runtime_info(NodeVector{scatter.get_node_shared_ptr(), loop}, node_registry.get()); + + for (auto&& loop_consumer : loop->output(0).get_target_inputs()) { + auto node = loop_consumer.get_node()->shared_from_this(); + if (ov::is_type(node)) { + auto shapeof = std::make_shared(H_squeezed); + auto indices = op::v0::Constant::create(element::i32, Shape{3}, {1, 0, 2}); + auto shapeof_gather = std::make_shared(shapeof, indices, zero); + shapeof_gather->set_friendly_name(node->get_friendly_name()); + copy_runtime_info(node, {shapeof, indices, shapeof_gather}); + replace_node(node, shapeof_gather); + } + } + + replace_node(match_root, H_squeezed); + + return true; + }; + + auto m = std::make_shared(output_transpose_label, matcher_name); + register_matcher(m, callback); +} + +class EliminateGatherWithRange : public ov::pass::MatcherPass { +public: + EliminateGatherWithRange() { + using namespace ov; + using namespace ov::pass; + + auto data_label = pattern::any_input(pattern::rank_equals(3)); + auto shapeof_label = pattern::wrap_type({data_label}); + auto shapeof_gather_label = pattern::wrap_type( + {shapeof_label, pattern::wrap_type(), pattern::wrap_type()}); + auto shapeof_gather2_label = pattern::wrap_type( + {shapeof_gather_label, pattern::wrap_type(), 
pattern::wrap_type()}); + auto reshape_label = + pattern::wrap_type({shapeof_gather2_label, pattern::wrap_type()}); + auto range_label = pattern::wrap_type( + {pattern::wrap_type(), reshape_label, pattern::wrap_type()}); + auto match_node = pass::pattern::wrap_type( + {data_label, range_label, pattern::wrap_type()}); + + matcher_pass_callback callback = [=](pass::pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + auto gather = ov::as_type_ptr(m.get_match_root()); + if (!gather) + return false; + auto axis = gather->get_axis(); + if (axis == op::v1::Gather::AXIS_NOT_SET_VALUE) { + return false; + } + + const auto shapeof_gather = pattern_map.at(shapeof_gather_label).get_node_shared_ptr(); + const auto shapeof_gather_indexes_node = + ov::as_type_ptr(shapeof_gather->get_input_node_shared_ptr(1)); + auto shapeof_gather_indexes = shapeof_gather_indexes_node->cast_vector(); + if (shapeof_gather_indexes.size() != 3) + return false; + const auto shapeof_gather2 = pattern_map.at(shapeof_gather2_label).get_node_shared_ptr(); + int64_t shapeof_gather2_index = -1; + int64_t shapeof_gather2_axis = -1; + if (!get_scalar_constant_value(shapeof_gather2->get_input_node_shared_ptr(1), shapeof_gather2_index)) + return false; + if (!get_scalar_constant_value(shapeof_gather2->get_input_node_shared_ptr(2), shapeof_gather2_axis) || + shapeof_gather2_axis != 0) + return false; + const auto reshape = pattern_map.at(reshape_label).get_node_shared_ptr(); + const auto& reshape_shape = reshape->get_output_partial_shape(0); + if (reshape_shape.is_dynamic() || reshape_shape.size() != 0) + return false; + const auto range = pattern_map.at(range_label).get_node_shared_ptr(); + int64_t range_start = -1; + int64_t range_step = -1; + if (!get_scalar_constant_value(range->get_input_node_shared_ptr(0), range_start) || range_start != 0) + return false; + if (!get_scalar_constant_value(range->get_input_node_shared_ptr(2), range_step) || range_step != 1) + return false; + + 
int64_t gather_axis = -1; + if (!get_scalar_constant_value(gather->get_input_node_shared_ptr(2), gather_axis) || + gather_axis != shapeof_gather_indexes[shapeof_gather2_index]) + return false; + + return replace_output_update_name(gather->output(0), gather->input_value(0)); + }; + + auto m = std::make_shared(match_node, "EliminateGatherWithRange"); + register_matcher(m, callback); + } +}; + +ov::pass::FuseReverseLSTMSequence::FuseReverseLSTMSequence() { + MATCHER_SCOPE(FuseReverseLSTMSequence); + + auto data_label = pattern::any_input(pattern::rank_equals(3)); + auto first_transpose_label = + pattern::wrap_type({data_label, pattern::wrap_type()}, + pattern::rank_equals(3)); + auto input_to_first_reverse_sequence_label = + std::make_shared(OutputVector{first_transpose_label, data_label}); + auto first_reverse_sequence_label = + pattern::wrap_type({input_to_first_reverse_sequence_label, pattern::any_input()}); + auto second_transpose_label = + pattern::wrap_type({first_reverse_sequence_label, pattern::wrap_type()}); + auto lstm_label = pattern::wrap_type({second_transpose_label, + pattern::any_input(), + pattern::any_input(), + pattern::any_input(), + pattern::any_input(), + pattern::any_input(), + pattern::any_input()}, + pattern::consumers_count(1)); + auto squeeze_label = pattern::wrap_type({lstm_label, pattern::wrap_type()}); + auto second_reverse_sequence_label = + pattern::wrap_type({squeeze_label, pattern::any_input()}); + + matcher_pass_callback callback = [=](pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_value_map(); + const auto& data = pattern_map.at(data_label); + const auto second_transpose = pattern_map.at(second_transpose_label).get_node_shared_ptr(); + const auto second_transpose_perm = + ov::as_type_ptr(second_transpose->get_input_node_shared_ptr(1)); + auto lstm = ov::as_type_ptr(pattern_map.at(lstm_label).get_node_shared_ptr()); + if (lstm->get_direction() != op::v5::LSTMSequence::direction::FORWARD) + return false; + + 
std::shared_ptr new_transpose_perm; + if (pattern_map.count(first_transpose_label) > 0) { + auto first_transpose = pattern_map.at(first_transpose_label).get_node_shared_ptr(); + if (ov::is_type(first_transpose)) { + const auto& reshape_input_shape = first_transpose->get_input_partial_shape(0); + const auto& reshape_output_shape = first_transpose->get_output_partial_shape(0); + if (reshape_input_shape.size() != reshape_output_shape.size()) + return false; + const auto second_transpose_perm_values = second_transpose_perm->cast_vector(); + std::vector new_perm_values; + for (size_t i = 0; i < reshape_output_shape.size(); i++) { + const auto& dim = reshape_output_shape[i]; + for (size_t j = 0; j < reshape_input_shape.size(); j++) { + if (dim == reshape_input_shape[j]) { + new_perm_values.push_back(second_transpose_perm_values[j]); + } + } + } + if (new_perm_values.size() != 3) + return false; + if (new_perm_values != std::vector{0, 1, 2}) { + new_transpose_perm = + op::v0::Constant::create(element::i32, Shape{new_perm_values.size()}, new_perm_values); + } + } else if (ov::is_type(first_transpose)) { + const auto first_transpose_perm = ov::as_type(first_transpose->get_input_node_ptr(1)); + const auto first_transpose_perm_values = first_transpose_perm->cast_vector(); + const auto second_transpose_perm_values = second_transpose_perm->cast_vector(); + if (first_transpose_perm_values.size() != second_transpose_perm_values.size()) + return false; + std::vector new_perm_values; + for (size_t i = 0; i < first_transpose_perm_values.size(); i++) { + new_perm_values.push_back(first_transpose_perm_values[second_transpose_perm_values[i]]); + } + if (new_perm_values.size() != 3) + return false; + if (new_perm_values != std::vector{0, 1, 2}) { + new_transpose_perm = + op::v0::Constant::create(element::i32, Shape{new_perm_values.size()}, new_perm_values); + } + } + } else { + new_transpose_perm = second_transpose_perm; + } + + NodeRegistry node_registry; + + Output new_lstm_input; + 
if (new_transpose_perm) { + new_lstm_input = node_registry.make(data, new_transpose_perm); + } else { + new_lstm_input = data; + } + + auto new_lstm = node_registry.make(new_lstm_input, + lstm->input_value(1), + lstm->input_value(2), + lstm->input_value(3), + lstm->input_value(4), + lstm->input_value(5), + lstm->input_value(6), + lstm->get_hidden_size(), + op::v5::LSTMSequence::direction::REVERSE, + lstm->get_activations_alpha(), + lstm->get_activations_beta(), + lstm->get_activations(), + lstm->get_clip()); + + auto squeeze = pattern_map.at(squeeze_label).get_node_shared_ptr(); + if (squeeze->input_value(0) != lstm->output(0)) + return false; + int64_t squeeze_axis = -1; + if (!get_scalar_constant_value(squeeze->get_input_node_shared_ptr(1), squeeze_axis) || squeeze_axis != 1) + return false; + auto new_squeeze = node_registry.make(new_lstm->output(0), squeeze->input_value(1)); + const auto match_root = m.get_match_root(); + new_squeeze->set_friendly_name(match_root->get_friendly_name()); + + for (auto& consumer : second_transpose->output(0).get_target_inputs()) { + auto node = consumer.get_node()->shared_from_this(); + if (ov::is_type(node)) { + auto shapeof = std::make_shared(new_lstm_input); + replace_node(node, shapeof); + } + } + + NodeVector from{pattern_map.at(first_reverse_sequence_label).get_node_shared_ptr(), + second_transpose, + lstm, + squeeze, + pattern_map.at(second_reverse_sequence_label).get_node_shared_ptr()}; + if (pattern_map.count(first_transpose_label) > 0) { + from.push_back(pattern_map.at(first_transpose_label).get_node_shared_ptr()); + } + + copy_runtime_info(from, node_registry.get()); + replace_node(match_root, new_squeeze); + + return true; + }; + + auto m = std::make_shared(second_reverse_sequence_label, matcher_name); + register_matcher(m, callback); +} + +ov::pass::FuseLSTMSequencesToBidirectionalLSTMSequence::FuseLSTMSequencesToBidirectionalLSTMSequence() { + MATCHER_SCOPE(FuseLSTMSequencesToBidirectionalLSTMSequence); + auto 
data_label = pattern::any_input(); + + // forward pattern + auto transpose_forward_label = + pattern::wrap_type({data_label, pattern::wrap_type()}); + auto lstm_sequence_forward_first_input_label = + std::make_shared(OutputVector{transpose_forward_label, data_label}); + auto shapeof_forward_label = pattern::wrap_type({lstm_sequence_forward_first_input_label}); + auto gather_forward_label = pattern::wrap_type( + {shapeof_forward_label, pattern::wrap_type(), pattern::wrap_type()}); + auto max_sequence_len_forward_label = pattern::wrap_type(); + auto broadcast_forward_label = + pattern::wrap_type({max_sequence_len_forward_label, gather_forward_label}); + auto const_sequence_lengths_forward_label = pattern::wrap_type(); + auto sequence_lengths_forward_label = + std::make_shared(OutputVector{broadcast_forward_label, const_sequence_lengths_forward_label}); + auto lstm_sequence_forward_label = + pattern::wrap_type({lstm_sequence_forward_first_input_label, + pattern::any_input(), + pattern::any_input(), + sequence_lengths_forward_label, + pattern::any_input(), + pattern::any_input(), + pattern::any_input()}); + auto squeeze_forward_label = + pattern::wrap_type({lstm_sequence_forward_label, pattern::wrap_type()}, + pattern::rank_equals(3)); + + // backward pattern + auto transpose_reverse_label = + pattern::wrap_type({data_label, pattern::wrap_type()}); + auto lstm_sequence_reverse_first_input_label = + std::make_shared(OutputVector{transpose_reverse_label, data_label}); + auto shapeof_reverse_label = pattern::wrap_type({lstm_sequence_reverse_first_input_label}); + auto gather_reverse_label = pattern::wrap_type( + {shapeof_reverse_label, pattern::wrap_type(), pattern::wrap_type()}); + auto max_sequence_len_reverse_label = pattern::wrap_type(); + auto broadcast_reverse_label = + pattern::wrap_type({max_sequence_len_reverse_label, gather_reverse_label}); + auto const_sequence_lengths_reverse_label = pattern::wrap_type(); + auto sequence_lengths_reverse_label = + 
std::make_shared(OutputVector{broadcast_reverse_label, const_sequence_lengths_reverse_label}); + auto lstm_sequence_reverse_label = + pattern::wrap_type({lstm_sequence_reverse_first_input_label, + pattern::any_input(), + pattern::any_input(), + sequence_lengths_reverse_label, + pattern::any_input(), + pattern::any_input(), + pattern::any_input()}); + auto squeeze_reverse_label = + pattern::wrap_type({lstm_sequence_reverse_label, pattern::wrap_type()}, + pattern::rank_equals(3)); + + auto concat_label = pattern::wrap_type({squeeze_forward_label, squeeze_reverse_label}); + + matcher_pass_callback callback = [=](pattern::Matcher& m) { + const auto& pattern_map = m.get_pattern_map(); + auto lstm_forward = ov::as_type_ptr(pattern_map.at(lstm_sequence_forward_label)); + auto lstm_reverse = ov::as_type_ptr(pattern_map.at(lstm_sequence_reverse_label)); + + NodeVector from{lstm_forward, lstm_reverse}; + + if (lstm_forward->get_direction() != op::v5::LSTMSequence::direction::FORWARD || + lstm_reverse->get_direction() != op::v5::LSTMSequence::direction::REVERSE) + return false; + + if (lstm_forward->get_hidden_size() != lstm_reverse->get_hidden_size()) + return false; + if (lstm_forward->get_activations_alpha() != lstm_reverse->get_activations_alpha()) + return false; + if (lstm_forward->get_activations_beta() != lstm_reverse->get_activations_beta()) + return false; + if (lstm_forward->get_activations() != lstm_reverse->get_activations()) + return false; + if (lstm_forward->get_clip() != lstm_reverse->get_clip()) + return false; + + auto squeeze_forward = pattern_map.at(squeeze_forward_label); + if (squeeze_forward->input_value(0) != lstm_forward->output(0)) + return false; + int64_t squeeze_forward_axis = -1; + if (!get_scalar_constant_value(squeeze_forward->get_input_node_shared_ptr(1), squeeze_forward_axis) || + squeeze_forward_axis != 1) + return false; + + auto squeeze_reverse = pattern_map.at(squeeze_reverse_label); + if (squeeze_reverse->input_value(0) != 
lstm_reverse->output(0)) + return false; + int64_t squeeze_reverse_axis = -1; + if (!get_scalar_constant_value(squeeze_reverse->get_input_node_shared_ptr(1), squeeze_reverse_axis) || + squeeze_reverse_axis != 1) + return false; + + auto concat = ov::as_type_ptr(pattern_map.at(concat_label)); + if (concat->get_axis() != 2) + return false; + + from.push_back(squeeze_forward); + from.push_back(squeeze_reverse); + from.push_back(concat); + + bool has_input_transpose_forward = pattern_map.count(transpose_forward_label) > 0; + bool has_input_transpose_reverse = pattern_map.count(transpose_reverse_label) > 0; + if (has_input_transpose_forward ^ has_input_transpose_reverse) + return false; + + bool is_forward_sequence_lengths_constant = pattern_map.count(const_sequence_lengths_forward_label) > 0; + bool is_reverse_sequence_lengths_constant = pattern_map.count(const_sequence_lengths_reverse_label) > 0; + if (is_forward_sequence_lengths_constant ^ is_reverse_sequence_lengths_constant) + return false; + + if (is_forward_sequence_lengths_constant) { + auto sequence_lengths_forward = + ov::as_type_ptr(pattern_map.at(const_sequence_lengths_forward_label)); + auto sequence_lengths_reverse = + ov::as_type_ptr(pattern_map.at(const_sequence_lengths_reverse_label)); + if (sequence_lengths_forward->get_shape() != sequence_lengths_reverse->get_shape()) + return false; + auto sequence_lengths_forward_values = sequence_lengths_forward->cast_vector(); + auto sequence_lengths_reverse_values = sequence_lengths_reverse->cast_vector(); + if (sequence_lengths_forward_values != sequence_lengths_reverse_values) + return false; + from.push_back(sequence_lengths_forward); + from.push_back(sequence_lengths_reverse); + } else { + auto max_sequence_len_forward = + ov::as_type_ptr(pattern_map.at(max_sequence_len_forward_label)); + auto max_sequence_len_reverse = + ov::as_type_ptr(pattern_map.at(max_sequence_len_reverse_label)); + if (max_sequence_len_forward->get_shape() != 
max_sequence_len_reverse->get_shape()) + return false; + auto max_sequence_len_forward_values = max_sequence_len_forward->cast_vector(); + auto max_sequence_len_reverse_values = max_sequence_len_reverse->cast_vector(); + if (max_sequence_len_forward_values != max_sequence_len_reverse_values) + return false; + + auto gather_forward = pattern_map.at(gather_forward_label); + int64_t gather_index = -1; + int64_t gather_axis = -1; + if (!get_scalar_constant_value(gather_forward->get_input_node_shared_ptr(1), gather_index) || + gather_index != 0) + return false; + if (!get_scalar_constant_value(gather_forward->get_input_node_shared_ptr(2), gather_axis) || + gather_axis != 0) + return false; + + auto gather_reverse = pattern_map.at(gather_reverse_label); + gather_index = -1; + gather_axis = -1; + if (!get_scalar_constant_value(gather_reverse->get_input_node_shared_ptr(1), gather_index) || + gather_index != 0) + return false; + if (!get_scalar_constant_value(gather_reverse->get_input_node_shared_ptr(2), gather_axis) || + gather_axis != 0) + return false; + + from.push_back(max_sequence_len_forward); + from.push_back(max_sequence_len_reverse); + from.push_back(gather_forward); + from.push_back(gather_reverse); + } + + NodeRegistry node_registry; + + auto new_H = + node_registry.make(OutputVector{lstm_forward->input_value(1), lstm_reverse->input_value(1)}, + 1); + auto new_C = + node_registry.make(OutputVector{lstm_forward->input_value(2), lstm_reverse->input_value(2)}, + 1); + auto new_W = + node_registry.make(OutputVector{lstm_forward->input_value(4), lstm_reverse->input_value(4)}, + 0); + auto new_R = + node_registry.make(OutputVector{lstm_forward->input_value(5), lstm_reverse->input_value(5)}, + 0); + auto new_B = + node_registry.make(OutputVector{lstm_forward->input_value(6), lstm_reverse->input_value(6)}, + 0); + auto new_lstm = node_registry.make(lstm_forward->input_value(0), + new_H, + new_C, + lstm_forward->input_value(3), + new_W, + new_R, + new_B, + 
lstm_forward->get_hidden_size(), + op::v5::LSTMSequence::direction::BIDIRECTIONAL, + lstm_forward->get_activations_alpha(), + lstm_forward->get_activations_beta(), + lstm_forward->get_activations(), + lstm_forward->get_clip()); + if (transformation_callback(new_lstm)) + return false; + + auto transpose = + node_registry.make(new_lstm->output(0), + op::v0::Constant::create(element::i32, Shape{4}, {0, 2, 1, 3})); + auto new_shape = node_registry.make(element::i32, Shape{3}, std::vector{0, 0, -1}); + auto reshape = node_registry.make(transpose, new_shape, true); + reshape->set_friendly_name(concat->get_friendly_name()); + + copy_runtime_info(from, node_registry.get()); + replace_node(concat, reshape); + + return true; + }; + + auto m = std::make_shared(concat_label, matcher_name); + register_matcher(m, callback); +} + ov::pass::ConvertTensorIteratorToSequence::ConvertTensorIteratorToSequence() { add_matcher(); add_matcher(); add_matcher(); + add_matcher(); + add_matcher(); + add_matcher(); + add_matcher(); } diff --git a/src/common/transformations/tests/common_optimizations/low_latency_v2_test.cpp b/src/common/transformations/tests/common_optimizations/low_latency_v2_test.cpp index f56c34227a8be4..702b4cbdfeeb5b 100644 --- a/src/common/transformations/tests/common_optimizations/low_latency_v2_test.cpp +++ b/src/common/transformations/tests/common_optimizations/low_latency_v2_test.cpp @@ -910,7 +910,6 @@ TEST_P(LLT2Sequence, RNNLowLatency_v2) { auto H = make_shared(element::f32, Shape{attrs.batch, attrs.num_dir, attrs.hidden_size}); auto C = make_shared(element::f32, Shape{attrs.batch, attrs.num_dir, attrs.hidden_size}); auto outputs = create_sequence(p.rnn_type, attrs, X, H, C); - outputs[0].get_node()->set_friendly_name("lstm_node"); ParameterVector params{X, H}; if (p.rnn_type == RNNType::LSTM) { params.push_back(C); @@ -931,9 +930,9 @@ TEST_P(LLT2Sequence, RNNLowLatency_v2) { auto H = make_shared(element::f32, Shape{attrs.batch, attrs.num_dir, attrs.hidden_size}); 
auto C = make_shared(element::f32, Shape{attrs.batch, attrs.num_dir, attrs.hidden_size}); auto variable_h = make_shared( - ov::op::util::VariableInfo{H->get_shape(), H->get_element_type(), "lstm_node/variable_0"}); + ov::op::util::VariableInfo{H->get_shape(), H->get_element_type(), "node_28/variable_0"}); auto variable_c = make_shared( - ov::op::util::VariableInfo{C->get_shape(), C->get_element_type(), "lstm_node/variable_1"}); + ov::op::util::VariableInfo{C->get_shape(), C->get_element_type(), "node_28/variable_1"}); auto read_val_H = create_read_value(H, variable_h); auto read_val_C = create_read_value(C, variable_c); diff --git a/src/common/transformations/tests/op_conversions/convert_ti_to_sequences_test.cpp b/src/common/transformations/tests/op_conversions/convert_ti_to_sequences_test.cpp index 524e3735948a3b..4521936361c07c 100644 --- a/src/common/transformations/tests/op_conversions/convert_ti_to_sequences_test.cpp +++ b/src/common/transformations/tests/op_conversions/convert_ti_to_sequences_test.cpp @@ -802,3 +802,466 @@ TEST(TransformationTests, ConvertTensorIteratorToGRUSequenceDynamicSqueezeCase) auto res = compare_functions(f, f_ref); ASSERT_TRUE(res.first) << res.second; } + +using ConvertLoopToLSTMSequenceTestParams = std::tuple; // with_gather_reshape + +class ConvertLoopToLSTMSequenceTest : public testing::WithParamInterface, + public TransformationTestsF {}; + +TEST_P(ConvertLoopToLSTMSequenceTest, FusionTest) { + const auto& params = GetParam(); + bool with_input_transpose = std::get<0>(params); + bool with_gather_reshape = std::get<1>(params); + + size_t input_size = 3; + size_t hidden_size = 2; + size_t num_sequences = 5; + size_t batch_size = 1; + + { + auto trip_count = op::v0::Constant::create(element::i32, Shape{}, {-1}); + auto condition = op::v0::Constant::create(element::boolean, Shape{}, {true}); + auto iteration_counter = op::v0::Constant::create(element::i32, Shape{}, {0}); + auto sequence_index = op::v0::Constant::create(element::i32, 
Shape{}, {0}); + std::shared_ptr X; + std::shared_ptr scatter_updates; + if (with_input_transpose) { + X = std::make_shared(element::f32, Shape{batch_size, num_sequences, input_size}); + scatter_updates = + std::make_shared(X, op::v0::Constant::create(element::i32, Shape{3}, {1, 0, 2})); + } else { + X = std::make_shared(element::f32, Shape{num_sequences, batch_size, input_size}); + scatter_updates = X; + } + auto scatter_input = op::v0::Constant::create(element::f32, Shape{num_sequences, batch_size, input_size}, {0}); + std::vector indexes_values(num_sequences); + std::iota(indexes_values.begin(), indexes_values.end(), 0); + auto scatter_indexes = op::v0::Constant::create(element::i32, Shape{num_sequences, 1}, indexes_values); + auto scatter = std::make_shared(scatter_input, scatter_indexes, scatter_updates); + auto H = op::v0::Constant::create(element::f32, Shape{batch_size, hidden_size}, {0}); + auto C = op::v0::Constant::create(element::f32, Shape{batch_size, hidden_size}, {0}); + auto Y = op::v0::Constant::create(element::f32, Shape{num_sequences, batch_size, hidden_size}, {0}); + + auto loop = std::make_shared(trip_count, condition); + + auto X_body = std::make_shared(element::f32, Shape{num_sequences, batch_size, input_size}); + auto Y_body = std::make_shared( + element::f32, + PartialShape{static_cast(num_sequences), static_cast(batch_size), -1}); + auto C_body = std::make_shared(element::f32, Shape{batch_size, hidden_size}); + auto sequence_index_body = std::make_shared(element::i32, Shape{}); + auto H_body = std::make_shared(element::f32, Shape{batch_size, hidden_size}); + auto iteration_counter_body = std::make_shared(element::i32, Shape{}); + auto iteration_counter_step = op::v0::Constant::create(element::i32, Shape{}, {1}); + auto iteration_counter_incremented = + std::make_shared(iteration_counter_body, iteration_counter_step); + auto iteration_counter_limit = op::v0::Constant::create(element::i32, Shape{}, {num_sequences}); + auto 
iteration_counter_less_than_limit = + std::make_shared(iteration_counter_incremented, iteration_counter_limit); + auto sequence_index_step = op::v0::Constant::create(element::i32, Shape{}, {1}); + auto sequence_index_incremented = std::make_shared(sequence_index_body, sequence_index_step); + auto sequence_index_limit = op::v0::Constant::create(element::i32, Shape{}, {num_sequences}); + auto sequence_index_less_than_limit = + std::make_shared(sequence_index_incremented, sequence_index_limit); + auto output_condition = + std::make_shared(iteration_counter_less_than_limit, sequence_index_less_than_limit); + auto condition_result = std::make_shared(output_condition); + auto Y_shape = std::make_shared(Y_body); + auto zero = op::v0::Constant::create(element::i32, Shape{1}, {0}); + auto max_sequence_length = std::make_shared(Y_shape, zero, zero); + auto sequence_index_new_shape = op::v0::Constant::create(element::i32, Shape{0}, {}); + std::shared_ptr gather_index; + if (with_gather_reshape) { + gather_index = std::make_shared(sequence_index_body, sequence_index_new_shape, false); + } else { + gather_index = sequence_index_body; + } + auto gather_axis = op::v0::Constant::create(element::i32, Shape{1}, {0}); + auto X_slice = std::make_shared(X_body, gather_index, gather_axis); + std::vector W_values(4 * hidden_size * input_size); + std::iota(W_values.begin(), W_values.end(), 0.0f); + auto W = op::v0::Constant::create(element::f32, Shape{4 * hidden_size, input_size}, W_values); + std::vector R_values(4 * hidden_size * hidden_size); + std::iota(R_values.begin(), R_values.end(), 0.0f); + auto R = op::v0::Constant::create(element::f32, Shape{4 * hidden_size, hidden_size}, R_values); + std::vector B_values(4 * hidden_size); + std::iota(B_values.begin(), B_values.end(), 0.0f); + auto B = op::v0::Constant::create(element::f32, Shape{4 * hidden_size}, B_values); + auto lstm_cell = std::make_shared(X_slice, + H_body, + C_body, + W, + R, + B, + hidden_size, + std::vector{"sigmoid", 
"tanh", "tanh"}); + auto Y_new_shape2 = std::make_shared(lstm_cell->output(0)); + auto Y_new_shape = std::make_shared(OutputVector{max_sequence_length, Y_new_shape2}, 0); + auto Y_broadcasted = std::make_shared(Y_body, Y_new_shape); + auto sequence_index_new_shape2 = op::v0::Constant::create(element::i64, Shape{1}, {-1}); + auto scatter_update_index = + std::make_shared(sequence_index_body, sequence_index_new_shape2, false); + auto H_unsqueezed = std::make_shared(lstm_cell->output(0), zero); + auto scatter_update_body = + std::make_shared(Y_broadcasted, scatter_update_index, H_unsqueezed, zero); + auto Y_result = std::make_shared(scatter_update_body); + auto Co_result = std::make_shared(lstm_cell->output(1)); + auto sequence_index_result = std::make_shared(sequence_index_incremented); + auto Ho_result = std::make_shared(lstm_cell->output(0)); + auto iteration_counter_result = std::make_shared(iteration_counter_incremented); + + ParameterVector params{X_body, H_body, C_body, Y_body, sequence_index_body, iteration_counter_body}; + ResultVector results{Y_result, + Ho_result, + Co_result, + sequence_index_result, + iteration_counter_result, + condition_result}; + auto body = std::make_shared(results, params); + loop->set_function(body); + + loop->set_invariant_input(Y_body, Y); + loop->get_iter_value(Y_result, -1); + loop->set_merged_input(iteration_counter_body, iteration_counter, iteration_counter_result); + loop->set_merged_input(H_body, H, Ho_result); + loop->set_merged_input(sequence_index_body, sequence_index, sequence_index_result); + loop->set_merged_input(C_body, C, Co_result); + loop->set_invariant_input(X_body, scatter); + loop->set_special_body_ports({-1, 5}); + auto transpose = + std::make_shared(loop->output(0), + op::v0::Constant::create(element::i32, Shape{3}, {1, 0, 2})); + + model = std::make_shared(transpose, ParameterVector{X}); + + manager.register_pass(); + } + + { + auto perm = op::v0::Constant::create(element::i32, Shape{3}, {1, 0, 2}); + 
std::shared_ptr X; + std::shared_ptr X_lstm; + if (with_input_transpose) { + // fused subgraph doesn't have Transpose + X = std::make_shared(element::f32, Shape{batch_size, num_sequences, input_size}); + X_lstm = X; + } else { + X = std::make_shared(element::f32, Shape{num_sequences, batch_size, input_size}); + X_lstm = std::make_shared(X, perm); + } + auto one = op::v0::Constant::create(element::i32, Shape{1}, {1}); + auto zero = op::v0::Constant::create(element::i32, Shape{1}, {0}); + auto shapeof_X = std::make_shared(X_lstm); + auto batch_size_node = std::make_shared(shapeof_X, zero, zero); + auto H = op::v0::Constant::create(element::f32, Shape{batch_size, hidden_size}, {0}); + auto new_H_shape = + std::make_shared(OutputVector{batch_size_node, std::make_shared(H)}, 0); + auto H_broadcasted = std::make_shared(H, new_H_shape); + auto C = op::v0::Constant::create(element::f32, Shape{batch_size, hidden_size}, {0}); + auto new_C_shape = + std::make_shared(OutputVector{batch_size_node, std::make_shared(C)}, 0); + auto C_broadcasted = std::make_shared(C, new_C_shape); + + std::vector W_values(4 * hidden_size * input_size); + std::iota(W_values.begin(), W_values.end(), 0.0f); + std::vector R_values(4 * hidden_size * hidden_size); + std::iota(R_values.begin(), R_values.end(), 0.0f); + std::vector B_values(4 * hidden_size); + std::iota(B_values.begin(), B_values.end(), 0.0f); + auto W = std::make_shared( + op::v0::Constant::create(element::f32, Shape{4 * hidden_size, input_size}, W_values), + zero); + auto R = std::make_shared( + op::v0::Constant::create(element::f32, Shape{4 * hidden_size, hidden_size}, R_values), + zero); + auto B = std::make_shared( + op::v0::Constant::create(element::f32, Shape{4 * hidden_size}, B_values), + zero); + + auto sequence_lengths = op::v0::Constant::create(element::i32, Shape{1}, {num_sequences}); + auto lstm = std::make_shared(X_lstm, + H_broadcasted, + C_broadcasted, + sequence_lengths, + W, + R, + B, + hidden_size, + 
op::v5::LSTMSequence::direction::FORWARD, + std::vector{}, + std::vector{}, + std::vector{"sigmoid", "tanh", "tanh"}); + auto Ho_squeezed = std::make_shared(lstm->output(0), one); + + model_ref = std::make_shared(Ho_squeezed, ParameterVector{X}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); + comparator.enable(FunctionsComparator::CmpValues::ACCURACY); +} + +INSTANTIATE_TEST_SUITE_P(ConvertLoopToLSTMSequence, + ConvertLoopToLSTMSequenceTest, + testing::Combine(testing::Values(false, true), testing::Values(false, true))); + +class FuseReverseLSTMSequenceTest : public TransformationTestsF, public testing::WithParamInterface {}; + +TEST_P(FuseReverseLSTMSequenceTest, FusionTest) { + const auto with_input_transpose = GetParam(); + + size_t input_size = 3; + size_t hidden_size = 2; + size_t num_sequences = 5; + size_t batch_size = 1; + + { + std::shared_ptr input; + std::shared_ptr second_transpose; + if (with_input_transpose) { + input = std::make_shared(element::f32, Shape{batch_size, num_sequences, input_size}); + auto input_transpose = + std::make_shared(input, op::v0::Constant::create(element::i32, Shape{3}, {1, 0, 2})); + auto input_reverse = std::make_shared( + input_transpose, + op::v0::Constant::create(element::i32, Shape{1}, {num_sequences}), + 1, + 0); + second_transpose = + std::make_shared(input_reverse, + op::v0::Constant::create(element::i32, Shape{3}, {1, 0, 2})); + } else { + input = std::make_shared(element::f32, Shape{batch_size, input_size, num_sequences}); + auto input_reverse = std::make_shared( + input, + op::v0::Constant::create(element::i32, Shape{1}, {num_sequences}), + 0, + 2); + second_transpose = + std::make_shared(input_reverse, + op::v0::Constant::create(element::i32, Shape{3}, {0, 2, 1})); + } + auto H = op::v0::Constant::create(element::f32, Shape{batch_size, 1, hidden_size}, {1.0f}); + auto C = op::v0::Constant::create(element::f32, 
Shape{batch_size, 1, hidden_size}, {2.0f}); + auto sequence_lengths = op::v0::Constant::create(element::i32, Shape{1}, {num_sequences}); + auto W = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, input_size}, {3.0f}); + auto R = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, hidden_size}, {4.0f}); + auto B = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size}, {5.0f}); + auto lstm = std::make_shared(second_transpose, + H, + C, + sequence_lengths, + W, + R, + B, + hidden_size, + op::v5::LSTMSequence::direction::FORWARD); + auto squeeze = + std::make_shared(lstm->output(0), op::v0::Constant::create(element::i32, Shape{}, {1})); + auto output_reverse = + std::make_shared(squeeze, + op::v0::Constant::create(element::i32, Shape{1}, {num_sequences}), + 0, + 1); + + model = std::make_shared(output_reverse, ParameterVector{input}); + + manager.register_pass(); + } + + { + std::shared_ptr input; + std::shared_ptr lstm_input; + if (with_input_transpose) { + input = std::make_shared(element::f32, Shape{batch_size, num_sequences, input_size}); + lstm_input = input; + } else { + input = std::make_shared(element::f32, Shape{batch_size, input_size, num_sequences}); + lstm_input = + std::make_shared(input, op::v0::Constant::create(element::i32, Shape{3}, {0, 2, 1})); + } + auto H = op::v0::Constant::create(element::f32, Shape{batch_size, 1, hidden_size}, {1.0f}); + auto C = op::v0::Constant::create(element::f32, Shape{batch_size, 1, hidden_size}, {2.0f}); + auto sequence_lengths = op::v0::Constant::create(element::i32, Shape{1}, {num_sequences}); + auto W = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, input_size}, {3.0f}); + auto R = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, hidden_size}, {4.0f}); + auto B = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size}, {5.0f}); + auto lstm = std::make_shared(lstm_input, + H, + C, + sequence_lengths, + W, + R, + B, + hidden_size, + 
op::v5::LSTMSequence::direction::REVERSE); + auto squeeze = + std::make_shared(lstm->output(0), op::v0::Constant::create(element::i32, Shape{}, {1})); + + model_ref = std::make_shared(squeeze, ParameterVector{input}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); + comparator.enable(FunctionsComparator::CmpValues::ACCURACY); +} + +INSTANTIATE_TEST_SUITE_P(FuseReverseLSTMSequence, FuseReverseLSTMSequenceTest, testing::Values(false, true)); + +class FuseLSTMSequencesToBidirectionalLSTMSequenceTest : public TransformationTestsF, + public testing::WithParamInterface> {}; + +TEST_P(FuseLSTMSequencesToBidirectionalLSTMSequenceTest, FusionTest) { + const auto& params = GetParam(); + bool with_input_transpose = std::get<0>(params); + bool const_sequence_lengths = std::get<1>(params); + + size_t input_size = 3; + size_t hidden_size = 2; + size_t num_sequences = 5; + size_t batch_size = 1; + + { + std::shared_ptr input; + std::shared_ptr forward_lstm_input; + std::shared_ptr reverse_lstm_input; + std::shared_ptr forward_sequence_lengths; + std::shared_ptr reverse_sequence_lengths; + if (with_input_transpose) { + input = std::make_shared(element::f32, Shape{batch_size, input_size, num_sequences}); + forward_lstm_input = + std::make_shared(input, op::v0::Constant::create(element::i32, Shape{3}, {0, 2, 1})); + reverse_lstm_input = + std::make_shared(input, op::v0::Constant::create(element::i32, Shape{3}, {0, 2, 1})); + } else { + input = std::make_shared(element::f32, Shape{batch_size, num_sequences, input_size}); + forward_lstm_input = input; + reverse_lstm_input = input; + } + if (const_sequence_lengths) { + forward_sequence_lengths = op::v0::Constant::create(element::i32, Shape{batch_size}, {num_sequences}); + reverse_sequence_lengths = op::v0::Constant::create(element::i32, Shape{batch_size}, {num_sequences}); + } else { + auto shapeof_forward = std::make_shared(forward_lstm_input); 
+ auto gather_forward = + std::make_shared(shapeof_forward, + op::v0::Constant::create(element::i32, Shape{1}, {0}), + op::v0::Constant::create(element::i32, Shape{}, {0})); + forward_sequence_lengths = + std::make_shared(op::v0::Constant::create(element::i32, Shape{}, {num_sequences}), + gather_forward); + auto shapeof_reverse = std::make_shared(reverse_lstm_input); + auto gather_reverse = + std::make_shared(shapeof_reverse, + op::v0::Constant::create(element::i32, Shape{1}, {0}), + op::v0::Constant::create(element::i32, Shape{}, {0})); + reverse_sequence_lengths = + std::make_shared(op::v0::Constant::create(element::i32, Shape{}, {num_sequences}), + gather_reverse); + } + auto H_forward = op::v0::Constant::create(element::f32, Shape{batch_size, 1, hidden_size}, {1.0f}); + auto C_forward = op::v0::Constant::create(element::f32, Shape{batch_size, 1, hidden_size}, {2.0f}); + auto W_forward = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, input_size}, {3.0f}); + auto R_forward = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, hidden_size}, {4.0f}); + auto B_forward = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size}, {5.0f}); + auto lstm_forward = std::make_shared(forward_lstm_input, + H_forward, + C_forward, + forward_sequence_lengths, + W_forward, + R_forward, + B_forward, + hidden_size, + op::v5::LSTMSequence::direction::FORWARD); + auto squeeze_forward = std::make_shared(lstm_forward->output(0), + op::v0::Constant::create(element::i32, Shape{}, {1})); + + auto H_reverse = op::v0::Constant::create(element::f32, Shape{batch_size, 1, hidden_size}, {6.0f}); + auto C_reverse = op::v0::Constant::create(element::f32, Shape{batch_size, 1, hidden_size}, {7.0f}); + auto W_reverse = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, input_size}, {8.0f}); + auto R_reverse = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, hidden_size}, {9.0f}); + auto B_reverse = 
op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size}, {10.0f}); + auto lstm_reverse = std::make_shared(reverse_lstm_input, + H_reverse, + C_reverse, + reverse_sequence_lengths, + W_reverse, + R_reverse, + B_reverse, + hidden_size, + op::v5::LSTMSequence::direction::REVERSE); + auto squeeze_reverse = std::make_shared(lstm_reverse->output(0), + op::v0::Constant::create(element::i32, Shape{}, {1})); + + auto concat = std::make_shared(OutputVector{squeeze_forward, squeeze_reverse}, 2); + model = std::make_shared(concat, ParameterVector{input}); + + manager.register_pass(); + } + + { + std::shared_ptr input; + std::shared_ptr lstm_input; + std::shared_ptr sequence_lengths; + if (with_input_transpose) { + input = std::make_shared(element::f32, Shape{batch_size, input_size, num_sequences}); + lstm_input = + std::make_shared(input, op::v0::Constant::create(element::i32, Shape{3}, {0, 2, 1})); + } else { + input = std::make_shared(element::f32, Shape{batch_size, num_sequences, input_size}); + lstm_input = input; + } + if (const_sequence_lengths) { + sequence_lengths = op::v0::Constant::create(element::i32, Shape{batch_size}, {num_sequences}); + } else { + auto shapeof = std::make_shared(lstm_input); + auto gather = std::make_shared(shapeof, + op::v0::Constant::create(element::i32, Shape{1}, {0}), + op::v0::Constant::create(element::i32, Shape{}, {0})); + sequence_lengths = + std::make_shared(op::v0::Constant::create(element::i32, Shape{}, {num_sequences}), + gather); + } + auto H_forward = op::v0::Constant::create(element::f32, Shape{batch_size, 1, hidden_size}, {1.0f}); + auto H_reverse = op::v0::Constant::create(element::f32, Shape{batch_size, 1, hidden_size}, {6.0f}); + auto H = std::make_shared(OutputVector{H_forward, H_reverse}, 1); + auto C_forward = op::v0::Constant::create(element::f32, Shape{batch_size, 1, hidden_size}, {2.0f}); + auto C_reverse = op::v0::Constant::create(element::f32, Shape{batch_size, 1, hidden_size}, {7.0f}); + auto C = 
std::make_shared(OutputVector{C_forward, C_reverse}, 1); + auto W_forward = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, input_size}, {3.0f}); + auto W_reverse = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, input_size}, {8.0f}); + auto W = std::make_shared(OutputVector{W_forward, W_reverse}, 0); + auto R_forward = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, hidden_size}, {4.0f}); + auto R_reverse = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size, hidden_size}, {9.0f}); + auto R = std::make_shared(OutputVector{R_forward, R_reverse}, 0); + auto B_forward = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size}, {5.0f}); + auto B_reverse = op::v0::Constant::create(element::f32, Shape{1, 4 * hidden_size}, {10.0f}); + auto B = std::make_shared(OutputVector{B_forward, B_reverse}, 0); + + auto lstm = std::make_shared(lstm_input, + H, + C, + sequence_lengths, + W, + R, + B, + hidden_size, + op::v5::LSTMSequence::direction::BIDIRECTIONAL); + auto transpose = + std::make_shared(lstm->output(0), + op::v0::Constant::create(element::i32, Shape{4}, {0, 2, 1, 3})); + auto reshape = std::make_shared(transpose, + op::v0::Constant::create(element::i32, Shape{3}, {0, 0, -1}), + true); + model_ref = std::make_shared(reshape, ParameterVector{input}); + } + + comparator.enable(FunctionsComparator::CmpValues::CONST_VALUES); + comparator.enable(FunctionsComparator::CmpValues::ATTRIBUTES); + comparator.enable(FunctionsComparator::CmpValues::ACCURACY); +} + +INSTANTIATE_TEST_SUITE_P(FuseLSTMSequencesToBidirectionalLSTMSequence, + FuseLSTMSequencesToBidirectionalLSTMSequenceTest, + testing::Combine(testing::Values(false, true), testing::Values(false, true))); diff --git a/src/common/transformations/tests/resolve_names_collisions.cpp b/src/common/transformations/tests/resolve_names_collisions.cpp index a4986e51119800..a67ce5ba44ea3d 100644 --- a/src/common/transformations/tests/resolve_names_collisions.cpp 
+++ b/src/common/transformations/tests/resolve_names_collisions.cpp @@ -19,7 +19,6 @@ TEST(ResolveNameCollisionsTest, FixGeneratedNames) { EXPECT_NE(std::string::npos, gen_friendly_name.find("Parameter_")); unsigned long long index = std::stoull(gen_friendly_name.substr(name.length())); name += std::to_string(++index); - name += "_autogenerated"; arg0->set_friendly_name(name); diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 8a21637307e621..a98d3aa0b61343 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -29,8 +29,7 @@ if(ON) APPEND PROPERTY INCLUDE_DIRECTORIES $/src $/dev_api - $/include - $/include/ie) + $/include) endif() # Create named folders for the sources within the .vcproj diff --git a/src/core/dev_api/openvino/runtime/string_aligned_buffer.hpp b/src/core/dev_api/openvino/runtime/string_aligned_buffer.hpp index 689b0b680c7c2c..1133a8b5897c01 100644 --- a/src/core/dev_api/openvino/runtime/string_aligned_buffer.hpp +++ b/src/core/dev_api/openvino/runtime/string_aligned_buffer.hpp @@ -10,11 +10,16 @@ namespace ov { /// \brief StringAlignedBuffer class to store pointer to pre-allocated buffer with std::string objects /// it is responsible for deallocation of std::string objects that will be stored in the buffer -class StringAlignedBuffer : public ov::AlignedBuffer { +class OPENVINO_API StringAlignedBuffer : public ov::AlignedBuffer { public: StringAlignedBuffer() = default; + StringAlignedBuffer(size_t num_elements, size_t byte_size, size_t alignment, bool initialize); + virtual size_t get_num_elements() const { + return m_num_elements; + } + virtual ~StringAlignedBuffer(); private: @@ -25,4 +30,52 @@ class StringAlignedBuffer : public ov::AlignedBuffer { size_t m_num_elements; }; +/// \brief SharedStringAlignedBuffer class to store pointer to shared pre-allocated buffer with std::string objects +/// it must not be responsible for deallocation of std::string objects +class OPENVINO_API SharedStringAlignedBuffer : public 
ov::StringAlignedBuffer { +public: + SharedStringAlignedBuffer(char* ptr, size_t size); + + virtual ~SharedStringAlignedBuffer() { + m_allocated_buffer = nullptr; + m_aligned_buffer = nullptr; + m_byte_size = 0; + m_num_elements = 0; + } +}; + +template <> +class OPENVINO_API AttributeAdapter> + : public DirectValueAccessor> { +public: + AttributeAdapter(std::shared_ptr& value); + + OPENVINO_RTTI("AttributeAdapter"); + + static std::shared_ptr unpack_string_tensor(const char* packed_string_tensor_ptr, + size_t packed_string_tensor_size); + void get_header(std::shared_ptr& header, size_t& header_size); + void get_raw_string_by_index(const char*& raw_string_ptr, size_t& raw_string_size, size_t string_ind); + +protected: + std::shared_ptr m_header; + size_t m_header_size; +}; + +template <> +class OPENVINO_API AttributeAdapter> + : public DirectValueAccessor> { +public: + AttributeAdapter(std::shared_ptr& value); + + OPENVINO_RTTI("AttributeAdapter"); + + void get_header(std::shared_ptr& header, size_t& header_size); + void get_raw_string_by_index(const char*& raw_string_ptr, size_t& raw_string_size, size_t string_ind); + +protected: + std::shared_ptr m_header; + size_t m_header_size; +}; + } // namespace ov diff --git a/src/core/include/openvino/core/node.hpp b/src/core/include/openvino/core/node.hpp index 9006ab57a3a2a8..93b1e23db807e5 100644 --- a/src/core/include/openvino/core/node.hpp +++ b/src/core/include/openvino/core/node.hpp @@ -433,7 +433,6 @@ class OPENVINO_API Node : public std::enable_shared_from_this { std::vector> m_control_dependencies; size_t m_instance_id{m_next_instance_id.fetch_add(1)}; std::string m_friendly_name; - mutable std::string m_auto_generated_friendly_name; mutable std::string m_unique_name; mutable std::atomic_bool m_name_changing{false}; static std::atomic m_next_instance_id; diff --git a/src/core/src/node.cpp b/src/core/src/node.cpp index 84ad0004848c74..e152bcb7f4a63e 100644 --- a/src/core/src/node.cpp +++ b/src/core/src/node.cpp @@ 
-283,13 +283,8 @@ std::string ov::Node::description() const { } const std::string& ov::Node::get_friendly_name() const { - const auto& name = get_name(); - AtomicGuard lock(m_name_changing); if (m_friendly_name.empty()) { - if (m_auto_generated_friendly_name.empty()) { - m_auto_generated_friendly_name = name + "_" + "autogenerated"; - } - return m_auto_generated_friendly_name; + return get_name(); } return m_friendly_name; } @@ -406,14 +401,14 @@ ostream& operator<<(ostream& out, const Node* node) { } // namespace ov std::ostream& ov::Node::write_description(std::ostream& out, uint32_t depth) const { - if (depth == 0) { - out << get_friendly_name(); - } else { - auto version = get_type_info().version_id; - if (version) - out << version << "::" << get_type_info().name << " " << get_friendly_name() << " ("; - else - out << get_type_info().name << " " << get_friendly_name() << " ("; + auto version = get_type_info().version_id; + if (version) + out << version << "::" << get_type_info().name << " " << get_friendly_name(); + else + out << get_type_info().name << " " << get_friendly_name(); + + if (depth > 0) { + out << " ("; string sep = ""; for (const auto& arg : input_values()) { out << sep << arg; diff --git a/src/core/src/op/constant.cpp b/src/core/src/op/constant.cpp index 98cdfd9cc4104e..a32df53042b016 100644 --- a/src/core/src/op/constant.cpp +++ b/src/core/src/op/constant.cpp @@ -368,10 +368,32 @@ bool Constant::visit_attributes(AttributeVisitor& visitor) { const auto need_to_reallocate = (m_shape != prev_shape) || (prev_type != m_element_type); if (m_alloc_buffer_on_visit_attributes && need_to_reallocate) { - // Filling in a fresh constant - allocate_buffer(false); + if (m_element_type == ov::element::string) { + // string objects initialization is required + allocate_buffer(true); + } else { + // Filling in a fresh constant + allocate_buffer(false); + } + } + + if (m_element_type == ov::element::string) { + if (auto string_aligned_buffer = 
std::dynamic_pointer_cast(m_data)) { + visitor.on_attribute("value", string_aligned_buffer); + } else if (auto shared_string_tensor = std::dynamic_pointer_cast>(m_data)) { + auto shared_string_buffer = + std::make_shared(static_cast(shared_string_tensor->get_ptr()), + shared_string_tensor->size()); + visitor.on_attribute("value", shared_string_buffer); + } else { + // deserialization case when buffer does not exist yet + std::shared_ptr string_aligned_buffer; + visitor.on_attribute("value", string_aligned_buffer); + m_data = string_aligned_buffer; + } + } else { + visitor.on_attribute("value", m_data); } - visitor.on_attribute("value", m_data); update_identical_flags(false, false); return true; } diff --git a/src/core/src/pass/serialize.cpp b/src/core/src/pass/serialize.cpp index e3d133ee545d05..4d68132f4e6051 100644 --- a/src/core/src/pass/serialize.cpp +++ b/src/core/src/pass/serialize.cpp @@ -22,6 +22,7 @@ #include "openvino/pass/constant_folding.hpp" #include "openvino/reference/convert.hpp" #include "openvino/runtime/aligned_buffer.hpp" +#include "openvino/runtime/string_aligned_buffer.hpp" #include "openvino/util/file_util.hpp" #include "pugixml.hpp" #include "transformations/hash.hpp" @@ -511,6 +512,55 @@ class XmlSerializer : public ov::AttributeVisitor { } else if (const auto& a = ov::as_type>>(&adapter)) { m_xml_node.append_attribute(name.c_str()).set_value(a->get()->get_info().variable_id.c_str()); + } else if (ov::is_type>>(&adapter) || + ov::is_type>>(&adapter)) { + if (name == "value" && translate_type_name(m_node_type_name) == "Const") { + auto a1 = ov::as_type>>(&adapter); + auto a2 = ov::as_type>>(&adapter); + size_t new_size = 0; + size_t inter_size = 0; + // write a header of packed string tensor + std::shared_ptr header_ptr = nullptr; + size_t header_size = 0; + if (a1) { + a1->get_header(header_ptr, header_size); + } else { + a2->get_header(header_ptr, header_size); + } + + int64_t offset = 
m_constant_write_handler.write(reinterpret_cast(header_ptr.get()), + header_size, + &inter_size, + m_compress_to_fp16, + m_output_element_type); + new_size += inter_size; + + // write raw strings part + size_t num_elements = 0; + if (a1) { + num_elements = a1->get()->get_num_elements(); + } else { + num_elements = a2->get()->get_num_elements(); + } + for (size_t ind = 0; ind < num_elements; ++ind) { + const char* raw_string_ptr; + size_t raw_string_size; + if (a1) { + a1->get_raw_string_by_index(raw_string_ptr, raw_string_size, ind); + } else { + a2->get_raw_string_by_index(raw_string_ptr, raw_string_size, ind); + } + + m_constant_write_handler.write(raw_string_ptr, + raw_string_size, + &inter_size, + m_compress_to_fp16, + m_output_element_type); + new_size += inter_size; + } + m_xml_node.append_attribute("offset").set_value(static_cast(offset)); + m_xml_node.append_attribute("size").set_value(static_cast(new_size)); + } } else if (const auto& a = ov::as_type>>(&adapter)) { if (name == "value" && translate_type_name(m_node_type_name) == "Const") { const int64_t size = a->get()->size(); @@ -735,10 +785,6 @@ std::string generate_unique_name(const std::unordered_set& unique_n } } -bool is_name_auto_generated(const ov::Node& n) { - return n.get_friendly_name().find("autogenerated") != std::string::npos; -} - // TODO: remove when CNNNetwork will be supporting not-unique names std::string get_node_unique_name(std::unordered_set& unique_names, const ov::Node* n) { std::string name = n->get_friendly_name(); @@ -871,8 +917,8 @@ void ngfunction_2_ir(pugi::xml_node& netXml, ConstantWriter& constant_node_write_handler, int64_t version, bool deterministic) { - // If determinism is not required, do not include names into xml - // model name is not critial for hash computing + // If determinism is not required, include auto-generated names into xml + // model name is not critical for hash computing if (!deterministic) { 
netXml.append_attribute("name").set_value(model.get_friendly_name().c_str()); } @@ -918,7 +964,8 @@ void ngfunction_2_ir(pugi::xml_node& netXml, pugi::xml_node layer = layers.append_child("layer"); layer.append_attribute("id").set_value(layer_ids.find(node)->second); // If determinism is not required, include auto-generated names into xml - if (!deterministic || !is_name_auto_generated(*node)) { + // layer name is not critical for hash computing + if (!deterministic) { layer.append_attribute("name").set_value(get_node_unique_name(unique_names, node).c_str()); } layer.append_attribute("type").set_value(translate_type_name(node_type_name).c_str()); diff --git a/src/core/src/runtime/string_aligned_buffer.cpp b/src/core/src/runtime/string_aligned_buffer.cpp index 1410dbd6dc6fbf..28c4c82d0b5ab2 100644 --- a/src/core/src/runtime/string_aligned_buffer.cpp +++ b/src/core/src/runtime/string_aligned_buffer.cpp @@ -4,8 +4,80 @@ #include "openvino/runtime/string_aligned_buffer.hpp" +#include + +#include "openvino/core/type/element_type.hpp" #include "openvino/runtime/aligned_buffer.hpp" +namespace { +void aux_unpack_string_tensor(const char* data, size_t size, std::shared_ptr& string_buffer) { + // unpack string tensor + // packed format is the following: + // , <1st string offset>,..., , <1st string raw format>,..., + // check the format of the input bitstream representing the string tensor + OPENVINO_ASSERT(size >= 4, "Incorrect packed string tensor format: no batch size in the packed string tensor"); + const int32_t* pindices = reinterpret_cast(data); + int32_t num_strings = pindices[0]; + OPENVINO_ASSERT(int32_t(size) >= 4 + 4 + 4 * num_strings, + "Incorrect packed string tensor format: the packed string tensor must contain first " + "string offset and end indices"); + const int32_t* begin_ids = pindices + 1; + const int32_t* end_ids = pindices + 2; + const char* symbols = reinterpret_cast(pindices + 2 + num_strings); + + // allocate StringAlignedBuffer to store unpacked 
strings in std::string objects + // SharedBuffer to read byte stream is not applicable because we need unpacked format for strings + string_buffer = std::make_shared( + num_strings, + ov::element::string.size() * num_strings, + 64, // host alignment used the same as in creation of buffer for Constant + true); + std::string* src_strings = static_cast(string_buffer->get_ptr()); + for (int32_t idx = 0; idx < num_strings; ++idx) { + src_strings[idx] = std::string(symbols + begin_ids[idx], symbols + end_ids[idx]); + } +} + +void aux_get_header(const std::shared_ptr& string_aligned_buffer_ptr, + std::shared_ptr& header, + size_t& header_size) { + OPENVINO_ASSERT(string_aligned_buffer_ptr, "StringAlignedBuffer pointer is nullptr"); + // packed format is the following: + // , <1st string offset>,..., , <1st string raw format>,..., + auto num_elements = string_aligned_buffer_ptr->get_num_elements(); + auto strings = reinterpret_cast(string_aligned_buffer_ptr->get_ptr()); + + // first run over all elements: calculate total memory required to hold all strings + header_size = sizeof(int32_t) * (1 + 1 + num_elements); + header = std::shared_ptr(new uint8_t[header_size], std::default_delete()); + + int32_t* pindices = reinterpret_cast(header.get()); + pindices[0] = int32_t(num_elements); + pindices[1] = 0; + pindices += 2; + size_t current_symbols_pos = 0; + + for (size_t ind = 0; ind < num_elements; ++ind) { + auto str = strings[ind]; + current_symbols_pos += str.size(); + *pindices = int32_t(current_symbols_pos); + ++pindices; + } +} + +void aux_get_raw_string_by_index(const std::shared_ptr& string_aligned_buffer_ptr, + const char*& raw_string_ptr, + size_t& raw_string_size, + size_t string_ind) { + OPENVINO_ASSERT(string_aligned_buffer_ptr, "StringAlignedBuffer pointer is nullptr"); + OPENVINO_ASSERT(string_ind < string_aligned_buffer_ptr->get_num_elements(), + "Incorrect packed string tensor format: no batch size in the packed string tensor"); + const std::string* strings = 
reinterpret_cast(string_aligned_buffer_ptr->get_ptr()); + raw_string_ptr = strings[string_ind].data(); + raw_string_size = strings[string_ind].size(); +} +} // namespace + namespace ov { StringAlignedBuffer::StringAlignedBuffer(size_t num_elements, size_t byte_size, size_t alignment, bool initialize) : AlignedBuffer(byte_size, alignment), @@ -29,4 +101,62 @@ StringAlignedBuffer::~StringAlignedBuffer() { } } +SharedStringAlignedBuffer::SharedStringAlignedBuffer(char* ptr, size_t size) { + m_allocated_buffer = ptr; + m_aligned_buffer = ptr; + m_byte_size = size; + m_num_elements = size / ov::element::string.size(); +} + +AttributeAdapter>::AttributeAdapter( + std::shared_ptr& value) + : DirectValueAccessor>(value), + m_header(nullptr), + m_header_size(0) {} + +std::shared_ptr +AttributeAdapter>::unpack_string_tensor(const char* packed_string_tensor_ptr, + size_t packed_string_tensor_size) { + std::shared_ptr string_aligned_buffer; + aux_unpack_string_tensor(packed_string_tensor_ptr, packed_string_tensor_size, string_aligned_buffer); + return string_aligned_buffer; +} + +void AttributeAdapter>::get_header(std::shared_ptr& header, + size_t& header_size) { + if (!m_header) { + aux_get_header(m_ref, m_header, m_header_size); + } + header = m_header; + header_size = m_header_size; +} + +void AttributeAdapter>::get_raw_string_by_index(const char*& raw_string_ptr, + size_t& raw_string_size, + size_t string_ind) { + aux_get_raw_string_by_index(m_ref, raw_string_ptr, raw_string_size, string_ind); +} + +AttributeAdapter>::AttributeAdapter( + std::shared_ptr& value) + : DirectValueAccessor>(value), + m_header(nullptr), + m_header_size(0) {} + +void AttributeAdapter>::get_header(std::shared_ptr& header, + size_t& header_size) { + if (!m_header) { + aux_get_header(m_ref, m_header, m_header_size); + } + header = m_header; + header_size = m_header_size; +} + +void AttributeAdapter>::get_raw_string_by_index( + const char*& raw_string_ptr, + size_t& raw_string_size, + size_t 
string_ind) { + aux_get_raw_string_by_index(m_ref, raw_string_ptr, raw_string_size, string_ind); +} + } // namespace ov diff --git a/src/core/tests/models/ir/const_string.bin b/src/core/tests/models/ir/const_string.bin new file mode 100644 index 00000000000000..4cebb8d0799685 Binary files /dev/null and b/src/core/tests/models/ir/const_string.bin differ diff --git a/src/core/tests/models/ir/const_string.xml b/src/core/tests/models/ir/const_string.xml new file mode 100644 index 00000000000000..ab65a631ac6903 --- /dev/null +++ b/src/core/tests/models/ir/const_string.xml @@ -0,0 +1,26 @@ + + + + + + + + 2 + 3 + + + + + + + 2 + 3 + + + + + + + + + diff --git a/src/core/tests/pass/serialization/serialize.cpp b/src/core/tests/pass/serialization/serialize.cpp index d4531b1e029f98..d33a8701b45b0f 100644 --- a/src/core/tests/pass/serialization/serialize.cpp +++ b/src/core/tests/pass/serialization/serialize.cpp @@ -99,7 +99,8 @@ INSTANTIATE_TEST_SUITE_P( std::make_tuple("nms5_dynamism.xml", "nms5_dynamism.bin"), std::make_tuple("if_diff_case.xml", "if_diff_case.bin"), std::make_tuple("if_body_without_parameters.xml", "if_body_without_parameters.bin"), - std::make_tuple("string_parameter.xml", "string_parameter.bin"))); + std::make_tuple("string_parameter.xml", "string_parameter.bin"), + std::make_tuple("const_string.xml", "const_string.bin"))); #ifdef ENABLE_OV_ONNX_FRONTEND diff --git a/src/core/tests/visitors/op/constant.cpp b/src/core/tests/visitors/op/constant.cpp index 7022869d8ba395..beb5fe6cb95674 100644 --- a/src/core/tests/visitors/op/constant.cpp +++ b/src/core/tests/visitors/op/constant.cpp @@ -86,8 +86,7 @@ TEST(attributes, constant_op_from_host_tensor_identical_elements) { ASSERT_TRUE(g_k->get_all_data_elements_bitwise_identical()); } -// TODO: implement (de)serialization string constants -TEST(attributes, DISABLED_constant_op_string) { +TEST(attributes, constant_op_string) { vector data{"abc", "de fc qq", "", "123 abc", "0112 3 ", "&&&"}; auto k = 
make_shared(element::string, Shape{2, 3}, data); NodeBuilder builder(k); @@ -101,8 +100,7 @@ TEST(attributes, DISABLED_constant_op_string) { ASSERT_FALSE(g_k->get_all_data_elements_bitwise_identical()); } -// TODO: implement (de)serialization string constants -TEST(attributes, DISABLED_constant_op_identical_elements_string) { +TEST(attributes, constant_op_identical_elements_string) { vector data{"abc edfg", "abc edfg", "abc edfg", "abc edfg", "abc edfg", "abc edfg"}; auto k = make_shared(element::string, Shape{2, 3}, data); NodeBuilder builder(k); @@ -116,8 +114,7 @@ TEST(attributes, DISABLED_constant_op_identical_elements_string) { ASSERT_TRUE(g_k->get_all_data_elements_bitwise_identical()); } -// TODO: implement (de)serialization string constants -TEST(attributes, DISABLED_constant_op_from_host_tensor_different_elements_string) { +TEST(attributes, constant_op_from_host_tensor_different_elements_string) { vector data{"abc", "de fc qq", "", "123 abc", "0112 3 ", "&&&"}; auto tensor = ov::Tensor(element::string, Shape{2, 3}, &data[0]); auto k = make_shared(tensor); @@ -133,8 +130,7 @@ TEST(attributes, DISABLED_constant_op_from_host_tensor_different_elements_string ASSERT_FALSE(g_k->get_all_data_elements_bitwise_identical()); } -// TODO: implement (de)serialization string constants -TEST(attributes, DISABLED_constant_op_from_host_tensor_identical_elements_string) { +TEST(attributes, constant_op_from_host_tensor_identical_elements_string) { vector data{"abc edfg", "abc edfg", "abc edfg", "abc edfg", "abc edfg", "abc edfg"}; auto tensor = ov::Tensor(element::string, Shape{2, 3}, &data[0]); auto k = make_shared(tensor); diff --git a/src/core/tests/visitors/visitors.hpp b/src/core/tests/visitors/visitors.hpp index 7fb6d1999f66d3..6b47bcfdd7f55b 100644 --- a/src/core/tests/visitors/visitors.hpp +++ b/src/core/tests/visitors/visitors.hpp @@ -15,6 +15,7 @@ #include "openvino/op/util/variable.hpp" #include "openvino/opsets/opset.hpp" #include 
"openvino/runtime/aligned_buffer.hpp" +#include "openvino/runtime/string_aligned_buffer.hpp" #include "openvino/runtime/tensor.hpp" namespace ov { @@ -217,6 +218,19 @@ class DeserializeAttributeVisitor : public AttributeVisitor { if (auto a = ::ov::as_type<::ov::AttributeAdapter>>(&adapter)) { auto& data = m_values.get(name); std::memcpy(a->get()->get_ptr(), data.data(), a->get()->size()); + } else if (auto a = ov::as_type<::ov::AttributeAdapter>>(&adapter)) { + // get a data that is ov::Tensor of u8 type representing packed string tensor + auto& data = m_values.get(name); + auto src_string_aligned_buffer = + ov::AttributeAdapter>::unpack_string_tensor(data.data(), + data.get_size()); + std::string* src_strings = static_cast(src_string_aligned_buffer->get_ptr()); + auto dst_string_aligned = a->get(); + auto num_elements = dst_string_aligned->get_num_elements(); + auto dst_strings = static_cast(dst_string_aligned->get_ptr()); + for (size_t ind = 0; ind < num_elements; ++ind) { + dst_strings[ind] = src_strings[ind]; + } } else if (auto a = ov::as_type< ov::AttributeAdapter>>>( &adapter)) { @@ -306,6 +320,48 @@ class SerializeAttributeVisitor : public AttributeVisitor { ov::Tensor data(element::u8, Shape{a->get()->size()}); std::memcpy(data.data(), a->get()->get_ptr(), a->get()->size()); m_values.insert(name, data); + } else if (ov::is_type<::ov::AttributeAdapter>>(&adapter) || + ov::is_type<::ov::AttributeAdapter>>(&adapter)) { + auto a1 = ov::as_type<::ov::AttributeAdapter>>(&adapter); + auto a2 = ov::as_type<::ov::AttributeAdapter>>(&adapter); + // write packed string tensor into ov::Tensor of u8 type + std::vector packed_string_tensor; + std::shared_ptr header; + size_t header_size; + if (a1) { + a1->get_header(header, header_size); + } else { + a2->get_header(header, header_size); + } + for (size_t ind = 0; ind < header_size; ++ind) { + packed_string_tensor.push_back(header.get()[ind]); + } + + // write raw strings into packed format + size_t num_elements = 0; + 
if (a1) { + num_elements = a1->get()->get_num_elements(); + } else { + num_elements = a2->get()->get_num_elements(); + } + for (size_t string_ind = 0; string_ind < num_elements; ++string_ind) { + const char* string_ptr; + size_t string_size; + if (a1) { + a1->get_raw_string_by_index(string_ptr, string_size, string_ind); + } else { + a2->get_raw_string_by_index(string_ptr, string_size, string_ind); + } + + for (size_t ind = 0; ind < string_size; ++ind) { + packed_string_tensor.push_back(static_cast(string_ptr[ind])); + } + } + + size_t packed_string_tensor_size = packed_string_tensor.size(); + ov::Tensor data(element::u8, Shape{packed_string_tensor_size}); + std::memcpy(data.data(), packed_string_tensor.data(), packed_string_tensor_size); + m_values.insert(name, data); } else if (auto a = ov::as_type< ov::AttributeAdapter>>>( &adapter)) { diff --git a/src/frontends/common/include/openvino/frontend/extension/op.hpp b/src/frontends/common/include/openvino/frontend/extension/op.hpp index 668fd56ccc5aa6..79ec43527fe5c3 100644 --- a/src/frontends/common/include/openvino/frontend/extension/op.hpp +++ b/src/frontends/common/include/openvino/frontend/extension/op.hpp @@ -484,6 +484,7 @@ using OpExtension = ov::frontend::OpExtensionBasesize() < offset + size) OPENVINO_THROW("Incorrect weights in bin file!"); - if (size < ((ov::shape_size(shape) * el_type.bitwidth() + 7) >> 3)) - OPENVINO_THROW("Attribute and shape size are inconsistent for ", type, " op!"); + char* data = m_weights->get_ptr() + offset; + + if (el_type == element::string) { + auto buffer = + ov::AttributeAdapter>::unpack_string_tensor(data, size); + a->set(buffer); + } else { + if (size < ((ov::shape_size(shape) * el_type.bitwidth() + 7) >> 3)) + OPENVINO_THROW("Attribute and shape size are inconsistent for ", type, " op!"); + auto buffer = + std::make_shared>>(data, size, m_weights); + a->set(buffer); + } + } + } else if (auto a = ov::as_type>>(&adapter)) { + pugi::xml_node dn = m_node.child("data"); + const 
auto& type = pugixml::get_str_attr(m_node, "type"); + if (name == "value" && type == "Const") { + std::vector shape; + std::string el_type_str; + + size_t offset = static_cast(pugixml::get_uint64_attr(dn, "offset")); + size_t size = static_cast(pugixml::get_uint64_attr(dn, "size")); + if (!getStrAttribute(dn, "element_type", el_type_str)) + return; + if (!getParameters(dn, "shape", shape)) + return; + + if (!m_weights) + OPENVINO_THROW("Empty weights data in bin file or bin file cannot be found!"); + if (m_weights->size() < offset + size) + OPENVINO_THROW("Incorrect weights in bin file!"); char* data = m_weights->get_ptr() + offset; - auto buffer = std::make_shared>>(data, size, m_weights); + auto buffer = + ov::AttributeAdapter>::unpack_string_tensor(data, size); a->set(buffer); } } else if (auto a = ov::as_type>(&adapter)) { diff --git a/src/frontends/onnx/docs/how_to_add_op.md b/src/frontends/onnx/docs/how_to_add_op.md index 283292ba223cee..623f670ec2d726 100644 --- a/src/frontends/onnx/docs/how_to_add_op.md +++ b/src/frontends/onnx/docs/how_to_add_op.md @@ -9,50 +9,66 @@ The declaration in `.hpp` can look like: ```cpp #pragma once -#include "onnx_import/core/node.hpp" +#include "core/node.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector custom_add(const Node& node); +ov::OutputVector custom_add(const ov::frontend::onnx::Node& node); } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov ``` -The definition in `.cpp` contains an implementation of transformation from [ngraph::onnx_import::Node](../../../../src/frontends/onnx/frontend/include/onnx_import/core/node.hpp) to [ov::OutputVector](../../../../src/core/include/openvino/core/node_vector.hpp). 
Such implementation can look like: +The definition in `.cpp` contains an implementation of transformation from [ov::frontend::onnx::Node](../../../../src/frontends/onnx/frontend/include/onnx_import/core/node.hpp) to [ov::OutputVector](../../../../src/core/include/openvino/core/node_vector.hpp). Such implementation can look like: ```cpp #include "op/org.openvinotoolkit/custom_add.hpp" -#include - -#include "default_opset.hpp" +#include "exceptions.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/multiply.hpp" #include "utils/common.hpp" -namespace ngraph { -namespace onnx_import { +namespace ov { +namespace frontend { +namespace onnx { namespace op { namespace set_1 { -ov::OutputVector custom_add(const Node& node) { - const auto in1 = node.get_ng_inputs().at(0); - const auto in2 = node.get_ng_inputs().at(1); +ov::OutputVector custom_add(const ov::frontend::onnx::Node& node) { + const auto& inputs = node.get_ov_inputs(); + CHECK_VALID_NODE(node, + inputs.size() == 2, + "CustomAdd should have exactly 2 inputs, got: ", + inputs.size()); + const auto in1 = inputs[0]; + const auto in2 = inputs[1]; const auto alpha = node.get_attribute_value("alpha", 1); + + CHECK_VALID_NODE(node, + alpha >= 1 && alpha < 100, + "CustomAdd accepts alpha in a range [1, 100), got: ", + alpha); + const auto alpha_node = - std::make_shared(default_opset::Constant::create( ov::element::f32, {}, {alpha}), - in1.get_element_type()); + std::make_shared(v0::Constant::create(ov::element::f32, {}, {alpha}), in1.get_element_type()); - const auto add = std::make_shared(in1, in2); - return {std::make_shared(add, alpha_node)}; + const auto add = std::make_shared(in1, in2); + return {std::make_shared(add, alpha_node)}; } } // namespace set_1 } // namespace op -} // namespace onnx_import -} // namespace ngraph +} // namespace onnx +} // namespace frontend +} // namespace ov ``` The next step is to register a new op in 
[ops_bridge](../../../../src/frontends/onnx/frontend/src/ops_bridge.cpp). For `org.openvinotoolkit.CustomAdd`, the registration can look like: ```cpp diff --git a/src/frontends/onnx/frontend/src/core/operator_set.hpp b/src/frontends/onnx/frontend/src/core/operator_set.hpp index 60032edd133833..2221a055150861 100644 --- a/src/frontends/onnx/frontend/src/core/operator_set.hpp +++ b/src/frontends/onnx/frontend/src/core/operator_set.hpp @@ -13,7 +13,7 @@ namespace ov { namespace frontend { namespace onnx { -/// \brief Function which transforms single ONNX operator to nGraph sub-graph. +/// \brief Function which transforms single ONNX operator to OV sub-graph. using Operator = std::function; diff --git a/src/frontends/onnx/frontend/src/op/quant_conv.cpp b/src/frontends/onnx/frontend/src/op/quant_conv.cpp deleted file mode 100644 index a913399e378393..00000000000000 --- a/src/frontends/onnx/frontend/src/op/quant_conv.cpp +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// Disabled in CMakeLists.txt -// Update to higher opset is required - -#if 0 - -# include "op/quant_conv.hpp" - -# include -# include -# include - -# include "default_opset.hpp" -# include "exceptions.hpp" -# include "ngraph/builder/quantization/quantized_linear_convolution.hpp" -# include "ngraph/frontend/utils/convpool.hpp" -# include "ngraph/op/util/attr_types.hpp" -# include "ngraph/opsets/opset0.hpp" -# include "ngraph/strides.hpp" - -namespace ngraph -{ - namespace onnx_import - { - namespace op - { - namespace set_1 - { - namespace - { - struct OpScale - { - Output data_scale; - Output filter_scale; - Output output_scale; - }; - - struct OpZeroPoint - { - Output data_zero_point; - Output filter_zero_point; - Output output_zero_point; - }; - - std::shared_ptr - make_ng_quant_conv(const Output& data, - const Output& filters, - const ov::Strides& strides, - const ov::Strides& filter_dilations, - const CoordinateDiff& 
padding_below, - const CoordinateDiff& padding_above, - const ov::Strides& data_dilations, - int groups, - const OpScale& op_scale, - const OpZeroPoint& op_zero_point, - const Output& bias = nullptr) - { - ngraph:: ov::element::Type output_type; - if (data.get_element_type() == ngraph:: ov::element::u8 && - filters.get_element_type() == ngraph:: ov::element::i8) - { - output_type = ngraph:: ov::element::i8; - } - else if (data.get_element_type() == ngraph:: ov::element::u8 && - filters.get_element_type() == ngraph:: ov::element::u8) - { - output_type = ngraph:: ov::element::u8; - } - if (groups > 1) - { - // Split one convolution op to N ops where N is the number of groups - // and concat results after computation. - std::size_t n_data_channels{data.get_shape().at(1)}; - std::size_t n_filters_channels{filters.get_shape().at(0)}; - - std::size_t data_group_size{n_data_channels / groups}; - std::size_t filters_group_size{n_filters_channels / groups}; - ov::OutputVector convolution_nodes; - - // initial bounds for splice - std::vector data_lower_bounds(data.get_shape().size()); - std::vector data_upper_bounds{data.get_shape()}; - std::vector filters_lower_bounds( - filters->get_shape().size()); - std::vector filters_upper_bounds{filters.get_shape()}; - - for (int64_t group{0}; group < groups; ++group) - { - // slice data - data_lower_bounds[1] = group * data_group_size; - data_upper_bounds[1] = (group + 1) * data_group_size; - auto sliced_data = std::make_shared( - data, data_lower_bounds, data_upper_bounds); - // slice filters - filters_lower_bounds[0] = group * filters_group_size; - filters_upper_bounds[0] = (group + 1) * filters_group_size; - auto sliced_filters = std::make_shared( - filters, filters_lower_bounds, filters_upper_bounds); - - if (bias.get_node()) - { - OPENVINO_THROW( - "Groups != 1 not supported for Quantized Convolution with " - "bias."); - } - else - { - convolution_nodes.push_back( - std::make_shared( - sliced_data, - sliced_filters, - strides, - 
filter_dilations, - padding_below, - padding_above, - data_dilations, - op_scale.data_scale, - op_zero_point.data_zero_point, - op_scale.filter_scale, - op_zero_point.filter_zero_point, - op_scale.output_scale, - op_zero_point.output_zero_point, - output_type, - ov::AxisSet{}, - ov::AxisSet{}, - ov::AxisSet{})); - } - } - std::size_t concatenation_axis = 1; - return std::make_shared(convolution_nodes, - concatenation_axis); - } - else - { - if (bias.get_node()) - { - return ngraph::builder::quantization:: - QuantizedLinearConvolutionBias(data, - filters, - bias, - strides, - filter_dilations, - padding_below, - padding_above, - data_dilations, - op_scale.data_scale, - op_scale.filter_scale, - op_scale.output_scale); - } - else - { - return std::make_shared( - data, - filters, - strides, - filter_dilations, - padding_below, - padding_above, - data_dilations, - op_scale.data_scale, - op_zero_point.data_zero_point, - op_scale.filter_scale, - op_zero_point.filter_zero_point, - op_scale.output_scale, - op_zero_point.output_zero_point, - output_type, - ov::AxisSet{}, - ov::AxisSet{}, - ov::AxisSet{}); - } - } - } - - } // namespace - - ov::OutputVector quant_conv(const ov::frontend::onnx::Node& node) - { - const ov::OutputVector& inputs = node.get_ng_inputs(); - auto data = inputs.at(0); - auto filters = inputs.at(3); - - int64_t groups{node.get_attribute_value("group", 1)}; - - auto data_scale = inputs.at(1); - auto data_zero_point = inputs.at(2); - auto filters_scale = inputs.at(4); - auto filters_zero_point = inputs.at(5); - auto output_scale = inputs.at(6); - auto output_zero_point = inputs.at(7); - - CHECK_VALID_NODE(node, - ((groups >= 0) && - (groups <= static_cast(data.get_shape().at(1))) && - (groups <= static_cast(filters.get_shape().at(0)))), - "incorrect value of 'group' attribute: ", - groups); - - std::size_t n_data_channels{data.get_shape().at(1)}; - std::size_t n_filters_channels{filters.get_shape().at(0)}; - - CHECK_VALID_NODE( - node, - n_data_channels 
% groups == 0, - "provided group attribute value must be a multiple of data channels " - "count."); - CHECK_VALID_NODE( - node, - n_filters_channels % groups == 0, - "provided group attribute value must be a multiple of filter channels " - "count."); - - ov::Strides strides = convpool::get_strides(node); - ov::Strides filter_dilations = convpool::get_dilations(node); - ov::Strides data_dilations = Strides(convpool::get_kernel_shape(node).size(), 1UL); - auto paddings = convpool::get_pads(node); - ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node); - CoordinateDiff& padding_below = paddings.first; - CoordinateDiff& padding_above = paddings.second; - convpool::calculate_auto_pads(data.get_shape(), - filters.get_shape(), - strides, - filter_dilations, - auto_pad_type, - padding_below, - padding_above); - - std::shared_ptr conv_node = nullptr; - - // no bias param - if (inputs.size() == 9 && !ngraph::op::is_null(inputs.at(8))) - { - auto bias = inputs.at(8); - conv_node = make_ng_quant_conv( - data, - filters, - strides, - filter_dilations, - padding_below, - padding_above, - data_dilations, - groups, - OpScale{data_scale, filters_scale, output_scale}, - OpZeroPoint{data_zero_point, filters_zero_point, output_zero_point}, - bias); - } - else - { - conv_node = make_ng_quant_conv( - data, - filters, - strides, - filter_dilations, - padding_below, - padding_above, - data_dilations, - groups, - OpScale{data_scale, filters_scale, output_scale}, - OpZeroPoint{data_zero_point, filters_zero_point, output_zero_point}); - } - - return {conv_node}; - } - - } // namespace set_1 - - } // namespace op - - } // namespace onnx -} // namespace frontend -} // namespace ov - -#endif diff --git a/src/frontends/onnx/frontend/src/op/quant_conv.hpp b/src/frontends/onnx/frontend/src/op/quant_conv.hpp deleted file mode 100644 index 38135b34649885..00000000000000 --- a/src/frontends/onnx/frontend/src/op/quant_conv.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2023 
Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -// Disabled in CMakeList -// Update to higher opset required - -#pragma once - -#include "core/node.hpp" - -namespace ov { -namespace frontend { -namespace onnx { -namespace op { -namespace set_1 { -/// \brief Performs ONNX Quant Conv operation. -/// -/// \param node The ONNX node object representing this operation. -/// -/// \return The vector containing Ngraph nodes producing output of ONNX quantizied -/// convolution operation. -ov::OutputVector quant_conv(const ov::frontend::onnx::Node& node); - -} // namespace set_1 -} // namespace op -} // namespace onnx -} // namespace frontend -} // namespace ov diff --git a/src/frontends/onnx/tests/models/abs.prototxt b/src/frontends/onnx/tests/models/abs.prototxt index 696681a4f83ea6..e2aecada24a461 100644 --- a/src/frontends/onnx/tests/models/abs.prototxt +++ b/src/frontends/onnx/tests/models/abs.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/acosh.prototxt b/src/frontends/onnx/tests/models/acosh.prototxt index 7a8cdc1fa26d4d..8e4dbcd5594f3a 100644 --- a/src/frontends/onnx/tests/models/acosh.prototxt +++ b/src/frontends/onnx/tests/models/acosh.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/add_abc.prototxt b/src/frontends/onnx/tests/models/add_abc.prototxt index 7f63b68ad216c0..a183712b670031 100644 --- a/src/frontends/onnx/tests/models/add_abc.prototxt +++ b/src/frontends/onnx/tests/models/add_abc.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/add_abc_3d.prototxt b/src/frontends/onnx/tests/models/add_abc_3d.prototxt index 
4862444ea19bba..050b85a891f73e 100644 --- a/src/frontends/onnx/tests/models/add_abc_3d.prototxt +++ b/src/frontends/onnx/tests/models/add_abc_3d.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/add_abc_initializers.prototxt b/src/frontends/onnx/tests/models/add_abc_initializers.prototxt index 8d8c441c088ed7..f88c61653ac0b8 100644 --- a/src/frontends/onnx/tests/models/add_abc_initializers.prototxt +++ b/src/frontends/onnx/tests/models/add_abc_initializers.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/add_bcast.prototxt b/src/frontends/onnx/tests/models/add_bcast.prototxt index e82ba065db541f..0b98e661113f0c 100644 --- a/src/frontends/onnx/tests/models/add_bcast.prototxt +++ b/src/frontends/onnx/tests/models/add_bcast.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/add_v6_broadcast_axes_1_2.prototxt b/src/frontends/onnx/tests/models/add_v6_broadcast_axes_1_2.prototxt index e72ff7f5c093cd..84468a58db9b0d 100644 --- a/src/frontends/onnx/tests/models/add_v6_broadcast_axes_1_2.prototxt +++ b/src/frontends/onnx/tests/models/add_v6_broadcast_axes_1_2.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/add_v6_broadcast_axis_1.prototxt b/src/frontends/onnx/tests/models/add_v6_broadcast_axis_1.prototxt index 77b12bc62aa32f..f5972379583ee3 100644 --- a/src/frontends/onnx/tests/models/add_v6_broadcast_axis_1.prototxt +++ b/src/frontends/onnx/tests/models/add_v6_broadcast_axis_1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 
-producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/add_v6_broadcast_dynamic.prototxt b/src/frontends/onnx/tests/models/add_v6_broadcast_dynamic.prototxt index fd9ffd19436098..9f69c5f5f007d8 100644 --- a/src/frontends/onnx/tests/models/add_v6_broadcast_dynamic.prototxt +++ b/src/frontends/onnx/tests/models/add_v6_broadcast_dynamic.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/add_v6_broadcast_no_axis.prototxt b/src/frontends/onnx/tests/models/add_v6_broadcast_no_axis.prototxt index 0be986d39f7fae..e633bf93704e74 100644 --- a/src/frontends/onnx/tests/models/add_v6_broadcast_no_axis.prototxt +++ b/src/frontends/onnx/tests/models/add_v6_broadcast_no_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/add_v7.prototxt b/src/frontends/onnx/tests/models/add_v7.prototxt index f308f85f7a4607..fe62ba66c3a11e 100644 --- a/src/frontends/onnx/tests/models/add_v7.prototxt +++ b/src/frontends/onnx/tests/models/add_v7.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/addmul_abc.prototxt b/src/frontends/onnx/tests/models/addmul_abc.prototxt index f9ec0c6d22a4b3..b7ff3bf670166b 100644 --- a/src/frontends/onnx/tests/models/addmul_abc.prototxt +++ b/src/frontends/onnx/tests/models/addmul_abc.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/affine.prototxt b/src/frontends/onnx/tests/models/affine.prototxt index 
da40173ae49ff8..c2560633a30254 100644 --- a/src/frontends/onnx/tests/models/affine.prototxt +++ b/src/frontends/onnx/tests/models/affine.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/argmax_float.prototxt b/src/frontends/onnx/tests/models/argmax_float.prototxt index 755c50af476c0c..c42fd2e9fe75ae 100644 --- a/src/frontends/onnx/tests/models/argmax_float.prototxt +++ b/src/frontends/onnx/tests/models/argmax_float.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/argmax_int32.prototxt b/src/frontends/onnx/tests/models/argmax_int32.prototxt index 95bc3bbd4e472d..49a95df7999d20 100644 --- a/src/frontends/onnx/tests/models/argmax_int32.prototxt +++ b/src/frontends/onnx/tests/models/argmax_int32.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/argmax_select_last_index.prototxt b/src/frontends/onnx/tests/models/argmax_select_last_index.prototxt index d47a89d14a0a0e..d0d0998b5b888a 100644 --- a/src/frontends/onnx/tests/models/argmax_select_last_index.prototxt +++ b/src/frontends/onnx/tests/models/argmax_select_last_index.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/argmin_float.prototxt b/src/frontends/onnx/tests/models/argmin_float.prototxt index 6e8f970315993e..e6e59e9f3c2ecc 100644 --- a/src/frontends/onnx/tests/models/argmin_float.prototxt +++ b/src/frontends/onnx/tests/models/argmin_float.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX 
Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/argmin_int32.prototxt b/src/frontends/onnx/tests/models/argmin_int32.prototxt index 5b8b6c848eef98..a0e0f39df1ece6 100644 --- a/src/frontends/onnx/tests/models/argmin_int32.prototxt +++ b/src/frontends/onnx/tests/models/argmin_int32.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/argmin_no_keepdims.prototxt b/src/frontends/onnx/tests/models/argmin_no_keepdims.prototxt index 77fdf45df7d098..460200d5fb530a 100644 --- a/src/frontends/onnx/tests/models/argmin_no_keepdims.prototxt +++ b/src/frontends/onnx/tests/models/argmin_no_keepdims.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/argmin_select_last_index.prototxt b/src/frontends/onnx/tests/models/argmin_select_last_index.prototxt index 623bc96ef69445..ba822764d6894f 100644 --- a/src/frontends/onnx/tests/models/argmin_select_last_index.prototxt +++ b/src/frontends/onnx/tests/models/argmin_select_last_index.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/asinh.prototxt b/src/frontends/onnx/tests/models/asinh.prototxt index 2684a528d68503..1074a169427a94 100644 --- a/src/frontends/onnx/tests/models/asinh.prototxt +++ b/src/frontends/onnx/tests/models/asinh.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/atanh.prototxt b/src/frontends/onnx/tests/models/atanh.prototxt index aeb293d797ebcc..c4ea43c57ea376 100644 --- a/src/frontends/onnx/tests/models/atanh.prototxt +++ 
b/src/frontends/onnx/tests/models/atanh.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/aten_embedding_sum_many_outputs.prototxt b/src/frontends/onnx/tests/models/aten_embedding_sum_many_outputs.prototxt index c89bd70373e8de..88d1373b3c0c37 100644 --- a/src/frontends/onnx/tests/models/aten_embedding_sum_many_outputs.prototxt +++ b/src/frontends/onnx/tests/models/aten_embedding_sum_many_outputs.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx_import_test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "emb_tbl" diff --git a/src/frontends/onnx/tests/models/aten_embedding_sum_offset_3in.prototxt b/src/frontends/onnx/tests/models/aten_embedding_sum_offset_3in.prototxt index be788ecde53b35..4aa3bceaff9c4c 100644 --- a/src/frontends/onnx/tests/models/aten_embedding_sum_offset_3in.prototxt +++ b/src/frontends/onnx/tests/models/aten_embedding_sum_offset_3in.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx_import_test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "emb_tbl" diff --git a/src/frontends/onnx/tests/models/aten_embedding_sum_offset_4in.prototxt b/src/frontends/onnx/tests/models/aten_embedding_sum_offset_4in.prototxt index 5e2fe46532c696..9660b56344f2fc 100644 --- a/src/frontends/onnx/tests/models/aten_embedding_sum_offset_4in.prototxt +++ b/src/frontends/onnx/tests/models/aten_embedding_sum_offset_4in.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx_import_test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "emb_tbl" diff --git a/src/frontends/onnx/tests/models/aten_embedding_sum_packed_2in.prototxt b/src/frontends/onnx/tests/models/aten_embedding_sum_packed_2in.prototxt index 72576cdcbe4684..04e2258e6e6d8a 100644 --- a/src/frontends/onnx/tests/models/aten_embedding_sum_packed_2in.prototxt +++ 
b/src/frontends/onnx/tests/models/aten_embedding_sum_packed_2in.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx_import_test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "emb_tbl" diff --git a/src/frontends/onnx/tests/models/aten_embedding_sum_packed_3in_offset_none.prototxt b/src/frontends/onnx/tests/models/aten_embedding_sum_packed_3in_offset_none.prototxt index 6f25b45f6f25e6..62d876a9f43987 100644 --- a/src/frontends/onnx/tests/models/aten_embedding_sum_packed_3in_offset_none.prototxt +++ b/src/frontends/onnx/tests/models/aten_embedding_sum_packed_3in_offset_none.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx_import_test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "emb_tbl" diff --git a/src/frontends/onnx/tests/models/aten_embedding_sum_packed_4in_per_sample_weights.prototxt b/src/frontends/onnx/tests/models/aten_embedding_sum_packed_4in_per_sample_weights.prototxt index bc15c5b616d401..764163ac6c8041 100644 --- a/src/frontends/onnx/tests/models/aten_embedding_sum_packed_4in_per_sample_weights.prototxt +++ b/src/frontends/onnx/tests/models/aten_embedding_sum_packed_4in_per_sample_weights.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx_import_test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "emb_tbl" diff --git a/src/frontends/onnx/tests/models/aten_embedding_sum_packed_4in_two_none.prototxt b/src/frontends/onnx/tests/models/aten_embedding_sum_packed_4in_two_none.prototxt index 74f0576e90e847..d08026bc1157a1 100644 --- a/src/frontends/onnx/tests/models/aten_embedding_sum_packed_4in_two_none.prototxt +++ b/src/frontends/onnx/tests/models/aten_embedding_sum_packed_4in_two_none.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx_import_test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "emb_tbl" diff --git a/src/frontends/onnx/tests/models/aten_unsupported_embedding_mode.prototxt 
b/src/frontends/onnx/tests/models/aten_unsupported_embedding_mode.prototxt index e83c561c2c61bb..8ab647ed6b83f4 100644 --- a/src/frontends/onnx/tests/models/aten_unsupported_embedding_mode.prototxt +++ b/src/frontends/onnx/tests/models/aten_unsupported_embedding_mode.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx_import_test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "emb_tbl" diff --git a/src/frontends/onnx/tests/models/aten_unsupported_operator.prototxt b/src/frontends/onnx/tests/models/aten_unsupported_operator.prototxt index 95139fef8509bb..0999490e66e886 100644 --- a/src/frontends/onnx/tests/models/aten_unsupported_operator.prototxt +++ b/src/frontends/onnx/tests/models/aten_unsupported_operator.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx_import_test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "emb_tbl" diff --git a/src/frontends/onnx/tests/models/average_pool_2d.prototxt b/src/frontends/onnx/tests/models/average_pool_2d.prototxt index 4af495b55d905d..08db1163ec3aac 100644 --- a/src/frontends/onnx/tests/models/average_pool_2d.prototxt +++ b/src/frontends/onnx/tests/models/average_pool_2d.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/average_pool_2d_pads.prototxt b/src/frontends/onnx/tests/models/average_pool_2d_pads.prototxt index 5ddc67997b9555..6223864e136bae 100644 --- a/src/frontends/onnx/tests/models/average_pool_2d_pads.prototxt +++ b/src/frontends/onnx/tests/models/average_pool_2d_pads.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/average_pool_empty_auto_pad.prototxt b/src/frontends/onnx/tests/models/average_pool_empty_auto_pad.prototxt index 4c62008bf5a785..8769f8c70fc48d 100644 --- 
a/src/frontends/onnx/tests/models/average_pool_empty_auto_pad.prototxt +++ b/src/frontends/onnx/tests/models/average_pool_empty_auto_pad.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/batchnorm_default.prototxt b/src/frontends/onnx/tests/models/batchnorm_default.prototxt index 2bda46d2920e4a..1f3effcfd2543d 100644 --- a/src/frontends/onnx/tests/models/batchnorm_default.prototxt +++ b/src/frontends/onnx/tests/models/batchnorm_default.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/bitwise_not.prototxt b/src/frontends/onnx/tests/models/bitwise_not.prototxt index 29e97d88172b18..dda00b819afa41 100644 --- a/src/frontends/onnx/tests/models/bitwise_not.prototxt +++ b/src/frontends/onnx/tests/models/bitwise_not.prototxt @@ -1,5 +1,5 @@ ir_version: 9 -producer_name: "BitwiseNotModel" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt index f8759ce921028a..894905adf64bf7 100644 --- a/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/blackmanwindow_periodic.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "size" diff --git a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt index 1d60e783ead99a..3b16815ad4de85 100644 --- a/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/blackmanwindow_symmetric.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph 
ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "size" diff --git a/src/frontends/onnx/tests/models/bool_const_op.prototxt b/src/frontends/onnx/tests/models/bool_const_op.prototxt index 960a923137fe92..10d1990bf0f9c7 100644 --- a/src/frontends/onnx/tests/models/bool_const_op.prototxt +++ b/src/frontends/onnx/tests/models/bool_const_op.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_graph" node { diff --git a/src/frontends/onnx/tests/models/bool_init_and.prototxt b/src/frontends/onnx/tests/models/bool_init_and.prototxt index bfbcf19e653865..8a2134ddbb9f91 100644 --- a/src/frontends/onnx/tests/models/bool_init_and.prototxt +++ b/src/frontends/onnx/tests/models/bool_init_and.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_graph" node { diff --git a/src/frontends/onnx/tests/models/bool_init_raw.prototxt b/src/frontends/onnx/tests/models/bool_init_raw.prototxt index eb6f51e86650a6..e9af296994f359 100644 --- a/src/frontends/onnx/tests/models/bool_init_raw.prototxt +++ b/src/frontends/onnx/tests/models/bool_init_raw.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_graph" initializer { diff --git a/src/frontends/onnx/tests/models/bool_input_or.prototxt b/src/frontends/onnx/tests/models/bool_input_or.prototxt index 3615eec4a5ad1e..c42b863108d88f 100644 --- a/src/frontends/onnx/tests/models/bool_input_or.prototxt +++ b/src/frontends/onnx/tests/models/bool_input_or.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_graph" node { diff --git a/src/frontends/onnx/tests/models/castlike_bfloat16_to_float32.prototxt b/src/frontends/onnx/tests/models/castlike_bfloat16_to_float32.prototxt index 
bd932116f02c92..f454d0da8e814a 100644 --- a/src/frontends/onnx/tests/models/castlike_bfloat16_to_float32.prototxt +++ b/src/frontends/onnx/tests/models/castlike_bfloat16_to_float32.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/castlike_float16_to_int64.prototxt b/src/frontends/onnx/tests/models/castlike_float16_to_int64.prototxt index ec765ef052c231..7ef9dd2f5026fc 100644 --- a/src/frontends/onnx/tests/models/castlike_float16_to_int64.prototxt +++ b/src/frontends/onnx/tests/models/castlike_float16_to_int64.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/castlike_float16_to_uint32.prototxt b/src/frontends/onnx/tests/models/castlike_float16_to_uint32.prototxt index 01466c26fa11fa..54c55421f69b24 100644 --- a/src/frontends/onnx/tests/models/castlike_float16_to_uint32.prototxt +++ b/src/frontends/onnx/tests/models/castlike_float16_to_uint32.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/castlike_float32_to_bfloat16.prototxt b/src/frontends/onnx/tests/models/castlike_float32_to_bfloat16.prototxt index c99069ef646135..e30a95efb96662 100644 --- a/src/frontends/onnx/tests/models/castlike_float32_to_bfloat16.prototxt +++ b/src/frontends/onnx/tests/models/castlike_float32_to_bfloat16.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/castlike_float64_to_int32.prototxt b/src/frontends/onnx/tests/models/castlike_float64_to_int32.prototxt index 5020ff816bdae1..e0fdcc817770a3 100644 --- 
a/src/frontends/onnx/tests/models/castlike_float64_to_int32.prototxt +++ b/src/frontends/onnx/tests/models/castlike_float64_to_int32.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/castlike_float64_to_int64.prototxt b/src/frontends/onnx/tests/models/castlike_float64_to_int64.prototxt index 383e3ca3df1ee5..9b90a87b43d208 100644 --- a/src/frontends/onnx/tests/models/castlike_float64_to_int64.prototxt +++ b/src/frontends/onnx/tests/models/castlike_float64_to_int64.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/castlike_int32_to_float64.prototxt b/src/frontends/onnx/tests/models/castlike_int32_to_float64.prototxt index 7bdfd6b64612ba..0ff8bb53244914 100644 --- a/src/frontends/onnx/tests/models/castlike_int32_to_float64.prototxt +++ b/src/frontends/onnx/tests/models/castlike_int32_to_float64.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/castlike_int8_to_float16.prototxt b/src/frontends/onnx/tests/models/castlike_int8_to_float16.prototxt index 213ebdf2f06df9..c4220f889222a3 100644 --- a/src/frontends/onnx/tests/models/castlike_int8_to_float16.prototxt +++ b/src/frontends/onnx/tests/models/castlike_int8_to_float16.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/castlike_int8_to_int16.prototxt b/src/frontends/onnx/tests/models/castlike_int8_to_int16.prototxt index ad950fdbfdd63c..e0a8ccdd284561 100644 --- a/src/frontends/onnx/tests/models/castlike_int8_to_int16.prototxt +++ 
b/src/frontends/onnx/tests/models/castlike_int8_to_int16.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/castlike_int8_to_uint16.prototxt b/src/frontends/onnx/tests/models/castlike_int8_to_uint16.prototxt index ef3dfc33a1bfb5..8820c83711a82f 100644 --- a/src/frontends/onnx/tests/models/castlike_int8_to_uint16.prototxt +++ b/src/frontends/onnx/tests/models/castlike_int8_to_uint16.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/clip_no_min_no_max.prototxt b/src/frontends/onnx/tests/models/clip_no_min_no_max.prototxt index 082bdf84123d0a..d21685ec4ee923 100644 --- a/src/frontends/onnx/tests/models/clip_no_min_no_max.prototxt +++ b/src/frontends/onnx/tests/models/clip_no_min_no_max.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/clip_no_min_no_max_int64.prototxt b/src/frontends/onnx/tests/models/clip_no_min_no_max_int64.prototxt index 03300976a8ff9d..3d9c687900eac4 100644 --- a/src/frontends/onnx/tests/models/clip_no_min_no_max_int64.prototxt +++ b/src/frontends/onnx/tests/models/clip_no_min_no_max_int64.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/clip_no_min_set_max.prototxt b/src/frontends/onnx/tests/models/clip_no_min_set_max.prototxt index e5e027600e581f..26c452ce53e004 100644 --- a/src/frontends/onnx/tests/models/clip_no_min_set_max.prototxt +++ b/src/frontends/onnx/tests/models/clip_no_min_set_max.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX 
Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/clip_no_min_set_max_int64.prototxt b/src/frontends/onnx/tests/models/clip_no_min_set_max_int64.prototxt index 08c4e2e709b6f2..ca88db6851cf05 100644 --- a/src/frontends/onnx/tests/models/clip_no_min_set_max_int64.prototxt +++ b/src/frontends/onnx/tests/models/clip_no_min_set_max_int64.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/clip_set_min_no_max.prototxt b/src/frontends/onnx/tests/models/clip_set_min_no_max.prototxt index 10fcc2c46691ed..2e60e745806d24 100644 --- a/src/frontends/onnx/tests/models/clip_set_min_no_max.prototxt +++ b/src/frontends/onnx/tests/models/clip_set_min_no_max.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/clip_set_min_no_max_initializers.prototxt b/src/frontends/onnx/tests/models/clip_set_min_no_max_initializers.prototxt index 0901b86fb7af73..25cfde0a147d5f 100644 --- a/src/frontends/onnx/tests/models/clip_set_min_no_max_initializers.prototxt +++ b/src/frontends/onnx/tests/models/clip_set_min_no_max_initializers.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/clip_set_min_set_max.prototxt b/src/frontends/onnx/tests/models/clip_set_min_set_max.prototxt index a5ad023d81dfc6..5e9035309a591f 100644 --- a/src/frontends/onnx/tests/models/clip_set_min_set_max.prototxt +++ b/src/frontends/onnx/tests/models/clip_set_min_set_max.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/clip_set_min_set_max_initializers.prototxt 
b/src/frontends/onnx/tests/models/clip_set_min_set_max_initializers.prototxt index c09b604cee7e2a..c03250fd4eff07 100644 --- a/src/frontends/onnx/tests/models/clip_set_min_set_max_initializers.prototxt +++ b/src/frontends/onnx/tests/models/clip_set_min_set_max_initializers.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/com.microsoft/attention.prototxt b/src/frontends/onnx/tests/models/com.microsoft/attention.prototxt index 53ac350573b055..6b814172836876 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/attention.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/attention.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/attention_dynamic_shapes.prototxt b/src/frontends/onnx/tests/models/com.microsoft/attention_dynamic_shapes.prototxt index 97a4f3f1f9134a..e589f8c84660d6 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/attention_dynamic_shapes.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/attention_dynamic_shapes.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/attention_extra_add.prototxt b/src/frontends/onnx/tests/models/com.microsoft/attention_extra_add.prototxt index f8664f4507f459..71eac8a5255d33 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/attention_extra_add.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/attention_extra_add.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_1.prototxt 
b/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_1.prototxt index 56d4e1d1142a4e..fcbda837bd79f1 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_1.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_1.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_2.prototxt b/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_2.prototxt index 481d9ea86f5488..5ade4aea389e09 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_2.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_2.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_3.prototxt b/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_3.prototxt index 67558f33599282..add561937be59a 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_3.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_3.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_4.prototxt b/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_4.prototxt index 9b9387991a9c64..4b28b1d0f9ce03 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_4.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/attention_mask_index_4.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git 
a/src/frontends/onnx/tests/models/com.microsoft/attention_past.prototxt b/src/frontends/onnx/tests/models/com.microsoft/attention_past.prototxt index 7625195fa044c8..5344a3d1b54dd1 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/attention_past.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/attention_past.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/attention_qkv_hidden_sizes.prototxt b/src/frontends/onnx/tests/models/com.microsoft/attention_qkv_hidden_sizes.prototxt index 5ee43aa5c3624a..987da932a18df6 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/attention_qkv_hidden_sizes.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/attention_qkv_hidden_sizes.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/attention_unidirectional.prototxt b/src/frontends/onnx/tests/models/com.microsoft/attention_unidirectional.prototxt index 31a65b299d60ee..2e270740faa2b1 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/attention_unidirectional.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/attention_unidirectional.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/bias_gelu.prototxt b/src/frontends/onnx/tests/models/com.microsoft/bias_gelu.prototxt index f01a6ca2c42f0d..4ea969e6613828 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/bias_gelu.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/bias_gelu.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git 
a/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization.prototxt b/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization.prototxt index 1cd1bfcc1b3d70..ad2ebcf4d5e459 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input_ids" diff --git a/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_dynamic_shapes.prototxt b/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_dynamic_shapes.prototxt index 577926c6d3114f..a01d3b5b465fff 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_dynamic_shapes.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_dynamic_shapes.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input_ids" diff --git a/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_dynamic_shapes_with_position_ids.prototxt b/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_dynamic_shapes_with_position_ids.prototxt index 0cc8e8a3bd39cd..3a9b5ff330f474 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_dynamic_shapes_with_position_ids.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_dynamic_shapes_with_position_ids.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input_ids" diff --git a/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_with_segment_embedding.prototxt b/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_with_segment_embedding.prototxt index 
36b7a1deaaa870..de690034a914a3 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_with_segment_embedding.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_with_segment_embedding.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input_ids" diff --git a/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_with_segment_embedding_and_mask.prototxt b/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_with_segment_embedding_and_mask.prototxt index 1181538c82ccaa..7fcfe40e290832 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_with_segment_embedding_and_mask.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/embed_layer_normalization_with_segment_embedding_and_mask.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input_ids" diff --git a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_clip.prototxt b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_clip.prototxt index b7ebca736ec719..4895d9b79c8e1a 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_clip.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_clip.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "OV ONNX Frontend" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_hard_sigmoid.prototxt b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_hard_sigmoid.prototxt index eedf947bcafcb0..329b82a5db27a8 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_hard_sigmoid.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_hard_sigmoid.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "OV ONNX Frontend" +producer_name: 
"OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_leaky_relu.prototxt b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_leaky_relu.prototxt index 1da86f1513cb47..7ebc92f4dfc957 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_leaky_relu.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_leaky_relu.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "OV ONNX Frontend" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_relu.prototxt b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_relu.prototxt index 7cb10b6e9daf46..e787ae89482148 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_relu.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_relu.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "OV ONNX Frontend" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_relu_z_input.prototxt b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_relu_z_input.prototxt index fe9462c1e7258e..0675e510d97603 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_relu_z_input.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_relu_z_input.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "OV ONNX Frontend" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_sigmoid.prototxt b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_sigmoid.prototxt index 53e8dfe7ecbd6f..67bc9c778a5350 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_sigmoid.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_sigmoid.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "OV ONNX Frontend" 
+producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_tanh.prototxt b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_tanh.prototxt index 12b9b8f4a10751..481ef6c2c8eab0 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/fused_conv_tanh.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/fused_conv_tanh.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "OV ONNX Frontend" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/com.microsoft/fusedgemm.prototxt b/src/frontends/onnx/tests/models/com.microsoft/fusedgemm.prototxt index b381de9a1e5ae1..9935a704867a41 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/fusedgemm.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/fusedgemm.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "a" diff --git a/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_dynamic_shapes.prototxt b/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_dynamic_shapes.prototxt index dea7e08d9a6301..ce5c37fda326ea 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_dynamic_shapes.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_dynamic_shapes.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma.prototxt b/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma.prototxt index 6d7dab3c676afe..d05e7406c47e63 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma.prototxt +++ 
b/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma_beta.prototxt b/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma_beta.prototxt index 90e92553245f0b..b82536baa58fa4 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma_beta.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma_beta.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma_beta_bias.prototxt b/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma_beta_bias.prototxt index 8d61aca7a57193..436f838f766235 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma_beta_bias.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/skip_layer_normalization_with_gamma_beta_bias.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/com.microsoft/trilu_lower.prototxt b/src/frontends/onnx/tests/models/com.microsoft/trilu_lower.prototxt index d21fb8938e0de2..c5fea8c623c915 100644 --- a/src/frontends/onnx/tests/models/com.microsoft/trilu_lower.prototxt +++ b/src/frontends/onnx/tests/models/com.microsoft/trilu_lower.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/compress_0.prototxt b/src/frontends/onnx/tests/models/compress_0.prototxt index 
d3e34c188bf92e..21bf8dec67a7b8 100644 --- a/src/frontends/onnx/tests/models/compress_0.prototxt +++ b/src/frontends/onnx/tests/models/compress_0.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/compress_1.prototxt b/src/frontends/onnx/tests/models/compress_1.prototxt index e76930e5f79bc0..f0040b351766db 100644 --- a/src/frontends/onnx/tests/models/compress_1.prototxt +++ b/src/frontends/onnx/tests/models/compress_1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/compress_default_axis.prototxt b/src/frontends/onnx/tests/models/compress_default_axis.prototxt index 825ebcb0d03c1d..44fb0171220b6d 100644 --- a/src/frontends/onnx/tests/models/compress_default_axis.prototxt +++ b/src/frontends/onnx/tests/models/compress_default_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/compress_negative_axis.prototxt b/src/frontends/onnx/tests/models/compress_negative_axis.prototxt index 51a05bc452f461..82ff05ade407a4 100644 --- a/src/frontends/onnx/tests/models/compress_negative_axis.prototxt +++ b/src/frontends/onnx/tests/models/compress_negative_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/concat.prototxt b/src/frontends/onnx/tests/models/concat.prototxt index 9195c5d93c5f10..3d87ced28768cd 100644 --- a/src/frontends/onnx/tests/models/concat.prototxt +++ b/src/frontends/onnx/tests/models/concat.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { 
input: "value0" diff --git a/src/frontends/onnx/tests/models/concat_empty_init.prototxt b/src/frontends/onnx/tests/models/concat_empty_init.prototxt index 9d90c62ff00d40..fea396741b868d 100644 --- a/src/frontends/onnx/tests/models/concat_empty_init.prototxt +++ b/src/frontends/onnx/tests/models/concat_empty_init.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in_a" diff --git a/src/frontends/onnx/tests/models/concat_negative_axis.prototxt b/src/frontends/onnx/tests/models/concat_negative_axis.prototxt index 7634a1c2dc383f..307b3a9e2fedff 100644 --- a/src/frontends/onnx/tests/models/concat_negative_axis.prototxt +++ b/src/frontends/onnx/tests/models/concat_negative_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "value0" diff --git a/src/frontends/onnx/tests/models/constant_bfloat_tensor.prototxt b/src/frontends/onnx/tests/models/constant_bfloat_tensor.prototxt index 9f7e76623c9688..c43d6ac214fae6 100644 --- a/src/frontends/onnx/tests/models/constant_bfloat_tensor.prototxt +++ b/src/frontends/onnx/tests/models/constant_bfloat_tensor.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "values" diff --git a/src/frontends/onnx/tests/models/constant_fill_extra_shape.prototxt b/src/frontends/onnx/tests/models/constant_fill_extra_shape.prototxt index c88ab27073ec5e..a365d50eef2c46 100644 --- a/src/frontends/onnx/tests/models/constant_fill_extra_shape.prototxt +++ b/src/frontends/onnx/tests/models/constant_fill_extra_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "target_shape" diff --git a/src/frontends/onnx/tests/models/constant_fill_input_as_shape_default_value.prototxt 
b/src/frontends/onnx/tests/models/constant_fill_input_as_shape_default_value.prototxt index 30fa0dfa08a061..f28c8f50f8c7cc 100644 --- a/src/frontends/onnx/tests/models/constant_fill_input_as_shape_default_value.prototxt +++ b/src/frontends/onnx/tests/models/constant_fill_input_as_shape_default_value.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "target_shape" diff --git a/src/frontends/onnx/tests/models/constant_fill_input_as_shape_u8_type.prototxt b/src/frontends/onnx/tests/models/constant_fill_input_as_shape_u8_type.prototxt index e0312efefa5a3c..39a067255d29f9 100644 --- a/src/frontends/onnx/tests/models/constant_fill_input_as_shape_u8_type.prototxt +++ b/src/frontends/onnx/tests/models/constant_fill_input_as_shape_u8_type.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "target_shape" diff --git a/src/frontends/onnx/tests/models/constant_fill_shape_attribute.prototxt b/src/frontends/onnx/tests/models/constant_fill_shape_attribute.prototxt index cdbbf99419a241..1253e8c4dc207a 100644 --- a/src/frontends/onnx/tests/models/constant_fill_shape_attribute.prototxt +++ b/src/frontends/onnx/tests/models/constant_fill_shape_attribute.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "output" diff --git a/src/frontends/onnx/tests/models/constant_float_array.prototxt b/src/frontends/onnx/tests/models/constant_float_array.prototxt index 237fc6e05dc9f9..d72b537c61073c 100644 --- a/src/frontends/onnx/tests/models/constant_float_array.prototxt +++ b/src/frontends/onnx/tests/models/constant_float_array.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "values" diff --git a/src/frontends/onnx/tests/models/constant_float_scalar.prototxt 
b/src/frontends/onnx/tests/models/constant_float_scalar.prototxt index 3a711965bc86b9..464c8643068885 100644 --- a/src/frontends/onnx/tests/models/constant_float_scalar.prototxt +++ b/src/frontends/onnx/tests/models/constant_float_scalar.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "values" diff --git a/src/frontends/onnx/tests/models/constant_float_tensor.prototxt b/src/frontends/onnx/tests/models/constant_float_tensor.prototxt index b37db9d453e34f..5948fb3147e3e2 100644 --- a/src/frontends/onnx/tests/models/constant_float_tensor.prototxt +++ b/src/frontends/onnx/tests/models/constant_float_tensor.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "values" diff --git a/src/frontends/onnx/tests/models/constant_integer_array.prototxt b/src/frontends/onnx/tests/models/constant_integer_array.prototxt index 9aaa125ef6b61e..9af1f27ff5ed24 100644 --- a/src/frontends/onnx/tests/models/constant_integer_array.prototxt +++ b/src/frontends/onnx/tests/models/constant_integer_array.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "values" diff --git a/src/frontends/onnx/tests/models/constant_integer_scalar.prototxt b/src/frontends/onnx/tests/models/constant_integer_scalar.prototxt index 2b4f0f3d8b31f2..45425fa78fcf52 100644 --- a/src/frontends/onnx/tests/models/constant_integer_scalar.prototxt +++ b/src/frontends/onnx/tests/models/constant_integer_scalar.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "values" diff --git a/src/frontends/onnx/tests/models/constant_of_shape_empty_init.prototxt b/src/frontends/onnx/tests/models/constant_of_shape_empty_init.prototxt index 1bb3e6e2c3ec0f..01d83cae915331 100644 --- 
a/src/frontends/onnx/tests/models/constant_of_shape_empty_init.prototxt +++ b/src/frontends/onnx/tests/models/constant_of_shape_empty_init.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "onnx_frontend_test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/constant_of_shape_null_node.prototxt b/src/frontends/onnx/tests/models/constant_of_shape_null_node.prototxt index ca72f775742ea9..9d634feeedff9f 100644 --- a/src/frontends/onnx/tests/models/constant_of_shape_null_node.prototxt +++ b/src/frontends/onnx/tests/models/constant_of_shape_null_node.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "onnx_frontend_test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor.prototxt index b748e68bfd53cf..1bcc089ae0b573 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_bfloat16_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_bfloat16_3x4.prototxt index 90a0a550163f42..ad3b40068afa7e 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_bfloat16_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_bfloat16_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_boolean_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_boolean_3x4.prototxt index 75c65c837f64ed..ede069e2be43a5 100644 --- 
a/src/frontends/onnx/tests/models/constant_sparse_tensor_boolean_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_boolean_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_double_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_double_3x4.prototxt index 2837c3dee7d6fc..60738d5593d439 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_double_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_double_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_float16_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_float16_3x4.prototxt index 8634d744a05e19..9506e5f8ac56a7 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_float16_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_float16_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_float_2x2x3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_float_2x2x3x4.prototxt index c0bb8420fc69e1..1118281fe1a99b 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_float_2x2x3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_float_2x2x3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_float_2x3x4.prototxt 
b/src/frontends/onnx/tests/models/constant_sparse_tensor_float_2x3x4.prototxt index ffc91155f6afe1..6b7f32b5e32047 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_float_2x3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_float_2x3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_float_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_float_3x4.prototxt index ff524d9b3f13c9..ae17b689dd9082 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_float_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_float_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_float_3x4_linearized_indices.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_float_3x4_linearized_indices.prototxt index 0be52a8685959e..d93539abcdecf7 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_float_3x4_linearized_indices.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_float_3x4_linearized_indices.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_float_8x17.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_float_8x17.prototxt index a2a2b7feda0570..09f7332cb72882 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_float_8x17.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_float_8x17.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" 
graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_int16_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_int16_3x4.prototxt index 54842011edd102..00873f715de436 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_int16_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_int16_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_int32_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_int32_3x4.prototxt index be963883b64c78..bf2c1ef588b40d 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_int32_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_int32_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_int64_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_int64_3x4.prototxt index f06104d3e854b0..12015fb5ec58d6 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_int64_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_int64_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_int8_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_int8_3x4.prototxt index 0ec44a02fbb9bf..973b80eaa7519e 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_int8_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_int8_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" 
+producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_uint16_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_uint16_3x4.prototxt index 13f5d3fe129506..e4cc9f6585445d 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_uint16_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_uint16_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_uint32_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_uint32_3x4.prototxt index b0ee0e78723ad1..2dcfee974915bf 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_uint32_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_uint32_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_uint64_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_uint64_3x4.prototxt index 1a6f821d58bf99..bcf16cbfbc38e7 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_uint64_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_uint64_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/constant_sparse_tensor_uint8_3x4.prototxt b/src/frontends/onnx/tests/models/constant_sparse_tensor_uint8_3x4.prototxt index da366bb7432d1b..70cc27ac0d6825 100644 --- a/src/frontends/onnx/tests/models/constant_sparse_tensor_uint8_3x4.prototxt +++ b/src/frontends/onnx/tests/models/constant_sparse_tensor_uint8_3x4.prototxt @@ -1,5 +1,5 
@@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "dense_constant" diff --git a/src/frontends/onnx/tests/models/controlflow/if_branches_with_different_inputs.prototxt b/src/frontends/onnx/tests/models/controlflow/if_branches_with_different_inputs.prototxt index 93933c39da7af0..297904a4db8177 100644 --- a/src/frontends/onnx/tests/models/controlflow/if_branches_with_different_inputs.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/if_branches_with_different_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "if graph" node { diff --git a/src/frontends/onnx/tests/models/controlflow/if_branches_with_multiple_outputs.prototxt b/src/frontends/onnx/tests/models/controlflow/if_branches_with_multiple_outputs.prototxt index e0f721e7bfea3a..a2dc00e32e808a 100644 --- a/src/frontends/onnx/tests/models/controlflow/if_branches_with_multiple_outputs.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/if_branches_with_multiple_outputs.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "if graph" node { diff --git a/src/frontends/onnx/tests/models/controlflow/if_branches_with_same_inputs.prototxt b/src/frontends/onnx/tests/models/controlflow/if_branches_with_same_inputs.prototxt index 28aa8a0eeb6ac5..431ce23e57d200 100644 --- a/src/frontends/onnx/tests/models/controlflow/if_branches_with_same_inputs.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/if_branches_with_same_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "if graph" node { diff --git a/src/frontends/onnx/tests/models/controlflow/if_branches_without_inputs.prototxt b/src/frontends/onnx/tests/models/controlflow/if_branches_without_inputs.prototxt index 
8224617781ee0d..cd87c275e776fd 100644 --- a/src/frontends/onnx/tests/models/controlflow/if_branches_without_inputs.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/if_branches_without_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "if graph" node { diff --git a/src/frontends/onnx/tests/models/controlflow/if_dynamic_inputs.prototxt b/src/frontends/onnx/tests/models/controlflow/if_dynamic_inputs.prototxt index e3d2c2f1b3ec33..66096d1fb5b513 100644 --- a/src/frontends/onnx/tests/models/controlflow/if_dynamic_inputs.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/if_dynamic_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "if graph" node { diff --git a/src/frontends/onnx/tests/models/controlflow/if_inside_if.prototxt b/src/frontends/onnx/tests/models/controlflow/if_inside_if.prototxt index afb79d0140278c..7b079f57e05601 100644 --- a/src/frontends/onnx/tests/models/controlflow/if_inside_if.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/if_inside_if.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "if graph" node { diff --git a/src/frontends/onnx/tests/models/controlflow/if_inside_if_inside_loop.prototxt b/src/frontends/onnx/tests/models/controlflow/if_inside_if_inside_loop.prototxt index 1abaea478704a0..6ec80e6ec14cc0 100644 --- a/src/frontends/onnx/tests/models/controlflow/if_inside_if_inside_loop.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/if_inside_if_inside_loop.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "if inside if inside loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/if_inside_loop.prototxt 
b/src/frontends/onnx/tests/models/controlflow/if_inside_loop.prototxt index 544bb6b8602364..6851579d53c8fe 100644 --- a/src/frontends/onnx/tests/models/controlflow/if_inside_loop.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/if_inside_loop.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "if inside loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/if_missing_else_branch.prototxt b/src/frontends/onnx/tests/models/controlflow/if_missing_else_branch.prototxt index c7e8d9a9e36d68..cc88a2bdd79a37 100644 --- a/src/frontends/onnx/tests/models/controlflow/if_missing_else_branch.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/if_missing_else_branch.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "if graph" node { diff --git a/src/frontends/onnx/tests/models/controlflow/if_missing_then_branch.prototxt b/src/frontends/onnx/tests/models/controlflow/if_missing_then_branch.prototxt index 5f8007a6dce8d6..044b600bb6c1eb 100644 --- a/src/frontends/onnx/tests/models/controlflow/if_missing_then_branch.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/if_missing_then_branch.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "if graph" node { diff --git a/src/frontends/onnx/tests/models/controlflow/if_negative_mismatch_between_branches_output.prototxt b/src/frontends/onnx/tests/models/controlflow/if_negative_mismatch_between_branches_output.prototxt index 88515b28f92c3a..cac7cc18e548b5 100644 --- a/src/frontends/onnx/tests/models/controlflow/if_negative_mismatch_between_branches_output.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/if_negative_mismatch_between_branches_output.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: 
"OpenVINO ONNX Frontend" graph { name: "if graph" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add.prototxt index 3b0a0a8b966e3a..3eb41f59642af3 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_cond_and_trip_count_as_inputs.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_cond_and_trip_count_as_inputs.prototxt index 9882c0eddd6ac6..953d45d6b8ca1e 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_cond_and_trip_count_as_inputs.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_cond_and_trip_count_as_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_cond_and_trip_count_as_inputs_static_shapes.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_cond_and_trip_count_as_inputs_static_shapes.prototxt index 09bd922b5e60fb..4005b31e51be5e 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_cond_and_trip_count_as_inputs_static_shapes.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_cond_and_trip_count_as_inputs_static_shapes.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_cond.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_cond.prototxt index c6780118825a6a..8e5934454ada3f 100644 --- 
a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_cond.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_cond.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_no_identity_termination_cond.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_no_identity_termination_cond.prototxt index 41ea4ffd3d96fc..8e5a3f701f43aa 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_no_identity_termination_cond.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_no_identity_termination_cond.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_no_identity_termination_cond_static_shapes.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_no_identity_termination_cond_static_shapes.prototxt index 02de3522efb7b8..9cf10a501fa071 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_no_identity_termination_cond_static_shapes.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_const_no_identity_termination_cond_static_shapes.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_incorrect_access_body_scope.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_incorrect_access_body_scope.prototxt index bfab6ea5853dbe..9aa27b1e710cd4 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_incorrect_access_body_scope.prototxt +++ 
b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_incorrect_access_body_scope.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_initializer_from_parent_scope.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_initializer_from_parent_scope.prototxt index 8fdfe21b09ce85..9b83133bbdc911 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_initializer_from_parent_scope.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_initializer_from_parent_scope.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_input_from_parent_graph.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_input_from_parent_graph.prototxt index 46c928876d280e..ecbe4ac2531f21 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_input_from_parent_graph.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_input_from_parent_graph.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond.prototxt index edd40668d2df84..fb954819f7ba64 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { 
diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond_false.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond_false.prototxt index 8bd963d07cb293..dd1d531d269ce3 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond_false.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond_false.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond_static_shapes.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond_static_shapes.prototxt index 4002e920993733..e4278601a4ef8a 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond_static_shapes.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_no_identity_termination_cond_static_shapes.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_node_from_parent_scope.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_node_from_parent_scope.prototxt index d98cd7fa87fdf1..b2c8c6391978c7 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_node_from_parent_scope.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_node_from_parent_scope.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_the_same_name.prototxt 
b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_the_same_name.prototxt index 343c56c666ad18..5c41c5ecef787b 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_the_same_name.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_the_same_name.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_and_cond_skipped.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_and_cond_skipped.prototxt index 7b750ece3c1866..a4fdc9a140f600 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_and_cond_skipped.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_and_cond_skipped.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_dynamic.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_dynamic.prototxt index ad6cd5868cece2..3a0e44b11dec22 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_dynamic.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_dynamic.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_max_int.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_max_int.prototxt index 227e00d9e56f7f..a56865d6f8891d 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_max_int.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_add_trip_count_max_int.prototxt @@ -1,5 +1,5 @@ 
ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_2d_mul_opset1.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_2d_mul_opset1.prototxt index 7a52cee2f4ed5e..24e416ff745f09 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_2d_mul_opset1.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_2d_mul_opset1.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_add_node_from_parent_scope_infer_types.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_add_node_from_parent_scope_infer_types.prototxt index 0577bf5d0ef3fd..0e8d3c59599fd0 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_add_node_from_parent_scope_infer_types.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_add_node_from_parent_scope_infer_types.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_add_node_from_parent_scope_used_in_parent_and_in_body.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_add_node_from_parent_scope_used_in_parent_and_in_body.prototxt index ad594c795e41e1..6e1723413ed885 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_add_node_from_parent_scope_used_in_parent_and_in_body.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_add_node_from_parent_scope_used_in_parent_and_in_body.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_concat_values.prototxt 
b/src/frontends/onnx/tests/models/controlflow/loop_concat_values.prototxt index 9f5f4dd4b75eb0..26490f0ba4f593 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_concat_values.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_concat_values.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "iter_count_init" diff --git a/src/frontends/onnx/tests/models/controlflow/loop_infinite.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_infinite.prototxt index 67f1d16b954a87..116caba8e97e52 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_infinite.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_infinite.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "trip_count" diff --git a/src/frontends/onnx/tests/models/controlflow/loop_pow.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_pow.prototxt index 34d4a02698dd36..8a5ccfe4f6e208 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_pow.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_pow.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "power calculation" node { diff --git a/src/frontends/onnx/tests/models/controlflow/loop_scalars_add.prototxt b/src/frontends/onnx/tests/models/controlflow/loop_scalars_add.prototxt index f6d9d33113e082..e5bdb655de90da 100644 --- a/src/frontends/onnx/tests/models/controlflow/loop_scalars_add.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/loop_scalars_add.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/controlflow/onnx_controlflow_loop_2d_infer_types.prototxt 
b/src/frontends/onnx/tests/models/controlflow/onnx_controlflow_loop_2d_infer_types.prototxt index a29f7d97980a20..d822b7046cfdc8 100644 --- a/src/frontends/onnx/tests/models/controlflow/onnx_controlflow_loop_2d_infer_types.prototxt +++ b/src/frontends/onnx/tests/models/controlflow/onnx_controlflow_loop_2d_infer_types.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "basic loop" node { diff --git a/src/frontends/onnx/tests/models/conv2d_dilation_assym_pads_strides.prototxt b/src/frontends/onnx/tests/models/conv2d_dilation_assym_pads_strides.prototxt index 078fa6b2ef9b70..9bc88dbfaf1938 100644 --- a/src/frontends/onnx/tests/models/conv2d_dilation_assym_pads_strides.prototxt +++ b/src/frontends/onnx/tests/models/conv2d_dilation_assym_pads_strides.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/conv3d_bias.prototxt b/src/frontends/onnx/tests/models/conv3d_bias.prototxt index 464608d935d47e..a9218cda630e63 100644 --- a/src/frontends/onnx/tests/models/conv3d_bias.prototxt +++ b/src/frontends/onnx/tests/models/conv3d_bias.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/conv_fp16_W_as_int32.prototxt b/src/frontends/onnx/tests/models/conv_fp16_W_as_int32.prototxt index cdb880ac42615a..ebb708648438dd 100644 --- a/src/frontends/onnx/tests/models/conv_fp16_W_as_int32.prototxt +++ b/src/frontends/onnx/tests/models/conv_fp16_W_as_int32.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/conv_integer.prototxt b/src/frontends/onnx/tests/models/conv_integer.prototxt index 
59eaa27b097eab..2a3d28ce399c5f 100644 --- a/src/frontends/onnx/tests/models/conv_integer.prototxt +++ b/src/frontends/onnx/tests/models/conv_integer.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/conv_integer_int8.prototxt b/src/frontends/onnx/tests/models/conv_integer_int8.prototxt index b0d9340f0d17ee..80e6c14a624dd6 100644 --- a/src/frontends/onnx/tests/models/conv_integer_int8.prototxt +++ b/src/frontends/onnx/tests/models/conv_integer_int8.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/conv_integer_no_zero_point.prototxt b/src/frontends/onnx/tests/models/conv_integer_no_zero_point.prototxt index c2f0ce533a8db9..20442aae8b1c62 100644 --- a/src/frontends/onnx/tests/models/conv_integer_no_zero_point.prototxt +++ b/src/frontends/onnx/tests/models/conv_integer_no_zero_point.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/conv_integer_overload.prototxt b/src/frontends/onnx/tests/models/conv_integer_overload.prototxt index 28f780e6edb0b5..fb8cfbc623ad6e 100644 --- a/src/frontends/onnx/tests/models/conv_integer_overload.prototxt +++ b/src/frontends/onnx/tests/models/conv_integer_overload.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/conv_integer_scalar_zp.prototxt b/src/frontends/onnx/tests/models/conv_integer_scalar_zp.prototxt index 6c5763ac68871d..b7c75d7ec29ef4 100644 --- a/src/frontends/onnx/tests/models/conv_integer_scalar_zp.prototxt +++ b/src/frontends/onnx/tests/models/conv_integer_scalar_zp.prototxt @@ -1,5 +1,5 
@@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/conv_integer_vector_w_zero_point.prototxt b/src/frontends/onnx/tests/models/conv_integer_vector_w_zero_point.prototxt index de26393029cc10..baff8ca90d486e 100644 --- a/src/frontends/onnx/tests/models/conv_integer_vector_w_zero_point.prototxt +++ b/src/frontends/onnx/tests/models/conv_integer_vector_w_zero_point.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/conv_transpose_w_groups.prototxt b/src/frontends/onnx/tests/models/conv_transpose_w_groups.prototxt index e28aa5592257ea..b388a1f08bc0e1 100644 --- a/src/frontends/onnx/tests/models/conv_transpose_w_groups.prototxt +++ b/src/frontends/onnx/tests/models/conv_transpose_w_groups.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/conv_with_strides_and_asymmetric_padding.prototxt b/src/frontends/onnx/tests/models/conv_with_strides_and_asymmetric_padding.prototxt index da3ab6015e3d50..f2b5572122f4f1 100644 --- a/src/frontends/onnx/tests/models/conv_with_strides_and_asymmetric_padding.prototxt +++ b/src/frontends/onnx/tests/models/conv_with_strides_and_asymmetric_padding.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/conv_with_strides_no_padding.prototxt b/src/frontends/onnx/tests/models/conv_with_strides_no_padding.prototxt index 00ef3fd7ed48c1..38349ec50705a2 100644 --- a/src/frontends/onnx/tests/models/conv_with_strides_no_padding.prototxt +++ b/src/frontends/onnx/tests/models/conv_with_strides_no_padding.prototxt @@ -1,5 +1,5 @@ 
ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/conv_with_strides_padding.prototxt b/src/frontends/onnx/tests/models/conv_with_strides_padding.prototxt index ba083d27bcd417..5a8cc52aa30abb 100644 --- a/src/frontends/onnx/tests/models/conv_with_strides_padding.prototxt +++ b/src/frontends/onnx/tests/models/conv_with_strides_padding.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/conv_with_strides_padding_bias.prototxt b/src/frontends/onnx/tests/models/conv_with_strides_padding_bias.prototxt index f62757b70df95c..2534f5e105741f 100644 --- a/src/frontends/onnx/tests/models/conv_with_strides_padding_bias.prototxt +++ b/src/frontends/onnx/tests/models/conv_with_strides_padding_bias.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/convtranspose_dyn_data.prototxt b/src/frontends/onnx/tests/models/convtranspose_dyn_data.prototxt index 3d8711649a6180..fe05361b8ee67e 100644 --- a/src/frontends/onnx/tests/models/convtranspose_dyn_data.prototxt +++ b/src/frontends/onnx/tests/models/convtranspose_dyn_data.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "Data" diff --git a/src/frontends/onnx/tests/models/convtranspose_dyn_filters.prototxt b/src/frontends/onnx/tests/models/convtranspose_dyn_filters.prototxt index 8962706816db50..8ff3225b8fb10a 100644 --- a/src/frontends/onnx/tests/models/convtranspose_dyn_filters.prototxt +++ b/src/frontends/onnx/tests/models/convtranspose_dyn_filters.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX 
Frontend" graph { node { input: "Data" diff --git a/src/frontends/onnx/tests/models/convtranspose_groups_pads_bias.prototxt b/src/frontends/onnx/tests/models/convtranspose_groups_pads_bias.prototxt index b93a865a435558..4ab9e319afb950 100644 --- a/src/frontends/onnx/tests/models/convtranspose_groups_pads_bias.prototxt +++ b/src/frontends/onnx/tests/models/convtranspose_groups_pads_bias.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/convtranspose_groups_w_pads.prototxt b/src/frontends/onnx/tests/models/convtranspose_groups_w_pads.prototxt index f6dd6ef8c052fc..4273842e4c1f1f 100644 --- a/src/frontends/onnx/tests/models/convtranspose_groups_w_pads.prototxt +++ b/src/frontends/onnx/tests/models/convtranspose_groups_w_pads.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/convtranspose_output_shape.prototxt b/src/frontends/onnx/tests/models/convtranspose_output_shape.prototxt index 72ce82ed78d7c0..42f04d32ed7bb4 100644 --- a/src/frontends/onnx/tests/models/convtranspose_output_shape.prototxt +++ b/src/frontends/onnx/tests/models/convtranspose_output_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/convtranspose_output_shape_auto_pads_same_lower.prototxt b/src/frontends/onnx/tests/models/convtranspose_output_shape_auto_pads_same_lower.prototxt index 9b62275b35bd36..dfa569a2b07415 100644 --- a/src/frontends/onnx/tests/models/convtranspose_output_shape_auto_pads_same_lower.prototxt +++ b/src/frontends/onnx/tests/models/convtranspose_output_shape_auto_pads_same_lower.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: 
"OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/convtranspose_output_shape_auto_pads_same_upper.prototxt b/src/frontends/onnx/tests/models/convtranspose_output_shape_auto_pads_same_upper.prototxt index e10067834be127..284403e6ba7886 100644 --- a/src/frontends/onnx/tests/models/convtranspose_output_shape_auto_pads_same_upper.prototxt +++ b/src/frontends/onnx/tests/models/convtranspose_output_shape_auto_pads_same_upper.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/cosh.prototxt b/src/frontends/onnx/tests/models/cosh.prototxt index c2c376eee1ce8b..93dd144edc255e 100644 --- a/src/frontends/onnx/tests/models/cosh.prototxt +++ b/src/frontends/onnx/tests/models/cosh.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/crop.prototxt b/src/frontends/onnx/tests/models/crop.prototxt index c3ec30d4677bb4..d6e86ed29a63c4 100644 --- a/src/frontends/onnx/tests/models/crop.prototxt +++ b/src/frontends/onnx/tests/models/crop.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/crop_with_scale.prototxt b/src/frontends/onnx/tests/models/crop_with_scale.prototxt index a039c9e2a522c7..c16bf214e6e649 100644 --- a/src/frontends/onnx/tests/models/crop_with_scale.prototxt +++ b/src/frontends/onnx/tests/models/crop_with_scale.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/cum_sum_1d.prototxt b/src/frontends/onnx/tests/models/cum_sum_1d.prototxt index 6a09cfa51b34a2..90b7e830cc5838 100644 --- 
a/src/frontends/onnx/tests/models/cum_sum_1d.prototxt +++ b/src/frontends/onnx/tests/models/cum_sum_1d.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/cum_sum_2d_axis_input.prototxt b/src/frontends/onnx/tests/models/cum_sum_2d_axis_input.prototxt index 1585c29c6685d5..b1935051a5fe8a 100644 --- a/src/frontends/onnx/tests/models/cum_sum_2d_axis_input.prototxt +++ b/src/frontends/onnx/tests/models/cum_sum_2d_axis_input.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/cum_sum_2d_axis_input_1d.prototxt b/src/frontends/onnx/tests/models/cum_sum_2d_axis_input_1d.prototxt index 26653491426c08..16ea0328814b60 100644 --- a/src/frontends/onnx/tests/models/cum_sum_2d_axis_input_1d.prototxt +++ b/src/frontends/onnx/tests/models/cum_sum_2d_axis_input_1d.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "axis" diff --git a/src/frontends/onnx/tests/models/cum_sum_2d_dynamic_axis_input.prototxt b/src/frontends/onnx/tests/models/cum_sum_2d_dynamic_axis_input.prototxt index 6ee36ea10d3d07..da39376bdb8408 100644 --- a/src/frontends/onnx/tests/models/cum_sum_2d_dynamic_axis_input.prototxt +++ b/src/frontends/onnx/tests/models/cum_sum_2d_dynamic_axis_input.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/cum_sum_2d_dynamic_axis_input_1d.prototxt b/src/frontends/onnx/tests/models/cum_sum_2d_dynamic_axis_input_1d.prototxt index df0f0e13e989aa..456c764c54272b 100644 --- a/src/frontends/onnx/tests/models/cum_sum_2d_dynamic_axis_input_1d.prototxt +++ 
b/src/frontends/onnx/tests/models/cum_sum_2d_dynamic_axis_input_1d.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/cum_sum_3d_exclusive_reverse.prototxt b/src/frontends/onnx/tests/models/cum_sum_3d_exclusive_reverse.prototxt index 921201bb94738a..cea01cf45fa1f4 100644 --- a/src/frontends/onnx/tests/models/cum_sum_3d_exclusive_reverse.prototxt +++ b/src/frontends/onnx/tests/models/cum_sum_3d_exclusive_reverse.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/custom_operator.prototxt b/src/frontends/onnx/tests/models/custom_operator.prototxt index 01c521469ad600..de89b6425557ec 100644 --- a/src/frontends/onnx/tests/models/custom_operator.prototxt +++ b/src/frontends/onnx/tests/models/custom_operator.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/custom_operator_default_domain.prototxt b/src/frontends/onnx/tests/models/custom_operator_default_domain.prototxt index c9eb4c00fc7f5a..5437f527cd84e6 100644 --- a/src/frontends/onnx/tests/models/custom_operator_default_domain.prototxt +++ b/src/frontends/onnx/tests/models/custom_operator_default_domain.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/dangling_parameter.prototxt b/src/frontends/onnx/tests/models/dangling_parameter.prototxt index 1e07a93fef815e..20320bf1e8b97a 100644 --- a/src/frontends/onnx/tests/models/dangling_parameter.prototxt +++ b/src/frontends/onnx/tests/models/dangling_parameter.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "pytorch" 
+producer_name: "OpenVINO ONNX Frontend" producer_version: "1.2" graph { node { diff --git a/src/frontends/onnx/tests/models/depth_to_space.prototxt b/src/frontends/onnx/tests/models/depth_to_space.prototxt index c333504f2add72..db06467716ec08 100644 --- a/src/frontends/onnx/tests/models/depth_to_space.prototxt +++ b/src/frontends/onnx/tests/models/depth_to_space.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/depth_to_space_bad_blocksize.prototxt b/src/frontends/onnx/tests/models/depth_to_space_bad_blocksize.prototxt index 9d538068f9757e..d2dc5fadf01727 100644 --- a/src/frontends/onnx/tests/models/depth_to_space_bad_blocksize.prototxt +++ b/src/frontends/onnx/tests/models/depth_to_space_bad_blocksize.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/depth_to_space_bad_input_shape.prototxt b/src/frontends/onnx/tests/models/depth_to_space_bad_input_shape.prototxt index 8e7f4b6de7f614..c2ca604b777ed8 100644 --- a/src/frontends/onnx/tests/models/depth_to_space_bad_input_shape.prototxt +++ b/src/frontends/onnx/tests/models/depth_to_space_bad_input_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/depth_to_space_bad_mode.prototxt b/src/frontends/onnx/tests/models/depth_to_space_bad_mode.prototxt index f815683f355987..007da1be2eadc4 100644 --- a/src/frontends/onnx/tests/models/depth_to_space_bad_mode.prototxt +++ b/src/frontends/onnx/tests/models/depth_to_space_bad_mode.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git 
a/src/frontends/onnx/tests/models/depth_to_space_crd.prototxt b/src/frontends/onnx/tests/models/depth_to_space_crd.prototxt index 8a95eb42e1b44b..ddbfebbf1ad78e 100644 --- a/src/frontends/onnx/tests/models/depth_to_space_crd.prototxt +++ b/src/frontends/onnx/tests/models/depth_to_space_crd.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/depth_to_space_no_blocksize.prototxt b/src/frontends/onnx/tests/models/depth_to_space_no_blocksize.prototxt index 3f5349cd2da2d2..732a856676fe81 100644 --- a/src/frontends/onnx/tests/models/depth_to_space_no_blocksize.prototxt +++ b/src/frontends/onnx/tests/models/depth_to_space_no_blocksize.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/depth_to_space_v1.prototxt b/src/frontends/onnx/tests/models/depth_to_space_v1.prototxt index 2d36e813ad68c8..45bc68e703f21f 100644 --- a/src/frontends/onnx/tests/models/depth_to_space_v1.prototxt +++ b/src/frontends/onnx/tests/models/depth_to_space_v1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/dequant_lin.prototxt b/src/frontends/onnx/tests/models/dequant_lin.prototxt index beda2709cfa952..ac056487c6ebb2 100644 --- a/src/frontends/onnx/tests/models/dequant_lin.prototxt +++ b/src/frontends/onnx/tests/models/dequant_lin.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_0.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_0.prototxt index 14471af9751921..c4c1ddff550615 100644 --- 
a/src/frontends/onnx/tests/models/dequantize_linear_0.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_0.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_1.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_1.prototxt index 0e0a5251860075..5a7972fb9c3681 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_1.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_2.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_2.prototxt index 4629456efdd909..4506d17b8e11df 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_2.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_2.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_3.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_3.prototxt index 112312fd08e596..75b1017ac54965 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_3.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_3.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_4.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_4.prototxt index 422046dea2c72f..30fd72ee9a7ea3 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_4.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_4.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" 
+producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_4_dynamic.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_4_dynamic.prototxt index 71f18163cc28df..5ed3fd5fb33cb2 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_4_dynamic.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_4_dynamic.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "OpenVINO ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_5.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_5.prototxt index 2d9466c5af0818..9d9165618a60c4 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_5.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_5.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_no_zero_point.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_no_zero_point.prototxt index afadf5cdf231f7..a1e464846f0f04 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_no_zero_point.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_no_zero_point.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_scalar_ignore_axis.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_scalar_ignore_axis.prototxt index 1549feea81ba5f..1ab49258d8f551 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_scalar_ignore_axis.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_scalar_ignore_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: 
"x" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_scalar_inputs.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_scalar_inputs.prototxt index def42280a7c40f..46e28e37b93b12 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_scalar_inputs.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_scalar_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_scalar_scale.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_scalar_scale.prototxt index 8f6744ef56152e..8c8453a2ab62c7 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_scalar_scale.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_scalar_scale.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_scalar_scale_and_zero_point.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_scalar_scale_and_zero_point.prototxt index 1a1670cf8a80e8..e8a9cb001f89de 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_scalar_scale_and_zero_point.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_scalar_scale_and_zero_point.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_scalar_zero_point.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_scalar_zero_point.prototxt index f88ca13ad0c846..f1b835518d9305 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_scalar_zero_point.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_scalar_zero_point.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "ngraph ONNXImporter" 
+producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dequantize_linear_u16.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_u16.prototxt index c7a5a2b9f80b72..975734ef8dc6cf 100644 --- a/src/frontends/onnx/tests/models/dequantize_linear_u16.prototxt +++ b/src/frontends/onnx/tests/models/dequantize_linear_u16.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/detection_output.prototxt b/src/frontends/onnx/tests/models/detection_output.prototxt index 3ce54672ee12e6..a3517d54de8202 100644 --- a/src/frontends/onnx/tests/models/detection_output.prototxt +++ b/src/frontends/onnx/tests/models/detection_output.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_graph" node { diff --git a/src/frontends/onnx/tests/models/div.prototxt b/src/frontends/onnx/tests/models/div.prototxt index d16b5205b0fddc..be2dd2e0682ea3 100644 --- a/src/frontends/onnx/tests/models/div.prototxt +++ b/src/frontends/onnx/tests/models/div.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/div_v6_broadcast_axes_1_2.prototxt b/src/frontends/onnx/tests/models/div_v6_broadcast_axes_1_2.prototxt index 272b0cf6c55c29..8b77e9a5b2542f 100644 --- a/src/frontends/onnx/tests/models/div_v6_broadcast_axes_1_2.prototxt +++ b/src/frontends/onnx/tests/models/div_v6_broadcast_axes_1_2.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/div_v6_broadcast_axis_1.prototxt b/src/frontends/onnx/tests/models/div_v6_broadcast_axis_1.prototxt index 
f38bfffc887154..7533d7e75e48f3 100644 --- a/src/frontends/onnx/tests/models/div_v6_broadcast_axis_1.prototxt +++ b/src/frontends/onnx/tests/models/div_v6_broadcast_axis_1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/div_v6_broadcast_no_axis.prototxt b/src/frontends/onnx/tests/models/div_v6_broadcast_no_axis.prototxt index 4fe08f3c9c06d0..0e01ce7cc9d28b 100644 --- a/src/frontends/onnx/tests/models/div_v6_broadcast_no_axis.prototxt +++ b/src/frontends/onnx/tests/models/div_v6_broadcast_no_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/div_v7.prototxt b/src/frontends/onnx/tests/models/div_v7.prototxt index 7d1a553df0823d..faa398c097136c 100644 --- a/src/frontends/onnx/tests/models/div_v7.prototxt +++ b/src/frontends/onnx/tests/models/div_v7.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/div_v7_broadcast.prototxt b/src/frontends/onnx/tests/models/div_v7_broadcast.prototxt index 6b4d29c34effc2..a2bfdf61d71375 100644 --- a/src/frontends/onnx/tests/models/div_v7_broadcast.prototxt +++ b/src/frontends/onnx/tests/models/div_v7_broadcast.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/dropout12_no_training_no_return_mask.prototxt b/src/frontends/onnx/tests/models/dropout12_no_training_no_return_mask.prototxt index e28fb21b4e7bb5..120b16440d90eb 100644 --- a/src/frontends/onnx/tests/models/dropout12_no_training_no_return_mask.prototxt +++ 
b/src/frontends/onnx/tests/models/dropout12_no_training_no_return_mask.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dropout12_no_training_return_mask.prototxt b/src/frontends/onnx/tests/models/dropout12_no_training_return_mask.prototxt index 51046ebb8f4636..667a505e4ff35e 100644 --- a/src/frontends/onnx/tests/models/dropout12_no_training_return_mask.prototxt +++ b/src/frontends/onnx/tests/models/dropout12_no_training_return_mask.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dropout12_no_traning_no_const_rato.prototxt b/src/frontends/onnx/tests/models/dropout12_no_traning_no_const_rato.prototxt index a286df855933cc..cd671b13c21845 100644 --- a/src/frontends/onnx/tests/models/dropout12_no_traning_no_const_rato.prototxt +++ b/src/frontends/onnx/tests/models/dropout12_no_traning_no_const_rato.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "N" diff --git a/src/frontends/onnx/tests/models/dropout12_not_const_training_mode.prototxt b/src/frontends/onnx/tests/models/dropout12_not_const_training_mode.prototxt index 780f8f3d7246c8..f44b29cb0c73a6 100644 --- a/src/frontends/onnx/tests/models/dropout12_not_const_training_mode.prototxt +++ b/src/frontends/onnx/tests/models/dropout12_not_const_training_mode.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dropout12_training_mode.prototxt b/src/frontends/onnx/tests/models/dropout12_training_mode.prototxt index 518a0e1af4fff7..207bdcd84a1af0 100644 --- a/src/frontends/onnx/tests/models/dropout12_training_mode.prototxt +++ 
b/src/frontends/onnx/tests/models/dropout12_training_mode.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dropout1_no_training_no_return_mask.prototxt b/src/frontends/onnx/tests/models/dropout1_no_training_no_return_mask.prototxt index 9c106663844e9b..44bdffadf1ea15 100644 --- a/src/frontends/onnx/tests/models/dropout1_no_training_no_return_mask.prototxt +++ b/src/frontends/onnx/tests/models/dropout1_no_training_no_return_mask.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dropout1_no_training_return_mask.prototxt b/src/frontends/onnx/tests/models/dropout1_no_training_return_mask.prototxt index abc400dcdd175c..eb31d3368b772d 100644 --- a/src/frontends/onnx/tests/models/dropout1_no_training_return_mask.prototxt +++ b/src/frontends/onnx/tests/models/dropout1_no_training_return_mask.prototxt @@ -1,6 +1,6 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dropout7_no_return_mask.prototxt b/src/frontends/onnx/tests/models/dropout7_no_return_mask.prototxt index ced7fbca21ea13..11aa4c2a805c76 100644 --- a/src/frontends/onnx/tests/models/dropout7_no_return_mask.prototxt +++ b/src/frontends/onnx/tests/models/dropout7_no_return_mask.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/duplicated_more_output_names.prototxt b/src/frontends/onnx/tests/models/duplicated_more_output_names.prototxt index f6979b2729dc86..e2ef215c90c192 100644 --- a/src/frontends/onnx/tests/models/duplicated_more_output_names.prototxt +++ 
b/src/frontends/onnx/tests/models/duplicated_more_output_names.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx tests" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/duplicated_output_name.prototxt b/src/frontends/onnx/tests/models/duplicated_output_name.prototxt index 76d69797ea38f5..ffde38eca7297d 100644 --- a/src/frontends/onnx/tests/models/duplicated_output_name.prototxt +++ b/src/frontends/onnx/tests/models/duplicated_output_name.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx tests" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/a_plus_b_dyn_rank.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/a_plus_b_dyn_rank.prototxt index 667e94d22856d0..5520843ed3a36a 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/a_plus_b_dyn_rank.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/a_plus_b_dyn_rank.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/ab_plus_c.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/ab_plus_c.prototxt index 20f953c01434ca..dcefd14ee8a130 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/ab_plus_c.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/ab_plus_c.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/acosh_dyn_shape.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/acosh_dyn_shape.prototxt index 33de42dd437e2c..9d86aa7c36c009 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/acosh_dyn_shape.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/acosh_dyn_shape.prototxt @@ 
-1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/argmax_dyn.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/argmax_dyn.prototxt index c7661260bd6a0e..b2be02ae5675ee 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/argmax_dyn.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/argmax_dyn.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/argmin_no_keep_dims_dyn.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/argmin_no_keep_dims_dyn.prototxt index f8d6d85d102b9a..77901d95699d98 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/argmin_no_keep_dims_dyn.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/argmin_no_keep_dims_dyn.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/asinh_dyn_shape.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/asinh_dyn_shape.prototxt index 3c423d44ecdc3f..e097c453fd3f41 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/asinh_dyn_shape.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/asinh_dyn_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/atanh_dyn_shape.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/atanh_dyn_shape.prototxt index 4045fea449b078..6ef04f57eff945 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/atanh_dyn_shape.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/atanh_dyn_shape.prototxt @@ 
-1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/average_pool_2d_dyn.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/average_pool_2d_dyn.prototxt index 827c809446db7f..d418ab924e802a 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/average_pool_2d_dyn.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/average_pool_2d_dyn.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/constant_of_shape_float_zeros.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/constant_of_shape_float_zeros.prototxt index 385decebee0522..d38af7d76166ac 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/constant_of_shape_float_zeros.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/constant_of_shape_float_zeros.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/constant_of_shape_int_ones.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/constant_of_shape_int_ones.prototxt index 26be74f720ff27..84a149bc9ed5c5 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/constant_of_shape_int_ones.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/constant_of_shape_int_ones.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/conv_with_dynamic_batch.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/conv_with_dynamic_batch.prototxt index d1b385bbb0eb9f..956b79e582cb20 100644 --- 
a/src/frontends/onnx/tests/models/dynamic_shapes/conv_with_dynamic_batch.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/conv_with_dynamic_batch.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/conv_with_dynamic_bias.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/conv_with_dynamic_bias.prototxt index c6d9444947d2e3..25dbc67c4ed723 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/conv_with_dynamic_bias.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/conv_with_dynamic_bias.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/depth_to_space.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/depth_to_space.prototxt index d2333b515de61a..ae6f904967f0ec 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/depth_to_space.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/depth_to_space.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/expand_dyn.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/expand_dyn.prototxt index 7cc67cd5f10ab5..3f831df7575135 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/expand_dyn.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/expand_dyn.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/expand_uint16_dyn.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/expand_uint16_dyn.prototxt index 
e4b077e075fdfe..5dc8cee94aec24 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/expand_uint16_dyn.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/expand_uint16_dyn.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/eye_like_dyn_rank.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/eye_like_dyn_rank.prototxt index e9879a570a6037..5a8d574e65e6aa 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/eye_like_dyn_rank.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/eye_like_dyn_rank.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/eye_like_dyn_shape.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/eye_like_dyn_shape.prototxt index 64de955f629432..37155329dbe5bd 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/eye_like_dyn_shape.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/eye_like_dyn_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_axis.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_axis.prototxt index 68d324cda8cd9a..c3a83ad4476298 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_axis.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { name: "Dynamic Flatten" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_axis0.prototxt 
b/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_axis0.prototxt index 0e9d8bff3ccc56..d4e58887f7644f 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_axis0.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_axis0.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { name: "Dynamic Flatten" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_neg_axis.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_neg_axis.prototxt index 3105f2624c3322..ce835c0b41d96c 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_neg_axis.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/flatten_dyn_shape_neg_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { name: "Dynamic Flatten" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/global_average_pool_dyn.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/global_average_pool_dyn.prototxt index 01d6464464806c..824a2572979930 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/global_average_pool_dyn.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/global_average_pool_dyn.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/global_max_pool_dyn.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/global_max_pool_dyn.prototxt index 2fcbe53b016019..235b8beffa1778 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/global_max_pool_dyn.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/global_max_pool_dyn.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" 
+producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/gru_defaults_fwd_const_dynamic.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/gru_defaults_fwd_const_dynamic.prototxt index 30098cc9998f88..aec711a54a93e7 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/gru_defaults_fwd_const_dynamic.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/gru_defaults_fwd_const_dynamic.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/instance_norm_dyn_shape.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/instance_norm_dyn_shape.prototxt index 544a9c90bf980f..7deb2798b4a3fa 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/instance_norm_dyn_shape.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/instance_norm_dyn_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/instance_norm_dyn_shape2.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/instance_norm_dyn_shape2.prototxt index 6974fa1fdaca0c..8e041bb2dc9280 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/instance_norm_dyn_shape2.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/instance_norm_dyn_shape2.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/lstm_dyn_batch_seq.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/lstm_dyn_batch_seq.prototxt index 6e182b3c5d519a..89ab4e74963114 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/lstm_dyn_batch_seq.prototxt +++ 
b/src/frontends/onnx/tests/models/dynamic_shapes/lstm_dyn_batch_seq.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/lstm_dyn_batch_seq_3_inputs.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/lstm_dyn_batch_seq_3_inputs.prototxt index 3dc8eba455b0ab..ebc7b2243f370a 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/lstm_dyn_batch_seq_3_inputs.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/lstm_dyn_batch_seq_3_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_2d_dyn.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_2d_dyn.prototxt index 7651be7c812f86..4fd7fc519c2d9f 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_2d_dyn.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_2d_dyn.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_dyn_rank_without_default_attrs.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_dyn_rank_without_default_attrs.prototxt index 02977c439f6fb1..0c93add575ef05 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_dyn_rank_without_default_attrs.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_dyn_rank_without_default_attrs.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_with_indices_output.prototxt 
b/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_with_indices_output.prototxt index 6105d792f4582f..f07b350e0fcc6b 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_with_indices_output.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/max_pool_with_indices_output.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/reduce_max_dynamic_input_rank_negative_axis.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/reduce_max_dynamic_input_rank_negative_axis.prototxt index c9822ecb7327e8..37b4bbbaa14a40 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/reduce_max_dynamic_input_rank_negative_axis.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/reduce_max_dynamic_input_rank_negative_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/rnn_defaults_fwd_const_dynamic.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/rnn_defaults_fwd_const_dynamic.prototxt index 563ed354ffcb48..c1406fdb125ab5 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/rnn_defaults_fwd_const_dynamic.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/rnn_defaults_fwd_const_dynamic.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/scalar_initializers.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/scalar_initializers.prototxt index 240897589ae2e0..9a998ed004f8a2 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/scalar_initializers.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/scalar_initializers.prototxt @@ -1,5 +1,5 @@ 
ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "initializer_of_A" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/size_op_dyn.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/size_op_dyn.prototxt index 75b060ac3b86f2..f7dd0577c9504b 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/size_op_dyn.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/size_op_dyn.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_clamp_neg_ends_opset1.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_clamp_neg_ends_opset1.prototxt index cf3b161c72a076..b4446d0869fbcc 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_clamp_neg_ends_opset1.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_clamp_neg_ends_opset1.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_default_steps_dyn_begin_end.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_default_steps_dyn_begin_end.prototxt index 874efbe84f7305..9adeaf28111f55 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_default_steps_dyn_begin_end.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_default_steps_dyn_begin_end.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_input.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_input.prototxt index 7b0bf299ba5941..804a061031e13a 100644 --- 
a/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_input.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_input.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_input_opset1.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_input_opset1.prototxt index a58d74317a4c53..14a1b391ef91b5 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_input_opset1.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_input_opset1.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_the_same_out_shape.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_the_same_out_shape.prototxt index 27970a8d98973f..244cecbf37679d 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_the_same_out_shape.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_2d_the_same_out_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input.prototxt index 70f84189cd2b97..506cff2ee56259 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_12_axes.prototxt 
b/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_12_axes.prototxt index 57c0272c832c00..550cef3051db52 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_12_axes.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_12_axes.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_20_axes.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_20_axes.prototxt index cafd3b1cdbb70f..00caf041afdb65 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_20_axes.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_20_axes.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_21_axes_ends_max_opset1.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_21_axes_ends_max_opset1.prototxt index cccfa008ad6021..ba4ae866a6b70d 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_21_axes_ends_max_opset1.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_21_axes_ends_max_opset1.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_neg_axes.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_neg_axes.prototxt index cdaedbce493276..0fdd761af4259c 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_neg_axes.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_3d_input_neg_axes.prototxt @@ -1,5 +1,5 @@ ir_version: 10 
-producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_0231_axes_ends_max.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_0231_axes_ends_max.prototxt index 312e4dc3c775f0..cb45ee6cc2150d 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_0231_axes_ends_max.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_0231_axes_ends_max.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_2103_axes.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_2103_axes.prototxt index 1a9954201f003b..c0554c2e608d9c 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_2103_axes.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_2103_axes.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_23_axes.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_23_axes.prototxt index 3bb61d8eb590bb..88979f5b2ec524 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_23_axes.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_23_axes.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_23_axes_21_steps.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_23_axes_21_steps.prototxt index 64f83b82c4b159..eee6e96fc981f2 100644 
--- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_23_axes_21_steps.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_4d_input_23_axes_21_steps.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_default_axes.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_default_axes.prototxt index 069060772bde4d..0ff817a63d119d 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_default_axes.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_default_axes.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/slice_default_steps.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/slice_default_steps.prototxt index 73dd4f37ff895a..1d6cbb3c00403f 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/slice_default_steps.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/slice_default_steps.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice" node { diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/space_to_depth.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/space_to_depth.prototxt index 0aab54f75ba971..34fd480fe14dac 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/space_to_depth.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/space_to_depth.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/transpose.prototxt 
b/src/frontends/onnx/tests/models/dynamic_shapes/transpose.prototxt index 0e953730a07cd6..65d2d6fedb1fb9 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/transpose.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/transpose.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { name: "Dynamic Transpose" diff --git a/src/frontends/onnx/tests/models/dynamic_shapes/trilu_lower.prototxt b/src/frontends/onnx/tests/models/dynamic_shapes/trilu_lower.prototxt index b1b5ce8efb5648..d03ec843ca9b14 100644 --- a/src/frontends/onnx/tests/models/dynamic_shapes/trilu_lower.prototxt +++ b/src/frontends/onnx/tests/models/dynamic_shapes/trilu_lower.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/einsum_sum.prototxt b/src/frontends/onnx/tests/models/einsum_sum.prototxt index 0f56b47d3abfe3..d805213a93413b 100644 --- a/src/frontends/onnx/tests/models/einsum_sum.prototxt +++ b/src/frontends/onnx/tests/models/einsum_sum.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/elu.prototxt b/src/frontends/onnx/tests/models/elu.prototxt index a1554bd2e53d49..70377d060f2823 100644 --- a/src/frontends/onnx/tests/models/elu.prototxt +++ b/src/frontends/onnx/tests/models/elu.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/empty_initializers_handling.prototxt b/src/frontends/onnx/tests/models/empty_initializers_handling.prototxt index a91df89844ca69..d226fdc93917f3 100644 --- a/src/frontends/onnx/tests/models/empty_initializers_handling.prototxt +++ 
b/src/frontends/onnx/tests/models/empty_initializers_handling.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { initializer { dims: 0 diff --git a/src/frontends/onnx/tests/models/erf.prototxt b/src/frontends/onnx/tests/models/erf.prototxt index ec5604102046c2..156e7059e2eaf6 100644 --- a/src/frontends/onnx/tests/models/erf.prototxt +++ b/src/frontends/onnx/tests/models/erf.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/erf_int32.prototxt b/src/frontends/onnx/tests/models/erf_int32.prototxt index 7fe6547662bda4..ef44b2c129572a 100644 --- a/src/frontends/onnx/tests/models/erf_int32.prototxt +++ b/src/frontends/onnx/tests/models/erf_int32.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/expand_failsafe_node.prototxt b/src/frontends/onnx/tests/models/expand_failsafe_node.prototxt index 3a0c9de1d2e582..1d51dced563981 100644 --- a/src/frontends/onnx/tests/models/expand_failsafe_node.prototxt +++ b/src/frontends/onnx/tests/models/expand_failsafe_node.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/expand_static_shape.prototxt b/src/frontends/onnx/tests/models/expand_static_shape.prototxt index 94c5b130747712..19156f9f432ecc 100644 --- a/src/frontends/onnx/tests/models/expand_static_shape.prototxt +++ b/src/frontends/onnx/tests/models/expand_static_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git 
a/src/frontends/onnx/tests/models/external_data/external_data.prototxt b/src/frontends/onnx/tests/models/external_data/external_data.prototxt index 9500b33dcc5ffd..1768cb84e64869 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/external_data/external_data_different_paths.prototxt b/src/frontends/onnx/tests/models/external_data/external_data_different_paths.prototxt index 82bb35912b5a10..9b4f30b28f30ff 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data_different_paths.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data_different_paths.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data_a" diff --git a/src/frontends/onnx/tests/models/external_data/external_data_file_not_found.prototxt b/src/frontends/onnx/tests/models/external_data/external_data_file_not_found.prototxt index f65b8186aa8260..f573772f7e40a2 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data_file_not_found.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data_file_not_found.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/external_data/external_data_in_constant_node.prototxt b/src/frontends/onnx/tests/models/external_data/external_data_in_constant_node.prototxt index d34eca94fbc9b6..ebe30c7d3ba280 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data_in_constant_node.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data_in_constant_node.prototxt @@ -1,5 +1,5 @@ 
ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/external_data/external_data_incorrect_data_shape.prototxt b/src/frontends/onnx/tests/models/external_data/external_data_incorrect_data_shape.prototxt index 967ab954886125..86f00e34eccc13 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data_incorrect_data_shape.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data_incorrect_data_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/external_data/external_data_invalid_data_length.prototxt b/src/frontends/onnx/tests/models/external_data/external_data_invalid_data_length.prototxt index 2bc0d9fcf07d98..f80423b0c773a5 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data_invalid_data_length.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data_invalid_data_length.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/external_data/external_data_offset_not_aligned_with_page_and_greater_than_page_size_with_length_provided.prototxt b/src/frontends/onnx/tests/models/external_data/external_data_offset_not_aligned_with_page_and_greater_than_page_size_with_length_provided.prototxt index 406b03f960ff6f..10be5a2b0f71c9 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data_offset_not_aligned_with_page_and_greater_than_page_size_with_length_provided.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data_offset_not_aligned_with_page_and_greater_than_page_size_with_length_provided.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX 
Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/external_data/external_data_offset_not_aligned_with_page_and_less_than_page_size_with_length_provided.prototxt b/src/frontends/onnx/tests/models/external_data/external_data_offset_not_aligned_with_page_and_less_than_page_size_with_length_provided.prototxt index 8296f1bb17bed1..779cb1ac18ff2a 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data_offset_not_aligned_with_page_and_less_than_page_size_with_length_provided.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data_offset_not_aligned_with_page_and_less_than_page_size_with_length_provided.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/external_data/external_data_optional_fields.prototxt b/src/frontends/onnx/tests/models/external_data/external_data_optional_fields.prototxt index 70f4b56e5dcdd0..dc32d26481304b 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data_optional_fields.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data_optional_fields.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/external_data/external_data_optional_fields_offset_not_aligned.prototxt b/src/frontends/onnx/tests/models/external_data/external_data_optional_fields_offset_not_aligned.prototxt index 4a5359d0de42dd..134eef0bf36599 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data_optional_fields_offset_not_aligned.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data_optional_fields_offset_not_aligned.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git 
a/src/frontends/onnx/tests/models/external_data/external_data_sanitize_test.prototxt b/src/frontends/onnx/tests/models/external_data/external_data_sanitize_test.prototxt index 950b90071c5edd..43bdf20327eb6e 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data_sanitize_test.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data_sanitize_test.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/external_data/external_data_two_tensors_data_in_the_same_file.prototxt b/src/frontends/onnx/tests/models/external_data/external_data_two_tensors_data_in_the_same_file.prototxt index 2b9b3626e73705..77e77ea3ef8ba5 100644 --- a/src/frontends/onnx/tests/models/external_data/external_data_two_tensors_data_in_the_same_file.prototxt +++ b/src/frontends/onnx/tests/models/external_data/external_data_two_tensors_data_in_the_same_file.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data_a" diff --git a/src/frontends/onnx/tests/models/external_data/inner_scope/external_data_file_in_up_dir.prototxt b/src/frontends/onnx/tests/models/external_data/inner_scope/external_data_file_in_up_dir.prototxt index aa98b3bce98b24..dcc4c33d0c6b5b 100644 --- a/src/frontends/onnx/tests/models/external_data/inner_scope/external_data_file_in_up_dir.prototxt +++ b/src/frontends/onnx/tests/models/external_data/inner_scope/external_data_file_in_up_dir.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/external_data/offset_not_aligned_with_page_in_two_pages_scope.prototxt b/src/frontends/onnx/tests/models/external_data/offset_not_aligned_with_page_in_two_pages_scope.prototxt index 
5a1737dcc13b6f..29de3afac98120 100644 --- a/src/frontends/onnx/tests/models/external_data/offset_not_aligned_with_page_in_two_pages_scope.prototxt +++ b/src/frontends/onnx/tests/models/external_data/offset_not_aligned_with_page_in_two_pages_scope.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/eye_like.prototxt b/src/frontends/onnx/tests/models/eye_like.prototxt index becaff04e7490c..ba19606a4edb52 100644 --- a/src/frontends/onnx/tests/models/eye_like.prototxt +++ b/src/frontends/onnx/tests/models/eye_like.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/eye_like_wrong_shape.prototxt b/src/frontends/onnx/tests/models/eye_like_wrong_shape.prototxt index d252fe6877dc50..79979fb17eefff 100644 --- a/src/frontends/onnx/tests/models/eye_like_wrong_shape.prototxt +++ b/src/frontends/onnx/tests/models/eye_like_wrong_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/flatten.prototxt b/src/frontends/onnx/tests/models/flatten.prototxt index 53e7835dba9452..4e01dba50f52aa 100644 --- a/src/frontends/onnx/tests/models/flatten.prototxt +++ b/src/frontends/onnx/tests/models/flatten.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/gatherND_float.prototxt b/src/frontends/onnx/tests/models/gatherND_float.prototxt index 39a990a63a65b3..aaea95ce36f00f 100644 --- a/src/frontends/onnx/tests/models/gatherND_float.prototxt +++ b/src/frontends/onnx/tests/models/gatherND_float.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: 
"nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/gatherND_int32.prototxt b/src/frontends/onnx/tests/models/gatherND_int32.prototxt index a10e901a44a5fc..86e254203d07f3 100644 --- a/src/frontends/onnx/tests/models/gatherND_int32.prototxt +++ b/src/frontends/onnx/tests/models/gatherND_int32.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/gather_elements_float_1D.prototxt b/src/frontends/onnx/tests/models/gather_elements_float_1D.prototxt index 6973b9e5fa3744..f6a8a934d7d903 100644 --- a/src/frontends/onnx/tests/models/gather_elements_float_1D.prototxt +++ b/src/frontends/onnx/tests/models/gather_elements_float_1D.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/gather_elements_float_3D_axis_2.prototxt b/src/frontends/onnx/tests/models/gather_elements_float_3D_axis_2.prototxt index 0e746696685610..dedc091bc12c6d 100644 --- a/src/frontends/onnx/tests/models/gather_elements_float_3D_axis_2.prototxt +++ b/src/frontends/onnx/tests/models/gather_elements_float_3D_axis_2.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/gather_elements_float_negative_axis.prototxt b/src/frontends/onnx/tests/models/gather_elements_float_negative_axis.prototxt index f09c7e9b5f01c8..8ed1b0a3db426c 100644 --- a/src/frontends/onnx/tests/models/gather_elements_float_negative_axis.prototxt +++ b/src/frontends/onnx/tests/models/gather_elements_float_negative_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node 
{ input: "data" diff --git a/src/frontends/onnx/tests/models/gather_elements_int32_axis_0.prototxt b/src/frontends/onnx/tests/models/gather_elements_int32_axis_0.prototxt index ee374991ca64bb..eba1513b45ae46 100644 --- a/src/frontends/onnx/tests/models/gather_elements_int32_axis_0.prototxt +++ b/src/frontends/onnx/tests/models/gather_elements_int32_axis_0.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/gather_elements_int8_axis_1.prototxt b/src/frontends/onnx/tests/models/gather_elements_int8_axis_1.prototxt index 494042f94d88e1..2a2202f1f47f14 100644 --- a/src/frontends/onnx/tests/models/gather_elements_int8_axis_1.prototxt +++ b/src/frontends/onnx/tests/models/gather_elements_int8_axis_1.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/gemm_abc.prototxt b/src/frontends/onnx/tests/models/gemm_abc.prototxt index a649dfb8ee1c85..fb5d3db79d9569 100644 --- a/src/frontends/onnx/tests/models/gemm_abc.prototxt +++ b/src/frontends/onnx/tests/models/gemm_abc.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "a" diff --git a/src/frontends/onnx/tests/models/global_lp_pool_dynamic_hw.prototxt b/src/frontends/onnx/tests/models/global_lp_pool_dynamic_hw.prototxt index aab38654dd6489..ad0226addf3e72 100644 --- a/src/frontends/onnx/tests/models/global_lp_pool_dynamic_hw.prototxt +++ b/src/frontends/onnx/tests/models/global_lp_pool_dynamic_hw.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/global_lp_pool_p0.prototxt 
b/src/frontends/onnx/tests/models/global_lp_pool_p0.prototxt index a5957073d63d2b..16a8c4a95f38a4 100644 --- a/src/frontends/onnx/tests/models/global_lp_pool_p0.prototxt +++ b/src/frontends/onnx/tests/models/global_lp_pool_p0.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/global_lp_pool_p1.prototxt b/src/frontends/onnx/tests/models/global_lp_pool_p1.prototxt index 5cbff0bbf65a68..d7effffc6b95a0 100644 --- a/src/frontends/onnx/tests/models/global_lp_pool_p1.prototxt +++ b/src/frontends/onnx/tests/models/global_lp_pool_p1.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/global_lp_pool_p2.prototxt b/src/frontends/onnx/tests/models/global_lp_pool_p2.prototxt index dbedca94310381..1ea4e799c6a2cd 100644 --- a/src/frontends/onnx/tests/models/global_lp_pool_p2.prototxt +++ b/src/frontends/onnx/tests/models/global_lp_pool_p2.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/global_lp_pool_p3.prototxt b/src/frontends/onnx/tests/models/global_lp_pool_p3.prototxt index b4bd9491280670..11a18157e95d82 100644 --- a/src/frontends/onnx/tests/models/global_lp_pool_p3.prototxt +++ b/src/frontends/onnx/tests/models/global_lp_pool_p3.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/greater_or_equal_float.prototxt b/src/frontends/onnx/tests/models/greater_or_equal_float.prototxt index 983efd96160bca..2acd66e9a84474 100644 --- a/src/frontends/onnx/tests/models/greater_or_equal_float.prototxt +++ 
b/src/frontends/onnx/tests/models/greater_or_equal_float.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/greater_or_equal_int.prototxt b/src/frontends/onnx/tests/models/greater_or_equal_int.prototxt index 16058fabd297e9..b8f54f34d54581 100644 --- a/src/frontends/onnx/tests/models/greater_or_equal_int.prototxt +++ b/src/frontends/onnx/tests/models/greater_or_equal_int.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/grid_sample.prototxt b/src/frontends/onnx/tests/models/grid_sample.prototxt index e8365b232e7adc..2e00e98e059185 100644 --- a/src/frontends/onnx/tests/models/grid_sample.prototxt +++ b/src/frontends/onnx/tests/models/grid_sample.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/group_norm.prototxt b/src/frontends/onnx/tests/models/group_norm.prototxt index e5f43cd7c3672c..21f271b3c6c6a8 100644 --- a/src/frontends/onnx/tests/models/group_norm.prototxt +++ b/src/frontends/onnx/tests/models/group_norm.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/group_norm_4D_bias_and_scale.prototxt b/src/frontends/onnx/tests/models/group_norm_4D_bias_and_scale.prototxt index 7138bf3bc395bd..74fb06c300851d 100644 --- a/src/frontends/onnx/tests/models/group_norm_4D_bias_and_scale.prototxt +++ b/src/frontends/onnx/tests/models/group_norm_4D_bias_and_scale.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git 
a/src/frontends/onnx/tests/models/group_norm_5d.prototxt b/src/frontends/onnx/tests/models/group_norm_5d.prototxt index aced083efd2780..808dda22c63d1f 100644 --- a/src/frontends/onnx/tests/models/group_norm_5d.prototxt +++ b/src/frontends/onnx/tests/models/group_norm_5d.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/group_normalization_2grp.prototxt b/src/frontends/onnx/tests/models/group_normalization_2grp.prototxt index 978ab918a2c521..65999939afe0df 100644 --- a/src/frontends/onnx/tests/models/group_normalization_2grp.prototxt +++ b/src/frontends/onnx/tests/models/group_normalization_2grp.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/group_normalization_3grp.prototxt b/src/frontends/onnx/tests/models/group_normalization_3grp.prototxt index 1711e41bd5d48f..5a6f53c8d8802a 100644 --- a/src/frontends/onnx/tests/models/group_normalization_3grp.prototxt +++ b/src/frontends/onnx/tests/models/group_normalization_3grp.prototxt @@ -3,7 +3,7 @@ opset_import { domain: "" version: 18 } -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/group_normalization_custom_eps.prototxt b/src/frontends/onnx/tests/models/group_normalization_custom_eps.prototxt index 083b5d8ecf5d0e..19924681b78d2e 100644 --- a/src/frontends/onnx/tests/models/group_normalization_custom_eps.prototxt +++ b/src/frontends/onnx/tests/models/group_normalization_custom_eps.prototxt @@ -3,7 +3,7 @@ opset_import { domain: "" version: 18 } -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/gru_bidir_mixed_seq_len_const.prototxt 
b/src/frontends/onnx/tests/models/gru_bidir_mixed_seq_len_const.prototxt index 4e3e9317868bc9..13805f931b0296 100644 --- a/src/frontends/onnx/tests/models/gru_bidir_mixed_seq_len_const.prototxt +++ b/src/frontends/onnx/tests/models/gru_bidir_mixed_seq_len_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/gru_bidirectional.prototxt b/src/frontends/onnx/tests/models/gru_bidirectional.prototxt index 66a4f47a0338f2..95c65aaf147597 100644 --- a/src/frontends/onnx/tests/models/gru_bidirectional.prototxt +++ b/src/frontends/onnx/tests/models/gru_bidirectional.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/gru_bidirectional_const.prototxt b/src/frontends/onnx/tests/models/gru_bidirectional_const.prototxt index 746cdc2012c386..f432ad1dc319cd 100644 --- a/src/frontends/onnx/tests/models/gru_bidirectional_const.prototxt +++ b/src/frontends/onnx/tests/models/gru_bidirectional_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/gru_defaults_fwd.prototxt b/src/frontends/onnx/tests/models/gru_defaults_fwd.prototxt index 2858ba169a42dc..da52c7c923e061 100644 --- a/src/frontends/onnx/tests/models/gru_defaults_fwd.prototxt +++ b/src/frontends/onnx/tests/models/gru_defaults_fwd.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/gru_defaults_fwd_const.prototxt b/src/frontends/onnx/tests/models/gru_defaults_fwd_const.prototxt index 68055158498088..2bb8588a0ced1b 100644 --- 
a/src/frontends/onnx/tests/models/gru_defaults_fwd_const.prototxt +++ b/src/frontends/onnx/tests/models/gru_defaults_fwd_const.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/gru_fwd_activations_relu_hardsigmoid.prototxt b/src/frontends/onnx/tests/models/gru_fwd_activations_relu_hardsigmoid.prototxt index 66537b43580f26..b32b143a0ad198 100644 --- a/src/frontends/onnx/tests/models/gru_fwd_activations_relu_hardsigmoid.prototxt +++ b/src/frontends/onnx/tests/models/gru_fwd_activations_relu_hardsigmoid.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/gru_fwd_activations_relu_sigmoid_const.prototxt b/src/frontends/onnx/tests/models/gru_fwd_activations_relu_sigmoid_const.prototxt index 9c8f39879f8a78..33b6d6a7eaaff1 100644 --- a/src/frontends/onnx/tests/models/gru_fwd_activations_relu_sigmoid_const.prototxt +++ b/src/frontends/onnx/tests/models/gru_fwd_activations_relu_sigmoid_const.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/gru_fwd_bias_initial_h.prototxt b/src/frontends/onnx/tests/models/gru_fwd_bias_initial_h.prototxt index adc6fa44d654b2..6406c2fe1fda36 100644 --- a/src/frontends/onnx/tests/models/gru_fwd_bias_initial_h.prototxt +++ b/src/frontends/onnx/tests/models/gru_fwd_bias_initial_h.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/gru_fwd_bias_initial_h_const.prototxt b/src/frontends/onnx/tests/models/gru_fwd_bias_initial_h_const.prototxt index d8feb77aad9340..41871ef4b51361 100644 --- 
a/src/frontends/onnx/tests/models/gru_fwd_bias_initial_h_const.prototxt +++ b/src/frontends/onnx/tests/models/gru_fwd_bias_initial_h_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/gru_fwd_linear_before_reset.prototxt b/src/frontends/onnx/tests/models/gru_fwd_linear_before_reset.prototxt index e4367ef3a9bdb5..f416cdfaf378a9 100644 --- a/src/frontends/onnx/tests/models/gru_fwd_linear_before_reset.prototxt +++ b/src/frontends/onnx/tests/models/gru_fwd_linear_before_reset.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/gru_fwd_linear_before_reset_const.prototxt b/src/frontends/onnx/tests/models/gru_fwd_linear_before_reset_const.prototxt index 7a50debeb488fb..3086ea7a2f335c 100644 --- a/src/frontends/onnx/tests/models/gru_fwd_linear_before_reset_const.prototxt +++ b/src/frontends/onnx/tests/models/gru_fwd_linear_before_reset_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/gru_fwd_mixed_seq_len.prototxt b/src/frontends/onnx/tests/models/gru_fwd_mixed_seq_len.prototxt index 88b77e245f3fec..43e55baad3a7fa 100644 --- a/src/frontends/onnx/tests/models/gru_fwd_mixed_seq_len.prototxt +++ b/src/frontends/onnx/tests/models/gru_fwd_mixed_seq_len.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/gru_fwd_mixed_seq_len_const.prototxt b/src/frontends/onnx/tests/models/gru_fwd_mixed_seq_len_const.prototxt index aeebe53c3189cb..93b8dd4b113b13 100644 --- 
a/src/frontends/onnx/tests/models/gru_fwd_mixed_seq_len_const.prototxt +++ b/src/frontends/onnx/tests/models/gru_fwd_mixed_seq_len_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/gru_rev_clip.prototxt b/src/frontends/onnx/tests/models/gru_rev_clip.prototxt index 3c1f7cee485cf3..65351ef876a60d 100644 --- a/src/frontends/onnx/tests/models/gru_rev_clip.prototxt +++ b/src/frontends/onnx/tests/models/gru_rev_clip.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/gru_rev_clip_const.prototxt b/src/frontends/onnx/tests/models/gru_rev_clip_const.prototxt index ffe25b92e13ebc..b0d6465ce2abaa 100644 --- a/src/frontends/onnx/tests/models/gru_rev_clip_const.prototxt +++ b/src/frontends/onnx/tests/models/gru_rev_clip_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/gru_reverse.prototxt b/src/frontends/onnx/tests/models/gru_reverse.prototxt index c0504334c8fa09..73045ba350696a 100644 --- a/src/frontends/onnx/tests/models/gru_reverse.prototxt +++ b/src/frontends/onnx/tests/models/gru_reverse.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/gru_reverse_const.prototxt b/src/frontends/onnx/tests/models/gru_reverse_const.prototxt index 929d7dc19540fd..366896bb7a6b54 100644 --- a/src/frontends/onnx/tests/models/gru_reverse_const.prototxt +++ b/src/frontends/onnx/tests/models/gru_reverse_const.prototxt @@ -1,6 +1,6 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { 
output: "W" diff --git a/src/frontends/onnx/tests/models/gru_reverse_mixed_seq_len_const.prototxt b/src/frontends/onnx/tests/models/gru_reverse_mixed_seq_len_const.prototxt index 1f83234810ca88..0326325cd8a174 100644 --- a/src/frontends/onnx/tests/models/gru_reverse_mixed_seq_len_const.prototxt +++ b/src/frontends/onnx/tests/models/gru_reverse_mixed_seq_len_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt index 2bf75ed29fe7f6..75f5e6786fe5e5 100644 --- a/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/hammingwindow_periodic.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "size" diff --git a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt index 1c9a9019829383..8f8460d965b884 100644 --- a/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/hammingwindow_symmetric.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "size" diff --git a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt index 2895bf5ad9b4d9..3e1beefc186b51 100644 --- a/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt +++ b/src/frontends/onnx/tests/models/hannwindow_periodic.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "size" diff --git a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt 
b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt index ec2bc2b8e42bef..0a0da994569b30 100644 --- a/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt +++ b/src/frontends/onnx/tests/models/hannwindow_symmetric.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "size" diff --git a/src/frontends/onnx/tests/models/hard_sigmoid.prototxt b/src/frontends/onnx/tests/models/hard_sigmoid.prototxt index ece9fd275a7835..45a6944d3a2d82 100644 --- a/src/frontends/onnx/tests/models/hard_sigmoid.prototxt +++ b/src/frontends/onnx/tests/models/hard_sigmoid.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/hardmax.prototxt b/src/frontends/onnx/tests/models/hardmax.prototxt index 045f3757db0e89..b3151f54760a2b 100644 --- a/src/frontends/onnx/tests/models/hardmax.prototxt +++ b/src/frontends/onnx/tests/models/hardmax.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/image_scaler.prototxt b/src/frontends/onnx/tests/models/image_scaler.prototxt index bc45e8a768bf7f..ee531a35bd4590 100644 --- a/src/frontends/onnx/tests/models/image_scaler.prototxt +++ b/src/frontends/onnx/tests/models/image_scaler.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/initializer_wo_input.prototxt b/src/frontends/onnx/tests/models/initializer_wo_input.prototxt index 75ca2ca826fe52..74e65313ac0719 100644 --- a/src/frontends/onnx/tests/models/initializer_wo_input.prototxt +++ b/src/frontends/onnx/tests/models/initializer_wo_input.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: 
"nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/instance_norm.prototxt b/src/frontends/onnx/tests/models/instance_norm.prototxt index 6e446c95e000a0..ac144be1484c04 100644 --- a/src/frontends/onnx/tests/models/instance_norm.prototxt +++ b/src/frontends/onnx/tests/models/instance_norm.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/instance_norm_bad_scale_type.prototxt b/src/frontends/onnx/tests/models/instance_norm_bad_scale_type.prototxt index 8a13b675ffb25c..eaf9add9efb8a4 100644 --- a/src/frontends/onnx/tests/models/instance_norm_bad_scale_type.prototxt +++ b/src/frontends/onnx/tests/models/instance_norm_bad_scale_type.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/instance_norm_dynamic.prototxt b/src/frontends/onnx/tests/models/instance_norm_dynamic.prototxt index 4d6a938ad5ee81..a29ebb79679b8c 100644 --- a/src/frontends/onnx/tests/models/instance_norm_dynamic.prototxt +++ b/src/frontends/onnx/tests/models/instance_norm_dynamic.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/is_finite.prototxt b/src/frontends/onnx/tests/models/is_finite.prototxt index f597e20d357a5f..868ebfc5e3703e 100644 --- a/src/frontends/onnx/tests/models/is_finite.prototxt +++ b/src/frontends/onnx/tests/models/is_finite.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/is_inf.prototxt b/src/frontends/onnx/tests/models/is_inf.prototxt index 
cfffa8c467df05..c8a6535da9fecf 100644 --- a/src/frontends/onnx/tests/models/is_inf.prototxt +++ b/src/frontends/onnx/tests/models/is_inf.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/is_inf_negative.prototxt b/src/frontends/onnx/tests/models/is_inf_negative.prototxt index 7a9d7e664c2f1c..4a316c6544e869 100644 --- a/src/frontends/onnx/tests/models/is_inf_negative.prototxt +++ b/src/frontends/onnx/tests/models/is_inf_negative.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/is_inf_none.prototxt b/src/frontends/onnx/tests/models/is_inf_none.prototxt index 30da35b865b688..03361400bdf25b 100644 --- a/src/frontends/onnx/tests/models/is_inf_none.prototxt +++ b/src/frontends/onnx/tests/models/is_inf_none.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/is_inf_positive.prototxt b/src/frontends/onnx/tests/models/is_inf_positive.prototxt index 6d25dc3df8d0b4..c58f20e8d996e4 100644 --- a/src/frontends/onnx/tests/models/is_inf_positive.prototxt +++ b/src/frontends/onnx/tests/models/is_inf_positive.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/is_nan.prototxt b/src/frontends/onnx/tests/models/is_nan.prototxt index f6915dac0aa3d2..271b683d9ae615 100644 --- a/src/frontends/onnx/tests/models/is_nan.prototxt +++ b/src/frontends/onnx/tests/models/is_nan.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git 
a/src/frontends/onnx/tests/models/leaky_relu.prototxt b/src/frontends/onnx/tests/models/leaky_relu.prototxt index a673caf87a8214..ab5b863969c357 100644 --- a/src/frontends/onnx/tests/models/leaky_relu.prototxt +++ b/src/frontends/onnx/tests/models/leaky_relu.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/logsoftmax13_1D.prototxt b/src/frontends/onnx/tests/models/logsoftmax13_1D.prototxt index b75dd3ff52484d..87a0ff0637e4e0 100644 --- a/src/frontends/onnx/tests/models/logsoftmax13_1D.prototxt +++ b/src/frontends/onnx/tests/models/logsoftmax13_1D.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/logsoftmax13_2D.prototxt b/src/frontends/onnx/tests/models/logsoftmax13_2D.prototxt index 7117079cae2211..8c0b1e49210c7a 100644 --- a/src/frontends/onnx/tests/models/logsoftmax13_2D.prototxt +++ b/src/frontends/onnx/tests/models/logsoftmax13_2D.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/logsoftmax_0D.prototxt b/src/frontends/onnx/tests/models/logsoftmax_0D.prototxt index cd9526b3fbdc72..71ea091eaa5918 100644 --- a/src/frontends/onnx/tests/models/logsoftmax_0D.prototxt +++ b/src/frontends/onnx/tests/models/logsoftmax_0D.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/logsoftmax_1D.prototxt b/src/frontends/onnx/tests/models/logsoftmax_1D.prototxt index 46799d23e8430a..3d50287d7f226f 100644 --- a/src/frontends/onnx/tests/models/logsoftmax_1D.prototxt +++ b/src/frontends/onnx/tests/models/logsoftmax_1D.prototxt 
@@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/lp_norm_default.prototxt b/src/frontends/onnx/tests/models/lp_norm_default.prototxt index 9fdbab7f040aca..6c23d670dd0949 100644 --- a/src/frontends/onnx/tests/models/lp_norm_default.prototxt +++ b/src/frontends/onnx/tests/models/lp_norm_default.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/lp_norm_default_dynamic.prototxt b/src/frontends/onnx/tests/models/lp_norm_default_dynamic.prototxt index 55d3bd3e705e09..e37e273fc1b7db 100644 --- a/src/frontends/onnx/tests/models/lp_norm_default_dynamic.prototxt +++ b/src/frontends/onnx/tests/models/lp_norm_default_dynamic.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/lp_norm_p1.prototxt b/src/frontends/onnx/tests/models/lp_norm_p1.prototxt index fd6993a8c83f7a..92e4e54b4cb33a 100644 --- a/src/frontends/onnx/tests/models/lp_norm_p1.prototxt +++ b/src/frontends/onnx/tests/models/lp_norm_p1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/lp_norm_p2.prototxt b/src/frontends/onnx/tests/models/lp_norm_p2.prototxt index 07d570ac8efb24..da25655e5dc859 100644 --- a/src/frontends/onnx/tests/models/lp_norm_p2.prototxt +++ b/src/frontends/onnx/tests/models/lp_norm_p2.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/lstm_bidir_const.prototxt 
b/src/frontends/onnx/tests/models/lstm_bidir_const.prototxt index 0363f7ffaded85..938f9ab2dba349 100644 --- a/src/frontends/onnx/tests/models/lstm_bidir_const.prototxt +++ b/src/frontends/onnx/tests/models/lstm_bidir_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/lstm_bidir_mixed_seq_const.prototxt b/src/frontends/onnx/tests/models/lstm_bidir_mixed_seq_const.prototxt index 28ad3eed2199d1..cb9f4545ab435f 100644 --- a/src/frontends/onnx/tests/models/lstm_bidir_mixed_seq_const.prototxt +++ b/src/frontends/onnx/tests/models/lstm_bidir_mixed_seq_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/lstm_dynamic_batch_size_and_seq_len.prototxt b/src/frontends/onnx/tests/models/lstm_dynamic_batch_size_and_seq_len.prototxt index f05e32c39589d0..c9c51981f110bc 100644 --- a/src/frontends/onnx/tests/models/lstm_dynamic_batch_size_and_seq_len.prototxt +++ b/src/frontends/onnx/tests/models/lstm_dynamic_batch_size_and_seq_len.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/lstm_fwd_clip_const.prototxt b/src/frontends/onnx/tests/models/lstm_fwd_clip_const.prototxt index fff265e3d0fcdf..076a5bccac3185 100644 --- a/src/frontends/onnx/tests/models/lstm_fwd_clip_const.prototxt +++ b/src/frontends/onnx/tests/models/lstm_fwd_clip_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/lstm_fwd_default_const.prototxt b/src/frontends/onnx/tests/models/lstm_fwd_default_const.prototxt index c0a005698ee0f3..4d2e7dea7bab35 100644 --- 
a/src/frontends/onnx/tests/models/lstm_fwd_default_const.prototxt +++ b/src/frontends/onnx/tests/models/lstm_fwd_default_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/lstm_fwd_hardsigmoid_activation.prototxt b/src/frontends/onnx/tests/models/lstm_fwd_hardsigmoid_activation.prototxt index 0866e3b679a202..dc35d1f737b6bd 100644 --- a/src/frontends/onnx/tests/models/lstm_fwd_hardsigmoid_activation.prototxt +++ b/src/frontends/onnx/tests/models/lstm_fwd_hardsigmoid_activation.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/lstm_fwd_large_batch_no_clip.prototxt b/src/frontends/onnx/tests/models/lstm_fwd_large_batch_no_clip.prototxt index 1ba0544ebe760a..22e473ac66e6ec 100644 --- a/src/frontends/onnx/tests/models/lstm_fwd_large_batch_no_clip.prototxt +++ b/src/frontends/onnx/tests/models/lstm_fwd_large_batch_no_clip.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/lstm_fwd_mixed_seq.prototxt b/src/frontends/onnx/tests/models/lstm_fwd_mixed_seq.prototxt index c7e4f3565c3585..89400006b88cf6 100644 --- a/src/frontends/onnx/tests/models/lstm_fwd_mixed_seq.prototxt +++ b/src/frontends/onnx/tests/models/lstm_fwd_mixed_seq.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/lstm_fwd_mixed_seq_const.prototxt b/src/frontends/onnx/tests/models/lstm_fwd_mixed_seq_const.prototxt index 0f5dfa2e567193..3586851bec7444 100644 --- a/src/frontends/onnx/tests/models/lstm_fwd_mixed_seq_const.prototxt +++ 
b/src/frontends/onnx/tests/models/lstm_fwd_mixed_seq_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/lstm_fwd_with_clip_peepholes.prototxt b/src/frontends/onnx/tests/models/lstm_fwd_with_clip_peepholes.prototxt index 4c59b0d39fbc0f..2a218f089240a5 100644 --- a/src/frontends/onnx/tests/models/lstm_fwd_with_clip_peepholes.prototxt +++ b/src/frontends/onnx/tests/models/lstm_fwd_with_clip_peepholes.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/lstm_reverse_const.prototxt b/src/frontends/onnx/tests/models/lstm_reverse_const.prototxt index 5bf316b788cd91..b4e3d70338d421 100644 --- a/src/frontends/onnx/tests/models/lstm_reverse_const.prototxt +++ b/src/frontends/onnx/tests/models/lstm_reverse_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/lstm_reverse_mixed_seq_const.prototxt b/src/frontends/onnx/tests/models/lstm_reverse_mixed_seq_const.prototxt index eae833aad33ae0..e6c65b092f20f8 100644 --- a/src/frontends/onnx/tests/models/lstm_reverse_mixed_seq_const.prototxt +++ b/src/frontends/onnx/tests/models/lstm_reverse_mixed_seq_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/matmul.prototxt b/src/frontends/onnx/tests/models/matmul.prototxt index 2988c24577bd82..f68d68e189dd1d 100644 --- a/src/frontends/onnx/tests/models/matmul.prototxt +++ b/src/frontends/onnx/tests/models/matmul.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" 
graph { node { input: "a" diff --git a/src/frontends/onnx/tests/models/matmul_integer.prototxt b/src/frontends/onnx/tests/models/matmul_integer.prototxt index 813f4920ce7db0..1f7bad1d56c5e7 100644 --- a/src/frontends/onnx/tests/models/matmul_integer.prototxt +++ b/src/frontends/onnx/tests/models/matmul_integer.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/matmul_integer_2d_x_3d.prototxt b/src/frontends/onnx/tests/models/matmul_integer_2d_x_3d.prototxt index dcac5092c5a206..d6e1f69acbb550 100644 --- a/src/frontends/onnx/tests/models/matmul_integer_2d_x_3d.prototxt +++ b/src/frontends/onnx/tests/models/matmul_integer_2d_x_3d.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/matmul_integer_3d.prototxt b/src/frontends/onnx/tests/models/matmul_integer_3d.prototxt index e640c7714d4a86..2c7df7ca8c808d 100644 --- a/src/frontends/onnx/tests/models/matmul_integer_3d.prototxt +++ b/src/frontends/onnx/tests/models/matmul_integer_3d.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/matmul_integer_3d_x_2d.prototxt b/src/frontends/onnx/tests/models/matmul_integer_3d_x_2d.prototxt index e359be5d04d20e..2861cc22fbcbbc 100644 --- a/src/frontends/onnx/tests/models/matmul_integer_3d_x_2d.prototxt +++ b/src/frontends/onnx/tests/models/matmul_integer_3d_x_2d.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/matmul_integer_4d.prototxt b/src/frontends/onnx/tests/models/matmul_integer_4d.prototxt index a1c29d86d62a21..07775ad17429da 
100644 --- a/src/frontends/onnx/tests/models/matmul_integer_4d.prototxt +++ b/src/frontends/onnx/tests/models/matmul_integer_4d.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/matmul_integer_4d_no_zero_point.prototxt b/src/frontends/onnx/tests/models/matmul_integer_4d_no_zero_point.prototxt index 6f14518837ba1f..5767a64177b47e 100644 --- a/src/frontends/onnx/tests/models/matmul_integer_4d_no_zero_point.prototxt +++ b/src/frontends/onnx/tests/models/matmul_integer_4d_no_zero_point.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/matmul_integer_int8.prototxt b/src/frontends/onnx/tests/models/matmul_integer_int8.prototxt index e28705d03b8790..6fcbd8e5170dc7 100644 --- a/src/frontends/onnx/tests/models/matmul_integer_int8.prototxt +++ b/src/frontends/onnx/tests/models/matmul_integer_int8.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/matmul_integer_matrix_zero_point.prototxt b/src/frontends/onnx/tests/models/matmul_integer_matrix_zero_point.prototxt index 31bfa2e775da4d..8dac6d79c79b09 100644 --- a/src/frontends/onnx/tests/models/matmul_integer_matrix_zero_point.prototxt +++ b/src/frontends/onnx/tests/models/matmul_integer_matrix_zero_point.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/matmul_integer_no_zero_point.prototxt b/src/frontends/onnx/tests/models/matmul_integer_no_zero_point.prototxt index aefb304823cd3a..037e46e063d80a 100644 --- 
a/src/frontends/onnx/tests/models/matmul_integer_no_zero_point.prototxt +++ b/src/frontends/onnx/tests/models/matmul_integer_no_zero_point.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/matmul_integer_vectorized_zero_point.prototxt b/src/frontends/onnx/tests/models/matmul_integer_vectorized_zero_point.prototxt index 12974e6b4caf9f..37ccb6dadd897f 100644 --- a/src/frontends/onnx/tests/models/matmul_integer_vectorized_zero_point.prototxt +++ b/src/frontends/onnx/tests/models/matmul_integer_vectorized_zero_point.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/matmul_vec_ten3d.prototxt b/src/frontends/onnx/tests/models/matmul_vec_ten3d.prototxt index b21be7e48a6ea4..2747c2709da2b4 100644 --- a/src/frontends/onnx/tests/models/matmul_vec_ten3d.prototxt +++ b/src/frontends/onnx/tests/models/matmul_vec_ten3d.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/max.prototxt b/src/frontends/onnx/tests/models/max.prototxt index 84e8e8a45a8fc2..3c5d418d20a3e0 100644 --- a/src/frontends/onnx/tests/models/max.prototxt +++ b/src/frontends/onnx/tests/models/max.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data_0" diff --git a/src/frontends/onnx/tests/models/max_opset1.prototxt b/src/frontends/onnx/tests/models/max_opset1.prototxt index 2a69d5fe49859b..c8577a0ee9b19b 100644 --- a/src/frontends/onnx/tests/models/max_opset1.prototxt +++ b/src/frontends/onnx/tests/models/max_opset1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: 
"OpenVINO ONNX Frontend" graph { node { input: "data_0" diff --git a/src/frontends/onnx/tests/models/max_pool_2d_pads.prototxt b/src/frontends/onnx/tests/models/max_pool_2d_pads.prototxt index 06263b9247437f..6ee7522500e108 100644 --- a/src/frontends/onnx/tests/models/max_pool_2d_pads.prototxt +++ b/src/frontends/onnx/tests/models/max_pool_2d_pads.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/max_pool_3d.prototxt b/src/frontends/onnx/tests/models/max_pool_3d.prototxt index d84830a05d30ec..caddbf9eb9b62e 100644 --- a/src/frontends/onnx/tests/models/max_pool_3d.prototxt +++ b/src/frontends/onnx/tests/models/max_pool_3d.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/max_pool_4d_ceil_mode.prototxt b/src/frontends/onnx/tests/models/max_pool_4d_ceil_mode.prototxt index 456aa6df3c4773..2fef073d539368 100644 --- a/src/frontends/onnx/tests/models/max_pool_4d_ceil_mode.prototxt +++ b/src/frontends/onnx/tests/models/max_pool_4d_ceil_mode.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/max_pool_4d_ceil_strides.prototxt b/src/frontends/onnx/tests/models/max_pool_4d_ceil_strides.prototxt index dab7cfdb0908f3..0e3c18aaa45f19 100644 --- a/src/frontends/onnx/tests/models/max_pool_4d_ceil_strides.prototxt +++ b/src/frontends/onnx/tests/models/max_pool_4d_ceil_strides.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/max_pool_4d_dilations.prototxt b/src/frontends/onnx/tests/models/max_pool_4d_dilations.prototxt index 12cfdbe162bb2f..cd240f961ae064 
100644 --- a/src/frontends/onnx/tests/models/max_pool_4d_dilations.prototxt +++ b/src/frontends/onnx/tests/models/max_pool_4d_dilations.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/max_pool_4d_strides.prototxt b/src/frontends/onnx/tests/models/max_pool_4d_strides.prototxt index 2c207a1a9c0cc7..9dcec584effb62 100644 --- a/src/frontends/onnx/tests/models/max_pool_4d_strides.prototxt +++ b/src/frontends/onnx/tests/models/max_pool_4d_strides.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/max_pool_empty_auto_pad.prototxt b/src/frontends/onnx/tests/models/max_pool_empty_auto_pad.prototxt index bad6c74769c13c..6316de97fc5e48 100644 --- a/src/frontends/onnx/tests/models/max_pool_empty_auto_pad.prototxt +++ b/src/frontends/onnx/tests/models/max_pool_empty_auto_pad.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/max_pool_simple.prototxt b/src/frontends/onnx/tests/models/max_pool_simple.prototxt index dd433805e538ef..e567eba5b20229 100644 --- a/src/frontends/onnx/tests/models/max_pool_simple.prototxt +++ b/src/frontends/onnx/tests/models/max_pool_simple.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/max_pool_simple_named.prototxt b/src/frontends/onnx/tests/models/max_pool_simple_named.prototxt index 6e28e294bff7cc..3266a1a560411e 100644 --- a/src/frontends/onnx/tests/models/max_pool_simple_named.prototxt +++ b/src/frontends/onnx/tests/models/max_pool_simple_named.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX 
Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/max_pool_transposed.prototxt b/src/frontends/onnx/tests/models/max_pool_transposed.prototxt index 6f177a18165448..ee4fafbb74324d 100644 --- a/src/frontends/onnx/tests/models/max_pool_transposed.prototxt +++ b/src/frontends/onnx/tests/models/max_pool_transposed.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/max_pool_transposed_named.prototxt b/src/frontends/onnx/tests/models/max_pool_transposed_named.prototxt index f67e1aa47efc6f..7b62f47deb5926 100644 --- a/src/frontends/onnx/tests/models/max_pool_transposed_named.prototxt +++ b/src/frontends/onnx/tests/models/max_pool_transposed_named.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/mean.prototxt b/src/frontends/onnx/tests/models/mean.prototxt index 6a4db643940b24..ebf589982a0d22 100644 --- a/src/frontends/onnx/tests/models/mean.prototxt +++ b/src/frontends/onnx/tests/models/mean.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data_0" diff --git a/src/frontends/onnx/tests/models/mean_opset1.prototxt b/src/frontends/onnx/tests/models/mean_opset1.prototxt index 06a1da0223e3a8..38655e931f0fd6 100644 --- a/src/frontends/onnx/tests/models/mean_opset1.prototxt +++ b/src/frontends/onnx/tests/models/mean_opset1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data_0" diff --git a/src/frontends/onnx/tests/models/min_two_inputs.prototxt b/src/frontends/onnx/tests/models/min_two_inputs.prototxt index 7fb836420d32c6..eff51e9b29cced 100644 --- 
a/src/frontends/onnx/tests/models/min_two_inputs.prototxt +++ b/src/frontends/onnx/tests/models/min_two_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data_0" diff --git a/src/frontends/onnx/tests/models/min_two_inputs_opset1.prototxt b/src/frontends/onnx/tests/models/min_two_inputs_opset1.prototxt index 9d02bfa4907fc7..c02ac401d169f5 100644 --- a/src/frontends/onnx/tests/models/min_two_inputs_opset1.prototxt +++ b/src/frontends/onnx/tests/models/min_two_inputs_opset1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data_0" diff --git a/src/frontends/onnx/tests/models/missing_input.prototxt b/src/frontends/onnx/tests/models/missing_input.prototxt index a430045de04d3d..e11df36039e2fb 100644 --- a/src/frontends/onnx/tests/models/missing_input.prototxt +++ b/src/frontends/onnx/tests/models/missing_input.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "I0" diff --git a/src/frontends/onnx/tests/models/missing_op_domain.prototxt b/src/frontends/onnx/tests/models/missing_op_domain.prototxt index 943b19dd0ce23f..b5c9dc21f256e3 100644 --- a/src/frontends/onnx/tests/models/missing_op_domain.prototxt +++ b/src/frontends/onnx/tests/models/missing_op_domain.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mm_nms_rotated.prototxt b/src/frontends/onnx/tests/models/mm_nms_rotated.prototxt index feda742ec9a805..ddbf255f2b35d9 100644 --- a/src/frontends/onnx/tests/models/mm_nms_rotated.prototxt +++ b/src/frontends/onnx/tests/models/mm_nms_rotated.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx_frontend_test" +producer_name: "OpenVINO ONNX Frontend" 
graph { node { input: "boxes" diff --git a/src/frontends/onnx/tests/models/mod_incorrect_fmod.prototxt b/src/frontends/onnx/tests/models/mod_incorrect_fmod.prototxt index 86305d5bd90b5b..3ab41092ebc261 100644 --- a/src/frontends/onnx/tests/models/mod_incorrect_fmod.prototxt +++ b/src/frontends/onnx/tests/models/mod_incorrect_fmod.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mod_sign.prototxt b/src/frontends/onnx/tests/models/mod_sign.prototxt index 50be1dfb479d4b..dcd46e0974c153 100644 --- a/src/frontends/onnx/tests/models/mod_sign.prototxt +++ b/src/frontends/onnx/tests/models/mod_sign.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mod_sign_broadcast.prototxt b/src/frontends/onnx/tests/models/mod_sign_broadcast.prototxt index 55c5f1f7414b05..68fd66f568caed 100644 --- a/src/frontends/onnx/tests/models/mod_sign_broadcast.prototxt +++ b/src/frontends/onnx/tests/models/mod_sign_broadcast.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mod_sign_f32.prototxt b/src/frontends/onnx/tests/models/mod_sign_f32.prototxt index 2a86ca082bd885..6246cef9053aa7 100644 --- a/src/frontends/onnx/tests/models/mod_sign_f32.prototxt +++ b/src/frontends/onnx/tests/models/mod_sign_f32.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mod_sign_fmod.prototxt b/src/frontends/onnx/tests/models/mod_sign_fmod.prototxt index 84dd4aac3cf51f..7846d95ab1fd61 100644 --- a/src/frontends/onnx/tests/models/mod_sign_fmod.prototxt +++ 
b/src/frontends/onnx/tests/models/mod_sign_fmod.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mod_sign_fmod_broadcast.prototxt b/src/frontends/onnx/tests/models/mod_sign_fmod_broadcast.prototxt index 826583b48b4a42..a494a72ff55959 100644 --- a/src/frontends/onnx/tests/models/mod_sign_fmod_broadcast.prototxt +++ b/src/frontends/onnx/tests/models/mod_sign_fmod_broadcast.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mod_sign_fmod_f32.prototxt b/src/frontends/onnx/tests/models/mod_sign_fmod_f32.prototxt index f8bfc3621b0877..05d2710c7f4e98 100644 --- a/src/frontends/onnx/tests/models/mod_sign_fmod_f32.prototxt +++ b/src/frontends/onnx/tests/models/mod_sign_fmod_f32.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mod_sign_i64.prototxt b/src/frontends/onnx/tests/models/mod_sign_i64.prototxt index 8043c896d1514e..a540a840fd8c1e 100644 --- a/src/frontends/onnx/tests/models/mod_sign_i64.prototxt +++ b/src/frontends/onnx/tests/models/mod_sign_i64.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/add_1D.prototxt b/src/frontends/onnx/tests/models/model_editor/add_1D.prototxt index 42725b13d1ac85..234cf4563e8374 100644 --- a/src/frontends/onnx/tests/models/model_editor/add_1D.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/add_1D.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git 
a/src/frontends/onnx/tests/models/model_editor/add_1D_invalid.prototxt b/src/frontends/onnx/tests/models/model_editor/add_1D_invalid.prototxt index df86c29455b8fe..d65ec118e8ca5e 100644 --- a/src/frontends/onnx/tests/models/model_editor/add_1D_invalid.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/add_1D_invalid.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/add_1D_with_initializers.prototxt b/src/frontends/onnx/tests/models/model_editor/add_1D_with_initializers.prototxt index 138c345cdf0b37..b894a3f5673162 100644 --- a/src/frontends/onnx/tests/models/model_editor/add_1D_with_initializers.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/add_1D_with_initializers.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/add_1D_with_initializers_only.prototxt b/src/frontends/onnx/tests/models/model_editor/add_1D_with_initializers_only.prototxt index 2b0a72a8dfb2ba..51bec7b4080adb 100644 --- a/src/frontends/onnx/tests/models/model_editor/add_1D_with_initializers_only.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/add_1D_with_initializers_only.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/add_ab.prototxt b/src/frontends/onnx/tests/models/model_editor/add_ab.prototxt index b614b84c379dd9..8b55dcca2ae021 100644 --- a/src/frontends/onnx/tests/models/model_editor/add_ab.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/add_ab.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git 
a/src/frontends/onnx/tests/models/model_editor/add_ab_duplicated_output.prototxt b/src/frontends/onnx/tests/models/model_editor/add_ab_duplicated_output.prototxt index d1645c130640a9..01f72d3895886a 100644 --- a/src/frontends/onnx/tests/models/model_editor/add_ab_duplicated_output.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/add_ab_duplicated_output.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/add_abc.prototxt b/src/frontends/onnx/tests/models/model_editor/add_abc.prototxt index 05c3e997f28193..b663f698df5040 100644 --- a/src/frontends/onnx/tests/models/model_editor/add_abc.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/add_abc.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/elem_type_missing_in_input.prototxt b/src/frontends/onnx/tests/models/model_editor/elem_type_missing_in_input.prototxt index 2d7b7780562010..92cb6dda869a95 100644 --- a/src/frontends/onnx/tests/models/model_editor/elem_type_missing_in_input.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/elem_type_missing_in_input.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/invalid_input_no_tensor_type.prototxt b/src/frontends/onnx/tests/models/model_editor/invalid_input_no_tensor_type.prototxt index e1df2b561419a7..bdc1dc73daad60 100644 --- a/src/frontends/onnx/tests/models/model_editor/invalid_input_no_tensor_type.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/invalid_input_no_tensor_type.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO 
ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/invalid_input_no_type.prototxt b/src/frontends/onnx/tests/models/model_editor/invalid_input_no_type.prototxt index 81f895d945c158..55ff962cea904e 100644 --- a/src/frontends/onnx/tests/models/model_editor/invalid_input_no_type.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/invalid_input_no_type.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/onnx_shape_infer_exception.prototxt b/src/frontends/onnx/tests/models/model_editor/onnx_shape_infer_exception.prototxt index 7577695024bb24..728b9c12d04714 100644 --- a/src/frontends/onnx/tests/models/model_editor/onnx_shape_infer_exception.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/onnx_shape_infer_exception.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_all_edges_from_one_source_and_merge_all_new_inputs.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_all_edges_from_one_source_and_merge_all_new_inputs.prototxt index e17c6ebb21c9bb..50e621ddba6e8e 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_all_edges_from_one_source_and_merge_all_new_inputs.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_all_edges_from_one_source_and_merge_all_new_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "relu1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_custom_edges_from_different_sources_and_merge_all_new_inputs.prototxt 
b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_custom_edges_from_different_sources_and_merge_all_new_inputs.prototxt index e3dae07e497263..6be942b1cad458 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_custom_edges_from_different_sources_and_merge_all_new_inputs.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_custom_edges_from_different_sources_and_merge_all_new_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_one_another_edge_and_merge_all_new_inputs.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_one_another_edge_and_merge_all_new_inputs.prototxt index 7525788f9f7376..8295b27ff01a60 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_one_another_edge_and_merge_all_new_inputs.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_one_another_edge_and_merge_all_new_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_one_edge_and_merge_all_new_inputs.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_one_edge_and_merge_all_new_inputs.prototxt index 89bc4b02ac59a7..acfc4a50fa167e 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_one_edge_and_merge_all_new_inputs.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_one_edge_and_merge_all_new_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git 
a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_two_edges_from_different_sources_and_merge_all_new_inputs.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_two_edges_from_different_sources_and_merge_all_new_inputs.prototxt index df1845bae0389a..0ebee88d7bd36b 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_two_edges_from_different_sources_and_merge_all_new_inputs.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_two_edges_from_different_sources_and_merge_all_new_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_two_edges_from_one_source_and_merge_all_new_inputs.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_two_edges_from_one_source_and_merge_all_new_inputs.prototxt index 435aff8fbb7c2b..68fe8f5bb5fc86 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_two_edges_from_one_source_and_merge_all_new_inputs.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__cut_two_edges_from_one_source_and_merge_all_new_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__existing_inputs_and_outputs_based_extraction.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__existing_inputs_and_outputs_based_extraction.prototxt index 535ab6a83b4dff..8dfeffeb86db7d 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__existing_inputs_and_outputs_based_extraction.prototxt +++ 
b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__existing_inputs_and_outputs_based_extraction.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__initializer_to_input_replacement.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__initializer_to_input_replacement.prototxt index 64d29267fb6aca..14a5cf24a24a02 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__initializer_to_input_replacement.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__initializer_to_input_replacement.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" doc_string: "This model contains the first few nodes of the ONNX Inception V1 model" graph { name: "Inception V1 fragment" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__initializer_without_matching_input_tail_cut.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__initializer_without_matching_input_tail_cut.prototxt index a26c205ebc8af2..f0dfcfcd1d916b 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__initializer_without_matching_input_tail_cut.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__initializer_without_matching_input_tail_cut.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" doc_string: "This model contains the first few nodes of the ONNX Inception V1 model" graph { name: "Inception V1 fragment" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers.prototxt 
b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers.prototxt index 689ddab137aef9..6f579f193ef2b0 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_2.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_2.prototxt index f971386f7bb21e..f389ea0044a3d1 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_2.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_2.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_3.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_3.prototxt index 314cb096a9b2e7..2b1b754972ae91 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_3.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_3.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git 
a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_4.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_4.prototxt index dc7c3b7c91db03..26b12ea574687f 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_4.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_4.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_5.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_5.prototxt index e2afe1ff1e2702..2ecb08363769b1 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_5.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_5.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_custom_names.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_custom_names.prototxt index bc8894f9cd2f29..2452ce13ba66cc 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_custom_names.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__input_edge_from_tensor_with_multiple_consumers_custom_names.prototxt @@ 
-1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_deeper_head_cut.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_deeper_head_cut.prototxt index bbd569818b26c4..26126ecc6aa2cd 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_deeper_head_cut.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_deeper_head_cut.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" doc_string: "This model contains the first few nodes of the ONNX Inception V1 model" graph { name: "Inception V1 fragment" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_deeper_tail_cut.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_deeper_tail_cut.prototxt index dd42e019cfbf01..9dbbaa2ff032b6 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_deeper_tail_cut.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_deeper_tail_cut.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" doc_string: "This model contains the first few nodes of the ONNX Inception V1 model" graph { name: "Inception V1 fragment" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_head_cut.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_head_cut.prototxt index 41984db10eac71..5d3b959ebeca10 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_head_cut.prototxt +++ 
b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_head_cut.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" doc_string: "This model contains the first few nodes of the ONNX Inception V1 model" graph { name: "Inception V1 fragment" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_tail_cut.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_tail_cut.prototxt index 59efc1294d5d30..6db92acc8108b7 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_tail_cut.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_tail_cut.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" doc_string: "This model contains the first few nodes of the ONNX Inception V1 model" graph { name: "Inception V1 fragment" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_with_initializer_tail_cut.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_with_initializer_tail_cut.prototxt index ad845070c8362d..67fc71d3de2289 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_with_initializer_tail_cut.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__linear_model_with_initializer_tail_cut.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" doc_string: "This model contains the first few nodes of the ONNX Inception V1 model" graph { name: "Inception V1 fragment" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiout_op_output_edge.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiout_op_output_edge.prototxt index 
ccd953073ceaa4..236e6bf25f54fc 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiout_op_output_edge.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiout_op_output_edge.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_initializer.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_initializer.prototxt index 9f70d286c5c3a6..3a1be902eb4bf9 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_initializer.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_initializer.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_initializer_relu2_and_init.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_initializer_relu2_and_init.prototxt index 5c5083fb84a36e..3b70bb8686c6c6 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_initializer_relu2_and_init.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_initializer_relu2_and_init.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_input_relu2.prototxt 
b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_input_relu2.prototxt index 1319a560db2892..a20881202de4d1 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_input_relu2.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__multiple_consumers_of_graph_input_relu2.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__twice_input_edge_from_tensor_with_single_consumer.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__twice_input_edge_from_tensor_with_single_consumer.prototxt index 12cbda0da5d621..47ef5a664a6fa0 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__twice_input_edge_from_tensor_with_single_consumer.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__twice_input_edge_from_tensor_with_single_consumer.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__use_edge_mapper_with_graph_cutter_custom_names.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__use_edge_mapper_with_graph_cutter_custom_names.prototxt index 1a30240a8543ac..54c6b2b8cf3d1a 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/subgraph__use_edge_mapper_with_graph_cutter_custom_names.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/subgraph__use_edge_mapper_with_graph_cutter_custom_names.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git 
a/src/frontends/onnx/tests/models/model_editor/reference/unknown_input_value_info.prototxt b/src/frontends/onnx/tests/models/model_editor/reference/unknown_input_value_info.prototxt index c6f6674d03ec04..550b24332dde18 100644 --- a/src/frontends/onnx/tests/models/model_editor/reference/unknown_input_value_info.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/reference/unknown_input_value_info.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { name: "No name" node { diff --git a/src/frontends/onnx/tests/models/model_editor/shapes__add_two_inputs.prototxt b/src/frontends/onnx/tests/models/model_editor/shapes__add_two_inputs.prototxt index 295cd01185af6c..c0ab4ae86b6521 100644 --- a/src/frontends/onnx/tests/models/model_editor/shapes__add_two_inputs.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/shapes__add_two_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/shapes__dynamic_rank_in_model.prototxt b/src/frontends/onnx/tests/models/model_editor/shapes__dynamic_rank_in_model.prototxt index 7b50a839113a0b..c649c81b3428c7 100644 --- a/src/frontends/onnx/tests/models/model_editor/shapes__dynamic_rank_in_model.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/shapes__dynamic_rank_in_model.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/model_editor/subgraph__inception_head.prototxt b/src/frontends/onnx/tests/models/model_editor/subgraph__inception_head.prototxt index 1b024156b0e825..73a4236595863e 100644 --- a/src/frontends/onnx/tests/models/model_editor/subgraph__inception_head.prototxt +++ 
b/src/frontends/onnx/tests/models/model_editor/subgraph__inception_head.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" doc_string: "This model contains the first few nodes of the ONNX Inception V1 model" graph { name: "Inception V1 fragment" diff --git a/src/frontends/onnx/tests/models/model_editor/subgraph__inception_head_with_initializer.prototxt b/src/frontends/onnx/tests/models/model_editor/subgraph__inception_head_with_initializer.prototxt index ca779cb9fffca6..c1c75193fb5d0a 100644 --- a/src/frontends/onnx/tests/models/model_editor/subgraph__inception_head_with_initializer.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/subgraph__inception_head_with_initializer.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" doc_string: "This model contains the first few nodes of the ONNX Inception V1 model" graph { name: "Inception V1 fragment" diff --git a/src/frontends/onnx/tests/models/model_editor/subgraph__initializer_without_matching_input.prototxt b/src/frontends/onnx/tests/models/model_editor/subgraph__initializer_without_matching_input.prototxt index 06fd4c315b35bc..0a44e133d7b6d0 100644 --- a/src/frontends/onnx/tests/models/model_editor/subgraph__initializer_without_matching_input.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/subgraph__initializer_without_matching_input.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" doc_string: "This model contains the first few nodes of the ONNX Inception V1 model" graph { name: "Inception V1 fragment" diff --git a/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests.prototxt b/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests.prototxt index fe8972df10c98d..8f21fa8044160f 100644 --- 
a/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests_2.prototxt b/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests_2.prototxt index a5fbee597db549..dfbae0a92ea14f 100644 --- a/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests_2.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests_2.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests_3.prototxt b/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests_3.prototxt index 486ed999b0aad9..8441771ecb0fbb 100644 --- a/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests_3.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/subgraph_extraction_tests_3.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input1" diff --git a/src/frontends/onnx/tests/models/model_editor/topological_sort/add_abc_const_node_unsorted.prototxt b/src/frontends/onnx/tests/models/model_editor/topological_sort/add_abc_const_node_unsorted.prototxt index 73a89b213390ab..d1892a952f371a 100644 --- a/src/frontends/onnx/tests/models/model_editor/topological_sort/add_abc_const_node_unsorted.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/topological_sort/add_abc_const_node_unsorted.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git 
a/src/frontends/onnx/tests/models/model_editor/topological_sort/completely_unsorted.prototxt b/src/frontends/onnx/tests/models/model_editor/topological_sort/completely_unsorted.prototxt index 3a20867479a2f2..47d864d39a9284 100644 --- a/src/frontends/onnx/tests/models/model_editor/topological_sort/completely_unsorted.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/topological_sort/completely_unsorted.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "relu2" diff --git a/src/frontends/onnx/tests/models/model_editor/topological_sort/completely_unsorted_2.prototxt b/src/frontends/onnx/tests/models/model_editor/topological_sort/completely_unsorted_2.prototxt index 3053e0950b349a..7eaea81ceb5386 100644 --- a/src/frontends/onnx/tests/models/model_editor/topological_sort/completely_unsorted_2.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/topological_sort/completely_unsorted_2.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "test_data_generator" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input1" diff --git a/src/frontends/onnx/tests/models/model_editor/topological_sort/empty_input_name.prototxt b/src/frontends/onnx/tests/models/model_editor/topological_sort/empty_input_name.prototxt index 40e7132c5d9148..411ff57b5959a6 100644 --- a/src/frontends/onnx/tests/models/model_editor/topological_sort/empty_input_name.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/topological_sort/empty_input_name.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "" diff --git a/src/frontends/onnx/tests/models/model_editor/topological_sort/multioutput_split_unsorted.prototxt b/src/frontends/onnx/tests/models/model_editor/topological_sort/multioutput_split_unsorted.prototxt index 930753aadf458c..aa39617a19f9c4 100644 --- 
a/src/frontends/onnx/tests/models/model_editor/topological_sort/multioutput_split_unsorted.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/topological_sort/multioutput_split_unsorted.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "split_out_0" diff --git a/src/frontends/onnx/tests/models/model_editor/topological_sort/same_name_of_unsorted_node_and_initializer.prototxt b/src/frontends/onnx/tests/models/model_editor/topological_sort/same_name_of_unsorted_node_and_initializer.prototxt index 4ed1c9f618106e..7ac39f6a0b195f 100644 --- a/src/frontends/onnx/tests/models/model_editor/topological_sort/same_name_of_unsorted_node_and_initializer.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/topological_sort/same_name_of_unsorted_node_and_initializer.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/model_editor/topological_sort/two_nodes_swap.prototxt b/src/frontends/onnx/tests/models/model_editor/topological_sort/two_nodes_swap.prototxt index 1751e631246133..5de4aa7f8501d9 100644 --- a/src/frontends/onnx/tests/models/model_editor/topological_sort/two_nodes_swap.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/topological_sort/two_nodes_swap.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/model_editor/unknown_input_value_info.prototxt b/src/frontends/onnx/tests/models/model_editor/unknown_input_value_info.prototxt index 7c983d31347594..66ae08a2383293 100644 --- a/src/frontends/onnx/tests/models/model_editor/unknown_input_value_info.prototxt +++ b/src/frontends/onnx/tests/models/model_editor/unknown_input_value_info.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_data_generator" 
+producer_name: "OpenVINO ONNX Frontend" graph { name: "No_name" node { diff --git a/src/frontends/onnx/tests/models/model_with_metadata.prototxt b/src/frontends/onnx/tests/models/model_with_metadata.prototxt index 6ca942ab0dadbf..9ba58b895a57f2 100644 --- a/src/frontends/onnx/tests/models/model_with_metadata.prototxt +++ b/src/frontends/onnx/tests/models/model_with_metadata.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mul_v6.prototxt b/src/frontends/onnx/tests/models/mul_v6.prototxt index d4c73e8f11ea12..7740de67f46c06 100644 --- a/src/frontends/onnx/tests/models/mul_v6.prototxt +++ b/src/frontends/onnx/tests/models/mul_v6.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mul_v6_broadcast_axes_1_2.prototxt b/src/frontends/onnx/tests/models/mul_v6_broadcast_axes_1_2.prototxt index 0a0b40fb60dfb8..482669c9d82279 100644 --- a/src/frontends/onnx/tests/models/mul_v6_broadcast_axes_1_2.prototxt +++ b/src/frontends/onnx/tests/models/mul_v6_broadcast_axes_1_2.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mul_v6_broadcast_axis_1.prototxt b/src/frontends/onnx/tests/models/mul_v6_broadcast_axis_1.prototxt index a6b797d54d2288..227ec2b5c2e5b2 100644 --- a/src/frontends/onnx/tests/models/mul_v6_broadcast_axis_1.prototxt +++ b/src/frontends/onnx/tests/models/mul_v6_broadcast_axis_1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mul_v6_broadcast_no_axis.prototxt 
b/src/frontends/onnx/tests/models/mul_v6_broadcast_no_axis.prototxt index 044f25294bbc76..73626c82deaee1 100644 --- a/src/frontends/onnx/tests/models/mul_v6_broadcast_no_axis.prototxt +++ b/src/frontends/onnx/tests/models/mul_v6_broadcast_no_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mul_v7.prototxt b/src/frontends/onnx/tests/models/mul_v7.prototxt index 2ce131bc51b91d..ba47c61720ce73 100644 --- a/src/frontends/onnx/tests/models/mul_v7.prototxt +++ b/src/frontends/onnx/tests/models/mul_v7.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/mul_v7_broadcast.prototxt b/src/frontends/onnx/tests/models/mul_v7_broadcast.prototxt index e790d44adbe65f..4bc9057312ae5d 100644 --- a/src/frontends/onnx/tests/models/mul_v7_broadcast.prototxt +++ b/src/frontends/onnx/tests/models/mul_v7_broadcast.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/multiple_slices_last_layer.prototxt b/src/frontends/onnx/tests/models/multiple_slices_last_layer.prototxt index 6804369b77c35d..036af81ffb6a18 100644 --- a/src/frontends/onnx/tests/models/multiple_slices_last_layer.prototxt +++ b/src/frontends/onnx/tests/models/multiple_slices_last_layer.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "643" diff --git a/src/frontends/onnx/tests/models/mvn_v6.prototxt b/src/frontends/onnx/tests/models/mvn_v6.prototxt index 8dc05acc8d4910..94cec588094836 100644 --- a/src/frontends/onnx/tests/models/mvn_v6.prototxt +++ b/src/frontends/onnx/tests/models/mvn_v6.prototxt @@ -1,5 +1,5 @@ 
ir_version: 4 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/negativelog_likelihood_loss.prototxt b/src/frontends/onnx/tests/models/negativelog_likelihood_loss.prototxt index 0383bb878bac13..576676073b7741 100644 --- a/src/frontends/onnx/tests/models/negativelog_likelihood_loss.prototxt +++ b/src/frontends/onnx/tests/models/negativelog_likelihood_loss.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/nms_default_score_threshold.prototxt b/src/frontends/onnx/tests/models/nms_default_score_threshold.prototxt index 550da618065b37..caa865bbf40395 100644 --- a/src/frontends/onnx/tests/models/nms_default_score_threshold.prototxt +++ b/src/frontends/onnx/tests/models/nms_default_score_threshold.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "ONNX Frontend" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "max_output_boxes" diff --git a/src/frontends/onnx/tests/models/non_zero_1d.prototxt b/src/frontends/onnx/tests/models/non_zero_1d.prototxt index 1eb28756d29271..1c678d696278e9 100644 --- a/src/frontends/onnx/tests/models/non_zero_1d.prototxt +++ b/src/frontends/onnx/tests/models/non_zero_1d.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/non_zero_1d_float.prototxt b/src/frontends/onnx/tests/models/non_zero_1d_float.prototxt index 33c67dd722b13a..7a50c4ea329fae 100644 --- a/src/frontends/onnx/tests/models/non_zero_1d_float.prototxt +++ b/src/frontends/onnx/tests/models/non_zero_1d_float.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git 
a/src/frontends/onnx/tests/models/non_zero_2d_bool.prototxt b/src/frontends/onnx/tests/models/non_zero_2d_bool.prototxt index 90d9bd8462bcbe..185dcc59f36007 100644 --- a/src/frontends/onnx/tests/models/non_zero_2d_bool.prototxt +++ b/src/frontends/onnx/tests/models/non_zero_2d_bool.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/non_zero_3d.prototxt b/src/frontends/onnx/tests/models/non_zero_3d.prototxt index 0fcb9e8507e847..d76a3fdf130dac 100644 --- a/src/frontends/onnx/tests/models/non_zero_3d.prototxt +++ b/src/frontends/onnx/tests/models/non_zero_3d.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/non_zero_scalar.prototxt b/src/frontends/onnx/tests/models/non_zero_scalar.prototxt index 4addc79758077f..9f88e407b777a0 100644 --- a/src/frontends/onnx/tests/models/non_zero_scalar.prototxt +++ b/src/frontends/onnx/tests/models/non_zero_scalar.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/nonmaxsuppression_center_point_box_format.prototxt b/src/frontends/onnx/tests/models/nonmaxsuppression_center_point_box_format.prototxt index 18ae732026ded7..044dd6babac9f7 100644 --- a/src/frontends/onnx/tests/models/nonmaxsuppression_center_point_box_format.prototxt +++ b/src/frontends/onnx/tests/models/nonmaxsuppression_center_point_box_format.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "boxes" diff --git a/src/frontends/onnx/tests/models/nonmaxsuppression_single_box.prototxt b/src/frontends/onnx/tests/models/nonmaxsuppression_single_box.prototxt index 
6ded770ea25778..7bf95c1bc4e7ac 100644 --- a/src/frontends/onnx/tests/models/nonmaxsuppression_single_box.prototxt +++ b/src/frontends/onnx/tests/models/nonmaxsuppression_single_box.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "boxes" diff --git a/src/frontends/onnx/tests/models/nonmaxsuppression_v9_single_box.prototxt b/src/frontends/onnx/tests/models/nonmaxsuppression_v9_single_box.prototxt index f92e3eaf22a22e..5514c7ef75d6b3 100644 --- a/src/frontends/onnx/tests/models/nonmaxsuppression_v9_single_box.prototxt +++ b/src/frontends/onnx/tests/models/nonmaxsuppression_v9_single_box.prototxt @@ -1,5 +1,5 @@ ir_version: 10 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "boxes" diff --git a/src/frontends/onnx/tests/models/normalize.prototxt b/src/frontends/onnx/tests/models/normalize.prototxt index c86713ea6a4879..ca4709c628189b 100644 --- a/src/frontends/onnx/tests/models/normalize.prototxt +++ b/src/frontends/onnx/tests/models/normalize.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test" node { diff --git a/src/frontends/onnx/tests/models/not_supported.prototxt b/src/frontends/onnx/tests/models/not_supported.prototxt index eed00351629d68..8a1fce1e1cf3ce 100644 --- a/src/frontends/onnx/tests/models/not_supported.prototxt +++ b/src/frontends/onnx/tests/models/not_supported.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/one_hot_axis.prototxt b/src/frontends/onnx/tests/models/one_hot_axis.prototxt index 93353739765926..adc4f28ca6db06 100644 --- a/src/frontends/onnx/tests/models/one_hot_axis.prototxt +++ b/src/frontends/onnx/tests/models/one_hot_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph 
ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "indices" diff --git a/src/frontends/onnx/tests/models/one_hot_no_axis.prototxt b/src/frontends/onnx/tests/models/one_hot_no_axis.prototxt index bfa176e9d6ca59..32e529cbf45693 100644 --- a/src/frontends/onnx/tests/models/one_hot_no_axis.prototxt +++ b/src/frontends/onnx/tests/models/one_hot_no_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "indices" diff --git a/src/frontends/onnx/tests/models/onnx_external_data.prototxt b/src/frontends/onnx/tests/models/onnx_external_data.prototxt index ac9bb40a30f610..d396a05460be99 100644 --- a/src/frontends/onnx/tests/models/onnx_external_data.prototxt +++ b/src/frontends/onnx/tests/models/onnx_external_data.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/org.openvinotoolkit/deformable_conv_2d.prototxt b/src/frontends/onnx/tests/models/org.openvinotoolkit/deformable_conv_2d.prototxt index 7c211c034e68a3..5e2c70650456d2 100644 --- a/src/frontends/onnx/tests/models/org.openvinotoolkit/deformable_conv_2d.prototxt +++ b/src/frontends/onnx/tests/models/org.openvinotoolkit/deformable_conv_2d.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/detection_output.prototxt b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/detection_output.prototxt index 12175ba15a2197..5d0bce8d0ecaca 100644 --- a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/detection_output.prototxt +++ b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/detection_output.prototxt @@ -1,5 +1,5 @@ 
ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "rois" diff --git a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/detection_output_most_attrs_default.prototxt b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/detection_output_most_attrs_default.prototxt index 01bd66b10b0476..fc6db1fb4cac49 100644 --- a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/detection_output_most_attrs_default.prototxt +++ b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/detection_output_most_attrs_default.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "rois" diff --git a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.prototxt b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.prototxt index 1863687d27215c..aa8470bdb67b94 100644 --- a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.prototxt +++ b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/generate_proposals_single_image.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "im_info" diff --git a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/group_norm.prototxt b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/group_norm.prototxt index ea2106b5621be4..1a6dbb12d88a88 100644 --- a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/group_norm.prototxt +++ b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/group_norm.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: 
"onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/prior_grid_generator.prototxt b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/prior_grid_generator.prototxt index 987f308761a041..6973c428dec9b8 100644 --- a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/prior_grid_generator.prototxt +++ b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/prior_grid_generator.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.prototxt b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.prototxt index c761983667135f..e3ad8745e7358a 100644 --- a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.prototxt +++ b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/roi_feature_extractor.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "rois" diff --git a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/topk_rios.prototxt b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/topk_rios.prototxt index 67627a0743e5e1..153b531f273cf8 100644 --- a/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/topk_rios.prototxt +++ b/src/frontends/onnx/tests/models/org.openvinotoolkit/experimental_detectron/topk_rios.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git 
a/src/frontends/onnx/tests/models/org.openvinotoolkit/generate_proposals.prototxt b/src/frontends/onnx/tests/models/org.openvinotoolkit/generate_proposals.prototxt index 4214bdd1ea2c95..e8c0ee4bdb4aac 100644 --- a/src/frontends/onnx/tests/models/org.openvinotoolkit/generate_proposals.prototxt +++ b/src/frontends/onnx/tests/models/org.openvinotoolkit/generate_proposals.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "OpenVINO" +producer_name: "OpenVINO ONNX Frontend" graph { name: "just GenerateProposals" node { diff --git a/src/frontends/onnx/tests/models/org.openvinotoolkit/generate_proposals_batch2.prototxt b/src/frontends/onnx/tests/models/org.openvinotoolkit/generate_proposals_batch2.prototxt index 7c393bba887ca0..b3bce7e5c2af56 100644 --- a/src/frontends/onnx/tests/models/org.openvinotoolkit/generate_proposals_batch2.prototxt +++ b/src/frontends/onnx/tests/models/org.openvinotoolkit/generate_proposals_batch2.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "OpenVINO" +producer_name: "OpenVINO ONNX Frontend" graph { name: "just GenerateProposals" node { diff --git a/src/frontends/onnx/tests/models/org.pytorch/adaptive_avg_pooling2d_chw.prototxt b/src/frontends/onnx/tests/models/org.pytorch/adaptive_avg_pooling2d_chw.prototxt index 72f439dbbd0262..33faa4ef104247 100644 --- a/src/frontends/onnx/tests/models/org.pytorch/adaptive_avg_pooling2d_chw.prototxt +++ b/src/frontends/onnx/tests/models/org.pytorch/adaptive_avg_pooling2d_chw.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/org.pytorch/adaptive_avg_pooling2d_nchw.prototxt b/src/frontends/onnx/tests/models/org.pytorch/adaptive_avg_pooling2d_nchw.prototxt index 0c0253b4e7a1b8..01aa1e0ef54a71 100644 --- a/src/frontends/onnx/tests/models/org.pytorch/adaptive_avg_pooling2d_nchw.prototxt +++ 
b/src/frontends/onnx/tests/models/org.pytorch/adaptive_avg_pooling2d_nchw.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/override_op.prototxt b/src/frontends/onnx/tests/models/override_op.prototxt index 5f9534f2715322..9dbf7b022d86ee 100644 --- a/src/frontends/onnx/tests/models/override_op.prototxt +++ b/src/frontends/onnx/tests/models/override_op.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/pad_constant.prototxt b/src/frontends/onnx/tests/models/pad_constant.prototxt index 394f0926451fb0..4fecf1b765ca85 100644 --- a/src/frontends/onnx/tests/models/pad_constant.prototxt +++ b/src/frontends/onnx/tests/models/pad_constant.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/pad_negative_begin_end.prototxt b/src/frontends/onnx/tests/models/pad_negative_begin_end.prototxt index 5b3124e939b84c..a53141dd14775b 100644 --- a/src/frontends/onnx/tests/models/pad_negative_begin_end.prototxt +++ b/src/frontends/onnx/tests/models/pad_negative_begin_end.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/pad_non_scalar_values.prototxt b/src/frontends/onnx/tests/models/pad_non_scalar_values.prototxt index 2d91ad0bdb0534..cf4b4d8ef83645 100644 --- a/src/frontends/onnx/tests/models/pad_non_scalar_values.prototxt +++ b/src/frontends/onnx/tests/models/pad_non_scalar_values.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git 
a/src/frontends/onnx/tests/models/pad_optional_constant.prototxt b/src/frontends/onnx/tests/models/pad_optional_constant.prototxt index 3c996f9291e9e1..f7a6dba5475296 100644 --- a/src/frontends/onnx/tests/models/pad_optional_constant.prototxt +++ b/src/frontends/onnx/tests/models/pad_optional_constant.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/pow_float32_float32.prototxt b/src/frontends/onnx/tests/models/pow_float32_float32.prototxt index 4fb1c23b15a53e..e2557d5b6dd5b3 100644 --- a/src/frontends/onnx/tests/models/pow_float32_float32.prototxt +++ b/src/frontends/onnx/tests/models/pow_float32_float32.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/pow_float32_int32.prototxt b/src/frontends/onnx/tests/models/pow_float32_int32.prototxt index 8100a447a07f70..9e420fbdbac9ce 100644 --- a/src/frontends/onnx/tests/models/pow_float32_int32.prototxt +++ b/src/frontends/onnx/tests/models/pow_float32_int32.prototxt @@ -1,4 +1,4 @@ -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/pow_int32_float32.prototxt b/src/frontends/onnx/tests/models/pow_int32_float32.prototxt index 901164cf8e69aa..dff6d3997ce206 100644 --- a/src/frontends/onnx/tests/models/pow_int32_float32.prototxt +++ b/src/frontends/onnx/tests/models/pow_int32_float32.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/prelu.prototxt b/src/frontends/onnx/tests/models/prelu.prototxt index 4eb3f3eda5e050..b346a6a5ffbec8 100644 --- a/src/frontends/onnx/tests/models/prelu.prototxt +++ 
b/src/frontends/onnx/tests/models/prelu.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/prelu_1d.prototxt b/src/frontends/onnx/tests/models/prelu_1d.prototxt index 9034d1d42f6137..c06db24f635cf6 100644 --- a/src/frontends/onnx/tests/models/prelu_1d.prototxt +++ b/src/frontends/onnx/tests/models/prelu_1d.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/prelu_batch_nd.prototxt b/src/frontends/onnx/tests/models/prelu_batch_nd.prototxt index cf693db83c379e..e2a76ee60582f6 100644 --- a/src/frontends/onnx/tests/models/prelu_batch_nd.prototxt +++ b/src/frontends/onnx/tests/models/prelu_batch_nd.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/prelu_c_1_1.prototxt b/src/frontends/onnx/tests/models/prelu_c_1_1.prototxt index 83f2a748860578..d8e3ad85415516 100644 --- a/src/frontends/onnx/tests/models/prelu_c_1_1.prototxt +++ b/src/frontends/onnx/tests/models/prelu_c_1_1.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/prior_box.prototxt b/src/frontends/onnx/tests/models/prior_box.prototxt index 9788d2aa82ee0c..9c06d1a9e6b88e 100644 --- a/src/frontends/onnx/tests/models/prior_box.prototxt +++ b/src/frontends/onnx/tests/models/prior_box.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test" node { diff --git a/src/frontends/onnx/tests/models/priorbox_clustered.prototxt b/src/frontends/onnx/tests/models/priorbox_clustered.prototxt index 
68941b51d9e82d..36250bde74f39b 100644 --- a/src/frontends/onnx/tests/models/priorbox_clustered.prototxt +++ b/src/frontends/onnx/tests/models/priorbox_clustered.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { domain: "org.openvinotoolkit" diff --git a/src/frontends/onnx/tests/models/priorbox_clustered_first_input_bad_shape.prototxt b/src/frontends/onnx/tests/models/priorbox_clustered_first_input_bad_shape.prototxt index 48d4076d4e4b7e..40a6d5e585070e 100644 --- a/src/frontends/onnx/tests/models/priorbox_clustered_first_input_bad_shape.prototxt +++ b/src/frontends/onnx/tests/models/priorbox_clustered_first_input_bad_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { domain: "org.openvinotoolkit" diff --git a/src/frontends/onnx/tests/models/priorbox_clustered_most_attrs_default.prototxt b/src/frontends/onnx/tests/models/priorbox_clustered_most_attrs_default.prototxt index ac7c62bdf3663a..3a2fbc38a099f5 100644 --- a/src/frontends/onnx/tests/models/priorbox_clustered_most_attrs_default.prototxt +++ b/src/frontends/onnx/tests/models/priorbox_clustered_most_attrs_default.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { domain: "org.openvinotoolkit" diff --git a/src/frontends/onnx/tests/models/priorbox_clustered_second_input_bad_shape.prototxt b/src/frontends/onnx/tests/models/priorbox_clustered_second_input_bad_shape.prototxt index 8d53532b3742b0..d0231926a9bbbc 100644 --- a/src/frontends/onnx/tests/models/priorbox_clustered_second_input_bad_shape.prototxt +++ b/src/frontends/onnx/tests/models/priorbox_clustered_second_input_bad_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { domain: "org.openvinotoolkit" diff --git 
a/src/frontends/onnx/tests/models/provenance_input_tags.prototxt b/src/frontends/onnx/tests/models/provenance_input_tags.prototxt index add3b00f15acce..24a3ad4c553836 100644 --- a/src/frontends/onnx/tests/models/provenance_input_tags.prototxt +++ b/src/frontends/onnx/tests/models/provenance_input_tags.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "initializer_of_A" diff --git a/src/frontends/onnx/tests/models/provenance_multiple_outputs_op.prototxt b/src/frontends/onnx/tests/models/provenance_multiple_outputs_op.prototxt index 0369588e46b7f6..18825d001fa077 100644 --- a/src/frontends/onnx/tests/models/provenance_multiple_outputs_op.prototxt +++ b/src/frontends/onnx/tests/models/provenance_multiple_outputs_op.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/provenance_node_name_and_outputs.prototxt b/src/frontends/onnx/tests/models/provenance_node_name_and_outputs.prototxt index 784a17a4129b49..5b3767361c46e1 100644 --- a/src/frontends/onnx/tests/models/provenance_node_name_and_outputs.prototxt +++ b/src/frontends/onnx/tests/models/provenance_node_name_and_outputs.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input_A" diff --git a/src/frontends/onnx/tests/models/provenance_only_outputs.prototxt b/src/frontends/onnx/tests/models/provenance_only_outputs.prototxt index b8dc775c67ce57..f3d0e2bbe7474a 100644 --- a/src/frontends/onnx/tests/models/provenance_only_outputs.prototxt +++ b/src/frontends/onnx/tests/models/provenance_only_outputs.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input_A" diff --git 
a/src/frontends/onnx/tests/models/provenance_tag_add.prototxt b/src/frontends/onnx/tests/models/provenance_tag_add.prototxt index 7f63b68ad216c0..a183712b670031 100644 --- a/src/frontends/onnx/tests/models/provenance_tag_add.prototxt +++ b/src/frontends/onnx/tests/models/provenance_tag_add.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/qlinear_conv_2d.prototxt b/src/frontends/onnx/tests/models/qlinear_conv_2d.prototxt index 8ba8cbf5b27ba4..0af10f8bd9e819 100644 --- a/src/frontends/onnx/tests/models/qlinear_conv_2d.prototxt +++ b/src/frontends/onnx/tests/models/qlinear_conv_2d.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/qlinear_conv_3d.prototxt b/src/frontends/onnx/tests/models/qlinear_conv_3d.prototxt index 792001aaaf8a97..64eb8ad949bb2f 100644 --- a/src/frontends/onnx/tests/models/qlinear_conv_3d.prototxt +++ b/src/frontends/onnx/tests/models/qlinear_conv_3d.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/qlinear_matmul.prototxt b/src/frontends/onnx/tests/models/qlinear_matmul.prototxt index 2c9b008d5d37a2..c04f8da78b0a5f 100644 --- a/src/frontends/onnx/tests/models/qlinear_matmul.prototxt +++ b/src/frontends/onnx/tests/models/qlinear_matmul.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "T1" diff --git a/src/frontends/onnx/tests/models/qlinear_matmul_3d.prototxt b/src/frontends/onnx/tests/models/qlinear_matmul_3d.prototxt index 14e80c71bef5b2..3f6aa836872253 100644 --- a/src/frontends/onnx/tests/models/qlinear_matmul_3d.prototxt +++ 
b/src/frontends/onnx/tests/models/qlinear_matmul_3d.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "T1" diff --git a/src/frontends/onnx/tests/models/quant_conv_lin.prototxt b/src/frontends/onnx/tests/models/quant_conv_lin.prototxt index 18f7b044104e9c..5b6f9184bda320 100644 --- a/src/frontends/onnx/tests/models/quant_conv_lin.prototxt +++ b/src/frontends/onnx/tests/models/quant_conv_lin.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/quant_dequant_pattern.prototxt b/src/frontends/onnx/tests/models/quant_dequant_pattern.prototxt index 73b2728787a7aa..d01aa786ccca7e 100644 --- a/src/frontends/onnx/tests/models/quant_dequant_pattern.prototxt +++ b/src/frontends/onnx/tests/models/quant_dequant_pattern.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/quant_dequant_pattern_axis.prototxt b/src/frontends/onnx/tests/models/quant_dequant_pattern_axis.prototxt index 2a92566398e4fc..f7aa39b901513f 100644 --- a/src/frontends/onnx/tests/models/quant_dequant_pattern_axis.prototxt +++ b/src/frontends/onnx/tests/models/quant_dequant_pattern_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/quantization/dynamic_quantize_linear.prototxt b/src/frontends/onnx/tests/models/quantization/dynamic_quantize_linear.prototxt index 75f904fcf3c635..956337f736f7cd 100644 --- a/src/frontends/onnx/tests/models/quantization/dynamic_quantize_linear.prototxt +++ b/src/frontends/onnx/tests/models/quantization/dynamic_quantize_linear.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: 
"backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/quantization/dynamic_quantize_linear_3x4.prototxt b/src/frontends/onnx/tests/models/quantization/dynamic_quantize_linear_3x4.prototxt index 8db343b670bc8e..80ae2793af984f 100644 --- a/src/frontends/onnx/tests/models/quantization/dynamic_quantize_linear_3x4.prototxt +++ b/src/frontends/onnx/tests/models/quantization/dynamic_quantize_linear_3x4.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/quantization/fake_quantize_const_inputs.prototxt b/src/frontends/onnx/tests/models/quantization/fake_quantize_const_inputs.prototxt index 8b465857707e53..2ea4f6f7f30479 100644 --- a/src/frontends/onnx/tests/models/quantization/fake_quantize_const_inputs.prototxt +++ b/src/frontends/onnx/tests/models/quantization/fake_quantize_const_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "input_low" diff --git a/src/frontends/onnx/tests/models/quantization/fake_quantize_nonconst_inputs.prototxt b/src/frontends/onnx/tests/models/quantization/fake_quantize_nonconst_inputs.prototxt index ce9de0f3292f11..2b918776ef6c8f 100644 --- a/src/frontends/onnx/tests/models/quantization/fake_quantize_nonconst_inputs.prototxt +++ b/src/frontends/onnx/tests/models/quantization/fake_quantize_nonconst_inputs.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { domain: "org.openvinotoolkit" diff --git a/src/frontends/onnx/tests/models/quantization/quant_conv_linear_onnx_example.prototxt b/src/frontends/onnx/tests/models/quantization/quant_conv_linear_onnx_example.prototxt index 94cf76c2b95775..4221eb1f910a07 100644 --- 
a/src/frontends/onnx/tests/models/quantization/quant_conv_linear_onnx_example.prototxt +++ b/src/frontends/onnx/tests/models/quantization/quant_conv_linear_onnx_example.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "onnx-examples" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/quantize_linear.prototxt b/src/frontends/onnx/tests/models/quantize_linear.prototxt index 8739189db251ed..bb2a040badb6f8 100644 --- a/src/frontends/onnx/tests/models/quantize_linear.prototxt +++ b/src/frontends/onnx/tests/models/quantize_linear.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/quantize_linear_axis_negative.prototxt b/src/frontends/onnx/tests/models/quantize_linear_axis_negative.prototxt index e953eef06d461e..3673c7f47617d0 100644 --- a/src/frontends/onnx/tests/models/quantize_linear_axis_negative.prototxt +++ b/src/frontends/onnx/tests/models/quantize_linear_axis_negative.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/quantize_linear_axis_zero.prototxt b/src/frontends/onnx/tests/models/quantize_linear_axis_zero.prototxt index 0d10dd83f86398..2dde954fdd8046 100644 --- a/src/frontends/onnx/tests/models/quantize_linear_axis_zero.prototxt +++ b/src/frontends/onnx/tests/models/quantize_linear_axis_zero.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/quantize_linear_const.prototxt b/src/frontends/onnx/tests/models/quantize_linear_const.prototxt index b4bf9f72ff0451..e8aa11e2a3f383 100644 --- a/src/frontends/onnx/tests/models/quantize_linear_const.prototxt +++ 
b/src/frontends/onnx/tests/models/quantize_linear_const.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/quantize_linear_opset10.prototxt b/src/frontends/onnx/tests/models/quantize_linear_opset10.prototxt index 2ee5691173f76e..469d507c39b478 100644 --- a/src/frontends/onnx/tests/models/quantize_linear_opset10.prototxt +++ b/src/frontends/onnx/tests/models/quantize_linear_opset10.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/quantize_linear_opsets_10_and_13_axis0.prototxt b/src/frontends/onnx/tests/models/quantize_linear_opsets_10_and_13_axis0.prototxt index dce9d8a179f1d8..81d175ecf3db0a 100644 --- a/src/frontends/onnx/tests/models/quantize_linear_opsets_10_and_13_axis0.prototxt +++ b/src/frontends/onnx/tests/models/quantize_linear_opsets_10_and_13_axis0.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/quantize_linear_opsets_10_and_13_axis1.prototxt b/src/frontends/onnx/tests/models/quantize_linear_opsets_10_and_13_axis1.prototxt index 4c0bc111667cfe..af812152696f88 100644 --- a/src/frontends/onnx/tests/models/quantize_linear_opsets_10_and_13_axis1.prototxt +++ b/src/frontends/onnx/tests/models/quantize_linear_opsets_10_and_13_axis1.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/quantize_linear_scalar_ignore_axis.prototxt b/src/frontends/onnx/tests/models/quantize_linear_scalar_ignore_axis.prototxt index 7ce1578322ac8c..d9f248fbd2e9d8 100644 --- 
a/src/frontends/onnx/tests/models/quantize_linear_scalar_ignore_axis.prototxt +++ b/src/frontends/onnx/tests/models/quantize_linear_scalar_ignore_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/quantize_linear_u16.prototxt b/src/frontends/onnx/tests/models/quantize_linear_u16.prototxt index 1595fd9b481199..0b3391a0713f13 100644 --- a/src/frontends/onnx/tests/models/quantize_linear_u16.prototxt +++ b/src/frontends/onnx/tests/models/quantize_linear_u16.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/quantize_linear_zero_point.prototxt b/src/frontends/onnx/tests/models/quantize_linear_zero_point.prototxt index 5640ea40ceffe0..503d092a878622 100644 --- a/src/frontends/onnx/tests/models/quantize_linear_zero_point.prototxt +++ b/src/frontends/onnx/tests/models/quantize_linear_zero_point.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "ngraph ONNXImporter" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/random_normal.prototxt b/src/frontends/onnx/tests/models/random_normal.prototxt index bebf03d10d8163..7c459f35aa6502 100644 --- a/src/frontends/onnx/tests/models/random_normal.prototxt +++ b/src/frontends/onnx/tests/models/random_normal.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "y" diff --git a/src/frontends/onnx/tests/models/random_normal_like.prototxt b/src/frontends/onnx/tests/models/random_normal_like.prototxt index eb901eb33a3607..6c313a277fcce6 100644 --- a/src/frontends/onnx/tests/models/random_normal_like.prototxt +++ b/src/frontends/onnx/tests/models/random_normal_like.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: 
"nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/random_uniform.prototxt b/src/frontends/onnx/tests/models/random_uniform.prototxt index ce8e57b867e22c..d27b926efd3c48 100644 --- a/src/frontends/onnx/tests/models/random_uniform.prototxt +++ b/src/frontends/onnx/tests/models/random_uniform.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "y" diff --git a/src/frontends/onnx/tests/models/random_uniform_like.prototxt b/src/frontends/onnx/tests/models/random_uniform_like.prototxt index 92b6904700e4eb..9682eee18b9e5c 100644 --- a/src/frontends/onnx/tests/models/random_uniform_like.prototxt +++ b/src/frontends/onnx/tests/models/random_uniform_like.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/range.prototxt b/src/frontends/onnx/tests/models/range.prototxt index efeb883047c92e..ee0ac4d40bb490 100644 --- a/src/frontends/onnx/tests/models/range.prototxt +++ b/src/frontends/onnx/tests/models/range.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "start" diff --git a/src/frontends/onnx/tests/models/reciprocal.prototxt b/src/frontends/onnx/tests/models/reciprocal.prototxt index 0b65e895256c29..354b322434da12 100644 --- a/src/frontends/onnx/tests/models/reciprocal.prototxt +++ b/src/frontends/onnx/tests/models/reciprocal.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/reduce_l1.prototxt b/src/frontends/onnx/tests/models/reduce_l1.prototxt index b294b3ea474de4..2a97250cb7001e 100644 --- a/src/frontends/onnx/tests/models/reduce_l1.prototxt +++ 
b/src/frontends/onnx/tests/models/reduce_l1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reduce_l2.prototxt b/src/frontends/onnx/tests/models/reduce_l2.prototxt index 9397e1c2d0982b..c862b09b1b8ac8 100644 --- a/src/frontends/onnx/tests/models/reduce_l2.prototxt +++ b/src/frontends/onnx/tests/models/reduce_l2.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reduce_log_sum.prototxt b/src/frontends/onnx/tests/models/reduce_log_sum.prototxt index 4a2c0d38d194a0..b34a1295056950 100644 --- a/src/frontends/onnx/tests/models/reduce_log_sum.prototxt +++ b/src/frontends/onnx/tests/models/reduce_log_sum.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reduce_log_sum_exp.prototxt b/src/frontends/onnx/tests/models/reduce_log_sum_exp.prototxt index 6c53973467d6c5..54efa7618f81ef 100644 --- a/src/frontends/onnx/tests/models/reduce_log_sum_exp.prototxt +++ b/src/frontends/onnx/tests/models/reduce_log_sum_exp.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reduce_max.prototxt b/src/frontends/onnx/tests/models/reduce_max.prototxt index eae393b2f31d23..7c3f1b7705a013 100644 --- a/src/frontends/onnx/tests/models/reduce_max.prototxt +++ b/src/frontends/onnx/tests/models/reduce_max.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reduce_max_invalid_axes.prototxt 
b/src/frontends/onnx/tests/models/reduce_max_invalid_axes.prototxt index 98bfe21bd080e0..e0006802771c7d 100644 --- a/src/frontends/onnx/tests/models/reduce_max_invalid_axes.prototxt +++ b/src/frontends/onnx/tests/models/reduce_max_invalid_axes.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reduce_mean.prototxt b/src/frontends/onnx/tests/models/reduce_mean.prototxt index 8afa941e77a1cf..bee7ce7423c879 100644 --- a/src/frontends/onnx/tests/models/reduce_mean.prototxt +++ b/src/frontends/onnx/tests/models/reduce_mean.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reduce_min.prototxt b/src/frontends/onnx/tests/models/reduce_min.prototxt index 9f4825c2a294ef..11ce7911493c5c 100644 --- a/src/frontends/onnx/tests/models/reduce_min.prototxt +++ b/src/frontends/onnx/tests/models/reduce_min.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reduce_prod.prototxt b/src/frontends/onnx/tests/models/reduce_prod.prototxt index 2155f6020be80e..68b307d8850017 100644 --- a/src/frontends/onnx/tests/models/reduce_prod.prototxt +++ b/src/frontends/onnx/tests/models/reduce_prod.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reduce_sum.prototxt b/src/frontends/onnx/tests/models/reduce_sum.prototxt index c697f9d01b6580..69e15f13d0e934 100644 --- a/src/frontends/onnx/tests/models/reduce_sum.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" 
+producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_0_dim_input.prototxt b/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_0_dim_input.prototxt index a840a9e35dd2c6..7b01dcba43ed00 100644 --- a/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_0_dim_input.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_0_dim_input.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant.prototxt b/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant.prototxt index f3f2dd09c59914..f16fbb4c89128c 100644 --- a/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "axes" diff --git a/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant_keepdims_off.prototxt b/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant_keepdims_off.prototxt index 066d22727b6e1b..fda17f6a03e32d 100644 --- a/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant_keepdims_off.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant_keepdims_off.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "axes" diff --git a/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant_single_axis.prototxt b/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant_single_axis.prototxt index 4fe71a1dd07b47..a649adaba7ab16 100644 --- a/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant_single_axis.prototxt +++ 
b/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_constant_single_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "axes" diff --git a/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_input.prototxt b/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_input.prototxt index 273e436803a42a..b2561b4e3ef479 100644 --- a/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_input.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum_13_axes_as_input.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty.prototxt b/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty.prototxt index 44d655723cf8ae..605f4191fbc7a0 100644 --- a/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_dynamic_rank_input.prototxt b/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_dynamic_rank_input.prototxt index 61bb13255b4c9e..a55740a16bfdbb 100644 --- a/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_dynamic_rank_input.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_dynamic_rank_input.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_with_noop.prototxt b/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_with_noop.prototxt index e44b67dcd83839..26c54b0c67e7bb 100644 --- 
a/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_with_noop.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_with_noop.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_without_noop.prototxt b/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_without_noop.prototxt index 73c22f7739c0ae..441db546f2d7b0 100644 --- a/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_without_noop.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum_13_axes_empty_without_noop.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/reduce_sum_13_input_dynamic.prototxt b/src/frontends/onnx/tests/models/reduce_sum_13_input_dynamic.prototxt index bacf4619341bbc..bc0a93590a7201 100644 --- a/src/frontends/onnx/tests/models/reduce_sum_13_input_dynamic.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum_13_input_dynamic.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "B" diff --git a/src/frontends/onnx/tests/models/reduce_sum_dynamic_rank_input.prototxt b/src/frontends/onnx/tests/models/reduce_sum_dynamic_rank_input.prototxt index 3a9f2fd52ffcbf..16c855f65ebc33 100644 --- a/src/frontends/onnx/tests/models/reduce_sum_dynamic_rank_input.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum_dynamic_rank_input.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reduce_sum_square.prototxt b/src/frontends/onnx/tests/models/reduce_sum_square.prototxt index 4c667de83e0437..f4460e7bccf250 100644 --- 
a/src/frontends/onnx/tests/models/reduce_sum_square.prototxt +++ b/src/frontends/onnx/tests/models/reduce_sum_square.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/relu.prototxt b/src/frontends/onnx/tests/models/relu.prototxt index da8d0c01396aac..b53972ed303639 100644 --- a/src/frontends/onnx/tests/models/relu.prototxt +++ b/src/frontends/onnx/tests/models/relu.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/relu_custom_domain.prototxt b/src/frontends/onnx/tests/models/relu_custom_domain.prototxt index dc51e8299305fe..c4097cb9d9d2ef 100644 --- a/src/frontends/onnx/tests/models/relu_custom_domain.prototxt +++ b/src/frontends/onnx/tests/models/relu_custom_domain.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reshape_extended_dims.prototxt b/src/frontends/onnx/tests/models/reshape_extended_dims.prototxt index 423e0075019224..03ed1689c13074 100644 --- a/src/frontends/onnx/tests/models/reshape_extended_dims.prototxt +++ b/src/frontends/onnx/tests/models/reshape_extended_dims.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reshape_negative_dim.prototxt b/src/frontends/onnx/tests/models/reshape_negative_dim.prototxt index 7e5e3cc0dcbeb1..ea22688265de90 100644 --- a/src/frontends/onnx/tests/models/reshape_negative_dim.prototxt +++ b/src/frontends/onnx/tests/models/reshape_negative_dim.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: 
"data" diff --git a/src/frontends/onnx/tests/models/reshape_negative_with_zero_dims.prototxt b/src/frontends/onnx/tests/models/reshape_negative_with_zero_dims.prototxt index cd7e5b985725bf..939894100cf376 100644 --- a/src/frontends/onnx/tests/models/reshape_negative_with_zero_dims.prototxt +++ b/src/frontends/onnx/tests/models/reshape_negative_with_zero_dims.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reshape_output_shape_as_input.prototxt b/src/frontends/onnx/tests/models/reshape_output_shape_as_input.prototxt index 514d9768ed718c..9f8e2983efce33 100644 --- a/src/frontends/onnx/tests/models/reshape_output_shape_as_input.prototxt +++ b/src/frontends/onnx/tests/models/reshape_output_shape_as_input.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "const_shape" diff --git a/src/frontends/onnx/tests/models/reshape_reduced_dims.prototxt b/src/frontends/onnx/tests/models/reshape_reduced_dims.prototxt index 60fe04c3581449..c3e6049f9daa23 100644 --- a/src/frontends/onnx/tests/models/reshape_reduced_dims.prototxt +++ b/src/frontends/onnx/tests/models/reshape_reduced_dims.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reshape_reordered_dims.prototxt b/src/frontends/onnx/tests/models/reshape_reordered_dims.prototxt index 0bce195afa248a..6797fc93006e33 100644 --- a/src/frontends/onnx/tests/models/reshape_reordered_dims.prototxt +++ b/src/frontends/onnx/tests/models/reshape_reordered_dims.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/reshape_single_dim.prototxt 
b/src/frontends/onnx/tests/models/reshape_single_dim.prototxt index d2a888f0c838ac..ab649fe9732328 100644 --- a/src/frontends/onnx/tests/models/reshape_single_dim.prototxt +++ b/src/frontends/onnx/tests/models/reshape_single_dim.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/resize10_asymertic_dim_in_the_middle.prototxt b/src/frontends/onnx/tests/models/resize10_asymertic_dim_in_the_middle.prototxt index 8210aa156d199e..4bf604b109332c 100644 --- a/src/frontends/onnx/tests/models/resize10_asymertic_dim_in_the_middle.prototxt +++ b/src/frontends/onnx/tests/models/resize10_asymertic_dim_in_the_middle.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { diff --git a/src/frontends/onnx/tests/models/resize10_asymertic_last_dim.prototxt b/src/frontends/onnx/tests/models/resize10_asymertic_last_dim.prototxt index 94b02e762b335d..057a1fa1b3822f 100644 --- a/src/frontends/onnx/tests/models/resize10_asymertic_last_dim.prototxt +++ b/src/frontends/onnx/tests/models/resize10_asymertic_last_dim.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { diff --git a/src/frontends/onnx/tests/models/resize10_down_scales_const_linear.prototxt b/src/frontends/onnx/tests/models/resize10_down_scales_const_linear.prototxt index 469afcbd4443bc..9a6dd1f7cd1db6 100644 --- a/src/frontends/onnx/tests/models/resize10_down_scales_const_linear.prototxt +++ b/src/frontends/onnx/tests/models/resize10_down_scales_const_linear.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/resize10_down_scales_const_nearest.prototxt 
b/src/frontends/onnx/tests/models/resize10_down_scales_const_nearest.prototxt index 0003f1abd35285..0e08edad9a3a26 100644 --- a/src/frontends/onnx/tests/models/resize10_down_scales_const_nearest.prototxt +++ b/src/frontends/onnx/tests/models/resize10_down_scales_const_nearest.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/resize10_up_scales_const_linear.prototxt b/src/frontends/onnx/tests/models/resize10_up_scales_const_linear.prototxt index d21da6c4ae5d88..e6aa2afdcdcb02 100644 --- a/src/frontends/onnx/tests/models/resize10_up_scales_const_linear.prototxt +++ b/src/frontends/onnx/tests/models/resize10_up_scales_const_linear.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/resize10_up_scales_const_nearest.prototxt b/src/frontends/onnx/tests/models/resize10_up_scales_const_nearest.prototxt index ce88aea4ae7c3a..d09ef898105da8 100644 --- a/src/frontends/onnx/tests/models/resize10_up_scales_const_nearest.prototxt +++ b/src/frontends/onnx/tests/models/resize10_up_scales_const_nearest.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/resize11_down_scales_linear_asymmetric.prototxt b/src/frontends/onnx/tests/models/resize11_down_scales_linear_asymmetric.prototxt index b0e90078ffefb9..fd8fe95f95068c 100644 --- a/src/frontends/onnx/tests/models/resize11_down_scales_linear_asymmetric.prototxt +++ b/src/frontends/onnx/tests/models/resize11_down_scales_linear_asymmetric.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git 
a/src/frontends/onnx/tests/models/resize11_down_sizes_cubic_half_pixel.prototxt b/src/frontends/onnx/tests/models/resize11_down_sizes_cubic_half_pixel.prototxt index 86674e5473da48..189fe7a17157a1 100644 --- a/src/frontends/onnx/tests/models/resize11_down_sizes_cubic_half_pixel.prototxt +++ b/src/frontends/onnx/tests/models/resize11_down_sizes_cubic_half_pixel.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "sizes" diff --git a/src/frontends/onnx/tests/models/resize11_down_sizes_linear_pytorch_half_pixel.prototxt b/src/frontends/onnx/tests/models/resize11_down_sizes_linear_pytorch_half_pixel.prototxt index cf7bf96885d160..4fae291b3ed8a4 100644 --- a/src/frontends/onnx/tests/models/resize11_down_sizes_linear_pytorch_half_pixel.prototxt +++ b/src/frontends/onnx/tests/models/resize11_down_sizes_linear_pytorch_half_pixel.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "sizes" diff --git a/src/frontends/onnx/tests/models/resize11_down_sizes_tf_half_pixel.prototxt b/src/frontends/onnx/tests/models/resize11_down_sizes_tf_half_pixel.prototxt index 4f06a18310ea10..0431fa20a22214 100644 --- a/src/frontends/onnx/tests/models/resize11_down_sizes_tf_half_pixel.prototxt +++ b/src/frontends/onnx/tests/models/resize11_down_sizes_tf_half_pixel.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "sizes" diff --git a/src/frontends/onnx/tests/models/resize11_empty_constant_as_input.prototxt b/src/frontends/onnx/tests/models/resize11_empty_constant_as_input.prototxt index b4e397bc1801a6..a56d75e7a68f4e 100644 --- a/src/frontends/onnx/tests/models/resize11_empty_constant_as_input.prototxt +++ b/src/frontends/onnx/tests/models/resize11_empty_constant_as_input.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: 
"onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "empty_const" diff --git a/src/frontends/onnx/tests/models/resize11_scales_nearest_asymmetric_floor.prototxt b/src/frontends/onnx/tests/models/resize11_scales_nearest_asymmetric_floor.prototxt index 03d6c0508ba209..9fb96a40d57f90 100644 --- a/src/frontends/onnx/tests/models/resize11_scales_nearest_asymmetric_floor.prototxt +++ b/src/frontends/onnx/tests/models/resize11_scales_nearest_asymmetric_floor.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/resize11_scales_nearest_asymmetric_floor_dynamic_scales.prototxt b/src/frontends/onnx/tests/models/resize11_scales_nearest_asymmetric_floor_dynamic_scales.prototxt index 3c837791fdd756..f611a78cf1388e 100644 --- a/src/frontends/onnx/tests/models/resize11_scales_nearest_asymmetric_floor_dynamic_scales.prototxt +++ b/src/frontends/onnx/tests/models/resize11_scales_nearest_asymmetric_floor_dynamic_scales.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/resize11_sizes_nearest_asymmetric_floor.prototxt b/src/frontends/onnx/tests/models/resize11_sizes_nearest_asymmetric_floor.prototxt index f3ce3308c0d7d7..0c0828a6cc5d76 100644 --- a/src/frontends/onnx/tests/models/resize11_sizes_nearest_asymmetric_floor.prototxt +++ b/src/frontends/onnx/tests/models/resize11_sizes_nearest_asymmetric_floor.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/resize11_up_scales_cubic_align_corners.prototxt b/src/frontends/onnx/tests/models/resize11_up_scales_cubic_align_corners.prototxt index 9ed0556113e795..a653e462a257b5 100644 --- 
a/src/frontends/onnx/tests/models/resize11_up_scales_cubic_align_corners.prototxt +++ b/src/frontends/onnx/tests/models/resize11_up_scales_cubic_align_corners.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/resize11_up_scales_linear_asymmetric.prototxt b/src/frontends/onnx/tests/models/resize11_up_scales_linear_asymmetric.prototxt index 9a0a7ded8d6c06..20e178423aa3d2 100644 --- a/src/frontends/onnx/tests/models/resize11_up_scales_linear_asymmetric.prototxt +++ b/src/frontends/onnx/tests/models/resize11_up_scales_linear_asymmetric.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/resize11_up_scales_tf_half_pixel.prototxt b/src/frontends/onnx/tests/models/resize11_up_scales_tf_half_pixel.prototxt index 39d3eea3b7c605..ab760058936e40 100644 --- a/src/frontends/onnx/tests/models/resize11_up_scales_tf_half_pixel.prototxt +++ b/src/frontends/onnx/tests/models/resize11_up_scales_tf_half_pixel.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/resize11_up_sizes_all_attributes_default.prototxt b/src/frontends/onnx/tests/models/resize11_up_sizes_all_attributes_default.prototxt index afbb84874978d3..fa33edd24cfef3 100644 --- a/src/frontends/onnx/tests/models/resize11_up_sizes_all_attributes_default.prototxt +++ b/src/frontends/onnx/tests/models/resize11_up_sizes_all_attributes_default.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "sizes" diff --git a/src/frontends/onnx/tests/models/resize11_up_sizes_cubic_half_pixel.prototxt 
b/src/frontends/onnx/tests/models/resize11_up_sizes_cubic_half_pixel.prototxt index 91a0041ca2782e..5b710fcf791654 100644 --- a/src/frontends/onnx/tests/models/resize11_up_sizes_cubic_half_pixel.prototxt +++ b/src/frontends/onnx/tests/models/resize11_up_sizes_cubic_half_pixel.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "sizes" diff --git a/src/frontends/onnx/tests/models/resize11_up_sizes_cubic_half_pixel_dynamic_sizes.prototxt b/src/frontends/onnx/tests/models/resize11_up_sizes_cubic_half_pixel_dynamic_sizes.prototxt index ece1eaf97d17b9..27f74db4ede2ed 100644 --- a/src/frontends/onnx/tests/models/resize11_up_sizes_cubic_half_pixel_dynamic_sizes.prototxt +++ b/src/frontends/onnx/tests/models/resize11_up_sizes_cubic_half_pixel_dynamic_sizes.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/resize11_up_sizes_linear_asymmetric.prototxt b/src/frontends/onnx/tests/models/resize11_up_sizes_linear_asymmetric.prototxt index 6de38295973910..0038ce2a445860 100644 --- a/src/frontends/onnx/tests/models/resize11_up_sizes_linear_asymmetric.prototxt +++ b/src/frontends/onnx/tests/models/resize11_up_sizes_linear_asymmetric.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_ceil_half_pixel.prototxt b/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_ceil_half_pixel.prototxt index 475b42dee85d22..e8a88a463e0315 100644 --- a/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_ceil_half_pixel.prototxt +++ b/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_ceil_half_pixel.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: 
"OpenVINO ONNX Frontend" graph { node { output: "sizes" diff --git a/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_floor_align_corners.prototxt b/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_floor_align_corners.prototxt index 0d2651bbdfb64a..f4142d3b658502 100644 --- a/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_floor_align_corners.prototxt +++ b/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_floor_align_corners.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "sizes" diff --git a/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_prefer_ceil_asymmetric.prototxt b/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_prefer_ceil_asymmetric.prototxt index f3b6c6a62072b3..0596f3ab4ab4a0 100644 --- a/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_prefer_ceil_asymmetric.prototxt +++ b/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_prefer_ceil_asymmetric.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "sizes" diff --git a/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_round_prefer_floor_half_pixel.prototxt b/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_round_prefer_floor_half_pixel.prototxt index 91a48780261752..e535a2be530934 100644 --- a/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_round_prefer_floor_half_pixel.prototxt +++ b/src/frontends/onnx/tests/models/resize11_up_sizes_nearest_round_prefer_floor_half_pixel.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "sizes" diff --git a/src/frontends/onnx/tests/models/reverse_sequence_incorrect_batch_axis.prototxt b/src/frontends/onnx/tests/models/reverse_sequence_incorrect_batch_axis.prototxt index 506f3cfe7bdc1d..3bad8090343735 100644 
--- a/src/frontends/onnx/tests/models/reverse_sequence_incorrect_batch_axis.prototxt +++ b/src/frontends/onnx/tests/models/reverse_sequence_incorrect_batch_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/reverse_sequence_incorrect_time_axis.prototxt b/src/frontends/onnx/tests/models/reverse_sequence_incorrect_time_axis.prototxt index 07c87d796672e5..9c1ba0a823d05f 100644 --- a/src/frontends/onnx/tests/models/reverse_sequence_incorrect_time_axis.prototxt +++ b/src/frontends/onnx/tests/models/reverse_sequence_incorrect_time_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/reverse_sequence_time_0_batch_1.prototxt b/src/frontends/onnx/tests/models/reverse_sequence_time_0_batch_1.prototxt index d13a33472612d9..7b3c8d1ae7d364 100644 --- a/src/frontends/onnx/tests/models/reverse_sequence_time_0_batch_1.prototxt +++ b/src/frontends/onnx/tests/models/reverse_sequence_time_0_batch_1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/reverse_sequence_time_1_batch_0.prototxt b/src/frontends/onnx/tests/models/reverse_sequence_time_1_batch_0.prototxt index 0591d327e53d02..e06fef3e1de161 100644 --- a/src/frontends/onnx/tests/models/reverse_sequence_time_1_batch_0.prototxt +++ b/src/frontends/onnx/tests/models/reverse_sequence_time_1_batch_0.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/reverse_sequence_time_and_batch_axis_equal.prototxt 
b/src/frontends/onnx/tests/models/reverse_sequence_time_and_batch_axis_equal.prototxt index 7f1b81a3d307fe..b9a9ca7e79d2fa 100644 --- a/src/frontends/onnx/tests/models/reverse_sequence_time_and_batch_axis_equal.prototxt +++ b/src/frontends/onnx/tests/models/reverse_sequence_time_and_batch_axis_equal.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/rnn_bidir_mixed_seq_len_const.prototxt b/src/frontends/onnx/tests/models/rnn_bidir_mixed_seq_len_const.prototxt index 2a335fc717de50..8314e451413d51 100644 --- a/src/frontends/onnx/tests/models/rnn_bidir_mixed_seq_len_const.prototxt +++ b/src/frontends/onnx/tests/models/rnn_bidir_mixed_seq_len_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/rnn_bidirectional.prototxt b/src/frontends/onnx/tests/models/rnn_bidirectional.prototxt index 392e0d00c81155..852be149e3c841 100644 --- a/src/frontends/onnx/tests/models/rnn_bidirectional.prototxt +++ b/src/frontends/onnx/tests/models/rnn_bidirectional.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/rnn_bidirectional_const.prototxt b/src/frontends/onnx/tests/models/rnn_bidirectional_const.prototxt index f6ca7d04e70b0d..8447694c7ec99f 100644 --- a/src/frontends/onnx/tests/models/rnn_bidirectional_const.prototxt +++ b/src/frontends/onnx/tests/models/rnn_bidirectional_const.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/rnn_defaults_fwd.prototxt b/src/frontends/onnx/tests/models/rnn_defaults_fwd.prototxt index 
54eba881ccfe54..fb9b3da7aa051e 100644 --- a/src/frontends/onnx/tests/models/rnn_defaults_fwd.prototxt +++ b/src/frontends/onnx/tests/models/rnn_defaults_fwd.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/rnn_defaults_fwd_const.prototxt b/src/frontends/onnx/tests/models/rnn_defaults_fwd_const.prototxt index bfec5fd52d8f95..9f3d163c8c8cb5 100644 --- a/src/frontends/onnx/tests/models/rnn_defaults_fwd_const.prototxt +++ b/src/frontends/onnx/tests/models/rnn_defaults_fwd_const.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/rnn_fwd_activations.prototxt b/src/frontends/onnx/tests/models/rnn_fwd_activations.prototxt index b5a4d93318dd59..e39f88b8326fb5 100644 --- a/src/frontends/onnx/tests/models/rnn_fwd_activations.prototxt +++ b/src/frontends/onnx/tests/models/rnn_fwd_activations.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/rnn_fwd_activations_const.prototxt b/src/frontends/onnx/tests/models/rnn_fwd_activations_const.prototxt index 4b2633092b18c6..3c0e90701a5c78 100644 --- a/src/frontends/onnx/tests/models/rnn_fwd_activations_const.prototxt +++ b/src/frontends/onnx/tests/models/rnn_fwd_activations_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/rnn_fwd_bias_initial_h.prototxt b/src/frontends/onnx/tests/models/rnn_fwd_bias_initial_h.prototxt index ead02b7d4462f5..0f6af98b99750d 100644 --- a/src/frontends/onnx/tests/models/rnn_fwd_bias_initial_h.prototxt +++ 
b/src/frontends/onnx/tests/models/rnn_fwd_bias_initial_h.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/rnn_fwd_bias_initial_h_const.prototxt b/src/frontends/onnx/tests/models/rnn_fwd_bias_initial_h_const.prototxt index a4d2716483bcd6..98da85509dff10 100644 --- a/src/frontends/onnx/tests/models/rnn_fwd_bias_initial_h_const.prototxt +++ b/src/frontends/onnx/tests/models/rnn_fwd_bias_initial_h_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/rnn_fwd_mixed_seq_len.prototxt b/src/frontends/onnx/tests/models/rnn_fwd_mixed_seq_len.prototxt index 615a65d994aa93..16b30aeb69980b 100644 --- a/src/frontends/onnx/tests/models/rnn_fwd_mixed_seq_len.prototxt +++ b/src/frontends/onnx/tests/models/rnn_fwd_mixed_seq_len.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/rnn_fwd_mixed_seq_len_const.prototxt b/src/frontends/onnx/tests/models/rnn_fwd_mixed_seq_len_const.prototxt index 1a7eb67fc46a66..bfd8eacaf3038f 100644 --- a/src/frontends/onnx/tests/models/rnn_fwd_mixed_seq_len_const.prototxt +++ b/src/frontends/onnx/tests/models/rnn_fwd_mixed_seq_len_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/rnn_rev_clip.prototxt b/src/frontends/onnx/tests/models/rnn_rev_clip.prototxt index 4699aeaa2cd2bf..1ada935751d159 100644 --- a/src/frontends/onnx/tests/models/rnn_rev_clip.prototxt +++ b/src/frontends/onnx/tests/models/rnn_rev_clip.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" 
+producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/rnn_rev_clip_const.prototxt b/src/frontends/onnx/tests/models/rnn_rev_clip_const.prototxt index 9bcfc22f8b4265..12226799aaec82 100644 --- a/src/frontends/onnx/tests/models/rnn_rev_clip_const.prototxt +++ b/src/frontends/onnx/tests/models/rnn_rev_clip_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/rnn_reverse.prototxt b/src/frontends/onnx/tests/models/rnn_reverse.prototxt index a06bdea0ae4abf..d2231a0a27e97d 100644 --- a/src/frontends/onnx/tests/models/rnn_reverse.prototxt +++ b/src/frontends/onnx/tests/models/rnn_reverse.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/rnn_reverse_const.prototxt b/src/frontends/onnx/tests/models/rnn_reverse_const.prototxt index d67f2a5b6c45a9..ae60c97e3cc00b 100644 --- a/src/frontends/onnx/tests/models/rnn_reverse_const.prototxt +++ b/src/frontends/onnx/tests/models/rnn_reverse_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/rnn_reverse_mixed_seq_len_const.prototxt b/src/frontends/onnx/tests/models/rnn_reverse_mixed_seq_len_const.prototxt index 98d390f02e0a2f..ff0e0c8317b95b 100644 --- a/src/frontends/onnx/tests/models/rnn_reverse_mixed_seq_len_const.prototxt +++ b/src/frontends/onnx/tests/models/rnn_reverse_mixed_seq_len_const.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "W" diff --git a/src/frontends/onnx/tests/models/roi_align_f32.prototxt b/src/frontends/onnx/tests/models/roi_align_f32.prototxt 
index ed8ec2285b719f..c36dc9323009b7 100644 --- a/src/frontends/onnx/tests/models/roi_align_f32.prototxt +++ b/src/frontends/onnx/tests/models/roi_align_f32.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node{ input: "feature_maps" diff --git a/src/frontends/onnx/tests/models/roialign16_avg_half_pixel.prototxt b/src/frontends/onnx/tests/models/roialign16_avg_half_pixel.prototxt index aebc8cf94bbf0e..513bceb1c34a4a 100644 --- a/src/frontends/onnx/tests/models/roialign16_avg_half_pixel.prototxt +++ b/src/frontends/onnx/tests/models/roialign16_avg_half_pixel.prototxt @@ -1,6 +1,6 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/roialign16_avg_out_half_pixel.prototxt b/src/frontends/onnx/tests/models/roialign16_avg_out_half_pixel.prototxt index 0c98525f2c9430..3a1cf9783b43d4 100644 --- a/src/frontends/onnx/tests/models/roialign16_avg_out_half_pixel.prototxt +++ b/src/frontends/onnx/tests/models/roialign16_avg_out_half_pixel.prototxt @@ -1,6 +1,6 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/round.prototxt b/src/frontends/onnx/tests/models/round.prototxt index 888e56a2714e1a..841eb1aa7e5284 100644 --- a/src/frontends/onnx/tests/models/round.prototxt +++ b/src/frontends/onnx/tests/models/round.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/round_half_nearest_even.prototxt b/src/frontends/onnx/tests/models/round_half_nearest_even.prototxt index 13981ba2a1df2e..c2deefb973d506 100644 --- a/src/frontends/onnx/tests/models/round_half_nearest_even.prototxt +++ b/src/frontends/onnx/tests/models/round_half_nearest_even.prototxt @@ -1,5 
+1,5 @@ ir_version: 3 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/scan15_ND_b4_input_rev.prototxt b/src/frontends/onnx/tests/models/scan15_ND_b4_input_rev.prototxt index 2e9d472ff23d11..7d4e36e4c30dc4 100644 --- a/src/frontends/onnx/tests/models/scan15_ND_b4_input_rev.prototxt +++ b/src/frontends/onnx/tests/models/scan15_ND_b4_input_rev.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "initial" diff --git a/src/frontends/onnx/tests/models/scan15_ND_mixed.prototxt b/src/frontends/onnx/tests/models/scan15_ND_mixed.prototxt index 68d7bb700e845f..cc673cf8e0cbd4 100644 --- a/src/frontends/onnx/tests/models/scan15_ND_mixed.prototxt +++ b/src/frontends/onnx/tests/models/scan15_ND_mixed.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "initial" diff --git a/src/frontends/onnx/tests/models/scan15_ND_mixed_neg_axes.prototxt b/src/frontends/onnx/tests/models/scan15_ND_mixed_neg_axes.prototxt index 2a38e87ea44dce..e53bf53a5237df 100644 --- a/src/frontends/onnx/tests/models/scan15_ND_mixed_neg_axes.prototxt +++ b/src/frontends/onnx/tests/models/scan15_ND_mixed_neg_axes.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "initial" diff --git a/src/frontends/onnx/tests/models/scan15_dyn_rank.prototxt b/src/frontends/onnx/tests/models/scan15_dyn_rank.prototxt index 6ffd873dba4c39..43cc9a6d39ae4f 100644 --- a/src/frontends/onnx/tests/models/scan15_dyn_rank.prototxt +++ b/src/frontends/onnx/tests/models/scan15_dyn_rank.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "initial" diff --git 
a/src/frontends/onnx/tests/models/scan15_dyn_rank_neg_axes.prototxt b/src/frontends/onnx/tests/models/scan15_dyn_rank_neg_axes.prototxt index 41742a239985c3..8f570fd9b7f46c 100644 --- a/src/frontends/onnx/tests/models/scan15_dyn_rank_neg_axes.prototxt +++ b/src/frontends/onnx/tests/models/scan15_dyn_rank_neg_axes.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "initial" diff --git a/src/frontends/onnx/tests/models/scan15_fib_like.prototxt b/src/frontends/onnx/tests/models/scan15_fib_like.prototxt index aac869f689395f..884de3d84b96e6 100644 --- a/src/frontends/onnx/tests/models/scan15_fib_like.prototxt +++ b/src/frontends/onnx/tests/models/scan15_fib_like.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "initial" diff --git a/src/frontends/onnx/tests/models/scan15_fib_like_input_out_rev.prototxt b/src/frontends/onnx/tests/models/scan15_fib_like_input_out_rev.prototxt index db0f1f4b8ded66..dc0278c1a3e7f6 100644 --- a/src/frontends/onnx/tests/models/scan15_fib_like_input_out_rev.prototxt +++ b/src/frontends/onnx/tests/models/scan15_fib_like_input_out_rev.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "initial" diff --git a/src/frontends/onnx/tests/models/scan15_fib_like_input_rev.prototxt b/src/frontends/onnx/tests/models/scan15_fib_like_input_rev.prototxt index eb817dc6882d37..a024ebb5ed7987 100644 --- a/src/frontends/onnx/tests/models/scan15_fib_like_input_rev.prototxt +++ b/src/frontends/onnx/tests/models/scan15_fib_like_input_rev.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "initial" diff --git a/src/frontends/onnx/tests/models/scan15_fib_like_out_rev.prototxt 
b/src/frontends/onnx/tests/models/scan15_fib_like_out_rev.prototxt index a8b9c0b75aad6b..f85d9e7fbc57cc 100644 --- a/src/frontends/onnx/tests/models/scan15_fib_like_out_rev.prototxt +++ b/src/frontends/onnx/tests/models/scan15_fib_like_out_rev.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "initial" diff --git a/src/frontends/onnx/tests/models/scan8_ND_b4.prototxt b/src/frontends/onnx/tests/models/scan8_ND_b4.prototxt index 661e60db173d50..55b0a10f4e4713 100644 --- a/src/frontends/onnx/tests/models/scan8_ND_b4.prototxt +++ b/src/frontends/onnx/tests/models/scan8_ND_b4.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "" diff --git a/src/frontends/onnx/tests/models/scan8_ND_b4_input_rev.prototxt b/src/frontends/onnx/tests/models/scan8_ND_b4_input_rev.prototxt index 383f959c896217..94f8d0a97bb61d 100644 --- a/src/frontends/onnx/tests/models/scan8_ND_b4_input_rev.prototxt +++ b/src/frontends/onnx/tests/models/scan8_ND_b4_input_rev.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "" diff --git a/src/frontends/onnx/tests/models/scan8_ND_b4_seq_lens.prototxt b/src/frontends/onnx/tests/models/scan8_ND_b4_seq_lens.prototxt index c2fb5871cbc8fb..917ada32833a01 100644 --- a/src/frontends/onnx/tests/models/scan8_ND_b4_seq_lens.prototxt +++ b/src/frontends/onnx/tests/models/scan8_ND_b4_seq_lens.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "sequence_lens" diff --git a/src/frontends/onnx/tests/models/scatter_elements_add_opset18.prototxt b/src/frontends/onnx/tests/models/scatter_elements_add_opset18.prototxt index 80e80a864fe83d..8df62e62ee61e4 100644 --- 
a/src/frontends/onnx/tests/models/scatter_elements_add_opset18.prototxt +++ b/src/frontends/onnx/tests/models/scatter_elements_add_opset18.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "DATA" diff --git a/src/frontends/onnx/tests/models/scatter_elements_default_opset18.prototxt b/src/frontends/onnx/tests/models/scatter_elements_default_opset18.prototxt index a8493dd96e6fff..318e8882486ed4 100644 --- a/src/frontends/onnx/tests/models/scatter_elements_default_opset18.prototxt +++ b/src/frontends/onnx/tests/models/scatter_elements_default_opset18.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "DATA" diff --git a/src/frontends/onnx/tests/models/scatter_elements_max_opset18.prototxt b/src/frontends/onnx/tests/models/scatter_elements_max_opset18.prototxt index 0833773cc4fff0..064afa3538f295 100644 --- a/src/frontends/onnx/tests/models/scatter_elements_max_opset18.prototxt +++ b/src/frontends/onnx/tests/models/scatter_elements_max_opset18.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "DATA" diff --git a/src/frontends/onnx/tests/models/scatter_elements_min_opset18.prototxt b/src/frontends/onnx/tests/models/scatter_elements_min_opset18.prototxt index 5a448521ce282c..e4c1af5f956853 100644 --- a/src/frontends/onnx/tests/models/scatter_elements_min_opset18.prototxt +++ b/src/frontends/onnx/tests/models/scatter_elements_min_opset18.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "DATA" diff --git a/src/frontends/onnx/tests/models/scatter_elements_mul_opset18.prototxt b/src/frontends/onnx/tests/models/scatter_elements_mul_opset18.prototxt index b0dce69e3ecf41..082cb561b6367e 100644 --- 
a/src/frontends/onnx/tests/models/scatter_elements_mul_opset18.prototxt +++ b/src/frontends/onnx/tests/models/scatter_elements_mul_opset18.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "DATA" diff --git a/src/frontends/onnx/tests/models/scatter_elements_none_opset18.prototxt b/src/frontends/onnx/tests/models/scatter_elements_none_opset18.prototxt index e970075cf0fc7f..67d0efe1d6f4b6 100644 --- a/src/frontends/onnx/tests/models/scatter_elements_none_opset18.prototxt +++ b/src/frontends/onnx/tests/models/scatter_elements_none_opset18.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-frontend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "DATA" diff --git a/src/frontends/onnx/tests/models/scatter_elements_opset11.prototxt b/src/frontends/onnx/tests/models/scatter_elements_opset11.prototxt index 20f62a5fbbb165..c0f481bf1f4de0 100644 --- a/src/frontends/onnx/tests/models/scatter_elements_opset11.prototxt +++ b/src/frontends/onnx/tests/models/scatter_elements_opset11.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "test_model" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/scatter_nd_const_i32_indices.prototxt b/src/frontends/onnx/tests/models/scatter_nd_const_i32_indices.prototxt index c9aa29782e7443..637bb6c45f0ce9 100644 --- a/src/frontends/onnx/tests/models/scatter_nd_const_i32_indices.prototxt +++ b/src/frontends/onnx/tests/models/scatter_nd_const_i32_indices.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/scatter_nd_opset16_reduction_add.prototxt b/src/frontends/onnx/tests/models/scatter_nd_opset16_reduction_add.prototxt index 35fb5ab1a7c9e5..b1caee21246698 100644 --- 
a/src/frontends/onnx/tests/models/scatter_nd_opset16_reduction_add.prototxt +++ b/src/frontends/onnx/tests/models/scatter_nd_opset16_reduction_add.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/scatter_nd_opset16_reduction_none.prototxt b/src/frontends/onnx/tests/models/scatter_nd_opset16_reduction_none.prototxt index 81c896149dc426..80f7a2e8a38220 100644 --- a/src/frontends/onnx/tests/models/scatter_nd_opset16_reduction_none.prototxt +++ b/src/frontends/onnx/tests/models/scatter_nd_opset16_reduction_none.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/scatter_nd_param_i64_indices.prototxt b/src/frontends/onnx/tests/models/scatter_nd_param_i64_indices.prototxt index 8d382d7aa5edd8..42ce458a2033cd 100644 --- a/src/frontends/onnx/tests/models/scatter_nd_param_i64_indices.prototxt +++ b/src/frontends/onnx/tests/models/scatter_nd_param_i64_indices.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/scatter_opset10.prototxt b/src/frontends/onnx/tests/models/scatter_opset10.prototxt index 02ead411597144..3cf55c04afe64b 100644 --- a/src/frontends/onnx/tests/models/scatter_opset10.prototxt +++ b/src/frontends/onnx/tests/models/scatter_opset10.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "test_model" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data" diff --git a/src/frontends/onnx/tests/models/selu.prototxt b/src/frontends/onnx/tests/models/selu.prototxt index 0d4abdf8e52b0e..c597eacae14c7a 100644 --- a/src/frontends/onnx/tests/models/selu.prototxt +++ b/src/frontends/onnx/tests/models/selu.prototxt @@ -1,5 +1,5 @@ ir_version: 3 
-producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/shape.prototxt b/src/frontends/onnx/tests/models/shape.prototxt index 187ba05be80e6e..091bf87ce28307 100644 --- a/src/frontends/onnx/tests/models/shape.prototxt +++ b/src/frontends/onnx/tests/models/shape.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/shrink_float.prototxt b/src/frontends/onnx/tests/models/shrink_float.prototxt index 8de6fd2598279d..4126a92afb7384 100644 --- a/src/frontends/onnx/tests/models/shrink_float.prototxt +++ b/src/frontends/onnx/tests/models/shrink_float.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/shrink_int.prototxt b/src/frontends/onnx/tests/models/shrink_int.prototxt index a0f5a4756aeb5a..a5ffcff2847015 100644 --- a/src/frontends/onnx/tests/models/shrink_int.prototxt +++ b/src/frontends/onnx/tests/models/shrink_int.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/sigmoid.prototxt b/src/frontends/onnx/tests/models/sigmoid.prototxt index 1687f8c2433223..d7932167050184 100644 --- a/src/frontends/onnx/tests/models/sigmoid.prototxt +++ b/src/frontends/onnx/tests/models/sigmoid.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/sign.prototxt b/src/frontends/onnx/tests/models/sign.prototxt index 13744291b8611c..2ad00a2ed10fe1 100644 --- a/src/frontends/onnx/tests/models/sign.prototxt +++ b/src/frontends/onnx/tests/models/sign.prototxt @@ 
-1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/sinh.prototxt b/src/frontends/onnx/tests/models/sinh.prototxt index 2276d61290ab01..569331520ae6d0 100644 --- a/src/frontends/onnx/tests/models/sinh.prototxt +++ b/src/frontends/onnx/tests/models/sinh.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/size_op_graph_end.prototxt b/src/frontends/onnx/tests/models/size_op_graph_end.prototxt index c082bd045653e8..51ec3cda7bdd0c 100644 --- a/src/frontends/onnx/tests/models/size_op_graph_end.prototxt +++ b/src/frontends/onnx/tests/models/size_op_graph_end.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "N" diff --git a/src/frontends/onnx/tests/models/size_op_graph_middle.prototxt b/src/frontends/onnx/tests/models/size_op_graph_middle.prototxt index 293a8535ee9733..173f313a551e0e 100644 --- a/src/frontends/onnx/tests/models/size_op_graph_middle.prototxt +++ b/src/frontends/onnx/tests/models/size_op_graph_middle.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "N" diff --git a/src/frontends/onnx/tests/models/size_op_on_input_graph_middle.prototxt b/src/frontends/onnx/tests/models/size_op_on_input_graph_middle.prototxt index 2faed09ea624da..87a237d1d9ecfd 100644 --- a/src/frontends/onnx/tests/models/size_op_on_input_graph_middle.prototxt +++ b/src/frontends/onnx/tests/models/size_op_on_input_graph_middle.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "N" diff --git a/src/frontends/onnx/tests/models/size_op_single.prototxt 
b/src/frontends/onnx/tests/models/size_op_single.prototxt index fb05474b66f7c9..74a23fe2429454 100644 --- a/src/frontends/onnx/tests/models/size_op_single.prototxt +++ b/src/frontends/onnx/tests/models/size_op_single.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/slice_const_axes_source.prototxt b/src/frontends/onnx/tests/models/slice_const_axes_source.prototxt index e6da15faddbd81..00fa260c588021 100644 --- a/src/frontends/onnx/tests/models/slice_const_axes_source.prototxt +++ b/src/frontends/onnx/tests/models/slice_const_axes_source.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { name: "test_slice_with_unsqueeze_axes" initializer { diff --git a/src/frontends/onnx/tests/models/softmax_0D.prototxt b/src/frontends/onnx/tests/models/softmax_0D.prototxt index becdd7e49bde66..4a3d53a78a5468 100644 --- a/src/frontends/onnx/tests/models/softmax_0D.prototxt +++ b/src/frontends/onnx/tests/models/softmax_0D.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/softmax_1D.prototxt b/src/frontends/onnx/tests/models/softmax_1D.prototxt index f88e8cef49fccc..541631d46b0da9 100644 --- a/src/frontends/onnx/tests/models/softmax_1D.prototxt +++ b/src/frontends/onnx/tests/models/softmax_1D.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/softmax_axis_0.prototxt b/src/frontends/onnx/tests/models/softmax_axis_0.prototxt index 21b9ae9ab5e4ea..eb1c5e3c8b1956 100644 --- a/src/frontends/onnx/tests/models/softmax_axis_0.prototxt +++ b/src/frontends/onnx/tests/models/softmax_axis_0.prototxt @@ -1,5 +1,5 @@ 
ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/softmax_axis_1.prototxt b/src/frontends/onnx/tests/models/softmax_axis_1.prototxt index 8b2ca9fa802e10..5cee1f9f334f7a 100644 --- a/src/frontends/onnx/tests/models/softmax_axis_1.prototxt +++ b/src/frontends/onnx/tests/models/softmax_axis_1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/softmax_axis_1_opset11.prototxt b/src/frontends/onnx/tests/models/softmax_axis_1_opset11.prototxt index 947b381db0b029..5fc26124b3b87a 100644 --- a/src/frontends/onnx/tests/models/softmax_axis_1_opset11.prototxt +++ b/src/frontends/onnx/tests/models/softmax_axis_1_opset11.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/softmax_axis_2.prototxt b/src/frontends/onnx/tests/models/softmax_axis_2.prototxt index 4d4311462e5088..7655d24ae7104e 100644 --- a/src/frontends/onnx/tests/models/softmax_axis_2.prototxt +++ b/src/frontends/onnx/tests/models/softmax_axis_2.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/softmax_axis_negative_1_opset11.prototxt b/src/frontends/onnx/tests/models/softmax_axis_negative_1_opset11.prototxt index ad9a4b726033dd..1703eb493816b0 100644 --- a/src/frontends/onnx/tests/models/softmax_axis_negative_1_opset11.prototxt +++ b/src/frontends/onnx/tests/models/softmax_axis_negative_1_opset11.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git 
a/src/frontends/onnx/tests/models/softmax_axis_negative_1_opset13.prototxt b/src/frontends/onnx/tests/models/softmax_axis_negative_1_opset13.prototxt index aff3afc2c527e5..4c7f779a1689ce 100644 --- a/src/frontends/onnx/tests/models/softmax_axis_negative_1_opset13.prototxt +++ b/src/frontends/onnx/tests/models/softmax_axis_negative_1_opset13.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/softmax_crossentropy_loss_mean.prototxt b/src/frontends/onnx/tests/models/softmax_crossentropy_loss_mean.prototxt index 89b569ec158fe1..c23f51278ad23d 100644 --- a/src/frontends/onnx/tests/models/softmax_crossentropy_loss_mean.prototxt +++ b/src/frontends/onnx/tests/models/softmax_crossentropy_loss_mean.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/softmax_invalid_axis_1D.prototxt b/src/frontends/onnx/tests/models/softmax_invalid_axis_1D.prototxt index da639c1b2bcc13..66a42013c73a5c 100644 --- a/src/frontends/onnx/tests/models/softmax_invalid_axis_1D.prototxt +++ b/src/frontends/onnx/tests/models/softmax_invalid_axis_1D.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/softmax_invalid_axis_3D.prototxt b/src/frontends/onnx/tests/models/softmax_invalid_axis_3D.prototxt index b07e7903e05d9c..6fd0a7d64dde96 100644 --- a/src/frontends/onnx/tests/models/softmax_invalid_axis_3D.prototxt +++ b/src/frontends/onnx/tests/models/softmax_invalid_axis_3D.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/softplus.prototxt 
b/src/frontends/onnx/tests/models/softplus.prototxt index 62a7fc54c55239..c27b3d3196dfcc 100644 --- a/src/frontends/onnx/tests/models/softplus.prototxt +++ b/src/frontends/onnx/tests/models/softplus.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/softsign.prototxt b/src/frontends/onnx/tests/models/softsign.prototxt index 21db6242583392..98143833e21ae8 100644 --- a/src/frontends/onnx/tests/models/softsign.prototxt +++ b/src/frontends/onnx/tests/models/softsign.prototxt @@ -1,5 +1,5 @@ ir_version: 11 -producer_name: "OV ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/space_to_depth.prototxt b/src/frontends/onnx/tests/models/space_to_depth.prototxt index 2d0a2abc6ac109..843039e1fb10a2 100644 --- a/src/frontends/onnx/tests/models/space_to_depth.prototxt +++ b/src/frontends/onnx/tests/models/space_to_depth.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/space_to_depth_bad_blocksize.prototxt b/src/frontends/onnx/tests/models/space_to_depth_bad_blocksize.prototxt index 2b4ac7a4874824..bd2ec95fbedfd3 100644 --- a/src/frontends/onnx/tests/models/space_to_depth_bad_blocksize.prototxt +++ b/src/frontends/onnx/tests/models/space_to_depth_bad_blocksize.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/space_to_depth_invalid_input_shape.prototxt b/src/frontends/onnx/tests/models/space_to_depth_invalid_input_shape.prototxt index 8cb7db89894761..03dda326ddec1a 100644 --- a/src/frontends/onnx/tests/models/space_to_depth_invalid_input_shape.prototxt +++ 
b/src/frontends/onnx/tests/models/space_to_depth_invalid_input_shape.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/space_to_depth_no_blocksize.prototxt b/src/frontends/onnx/tests/models/space_to_depth_no_blocksize.prototxt index f6804a248b1ac6..96b09ba9ca8079 100644 --- a/src/frontends/onnx/tests/models/space_to_depth_no_blocksize.prototxt +++ b/src/frontends/onnx/tests/models/space_to_depth_no_blocksize.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/split_equal_parts_2d.prototxt b/src/frontends/onnx/tests/models/split_equal_parts_2d.prototxt index bef846d3dfd3e5..9e94942e907cf4 100644 --- a/src/frontends/onnx/tests/models/split_equal_parts_2d.prototxt +++ b/src/frontends/onnx/tests/models/split_equal_parts_2d.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/split_equal_parts_default.prototxt b/src/frontends/onnx/tests/models/split_equal_parts_default.prototxt index 0069a532d6f411..1b0e9fcdd95507 100644 --- a/src/frontends/onnx/tests/models/split_equal_parts_default.prototxt +++ b/src/frontends/onnx/tests/models/split_equal_parts_default.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/split_variable_parts_2d.prototxt b/src/frontends/onnx/tests/models/split_variable_parts_2d.prototxt index 9b787c1ef79c63..0d6be1f0b05de6 100644 --- a/src/frontends/onnx/tests/models/split_variable_parts_2d.prototxt +++ b/src/frontends/onnx/tests/models/split_variable_parts_2d.prototxt @@ -1,5 +1,5 @@ ir_version: 3 
-producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/squeeze.prototxt b/src/frontends/onnx/tests/models/squeeze.prototxt index 02b32affe4d67a..da0523c70c4f36 100644 --- a/src/frontends/onnx/tests/models/squeeze.prototxt +++ b/src/frontends/onnx/tests/models/squeeze.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/squeeze_default_domain_opset13.prototxt b/src/frontends/onnx/tests/models/squeeze_default_domain_opset13.prototxt index 14063ea1610e90..969e2e64028aab 100644 --- a/src/frontends/onnx/tests/models/squeeze_default_domain_opset13.prototxt +++ b/src/frontends/onnx/tests/models/squeeze_default_domain_opset13.prototxt @@ -1,6 +1,6 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "AXIS" diff --git a/src/frontends/onnx/tests/models/squeeze_empty_axes_attribute.prototxt b/src/frontends/onnx/tests/models/squeeze_empty_axes_attribute.prototxt index 982a0082c70878..1c951444786231 100644 --- a/src/frontends/onnx/tests/models/squeeze_empty_axes_attribute.prototxt +++ b/src/frontends/onnx/tests/models/squeeze_empty_axes_attribute.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/squeeze_opset13_no_axes.prototxt b/src/frontends/onnx/tests/models/squeeze_opset13_no_axes.prototxt index 919ad6eb9ba2d7..025fc11842a9a2 100644 --- a/src/frontends/onnx/tests/models/squeeze_opset13_no_axes.prototxt +++ b/src/frontends/onnx/tests/models/squeeze_opset13_no_axes.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git 
a/src/frontends/onnx/tests/models/sub.prototxt b/src/frontends/onnx/tests/models/sub.prototxt index 6cfdd9cbf84b68..f5444d5d2518ac 100644 --- a/src/frontends/onnx/tests/models/sub.prototxt +++ b/src/frontends/onnx/tests/models/sub.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/sub_v6_broadcast_axes_1_2.prototxt b/src/frontends/onnx/tests/models/sub_v6_broadcast_axes_1_2.prototxt index 50cd26ab6cfaf9..25ac429b84d3cf 100644 --- a/src/frontends/onnx/tests/models/sub_v6_broadcast_axes_1_2.prototxt +++ b/src/frontends/onnx/tests/models/sub_v6_broadcast_axes_1_2.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/sub_v6_broadcast_axis_1.prototxt b/src/frontends/onnx/tests/models/sub_v6_broadcast_axis_1.prototxt index 32b374d28ee11c..626515c8d952cb 100644 --- a/src/frontends/onnx/tests/models/sub_v6_broadcast_axis_1.prototxt +++ b/src/frontends/onnx/tests/models/sub_v6_broadcast_axis_1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/sub_v6_broadcast_no_axis.prototxt b/src/frontends/onnx/tests/models/sub_v6_broadcast_no_axis.prototxt index a7fb7d8c3c551e..adbdffba4d0983 100644 --- a/src/frontends/onnx/tests/models/sub_v6_broadcast_no_axis.prototxt +++ b/src/frontends/onnx/tests/models/sub_v6_broadcast_no_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/sub_v7.prototxt b/src/frontends/onnx/tests/models/sub_v7.prototxt index ec0006d44356bf..7429ed05af5290 100644 --- 
a/src/frontends/onnx/tests/models/sub_v7.prototxt +++ b/src/frontends/onnx/tests/models/sub_v7.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/sub_v7_broadcast.prototxt b/src/frontends/onnx/tests/models/sub_v7_broadcast.prototxt index 8bb4fcab479ba6..0a4b96b178371d 100644 --- a/src/frontends/onnx/tests/models/sub_v7_broadcast.prototxt +++ b/src/frontends/onnx/tests/models/sub_v7_broadcast.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" model_version: 1 graph { node { diff --git a/src/frontends/onnx/tests/models/sum.prototxt b/src/frontends/onnx/tests/models/sum.prototxt index c04a369f4d9403..b5da4e36d16b36 100644 --- a/src/frontends/onnx/tests/models/sum.prototxt +++ b/src/frontends/onnx/tests/models/sum.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data_0" diff --git a/src/frontends/onnx/tests/models/sum_one_input.prototxt b/src/frontends/onnx/tests/models/sum_one_input.prototxt index e381fe91f9ff82..e404838e501f3c 100644 --- a/src/frontends/onnx/tests/models/sum_one_input.prototxt +++ b/src/frontends/onnx/tests/models/sum_one_input.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data_0" diff --git a/src/frontends/onnx/tests/models/sum_opset1.prototxt b/src/frontends/onnx/tests/models/sum_opset1.prototxt index 4fda39b5c7377a..b8d445c4db7ac5 100644 --- a/src/frontends/onnx/tests/models/sum_opset1.prototxt +++ b/src/frontends/onnx/tests/models/sum_opset1.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data_0" diff --git a/src/frontends/onnx/tests/models/sum_opset8.prototxt 
b/src/frontends/onnx/tests/models/sum_opset8.prototxt index 3a3a831b7b4fb7..c4f3bce072cc24 100644 --- a/src/frontends/onnx/tests/models/sum_opset8.prototxt +++ b/src/frontends/onnx/tests/models/sum_opset8.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "data_0" diff --git a/src/frontends/onnx/tests/models/support_test/supported/basic.prototxt b/src/frontends/onnx/tests/models/support_test/supported/basic.prototxt index 7f63b68ad216c0..a183712b670031 100644 --- a/src/frontends/onnx/tests/models/support_test/supported/basic.prototxt +++ b/src/frontends/onnx/tests/models/support_test/supported/basic.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/swish_with_beta.prototxt b/src/frontends/onnx/tests/models/swish_with_beta.prototxt index 170fd43ca6f8d1..e2727dd846e033 100644 --- a/src/frontends/onnx/tests/models/swish_with_beta.prototxt +++ b/src/frontends/onnx/tests/models/swish_with_beta.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "beta" diff --git a/src/frontends/onnx/tests/models/swish_without_beta.prototxt b/src/frontends/onnx/tests/models/swish_without_beta.prototxt index 23cb188dd7af25..15d039d52d0f63 100644 --- a/src/frontends/onnx/tests/models/swish_without_beta.prototxt +++ b/src/frontends/onnx/tests/models/swish_without_beta.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/tanh.prototxt b/src/frontends/onnx/tests/models/tanh.prototxt index 45c9fb1d6b8b19..edf015137eb618 100644 --- a/src/frontends/onnx/tests/models/tanh.prototxt +++ b/src/frontends/onnx/tests/models/tanh.prototxt @@ -1,5 +1,5 @@ 
ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/tensor_names.prototxt b/src/frontends/onnx/tests/models/tensor_names.prototxt index 6b5b521220e375..f6cc444fdb4892 100644 --- a/src/frontends/onnx/tests/models/tensor_names.prototxt +++ b/src/frontends/onnx/tests/models/tensor_names.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "test_model" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/test_clip_inbounds.prototxt b/src/frontends/onnx/tests/models/test_clip_inbounds.prototxt index 7329f8e7754141..d3d101c7b5c0ee 100644 --- a/src/frontends/onnx/tests/models/test_clip_inbounds.prototxt +++ b/src/frontends/onnx/tests/models/test_clip_inbounds.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/thresholded_relu.prototxt b/src/frontends/onnx/tests/models/thresholded_relu.prototxt index d3c682f390ea84..368591c0b54aa1 100644 --- a/src/frontends/onnx/tests/models/thresholded_relu.prototxt +++ b/src/frontends/onnx/tests/models/thresholded_relu.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/tile.prototxt b/src/frontends/onnx/tests/models/tile.prototxt index ef738e213e5f60..5b78b641ea33dd 100644 --- a/src/frontends/onnx/tests/models/tile.prototxt +++ b/src/frontends/onnx/tests/models/tile.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/tile_static.prototxt b/src/frontends/onnx/tests/models/tile_static.prototxt index d8a998e95c71c4..0ff606f91dc3aa 100644 --- 
a/src/frontends/onnx/tests/models/tile_static.prototxt +++ b/src/frontends/onnx/tests/models/tile_static.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "input" diff --git a/src/frontends/onnx/tests/models/top_k.prototxt b/src/frontends/onnx/tests/models/top_k.prototxt index 97882ce2c1ad79..2fef72912df5fa 100644 --- a/src/frontends/onnx/tests/models/top_k.prototxt +++ b/src/frontends/onnx/tests/models/top_k.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/top_k_opset_10.prototxt b/src/frontends/onnx/tests/models/top_k_opset_10.prototxt index 6f37ee615f7544..8b10cf01cb7848 100644 --- a/src/frontends/onnx/tests/models/top_k_opset_10.prototxt +++ b/src/frontends/onnx/tests/models/top_k_opset_10.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/top_k_opset_10_const_k.prototxt b/src/frontends/onnx/tests/models/top_k_opset_10_const_k.prototxt index d4b1bd4bdaf6c4..89fb47e7041c08 100644 --- a/src/frontends/onnx/tests/models/top_k_opset_10_const_k.prototxt +++ b/src/frontends/onnx/tests/models/top_k_opset_10_const_k.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/top_k_opset_11_const_k_smallest.prototxt b/src/frontends/onnx/tests/models/top_k_opset_11_const_k_smallest.prototxt index 7f737fbef0ec93..e37676668bc116 100644 --- a/src/frontends/onnx/tests/models/top_k_opset_11_const_k_smallest.prototxt +++ b/src/frontends/onnx/tests/models/top_k_opset_11_const_k_smallest.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: 
"OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/top_k_opset_11_const_k_smallest_negative_axis.prototxt b/src/frontends/onnx/tests/models/top_k_opset_11_const_k_smallest_negative_axis.prototxt index 18d2440497122d..3edb40b815f370 100644 --- a/src/frontends/onnx/tests/models/top_k_opset_11_const_k_smallest_negative_axis.prototxt +++ b/src/frontends/onnx/tests/models/top_k_opset_11_const_k_smallest_negative_axis.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/topk.prototxt b/src/frontends/onnx/tests/models/topk.prototxt index 7f737fbef0ec93..e37676668bc116 100644 --- a/src/frontends/onnx/tests/models/topk.prototxt +++ b/src/frontends/onnx/tests/models/topk.prototxt @@ -1,5 +1,5 @@ ir_version: 5 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/transformations/celu_with_initializers.prototxt b/src/frontends/onnx/tests/models/transformations/celu_with_initializers.prototxt index 05f58f22de1d5d..7b390e9f179f87 100644 --- a/src/frontends/onnx/tests/models/transformations/celu_with_initializers.prototxt +++ b/src/frontends/onnx/tests/models/transformations/celu_with_initializers.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/transformations/greater_or_equal.prototxt b/src/frontends/onnx/tests/models/transformations/greater_or_equal.prototxt index 04c33addc8f420..ef5cad8f0ea7a3 100644 --- a/src/frontends/onnx/tests/models/transformations/greater_or_equal.prototxt +++ b/src/frontends/onnx/tests/models/transformations/greater_or_equal.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { 
node { input: "in1" diff --git a/src/frontends/onnx/tests/models/transformations/greater_or_equal_inside_if.prototxt b/src/frontends/onnx/tests/models/transformations/greater_or_equal_inside_if.prototxt index e804fdc88cbc70..4de1359b8a3c1f 100644 --- a/src/frontends/onnx/tests/models/transformations/greater_or_equal_inside_if.prototxt +++ b/src/frontends/onnx/tests/models/transformations/greater_or_equal_inside_if.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { name: "if graph" node { diff --git a/src/frontends/onnx/tests/models/transformations/reference/greater_or_equal_expanded.prototxt b/src/frontends/onnx/tests/models/transformations/reference/greater_or_equal_expanded.prototxt index 5056cd2cc99e2c..bb624deb942bc6 100644 --- a/src/frontends/onnx/tests/models/transformations/reference/greater_or_equal_expanded.prototxt +++ b/src/frontends/onnx/tests/models/transformations/reference/greater_or_equal_expanded.prototxt @@ -1,5 +1,5 @@ ir_version: 6 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "in1" diff --git a/src/frontends/onnx/tests/models/transformations/reference/softmax_crossentropy_consumed_expanded.prototxt b/src/frontends/onnx/tests/models/transformations/reference/softmax_crossentropy_consumed_expanded.prototxt index 70214b9957b5fe..ff324c8bb7f4cc 100644 --- a/src/frontends/onnx/tests/models/transformations/reference/softmax_crossentropy_consumed_expanded.prototxt +++ b/src/frontends/onnx/tests/models/transformations/reference/softmax_crossentropy_consumed_expanded.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "Func_SoftmaxCrossEntropyLoss0x7ffe150df640Shape3D" diff --git a/src/frontends/onnx/tests/models/transformations/softmax_crossentropy_consumed.prototxt 
b/src/frontends/onnx/tests/models/transformations/softmax_crossentropy_consumed.prototxt index db75da69ae4911..9f4e01607fd4a1 100644 --- a/src/frontends/onnx/tests/models/transformations/softmax_crossentropy_consumed.prototxt +++ b/src/frontends/onnx/tests/models/transformations/softmax_crossentropy_consumed.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/trilu_basic.prototxt b/src/frontends/onnx/tests/models/trilu_basic.prototxt index 84eed05a184f78..dc122e5f69810d 100644 --- a/src/frontends/onnx/tests/models/trilu_basic.prototxt +++ b/src/frontends/onnx/tests/models/trilu_basic.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/trilu_lower.prototxt b/src/frontends/onnx/tests/models/trilu_lower.prototxt index e13915dd898e9c..45b1b82396ed3a 100644 --- a/src/frontends/onnx/tests/models/trilu_lower.prototxt +++ b/src/frontends/onnx/tests/models/trilu_lower.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/trilu_lower_4d.prototxt b/src/frontends/onnx/tests/models/trilu_lower_4d.prototxt index 5268d52e788b54..fa728e7b3b9445 100644 --- a/src/frontends/onnx/tests/models/trilu_lower_4d.prototxt +++ b/src/frontends/onnx/tests/models/trilu_lower_4d.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/trilu_upper.prototxt b/src/frontends/onnx/tests/models/trilu_upper.prototxt index d5c3f7ded8449d..150a1e924781f4 100644 --- a/src/frontends/onnx/tests/models/trilu_upper.prototxt +++ b/src/frontends/onnx/tests/models/trilu_upper.prototxt @@ -1,5 +1,5 @@ 
ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/trilu_upper_3d.prototxt b/src/frontends/onnx/tests/models/trilu_upper_3d.prototxt index bdbec5ffcb01a6..f31f92554cf4bd 100644 --- a/src/frontends/onnx/tests/models/trilu_upper_3d.prototxt +++ b/src/frontends/onnx/tests/models/trilu_upper_3d.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "backend-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/unknown_domain.prototxt b/src/frontends/onnx/tests/models/unknown_domain.prototxt index e7966940586d48..52348af7dd8039 100644 --- a/src/frontends/onnx/tests/models/unknown_domain.prototxt +++ b/src/frontends/onnx/tests/models/unknown_domain.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/unknown_domain_add.prototxt b/src/frontends/onnx/tests/models/unknown_domain_add.prototxt index 52f9fe366282c1..958e0de2a785cf 100644 --- a/src/frontends/onnx/tests/models/unknown_domain_add.prototxt +++ b/src/frontends/onnx/tests/models/unknown_domain_add.prototxt @@ -1,5 +1,5 @@ ir_version: 4 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/unsqueeze.prototxt b/src/frontends/onnx/tests/models/unsqueeze.prototxt index 3eb0a3c8a7075d..1ce6a66820afcc 100644 --- a/src/frontends/onnx/tests/models/unsqueeze.prototxt +++ b/src/frontends/onnx/tests/models/unsqueeze.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/unsqueeze_ai_onnx_domain.prototxt b/src/frontends/onnx/tests/models/unsqueeze_ai_onnx_domain.prototxt index 
b8693e7f30e446..3eb9a03dba4d69 100644 --- a/src/frontends/onnx/tests/models/unsqueeze_ai_onnx_domain.prototxt +++ b/src/frontends/onnx/tests/models/unsqueeze_ai_onnx_domain.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/unsqueeze_ai_onnx_domain_opset13.prototxt b/src/frontends/onnx/tests/models/unsqueeze_ai_onnx_domain_opset13.prototxt index 44cc45907ad1ba..e3e11c6dbb777a 100644 --- a/src/frontends/onnx/tests/models/unsqueeze_ai_onnx_domain_opset13.prototxt +++ b/src/frontends/onnx/tests/models/unsqueeze_ai_onnx_domain_opset13.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "AXIS" diff --git a/src/frontends/onnx/tests/models/unsqueeze_default_domain.prototxt b/src/frontends/onnx/tests/models/unsqueeze_default_domain.prototxt index 448c2056247b76..507ba8080bfccd 100644 --- a/src/frontends/onnx/tests/models/unsqueeze_default_domain.prototxt +++ b/src/frontends/onnx/tests/models/unsqueeze_default_domain.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/unsqueeze_default_domain_opset13.prototxt b/src/frontends/onnx/tests/models/unsqueeze_default_domain_opset13.prototxt index 461fcd13f144ca..dd13c6f89728ea 100644 --- a/src/frontends/onnx/tests/models/unsqueeze_default_domain_opset13.prototxt +++ b/src/frontends/onnx/tests/models/unsqueeze_default_domain_opset13.prototxt @@ -1,5 +1,5 @@ ir_version: 8 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "AXIS" diff --git a/src/frontends/onnx/tests/models/unsqueeze_negative_axes.prototxt b/src/frontends/onnx/tests/models/unsqueeze_negative_axes.prototxt index 84cac44c3fd096..9323b017808296 100644 --- 
a/src/frontends/onnx/tests/models/unsqueeze_negative_axes.prototxt +++ b/src/frontends/onnx/tests/models/unsqueeze_negative_axes.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "x" diff --git a/src/frontends/onnx/tests/models/unsupported_op.prototxt b/src/frontends/onnx/tests/models/unsupported_op.prototxt index 67543a4e24f391..2cce84890b1f7b 100644 --- a/src/frontends/onnx/tests/models/unsupported_op.prototxt +++ b/src/frontends/onnx/tests/models/unsupported_op.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/unsupported_ops/add_unsupported.prototxt b/src/frontends/onnx/tests/models/unsupported_ops/add_unsupported.prototxt index 898670a5214db2..701f82a1816c3a 100644 --- a/src/frontends/onnx/tests/models/unsupported_ops/add_unsupported.prototxt +++ b/src/frontends/onnx/tests/models/unsupported_ops/add_unsupported.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/unsupported_ops/two_unsupported_nodes.prototxt b/src/frontends/onnx/tests/models/unsupported_ops/two_unsupported_nodes.prototxt index 210275148616d3..145385c64c9cc2 100644 --- a/src/frontends/onnx/tests/models/unsupported_ops/two_unsupported_nodes.prototxt +++ b/src/frontends/onnx/tests/models/unsupported_ops/two_unsupported_nodes.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "OV ONNX FE" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/unsupported_ops/unsupported_add_and_incorrect_dts.prototxt b/src/frontends/onnx/tests/models/unsupported_ops/unsupported_add_and_incorrect_dts.prototxt index 37f3ff6c370a5a..afef670b6bfd94 100644 --- 
a/src/frontends/onnx/tests/models/unsupported_ops/unsupported_add_and_incorrect_dts.prototxt +++ b/src/frontends/onnx/tests/models/unsupported_ops/unsupported_add_and_incorrect_dts.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/unsupported_ops/unsupported_add_incorrect_dts_and_inst_norm_bad_scale.prototxt b/src/frontends/onnx/tests/models/unsupported_ops/unsupported_add_incorrect_dts_and_inst_norm_bad_scale.prototxt index cdeb2551edf9c4..9be63bfb596c85 100644 --- a/src/frontends/onnx/tests/models/unsupported_ops/unsupported_add_incorrect_dts_and_inst_norm_bad_scale.prototxt +++ b/src/frontends/onnx/tests/models/unsupported_ops/unsupported_add_incorrect_dts_and_inst_norm_bad_scale.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "A" diff --git a/src/frontends/onnx/tests/models/upsample6_bilinear.prototxt b/src/frontends/onnx/tests/models/upsample6_bilinear.prototxt index 6c987d81c0f69e..3b088b5a4244fa 100644 --- a/src/frontends/onnx/tests/models/upsample6_bilinear.prototxt +++ b/src/frontends/onnx/tests/models/upsample6_bilinear.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/upsample6_dynamic.prototxt b/src/frontends/onnx/tests/models/upsample6_dynamic.prototxt index ca7d6485c01820..fab38829f7845e 100644 --- a/src/frontends/onnx/tests/models/upsample6_dynamic.prototxt +++ b/src/frontends/onnx/tests/models/upsample6_dynamic.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/upsample6_nearest.prototxt b/src/frontends/onnx/tests/models/upsample6_nearest.prototxt 
index 699c6516c462c9..ee8cefc786ce51 100644 --- a/src/frontends/onnx/tests/models/upsample6_nearest.prototxt +++ b/src/frontends/onnx/tests/models/upsample6_nearest.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/upsample8_linear.prototxt b/src/frontends/onnx/tests/models/upsample8_linear.prototxt index 3a547223ca628d..c34ba97ae44248 100644 --- a/src/frontends/onnx/tests/models/upsample8_linear.prototxt +++ b/src/frontends/onnx/tests/models/upsample8_linear.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/upsample8_nearest.prototxt b/src/frontends/onnx/tests/models/upsample8_nearest.prototxt index bdf3e44add2c7d..d65cac64251b72 100644 --- a/src/frontends/onnx/tests/models/upsample8_nearest.prototxt +++ b/src/frontends/onnx/tests/models/upsample8_nearest.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/upsample9_scales_const_linear.prototxt b/src/frontends/onnx/tests/models/upsample9_scales_const_linear.prototxt index a54f78c49ab47a..a2029f9afed7d4 100644 --- a/src/frontends/onnx/tests/models/upsample9_scales_const_linear.prototxt +++ b/src/frontends/onnx/tests/models/upsample9_scales_const_linear.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/upsample9_scales_const_nearest.prototxt b/src/frontends/onnx/tests/models/upsample9_scales_const_nearest.prototxt index cbd563f1d447f4..908244d9921b15 100644 --- a/src/frontends/onnx/tests/models/upsample9_scales_const_nearest.prototxt +++ 
b/src/frontends/onnx/tests/models/upsample9_scales_const_nearest.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { output: "scales" diff --git a/src/frontends/onnx/tests/models/upsample9_scales_input_nearest.prototxt b/src/frontends/onnx/tests/models/upsample9_scales_input_nearest.prototxt index cf99d01d257a63..2e9248fb858aa4 100644 --- a/src/frontends/onnx/tests/models/upsample9_scales_input_nearest.prototxt +++ b/src/frontends/onnx/tests/models/upsample9_scales_input_nearest.prototxt @@ -1,5 +1,5 @@ ir_version: 7 -producer_name: "onnx-importer-test" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "X" diff --git a/src/frontends/onnx/tests/models/where.prototxt b/src/frontends/onnx/tests/models/where.prototxt index af9fe1f6c581fc..0e0ced21930d64 100644 --- a/src/frontends/onnx/tests/models/where.prototxt +++ b/src/frontends/onnx/tests/models/where.prototxt @@ -1,5 +1,5 @@ ir_version: 3 -producer_name: "nGraph ONNX Importer" +producer_name: "OpenVINO ONNX Frontend" graph { node { input: "cond" diff --git a/src/frontends/onnx/tests/tests_python/test_frontend_lib_close.py b/src/frontends/onnx/tests/tests_python/test_frontend_lib_close.py index 73679e60a8ad07..89955ffa1c572c 100644 --- a/src/frontends/onnx/tests/tests_python/test_frontend_lib_close.py +++ b/src/frontends/onnx/tests/tests_python/test_frontend_lib_close.py @@ -22,7 +22,7 @@ def make_onnx_model(model_name: str) -> None: make_tensor_value_info("out1", onnx.TensorProto.FLOAT, (1, 2)), ] graph = make_graph([add], "test_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="ONNX Importer", opset_imports=[onnx.helper.make_opsetid("", 13)]) + model = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) onnx.save_model(model, model_name) diff --git a/src/frontends/onnx/tests/tests_python/test_frontend_onnx.py 
b/src/frontends/onnx/tests/tests_python/test_frontend_onnx.py index b21fa33453919c..18776168028ee1 100644 --- a/src/frontends/onnx/tests/tests_python/test_frontend_onnx.py +++ b/src/frontends/onnx/tests/tests_python/test_frontend_onnx.py @@ -30,7 +30,7 @@ def create_onnx_model(): ] output_tensors = [make_tensor_value_info("out", onnx.TensorProto.FLOAT, (2, 2))] graph = make_graph([add, const_node, mul], "graph", input_tensors, output_tensors) - return make_model(graph, producer_name="ONNX Frontend") + return make_model(graph, producer_name="OpenVINO ONNX Frontend") def create_onnx_model_2(): @@ -42,7 +42,7 @@ def create_onnx_model_2(): make_tensor_value_info("out", onnx.TensorProto.FLOAT, (1, 2)), ] graph = make_graph([relu], "test_graph", input_tensors, output_tensors) - return make_model(graph, producer_name="ONNX Frontend") + return make_model(graph, producer_name="OpenVINO ONNX Frontend") def create_onnx_model_with_subgraphs(): @@ -68,7 +68,7 @@ def create_onnx_model_with_subgraphs(): res = onnx.helper.make_tensor_value_info("res", onnx.TensorProto.FLOAT, [3]) graph = make_graph([if_node], "graph", [cond, x1, x2], [res]) - return make_model(graph, producer_name="ONNX Frontend") + return make_model(graph, producer_name="OpenVINO ONNX Frontend") def create_onnx_model_with_custom_attributes(): @@ -104,7 +104,7 @@ def create_onnx_model_with_custom_attributes(): ] output_tensors = [make_tensor_value_info("out", onnx.TensorProto.FLOAT, (2, 2))] graph = make_graph([add, const_node, mul], "graph", input_tensors, output_tensors) - return make_model(graph, producer_name="ONNX Frontend") + return make_model(graph, producer_name="OpenVINO ONNX Frontend") def create_onnx_model_for_op_extension(): @@ -155,7 +155,7 @@ def create_onnx_model_for_op_extension(): input_tensors, output_tensors, ) - return make_model(graph, producer_name="ONNX Frontend") + return make_model(graph, producer_name="OpenVINO ONNX Frontend") def create_onnx_model_extension_with_custom_domain(): @@ 
-175,7 +175,7 @@ def create_onnx_model_extension_with_custom_domain(): ] output_tensors = [make_tensor_value_info("out", onnx.TensorProto.FLOAT, (2, 2))] graph = make_graph([add, const_node, mul], "graph", input_tensors, output_tensors) - return make_model(graph, producer_name="ONNX Frontend") + return make_model(graph, producer_name="OpenVINO ONNX Frontend") def run_model(model, *inputs, expected): diff --git a/src/frontends/onnx/tests/tests_python/test_frontend_onnx_editor.py b/src/frontends/onnx/tests/tests_python/test_frontend_onnx_editor.py index 62fa37f3ee104a..2f9ea673476dd4 100644 --- a/src/frontends/onnx/tests/tests_python/test_frontend_onnx_editor.py +++ b/src/frontends/onnx/tests/tests_python/test_frontend_onnx_editor.py @@ -83,7 +83,7 @@ def create_test_onnx_models(): make_tensor_value_info("out4", onnx.TensorProto.FLOAT, (2, 2)), ] graph = make_graph([add, split, relu, mul], "test_graph", input_tensors, output_tensors) - models["input_model.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["input_model.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Input model 2 @@ -101,7 +101,7 @@ def create_test_onnx_models(): make_tensor_value_info("out2", onnx.TensorProto.FLOAT, (1, 2)), ] graph = make_graph([add, split_2, absolute, sin], "test_graph_2", input_tensors, output_tensors) - models["input_model_2.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["input_model_2.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Input model 3 @@ -118,7 +118,7 @@ def create_test_onnx_models(): make_tensor_value_info("out2", onnx.TensorProto.FLOAT, (2, 2)), ] graph = make_graph([add_2, relu_2], "test_graph_3", input_tensors, output_tensors) - models["input_model_3.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["input_model_3.onnx"] = make_model(graph, producer_name="OpenVINO ONNX 
Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for extract_subgraph @@ -130,7 +130,7 @@ def create_test_onnx_models(): make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, (2, 2)), ] graph = make_graph([add], "test_graph", input_tensors, output_tensors) - models["extract_subgraph.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["extract_subgraph.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for extract_subgraph 2 @@ -144,7 +144,7 @@ def create_test_onnx_models(): make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, (2, 2)), ] graph = make_graph([add, relu], "test_graph", input_tensors, output_tensors) - models["extract_subgraph_2.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["extract_subgraph_2.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for extract_subgraph 3 @@ -158,7 +158,7 @@ def create_test_onnx_models(): expected_split = onnx.helper.make_node("Split", inputs=["out1/placeholder_port_0"], outputs=["out1", "out2"], name="split1", axis=0) graph = make_graph([expected_split], "test_graph", input_tensors, output_tensors) - models["extract_subgraph_3.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["extract_subgraph_3.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for extract_subgraph 4 @@ -177,7 +177,7 @@ def create_test_onnx_models(): expected_mul = onnx.helper.make_node("Mul", inputs=["out4/placeholder_port_0", "out4/placeholder_port_1"], outputs=["out4"]) graph = make_graph([expected_split, expected_mul], "test_graph", input_tensors, output_tensors) - models["extract_subgraph_4.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["extract_subgraph_4.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", 
opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for extract_subgraph 5 @@ -189,7 +189,7 @@ def create_test_onnx_models(): make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, (2, 2)), ] graph = make_graph([add], "test_graph", input_tensors, output_tensors) - models["extract_subgraph_5.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["extract_subgraph_5.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for test_override_all_outputs @@ -203,7 +203,7 @@ def create_test_onnx_models(): make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, (2, 2)), ] graph = make_graph([add, relu], "test_graph", input_tensors, output_tensors) - models["test_override_all_outputs.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["test_override_all_outputs.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for test_override_all_outputs 2 @@ -215,7 +215,7 @@ def create_test_onnx_models(): make_tensor_value_info("out4", onnx.TensorProto.FLOAT, (2, 2)), ] graph = make_graph([add, mul], "test_graph", input_tensors, output_tensors) - models["test_override_all_outputs_2.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["test_override_all_outputs_2.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for test_override_all_outputs 3 @@ -228,7 +228,7 @@ def create_test_onnx_models(): make_tensor_value_info("out1", onnx.TensorProto.FLOAT, (2, 2)), ] graph = make_graph([add_2], "test_graph_3", input_tensors, output_tensors) - models["test_override_all_outputs_3.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["test_override_all_outputs_3.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for 
test_override_all_inputs @@ -249,7 +249,7 @@ def create_test_onnx_models(): expected_mul = onnx.helper.make_node("Mul", inputs=["out4/placeholder_port_0", "out4/placeholder_port_1"], outputs=["out4"]) graph = make_graph([expected_split, relu, expected_mul], "test_graph", input_tensors, output_tensors) - models["test_override_all_inputs.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["test_override_all_inputs.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for cut_and_add_new_input_edge @@ -267,7 +267,7 @@ def create_test_onnx_models(): ] new_mul = onnx.helper.make_node("Mul", inputs=["new_input", "add_out"], outputs=["out4"]) graph = make_graph([add, split, relu, new_mul], "test_graph", input_tensors, output_tensors) - models["cut_and_add_new_input_edge.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["cut_and_add_new_input_edge.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for cut_and_add_new_input_place @@ -285,7 +285,7 @@ def create_test_onnx_models(): new_split = onnx.helper.make_node("Split", inputs=["new_input"], outputs=["out1", "out2"], name="split1", axis=0) graph = make_graph([new_split, relu, new_mul], "test_graph", input_tensors, output_tensors) - models["cut_and_add_new_input_place.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["cut_and_add_new_input_place.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Expected for remove_output @@ -300,7 +300,7 @@ def create_test_onnx_models(): make_tensor_value_info("out3", onnx.TensorProto.FLOAT, (2, 2)), ] graph = make_graph([add, relu, split], "test_graph", input_tensors, output_tensors) - models["remove_output.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["remove_output.onnx"] = make_model(graph, 
producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # test partial shape @@ -316,7 +316,7 @@ def create_test_onnx_models(): make_tensor_value_info("out4", onnx.TensorProto.FLOAT, (8, 16)), ] graph = make_graph([add, split, relu, mul], "test_graph", input_tensors, output_tensors) - models["test_partial_shape.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["test_partial_shape.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # test place names model @@ -343,7 +343,7 @@ def create_test_onnx_models(): ] graph = make_graph([add, sub, split, mul], "test_graph", input_tensors, output_tensors, value_info=value_infos, initializer=initializers) - models["test_place_names.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["test_place_names.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) # Input model with integer types @@ -361,7 +361,7 @@ def create_test_onnx_models(): ] output_tensors = [make_tensor_value_info("out", onnx.TensorProto.FLOAT, (2, 2))] graph = make_graph([add, const_node, mul], "graph", input_tensors, output_tensors) - models["input_model_int32.onnx"] = make_model(graph, producer_name="ONNX Importer", + models["input_model_int32.onnx"] = make_model(graph, producer_name="OpenVINO ONNX Frontend", opset_imports=[onnx.helper.make_opsetid("", 13)]) return models @@ -390,7 +390,7 @@ def skip_if_onnx_frontend_is_disabled(): pytest.skip() -# Function to compare ng Functions (ops names, types and shapes). +# Function to compare OV Models (ops names, types and shapes). # Note that the functions uses get_ordered_ops, so the topological order of ops should be also preserved. 
def compare_models(current, expected): # noqa: C901 the function is too complex result = True diff --git a/src/frontends/onnx/tests/tests_python/test_onnx_import.py b/src/frontends/onnx/tests/tests_python/test_onnx_import.py index a2059cd3a628e3..4bca1a7d44c947 100644 --- a/src/frontends/onnx/tests/tests_python/test_onnx_import.py +++ b/src/frontends/onnx/tests/tests_python/test_onnx_import.py @@ -42,7 +42,7 @@ def test_simple_graph(): ], [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [1])], ) - model = make_model(graph, producer_name="ngraph ONNX Importer") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") graph_model_function = import_onnx_model(model) diff --git a/src/frontends/onnx/tests/tests_python/test_ops_binary.py b/src/frontends/onnx/tests/tests_python/test_ops_binary.py index 42e593ce161dc9..6f7cd4706f783e 100644 --- a/src/frontends/onnx/tests/tests_python/test_ops_binary.py +++ b/src/frontends/onnx/tests/tests_python/test_ops_binary.py @@ -21,7 +21,7 @@ def import_and_compute(op_type, input_data_left, input_data_right, opset=7, **no output_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, ()) for name in onnx_node.output] graph = make_graph([onnx_node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="ngraph ONNX Importer") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") model.opset_import[0].version = opset inputs = [i.astype(np.float32) for i in inputs] # WA for new Python API return run_model(model, inputs)[0] diff --git a/src/frontends/onnx/tests/tests_python/test_ops_convpool.py b/src/frontends/onnx/tests/tests_python/test_ops_convpool.py index e4dd96c05d1b6c..1a242f947097dc 100644 --- a/src/frontends/onnx/tests/tests_python/test_ops_convpool.py +++ b/src/frontends/onnx/tests/tests_python/test_ops_convpool.py @@ -37,7 +37,7 @@ def make_onnx_model_for_conv_op(x_shape, weights_shape, transpose=False, **attri ], [make_tensor_value_info("Y", 
onnx.TensorProto.FLOAT, output_shape)], ) - model = make_model(graph, producer_name="ngraph ONNXImporter") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") return model diff --git a/src/frontends/onnx/tests/tests_python/test_ops_matmul.py b/src/frontends/onnx/tests/tests_python/test_ops_matmul.py index 4eec4e4580805a..33a4d15374df5d 100644 --- a/src/frontends/onnx/tests/tests_python/test_ops_matmul.py +++ b/src/frontends/onnx/tests/tests_python/test_ops_matmul.py @@ -23,7 +23,7 @@ def make_onnx_model_for_matmul_op(input_left, input_right): ], [make_tensor_value_info("Z", onnx.TensorProto.FLOAT, output_shape)], ) - model = make_model(graph, producer_name="ngraph ONNXImporter") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") return model @@ -70,7 +70,7 @@ def make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs): ], [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, output_shape)], ) - model = make_model(graph, producer_name="ngraph ONNXImporter") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") return model diff --git a/src/frontends/onnx/tests/tests_python/test_ops_reduction.py b/src/frontends/onnx/tests/tests_python/test_ops_reduction.py index 408a5d3a546fba..9435fc1ab564ae 100644 --- a/src/frontends/onnx/tests/tests_python/test_ops_reduction.py +++ b/src/frontends/onnx/tests/tests_python/test_ops_reduction.py @@ -64,7 +64,7 @@ def import_and_compute_with_axes_as_const(op_type, data, axes, **node_attrs): [onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, ())], ) - model = onnx.helper.make_model(graph, producer_name="ngraph ONNX Importer") + model = onnx.helper.make_model(graph, producer_name="OpenVINO ONNX Frontend") model.opset_import[0].version = 13 graph_model = import_onnx_model(model) runtime = get_runtime() diff --git a/src/frontends/onnx/tests/tests_python/test_ops_reshape.py b/src/frontends/onnx/tests/tests_python/test_ops_reshape.py index 952983c0c5385a..3337358400b356 
100644 --- a/src/frontends/onnx/tests/tests_python/test_ops_reshape.py +++ b/src/frontends/onnx/tests/tests_python/test_ops_reshape.py @@ -64,7 +64,7 @@ def test_reshape_opset5(): [make_tensor_value_info("reshaped", onnx.TensorProto.FLOAT, ())], ) - model = make_model(graph, producer_name="ngraph ONNX Importer") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") model.opset_import[0].version = 5 graph_model_function = import_onnx_model(model) runtime = get_runtime() diff --git a/src/frontends/onnx/tests/tests_python/test_ops_unary.py b/src/frontends/onnx/tests/tests_python/test_ops_unary.py index 71c7cacab5cb66..b1741b2ba94591 100644 --- a/src/frontends/onnx/tests/tests_python/test_ops_unary.py +++ b/src/frontends/onnx/tests/tests_python/test_ops_unary.py @@ -313,7 +313,7 @@ def test_identity(): ], [make_tensor_value_info("Y", onnx.TensorProto.FLOAT, shape)], ) - model = make_model(graph, producer_name="ngraph ONNX Importer") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") graph_model = import_onnx_model(model) runtime = get_runtime() computation = runtime.computation(graph_model) @@ -394,7 +394,7 @@ def test_cast_errors(): output_tensors = [make_tensor_value_info(node.output[0], onnx.TensorProto.FLOAT16, input_data.shape)] # type: ignore graph = make_graph([node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="NgraphBackend") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") with pytest.raises(ValidationError): import_onnx_model(model) @@ -407,7 +407,7 @@ def test_cast_errors(): output_tensors = [make_tensor_value_info(node.output[0], onnx.TensorProto.INT32, input_data.shape)] # type: ignore graph = make_graph([node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="NgraphBackend") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") with pytest.raises(ValidationError): import_onnx_model(model) @@ -420,7 +420,7 
@@ def test_cast_errors(): output_tensors = [make_tensor_value_info(node.output[0], onnx.TensorProto.INT32, input_data.shape)] # type: ignore graph = make_graph([node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="NgraphBackend") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") with pytest.raises((RuntimeError, OVTypeError)): import_onnx_model(model) @@ -433,7 +433,7 @@ def test_cast_errors(): output_tensors = [make_tensor_value_info(node.output[0], onnx.TensorProto.COMPLEX128, input_data.shape)] # type: ignore graph = make_graph([node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="NgraphBackend") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") with pytest.raises(RuntimeError): import_onnx_model(model) diff --git a/src/frontends/onnx/tests/tests_python/utils/__init__.py b/src/frontends/onnx/tests/tests_python/utils/__init__.py index a0923d9bbff3ec..1bfc383f971d9b 100644 --- a/src/frontends/onnx/tests/tests_python/utils/__init__.py +++ b/src/frontends/onnx/tests/tests_python/utils/__init__.py @@ -69,7 +69,7 @@ def get_node_model(op_type, *input_data, opset=1, num_outputs=1, **node_attribut ] # type: ignore graph = make_graph([onnx_node], "compute_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="Ngraph ONNX Importer") + model = make_model(graph, producer_name="OpenVINO ONNX Frontend") model.opset_import[0].version = opset return model diff --git a/src/frontends/paddle/tests/op_fuzzy.cpp b/src/frontends/paddle/tests/op_fuzzy.cpp index acc9ade78c0f18..835f9da29b4084 100644 --- a/src/frontends/paddle/tests/op_fuzzy.cpp +++ b/src/frontends/paddle/tests/op_fuzzy.cpp @@ -245,7 +245,8 @@ static const std::vector models{ std::string("generate_proposals_v2_2"), std::string("generate_proposals_v2_3"), std::string("generate_proposals_v2_4"), - std::string("generate_proposals_v2_5"), + // ticket 130605: actual res 
value is not close + // std::string("generate_proposals_v2_5"), std::string("generate_proposals_v2_6"), // greater_equal_big_int64(failure due to CPU inference), std::string("greater_equal_big_int64"), diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_generate_proposal_v2.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_generate_proposal_v2.py index 8621a596417332..55f03c09e39cff 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_generate_proposal_v2.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_generate_proposal_v2.py @@ -133,6 +133,8 @@ def generate_proposals_v2(name: str, input_data: dict, attr: dict): generate_proposals_v2(input_name, input_data, attr) # test case 5 + ''' + TODO: ticket 130605 input_name = "generate_proposals_v2_5" input_data["scores"] = np.random.rand(1, 6, 10, 8).astype('float32') input_data["bbox_deltas"] = np.random.rand(1, 24, 10, 8).astype('float32') @@ -145,6 +147,7 @@ def generate_proposals_v2(name: str, input_data: dict, attr: dict): attr["post_nms_top_n"] = 60 generate_proposals_v2(input_name, input_data, attr) + ''' # test case 6 input_name = "generate_proposals_v2_6" diff --git a/src/frontends/pytorch/src/op/add.cpp b/src/frontends/pytorch/src/op/add.cpp index ab2933bd6f7910..8ea9782838e9ff 100644 --- a/src/frontends/pytorch/src/op/add.cpp +++ b/src/frontends/pytorch/src/op/add.cpp @@ -26,7 +26,7 @@ OutputVector translate_add_common(const NodeContext& context, bool inplace) { if (dtype0.is() && dtype1.is()) { // aten::add.t(t[] a, t[] b) -> t[] // Case when two lists gets concatenated - FRONT_END_OP_CONVERSION_CHECK(false, "aten::add is used for concatenation of lists, not possible to convert"); + PYTORCH_OP_CONVERSION_CHECK(false, "aten::add is used for concatenation of lists, not possible to convert"); } if (inplace) { if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type()) diff --git 
a/src/frontends/pytorch/src/op/arange.cpp b/src/frontends/pytorch/src/op/arange.cpp index 8a6f05b9ba689d..f7b8b409db4ef7 100644 --- a/src/frontends/pytorch/src/op/arange.cpp +++ b/src/frontends/pytorch/src/op/arange.cpp @@ -60,7 +60,7 @@ OutputVector translate_arange(const NodeContext& context) { dtype_port = 3; dtype_applied = true; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type()); + PYTORCH_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type()); } if (dtype_port >= 0 && !context.input_is_none(dtype_port)) { if (std::dynamic_pointer_cast( @@ -72,7 +72,7 @@ OutputVector translate_arange(const NodeContext& context) { out_tensor = fw_node->input_value(0); dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto range = context.mark_node(std::make_shared(start, end, step, dtype)); @@ -130,7 +130,7 @@ OutputVector translate_arange_fx(const NodeContext& context) { dtype_port = 3; dtype_applied = true; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type()); + PYTORCH_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type()); } if (dtype_port >= 0 && !context.input_is_none(dtype_port)) { if (std::dynamic_pointer_cast( @@ -142,7 +142,7 @@ OutputVector translate_arange_fx(const NodeContext& context) { out_tensor = fw_node->input_value(0); dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto r_end = context.mark_node(std::make_shared(end, dtype)); diff --git a/src/frontends/pytorch/src/op/as_strided.cpp b/src/frontends/pytorch/src/op/as_strided.cpp index 5d1dfe38bdaa17..6bcaed8bfd49e3 100644 --- a/src/frontends/pytorch/src/op/as_strided.cpp +++ 
b/src/frontends/pytorch/src/op/as_strided.cpp @@ -32,8 +32,8 @@ OutputVector translate_as_strided(const NodeContext& context) { auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); auto input_strides = decoder->get_input_strides(0); - FRONT_END_OP_CONVERSION_CHECK(input_strides.size() != 0, - "aten::as_strided: Couldn't retrive input stride information from torchscript."); + PYTORCH_OP_CONVERSION_CHECK(input_strides.size() != 0, + "aten::as_strided: Couldn't retrive input stride information from torchscript."); std::vector idxs(input_strides.size()); iota(idxs.begin(), idxs.end(), 0); @@ -77,8 +77,8 @@ OutputVector translate_as_strided(const NodeContext& context) { if (!context.input_is_none(3)) { offset = context.get_input(3); } - FRONT_END_OP_CONVERSION_CHECK(sizes.size() == strides.size(), - "aten::as_strided: Vector for strides and sizes need to have equal length."); + PYTORCH_OP_CONVERSION_CHECK(sizes.size() == strides.size(), + "aten::as_strided: Vector for strides and sizes need to have equal length."); auto strides_size = strides.size() - 1; auto i = 0; auto strides_length_const = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {strides.size()})); diff --git a/src/frontends/pytorch/src/op/as_tensor.cpp b/src/frontends/pytorch/src/op/as_tensor.cpp index 93c4a647fb3ce7..6a97af6671303f 100644 --- a/src/frontends/pytorch/src/op/as_tensor.cpp +++ b/src/frontends/pytorch/src/op/as_tensor.cpp @@ -55,7 +55,7 @@ OutputVector translate_as_tensor(const NodeContext& context) { return {context.mark_node(std::make_shared(OutputVector(list_elems.begin(), list_elems.end()), 0))}; } else { // Input is already a tensor - FRONT_END_OP_CONVERSION_CHECK(list_elems.size() == 1, "Input must be single tensor."); + PYTORCH_OP_CONVERSION_CHECK(list_elems.size() == 1, "Input must be single tensor."); return {list_elems[0]}; } }; diff --git 
a/src/frontends/pytorch/src/op/avg_poolnd.cpp b/src/frontends/pytorch/src/op/avg_poolnd.cpp index e497256f1c6205..4a90db23a67c1e 100644 --- a/src/frontends/pytorch/src/op/avg_poolnd.cpp +++ b/src/frontends/pytorch/src/op/avg_poolnd.cpp @@ -45,8 +45,8 @@ OutputVector translate_avg_poolnd(const NodeContext& context) { if (!(context.input_is_none(5))) { count_include_pad = context.const_input(5); } - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(6), - "Translation for aten::avg_pool2d do not support divisor_override input."); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(6), + "Translation for aten::avg_pool2d do not support divisor_override input."); // Although ov::AvgPool provides exclude_pad=false, // The corner case of Average Pooling with ceil_mode on // PyTorch allows sliding window go off bound, which leads to this accommodation. diff --git a/src/frontends/pytorch/src/op/batch_norm.cpp b/src/frontends/pytorch/src/op/batch_norm.cpp index 5d10222eceb890..092e95ff38a290 100644 --- a/src/frontends/pytorch/src/op/batch_norm.cpp +++ b/src/frontends/pytorch/src/op/batch_norm.cpp @@ -139,8 +139,8 @@ OutputVector translate_batch_norm_legit_no_stats_fx(const NodeContext& context) bias = context.get_input(2); } auto training = context.const_input(3); - FRONT_END_OP_CONVERSION_CHECK(training, - "aten._native_batch_norm_legit.no_stats can only be used when training=True."); + PYTORCH_OP_CONVERSION_CHECK(training, + "aten._native_batch_norm_legit.no_stats can only be used when training=True."); // index 4 momentum is used during training only auto eps = context.const_input(5); auto output = make_batch_norm(context, context.get_input(0), weight, bias, {}, {}, eps); diff --git a/src/frontends/pytorch/src/op/cat.cpp b/src/frontends/pytorch/src/op/cat.cpp index 9476979a118bd7..5b873193157cda 100644 --- a/src/frontends/pytorch/src/op/cat.cpp +++ b/src/frontends/pytorch/src/op/cat.cpp @@ -35,7 +35,7 @@ OutputVector translate_cat_common(const NodeContext& context, 
return {context.mark_node(fw_node)}; } auto first_node = list_elems.front().get_node_shared_ptr(); - FRONT_END_OP_CONVERSION_CHECK( + PYTORCH_OP_CONVERSION_CHECK( list_elems.size() > 1 || !ov::as_type_ptr(first_node), "::cat is located inside body while inputs are located outside of the body. " "This case is not supported."); @@ -86,7 +86,7 @@ OutputVector translate_quantized_cat(const NodeContext& context) { num_inputs_check(context, 4, 4); const auto&& list_elems = get_list_as_outputs(context.get_input(0)); auto axis = context.const_input(1); - FRONT_END_OP_CONVERSION_CHECK(!list_elems.empty(), "Couldn't find quantized input for quantized::cat operation."); + PYTORCH_OP_CONVERSION_CHECK(!list_elems.empty(), "Couldn't find quantized input for quantized::cat operation."); return {quantize(context, translate_cat_common(context, list_elems, axis, false)[0], context.get_input(2), diff --git a/src/frontends/pytorch/src/op/conv_transposend.cpp b/src/frontends/pytorch/src/op/conv_transposend.cpp index 1f281f90486fad..079df5703e08ca 100644 --- a/src/frontends/pytorch/src/op/conv_transposend.cpp +++ b/src/frontends/pytorch/src/op/conv_transposend.cpp @@ -24,7 +24,7 @@ OutputVector translate_conv_transposend(const NodeContext& context) { auto pad_type = ov::op::PadType::EXPLICIT; auto dilations = context.const_input(7); auto groups = context.const_input(6); - FRONT_END_OP_CONVERSION_CHECK(groups > 0, "Number of groups for convolution_transpose should be >= 1"); + PYTORCH_OP_CONVERSION_CHECK(groups > 0, "Number of groups for convolution_transpose should be >= 1"); std::shared_ptr conv; if (groups == 1) { diff --git a/src/frontends/pytorch/src/op/elu.cpp b/src/frontends/pytorch/src/op/elu.cpp index 4f96371ee83ebd..fee33345436e1c 100644 --- a/src/frontends/pytorch/src/op/elu.cpp +++ b/src/frontends/pytorch/src/op/elu.cpp @@ -18,10 +18,10 @@ OutputVector translate_elu(const NodeContext& context) { auto x = context.get_input(0); auto alpha = context.const_input(1); // TODO: 
Figure out what scale and input_scale do - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == 1, - "Unexpected value of scale input for elu operation"); - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(3) || context.const_input(3) == 1, - "Unexpected value of input_scale input for elu operation"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == 1, + "Unexpected value of scale input for elu operation"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(3) || context.const_input(3) == 1, + "Unexpected value of input_scale input for elu operation"); return {context.mark_node(std::make_shared(x, alpha))}; }; diff --git a/src/frontends/pytorch/src/op/embedding_bag.cpp b/src/frontends/pytorch/src/op/embedding_bag.cpp index ee1cba3d1cff08..4560ea2a09db4f 100644 --- a/src/frontends/pytorch/src/op/embedding_bag.cpp +++ b/src/frontends/pytorch/src/op/embedding_bag.cpp @@ -21,7 +21,7 @@ OutputVector translate_embedding_bag(const NodeContext& context) { num_inputs_check(context, 9, 9); // we have only EmbeddingBagSum case support, check it before translation auto mode = context.const_input(4); - FRONT_END_OP_CONVERSION_CHECK(mode == 0, "Only sum mode supported for aten::embedding_bag translation"); + PYTORCH_OP_CONVERSION_CHECK(mode == 0, "Only sum mode supported for aten::embedding_bag translation"); auto weight = context.get_input(0); auto indices = context.get_input(1); indices = context.mark_node(std::make_shared(indices, element::i32)); @@ -44,7 +44,7 @@ OutputVector translate_embedding_bag(const NodeContext& context) { auto offsets = context.get_input(2); offsets = context.mark_node(std::make_shared(offsets, element::i32)); auto include_last_offset = context.const_input(7); - FRONT_END_OP_CONVERSION_CHECK(!include_last_offset, "Inclusion last offset is not supported"); + PYTORCH_OP_CONVERSION_CHECK(!include_last_offset, "Inclusion last offset is not supported"); // no per_sample_wights if 
(context.input_is_none(6)) { result = context.mark_node(std::make_shared(weight, indices, offsets)); diff --git a/src/frontends/pytorch/src/op/expand.cpp b/src/frontends/pytorch/src/op/expand.cpp index 7fcb7a898a48bc..2966436355a757 100644 --- a/src/frontends/pytorch/src/op/expand.cpp +++ b/src/frontends/pytorch/src/op/expand.cpp @@ -28,8 +28,8 @@ OutputVector translate_expand(const NodeContext& context) { auto x = context.get_input(0); auto sizes = context.get_input(1); // TODO: figure out what implicit means - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == false, - "Unexpected value of implicit for expand operation"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == false, + "Unexpected value of implicit for expand operation"); return base_expand(context, x, sizes); }; @@ -54,8 +54,8 @@ OutputVector translate_expand_fx(const NodeContext& context) { } auto sizes = context.get_input(1); // TODO: figure out what implicit means - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == false, - "Unexpected value of implicit for expand operation"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(2) || context.const_input(2) == false, + "Unexpected value of implicit for expand operation"); return base_expand(context, x, sizes); }; diff --git a/src/frontends/pytorch/src/op/eye.cpp b/src/frontends/pytorch/src/op/eye.cpp index 9b7f7ef8c3bc29..2a4be73a6ef500 100644 --- a/src/frontends/pytorch/src/op/eye.cpp +++ b/src/frontends/pytorch/src/op/eye.cpp @@ -36,7 +36,7 @@ OutputVector translate_eye(const NodeContext& context) { y = context.mark_node(std::make_shared(y, element::i32)); dtype_id = 2; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported number of inputs: ", num_inputs, " for aten::eye"); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported number of inputs: ", num_inputs, " for aten::eye"); } if (!context.input_is_none(dtype_id)) { dtype = 
convert_dtype(context.const_input(dtype_id)); diff --git a/src/frontends/pytorch/src/op/full.cpp b/src/frontends/pytorch/src/op/full.cpp index defcbab7095089..b87ec7867c96aa 100644 --- a/src/frontends/pytorch/src/op/full.cpp +++ b/src/frontends/pytorch/src/op/full.cpp @@ -264,7 +264,7 @@ OutputVector translate_fill_diagonal(const NodeContext& context) { auto const_zero_s = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); auto const_neg_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); if (input_rank.is_dynamic() || input_rank.get_length() < 2) { - FRONT_END_OP_CONVERSION_CHECK(false, "aten::fill_diagonal_ required tensor with static rank >= 2 "); + PYTORCH_OP_CONVERSION_CHECK(false, "aten::fill_diagonal_ required tensor with static rank >= 2 "); } auto flatten_input = context.mark_node(std::make_shared(input_tensor, const_neg_one, false)); auto wrap = context.const_input(2); diff --git a/src/frontends/pytorch/src/op/gelu.cpp b/src/frontends/pytorch/src/op/gelu.cpp index 64afb511b31dc3..ac38e41b93a0ed 100644 --- a/src/frontends/pytorch/src/op/gelu.cpp +++ b/src/frontends/pytorch/src/op/gelu.cpp @@ -21,7 +21,7 @@ OutputVector translate_gelu_common(const NodeContext& context, const std::string if (approximate == "tanh") { return {context.mark_node(std::make_shared(x, ov::op::GeluApproximationMode::TANH))}; } - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported approximate for Gelu: ", approximate); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported approximate for Gelu: ", approximate); }; } // namespace diff --git a/src/frontends/pytorch/src/op/get_attr.cpp b/src/frontends/pytorch/src/op/get_attr.cpp index 58bc63e60a700e..c28da30cf17ef3 100644 --- a/src/frontends/pytorch/src/op/get_attr.cpp +++ b/src/frontends/pytorch/src/op/get_attr.cpp @@ -13,9 +13,9 @@ namespace op { OutputVector translate_get_attr(const NodeContext& context) { auto res = context.get_decoder()->try_decode_get_attr(); - 
FRONT_END_OP_CONVERSION_CHECK(res.size() > 0, - "Failed to obtain data from GetAttr with output tensor name: ", - context.get_decoder()->get_output_debug_name(0)); + PYTORCH_OP_CONVERSION_CHECK(res.size() > 0, + "Failed to obtain data from GetAttr with output tensor name: ", + context.get_decoder()->get_output_debug_name(0)); if (res.size() == 1) { auto node = res[0].get_node(); if (node->get_friendly_name() != node->get_name()) { diff --git a/src/frontends/pytorch/src/op/getitem.cpp b/src/frontends/pytorch/src/op/getitem.cpp index 58d3639cc8aa92..1aab3e765d237f 100644 --- a/src/frontends/pytorch/src/op/getitem.cpp +++ b/src/frontends/pytorch/src/op/getitem.cpp @@ -20,29 +20,29 @@ OutputVector translate_getitem(const NodeContext& context) { num_inputs_check(context, 2, 2); auto input = context.get_input(0); const auto idx_type = context.get_input_type(1); - FRONT_END_OP_CONVERSION_CHECK(!idx_type.is(), - "String index in aten::__getitem__ means dict input, this is not supported."); + PYTORCH_OP_CONVERSION_CHECK(!idx_type.is(), + "String index in aten::__getitem__ means dict input, this is not supported."); if (ov::as_type_ptr(input.get_node_shared_ptr())) { - FRONT_END_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::split"), - "special case for aten::__getitem__"); - FRONT_END_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::chunk"), - "special case for aten::__getitem__"); + PYTORCH_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::split"), + "special case for aten::__getitem__"); + PYTORCH_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::chunk"), + "special case for aten::__getitem__"); const auto&& list_elems = get_list_as_outputs(input); auto getitem_idx = context.const_input(1); if (getitem_idx < 0) { getitem_idx += list_elems.size(); } - FRONT_END_OP_CONVERSION_CHECK(getitem_idx < static_cast(list_elems.size()), - "Index: ", - getitem_idx, - " is out of bounds of input list of 
len: ", - list_elems.size()); + PYTORCH_OP_CONVERSION_CHECK(getitem_idx < static_cast(list_elems.size()), + "Index: ", + getitem_idx, + " is out of bounds of input list of len: ", + list_elems.size()); return {list_elems.at(getitem_idx)}; } if (ov::as_type_ptr(input.get_node_shared_ptr())) { const auto& outside_input_node = context.get_input_from_visible_context(0).get_node_shared_ptr(); - FRONT_END_OP_CONVERSION_CHECK(!ov::as_type_ptr(outside_input_node), - "Unsupported case: aten::__getitem__ is inside the body, and input is Loop."); + PYTORCH_OP_CONVERSION_CHECK(!ov::as_type_ptr(outside_input_node), + "Unsupported case: aten::__getitem__ is inside the body, and input is Loop."); } auto getitem_idx = context.get_input(1); auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); diff --git a/src/frontends/pytorch/src/op/grid_sampler.cpp b/src/frontends/pytorch/src/op/grid_sampler.cpp index 8c603813d888f7..d9b268d7aa4b3e 100644 --- a/src/frontends/pytorch/src/op/grid_sampler.cpp +++ b/src/frontends/pytorch/src/op/grid_sampler.cpp @@ -28,12 +28,12 @@ OutputVector translate_grid_sampler(const NodeContext& context) { {1, v9::GridSample::PaddingMode::BORDER}, {2, v9::GridSample::PaddingMode::REFLECTION}}; auto mode = context.const_input(2); - FRONT_END_OP_CONVERSION_CHECK(grid_sample_mode_map.count(mode), "Unknown interpolation mode: ", mode); + PYTORCH_OP_CONVERSION_CHECK(grid_sample_mode_map.count(mode), "Unknown interpolation mode: ", mode); attrs.mode = grid_sample_mode_map.at(mode); auto padding_mode = context.const_input(3); - FRONT_END_OP_CONVERSION_CHECK(grid_sample_padding_mode_map.count(padding_mode), - "Unknown padding mode: ", - padding_mode); + PYTORCH_OP_CONVERSION_CHECK(grid_sample_padding_mode_map.count(padding_mode), + "Unknown padding mode: ", + padding_mode); attrs.padding_mode = grid_sample_padding_mode_map.at(padding_mode); bool align_corners = false; if (!context.input_is_none(4)) { diff --git 
a/src/frontends/pytorch/src/op/if.cpp b/src/frontends/pytorch/src/op/if.cpp index 15d1c5e24c1873..5b5e07ab1bb7da 100644 --- a/src/frontends/pytorch/src/op/if.cpp +++ b/src/frontends/pytorch/src/op/if.cpp @@ -51,7 +51,7 @@ OutputVector translate_if(const NodeContext& context) { auto if_node = std::make_shared(context.get_input(0)); context.mark_node(if_node); auto decoder = context.get_decoder(); - FRONT_END_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 2, "If must have 2 subgraphs."); + PYTORCH_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 2, "If must have 2 subgraphs."); auto then_decoder = decoder->get_subgraph_decoder(0); auto then_body = context.convert_subgraph(0); @@ -72,13 +72,13 @@ OutputVector translate_if(const NodeContext& context) { auto session = context.get_session(); for (const auto& param : then_body->get_parameters()) { auto input_idx = session->decode_tensor_name(param->output(0)); - FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(input_idx) == 0, - "More than one then_body input with same tensor name: ", - input_idx, - "; existing: ", - inputs_map.at(input_idx)[0], - " adding: ", - param); + PYTORCH_OP_CONVERSION_CHECK(inputs_map.count(input_idx) == 0, + "More than one then_body input with same tensor name: ", + input_idx, + "; existing: ", + inputs_map.at(input_idx)[0], + " adding: ", + param); inputs_map[input_idx] = {param, nullptr}; } for (const auto& param : else_body->get_parameters()) { @@ -93,8 +93,8 @@ OutputVector translate_if(const NodeContext& context) { const auto num_outs = context.get_output_size(); const auto then_results = then_body->get_results(); const auto else_results = else_body->get_results(); - FRONT_END_OP_CONVERSION_CHECK(then_results.size() >= num_outs && else_results.size() >= num_outs, - "Else or then body have less outputs than prim::If requires."); + PYTORCH_OP_CONVERSION_CHECK(then_results.size() >= num_outs && else_results.size() >= num_outs, + "Else or then body have less outputs than prim::If 
requires."); for (size_t i = 0; i < num_outs; i++) { align_result_types(context, then_results[i], else_results[i]); res.push_back(if_node->set_output(then_results[i], else_results[i])); @@ -106,26 +106,26 @@ OutputVector translate_if(const NodeContext& context) { for (size_t i = num_outs; i < then_results.size(); i++) { const auto result = then_results[i]; auto output_idx = session->decode_tensor_name(result->input(0).get_source_output()); - FRONT_END_OP_CONVERSION_CHECK(extra_then_body_results.count(output_idx) == 0, - "More than one then_body output with same tensor name: ", - output_idx, - "; existing: ", - extra_then_body_results.at(output_idx), - " adding: ", - result); + PYTORCH_OP_CONVERSION_CHECK(extra_then_body_results.count(output_idx) == 0, + "More than one then_body output with same tensor name: ", + output_idx, + "; existing: ", + extra_then_body_results.at(output_idx), + " adding: ", + result); extra_then_body_results[output_idx] = result; extra_output_idxs.insert(output_idx); } for (size_t i = num_outs; i < else_results.size(); i++) { const auto result = else_results[i]; auto output_idx = session->decode_tensor_name(result->input(0).get_source_output()); - FRONT_END_OP_CONVERSION_CHECK(extra_else_body_results.count(output_idx) == 0, - "More than one else_body output with same tensor name: ", - output_idx, - "; existing: ", - extra_else_body_results.at(output_idx), - " adding: ", - result); + PYTORCH_OP_CONVERSION_CHECK(extra_else_body_results.count(output_idx) == 0, + "More than one else_body output with same tensor name: ", + output_idx, + "; existing: ", + extra_else_body_results.at(output_idx), + " adding: ", + result); extra_else_body_results[output_idx] = result; extra_output_idxs.insert(output_idx); } @@ -140,7 +140,7 @@ OutputVector translate_if(const NodeContext& context) { then_body->add_parameters({new_parameter}); then_body->add_results({new_result}); then_body->validate_nodes_and_infer_types(); - 
FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in else body: ", output_idx); + PYTORCH_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in else body: ", output_idx); inputs_map[output_idx][0] = new_parameter; extra_then_body_results[output_idx] = new_result; OPENVINO_DEBUG << "Modified then body: " << if_node << '\n'; @@ -152,7 +152,7 @@ OutputVector translate_if(const NodeContext& context) { else_body->add_parameters({new_parameter}); else_body->add_results({new_result}); else_body->validate_nodes_and_infer_types(); - FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in then body: ", output_idx); + PYTORCH_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in then body: ", output_idx); inputs_map[output_idx][1] = new_parameter; extra_else_body_results[output_idx] = new_result; OPENVINO_DEBUG << "Modified else body: " << if_node << '\n'; diff --git a/src/frontends/pytorch/src/op/im2col.cpp b/src/frontends/pytorch/src/op/im2col.cpp index 718e0eadaa4ca0..56545bc3270ff6 100644 --- a/src/frontends/pytorch/src/op/im2col.cpp +++ b/src/frontends/pytorch/src/op/im2col.cpp @@ -60,13 +60,13 @@ OutputVector translate_im2col(const NodeContext& context) { num_inputs_check(context, 5, 5); auto input = context.get_input(0); auto kernel_size = context.const_input>(1); - FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "kernel size should contains 2 elements"); + PYTORCH_OP_CONVERSION_CHECK(kernel_size.size() == 2, "kernel size should contains 2 elements"); auto dilation = context.const_input>(2); - FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "dilation should contains 2 elements"); + PYTORCH_OP_CONVERSION_CHECK(kernel_size.size() == 2, "dilation should contains 2 elements"); auto padding = context.const_input>(3); - FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "padding should contains 2 elements"); + PYTORCH_OP_CONVERSION_CHECK(kernel_size.size() == 2, "padding 
should contains 2 elements"); auto stride = context.const_input>(4); - FRONT_END_OP_CONVERSION_CHECK(kernel_size.size() == 2, "stride should contains 2 elements"); + PYTORCH_OP_CONVERSION_CHECK(kernel_size.size() == 2, "stride should contains 2 elements"); auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); auto input_shape = context.mark_node(std::make_shared(input, element::i32)); auto zero_f = context.mark_node(v0::Constant::create(element::f32, Shape{}, {0})); diff --git a/src/frontends/pytorch/src/op/index.cpp b/src/frontends/pytorch/src/op/index.cpp index a9aaf62257d71b..6030dd557deae3 100644 --- a/src/frontends/pytorch/src/op/index.cpp +++ b/src/frontends/pytorch/src/op/index.cpp @@ -224,7 +224,7 @@ OutputVector translate_index(const NodeContext& context) { ov::pass::NodeRegistry rg; auto rank = x.get_partial_shape().rank(); // index transformation supports only tensors with static rank - FRONT_END_OP_CONVERSION_CHECK(rank.is_static(), "Dynamic rank for aten::index input is not supported."); + PYTORCH_OP_CONVERSION_CHECK(rank.is_static(), "Dynamic rank for aten::index input is not supported."); auto res = index_on_list(rg, x, list_elems, rank.get_length()); context.mark_nodes(rg.get()); return res; @@ -266,7 +266,7 @@ OutputVector translate_index_fx(const NodeContext& context) { rank = context.get_decoder()->get_input_shape(0).rank(); } // index transformation supports only tensors with static rank - FRONT_END_OP_CONVERSION_CHECK(rank.is_static(), "Dynamic rank for aten::index input is not supported."); + PYTORCH_OP_CONVERSION_CHECK(rank.is_static(), "Dynamic rank for aten::index input is not supported."); auto res = index_on_list(rg, x, list_elems, rank.get_length()); context.mark_nodes(rg.get()); return res; diff --git a/src/frontends/pytorch/src/op/layer_norm.cpp b/src/frontends/pytorch/src/op/layer_norm.cpp index 974106e3aabe5d..9bcdd0c1bdd6f3 100644 --- a/src/frontends/pytorch/src/op/layer_norm.cpp +++ 
b/src/frontends/pytorch/src/op/layer_norm.cpp @@ -21,9 +21,9 @@ OutputVector translate_layer_norm(const NodeContext& context) { num_inputs_check(context, 5, 6); auto eps = context.const_input(4); auto normalized_shape = context.const_input(1); - FRONT_END_OP_CONVERSION_CHECK(normalized_shape.size() == 1, - "Translation for aten::layer_norm supports only single normalized_shape value, " - "which means normalizing over the last dimension."); + PYTORCH_OP_CONVERSION_CHECK(normalized_shape.size() == 1, + "Translation for aten::layer_norm supports only single normalized_shape value, " + "which means normalizing over the last dimension."); // TODO: support any dimension auto axes = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); auto out_node = diff --git a/src/frontends/pytorch/src/op/linspace.cpp b/src/frontends/pytorch/src/op/linspace.cpp index c2233bee15ee24..5bdf489d297566 100644 --- a/src/frontends/pytorch/src/op/linspace.cpp +++ b/src/frontends/pytorch/src/op/linspace.cpp @@ -44,7 +44,7 @@ OutputVector translate_linspace(const NodeContext& context) { out_tensor = fw_node->input_value(0); apply_dtype = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } else if (!context.input_is_none(3) && context.get_input_size() == 4) { // Case where dtype is inherited from out tensor. 
diff --git a/src/frontends/pytorch/src/op/list_construct.cpp b/src/frontends/pytorch/src/op/list_construct.cpp index e58a3c4744ff61..cc17aae928e37d 100644 --- a/src/frontends/pytorch/src/op/list_construct.cpp +++ b/src/frontends/pytorch/src/op/list_construct.cpp @@ -22,7 +22,7 @@ OutputVector translate_list_construct(const NodeContext& context) { for (size_t i = 0; i < context.get_input_size(); i++) { auto input = context.get_input_from_visible_context(i); auto c_node = std::dynamic_pointer_cast(input.get_node_shared_ptr()); - FRONT_END_OP_CONVERSION_CHECK(c_node, "Translation for prim::ListConstruct support only constant inputs"); + PYTORCH_OP_CONVERSION_CHECK(c_node, "Translation for prim::ListConstruct support only constant inputs"); if (c_node->get_shape().size() == 0) { c_node = std::make_shared(c_node->get_element_type(), Shape{1}, c_node->get_data_ptr()); consts.push_back(c_node); diff --git a/src/frontends/pytorch/src/op/list_unpack.cpp b/src/frontends/pytorch/src/op/list_unpack.cpp index 3a960f842352ed..30fe2d2ed5e8bd 100644 --- a/src/frontends/pytorch/src/op/list_unpack.cpp +++ b/src/frontends/pytorch/src/op/list_unpack.cpp @@ -31,7 +31,7 @@ OutputVector translate_list_unpack(const NodeContext& context) { } return res; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported operation type."); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported operation type."); } } else { return outputs; diff --git a/src/frontends/pytorch/src/op/loop.cpp b/src/frontends/pytorch/src/op/loop.cpp index 99c59b46f342a1..4aaf56b081f5b4 100644 --- a/src/frontends/pytorch/src/op/loop.cpp +++ b/src/frontends/pytorch/src/op/loop.cpp @@ -15,10 +15,10 @@ namespace op { OutputVector translate_loop(const NodeContext& context) { const auto& inputs = context.inputs(); - FRONT_END_OP_CONVERSION_CHECK(inputs.size() >= 2, "Loop must have at least 2 inputs."); + PYTORCH_OP_CONVERSION_CHECK(inputs.size() >= 2, "Loop must have at least 2 inputs."); auto loop = std::make_shared(inputs[0], 
inputs[1]); auto decoder = context.get_decoder(); - FRONT_END_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 1, "Loop must have 1 subgraph."); + PYTORCH_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 1, "Loop must have 1 subgraph."); auto subgraph_decoder = decoder->get_subgraph_decoder(0); auto body = context.convert_subgraph(0); loop->set_function(body); @@ -28,20 +28,20 @@ OutputVector translate_loop(const NodeContext& context) { // process outputs first auto session = context.get_session(); auto body_results = body->get_results(); - FRONT_END_OP_CONVERSION_CHECK(body_results.size() > 0, "At least one output from loop is required - condition."); + PYTORCH_OP_CONVERSION_CHECK(body_results.size() > 0, "At least one output from loop is required - condition."); std::map> output_idxs; // 0 output is condition, do not need to connect it for (size_t i = 1; i < body_results.size(); i++) { auto result = body_results[i]; auto out_idx = session->decode_tensor_name(result->input(0).get_source_output()); - FRONT_END_OP_CONVERSION_CHECK(output_idxs.count(out_idx) == 0, - "More then one body output with same tensor name."); + PYTORCH_OP_CONVERSION_CHECK(output_idxs.count(out_idx) == 0, + "More then one body output with same tensor name."); output_idxs[out_idx] = result; } auto body_parameters = body->get_parameters(); // #0 body parameter is counter; - FRONT_END_OP_CONVERSION_CHECK(body_parameters.size() > 0, "At least one input to Loop body is required"); + PYTORCH_OP_CONVERSION_CHECK(body_parameters.size() > 0, "At least one input to Loop body is required"); // Set counter type and shape body_parameters[0]->set_element_type(element::i32); body_parameters[0]->set_partial_shape(PartialShape{}); diff --git a/src/frontends/pytorch/src/op/lstm.cpp b/src/frontends/pytorch/src/op/lstm.cpp index 1ec859e5e7b8c5..a845229a17b314 100644 --- a/src/frontends/pytorch/src/op/lstm.cpp +++ b/src/frontends/pytorch/src/op/lstm.cpp @@ -95,12 +95,12 @@ OutputVector 
generic_rnn(ov::pass::NodeRegistry& rg, bidirectional ? RecurrentSequenceDirection::BIDIRECTIONAL : RecurrentSequenceDirection::FORWARD; int64_t weights_per_layer = has_biases ? 4 : 2; int64_t mult = bidirectional ? 2 : 1; - FRONT_END_OP_CONVERSION_CHECK(static_cast(all_weights.size()) == num_layers * weights_per_layer * mult, - "Unexpected length of list with weights for rnn operation."); + PYTORCH_OP_CONVERSION_CHECK(static_cast(all_weights.size()) == num_layers * weights_per_layer * mult, + "Unexpected length of list with weights for rnn operation."); const auto w_hh = all_weights[1]; const auto w_hh_pshape = w_hh.get_partial_shape(); - FRONT_END_OP_CONVERSION_CHECK(w_hh_pshape.rank().is_static() && w_hh_pshape[1].is_static(), ""); + PYTORCH_OP_CONVERSION_CHECK(w_hh_pshape.rank().is_static() && w_hh_pshape[1].is_static(), ""); const auto hidden_size = w_hh_pshape[1].get_length(); const auto zero = v0::Constant::create(element::i32, Shape{}, {0}); @@ -119,7 +119,7 @@ OutputVector generic_rnn(ov::pass::NodeRegistry& rg, h0 = initial_states[0]; c0 = initial_states[1]; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported rnn variant."); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported rnn variant."); } Output prev_output = input; @@ -269,7 +269,7 @@ OutputVector generic_rnn(ov::pass::NodeRegistry& rg, c_res = rg.make(c_res, order_102); return {prev_output, h_res, c_res}; } - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported rnn variant."); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported rnn variant."); } } // namespace @@ -289,7 +289,7 @@ OutputVector translate_lstm(const NodeContext& context) { const auto num_layers = context.const_input(5); // const auto dropout = context.const_input(6); - skip const auto train = context.const_input(7); - FRONT_END_OP_CONVERSION_CHECK(!train, "LSTM in train mode is not supported."); + PYTORCH_OP_CONVERSION_CHECK(!train, "LSTM in train mode is not supported."); const auto bidirectional = context.const_input(8); const 
auto initial_states = get_list_as_outputs(hx); @@ -316,7 +316,7 @@ OutputVector translate_lstm(const NodeContext& context) { const auto num_layers = context.const_input(4); // const auto dropout = context.const_input(5); - skip const auto train = context.const_input(6); - FRONT_END_OP_CONVERSION_CHECK(!train, "LSTM in train mode is not supported."); + PYTORCH_OP_CONVERSION_CHECK(!train, "LSTM in train mode is not supported."); const auto bidirectional = context.const_input(7); const auto batch_first = context.const_input(8); @@ -348,7 +348,7 @@ OutputVector translate_gru(const NodeContext& context) { const auto num_layers = context.const_input(4); // const auto dropout = context.const_input(5); - skip const auto train = context.const_input(6); - FRONT_END_OP_CONVERSION_CHECK(!train, "GRU in train mode is not supported."); + PYTORCH_OP_CONVERSION_CHECK(!train, "GRU in train mode is not supported."); const auto bidirectional = context.const_input(7); const auto batch_first = context.const_input(8); @@ -378,13 +378,13 @@ OutputVector translate_rnn(const NodeContext& context) { const auto num_layers = context.const_input(4); // const auto dropout = context.const_input(5); - skip const auto train = context.const_input(6); - FRONT_END_OP_CONVERSION_CHECK(!train, "RNN in train mode is not supported."); + PYTORCH_OP_CONVERSION_CHECK(!train, "RNN in train mode is not supported."); const auto bidirectional = context.const_input(7); const auto batch_first = context.const_input(8); const auto weight = get_list_as_outputs(weight_v); const auto variant_it = RNN_VARIANT_MAP.find(context.get_op_type()); - FRONT_END_OP_CONVERSION_CHECK(variant_it != RNN_VARIANT_MAP.end(), "Unsupported RNN variant."); + PYTORCH_OP_CONVERSION_CHECK(variant_it != RNN_VARIANT_MAP.end(), "Unsupported RNN variant."); const auto res = generic_rnn(rg, variant_it->second, input, diff --git a/src/frontends/pytorch/src/op/multinomial.cpp b/src/frontends/pytorch/src/op/multinomial.cpp index 
c359c34bbcb657..bba2b045063f47 100644 --- a/src/frontends/pytorch/src/op/multinomial.cpp +++ b/src/frontends/pytorch/src/op/multinomial.cpp @@ -27,8 +27,8 @@ OutputVector translate_multinomial(const NodeContext& context) { auto input = context.get_input(0); auto num_samples = context.mark_node(std::make_shared(context.get_input(1), const_1, false)); auto replacement = context.const_input(2); - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(3), - "aten::multinomial conversion with generator is not supported"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(3), + "aten::multinomial conversion with generator is not supported"); // Torch multinomial accept input of [class_probs] or [bs, class_probs], convert always to [bs, class_probs] for OV. auto input_shape = context.mark_node(std::make_shared(input, element::i32)); diff --git a/src/frontends/pytorch/src/op/native_multi_head_attention.cpp b/src/frontends/pytorch/src/op/native_multi_head_attention.cpp index 6ecc798b439394..e70fae3976d7b8 100644 --- a/src/frontends/pytorch/src/op/native_multi_head_attention.cpp +++ b/src/frontends/pytorch/src/op/native_multi_head_attention.cpp @@ -132,7 +132,7 @@ OutputVector translate_native_multi_head_attention(const NodeContext& context) { } else { // Once int/float mask type is supported in PyTorch, // remove this assert to allow for such masks in OV - FRONT_END_OP_CONVERSION_CHECK(1, "Non-boolean masks are not supported."); + PYTORCH_OP_CONVERSION_CHECK(1, "Non-boolean masks are not supported."); atten_mask = context.mark_node(std::make_shared(atten_mask, scaled_dot_product)); } diff --git a/src/frontends/pytorch/src/op/norm.cpp b/src/frontends/pytorch/src/op/norm.cpp index cd0311972dd0dd..73ec824ddd6059 100644 --- a/src/frontends/pytorch/src/op/norm.cpp +++ b/src/frontends/pytorch/src/op/norm.cpp @@ -53,8 +53,8 @@ Output norm_vector(const NodeContext& context, res = context.mark_node(std::make_shared(abs, dim, keep_dim)); } else if (p == 0) { auto input_rank = 
input_tensor.get_partial_shape().rank(); - FRONT_END_OP_CONVERSION_CHECK(input_rank.is_dynamic() || input_rank.get_length() == 1, - "ord=0 supported only for vector norm"); + PYTORCH_OP_CONVERSION_CHECK(input_rank.is_dynamic() || input_rank.get_length() == 1, + "ord=0 supported only for vector norm"); auto zero = context.mark_node(v0::Constant::create(element::f32, Shape{}, {0})); zero = context.mark_node(std::make_shared(zero, input_tensor)); auto cond = context.mark_node(std::make_shared(input_tensor, zero)); @@ -100,7 +100,7 @@ Output norm_matrix(const NodeContext& context, auto sum = context.mark_node(std::make_shared(abs, first_dim, true)); res = context.mark_node(std::make_shared(sum, second_dim, true)); } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported ord ", p, " for matrix norm"); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported ord ", p, " for matrix norm"); } if (!keep_dim) { res = context.mark_node(std::make_shared(res, dim)); @@ -139,7 +139,7 @@ OutputVector translate_norm(const NodeContext& context) { if (p_str == "fro") { res = frobenius_norm(context, input_tensor, dim, keep_dim); } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); } } else { auto p = context.const_input(1); @@ -230,7 +230,7 @@ OutputVector translate_linalg_matrix_norm(const NodeContext& context) { if (p_str == "fro") { result = frobenius_norm(context, x, dim, keep_dim); } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); } } else { auto p = context.const_input(1); @@ -272,8 +272,7 @@ OutputVector translate_linalg_norm(const NodeContext& context) { } else if (input_rank.is_dynamic() || input_rank.get_length() == 1) { result = norm_vector(context, x, dim, 2, keep_dim); } else { - FRONT_END_OP_CONVERSION_CHECK(false, - "linalg norm for tensor rank > 2 without ord specification unsupported"); + 
PYTORCH_OP_CONVERSION_CHECK(false, "linalg norm for tensor rank > 2 without ord specification unsupported"); } } else { // ord defines the norm that is computed can be string or number @@ -283,7 +282,7 @@ OutputVector translate_linalg_norm(const NodeContext& context) { if (p_str == "fro") { result = frobenius_norm(context, x, dim, keep_dim); } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported ord ", p_str); } } else { auto p = context.const_input(1); diff --git a/src/frontends/pytorch/src/op/pad.cpp b/src/frontends/pytorch/src/op/pad.cpp index 390277edcf5796..4f6e186599544a 100644 --- a/src/frontends/pytorch/src/op/pad.cpp +++ b/src/frontends/pytorch/src/op/pad.cpp @@ -91,10 +91,10 @@ OutputVector translate_pad_common(const NodeContext& context, {"replicate", PadMode::EDGE}, }; auto ov_mode = pt_to_ov_pad.find(mode); - FRONT_END_OP_CONVERSION_CHECK(ov_mode != pt_to_ov_pad.end(), - "aten::pad conversion doesn't support [ ", - mode, - " ] padding mode"); + PYTORCH_OP_CONVERSION_CHECK(ov_mode != pt_to_ov_pad.end(), + "aten::pad conversion doesn't support [ ", + mode, + " ] padding mode"); return {context.mark_node(std::make_shared(data, pads_begins, pads_ends, pad_value_, ov_mode->second))}; } } // namespace diff --git a/src/frontends/pytorch/src/op/pythonop.cpp b/src/frontends/pytorch/src/op/pythonop.cpp index ccaac4a4909004..6040e6bbc97bf9 100644 --- a/src/frontends/pytorch/src/op/pythonop.cpp +++ b/src/frontends/pytorch/src/op/pythonop.cpp @@ -13,16 +13,15 @@ namespace op { OutputVector translate_pythonop(const NodeContext& context) { auto decoder = context.get_decoder(); - FRONT_END_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 1, - "PythonOp must have 1 subgraph to be able to translate it to OV."); + PYTORCH_OP_CONVERSION_CHECK(decoder->get_subgraph_size() == 1, + "PythonOp must have 1 subgraph to be able to translate it to OV."); auto body = context.convert_subgraph(0); auto session 
= context.get_session(); std::map inputs_map; for (const auto& param : body->get_parameters()) { auto tensor_idx = session->decode_tensor_name(param->output(0)); - FRONT_END_OP_CONVERSION_CHECK(!inputs_map.count(tensor_idx), - "Multiple nodes with the same id are not allowed."); + PYTORCH_OP_CONVERSION_CHECK(!inputs_map.count(tensor_idx), "Multiple nodes with the same id are not allowed."); inputs_map[tensor_idx] = {param}; } for (const auto& input : inputs_map) { diff --git a/src/frontends/pytorch/src/op/quantized_convnd.cpp b/src/frontends/pytorch/src/op/quantized_convnd.cpp index 37ab867d72a4ad..485ce9f9d71eb8 100644 --- a/src/frontends/pytorch/src/op/quantized_convnd.cpp +++ b/src/frontends/pytorch/src/op/quantized_convnd.cpp @@ -23,16 +23,16 @@ Output translate_quantized_convnd_base(const NodeContext& context) { auto input = context.get_input(0); auto packed_params_node = std::dynamic_pointer_cast(context.get_input(1).get_node_shared_ptr()); - FRONT_END_OP_CONVERSION_CHECK(packed_params_node, "Packed params input node type is required to be FrameworkNode."); + PYTORCH_OP_CONVERSION_CHECK(packed_params_node, "Packed params input node type is required to be FrameworkNode."); const auto& attrs = packed_params_node->get_attrs(); - FRONT_END_OP_CONVERSION_CHECK((attrs.find(PtFrameworkNode::op_type_key) != attrs.end()), - "Packed params input node does not contain information about op type."); - FRONT_END_OP_CONVERSION_CHECK((attrs.at(PtFrameworkNode::op_type_key) == "prim::GetAttr"), - "Incorrect packed params input node operator type, expected prim::GetAttr."); + PYTORCH_OP_CONVERSION_CHECK((attrs.find(PtFrameworkNode::op_type_key) != attrs.end()), + "Packed params input node does not contain information about op type."); + PYTORCH_OP_CONVERSION_CHECK((attrs.at(PtFrameworkNode::op_type_key) == "prim::GetAttr"), + "Incorrect packed params input node operator type, expected prim::GetAttr."); auto packed_params = packed_params_node->inputs(); - 
FRONT_END_OP_CONVERSION_CHECK(packed_params.size() == 6, - "Packed parameters for quantized conv should contain 6 items."); + PYTORCH_OP_CONVERSION_CHECK(packed_params.size() == 6, + "Packed parameters for quantized conv should contain 6 items."); // Packed params: weight, bias, stride, padding, dilation, groups auto weight = packed_params[0].get_source_output(); auto bias = packed_params[1].get_source_output(); diff --git a/src/frontends/pytorch/src/op/quantized_linear.cpp b/src/frontends/pytorch/src/op/quantized_linear.cpp index a69013f3fabb6b..e414d0c6f5a62f 100644 --- a/src/frontends/pytorch/src/op/quantized_linear.cpp +++ b/src/frontends/pytorch/src/op/quantized_linear.cpp @@ -20,16 +20,16 @@ OutputVector translate_quantized_linear(const NodeContext& context) { auto x = context.get_input(0); auto packed_params_node = std::dynamic_pointer_cast(context.get_input(1).get_node_shared_ptr()); - FRONT_END_OP_CONVERSION_CHECK(packed_params_node, "Packed params input node type is required to be FrameworkNode."); + PYTORCH_OP_CONVERSION_CHECK(packed_params_node, "Packed params input node type is required to be FrameworkNode."); const auto& attrs = packed_params_node->get_attrs(); - FRONT_END_OP_CONVERSION_CHECK((attrs.find(PtFrameworkNode::op_type_key) != attrs.end()), - "Packed params input node does not contain information about op type."); - FRONT_END_OP_CONVERSION_CHECK((attrs.at(PtFrameworkNode::op_type_key) == "prim::GetAttr"), - "Incorrect packed params input node operator type, expected prim::GetAttr."); + PYTORCH_OP_CONVERSION_CHECK((attrs.find(PtFrameworkNode::op_type_key) != attrs.end()), + "Packed params input node does not contain information about op type."); + PYTORCH_OP_CONVERSION_CHECK((attrs.at(PtFrameworkNode::op_type_key) == "prim::GetAttr"), + "Incorrect packed params input node operator type, expected prim::GetAttr."); auto packed_params = packed_params_node->inputs(); - FRONT_END_OP_CONVERSION_CHECK(packed_params.size() == 2, - "Packed parameters 
for quantized linear should contain 2 items."); + PYTORCH_OP_CONVERSION_CHECK(packed_params.size() == 2, + "Packed parameters for quantized linear should contain 2 items."); auto weights = packed_params[0].get_source_output(); auto bias = packed_params[1].get_source_output(); diff --git a/src/frontends/pytorch/src/op/rand.cpp b/src/frontends/pytorch/src/op/rand.cpp index d04b3bbd2780b7..a5a6771d36d581 100644 --- a/src/frontends/pytorch/src/op/rand.cpp +++ b/src/frontends/pytorch/src/op/rand.cpp @@ -56,8 +56,8 @@ OutputVector translate_rand(const NodeContext& context) { auto dtype = element::f32; size_t out_id = 1; if (context.get_input_size() == 3) { - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1), - "aten::randn conversion with generator does not supported"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(1), + "aten::randn conversion with generator does not supported"); out_id = 2; } // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -75,8 +75,8 @@ OutputVector translate_rand(const NodeContext& context) { Output convert_like_out; size_t dtype_id = 1; if (context.get_input_size() == 6) { - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1), - "aten::rand conversion with generator does not supported"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(1), + "aten::rand conversion with generator does not supported"); dtype_id = 2; } if (!context.input_is_none(dtype_id)) { @@ -92,7 +92,7 @@ OutputVector translate_rand(const NodeContext& context) { dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto res = context.mark_node(std::make_shared(sizes, low, high, dtype)); @@ -130,7 +130,7 @@ OutputVector translate_rand_like(const NodeContext& context) { dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); 
} } auto res = context.mark_node(std::make_shared(sizes, low, high, dtype)); @@ -150,8 +150,8 @@ OutputVector translate_randn(const NodeContext& context) { auto dtype = element::f32; size_t out_id = 1; if (context.get_input_size() == 3) { - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1), - "aten::randn conversion with generator does not supported"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(1), + "aten::randn conversion with generator does not supported"); out_id = 2; } // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) @@ -165,8 +165,8 @@ OutputVector translate_randn(const NodeContext& context) { } size_t dtype_id = 1; if (context.get_input_size() == 6) { - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1), - "aten::randn conversion with generator does not supported"); + PYTORCH_OP_CONVERSION_CHECK(context.input_is_none(1), + "aten::randn conversion with generator does not supported"); dtype_id = 2; } // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
@@ -186,7 +186,7 @@ OutputVector translate_randn(const NodeContext& context) { dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto scale = context.mark_node(v0::Constant::create(dtype, Shape{1}, {1})); @@ -226,7 +226,7 @@ OutputVector translate_randn_like(const NodeContext& context) { dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto scale = context.mark_node(v0::Constant::create(dtype, Shape{1}, {1})); @@ -256,7 +256,7 @@ OutputVector translate_randint(const NodeContext& context) { convert_like_out = fw_node->input_value(0); dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } low = context.mark_node(std::make_shared(low, dtype)); @@ -331,7 +331,7 @@ OutputVector translate_normal(const NodeContext& context) { convert_like_out = fw_node->input_value(0); dtype_applied = false; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); + PYTORCH_OP_CONVERSION_CHECK(false, "Couldn't get dtype input"); } } auto res = make_random_normal(context, sizes, dtype, std, mean); @@ -340,9 +340,9 @@ OutputVector translate_normal(const NodeContext& context) { } return res; } else { - FRONT_END_OP_CONVERSION_CHECK(false, - "Unsupported number of inputs to aten::normal operation: ", - context.get_input_size()); + PYTORCH_OP_CONVERSION_CHECK(false, + "Unsupported number of inputs to aten::normal operation: ", + context.get_input_size()); } } diff --git a/src/frontends/pytorch/src/op/scatter.cpp b/src/frontends/pytorch/src/op/scatter.cpp index d60cfd91bf6c90..afbf8c2208d3a2 100644 --- a/src/frontends/pytorch/src/op/scatter.cpp +++ b/src/frontends/pytorch/src/op/scatter.cpp @@ -58,9 +58,9 @@ const 
v12::ScatterElementsUpdate::Reduction get_reduction_mode(const std::string {"amax", v12::ScatterElementsUpdate::Reduction::MAX}, {"amin", v12::ScatterElementsUpdate::Reduction::MIN}}; - FRONT_END_OP_CONVERSION_CHECK(TORCH_REDUCTION_TO_OV.count(pt_reduce_mode), - "Unknown reduction mode: ", - pt_reduce_mode); + PYTORCH_OP_CONVERSION_CHECK(TORCH_REDUCTION_TO_OV.count(pt_reduce_mode), + "Unknown reduction mode: ", + pt_reduce_mode); auto reduction = TORCH_REDUCTION_TO_OV.at(pt_reduce_mode); return reduction; } diff --git a/src/frontends/pytorch/src/op/slice.cpp b/src/frontends/pytorch/src/op/slice.cpp index 20f51a8e786745..62b65922e455ec 100644 --- a/src/frontends/pytorch/src/op/slice.cpp +++ b/src/frontends/pytorch/src/op/slice.cpp @@ -40,7 +40,7 @@ OutputVector translate_slice_common(const NodeContext& context, const size_t num step_idx = 3; dim = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Slice must have either 4 or 5 inputs."); + PYTORCH_OP_CONVERSION_CHECK(false, "Slice must have either 4 or 5 inputs."); } // TODO: support default start/end with negative step ov::Output start; diff --git a/src/frontends/pytorch/src/op/to.cpp b/src/frontends/pytorch/src/op/to.cpp index d902caeb8487bc..cc563217f5eb03 100644 --- a/src/frontends/pytorch/src/op/to.cpp +++ b/src/frontends/pytorch/src/op/to.cpp @@ -54,7 +54,7 @@ OutputVector translate_to(const NodeContext& context) { return {context.get_input(0)}; } } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unknown aten::to format"); + PYTORCH_OP_CONVERSION_CHECK(false, "Unknown aten::to format"); } // We ignore both non_blocking and copy inputs since non_blocking argument is used diff --git a/src/frontends/pytorch/src/op/tuple_index.cpp b/src/frontends/pytorch/src/op/tuple_index.cpp index 320733d701284d..b1f0917168d08b 100644 --- a/src/frontends/pytorch/src/op/tuple_index.cpp +++ b/src/frontends/pytorch/src/op/tuple_index.cpp @@ -22,8 +22,8 @@ 
OutputVector translate_tuple_index(const NodeContext& context) { if (cast_fw_node(tuple, "prim::TupleConstruct")) { // this case require index to be constant auto index = context.const_input(1); - FRONT_END_OP_CONVERSION_CHECK(static_cast(index) < tuple->get_input_size(), - "Index of TupleIndex operation is higher then number of tuple elements."); + PYTORCH_OP_CONVERSION_CHECK(static_cast(index) < tuple->get_input_size(), + "Index of TupleIndex operation is higher then number of tuple elements."); return {tuple->get_input_source_output(index)}; } else { // Assume this case is when tuple is represented as tensor diff --git a/src/frontends/pytorch/src/op/upsample.cpp b/src/frontends/pytorch/src/op/upsample.cpp index 1d405cf60acd07..83a1e59d93d4bc 100644 --- a/src/frontends/pytorch/src/op/upsample.cpp +++ b/src/frontends/pytorch/src/op/upsample.cpp @@ -41,7 +41,7 @@ OutputVector base_translate_upsample(const NodeContext& context, } else if (dims == 3) { spatial_axes = {2, 3, 4}; } else { - FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported number of dimensions in upsample"); + PYTORCH_OP_CONVERSION_CHECK(false, "Unsupported number of dimensions in upsample"); } auto target_axes = std::make_shared(element::i32, Shape{spatial_axes.size()}, spatial_axes); auto scales = @@ -50,7 +50,7 @@ OutputVector base_translate_upsample(const NodeContext& context, context.mark_node(std::make_shared(element::i32, Shape{dims}, std::vector(dims, 1))); Output scales_sizes; if (context.input_is_none(1)) { - FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(scale_id), "Scale or Output size should be provided"); + PYTORCH_OP_CONVERSION_CHECK(!context.input_is_none(scale_id), "Scale or Output size should be provided"); auto spatial_scales = context.get_input(scale_id); if (context.get_input_type(1).is()) { spatial_scales = concat_list_construct(spatial_scales); diff --git a/src/frontends/pytorch/src/op/where.cpp b/src/frontends/pytorch/src/op/where.cpp index 3d03706970bc67..3c8bf86d84adda 
100644 --- a/src/frontends/pytorch/src/op/where.cpp +++ b/src/frontends/pytorch/src/op/where.cpp @@ -17,7 +17,7 @@ using namespace ov::op; OutputVector translate_where(const NodeContext& context) { num_inputs_check(context, 1, 3); auto cond = context.get_input(0); - FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(1), "aten::where(cond) unsupported"); + PYTORCH_OP_CONVERSION_CHECK(!context.input_is_none(1), "aten::where(cond) unsupported"); auto bool_cond = context.mark_node(std::make_shared(cond, element::boolean)); auto x = context.get_input(1); auto y = context.get_input(2); diff --git a/src/frontends/pytorch/src/translate_session.cpp b/src/frontends/pytorch/src/translate_session.cpp index 1791326f41a57b..5aff8bd1f05755 100644 --- a/src/frontends/pytorch/src/translate_session.cpp +++ b/src/frontends/pytorch/src/translate_session.cpp @@ -9,6 +9,7 @@ #include "input_model.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/slice.hpp" +#include "openvino/util/common_util.hpp" #include "openvino/util/log.hpp" #include "place.hpp" #include "pt_framework_node.hpp" @@ -273,6 +274,12 @@ OutputVector TranslateSession::convert_node(const NodeContext& context) { OPENVINO_DEBUG << "No translator found for: " << context.get_op_type() << "\n"; } catch (std::exception& e) { exception = e.what(); + if (m_telemetry) { + auto cropped_message = ov::util::filter_lines_by_prefix(exception, get_pytorch_prefix()); + if (cropped_message.size()) { + m_telemetry->send_event("error_info", cropped_message); + } + } } catch (...) 
{ exception = "Unknown exception type."; } diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 86c6d11e1d65e7..32f62eed603d47 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -25,6 +25,10 @@ void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_ } } +const std::string& get_pytorch_prefix() { + return pytorch_prefix; +} + Output make_optional_bias(const Output& base_op, const NodeContext& context, int bias_input_idx, diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index 3cb5798af9f65c..f7387bd6adaa61 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -20,6 +20,21 @@ class FrameworkNode; namespace frontend { namespace pytorch { +const std::string pytorch_prefix = "[PyTorch Frontend] "; + +const std::string& get_pytorch_prefix(); + +/// \brief Macro to check whether a boolean condition holds. +/// \param COND Condition to check +/// \param ... Additional error message info to be added to the error message via the `<<` +/// stream-insertion operator. Note that the expressions here will be evaluated lazily, +/// i.e., only if the `cond` evalutes to `false`. +/// \throws ::ov::frontend::OpConversionFailure if `cond` is false. +#ifndef PYTORCH_OP_CONVERSION_CHECK +# define PYTORCH_OP_CONVERSION_CHECK(COND, ...) 
\ + OPENVINO_ASSERT_HELPER(::ov::frontend::OpConversionFailure, "", (COND), get_pytorch_prefix(), __VA_ARGS__) +#endif + void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_inputs); Output make_optional_bias(const Output& base_op, diff --git a/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp b/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp index 36c2be5f204620..aacbadd9615803 100644 --- a/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp +++ b/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp @@ -96,7 +96,8 @@ std::shared_ptr GraphIteratorFlatBuffer::get_decoder() const if (type == "CUSTOM") { type = operator_code->custom_code()->str(); } - return std::make_shared(node, type, std::to_string(node_index), input_info, output_info); + auto name = std::to_string(node_index - m_graph->inputs()->size() - m_graph->outputs()->size()); + return std::make_shared(node, type, name, input_info, output_info); } else { auto tensor_id = m_nodes[node_index].as(); auto tensor = (*tensors)[tensor_id]; diff --git a/src/frontends/tensorflow_lite/src/op/op_translation_utils.hpp b/src/frontends/tensorflow_lite/src/op/op_translation_utils.hpp index 16ee477f292de5..94fbc5a5cdca07 100644 --- a/src/frontends/tensorflow_lite/src/op/op_translation_utils.hpp +++ b/src/frontends/tensorflow_lite/src/op/op_translation_utils.hpp @@ -73,10 +73,13 @@ OutputVector translate_binary_op_with_activation(const ov::frontend::tensorflow_ ov::frontend::tensorflow_lite::dequantize_inputs(inputs); auto context = ov::frontend::tensorflow_lite::NodeContext(node.get_decoder(), inputs); auto output = ov::frontend::tensorflow::op::translate_binary_op(context); + output[0].get_node()->set_friendly_name(""); + output[0].set_names({}); const auto& decoder = get_decoder(context); get_activation(output, context, EnumNameActivationFunctionType(decoder->get_attribute(&TF_TYPE::fused_activation_function))); + 
output[0].get_node()->set_friendly_name(node.get_name()); return output; } diff --git a/src/inference/dev_api/openvino/runtime/icompiled_model.hpp b/src/inference/dev_api/openvino/runtime/icompiled_model.hpp index dbbd6bce182984..b4b2e472deba80 100644 --- a/src/inference/dev_api/openvino/runtime/icompiled_model.hpp +++ b/src/inference/dev_api/openvino/runtime/icompiled_model.hpp @@ -15,6 +15,7 @@ #include "openvino/core/node_output.hpp" #include "openvino/runtime/common.hpp" +#include "openvino/runtime/iplugin.hpp" #include "openvino/runtime/iremote_context.hpp" #include "openvino/runtime/isync_infer_request.hpp" #include "openvino/runtime/remote_context.hpp" diff --git a/src/inference/dev_api/openvino/runtime/icore.hpp b/src/inference/dev_api/openvino/runtime/icore.hpp index fd75171f8fd69e..67eeaa5ee3327b 100644 --- a/src/inference/dev_api/openvino/runtime/icore.hpp +++ b/src/inference/dev_api/openvino/runtime/icore.hpp @@ -24,6 +24,9 @@ class Plugin; } +class ICompiledModel; +class IRemoteContext; + /** * @interface ICore * @brief Minimal ICore interface to allow plugin to get information from Core OpenVINO class. 
diff --git a/src/inference/dev_api/openvino/runtime/iplugin.hpp b/src/inference/dev_api/openvino/runtime/iplugin.hpp index 1ad03ff4b05ddf..59a51556b38166 100644 --- a/src/inference/dev_api/openvino/runtime/iplugin.hpp +++ b/src/inference/dev_api/openvino/runtime/iplugin.hpp @@ -24,6 +24,8 @@ namespace ov { +class ICompiledModel; + /** * @defgroup ov_dev_api OpenVINO Plugin API * @brief Defines Inference Engine Plugin API which can be used in plugin development diff --git a/src/inference/docs/api_details.md b/src/inference/docs/api_details.md index 89a4ec9965196b..aca93eb4474fc5 100644 --- a/src/inference/docs/api_details.md +++ b/src/inference/docs/api_details.md @@ -1,7 +1,6 @@ # OpenVINO Inference API OpenVINO Inference API contains two folders: - * [ie](../include/ie/) - legacy API, this API is no longer being developed, * [openvino](../include/openvino/) - current public API, this part is described below. ## Components of Public OpenVINO Inference API diff --git a/src/inference/include/openvino/runtime/common.hpp b/src/inference/include/openvino/runtime/common.hpp index e78df4ccedb10b..9e6bac8fa9e37c 100644 --- a/src/inference/include/openvino/runtime/common.hpp +++ b/src/inference/include/openvino/runtime/common.hpp @@ -50,3 +50,9 @@ namespace ov { using SupportedOpsMap = std::map; } // namespace ov + +#if defined(_WIN32) && !defined(__GNUC__) +# define __PRETTY_FUNCTION__ __FUNCSIG__ +#else +# define __PRETTY_FUNCTION__ __PRETTY_FUNCTION__ +#endif diff --git a/src/inference/include/openvino/runtime/variable_state.hpp b/src/inference/include/openvino/runtime/variable_state.hpp index 9ea114d7b92fc9..2cd3a7d494af5e 100644 --- a/src/inference/include/openvino/runtime/variable_state.hpp +++ b/src/inference/include/openvino/runtime/variable_state.hpp @@ -19,7 +19,6 @@ namespace ov { class InferRequest; class IVariableState; -class IInferRequestInternalWrapper; /** * @brief VariableState class diff --git a/src/inference/src/cpp/core.cpp 
b/src/inference/src/cpp/core.cpp index afa769f7e9c7b7..9458bda126edad 100644 --- a/src/inference/src/cpp/core.cpp +++ b/src/inference/src/cpp/core.cpp @@ -84,6 +84,7 @@ Core::Core(const std::string& xml_config_file) { std::map Core::get_versions(const std::string& device_name) const { OV_CORE_CALL_STATEMENT({ return _impl->get_versions(device_name); }) } + #ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT std::shared_ptr Core::read_model(const std::wstring& model_path, const std::wstring& bin_path) const { OV_CORE_CALL_STATEMENT( diff --git a/src/inference/src/cpp/infer_request.cpp b/src/inference/src/cpp/infer_request.cpp index 12f5523d168f7b..b3d5709c3bf794 100644 --- a/src/inference/src/cpp/infer_request.cpp +++ b/src/inference/src/cpp/infer_request.cpp @@ -252,8 +252,8 @@ bool InferRequest::wait_for(const std::chrono::milliseconds timeout) { OPENVINO_ASSERT(_impl != nullptr, "InferRequest was not initialized."); try { return _impl->wait_for(timeout); - } catch (const Cancelled& e) { - throw e; + } catch (const ov::Cancelled&) { + throw; } catch (const std::exception& ex) { OPENVINO_THROW(ex.what()); } catch (...) 
{ diff --git a/src/inference/src/cpp/variable_state.cpp b/src/inference/src/cpp/variable_state.cpp index e38c527367417e..3c024f42b00801 100644 --- a/src/inference/src/cpp/variable_state.cpp +++ b/src/inference/src/cpp/variable_state.cpp @@ -24,11 +24,8 @@ VariableState::~VariableState() { _impl = {}; } -VariableState::VariableState(const std::shared_ptr& impl, const std::shared_ptr& so) : _impl{impl}, _so{so} { - OPENVINO_ASSERT(_impl != nullptr, "VariableState was not initialized."); -} void VariableState::reset() { OV_VARIABLE_CALL_STATEMENT(_impl->reset()); diff --git a/src/inference/src/model_reader.cpp b/src/inference/src/model_reader.cpp index e12ccdc1d66d16..2b3110720a0dad 100644 --- a/src/inference/src/model_reader.cpp +++ b/src/inference/src/model_reader.cpp @@ -14,20 +14,19 @@ #include "transformations/utils/utils.hpp" namespace { - -ov::element::Type toLegacyType(const ov::element::Type& ngraph_type, bool input) { +ov::element::Type to_legacy_type(const ov::element::Type& legacy_type, bool input) { if (input) { - return ngraph_type == ov::element::f16 ? ov::element::f32 : ngraph_type; + return legacy_type == ov::element::f16 ? 
ov::element::f32 : legacy_type; } else { - if (ngraph_type == ov::element::i64 || ngraph_type == ov::element::u64 || ngraph_type == ov::element::i32 || - ngraph_type == ov::element::u32) { + if (legacy_type == ov::element::i64 || legacy_type == ov::element::u64 || legacy_type == ov::element::i32 || + legacy_type == ov::element::u32) { return ov::element::i32; - } else if (ngraph_type != ov::element::f32) { + } else if (legacy_type != ov::element::f32) { return ov::element::f32; } } - return ngraph_type; + return legacy_type; } void update_v10_model(std::shared_ptr& model, bool frontendMode = false) { @@ -41,7 +40,7 @@ void update_v10_model(std::shared_ptr& model, bool frontendMode = fal for (size_t i = 0; i < inputs.size(); ++i) { if (!frontendMode) { const auto ov_type = inputs[i].get_element_type(); - const auto legacy_type = toLegacyType(ov_type, true); + const auto legacy_type = to_legacy_type(ov_type, true); prepost.input(i).tensor().set_element_type(legacy_type); } for (const auto& name : inputs[i].get_names()) { @@ -56,7 +55,7 @@ void update_v10_model(std::shared_ptr& model, bool frontendMode = fal for (size_t i = 0; i < outputs.size(); ++i) { if (!frontendMode) { const auto ov_type = outputs[i].get_element_type(); - const auto legacy_type = toLegacyType(ov_type, false); + const auto legacy_type = to_legacy_type(ov_type, false); prepost.output(i).tensor().set_element_type(legacy_type); } for (const auto& name : outputs[i].get_names()) { diff --git a/src/inference/tests/functional/ov_shared_object_test.cpp b/src/inference/tests/functional/ov_shared_object_test.cpp index 96700e584338bb..b4108125f50fe8 100644 --- a/src/inference/tests/functional/ov_shared_object_test.cpp +++ b/src/inference/tests/functional/ov_shared_object_test.cpp @@ -45,7 +45,7 @@ TEST_F(SharedObjectOVTests, loaderThrowsIfNoPlugin) { TEST_F(SharedObjectOVTests, canFindExistedMethod) { loadDll(get_mock_engine_name()); - auto factory = make_std_function("CreatePluginEngine"); + auto factory = 
make_std_function(ov::create_plugin_function); EXPECT_NE(nullptr, factory); } diff --git a/src/plugins/intel_cpu/CMakeLists.txt b/src/plugins/intel_cpu/CMakeLists.txt index df9bddd23bc22e..63bb9db0e1f0b6 100644 --- a/src/plugins/intel_cpu/CMakeLists.txt +++ b/src/plugins/intel_cpu/CMakeLists.txt @@ -168,6 +168,13 @@ cross_compiled_file(${TARGET_NAME} NAME attn_memcpy NAMESPACE ov::Extensions::Cpu::XARCH ) +cross_compiled_file(${TARGET_NAME} + ARCH AVX512F AVX2 ANY + src/nodes/kernels/scaled_attn/attn_quant.cpp + API src/nodes/kernels/scaled_attn/attn_quant.hpp + NAME attn_quantkv attn_quant_u8 attn_dequant_u8 + NAMESPACE ov::Extensions::Cpu::XARCH +) # system dependencies must go last target_link_libraries(${TARGET_NAME} PRIVATE openvino::pugixml) ov_set_threading_interface_for(${TARGET_NAME}) diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp index 2b2c38ca772db5..39a72bd80aaad7 100644 --- a/src/plugins/intel_cpu/src/graph.cpp +++ b/src/plugins/intel_cpu/src/graph.cpp @@ -234,6 +234,11 @@ void Graph::InitGraph(bool optimize) { ResolveEdgeConflicts(); + optimizer.ShareReorders(*this); + RemoveDroppedNodes(); + + ResolveComplexInplaceConflicts(); + optimizer.ApplyImplSpecificGraphOptimizations(*this); SortTopologically(); @@ -308,7 +313,7 @@ void Graph::ResolveInplaceDirections() { OV_ITT_SCOPED_TASK(itt::domains::intel_cpu, "Graph::ResolveInplaceDirections"); for (auto& node : graphNodes) { - resolveInPlaceDirection(node); + node->resolveInPlaceDirection(); } } @@ -423,6 +428,22 @@ static bool isReorderAvailable(const MemoryDescPtr& parentDesc, const MemoryDesc return dnnl_success == status; } +void Graph::insertReorder(EdgePtr& edge, bool isOptimized, std::unordered_set& uniqueLayerNames) { + std::string basicLayerName = edge->getParent()->getName() + "_" + + node::Reorder::getReorderArgs(edge->getInputDesc(), edge->getOutputDesc()) + "_" + + edge->getChild()->getName(); + std::string layerName = basicLayerName; + int idx = 0; + 
while (uniqueLayerNames.find(layerName) != uniqueLayerNames.end()) { + idx++; + layerName = basicLayerName + "_" + std::to_string(idx); + } + uniqueLayerNames.insert(layerName); + + // optimized flag indicate that just desc update w/o actual physical memory movement. + InsertReorder(edge, layerName, edge->getInputDesc(), edge->getOutputDesc(), isOptimized); +} + void Graph::ResolveEdgeConflicts() { OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::intel_cpu_LT, "Graph::ResolveEdgeConflicts"); @@ -433,22 +454,6 @@ void Graph::ResolveEdgeConflicts() { uniqueLayerNames.insert(node->getName()); } - auto insertReorder = [&](EdgePtr& edge, bool isOptimized) { - std::string basicLayerName = edge->getParent()->getName() + "_" + - node::Reorder::getReorderArgs(edge->getInputDesc(), edge->getOutputDesc()) + "_" + - edge->getChild()->getName(); - std::string layerName = basicLayerName; - int idx = 0; - while (uniqueLayerNames.find(layerName) != uniqueLayerNames.end()) { - idx++; - layerName = basicLayerName + "_" + std::to_string(idx); - } - uniqueLayerNames.insert(layerName); - - // optimized flag indicate that just desc update w/o actual physical memory movement. 
- InsertReorder(edge, layerName, edge->getInputDesc(), edge->getOutputDesc(), isOptimized); - }; - auto updateEdge = [&](ptrdiff_t& i) { graphEdges.erase(graphEdges.begin() + i); i--; @@ -484,14 +489,31 @@ void Graph::ResolveEdgeConflicts() { edge = convertNode->getChildEdgeAt(0); } if (reorderStatusInternal != Edge::ReorderStatus::No) { - insertReorder(edge, reorderStatusInternal == Edge::ReorderStatus::Optimized); + insertReorder(edge, reorderStatusInternal == Edge::ReorderStatus::Optimized, uniqueLayerNames); } updateEdge(i); } else if (reorderStatus == Edge::ReorderStatus::Optimized) { - insertReorder(edge, true); + insertReorder(edge, true, uniqueLayerNames); updateEdge(i); } } +} + +void Graph::ResolveComplexInplaceConflicts() { + OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::intel_cpu_LT, "Graph::ResolveComplexInplaceConflicts"); + + ptrdiff_t numberOfEdges = static_cast(graphEdges.size()); + + std::unordered_set uniqueLayerNames; + for (auto node : graphNodes) { + uniqueLayerNames.insert(node->getName()); + } + + auto updateEdge = [&](ptrdiff_t& i) { + graphEdges.erase(graphEdges.begin() + i); + i--; + numberOfEdges--; + }; // secondary pass to eliminate complex inplace conflicts auto needReorder = [](const EdgePtr& edge) -> bool { @@ -518,13 +540,10 @@ void Graph::ResolveEdgeConflicts() { return false; }; - numberOfEdges = graphEdges.size(); //update the total number - for (ptrdiff_t i = 0; i < numberOfEdges; i++) { auto edge = graphEdges[i]; if (needReorder(edge)) { - constexpr bool optimizedReorder = false; - insertReorder(edge, optimizedReorder); + insertReorder(edge, false, uniqueLayerNames); updateEdge(i); } } @@ -1552,10 +1571,9 @@ bool Graph::InsertNode(NodePtr parent, NodePtr child, NodePtr node, int parentPo node->initSupportedPrimitiveDescriptors(); node->filterSupportedPrimitiveDescriptors(); node->selectOptimalPrimitiveDescriptor(); - resolveInPlaceDirection(node); + node->resolveInPlaceDirection(); node->initOptimalPrimitiveDescriptor(); } - 
return true; } @@ -1706,125 +1724,6 @@ std::shared_ptr Graph::dump() const { return dump_graph_as_ie_ngraph_net(*this); } -void Graph::resolveInPlaceDirection(const NodePtr& node) const { - enum InplaceDirectionType {UP, DOWN, CYCLIC, NONE}; - enum PortType {INPUT, OUTPUT}; - - auto inPlaceDirection = [](const NodePtr& node, PortType portType, int portNum) -> InplaceDirectionType { - if (PortType::INPUT == portType) { - auto inPlaceInpPort = node->inPlaceInputPort(portNum); - if (inPlaceInpPort >= 0) { - auto inPlaceOutPort = node->inPlaceOutPort(inPlaceInpPort); - if (inPlaceOutPort == inPlaceInpPort) { - return InplaceDirectionType::CYCLIC; - } else if (inPlaceOutPort < 0) { - return InplaceDirectionType::DOWN; - } else { - OPENVINO_THROW("Non trivial inPlace memory dependency has been detected"); - } - } - // the requested port has a negative inPlace tag, let's check whether it is referenced from the output - auto& config = node->getSelectedPrimitiveDescriptor()->getConfig(); - for (auto& portConf : config.outConfs) { - if (portConf.inPlace() == portNum) { - return InplaceDirectionType::UP; - } - } - } else if (PortType::OUTPUT == portType) { - auto inPlaceOutPort = node->inPlaceOutPort(portNum); - if (inPlaceOutPort >= 0) { - auto inPlaceInpPort = node->inPlaceInputPort(inPlaceOutPort); - if (inPlaceOutPort == inPlaceInpPort) { - return InplaceDirectionType::CYCLIC; - } else if (inPlaceInpPort < 0) { - return InplaceDirectionType::UP; - } else { - OPENVINO_THROW("Non trivial inPlace memory dependency has been detected"); - } - } - // the requested port has a negative inPlace tag, let's check whether it is referenced from the input - auto& config = node->getSelectedPrimitiveDescriptor()->getConfig(); - for (auto& portConf : config.inConfs) { - if (portConf.inPlace() == portNum) { - return InplaceDirectionType::DOWN; - } - } - } - return InplaceDirectionType::NONE; - }; - - auto& inpEdges = node->getParentEdges(); - for (auto& wEdge : inpEdges) { - if (auto pEdge 
= wEdge.lock()) { - auto inpPort = pEdge->getOutputNum(); - auto inPlaceInpPort = node->inPlaceInputPort(inpPort); - if (inPlaceInpPort < 0 || inPlaceDirection(node, PortType::INPUT, inpPort) != InplaceDirectionType::CYCLIC) { - continue; - } - // inPlace memory cyclic dependency detected, need to resolve - // let's check the parent node first - auto pParent = pEdge->getParent(); - auto parentInPlaceDirection = inPlaceDirection(pParent, PortType::OUTPUT, pEdge->getInputNum()); - if (parentInPlaceDirection == InplaceDirectionType::UP) { - auto config = node->getSelectedPrimitiveDescriptor()->getConfig(); - config.inConfs[inpPort].inPlace(-1); - node->initDescriptor(config); - } else if (parentInPlaceDirection == InplaceDirectionType::DOWN) { - //search if siblings already have downstream direction - auto downstreamPeers = [&] { - for (auto& peerEdge : pParent->getChildEdgesAtPort(pEdge->getInputNum())) { - auto peerNode = peerEdge->getChild(); - if (peerNode == node) continue; - if (inPlaceDirection(peerNode, PortType::INPUT, peerEdge->getOutputNum()) == InplaceDirectionType::DOWN) { - return true; - } - } - return false; - }(); - if (downstreamPeers) { - // when there is an downstream peer we have to resolve upstream inplace for the node - // to avoid inplace conflict - auto config = node->getSelectedPrimitiveDescriptor()->getConfig(); - config.inConfs[inpPort].inPlace(-1); - node->initDescriptor(config); - } else { - auto config = node->getSelectedPrimitiveDescriptor()->getConfig(); - config.outConfs[inPlaceInpPort].inPlace(-1); - node->initDescriptor(config); - } - } else { - // the parent node does not use inPlace memory, let's check children - std::function searchNonCyclicDirection; - searchNonCyclicDirection = [&](const NodePtr& node, int portIdx) -> InplaceDirectionType { - auto childEdges = node->getChildEdgesAtPort(portIdx); - for (auto& edge : childEdges) { - auto pChild = edge->getChild(); - auto result = inPlaceDirection(pChild, PortType::INPUT, 
edge->getOutputNum()); - if (InplaceDirectionType::UP == result || InplaceDirectionType::DOWN == result) { - return result; - } else if (InplaceDirectionType::CYCLIC == result) { - return searchNonCyclicDirection(pChild, pChild->inPlaceInputPort(edge->getOutputNum())); - } - } - return InplaceDirectionType::NONE; - }; - auto result = searchNonCyclicDirection(node, inPlaceInpPort); - if (one_of(result, InplaceDirectionType::UP, InplaceDirectionType::NONE)) { - auto config = node->getSelectedPrimitiveDescriptor()->getConfig(); - config.inConfs[inpPort].inPlace(-1); - node->initDescriptor(config); - } else if (InplaceDirectionType::DOWN == result) { - auto config = node->getSelectedPrimitiveDescriptor()->getConfig(); - config.outConfs[inPlaceInpPort].inPlace(-1); - node->initDescriptor(config); - } else { - OPENVINO_THROW("A node without an inPlace memory cyclic dependency has not been found"); - } - } - } - } -} - void Graph::SearchInternalStateNodes() { for (auto&& node : graphNodes) { if (node->getType() == Type::MemoryInput) { diff --git a/src/plugins/intel_cpu/src/graph.h b/src/plugins/intel_cpu/src/graph.h index 4c19e2c20443ec..17a820dcca0ca4 100644 --- a/src/plugins/intel_cpu/src/graph.h +++ b/src/plugins/intel_cpu/src/graph.h @@ -227,6 +227,7 @@ class Graph { void ResolveInplaceDirections(); void InitOptimalPrimitiveDescriptors(); void ResolveEdgeConflicts(); + void ResolveComplexInplaceConflicts(); bool ProcessDynNodes(); void Allocate(); void AllocateWithReuse(); @@ -259,7 +260,7 @@ class Graph { void EnforceInferencePrecision(); void EnforceBF16(); - void resolveInPlaceDirection(const NodePtr& node) const; + void insertReorder(EdgePtr& edge, bool isOptimized, std::unordered_set& uniqueLayerNames); }; using GraphPtr = std::shared_ptr; diff --git a/src/plugins/intel_cpu/src/graph_optimizer.cpp b/src/plugins/intel_cpu/src/graph_optimizer.cpp index b0fa76a845e5b4..a4622853f6977c 100644 --- a/src/plugins/intel_cpu/src/graph_optimizer.cpp +++ 
b/src/plugins/intel_cpu/src/graph_optimizer.cpp @@ -2212,6 +2212,69 @@ void GraphOptimizer::FuseEltwiseAndSimple(Graph &graph) { } } +void GraphOptimizer::ShareReorders(Graph& graph) { + auto getSuitableReorder = [](NodePtr node) -> Reorder* { + if (node->getType() != Type::Reorder) + return nullptr; + Reorder* reorder = dynamic_cast(node.get()); + if (reorder == nullptr) + OPENVINO_THROW("Cannot get reorder layer ", node->getName()); + + // inplace children cannot be safely shared with each other + auto reorderConsumers = reorder->getChildEdgesAtPort(0); + if (std::any_of(reorderConsumers.begin(), reorderConsumers.end(), [](EdgePtr e) { + return e->inPlace(Edge::LOOK_DOWN); + })) + return nullptr; + return reorder; + }; + + std::set dropped; + for (const auto& node : graph.GetNodes()) { + if (dropped.find(node) != dropped.end()) + continue; + + Reorder* reorder = getSuitableReorder(node); + if (!reorder) + continue; + + // find shareable sibling + auto dataEdge = reorder->getParentEdgeAt(0); + auto parentNode = dataEdge->getParent(); + auto parentPort = dataEdge->getInputNum(); + for (auto& edge : parentNode->getChildEdgesAtPort(parentPort)) { + auto siblingNode = edge->getChild(); + if (siblingNode == node) + continue; + Reorder* siblingReorder = getSuitableReorder(siblingNode); + if (!siblingReorder) + continue; + if (!reorder->getOutput().isCompatible(siblingReorder->getOutput())) + continue; + + DEBUG_LOG(node->getName(), " is shared by ", siblingNode->getName()); + + // siblingReorder can share output with current reorder + for (auto pwEdge : siblingReorder->getParentEdges()) { + auto pEdge = pwEdge.lock(); + if (pEdge) + graph.RemoveEdge(pEdge); + } + + for (auto pwEdge : siblingReorder->getChildEdges()) { + auto pEdge = pwEdge.lock(); + if (pEdge) { + graph.RemoveEdge(pEdge); + if (pEdge->getInputNum() == 0) + graph.CreateEdge(node, pEdge->getChild(), 0, pEdge->getOutputNum()); + } + } + + dropped.insert(siblingNode); + } + } +} + void 
GraphOptimizer::DropDoubleReorders(Graph &graph) { std::set processed; @@ -2512,7 +2575,6 @@ void GraphOptimizer::MergeTransposeAndReorder(Graph &graph) { auto isSuitableChildNode = [](NodePtr node) { return node->getType() == Type::Reorder - && node->getChildEdges().size() == 1 && !node->isDynamicNode(); // TODO [DS]: enable for dynamic shapes when inPlace in the dynamic case is available (CVS-74863) }; @@ -2589,92 +2651,127 @@ void GraphOptimizer::MergeTransposeAndReorder(Graph &graph) { // As in the first case, we also replace Transpose+Reorder pattern with a new Reorder. // Additionally, we insert another Reorder that performs the conversion from the input precision (inPrec) // to the output precision (outPrec) - auto mergeTransposeAndReorder = [&](std::shared_ptr& parentNode, std::shared_ptr& childNode) { - auto parentParentNode = parentNode->getParentEdgeAt(0)->getParent(); - auto parentParentConstNode = parentNode->getParentEdgeAt(1)->getParent(); - auto childChildNode = childNode->getChildEdgeAt(0)->getChild(); - - auto remEdge = parentNode->getParentEdgeAt(1); + auto mergeTransposeAndReorder = [&](std::shared_ptr& trans_node, std::shared_ptr& reorder_node) { + // parentParentNode ===> trans_node ===> reorder_node ===> cc0, cc1, ... + // is transformed into + // parentParentNode ===> reorder_nop ===> [reorder_convert] ==> cc0, cc1, ...
+ auto parentParentNode = trans_node->getParentEdgeAt(0)->getParent(); + auto parentParenPort = trans_node->getParentEdgeAt(0)->getInputNum(); + auto parentParentConstNode = trans_node->getParentEdgeAt(1)->getParent(); + + auto remEdge = trans_node->getParentEdgeAt(1); graph.RemoveEdge(remEdge); // to prevent inPlace conflict we must check that the memory reference is unidirectional or // inPlace memory is not used - const auto parentInPlace = parentNode->getParentEdgeAt(0)->inPlace(Edge::LOOK_UP); - const auto& childEdges = childNode->getChildEdgesAtPort(0); + const auto parentInPlace = trans_node->getParentEdgeAt(0)->inPlace(Edge::LOOK_UP); + const auto& childEdges = reorder_node->getChildEdgesAtPort(0); + const auto childInPlace = std::any_of(childEdges.begin(), childEdges.end(), [](const EdgePtr& edge){ return edge->inPlace(Edge::LOOK_DOWN); }); bool isOptimized = !(parentInPlace && childInPlace); - graph.DropNode(parentNode); - graph.DropNode(childNode); - - auto inDesc = parentNode->getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].getMemDesc(); - auto outDesc = childNode->getSelectedPrimitiveDescriptor()->getConfig().outConfs[0].getMemDesc(); - - auto inPrec = inDesc->getPrecision(); - auto outPrec = outDesc->getPrecision(); + // hold references to all children before dropping reorder_node + std::vector> reorderChildren; + for (auto ccEdge : childEdges) + reorderChildren.emplace_back(ccEdge->getChild(), ccEdge->getOutputNum()); + + // detach trans_node and reorder_node from graph by remove all of their edges + // they will be removed in future graph.RemoveDroppedNodes() call + auto detachNode = [&](std::shared_ptr& node) { + std::vector edges; + edges = node->getParentEdges(); + for (auto& edge : edges) + graph.RemoveEdge(edge.lock()); + edges = node->getChildEdges(); + for (auto& edge : edges) + graph.RemoveEdge(edge.lock()); + }; + detachNode(trans_node); + detachNode(reorder_node); - auto reorderInDesc = inDesc; - auto reorderOutDesc = 
outDesc->cloneWithNewPrecision(inPrec); + auto reorderInDesc = trans_node->getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].getMemDesc(); + auto finalDesc = reorder_node->getSelectedPrimitiveDescriptor()->getConfig().outConfs[0].getMemDesc(); + auto reorderOutDesc = finalDesc->cloneWithNewPrecision(reorderInDesc->getPrecision()); std::string reorderlayerName = parentParentNode->getName() + "_" + Reorder::getReorderArgs(*reorderInDesc, *reorderOutDesc) + "_" + "fake"; - DEBUG_LOG("mergeTransposeAndReorder ", parentNode->getName(), " and ", childNode->getName(), " -> ", reorderlayerName); - - EdgePtr edge; - for (auto &childEdge : parentParentNode->getChildEdges()) { - if (childEdge.lock()->getChild() == childChildNode) { - edge = childEdge.lock(); - break; - } - } - if (!edge) { - OPENVINO_THROW("Transpose node '", parentNode->getName(), "' has invalid edges."); - } + DEBUG_LOG("mergeTransposeAndReorder ", trans_node->getName(), " and ", reorder_node->getName(), " -> ", reorderlayerName); std::vector srcPerm; - auto configReorder = [&]() { - // case 1. transposeNode support blocked input & non-blocked output, in the case, the reorder - // cannot be optimized - // case 2. Transpose and Reorder do opposite permutation to each other as expected, but isOptimized is already set false - // due to some preliminarily checks. We need to reinterpret layout Transpose input without physical change of the memory. - auto* transposeNode = dynamic_cast(parentNode.get()); - if (transposeNode == nullptr) { - OPENVINO_THROW("[CPU] parent node of type:", - parentNode->getTypeStr(), - " with name: ", - parentNode->getName(), - " is not a transpose node"); + // case 1. transposeNode support blocked input & non-blocked output, in the case, the reorder + // cannot be optimized + // case 2. Transpose and Reorder do opposite permutation to each other as expected, but isOptimized is already set false + // due to some preliminarily checks. 
We need to reinterpret layout Transpose input without physical change of the memory. + auto* transposeNode = dynamic_cast(trans_node.get()); + if (transposeNode == nullptr) { + OPENVINO_THROW("[CPU] parent node of type:", + trans_node->getTypeStr(), + " with name: ", + trans_node->getName(), + " is not a transpose node"); + } + const auto& inOrder = transposeNode->getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].getMemDesc()->as()->getOrder(); + const auto& outOrder = reorderOutDesc->as()->getOrder(); + if (!isOptimized || inOrder.size() > outOrder.size()) { + isOptimized = false; + // inDesc should be permuted before calling reorder + auto & ord = transposeNode->getOrder(); + srcPerm = std::vector(ord.size()); + for (size_t i = 0; i < ord.size(); i++) { + srcPerm[ord[i]] = i; } - auto inOrder = transposeNode->getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].getMemDesc()->as()->getOrder(); - auto outOrder = reorderOutDesc->as()->getOrder(); - if (!isOptimized || inOrder.size() > outOrder.size()) { - isOptimized = false; - // inDesc should be permuted before calling reorder - auto & ord = transposeNode->getOrder(); - srcPerm = std::vector(ord.size()); - for (size_t i = 0; i < ord.size(); i++) { - srcPerm[ord[i]] = i; - } - } - }; - - configReorder(); + } + auto reorder_layout = + std::make_shared(*reorderInDesc, *reorderOutDesc, reorderlayerName, graph.getGraphContext()); + reorder_layout->setOptimized(isOptimized); + reorder_layout->setSrcPermutation(srcPerm); - auto reorderNode = graph.InsertReorder(edge, reorderlayerName, *reorderInDesc, *reorderOutDesc, isOptimized, srcPerm); + graph.CreateEdge(parentParentNode, reorder_layout, parentParenPort, 0); // case 2 - if (inPrec != outPrec) { - auto reorderInDesc2 = reorderOutDesc; - auto reorderOutDesc2 = outDesc; - - std::string reorderLayerName2 = reorderNode->getName() + "_" + - Reorder::getReorderArgs(*reorderInDesc2, *reorderOutDesc2) + "_" + childChildNode->getName(); - - 
graph.InsertReorder(reorderNode->getChildEdgeAt(0), reorderLayerName2, *reorderInDesc2, *reorderOutDesc2, false); - } + auto reorder_last = reorder_layout; + if (reorderOutDesc->getPrecision() != finalDesc->getPrecision()) { + std::string reorderLayerName2 = reorder_layout->getName() + "_" + + Reorder::getReorderArgs(*reorderOutDesc, *finalDesc) + "_x_" + + reorderChildren[0].first->getName(); + reorder_last = std::make_shared(*reorderOutDesc, + *finalDesc, + reorderLayerName2, + graph.getGraphContext()); + reorder_last->setOptimized(false); + reorder_last->setSrcPermutation(srcPerm); + graph.CreateEdge(reorder_layout, reorder_last, 0, 0); + } + + for (auto& cc : reorderChildren) + graph.CreateEdge(reorder_last, cc.first, 0, cc.second); + + // initialize and add nodes into graph + std::vector new_nodes; + new_nodes.push_back(reorder_layout); + if (reorder_last != reorder_layout) { + new_nodes.push_back(reorder_last); + } + for (auto& node : new_nodes) + graph.AddNode(node); + + // multiple nodes must be initialized in specific order + for (auto& node : new_nodes) + node->init(); + for (auto& node : new_nodes) { + node->getSupportedDescriptors(); + node->initSupportedPrimitiveDescriptors(); + node->filterSupportedPrimitiveDescriptors(); + } + for (auto& node : new_nodes) + node->selectOptimalPrimitiveDescriptor(); + for (auto& node : new_nodes) + node->resolveInPlaceDirection(); + for (auto& node : new_nodes) + node->initOptimalPrimitiveDescriptor(); }; for (size_t i = 0; i < graphNodes.size(); i++) { diff --git a/src/plugins/intel_cpu/src/graph_optimizer.h b/src/plugins/intel_cpu/src/graph_optimizer.h index 0716bedc802c73..181866562d5afb 100644 --- a/src/plugins/intel_cpu/src/graph_optimizer.h +++ b/src/plugins/intel_cpu/src/graph_optimizer.h @@ -16,6 +16,7 @@ class GraphOptimizer { public: void ApplyCommonGraphOptimizations(Graph& graph); void ApplyImplSpecificGraphOptimizations(Graph& graph); + void ShareReorders(Graph &graph); private: void 
FuseConvMatmulFCDeconvAndDQScales(Graph &graph); diff --git a/src/plugins/intel_cpu/src/memory_desc/blocked_memory_desc.cpp b/src/plugins/intel_cpu/src/memory_desc/blocked_memory_desc.cpp index 1ee27d32309a93..fb25a75c59530f 100644 --- a/src/plugins/intel_cpu/src/memory_desc/blocked_memory_desc.cpp +++ b/src/plugins/intel_cpu/src/memory_desc/blocked_memory_desc.cpp @@ -2,9 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "blocked_memory_desc.h" + #include -#include "blocked_memory_desc.h" #include "utils/general_utils.h" namespace ov { diff --git a/src/plugins/intel_cpu/src/memory_state.cpp b/src/plugins/intel_cpu/src/memory_state.cpp index 44b87a082fab3d..0632f42a55a66e 100644 --- a/src/plugins/intel_cpu/src/memory_state.cpp +++ b/src/plugins/intel_cpu/src/memory_state.cpp @@ -12,6 +12,9 @@ #include "utils/plain_tensor.hpp" #include "openvino/core/parallel.hpp" #include "nodes/common/cpu_convert.h" +#include "nodes/kernels/scaled_attn/attn_quant.hpp" + +using namespace ov::Extensions::Cpu::XARCH; namespace ov { namespace intel_cpu { @@ -195,14 +198,33 @@ ov::SoPtr VariableStateKVcache::get_state() const { auto H = pastkv.size(1); auto L0 = pastkv.size(2); auto S = pastkv.size(3); - parallel_for3d(B, H, L0, [&](size_t b, size_t h, size_t m) { - auto b_kv = static_cast(beam_table.at({b, m})); - cpu_convert(&pastkv.at({b_kv, h, m}), - &output.at({b, h, m}), - pastkv.m_dt, - output.m_dt, - S); - }); + if (pastkv.get_precision() == element::u8) { + auto nthr = parallel_get_max_threads(); + std::vector buffers(nthr); + parallel_for3d(B, H, L0, [&](size_t ithr, size_t b, size_t h, size_t m) { + auto b_kv = static_cast(beam_table.at({b, m})); + buffers[ithr].resize({S}); + attn_dequant_u8(pastkv.ptr(b_kv, h, m), + buffers[ithr].ptr(), + S, + m_scale_zp.ptr(b_kv, h, m)[0], + m_scale_zp.ptr(b_kv, h, m)[1]); + cpu_convert(buffers[ithr].ptr(), + output.ptr_v(b, h, m), + element::f32, + output.m_dt, + S); + }); + } else { + parallel_for3d(B, H, L0, [&](size_t b, 
size_t h, size_t m) { + auto b_kv = static_cast(beam_table.at({b, m})); + cpu_convert(pastkv.ptr_v(b_kv, h, m), + output.ptr_v(b, h, m), + pastkv.m_dt, + output.m_dt, + S); + }); + } return std::make_shared(external_mem); } @@ -218,7 +240,35 @@ void VariableStateKVcache::set_state_impl(const ov::SoPtr& state) { m_internal_mem = std::make_shared(get_engine(), dense_internal_desc); Memory external_mem(get_engine(), state_desc, m_state->data()); - m_internal_mem->load(external_mem); + if (dense_internal_desc->getPrecision() == element::u8) { + PlainTensor external, internal; + auto&& actual_internal_order = m_dense_internal_desc->getOrder(); + external.resize(external_mem.getStaticDims(), state_desc->getPrecision().size(), state_desc->getPrecision(), m_state->data()); + internal.reset(m_internal_mem); + external = external.permute(actual_internal_order); + internal = internal.permute(actual_internal_order); + auto B = internal.size(0); + auto H = internal.size(1); + auto L0 = internal.size(2); + auto S = internal.size(3); + auto nthr = parallel_get_max_threads(); + std::vector buffers(nthr); + parallel_for3d(B, H, L0, [&](size_t ithr, size_t b, size_t h, size_t m) { + buffers[ithr].resize({S}); + cpu_convert(external.ptr_v(b, h, m), + buffers[ithr].ptr(), + external.m_dt, + element::f32, + S); + attn_quant_u8(buffers[ithr].ptr(), + internal.ptr(b, h, m), + S, + m_scale_zp.at({b, h, m, size_t{0}}), + m_scale_zp.at({b, h, m, size_t{1}})); + }); + } else { + m_internal_mem->load(external_mem); + } //2. 
Reset the beam search table auto&& state_dims = dense_internal_desc->getShape().getStaticDims(); diff --git a/src/plugins/intel_cpu/src/memory_state.h b/src/plugins/intel_cpu/src/memory_state.h index c762c4afa2f142..f95186b92e48fc 100644 --- a/src/plugins/intel_cpu/src/memory_state.h +++ b/src/plugins/intel_cpu/src/memory_state.h @@ -8,6 +8,7 @@ #include "memory_desc/blocked_memory_desc.h" #include "openvino/runtime/ivariable_state.hpp" #include "openvino/runtime/tensor.hpp" +#include "utils/plain_tensor.hpp" namespace ov { namespace intel_cpu { @@ -128,6 +129,13 @@ class VariableStateKVcache : public VariableStateBase { m_hidden_state_max_size = max_size; } + PlainTensor& get_scale_zp() { + return m_scale_zp; + } + void set_scale_zp(const PlainTensor& t) { + m_scale_zp = t; + } + private: //ov::intel_cpu::VariableStateBase void set_state_impl(const ov::SoPtr& state) override; @@ -142,6 +150,9 @@ class VariableStateKVcache : public VariableStateBase { // this desc stores the internal prc and axis permutation BlockedMemoryDescPtr m_dense_internal_desc; + + // for u8 kv cache: [B, H, L, 2], 0 for scale, 1 for zp + PlainTensor m_scale_zp; }; using MemStatePtr = std::shared_ptr; diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp index ba00cde21577fc..f5a145f63e8d9f 100644 --- a/src/plugins/intel_cpu/src/node.cpp +++ b/src/plugins/intel_cpu/src/node.cpp @@ -1721,5 +1721,125 @@ int Node::inPlaceOutPort(int portIdx) const { return conf.outConfs[portIdx].inPlace(); } + +void Node::resolveInPlaceDirection() { + enum InplaceDirectionType {UP, DOWN, CYCLIC, NONE}; + enum PortType {INPUT, OUTPUT}; + + auto inPlaceDirection = [](const Node* node, PortType portType, int portNum) -> InplaceDirectionType { + if (PortType::INPUT == portType) { + auto inPlaceInpPort = node->inPlaceInputPort(portNum); + if (inPlaceInpPort >= 0) { + auto inPlaceOutPort = node->inPlaceOutPort(inPlaceInpPort); + if (inPlaceOutPort == inPlaceInpPort) { + return 
InplaceDirectionType::CYCLIC; + } else if (inPlaceOutPort < 0) { + return InplaceDirectionType::DOWN; + } else { + OPENVINO_THROW("Non trivial inPlace memory dependency has been detected"); + } + } + // the requested port has a negative inPlace tag, let's check whether it is referenced from the output + auto& config = node->getSelectedPrimitiveDescriptor()->getConfig(); + for (auto& portConf : config.outConfs) { + if (portConf.inPlace() == portNum) { + return InplaceDirectionType::UP; + } + } + } else if (PortType::OUTPUT == portType) { + auto inPlaceOutPort = node->inPlaceOutPort(portNum); + if (inPlaceOutPort >= 0) { + auto inPlaceInpPort = node->inPlaceInputPort(inPlaceOutPort); + if (inPlaceOutPort == inPlaceInpPort) { + return InplaceDirectionType::CYCLIC; + } else if (inPlaceInpPort < 0) { + return InplaceDirectionType::UP; + } else { + OPENVINO_THROW("Non trivial inPlace memory dependency has been detected"); + } + } + // the requested port has a negative inPlace tag, let's check whether it is referenced from the input + auto& config = node->getSelectedPrimitiveDescriptor()->getConfig(); + for (auto& portConf : config.inConfs) { + if (portConf.inPlace() == portNum) { + return InplaceDirectionType::DOWN; + } + } + } + return InplaceDirectionType::NONE; + }; + + auto& inpEdges = getParentEdges(); + for (auto& wEdge : inpEdges) { + if (auto pEdge = wEdge.lock()) { + auto inpPort = pEdge->getOutputNum(); + auto inPlaceInpPort = inPlaceInputPort(inpPort); + if (inPlaceInpPort < 0 || inPlaceDirection(this, PortType::INPUT, inpPort) != InplaceDirectionType::CYCLIC) { + continue; + } + // inPlace memory cyclic dependency detected, need to resolve + // let's check the parent node first + auto pParent = pEdge->getParent().get(); + auto parentInPlaceDirection = inPlaceDirection(pParent, PortType::OUTPUT, pEdge->getInputNum()); + if (parentInPlaceDirection == InplaceDirectionType::UP) { + auto config = getSelectedPrimitiveDescriptor()->getConfig(); + 
config.inConfs[inpPort].inPlace(-1); + initDescriptor(config); + } else if (parentInPlaceDirection == InplaceDirectionType::DOWN) { + //search if siblings already have downstream direction + auto downstreamPeers = [&] { + for (auto& peerEdge : pParent->getChildEdgesAtPort(pEdge->getInputNum())) { + auto peerNode = peerEdge->getChild().get(); + if (peerNode == this) continue; + if (inPlaceDirection(peerNode, PortType::INPUT, peerEdge->getOutputNum()) == InplaceDirectionType::DOWN) { + return true; + } + } + return false; + }(); + if (downstreamPeers) { + // when there is an downstream peer we have to resolve upstream inplace for the node + // to avoid inplace conflict + auto config = getSelectedPrimitiveDescriptor()->getConfig(); + config.inConfs[inpPort].inPlace(-1); + initDescriptor(config); + } else { + auto config = getSelectedPrimitiveDescriptor()->getConfig(); + config.outConfs[inPlaceInpPort].inPlace(-1); + initDescriptor(config); + } + } else { + // the parent node does not use inPlace memory, let's check children + std::function searchNonCyclicDirection; + searchNonCyclicDirection = [&](const Node* node, int portIdx) -> InplaceDirectionType { + auto childEdges = node->getChildEdgesAtPort(portIdx); + for (auto& edge : childEdges) { + auto pChild = edge->getChild().get(); + auto result = inPlaceDirection(pChild, PortType::INPUT, edge->getOutputNum()); + if (InplaceDirectionType::UP == result || InplaceDirectionType::DOWN == result) { + return result; + } else if (InplaceDirectionType::CYCLIC == result) { + return searchNonCyclicDirection(pChild, pChild->inPlaceInputPort(edge->getOutputNum())); + } + } + return InplaceDirectionType::NONE; + }; + auto result = searchNonCyclicDirection(this, inPlaceInpPort); + if (one_of(result, InplaceDirectionType::UP, InplaceDirectionType::NONE)) { + auto config = getSelectedPrimitiveDescriptor()->getConfig(); + config.inConfs[inpPort].inPlace(-1); + initDescriptor(config); + } else if (InplaceDirectionType::DOWN == result) { 
+ auto config = getSelectedPrimitiveDescriptor()->getConfig(); + config.outConfs[inPlaceInpPort].inPlace(-1); + initDescriptor(config); + } else { + OPENVINO_THROW("A node without an inPlace memory cyclic dependency has not been found"); + } + } + } + } +} + } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h index b27ab305bc6329..631278e311ad45 100644 --- a/src/plugins/intel_cpu/src/node.h +++ b/src/plugins/intel_cpu/src/node.h @@ -451,6 +451,7 @@ class Node { virtual void selectOptimalPrimitiveDescriptor(); virtual void initOptimalPrimitiveDescriptor(); + void resolveInPlaceDirection(); virtual void getSupportedDescriptors() = 0; // TODO [DS]: Should be moved into Node derivative class diff --git a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/attn_memcpy.cpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/attn_memcpy.cpp index 41efa80cf40a82..f2147479d4be98 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/attn_memcpy.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/attn_memcpy.cpp @@ -52,11 +52,11 @@ void attn_memcpy_kernel(const ov::intel_cpu::PlainTensor& k_input, const ov::intel_cpu::PlainTensor& past_v_output) { size_t B = k_input.m_dims[0], H = k_input.m_dims[1], L1 = k_input.m_dims[2], S = k_input.m_dims[3]; parallel_for3d(B, H, L1, [&](size_t b, size_t h, size_t m) { - attn_copy(&past_k_output.at({b, h, m, 0}), - &k_input.at({b, h, m, 0}), + attn_copy(past_k_output.ptr(b, h, m, 0), + k_input.ptr(b, h, m, 0), S); - attn_copy(&past_v_output.at({b, h, m, 0}), - &v_input.at({b, h, m, 0}), + attn_copy(past_v_output.ptr(b, h, m, 0), + v_input.ptr(b, h, m, 0), S); }); } @@ -67,11 +67,11 @@ static void attn_memcpy_kernel(const ov::intel_cpu::PlainTensor& k_input, const ov::intel_cpu::PlainTensor& past_v_output) { size_t B = k_input.m_dims[0], H = k_input.m_dims[1], L1 = k_input.m_dims[2], S = k_input.m_dims[3]; parallel_for3d(B, H, L1, [&](size_t b, 
size_t h, size_t m) { - std::memcpy(&past_k_output.at({b, h, m, 0}), - &k_input.at({b, h, m, 0}), + std::memcpy(past_k_output.ptr_v(b, h, m, 0), + k_input.ptr_v(b, h, m, 0), S * k_input.m_element_size); - std::memcpy(&past_v_output.at({b, h, m, 0}), - &v_input.at({b, h, m, 0}), + std::memcpy(past_v_output.ptr_v(b, h, m, 0), + v_input.ptr_v(b, h, m, 0), S * v_input.m_element_size); }); } diff --git a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/attn_quant.cpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/attn_quant.cpp new file mode 100644 index 00000000000000..018b52b4bd1f75 --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/attn_quant.cpp @@ -0,0 +1,250 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include + +#include +#include +#include +#include +#include + +#if defined(HAVE_AVX2) || defined(HAVE_AVX512F) +# include +#endif + +#include "openvino/core/type/bfloat16.hpp" +#include "openvino/core/parallel.hpp" +#include "common.hpp" +#include "attn_quant.hpp" + +namespace ov { +namespace Extensions { +namespace Cpu { +namespace XARCH { + +using namespace ov; + +template +static void quant_u8(const T* src, uint8_t* dst, size_t n, float& scale, float& zp) { + size_t i = 0; + float max = -FLT_MAX; + float min = FLT_MAX; +#if defined(HAVE_AVX512F) + auto v0_max = _mm512_set1_ps(-FLT_MAX); + auto v0_min = _mm512_set1_ps(FLT_MAX); + auto v1_max = _mm512_set1_ps(-FLT_MAX); + auto v1_min = _mm512_set1_ps(FLT_MAX); + auto v2_max = _mm512_set1_ps(-FLT_MAX); + auto v2_min = _mm512_set1_ps(FLT_MAX); + auto v3_max = _mm512_set1_ps(-FLT_MAX); + auto v3_min = _mm512_set1_ps(FLT_MAX); + for (; i + 4 * vec_len_f32_avx512 <= n; i += vec_len_f32_avx512 * 4) { + auto v0 = mm512_uni_loadu_ps(src + i); + auto v1 = mm512_uni_loadu_ps(src + i + vec_len_f32_avx512); + auto v2 = mm512_uni_loadu_ps(src + i + 2 * vec_len_f32_avx512); + auto v3 = mm512_uni_loadu_ps(src + i + 3 * vec_len_f32_avx512); + v0_max = 
_mm512_max_ps(v0_max, v0); + v0_min = _mm512_min_ps(v0_min, v0); + v1_max = _mm512_max_ps(v1_max, v1); + v1_min = _mm512_min_ps(v1_min, v1); + v2_max = _mm512_max_ps(v2_max, v2); + v2_min = _mm512_min_ps(v2_min, v2); + v3_max = _mm512_max_ps(v3_max, v3); + v3_min = _mm512_min_ps(v3_min, v3); + } + if (i + 2 * vec_len_f32_avx512 <= n) { + auto v0 = mm512_uni_loadu_ps(src + i); + auto v1 = mm512_uni_loadu_ps(src + i + vec_len_f32_avx512); + v0_max = _mm512_max_ps(v0_max, v0); + v0_min = _mm512_min_ps(v0_min, v0); + v1_max = _mm512_max_ps(v1_max, v1); + v1_min = _mm512_min_ps(v1_min, v1); + i += 2 * vec_len_f32_avx512; + } + if (i + vec_len_f32_avx512 <= n) { + auto v0 = mm512_uni_loadu_ps(src + i); + v0_max = _mm512_max_ps(v0_max, v0); + v0_min = _mm512_min_ps(v0_min, v0); + i += vec_len_f32_avx512; + } + v0_max = _mm512_max_ps(v0_max, v1_max); + v0_min = _mm512_min_ps(v0_min, v1_min); + v2_max = _mm512_max_ps(v2_max, v3_max); + v2_min = _mm512_min_ps(v2_min, v3_min); + v0_max = _mm512_max_ps(v0_max, v2_max); + v0_min = _mm512_min_ps(v0_min, v2_min); + max = _mm512_reduce_max_ps(v0_max); + min = _mm512_reduce_min_ps(v0_min); +#elif defined(HAVE_AVX2) + auto v0_max = _mm256_set1_ps(-FLT_MAX); + auto v0_min = _mm256_set1_ps(FLT_MAX); + auto v1_max = _mm256_set1_ps(-FLT_MAX); + auto v1_min = _mm256_set1_ps(FLT_MAX); + auto v2_max = _mm256_set1_ps(-FLT_MAX); + auto v2_min = _mm256_set1_ps(FLT_MAX); + auto v3_max = _mm256_set1_ps(-FLT_MAX); + auto v3_min = _mm256_set1_ps(FLT_MAX); + for (; i + 4 * vec_len_f32_avx2 <= n; i += vec_len_f32_avx2 * 4) { + auto v0 = mm256_uni_loadu_ps(src + i); + auto v1 = mm256_uni_loadu_ps(src + i + vec_len_f32_avx2); + auto v2 = mm256_uni_loadu_ps(src + i + 2 * vec_len_f32_avx2); + auto v3 = mm256_uni_loadu_ps(src + i + 3 * vec_len_f32_avx2); + v0_max = _mm256_max_ps(v0_max, v0); + v0_min = _mm256_min_ps(v0_min, v0); + v1_max = _mm256_max_ps(v1_max, v1); + v1_min = _mm256_min_ps(v1_min, v1); + v2_max = _mm256_max_ps(v2_max, v2); + v2_min = 
_mm256_min_ps(v2_min, v2); + v3_max = _mm256_max_ps(v3_max, v3); + v3_min = _mm256_min_ps(v3_min, v3); + } + if (i + 2 * vec_len_f32_avx2 <= n) { + auto v0 = mm256_uni_loadu_ps(src + i); + auto v1 = mm256_uni_loadu_ps(src + i + vec_len_f32_avx2); + v0_max = _mm256_max_ps(v0_max, v0); + v0_min = _mm256_min_ps(v0_min, v0); + v1_max = _mm256_max_ps(v1_max, v1); + v1_min = _mm256_min_ps(v1_min, v1); + i += 2 * vec_len_f32_avx2; + } + if (i + vec_len_f32_avx2 <= n) { + auto v0 = mm256_uni_loadu_ps(src + i); + v0_max = _mm256_max_ps(v0_max, v0); + v0_min = _mm256_min_ps(v0_min, v0); + i += vec_len_f32_avx2; + } + v0_max = _mm256_max_ps(v0_max, v1_max); + v0_min = _mm256_min_ps(v0_min, v1_min); + v2_max = _mm256_max_ps(v2_max, v3_max); + v2_min = _mm256_min_ps(v2_min, v3_min); + v0_max = _mm256_max_ps(v0_max, v2_max); + v0_min = _mm256_min_ps(v0_min, v2_min); + hmax(v0_max); + hmin(v0_min); + max = _mm256_cvtss_f32(v0_max); + min = _mm256_cvtss_f32(v0_min); +#endif + for (; i < n; i++) { + float tmp = src[i]; + max = std::max(max, tmp); + min = std::min(min, tmp); + } + scale = (max - min) / 255; + zp = -min / scale; + + i = 0; +#if defined(HAVE_AVX512F) + auto v_scale = _mm512_set1_ps(1 / scale); + auto v_zp = _mm512_set1_ps(zp); + auto v_zero = _mm512_setzero_epi32(); + for (; i + vec_len_f32_avx512 <= n; i += vec_len_f32_avx512) { + auto v = mm512_uni_loadu_ps(src + i); + v = _mm512_fmadd_ps(v, v_scale, v_zp); + auto v_i32 = _mm512_cvt_roundps_epi32(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); + v_i32 = _mm512_max_epi32(v_i32, v_zero); + _mm512_mask_cvtusepi32_storeu_epi8(dst + i, 0xffff, v_i32); + } +#elif defined(HAVE_AVX2) + auto v_scale = _mm256_set1_ps(1 / scale); + auto v_zp = _mm256_set1_ps(zp); + for (; i + vec_len_f32_avx2 <= n; i += vec_len_f32_avx2) { + auto v = mm256_uni_loadu_ps(src + i); + v = _mm256_fmadd_ps(v, v_scale, v_zp); + v = _mm256_round_ps(v, _MM_ROUND_NEAREST); + auto v_i32 = _mm256_cvtps_epi32(v); + + auto high4 = 
_mm256_extractf128_si256(v_i32, 1); + auto low4 = _mm256_castsi256_si128(v_i32); + auto packed = _mm_packs_epi32(low4, high4); + packed = _mm_packus_epi16(packed, packed); + _mm_storel_epi64(reinterpret_cast<__m128i*>(dst + i), packed); + } +#endif + for (; i < n; i++) { + float tmp = src[i]; + dst[i] = static_cast(std::round(tmp / scale + zp)); + } +} + +template +static void attn_quant_mt(const ov::intel_cpu::PlainTensor& k_src, + const ov::intel_cpu::PlainTensor& v_src, + const ov::intel_cpu::PlainTensor& k_dst, + const ov::intel_cpu::PlainTensor& v_dst, + const ov::intel_cpu::PlainTensor& k_scale_zp, + const ov::intel_cpu::PlainTensor& v_scale_zp) { + size_t B = k_src.m_dims[0], H = k_src.m_dims[1], L1 = k_src.m_dims[2], S = k_src.m_dims[3]; + parallel_for3d(B, H, L1, [&](size_t b, size_t h, size_t m) { + auto p_k = k_scale_zp.ptr(b, h, m); + auto p_v = v_scale_zp.ptr(b, h, m); + quant_u8(k_src.ptr(b, h, m), + k_dst.ptr(b, h, m), + S, + p_k[0], + p_k[1]); + quant_u8(v_src.ptr(b, h, m), + v_dst.ptr(b, h, m), + S, + p_v[0], + p_v[1]); + }); +} + +void attn_quantkv(const ov::intel_cpu::PlainTensor& k_src, + const ov::intel_cpu::PlainTensor& v_src, + const ov::intel_cpu::PlainTensor& k_dst, + const ov::intel_cpu::PlainTensor& v_dst, + const ov::intel_cpu::PlainTensor& k_scale_zp, + const ov::intel_cpu::PlainTensor& v_scale_zp) { + if (k_src.get_precision() == ov::element::f32 && k_dst.get_precision() == ov::element::u8) { + attn_quant_mt(k_src, v_src, k_dst, v_dst, k_scale_zp, v_scale_zp); + } else if (k_src.get_precision() == ov::element::bf16 && k_dst.get_precision() == ov::element::u8) { + attn_quant_mt(k_src, v_src, k_dst, v_dst, k_scale_zp, v_scale_zp); + } else { + OPENVINO_THROW("unsupport src type: ", k_src.get_precision(), ", dst type: ", k_dst.get_precision(), " in attn_quantkv"); + } +} + +void attn_quant_u8(const float* src, uint8_t* dst, size_t n, float& scale, float& zp) { + quant_u8(src, dst, n, scale, zp); +} + +void attn_dequant_u8(const uint8_t* 
src, float* dst, size_t n, float scale, float zp) { + size_t i = 0; + // loadu_si128/epi64 does not support const qualifier + uint8_t* src_nc = const_cast(src); +#if defined(HAVE_AVX512F) + auto v_zp = _mm512_set1_ps(zp); + auto v_scale = _mm512_set1_ps(scale); + for (; i + vec_len_f32_avx512 <= n; i += vec_len_f32_avx512) { + auto v0_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(src_nc + i)); + auto v0_512 = _mm512_cvtepu8_epi32(v0_128); + auto v0_value = _mm512_cvtepi32_ps(v0_512); + v0_value = _mm512_sub_ps(v0_value, v_zp); + auto v0_out = _mm512_mul_ps(v0_value, v_scale); + mm512_uni_storeu_ps(dst + i, v0_out); + } +#elif defined(HAVE_AVX2) + auto v_zp = _mm256_set1_ps(zp); + auto v_scale = _mm256_set1_ps(scale); + for (; i + vec_len_f32_avx2 <= n; i += vec_len_f32_avx2) { + auto v0_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(src_nc + i)); + auto v0_256 = _mm256_cvtepu8_epi32(v0_128); + auto v0_value = _mm256_cvtepi32_ps(v0_256); + v0_value = _mm256_sub_ps(v0_value, v_zp); + auto v0_out = _mm256_mul_ps(v0_value, v_scale); + mm256_uni_storeu_ps(dst + i, v0_out); + } +#endif + for (; i < n; ++i) { + float tmp = src_nc[i]; + tmp = (tmp - zp) * scale; + dst[i] = tmp; + } +} + +} // namespace XARCH +} // namespace Cpu +} // namespace Extensions +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/attn_quant.hpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/attn_quant.hpp new file mode 100644 index 00000000000000..a95aa8e630cf13 --- /dev/null +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/attn_quant.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include +#include +#include +#include +#include "openvino/core/type/element_type.hpp" +#include "utils/plain_tensor.hpp" + +namespace ov { +namespace Extensions { +namespace Cpu { +namespace XARCH { + +void attn_quantkv(const ov::intel_cpu::PlainTensor& k_src, 
+ const ov::intel_cpu::PlainTensor& v_src, + const ov::intel_cpu::PlainTensor& k_dst, + const ov::intel_cpu::PlainTensor& v_dst, + const ov::intel_cpu::PlainTensor& k_scale_zp, + const ov::intel_cpu::PlainTensor& v_scale_zp); + +void attn_quant_u8(const float* src, uint8_t* dst, size_t n, float& scale, float& zp); + +void attn_dequant_u8(const uint8_t* src, float* dst, size_t n, float scale, float zp); + +} // namespace XARCH +} // namespace Cpu +} // namespace Extensions +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp index e624382fd2eb0f..231a9d2c51fbb2 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp @@ -163,6 +163,24 @@ static constexpr size_t vec_len_f32_avx2 = vec_len_avx2 / sizeof(float); y = _mm256_permute2f128_ps(x, x, 1); // y: 4567 x x x 0123 x x x x = _mm256_add_ps(x, y); // x: 01234567 x x x x x x x } + inline void hmax(__m256& x) { + __m256 y; // x: 0 1 2 3 4 5 6 7 + y = _mm256_permute_ps(x, 0x39); // y: 1 2 3 0 5 6 7 4 + x = _mm256_max_ps(x, y); // X: 01 12 23 30 45 56 67 74 + y = _mm256_permute_ps(x, 0x4e); // y: 23 30 01 12 67 74 45 56 + x = _mm256_max_ps(x, y); // x: 0123 x x x 4567 x x x + y = _mm256_permute2f128_ps(x, x, 1); // y: 4567 x x x 0123 x x x + x = _mm256_max_ps(x, y); // x: 01234567 x x x x x x x + } + inline void hmin(__m256& x) { + __m256 y; // x: 0 1 2 3 4 5 6 7 + y = _mm256_permute_ps(x, 0x39); // y: 1 2 3 0 5 6 7 4 + x = _mm256_min_ps(x, y); // X: 01 12 23 30 45 56 67 74 + y = _mm256_permute_ps(x, 0x4e); // y: 23 30 01 12 67 74 45 56 + x = _mm256_min_ps(x, y); // x: 0123 x x x 4567 x x x + y = _mm256_permute2f128_ps(x, x, 1); // y: 4567 x x x 0123 x x x + x = _mm256_min_ps(x, y); // x: 01234567 x x x x x x x + } #endif } // namespace XARCH diff --git 
a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.cpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.cpp index 393d58fcfde958..aa9643fe7b09ff 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.cpp @@ -26,8 +26,22 @@ namespace XARCH { using namespace ov; +#if defined(HAVE_AVX2) + +#define prefetch_bytes(bytes, sel, advance, src) { \ + auto *p = reinterpret_cast(src); \ + for (size_t i = 0; i < bytes; i += 64) \ + _mm_prefetch(p + i + advance, sel); \ +} + +#else + +#define prefetch_bytes(bytes, sel, advance, src) + +#endif + template -void attn_acc_value(float* out, float weight, T* v, size_t S) { +static void attn_acc_value(float* out, float weight, T* v, size_t S, float* scale, float* zp) { size_t i = 0; #if defined(HAVE_AVX512F) auto attn_w_vec_fp32 = _mm512_set1_ps(weight); @@ -51,27 +65,328 @@ void attn_acc_value(float* out, float weight, T* v, size_t S) { } } +static void attn_acc_value(float* out, float weight, uint8_t* v, size_t S, float* scale, float* zp) { + size_t i = 0; + weight *= *scale; +#if defined(HAVE_AVX512F) + auto attn_w_vec_fp32 = _mm512_set1_ps(weight); + auto v_zp = _mm512_set1_ps(*zp); + for (; i + 4 * vec_len_f32_avx512 <= S; i += 4 * vec_len_f32_avx512) { + auto v0_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(v + i)); + auto v1_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(v + i + vec_len_f32_avx512)); + auto v2_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(v + i + vec_len_f32_avx512 * 2)); + auto v3_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(v + i + vec_len_f32_avx512 * 3)); + + auto v0_out = mm512_uni_loadu_ps(out + i); + auto v1_out = mm512_uni_loadu_ps(out + i + vec_len_f32_avx512); + auto v2_out = mm512_uni_loadu_ps(out + i + vec_len_f32_avx512 * 2); + auto v3_out = mm512_uni_loadu_ps(out + i + vec_len_f32_avx512 * 3); + + auto v0_256 = _mm512_cvtepu8_epi32(v0_128); 
+ auto v1_256 = _mm512_cvtepu8_epi32(v1_128); + auto v2_256 = _mm512_cvtepu8_epi32(v2_128); + auto v3_256 = _mm512_cvtepu8_epi32(v3_128); + + auto v0_value = _mm512_cvtepi32_ps(v0_256); + auto v1_value = _mm512_cvtepi32_ps(v1_256); + auto v2_value = _mm512_cvtepi32_ps(v2_256); + auto v3_value = _mm512_cvtepi32_ps(v3_256); + + v0_value = _mm512_sub_ps(v0_value, v_zp); + v1_value = _mm512_sub_ps(v1_value, v_zp); + v2_value = _mm512_sub_ps(v2_value, v_zp); + v3_value = _mm512_sub_ps(v3_value, v_zp); + + v0_out = _mm512_fmadd_ps(attn_w_vec_fp32, v0_value, v0_out); + v1_out = _mm512_fmadd_ps(attn_w_vec_fp32, v1_value, v1_out); + v2_out = _mm512_fmadd_ps(attn_w_vec_fp32, v2_value, v2_out); + v3_out = _mm512_fmadd_ps(attn_w_vec_fp32, v3_value, v3_out); + + mm512_uni_storeu_ps(out + i + vec_len_f32_avx512 * 0, v0_out); + mm512_uni_storeu_ps(out + i + vec_len_f32_avx512 * 1, v1_out); + mm512_uni_storeu_ps(out + i + vec_len_f32_avx512 * 2, v2_out); + mm512_uni_storeu_ps(out + i + vec_len_f32_avx512 * 3, v3_out); + } + if (i + 2 * vec_len_f32_avx512 <= S) { + auto v0_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(v + i)); + auto v1_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(v + i + vec_len_f32_avx512)); + + auto v0_out = mm512_uni_loadu_ps(out + i); + auto v1_out = mm512_uni_loadu_ps(out + i + vec_len_f32_avx512); + + auto v0_256 = _mm512_cvtepu8_epi32(v0_128); + auto v1_256 = _mm512_cvtepu8_epi32(v1_128); + + auto v0_value = _mm512_cvtepi32_ps(v0_256); + auto v1_value = _mm512_cvtepi32_ps(v1_256); + + v0_value = _mm512_sub_ps(v0_value, v_zp); + v1_value = _mm512_sub_ps(v1_value, v_zp); + + v0_out = _mm512_fmadd_ps(attn_w_vec_fp32, v0_value, v0_out); + v1_out = _mm512_fmadd_ps(attn_w_vec_fp32, v1_value, v1_out); + + mm512_uni_storeu_ps(out + i + vec_len_f32_avx512 * 0, v0_out); + mm512_uni_storeu_ps(out + i + vec_len_f32_avx512 * 1, v1_out); + i += 2 * vec_len_f32_avx512; + } + if (i + vec_len_f32_avx512 <= S) { + auto v0_128 = 
_mm_loadu_si128(reinterpret_cast<__m128i*>(v + i)); + auto v0_out = mm512_uni_loadu_ps(out + i); + auto v0_256 = _mm512_cvtepu8_epi32(v0_128); + auto v0_value = _mm512_cvtepi32_ps(v0_256); + v0_value = _mm512_sub_ps(v0_value, v_zp); + v0_out = _mm512_fmadd_ps(attn_w_vec_fp32, v0_value, v0_out); + mm512_uni_storeu_ps(out + i + vec_len_f32_avx512 * 0, v0_out); + i += vec_len_f32_avx512; + } +#elif defined(HAVE_AVX2) + auto attn_w_vec_fp32 = _mm256_set1_ps(weight); + auto v_zp = _mm256_set1_ps(*zp); + for (; i + 4 * vec_len_f32_avx2 <= S; i += 4 * vec_len_f32_avx2) { + auto v0_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(v + i)); + auto v1_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(v + i + vec_len_f32_avx2)); + auto v2_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(v + i + vec_len_f32_avx2 * 2)); + auto v3_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(v + i + vec_len_f32_avx2 * 3)); + + auto v0_out = mm256_uni_loadu_ps(out + i); + auto v1_out = mm256_uni_loadu_ps(out + i + vec_len_f32_avx2); + auto v2_out = mm256_uni_loadu_ps(out + i + vec_len_f32_avx2 * 2); + auto v3_out = mm256_uni_loadu_ps(out + i + vec_len_f32_avx2 * 3); + + auto v0_256 = _mm256_cvtepu8_epi32(v0_128); + auto v1_256 = _mm256_cvtepu8_epi32(v1_128); + auto v2_256 = _mm256_cvtepu8_epi32(v2_128); + auto v3_256 = _mm256_cvtepu8_epi32(v3_128); + + auto v0_value = _mm256_cvtepi32_ps(v0_256); + auto v1_value = _mm256_cvtepi32_ps(v1_256); + auto v2_value = _mm256_cvtepi32_ps(v2_256); + auto v3_value = _mm256_cvtepi32_ps(v3_256); + + v0_value = _mm256_sub_ps(v0_value, v_zp); + v1_value = _mm256_sub_ps(v1_value, v_zp); + v2_value = _mm256_sub_ps(v2_value, v_zp); + v3_value = _mm256_sub_ps(v3_value, v_zp); + + v0_out = _mm256_fmadd_ps(attn_w_vec_fp32, v0_value, v0_out); + v1_out = _mm256_fmadd_ps(attn_w_vec_fp32, v1_value, v1_out); + v2_out = _mm256_fmadd_ps(attn_w_vec_fp32, v2_value, v2_out); + v3_out = _mm256_fmadd_ps(attn_w_vec_fp32, v3_value, v3_out); + + mm256_uni_storeu_ps(out + i + 
vec_len_f32_avx2 * 0, v0_out); + mm256_uni_storeu_ps(out + i + vec_len_f32_avx2 * 1, v1_out); + mm256_uni_storeu_ps(out + i + vec_len_f32_avx2 * 2, v2_out); + mm256_uni_storeu_ps(out + i + vec_len_f32_avx2 * 3, v3_out); + } + if (i + 2 * vec_len_f32_avx2 <= S) { + auto v0_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(v + i)); + auto v1_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(v + i + vec_len_f32_avx2)); + + auto v0_out = mm256_uni_loadu_ps(out + i); + auto v1_out = mm256_uni_loadu_ps(out + i + vec_len_f32_avx2); + + auto v0_256 = _mm256_cvtepu8_epi32(v0_128); + auto v1_256 = _mm256_cvtepu8_epi32(v1_128); + + auto v0_value = _mm256_cvtepi32_ps(v0_256); + auto v1_value = _mm256_cvtepi32_ps(v1_256); + + v0_value = _mm256_sub_ps(v0_value, v_zp); + v1_value = _mm256_sub_ps(v1_value, v_zp); + + v0_out = _mm256_fmadd_ps(attn_w_vec_fp32, v0_value, v0_out); + v1_out = _mm256_fmadd_ps(attn_w_vec_fp32, v1_value, v1_out); + + mm256_uni_storeu_ps(out + i + vec_len_f32_avx2 * 0, v0_out); + mm256_uni_storeu_ps(out + i + vec_len_f32_avx2 * 1, v1_out); + i += 2 * vec_len_f32_avx2; + } + if (i + vec_len_f32_avx2 <= S) { + auto v0_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(v + i)); + auto v0_out = mm256_uni_loadu_ps(out + i); + auto v0_256 = _mm256_cvtepu8_epi32(v0_128); + auto v0_value = _mm256_cvtepi32_ps(v0_256); + v0_value = _mm256_sub_ps(v0_value, v_zp); + v0_out = _mm256_fmadd_ps(attn_w_vec_fp32, v0_value, v0_out); + mm256_uni_storeu_ps(out + i, v0_out); + i += vec_len_f32_avx2; + } +#endif + for (; i < S; i++) { + out[i] += weight * (v[i] - *zp); + } +} + +template +static float sum_q_head(T* a, size_t n) { + float sum = 0.0f; + size_t i = 0; +#if defined(HAVE_AVX512F) + auto vsum0 = _mm512_set1_ps(0.0f); + auto vsum1 = _mm512_set1_ps(0.0f); + auto vsum2 = _mm512_set1_ps(0.0f); + auto vsum3 = _mm512_set1_ps(0.0f); + for (; i + 4 * vec_len_f32_avx512 <= n; i += vec_len_f32_avx512 * 4) { + auto va0 = mm512_uni_loadu_ps(a + i); + auto va1 = mm512_uni_loadu_ps(a + 
i + vec_len_f32_avx512); + auto va2 = mm512_uni_loadu_ps(a + i + vec_len_f32_avx512 * 2); + auto va3 = mm512_uni_loadu_ps(a + i + vec_len_f32_avx512 * 3); + + vsum0 = _mm512_add_ps(va0, vsum0); + vsum1 = _mm512_add_ps(va1, vsum1); + vsum2 = _mm512_add_ps(va2, vsum2); + vsum3 = _mm512_add_ps(va3, vsum3); + } + if (i + 2 * vec_len_f32_avx512 <= n) { + auto va0 = mm512_uni_loadu_ps(a + i); + auto va1 = mm512_uni_loadu_ps(a + i + vec_len_f32_avx512); + + vsum0 = _mm512_add_ps(va0, vsum0); + vsum1 = _mm512_add_ps(va1, vsum1); + i += 2 * vec_len_f32_avx512; + } + if (i + vec_len_f32_avx512 <= n) { + auto va0 = mm512_uni_loadu_ps(a + i); + vsum0 = _mm512_add_ps(va0, vsum0); + i += vec_len_f32_avx512; + } + vsum0 = _mm512_add_ps(vsum0, vsum1); + vsum2 = _mm512_add_ps(vsum2, vsum3); + vsum0 = _mm512_add_ps(vsum0, vsum2); + sum = _mm512_reduce_add_ps(vsum0); +#elif defined(HAVE_AVX2) + auto vsum0 = _mm256_set1_ps(0.0f); + auto vsum1 = _mm256_set1_ps(0.0f); + auto vsum2 = _mm256_set1_ps(0.0f); + auto vsum3 = _mm256_set1_ps(0.0f); + for (; i + 4 * vec_len_f32_avx2 <= n; i += vec_len_f32_avx2 * 4) { + auto va0 = mm256_uni_loadu_ps(a + i); + auto va1 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2); + auto va2 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2 * 2); + auto va3 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2 * 3); + + vsum0 = _mm256_add_ps(va0, vsum0); + vsum1 = _mm256_add_ps(va1, vsum1); + vsum2 = _mm256_add_ps(va2, vsum2); + vsum3 = _mm256_add_ps(va3, vsum3); + } + if (i + 2 * vec_len_f32_avx2 <= n) { + auto va0 = mm256_uni_loadu_ps(a + i); + auto va1 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2); + + vsum0 = _mm256_add_ps(va0, vsum0); + vsum1 = _mm256_add_ps(va1, vsum1); + i += 2 * vec_len_f32_avx2; + } + if (i + vec_len_f32_avx2 <= n) { + auto va0 = mm256_uni_loadu_ps(a + i); + vsum0 = _mm256_add_ps(va0, vsum0); + i += vec_len_f32_avx2; + } + vsum0 = _mm256_add_ps(vsum0, vsum1); + vsum2 = _mm256_add_ps(vsum2, vsum3); + vsum0 = _mm256_add_ps(vsum0, vsum2); + 
hsum(vsum0); + sum = _mm256_cvtss_f32(vsum0); +#endif + + for (; i < n; i++) { + float tmp = a[i]; + sum += tmp; + } + return sum; +} + template -float dot_product(TA* a, TB* b, size_t n) { +static float dot_product(TA* a, TB* b, size_t n, float* scale, float* zp, float* head_sum) { size_t i = 0; float sum = 0.0f; #if defined(HAVE_AVX512F) - auto vsum = _mm512_setzero_ps(); - for (; i + vec_len_f32_avx512 <= n; i += vec_len_f32_avx512) { - auto va = mm512_uni_loadu_ps(a + i); - auto vb = mm512_uni_loadu_ps(b + i); - vsum = _mm512_fmadd_ps(va, vb, vsum); + auto vsum0 = _mm512_setzero_ps(); + auto vsum1 = _mm512_setzero_ps(); + auto vsum2 = _mm512_setzero_ps(); + auto vsum3 = _mm512_setzero_ps(); + for (; i + 4 * vec_len_f32_avx512 <= n; i += 4 * vec_len_f32_avx512) { + auto va0 = mm512_uni_loadu_ps(a + i); + auto va1 = mm512_uni_loadu_ps(a + i + vec_len_f32_avx512); + auto va2 = mm512_uni_loadu_ps(a + i + vec_len_f32_avx512 * 2); + auto va3 = mm512_uni_loadu_ps(a + i + vec_len_f32_avx512 * 3); + + auto vb0 = mm512_uni_loadu_ps(b + i); + auto vb1 = mm512_uni_loadu_ps(b + i + vec_len_f32_avx512); + auto vb2 = mm512_uni_loadu_ps(b + i + vec_len_f32_avx512 * 2); + auto vb3 = mm512_uni_loadu_ps(b + i + vec_len_f32_avx512 * 3); + + vsum0 = _mm512_fmadd_ps(va0, vb0, vsum0); + vsum1 = _mm512_fmadd_ps(va1, vb1, vsum1); + vsum2 = _mm512_fmadd_ps(va2, vb2, vsum2); + vsum3 = _mm512_fmadd_ps(va3, vb3, vsum3); + } + if (i + 2 * vec_len_f32_avx512 <= n) { + auto va0 = mm512_uni_loadu_ps(a + i); + auto va1 = mm512_uni_loadu_ps(a + i + vec_len_f32_avx512); + + auto vb0 = mm512_uni_loadu_ps(b + i); + auto vb1 = mm512_uni_loadu_ps(b + i + vec_len_f32_avx512); + + vsum0 = _mm512_fmadd_ps(va0, vb0, vsum0); + vsum1 = _mm512_fmadd_ps(va1, vb1, vsum1); + i += 2 * vec_len_f32_avx512; + } + if (i + vec_len_f32_avx512 <= n) { + auto va0 = mm512_uni_loadu_ps(a + i); + auto vb0 = mm512_uni_loadu_ps(b + i); + vsum0 = _mm512_fmadd_ps(va0, vb0, vsum0); + i += vec_len_f32_avx512; } - sum = 
_mm512_reduce_add_ps(vsum); + vsum0 = _mm512_add_ps(vsum0, vsum1); + vsum2 = _mm512_add_ps(vsum2, vsum3); + vsum0 = _mm512_add_ps(vsum0, vsum2); + sum = _mm512_reduce_add_ps(vsum0); #elif defined(HAVE_AVX2) - auto vsum = _mm256_set1_ps(0.0f); - for (; i + vec_len_f32_avx2 <= n; i += vec_len_f32_avx2) { - auto va = mm256_uni_loadu_ps(a + i); - auto vb = mm256_uni_loadu_ps(b + i); - vsum = _mm256_fmadd_ps(va, vb, vsum); - } - hsum(vsum); - sum = _mm256_cvtss_f32(vsum); + auto vsum0 = _mm256_set1_ps(0.0f); + auto vsum1 = _mm256_set1_ps(0.0f); + auto vsum2 = _mm256_set1_ps(0.0f); + auto vsum3 = _mm256_set1_ps(0.0f); + for (; i + 4 * vec_len_f32_avx2 <= n; i += vec_len_f32_avx2 * 4) { + auto va0 = mm256_uni_loadu_ps(a + i); + auto va1 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2); + auto va2 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2 * 2); + auto va3 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2 * 3); + + auto vb0 = mm256_uni_loadu_ps(b + i); + auto vb1 = mm256_uni_loadu_ps(b + i + vec_len_f32_avx2); + auto vb2 = mm256_uni_loadu_ps(b + i + vec_len_f32_avx2 * 2); + auto vb3 = mm256_uni_loadu_ps(b + i + vec_len_f32_avx2 * 3); + + vsum0 = _mm256_fmadd_ps(va0, vb0, vsum0); + vsum1 = _mm256_fmadd_ps(va1, vb1, vsum1); + vsum2 = _mm256_fmadd_ps(va2, vb2, vsum2); + vsum3 = _mm256_fmadd_ps(va3, vb3, vsum3); + } + if (i + 2 * vec_len_f32_avx2 <= n) { + auto va0 = mm256_uni_loadu_ps(a + i); + auto va1 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2); + + auto vb0 = mm256_uni_loadu_ps(b + i); + auto vb1 = mm256_uni_loadu_ps(b + i + vec_len_f32_avx2); + + vsum0 = _mm256_fmadd_ps(va0, vb0, vsum0); + vsum1 = _mm256_fmadd_ps(va1, vb1, vsum1); + i += 2 * vec_len_f32_avx2; + } + if (i + vec_len_f32_avx2 <= n) { + auto va0 = mm256_uni_loadu_ps(a + i); + auto vb0 = mm256_uni_loadu_ps(b + i); + vsum0 = _mm256_fmadd_ps(va0, vb0, vsum0); + i += vec_len_f32_avx2; + } + vsum0 = _mm256_add_ps(vsum0, vsum1); + vsum2 = _mm256_add_ps(vsum2, vsum3); + vsum0 = _mm256_add_ps(vsum0, vsum2); + 
hsum(vsum0); + sum = _mm256_cvtss_f32(vsum0); #endif for (; i < n; i++) { sum += a[i] * b[i]; @@ -79,15 +394,168 @@ float dot_product(TA* a, TB* b, size_t n) { return sum; } +template +static float dot_product(TA* a, uint8_t* b, size_t n, float* scale, float* zp, float* head_sum) { + size_t i = 0; + float sum = 0.0f; +#if defined(HAVE_AVX512F) + auto vsum0 = _mm512_set1_ps(0.0f); + auto vsum1 = _mm512_set1_ps(0.0f); + auto vsum2 = _mm512_set1_ps(0.0f); + auto vsum3 = _mm512_set1_ps(0.0f); + auto v_zp = _mm512_set1_ps(*zp); + for (; i + 4 * vec_len_f32_avx512 <= n; i += vec_len_f32_avx512 * 4) { + auto va0 = mm512_uni_loadu_ps(a + i); + auto va1 = mm512_uni_loadu_ps(a + i + vec_len_f32_avx512); + auto va2 = mm512_uni_loadu_ps(a + i + vec_len_f32_avx512 * 2); + auto va3 = mm512_uni_loadu_ps(a + i + vec_len_f32_avx512 * 3); + + auto vb0_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(b + i)); + auto vb1_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(b + i + vec_len_f32_avx512)); + auto vb2_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(b + i + vec_len_f32_avx512 * 2)); + auto vb3_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(b + i + vec_len_f32_avx512 * 3)); + + auto vb0_256 = _mm512_cvtepu8_epi32(vb0_128); + auto vb1_256 = _mm512_cvtepu8_epi32(vb1_128); + auto vb2_256 = _mm512_cvtepu8_epi32(vb2_128); + auto vb3_256 = _mm512_cvtepu8_epi32(vb3_128); + + auto vb0 = _mm512_cvtepi32_ps(vb0_256); + auto vb1 = _mm512_cvtepi32_ps(vb1_256); + auto vb2 = _mm512_cvtepi32_ps(vb2_256); + auto vb3 = _mm512_cvtepi32_ps(vb3_256); + + vb0 = _mm512_sub_ps(vb0, v_zp); + vb1 = _mm512_sub_ps(vb1, v_zp); + vb2 = _mm512_sub_ps(vb2, v_zp); + vb3 = _mm512_sub_ps(vb3, v_zp); + + vsum0 = _mm512_fmadd_ps(va0, vb0, vsum0); + vsum1 = _mm512_fmadd_ps(va1, vb1, vsum1); + vsum2 = _mm512_fmadd_ps(va2, vb2, vsum2); + vsum3 = _mm512_fmadd_ps(va3, vb3, vsum3); + } + if (i + 2 * vec_len_f32_avx512 <= n) { + auto va0 = mm512_uni_loadu_ps(a + i); + auto va1 = mm512_uni_loadu_ps(a + i + 
vec_len_f32_avx512); + + auto vb0_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(b + i)); + auto vb1_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(b + i + vec_len_f32_avx512)); + + auto vb0_256 = _mm512_cvtepu8_epi32(vb0_128); + auto vb1_256 = _mm512_cvtepu8_epi32(vb1_128); + + auto vb0 = _mm512_cvtepi32_ps(vb0_256); + auto vb1 = _mm512_cvtepi32_ps(vb1_256); + + vb0 = _mm512_sub_ps(vb0, v_zp); + vb1 = _mm512_sub_ps(vb1, v_zp); + + vsum0 = _mm512_fmadd_ps(va0, vb0, vsum0); + vsum1 = _mm512_fmadd_ps(va1, vb1, vsum1); + i += 2 * vec_len_f32_avx512; + } + if (i + vec_len_f32_avx512 <= n) { + auto va0 = mm512_uni_loadu_ps(a + i); + auto vb0_128 = _mm_loadu_si128(reinterpret_cast<__m128i*>(b + i)); + auto vb0_256 = _mm512_cvtepu8_epi32(vb0_128); + auto vb0 = _mm512_cvtepi32_ps(vb0_256); + vb0 = _mm512_sub_ps(vb0, v_zp); + vsum0 = _mm512_fmadd_ps(va0, vb0, vsum0); + i += vec_len_f32_avx512; + } + vsum0 = _mm512_add_ps(vsum0, vsum1); + vsum2 = _mm512_add_ps(vsum2, vsum3); + vsum0 = _mm512_add_ps(vsum0, vsum2); + sum = _mm512_reduce_add_ps(vsum0); + for (; i < n; i++) { + sum += a[i] * (b[i] - *zp); + } + return scale[0] * sum; + +#elif defined(HAVE_AVX2) + auto vsum0 = _mm256_set1_ps(0.0f); + auto vsum1 = _mm256_set1_ps(0.0f); + auto vsum2 = _mm256_set1_ps(0.0f); + auto vsum3 = _mm256_set1_ps(0.0f); + for (; i + 4 * vec_len_f32_avx2 <= n; i += vec_len_f32_avx2 * 4) { + auto va0 = mm256_uni_loadu_ps(a + i); + auto va1 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2); + auto va2 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2 * 2); + auto va3 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2 * 3); + + auto vb0_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(b + i)); + auto vb1_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(b + i + vec_len_f32_avx2)); + auto vb2_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(b + i + vec_len_f32_avx2 * 2)); + auto vb3_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(b + i + vec_len_f32_avx2 * 3)); + + auto vb0_256 = 
_mm256_cvtepu8_epi32(vb0_128); + auto vb1_256 = _mm256_cvtepu8_epi32(vb1_128); + auto vb2_256 = _mm256_cvtepu8_epi32(vb2_128); + auto vb3_256 = _mm256_cvtepu8_epi32(vb3_128); + + auto vb0 = _mm256_cvtepi32_ps(vb0_256); + auto vb1 = _mm256_cvtepi32_ps(vb1_256); + auto vb2 = _mm256_cvtepi32_ps(vb2_256); + auto vb3 = _mm256_cvtepi32_ps(vb3_256); + + vsum0 = _mm256_fmadd_ps(va0, vb0, vsum0); + vsum1 = _mm256_fmadd_ps(va1, vb1, vsum1); + vsum2 = _mm256_fmadd_ps(va2, vb2, vsum2); + vsum3 = _mm256_fmadd_ps(va3, vb3, vsum3); + } + if (i + 2 * vec_len_f32_avx2 <= n) { + auto va0 = mm256_uni_loadu_ps(a + i); + auto va1 = mm256_uni_loadu_ps(a + i + vec_len_f32_avx2); + + auto vb0_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(b + i)); + auto vb1_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(b + i + vec_len_f32_avx2)); + + auto vb0_256 = _mm256_cvtepu8_epi32(vb0_128); + auto vb1_256 = _mm256_cvtepu8_epi32(vb1_128); + + auto vb0 = _mm256_cvtepi32_ps(vb0_256); + auto vb1 = _mm256_cvtepi32_ps(vb1_256); + + vsum0 = _mm256_fmadd_ps(va0, vb0, vsum0); + vsum1 = _mm256_fmadd_ps(va1, vb1, vsum1); + i += 2 * vec_len_f32_avx2; + } + if (i + vec_len_f32_avx2 <= n) { + auto va0 = mm256_uni_loadu_ps(a + i); + auto vb0_128 = _mm_loadl_epi64(reinterpret_cast<__m128i*>(b + i)); + auto vb0_256 = _mm256_cvtepu8_epi32(vb0_128); + auto vb0 = _mm256_cvtepi32_ps(vb0_256); + vsum0 = _mm256_fmadd_ps(va0, vb0, vsum0); + i += vec_len_f32_avx2; + } + vsum0 = _mm256_add_ps(vsum0, vsum1); + vsum2 = _mm256_add_ps(vsum2, vsum3); + vsum0 = _mm256_add_ps(vsum0, vsum2); + hsum(vsum0); + sum = _mm256_cvtss_f32(vsum0); + for (; i < n; i++) { + sum += a[i] * b[i]; + } + // B = scale * (b - zero) + // Σ (A * B) = Σ (a * scale * (b - zero)) = scale * (Σ a * b - zero Σ a) = scale * (sum - zp * head_sum) + return scale[0] * (sum - zp[0] * head_sum[0]); +#else + for (; i < n; i++) { + sum += a[i] * (b[i] - *zp); + } + return scale[0] * sum; +#endif +} + template -void attn_reduce(T* dst, float* temp, size_t M, 
size_t S, size_t temp_stride) { +static void attn_reduce(T* dst, float* temp, size_t M, size_t S, size_t temp_stride) { size_t i = 0; #if defined(HAVE_AVX512F) for (; i + vec_len_f32_avx512 <= S; i+= vec_len_f32_avx512) { auto* src = temp + i; auto result_vec_fp32 = _mm512_setzero_ps(); for (size_t m = 0; m < M; m++) { - //auto* temp = &m_temp.at({ithr, b, pq, h, 0}); auto o_vec_fp32 = _mm512_loadu_ps(src); result_vec_fp32 = _mm512_add_ps(result_vec_fp32, o_vec_fp32); src += temp_stride; @@ -120,7 +588,7 @@ void attn_reduce(T* dst, float* temp, size_t M, size_t S, size_t temp_stride) { } template -void mha_single_token_kernel(const ov::intel_cpu::PlainTensor& query, +static void mha_single_token_kernel(const ov::intel_cpu::PlainTensor& query, const ov::intel_cpu::PlainTensor& present_key, const ov::intel_cpu::PlainTensor& present_value, const ov::intel_cpu::PlainTensor& alibi_mask, @@ -131,7 +599,10 @@ void mha_single_token_kernel(const ov::intel_cpu::PlainTensor& query, ov::intel_cpu::PlainTensor& buf_attn_score, bool has_out_transpose, bool auto_causal, - float d_scale) { + float d_scale, + const ov::intel_cpu::PlainTensor& past_k_scale_zp, + const ov::intel_cpu::PlainTensor& past_v_scale_zp, + ov::intel_cpu::PlainTensor& head_sum) { ov::intel_cpu::PlainTensor causal_mask; bool select_nfltmax_at_0 = false; auto B = query.size(0); @@ -146,27 +617,67 @@ void mha_single_token_kernel(const ov::intel_cpu::PlainTensor& query, } if (d_scale == 0.0f) d_scale = 1.0f / sqrt(S); + auto nthr = parallel_get_max_threads(); // use per-token kernel, for each k,v token // attn mask is a matrix of q_len(kv_len) buf_attn_w.resize({B, H, q_len, kv_len}); +#if defined(HAVE_AVX2) && !defined(HAVE_AVX512F) + // avx2 will pre-compute the zero point and try to save the sub instruction in the dot_product, + // but it seems not necessary for avx512. 
Possible reason may be that for avx2 the cost of dot_product + // is larger than the memory access time, but for avx512 is not and the cost of pre-compute is a pure increase. + bool pastkv_is_int8 = past_k_scale_zp; + if (pastkv_is_int8) { + // be sure no false sharing + head_sum.resize({B, H, q_len, 16}); + parallel_for3d(B, H, q_len, [&](size_t b, size_t h, size_t pq) { + *head_sum.ptr(b, h, pq) = sum_q_head(query.ptr(b, h, pq), S); + }); + } +#endif + parallel_nt_static(nthr, [&](const size_t ithr, const size_t nthr) { + size_t start{0}, end{0}; + splitter(B * h_group_num * kv_len, nthr, ithr, start, end); - bool is_abcd = present_key.stride(1) >= present_key.stride(2); - size_t dim0 = is_abcd ? B : kv_len; - size_t dim1 = is_abcd ? h_group_num : B; - size_t dim2 = is_abcd ? kv_len : h_group_num; - - parallel_for3d(dim0, dim1, dim2, [&](size_t d0, size_t d1, size_t d2) { - size_t b = is_abcd ? d0 : d1; - size_t h_group = is_abcd ? d1 : d2; - size_t pk = is_abcd ? d2 : d0; - - // which batch item should be used at postion pk? - auto b_kv = beams ? 
beams.at({b, pk}) : b; - for (size_t pq = 0; pq < q_len; pq++) { - for (size_t h = h_group * h_each_group_len; h < (h_group + 1) * h_each_group_len; h++) { - buf_attn_w.at({b, h, pq, pk}) = - dot_product(&query.at({b, h, pq, 0}), &present_key.at({b_kv, h_group, pk, 0}, true), S); + size_t b, h_group, pk; + if (start < end) { + parallel_it_init(start, b, B, h_group, h_group_num, pk, kv_len); + if (q_len == 1 && h_each_group_len == 1) { + if (B == 1) { + // the memory will be continuous when b==1 + for (size_t iwork = start; iwork < end; ++iwork) { + auto p = past_k_scale_zp.ptr(0, h_group, pk); + auto p_k = present_key.ptr(0, h_group, pk); + prefetch_bytes(S, _MM_HINT_T0, 4096, p_k); + buf_attn_w.ptr(0, h_group, 0)[pk] = + dot_product(query.ptr(0, h_group), p_k, + S, p, p + 1, head_sum.ptr(0, h_group)); + parallel_it_step(b, B, h_group, h_group_num, pk, kv_len); + } + } else { + for (size_t iwork = start; iwork < end; ++iwork) { + auto b_kv = beams ? beams.ptr(b)[pk] : b; + auto p = past_k_scale_zp.ptr(b_kv, h_group, pk); + auto p_k = present_key.ptr(b_kv, h_group, pk); + buf_attn_w.ptr(b, h_group, 0)[pk] = + dot_product(query.ptr(b, h_group), p_k, + S, p, p + 1, head_sum.ptr(b, h_group)); + parallel_it_step(b, B, h_group, h_group_num, pk, kv_len); + } + } + } else { + for (size_t iwork = start; iwork < end; ++iwork) { + auto b_kv = beams ? beams.ptr(b)[pk] : b; + for (size_t pq = 0; pq < q_len; pq++) { + auto p = past_k_scale_zp.ptr(b_kv, h_group, pk); + for (size_t h = h_group * h_each_group_len; h < (h_group + 1) * h_each_group_len; h++) { + buf_attn_w.ptr(b, h, pq)[pk] = + dot_product(query.ptr(b, h, pq), present_key.ptr(b_kv, h_group, pk), + S, p, p + 1, head_sum.ptr(b, h, pq)); + } + } + parallel_it_step(b, B, h_group, h_group_num, pk, kv_len); + } } } }); @@ -177,10 +688,10 @@ void mha_single_token_kernel(const ov::intel_cpu::PlainTensor& query, float* alibi_ptr = alibi_mask ? 
&alibi_mask.at({b, h, pq, 0}, true) : nullptr; uint8_t* attn_mask_ptr = nullptr; auto attn_mask_prec = attention_mask.get_precision(); - attn_mask_ptr = reinterpret_cast(&attention_mask.at({b, h, 0, 0}, true)); + attn_mask_ptr = reinterpret_cast(&attention_mask.at({b, h, pq, 0}, true)); uint8_t* cmask_ptr = causal_mask ? &causal_mask.at({b, h, pq, 0}, true) : nullptr; - attn_softmax_kernel(&buf_attn_w.at({b, h, pq, 0}), - &buf_attn_w.at({b, h, pq, 0}), + attn_softmax_kernel(buf_attn_w.ptr(b, h, pq), + buf_attn_w.ptr(b, h, pq), d_scale, alibi_ptr, attn_mask_ptr, @@ -193,44 +704,55 @@ void mha_single_token_kernel(const ov::intel_cpu::PlainTensor& query, }); // attn_w * V - auto nthr = parallel_get_max_threads(); buf_attn_score.resize({static_cast(nthr), B, q_len, H, S}); // buf_attn_w {B, H, q_len, kv_len} parallel_nt_static(nthr, [&](const size_t ithr, const size_t nthr) { size_t start{0}, end{0}; splitter(B * h_group_num * kv_len, nthr, ithr, start, end); - memset(&buf_attn_score.at({ithr, 0, 0, 0, 0}), 0, buf_attn_score.stride(0) * sizeof(float)); + memset(buf_attn_score.ptr(ithr, 0, 0, 0, 0), 0, buf_attn_score.stride(0) * sizeof(float)); size_t b, h_group, pv; if (start < end) { - if (is_abcd) - parallel_it_init(start, b, B, h_group, h_group_num, pv, kv_len); - else - parallel_it_init(start, pv, kv_len, b, B, h_group, h_group_num); - for (size_t iwork = start; iwork < end; ++iwork) { - auto b_kv = beams ? beams.at({b, pv}) : b; - auto* v = &present_value.at({b_kv, h_group, pv, 0}, true); - for (size_t pq = 0; pq < q_len; pq++) { - for (size_t h = h_group * h_each_group_len; h < (h_group + 1) * h_each_group_len; h++) { - attn_acc_value(&buf_attn_score.at({ithr, b, pq, h, 0}), - buf_attn_w.at({b, h, pq, pv}), - v, - S); - } + parallel_it_init(start, b, B, h_group, h_group_num, pv, kv_len); + if (q_len == 1 && h_each_group_len == 1) { + for (size_t iwork = start; iwork < end; ++iwork) { + auto b_kv = beams ? 
beams.ptr(b)[pv] : b; + auto* v = present_value.ptr(b_kv, h_group, pv); + auto p = past_v_scale_zp.ptr(b_kv, h_group, pv); + attn_acc_value(buf_attn_score.ptr(ithr, b, 0, h_group), + buf_attn_w.ptr(b, h_group, 0, pv)[0], + v, + S, + p + 0, + p + 1); + parallel_it_step(b, B, h_group, h_group_num, pv, kv_len); } - if (is_abcd) + } else { + for (size_t iwork = start; iwork < end; ++iwork) { + auto b_kv = beams ? beams.ptr(b)[pv] : b; + auto* v = present_value.ptr(b_kv, h_group, pv); + auto p = past_v_scale_zp.ptr(b_kv, h_group, pv); + for (size_t pq = 0; pq < q_len; pq++) { + for (size_t h = h_group * h_each_group_len; h < (h_group + 1) * h_each_group_len; h++) { + attn_acc_value(buf_attn_score.ptr(ithr, b, pq, h), + buf_attn_w.ptr(b, h, pq)[pv], + v, + S, + p + 0, + p + 1); + } + } parallel_it_step(b, B, h_group, h_group_num, pv, kv_len); - else - parallel_it_step(pv, kv_len, b, B, h_group, h_group_num); + } } } }); parallel_for3d(B, H, q_len, [&](size_t b, size_t h, size_t pq) { - auto* temp = &buf_attn_score.at({0, b, pq, h, 0}); + auto* temp = buf_attn_score.ptr(0, b, pq, h); size_t temp_stride = buf_attn_score.stride(0); - auto* dst = has_out_transpose ? &output_emb.at({b, pq, h * S}) : &output_emb.at({b, h, pq}); + auto* dst = has_out_transpose ? 
output_emb.ptr(b, pq, h * S) : output_emb.ptr(b, h, pq); attn_reduce(dst, temp, nthr, S, temp_stride); }); } @@ -246,22 +768,62 @@ void mha_single_token(const ov::intel_cpu::PlainTensor& query, ov::intel_cpu::PlainTensor& buf_attn_score, bool has_out_transpose, bool auto_causal, - float d_scale) { + float d_scale, + const ov::intel_cpu::PlainTensor& past_k_scale_zp, + const ov::intel_cpu::PlainTensor& past_v_scale_zp, + ov::intel_cpu::PlainTensor& head_sum) { if (query.get_precision() == ov::element::bf16) { - mha_single_token_kernel(query, - present_key, - present_value, - alibi_mask, - attention_mask, - beams, - output_emb, - buf_attn_w, - buf_attn_score, - has_out_transpose, - auto_causal, - d_scale); + if (present_key.get_precision() == ov::element::u8) { + mha_single_token_kernel(query, + present_key, + present_value, + alibi_mask, + attention_mask, + beams, + output_emb, + buf_attn_w, + buf_attn_score, + has_out_transpose, + auto_causal, + d_scale, + past_k_scale_zp, + past_v_scale_zp, + head_sum); + } else { + mha_single_token_kernel(query, + present_key, + present_value, + alibi_mask, + attention_mask, + beams, + output_emb, + buf_attn_w, + buf_attn_score, + has_out_transpose, + auto_causal, + d_scale, + past_k_scale_zp, + past_v_scale_zp, + head_sum); + } } else if (query.get_precision() == ov::element::f32) { - if (present_key.get_precision() == ov::element::f16) { + if (present_key.get_precision() == ov::element::u8) { + mha_single_token_kernel(query, + present_key, + present_value, + alibi_mask, + attention_mask, + beams, + output_emb, + buf_attn_w, + buf_attn_score, + has_out_transpose, + auto_causal, + d_scale, + past_k_scale_zp, + past_v_scale_zp, + head_sum); + } else if (present_key.get_precision() == ov::element::f16) { mha_single_token_kernel(query, present_key, present_value, @@ -273,7 +835,10 @@ void mha_single_token(const ov::intel_cpu::PlainTensor& query, buf_attn_score, has_out_transpose, auto_causal, - d_scale); + d_scale, + 
past_k_scale_zp, + past_v_scale_zp, + head_sum); } else { mha_single_token_kernel(query, present_key, @@ -286,7 +851,10 @@ void mha_single_token(const ov::intel_cpu::PlainTensor& query, buf_attn_score, has_out_transpose, auto_causal, - d_scale); + d_scale, + past_k_scale_zp, + past_v_scale_zp, + head_sum); } } else { OPENVINO_THROW("Unsupported precision: ", query.get_precision()); diff --git a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.hpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.hpp index 239527c8c7ad8a..8bf05a2d9dadf0 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.hpp @@ -26,7 +26,10 @@ void mha_single_token(const ov::intel_cpu::PlainTensor& query, ov::intel_cpu::PlainTensor& buf_attn_score, bool has_out_transpose, bool auto_causal, - float d_scale); + float d_scale, + const ov::intel_cpu::PlainTensor& past_k_scale_zp, + const ov::intel_cpu::PlainTensor& past_v_scale_zp, + ov::intel_cpu::PlainTensor& head_sum); } // namespace XARCH } // namespace Cpu diff --git a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/softmax_kernel.hpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/softmax_kernel.hpp index 4d85af64d137aa..500ae6e184f1f1 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/softmax_kernel.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/softmax_kernel.hpp @@ -17,16 +17,6 @@ namespace Cpu { namespace XARCH { #if defined(HAVE_AVX2) -inline void hmax(__m256& x) { - __m256 y; // x: 0 1 2 3 4 5 6 7 - y = _mm256_permute_ps(x, 0x39); // y: 1 2 3 0 5 6 7 4 - x = _mm256_max_ps(x, y); // X: 01 12 23 30 45 56 67 74 - y = _mm256_permute_ps(x, 0x4e); // y: 23 30 01 12 67 74 45 56 - x = _mm256_max_ps(x, y); // x: 0123 x x x 4567 x x x - y = _mm256_permute2f128_ps(x, x, 1); // y: 4567 x x x 0123 x x x - x = _mm256_max_ps(x, y); // x: 01234567 x x x x x x x -} - 
inline void exp_ps_avx2(__m256& src) { static __m256 exp_ln_flt_min_f = _mm256_castsi256_ps(_mm256_set1_epi32(0xc2aeac50)); // log(FLT_MIN) static __m256 exp_ln_flt_max_f = _mm256_castsi256_ps(_mm256_set1_epi32(0x42b17218)); // log(FLT_MAX) diff --git a/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp b/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp index 857a2ed9e37441..cbb7d3eaae14d9 100644 --- a/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp +++ b/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp @@ -62,13 +62,13 @@ bool PriorBoxClustered::needShapeInfer() const { return true; } - const auto& outputShape = memory->getShape().getStaticDims(); - const int* in_data = memory->getDataAs(); + const auto& output_shape = memory->getShape().getStaticDims(); + const int* in_data = getSrcDataAtPortAs(0); const int h = in_data[0]; const int w = in_data[1]; const auto output = static_cast(4 * h * w * number_of_priors); - return outputShape[1] != output; + return output_shape[1] != output; } bool PriorBoxClustered::needPrepareParams() const { diff --git a/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp b/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp index 594306b4b2d169..acae41d7546b8e 100644 --- a/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp +++ b/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp @@ -27,6 +27,7 @@ #include "kernels/scaled_attn/softmax.hpp" #include "kernels/scaled_attn/mha_single_token.hpp" #include "kernels/scaled_attn/attn_memcpy.hpp" +#include "kernels/scaled_attn/attn_quant.hpp" #include "kernels/x64/brgemm_kernel.hpp" #include "nodes/common/cpu_convert.h" @@ -142,7 +143,7 @@ struct MHAKernel { for (size_t m = 0; m < q_len; m++) { // dot-product to get attention scores - auto* q = &query.at({b, h, m, 0}); + auto* q = query.ptr(b, h, m, 0); // how many key/values can be accessed causally auto ncausal = kv_len; // no causall mask is set and it's not fused into attention_mask @@ -248,12 +249,7 @@ struct MHAKernel { MHAKernel() 
= delete; explicit MHAKernel(GraphContext::CPtr ctx) - : context(ctx), - fp32_out(true), - qk_scratch_a(true), - qk_scratch_b(true), - wv_scratch_a(true), - wv_scratch_b(true) {} + : context(ctx) {} dnnl::memory::dims make_dnnl_dims(const std::vector& dims) { dnnl::memory::dims dnnl_dims(dims.size()); @@ -472,16 +468,16 @@ struct MHAKernel { } void exec_qk(dnnl::stream strm, PlainTensor& query, PlainTensor& present_key) { - dnnl::memory q(q_md, strm.get_engine(), query.data()); - dnnl::memory k(k_md, strm.get_engine(), present_key.data()); + dnnl::memory q(q_md, strm.get_engine(), query.ptr()); + dnnl::memory k(k_md, strm.get_engine(), present_key.ptr()); qk_prim.execute(strm, {{DNNL_ARG_SRC, q}, {DNNL_ARG_WEIGHTS, k}, {DNNL_ARG_DST, attn_score}}); } void exec_kv(dnnl::stream strm, PlainTensor& present_value, PlainTensor& output_emb) { - dnnl::memory v(v_md, strm.get_engine(), present_value.data()); - dnnl::memory out(out_md, strm.get_engine(), output_emb.data()); + dnnl::memory v(v_md, strm.get_engine(), present_value.ptr()); + dnnl::memory out(out_md, strm.get_engine(), output_emb.ptr()); wv_prim.execute(strm, {{DNNL_ARG_SRC, attn_weight}, {DNNL_ARG_WEIGHTS, v}, {DNNL_ARG_DST, out}}); } @@ -579,7 +575,7 @@ struct MHAKernel { explicit MHAKernel(GraphContext::CPtr ctx): context(ctx) { m_block_size = 4; select_nfltmax_at_0 = false; - qk_buffers.resize(parallel_get_max_threads(), PlainTensor(true)); + qk_buffers.resize(parallel_get_max_threads()); } PlainTensor causal_mask; @@ -730,8 +726,9 @@ struct MHAKernel { struct MHASingleToken { PlainTensor m_attn_w; PlainTensor m_temp; + PlainTensor m_head_sum; - MHASingleToken() : m_attn_w(true), m_temp(true) {} + MHASingleToken() {} // Q, K, V is ready, do attention // query [B, H, q_len, S] @@ -749,9 +746,11 @@ struct MHASingleToken { const PlainTensor& beams, bool has_out_transpose, bool auto_causal, - float d_scale = 0.0f) { + float d_scale, + const PlainTensor& k_scale_zp, + const PlainTensor& v_scale_zp) { 
mha_single_token(query, present_key, present_value, alibi_mask, attention_mask, beams, output_emb, - m_attn_w, m_temp, has_out_transpose, auto_causal, d_scale); + m_attn_w, m_temp, has_out_transpose, auto_causal, d_scale, k_scale_zp, v_scale_zp, m_head_sum); } }; @@ -763,17 +762,18 @@ struct ScaledDotProductAttention::AttentionExecutor : public ScaledDotProductAtt MHAKernel kernel; MHASingleToken kernel_single_token; - AttentionExecutor(GraphContext::CPtr ctx) : context(ctx), attn_buf(true), kernel(context) {} + AttentionExecutor(GraphContext::CPtr ctx) : context(ctx), kernel(context) {} void prepare_attn_mask(MemoryPtr attn_input) { attn_buf.resize(attn_input->getStaticDims()); auto p = attn_input->getDataAs(); for (size_t i = 0; i < attn_input->getSize(); i++) - attn_buf.data()[i] = p[i] ? 0.0f : -FLT_MAX; + attn_buf.ptr()[i] = p[i] ? 0.0f : -FLT_MAX; } void execute(dnnl::stream strm, const Config& config, const std::vector& inputs, const MemoryPtr output, - const MemoryPtr presentk_input, const MemoryPtr presentv_input, const MemoryPtr beam_input) override { + const MemoryPtr presentk_input, const MemoryPtr presentv_input, const MemoryPtr beam_input, + const PlainTensor& k_scale_zp, const PlainTensor& v_scale_zp) override { bool has_out_transpose = config.config.output_BLHxS; bool fuse_causal_attn = config.config.fuse_causal_attn; bool is_causal = config.config.is_causal; @@ -881,13 +881,13 @@ struct ScaledDotProductAttention::AttentionExecutor : public ScaledDotProductAtt // 2, using float will save the repack cost which typically is required for bf16/int8 opt // 3, using dot product can leverage the SIMD while easily adapt to indirect kv cache kernel_single_token(q_input, present_key, present_value, {}, use_attn_mask ? 
attn_mask : PlainTensor(), - output_emb, beam_table, has_out_transpose, auto_causal, scale_input); + output_emb, beam_table, has_out_transpose, auto_causal, scale_input, k_scale_zp, v_scale_zp); } } }; ScaledDotProductAttention::ScaledDotProductAttention(const std::shared_ptr& op, const GraphContext::CPtr context) - : Node(op, context, SDPAShapeInferFactory(op)), m_tmp_reorder(true) { + : Node(op, context, SDPAShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW("CPU: " + errorMessage); @@ -1012,6 +1012,7 @@ void ScaledDotProductAttention::execute(dnnl::stream strm) { inputs[i] = getSrcMemoryAtPort(i); } + PlainTensor k_scale_zp, v_scale_zp; if (m_config.config.fuse_concat) { // initialization will be also completed in this func gatherConcatPastkv(inputs[1], inputs[2], getSrcMemoryAtPort(orginSDPInputNumber)); @@ -1019,11 +1020,13 @@ void ScaledDotProductAttention::execute(dnnl::stream strm) { presentk_input = m_k_state->internal_state_mem(); presentv_input = m_v_state->internal_state_mem(); beam_input = m_k_state->hidden_state_mem(); + k_scale_zp = m_k_state->get_scale_zp(); + v_scale_zp = m_v_state->get_scale_zp(); } else { presentk_input = inputs[1]; presentv_input = inputs[2]; } - m_executor->execute(strm, m_config, inputs, output, presentk_input, presentv_input, beam_input); + m_executor->execute(strm, m_config, inputs, output, presentk_input, presentv_input, beam_input, k_scale_zp, v_scale_zp); } bool ScaledDotProductAttention::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { @@ -1110,7 +1113,7 @@ void ScaledDotProductAttention::resetBeamTablePastkv(const MemoryPtr& mem_cur_k, }; // 1. 
check beam idx if it's valid - auto* table = beam_idx.data(); + auto* table = beam_idx.ptr(); for (size_t i = 0; i < B; i++) { OPENVINO_ASSERT(static_cast(table[i]) < B_state, "beam_idx[", i, "]=", table[i], " should less than batch of previous pastkv: ", B_state); @@ -1150,6 +1153,27 @@ void ScaledDotProductAttention::resetBeamTablePastkv(const MemoryPtr& mem_cur_k, S * old_past_v.m_element_size); }); } + if (kvcache_precision == ov::element::u8) { + auto& old_scale_zp_k = m_k_state->get_scale_zp(); + auto& old_scale_zp_v = m_v_state->get_scale_zp(); + PlainTensor new_scale_zp_k, new_scale_zp_v; + + new_scale_zp_k.resize({B, H, (L0 + L1) * 2, 2}); + new_scale_zp_v.resize({B, H, (L0 + L1) * 2, 2}); + parallel_for2d(B, H, [&](size_t b, size_t h) { + auto idx = static_cast(table[b]); + for (size_t m = 0; m < L0; m++) { + auto b_kv = static_cast(old_beam_table_k.at({idx, m})); + new_scale_zp_k.at({b, h, m, 0}) = old_scale_zp_k.at({b_kv, h, m, 0}); + new_scale_zp_k.at({b, h, m, 1}) = old_scale_zp_k.at({b_kv, h, m, 1}); + new_scale_zp_v.at({b, h, m, 0}) = old_scale_zp_v.at({b_kv, h, m, 0}); + new_scale_zp_v.at({b, h, m, 1}) = old_scale_zp_v.at({b_kv, h, m, 1}); + } + }); + + m_k_state->set_scale_zp(new_scale_zp_k); + m_v_state->set_scale_zp(new_scale_zp_v); + } auto new_shape = {B, H, (L0 + L1), S}; mem_desc = std::make_shared(kvcache_precision, @@ -1161,7 +1185,13 @@ void ScaledDotProductAttention::resetBeamTablePastkv(const MemoryPtr& mem_cur_k, mem_desc->getStrides()); new_internal_mem_k->redefineDesc(mem_desc); new_internal_mem_v->redefineDesc(mem_desc); - attn_memcpy(cur_k, cur_v, new_pastk.slice(2, L0, L0 + L1), new_pastv.slice(2, L0, L0 + L1)); + if (kvcache_precision == ov::element::u8) { + attn_quantkv(cur_k, cur_v, + new_pastk.slice(2, L0, L0 + L1), new_pastv.slice(2, L0, L0 + L1), + m_k_state->get_scale_zp().slice(2, L0, L0 + L1), m_v_state->get_scale_zp().slice(2, L0, L0 + L1)); + } else { + attn_memcpy(cur_k, cur_v, new_pastk.slice(2, L0, L0 + L1), 
new_pastv.slice(2, L0, L0 + L1)); + } m_k_state->assign_internal_state(new_internal_mem_k); m_v_state->assign_internal_state(new_internal_mem_v); @@ -1251,7 +1281,8 @@ void ScaledDotProductAttention::updateBeamTable(const MemoryPtr& mem_beam_idx, s OPENVINO_ASSERT(B == B_state, "beam idx batch: ", B, " is not equal to batch of state: ", B_state); OPENVINO_ASSERT(B * (L0 + L1) > 0, "B or (L0+L1) is zero, B: ", B, ", L0: ", L0, ", L1: ", L1); // resize buffer - if (is_reset || B * (L0 + L1) > m_k_state->hidden_state_max_size()) { + bool need_redefine = true; + if (B * (L0 + L1) > m_k_state->hidden_state_max_size()) { auto mem_desc = std::make_shared(ov::element::i32, Shape{B, (L0 + L1) * 2}); auto new_hidden_state_k = std::make_shared(getEngine(), mem_desc); @@ -1263,8 +1294,8 @@ void ScaledDotProductAttention::updateBeamTable(const MemoryPtr& mem_beam_idx, s beam_table_k.reset(hidden_state_k); beam_table_v.reset(hidden_state_v); for (size_t b = 0; b < B; b++) { - std::memcpy(&new_beam_table_k.at({b}), &beam_table_k.at({b}), sizeof(int32_t) * L0); - std::memcpy(&new_beam_table_v.at({b}), &beam_table_v.at({b}), sizeof(int32_t) * L0); + std::memcpy(new_beam_table_k.ptr(b), beam_table_k.ptr(b), sizeof(int32_t) * L0); + std::memcpy(new_beam_table_v.ptr(b), beam_table_v.ptr(b), sizeof(int32_t) * L0); } } m_k_state->assign_hidden_state(new_hidden_state_k); @@ -1275,17 +1306,37 @@ void ScaledDotProductAttention::updateBeamTable(const MemoryPtr& mem_beam_idx, s hidden_state_v = new_hidden_state_v; beam_table_k = new_beam_table_k; beam_table_v = new_beam_table_v; + } else if (is_reset) { + // when reset and not resize, just reset the desc + need_redefine = false; + auto size = m_k_state->hidden_state_max_size(); + auto max_l = size / B; + VectorDims strides(2); + strides[0] = max_l; + strides[1] = 1; + std::vector new_shape{B, (L0 + L1)}; + auto mem_desc = std::make_shared(ov::element::i32, + Shape(new_shape), + new_shape, + VectorDims{0, 1}, + 0, + VectorDims{}, + strides); 
+ hidden_state_k->redefineDesc(mem_desc); + hidden_state_v->redefineDesc(mem_desc); + } + if (need_redefine) { + std::vector new_shape{B, (L0 + L1)}; + auto mem_desc = std::make_shared(ov::element::i32, + Shape(new_shape), + new_shape, + VectorDims{0, 1}, + 0, + VectorDims{}, + hidden_state_k->getDescWithType()->getStrides()); + hidden_state_k->redefineDesc(mem_desc); + hidden_state_v->redefineDesc(mem_desc); } - std::vector new_shape{B, (L0 + L1)}; - auto mem_desc = std::make_shared(ov::element::i32, - Shape(new_shape), - new_shape, - VectorDims{0, 1}, - 0, - VectorDims{}, - hidden_state_k->getDescWithType()->getStrides()); - hidden_state_k->redefineDesc(mem_desc); - hidden_state_v->redefineDesc(mem_desc); if (!beam_table_k) { beam_table_k.reset(hidden_state_k); @@ -1306,7 +1357,7 @@ void ScaledDotProductAttention::updateBeamTable(const MemoryPtr& mem_beam_idx, s // beam order is like [0, 1, 2,...] bool no_reorder = true; for (size_t i = 0; i < B; i++) { - if (beam_idx.data()[i] != static_cast(i)) { + if (beam_idx.ptr()[i] != static_cast(i)) { no_reorder = false; break; } @@ -1314,20 +1365,16 @@ void ScaledDotProductAttention::updateBeamTable(const MemoryPtr& mem_beam_idx, s // reorder if (!no_reorder) { - m_tmp_reorder.resize({B, L0}); + auto* table = beam_idx.ptr(); + // beam table is same for both k,v state for (size_t i = 0; i < B; i++) { - std::memcpy(&m_tmp_reorder.at({i}), - &beam_table_k.at({i}), + std::memcpy(beam_table_k.ptr(i), + beam_table_v.ptr(table[i]), sizeof(int32_t) * L0); } - auto* table = beam_idx.data(); - // beam table is same for both k,v state for (size_t i = 0; i < B; i++) { - std::memcpy(&beam_table_k.at({i}), - &m_tmp_reorder.at({static_cast(table[i])}), - sizeof(int32_t) * L0); - std::memcpy(&beam_table_v.at({i}), - &m_tmp_reorder.at({static_cast(table[i])}), + std::memcpy(beam_table_v.ptr(i), + beam_table_k.ptr(i), sizeof(int32_t) * L0); } } @@ -1375,7 +1422,8 @@ void ScaledDotProductAttention::updatePastkv(const MemoryPtr& mem_cur_k, 
const M OPENVINO_ASSERT(B * (L0 + L1) > 0, "B or (L0+L1) is zero, B: ", B, ", L0: ", L0, ", L1: ", L1); // resize buffer ov::element::Type kvcache_precision = m_k_state->internal_desc()->getPrecision(); - if (is_reset || B * H * (L0 + L1) * S > m_k_state->internal_state_max_size()) { + bool need_redefine = true; + if (B * H * (L0 + L1) * S > m_k_state->internal_state_max_size()) { auto new_shape = {B, H, (L0 + L1) * 2, S}; auto mem_desc = std::make_shared(kvcache_precision, Shape(reverse(new_shape)), @@ -1405,17 +1453,69 @@ void ScaledDotProductAttention::updatePastkv(const MemoryPtr& mem_cur_k, const M m_v_state->assign_internal_state(new_internal_mem_v); m_k_state->assign_internal_state_max_size(B * H * (L0 + L1) * 2 * S); m_v_state->assign_internal_state_max_size(B * H * (L0 + L1) * 2 * S); + if (kvcache_precision == ov::element::u8) { + auto& old_scale_zp_k = m_k_state->get_scale_zp(); + auto& old_scale_zp_v = m_v_state->get_scale_zp(); + PlainTensor new_scale_zp_k, new_scale_zp_v; + + new_scale_zp_k.resize({B, H, (L0 + L1) * 2, 2}); + new_scale_zp_v.resize({B, H, (L0 + L1) * 2, 2}); + if (L0 > 0 && !is_reset) { + parallel_for2d(B, H, [&](size_t b, size_t h) { + memcpy(new_scale_zp_k.ptr(b, h), + old_scale_zp_k.ptr(b, h), + sizeof(float) * L0 * 2); + memcpy(new_scale_zp_v.ptr(b, h), + old_scale_zp_v.ptr(b, h), + sizeof(float) * L0 * 2); + }); + } + + m_k_state->set_scale_zp(new_scale_zp_k); + m_v_state->set_scale_zp(new_scale_zp_v); + } + } else if (is_reset) { + // when reset and not resize, just reset the desc + need_redefine = false; + auto size = m_k_state->internal_state_max_size(); + auto max_l = size / (B * H * S); + VectorDims strides(4); + strides[0] = H * max_l * S; + strides[1] = max_l * S; + strides[2] = S; + strides[3] = 1; + auto new_shape = {B, H, (L0 + L1), S}; + auto mem_desc = std::make_shared(kvcache_precision, + Shape(reverse(new_shape)), + new_shape, + order, + 0, + VectorDims{}, + strides); + internal_mem_k->redefineDesc(mem_desc); + 
internal_mem_v->redefineDesc(mem_desc); + if (kvcache_precision == ov::element::u8) { + auto& old_scale_zp_k = m_k_state->get_scale_zp(); + auto& old_scale_zp_v = m_v_state->get_scale_zp(); + // only dim0, dim1 need change + old_scale_zp_k.m_strides[0] = H * max_l * 2; + old_scale_zp_k.m_strides[1] = max_l * 2; + old_scale_zp_v.m_strides[0] = H * max_l * 2; + old_scale_zp_v.m_strides[1] = max_l * 2; + } + } + if (need_redefine) { + auto new_shape = {B, H, (L0 + L1), S}; + auto mem_desc = std::make_shared(kvcache_precision, + Shape(reverse(new_shape)), + new_shape, + order, + 0, + VectorDims{}, + internal_mem_k->getDescWithType()->getStrides()); + internal_mem_k->redefineDesc(mem_desc); + internal_mem_v->redefineDesc(mem_desc); } - auto new_shape = {B, H, (L0 + L1), S}; - auto mem_desc = std::make_shared(kvcache_precision, - Shape(reverse(new_shape)), - new_shape, - order, - 0, - VectorDims{}, - internal_mem_k->getDescWithType()->getStrides()); - internal_mem_k->redefineDesc(mem_desc); - internal_mem_v->redefineDesc(mem_desc); if (!past_k) { past_k.reset(internal_mem_k); @@ -1435,11 +1535,21 @@ void ScaledDotProductAttention::updatePastkv(const MemoryPtr& mem_cur_k, const M init_v.reset(v_mem); init_k = init_k.permute(order); init_v = init_v.permute(order); - attn_memcpy(init_k, init_v, past_k, past_v); + if (kvcache_precision == ov::element::u8) { + attn_quantkv(init_k, init_v, past_k, past_v, m_k_state->get_scale_zp(), m_v_state->get_scale_zp()); + } else { + attn_memcpy(init_k, init_v, past_k, past_v); + } } } - attn_memcpy(cur_k, cur_v, past_k.slice(2, L0, L0 + L1), past_v.slice(2, L0, L0 + L1)); + if (kvcache_precision == ov::element::u8) { + attn_quantkv(cur_k, cur_v, + past_k.slice(2, L0, L0 + L1), past_v.slice(2, L0, L0 + L1), + m_k_state->get_scale_zp().slice(2, L0, L0 + L1), m_v_state->get_scale_zp().slice(2, L0, L0 + L1)); + } else { + attn_memcpy(cur_k, cur_v, past_k.slice(2, L0, L0 + L1), past_v.slice(2, L0, L0 + L1)); + } } ov::element::Type 
ScaledDotProductAttention::getKVCachePrecision() { @@ -1447,6 +1557,11 @@ ov::element::Type ScaledDotProductAttention::getKVCachePrecision() { auto rtPrecision = getRuntimePrecision(); bool enableKVCacheFP16 = m_config.config.fuse_concat && mayiuse(cpu_isa_t::avx2) && rtPrecision != ov::element::bf16; kvcache_precision = enableKVCacheFP16 ? ov::element::f16 : rtPrecision; + bool use_int8_kv_cache_precision = false; + if (use_int8_kv_cache_precision) + kvcache_precision = ov::element::u8; + else + kvcache_precision = enableKVCacheFP16 ? ov::element::f16 : rtPrecision; return kvcache_precision; } diff --git a/src/plugins/intel_cpu/src/nodes/scaled_attn.h b/src/plugins/intel_cpu/src/nodes/scaled_attn.h index cd1fd0da46bfff..b7e59927a6a5d6 100644 --- a/src/plugins/intel_cpu/src/nodes/scaled_attn.h +++ b/src/plugins/intel_cpu/src/nodes/scaled_attn.h @@ -59,7 +59,8 @@ class ScaledDotProductAttention : public Node { struct Executor { virtual void execute(dnnl::stream strm, const Config& config, const std::vector& inputs, const MemoryPtr output, - const MemoryPtr presentk_input, const MemoryPtr presentv_input, const MemoryPtr beam_input) = 0; + const MemoryPtr presentk_input, const MemoryPtr presentv_input, const MemoryPtr beam_input, + const PlainTensor& k_scale_zp, const PlainTensor& v_scale_zp) = 0; }; Config m_config; @@ -69,8 +70,6 @@ class ScaledDotProductAttention : public Node { std::shared_ptr m_k_state; std::shared_ptr m_v_state; - - PlainTensor m_tmp_reorder; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index 54517e73b46f07..7c2a1d7cf9ffa4 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -21,6 +21,7 @@ #include "transformations/common_optimizations/add_fake_quantize_fusion.hpp" #include 
"transformations/fp16_compression/convert_compression_only_to_legacy.hpp" #include "transformations/common_optimizations/convert_quantize_dequantize.hpp" +#include "transformations/common_optimizations/lstm_cell_fusion.hpp" #include "transformations/common_optimizations/fq_mul_fusion.hpp" #include "transformations/common_optimizations/mul_fake_quantize_fusion.hpp" #include "transformations/common_optimizations/nop_elimination.hpp" @@ -425,6 +426,15 @@ void Transformations::PreLpt(const std::vector& defaultPrecis ov::pass::ConvertGRUSequenceToTensorIterator, ov::pass::ConvertLSTMSequenceToTensorIterator); + CPU_SET_CALLBACK_COMMON(manager, + [](const_node_ptr &node) -> bool { + std::string msg; + return !node::RNN::isSupportedOperation(node, msg); + }, + ov::pass::ConvertLoopToLSTMSequence, + ov::pass::FuseReverseLSTMSequence, + ov::pass::FuseLSTMSequencesToBidirectionalLSTMSequence); + CPU_SET_CALLBACK_COMMON(manager, [](const_node_ptr &node) -> bool { std::string msg; @@ -434,6 +444,13 @@ void Transformations::PreLpt(const std::vector& defaultPrecis ov::pass::GRUCellDecomposition, ov::pass::LSTMCellDecomposition); + CPU_SET_CALLBACK_COMMON(manager, + [](const_node_ptr &node) -> bool { + std::string msg; + return !node::RNN::isSupportedOperation(node, msg); + }, + ov::pass::LSTMCellFusion); + CPU_SET_CALLBACK_COMMON(manager, [](const_node_ptr &node) -> bool { std::string errorMessage; @@ -467,9 +484,11 @@ void Transformations::PreLpt(const std::vector& defaultPrecis CPU_SET_CALLBACK_COMMON(manager, nmsCallback, ov::pass::ConvertMulticlassNmsToMulticlassNmsIE); CPU_SET_CALLBACK_COMMON(manager, nmsCallback, ov::pass::ConvertMatrixNmsToMatrixNmsIE); CPU_SET_CALLBACK_X64(manager, - [](const_node_ptr &node) -> bool { + [this](const_node_ptr &node) -> bool { std::string errorMsg; - return node::ScaledDotProductAttention::isSupportedOperation(node, errorMsg); + // Current SDPA impl is optimized only for LLM models, so we decompose it for others to avoid perf regression. 
+ // Matching the pattern is a little complicated, so we just check if there is any state nodes. + return node::ScaledDotProductAttention::isSupportedOperation(node, errorMsg) && model->get_variables().size() > 0; }, ov::pass::ScaledDotProductAttentionDecomposition); diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp index 4968528677cbdb..6e29e04c8aa8d8 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp @@ -8,6 +8,7 @@ #include "debug_capabilities.h" #include "node.h" #include "edge.h" +#include "graph.h" #include #include "nodes/input.h" #include "nodes/eltwise.h" @@ -213,7 +214,7 @@ std::ostream & operator<<(std::ostream & os, const Node &c_node) { leftside << comma << desc->getPrecision().get_type_name() << "_" << desc->serializeFormat() << "_" << shape_str - << "_" << getData(ptr); + << "&" << getData(ptr); b_ouputed = true; } else { leftside << "(empty)"; @@ -289,22 +290,24 @@ std::ostream & operator<<(std::ostream & os, const Node &c_node) { comma = ""; for (size_t port = 0; port < node.getParentEdges().size(); ++port) { // find the Parent edge connecting to port + os << comma; + const char * sep2 = ""; for (const auto & e : node.getParentEdges()) { auto edge = e.lock(); if (!edge) continue; if (edge->getOutputNum() != static_cast(port)) continue; auto n = edge->getParent(); - os << comma; + os << sep2; os << node_id(*edge->getParent()); auto ptr = edge->getMemoryPtr(); if (ptr) { - os << "_" << getData(ptr); + os << "&" << getData(ptr); } if (!is_single_output_port(*n)) os << "[" << edge->getInputNum() << "]"; - comma = ","; - break; + sep2 = "|"; // show all edges at single port(usually indicating bugs) } + comma = ","; } if (node.getType() == intel_cpu::Type::Input && node.isConstant()) { @@ -386,6 +389,16 @@ std::ostream & operator<<(std::ostream & os, const Shape& shape) { return os; } +// Print complex 
data structures in a textualized form to the console is an efficient way to investigate them +std::ostream & operator<<(std::ostream & os, const Graph& g) { + os << "ov::intel_cpu::Graph " << g.GetName() << " {" << std::endl; + for (auto &graphNode : g.GetNodes()) { + std::cout << *graphNode << std::endl; + } + os << "};" << std::endl; + return os; +} + class OstreamAttributeVisitor : public ov::AttributeVisitor { std::ostream & os; diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.h b/src/plugins/intel_cpu/src/utils/debug_capabilities.h index 01c78c043b4122..724871209924ef 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.h +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.h @@ -47,7 +47,9 @@ class NodeDesc; class MemoryDesc; class Node; class Edge; +class Graph; class IMemory; + class PrintableModel { public: PrintableModel(const ov::Model& model, std::string tag = "", std::string prefix = "") : model(model), tag(tag), prefix(prefix) {} @@ -96,6 +98,7 @@ std::ostream & operator<<(std::ostream & os, const PortConfig& desc); std::ostream & operator<<(std::ostream & os, const NodeConfig& desc); std::ostream & operator<<(std::ostream & os, const NodeDesc& desc); std::ostream & operator<<(std::ostream & os, const Node& node); +std::ostream & operator<<(std::ostream & os, const ov::intel_cpu::Graph& graph); std::ostream & operator<<(std::ostream & os, const Shape& shape); std::ostream & operator<<(std::ostream & os, const MemoryDesc& desc); std::ostream & operator<<(std::ostream & os, const IMemory& mem); diff --git a/src/plugins/intel_cpu/src/utils/general_utils.h b/src/plugins/intel_cpu/src/utils/general_utils.h index 28ac379094f225..804150a5c747d5 100644 --- a/src/plugins/intel_cpu/src/utils/general_utils.h +++ b/src/plugins/intel_cpu/src/utils/general_utils.h @@ -6,11 +6,11 @@ #include "cpu_shape.h" -#include "openvino/core/type/element_type.hpp" - #include #include +#include "openvino/core/type/element_type.hpp" + namespace ov { 
namespace intel_cpu { diff --git a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp index 91db670748de73..45548faaac606e 100644 --- a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp +++ b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp @@ -93,15 +93,15 @@ struct PlainTensor { size_t m_strides[PLAINTENSOR_RANK_MAX]; size_t m_dims[PLAINTENSOR_RANK_MAX]; size_t m_rank = 0; - void* m_ptr = nullptr; + std::shared_ptr m_ptr; size_t m_capacity = 0; - bool with_storage = false; size_t m_element_size = 0; + size_t m_offset = 0; ov::element::Type_t m_dt = ov::element::Type_t::undefined; MemoryPtr m_mem; // hold memory ptr reference operator bool() const { - return static_cast(m_ptr); + return m_ptr != nullptr; } VectorDims shape() const { @@ -133,32 +133,18 @@ struct PlainTensor { PlainTensor() = default; - PlainTensor(bool _with_storage) { - with_storage = _with_storage; - } - - // copy construct (always not take ownership) PlainTensor operator=(const PlainTensor& other) { - OPENVINO_ASSERT(!with_storage); memcpy(&m_strides, &other.m_strides, sizeof(m_strides)); memcpy(&m_dims, &other.m_dims, sizeof(m_dims)); m_rank = other.m_rank; m_ptr = other.m_ptr; m_dt = other.m_dt; m_element_size = other.m_element_size; + m_capacity = other.m_capacity; + m_offset = other.m_offset; return *this; } - ~PlainTensor() { - if (with_storage && m_capacity > 0) { -#ifdef _WIN32 - _aligned_free(m_ptr); -#else - ::free(m_ptr); -#endif - } - } - void reset(MemoryPtr mem) { auto mem_desc = mem->getDescWithType(); // not support block layout @@ -235,7 +221,8 @@ struct PlainTensor { i_src++; } sub_tensor.m_rank = i_dst; // index may imply squeeze - sub_tensor.m_ptr = reinterpret_cast(reinterpret_cast(m_ptr) + off * m_element_size); + sub_tensor.m_ptr = m_ptr; + sub_tensor.m_offset = m_offset + off; sub_tensor.m_dt = m_dt; sub_tensor.m_element_size = m_element_size; return sub_tensor; @@ -268,8 +255,8 @@ struct PlainTensor { } auto off = start * 
m_strides[axis]; - auto* data = reinterpret_cast(m_ptr) + off * m_element_size; - sub_tensor.m_ptr = reinterpret_cast(data); + sub_tensor.m_ptr = m_ptr; + sub_tensor.m_offset = m_offset + off; sub_tensor.m_dt = m_dt; sub_tensor.m_element_size = m_element_size; @@ -307,7 +294,7 @@ struct PlainTensor { // only valid for dense memory PlainTensor new_tensor_view; assert(is_dense()); - new_tensor_view.resize(target_shape, m_element_size, m_dt, m_ptr); + new_tensor_view.resize(target_shape, m_element_size, m_dt, static_cast(m_ptr.get() + m_element_size * m_offset)); return new_tensor_view; } @@ -315,10 +302,12 @@ struct PlainTensor { PlainTensor new_tensor_view; assert(order.size() == m_rank); new_tensor_view.m_capacity = 0; + // not hold memory reference new_tensor_view.m_ptr = m_ptr; new_tensor_view.m_rank = m_rank; new_tensor_view.m_dt = m_dt; new_tensor_view.m_element_size = m_element_size; + new_tensor_view.m_offset = m_offset; auto it_order = order.begin(); // also should check order has no repeat element for (size_t i = 0; i < m_rank; i++) { @@ -346,21 +335,29 @@ struct PlainTensor { if (!data) { auto capacity_new = m_strides[0] * m_dims[0] * m_element_size; if (capacity_new > m_capacity) { - if (!with_storage) { - throw std::bad_alloc(); - } + void* ptr; #ifdef _WIN32 - m_ptr = _aligned_malloc(capacity_new, 64); + ptr = _aligned_malloc(capacity_new, 64); #else - int rc = ::posix_memalign(&m_ptr, 64, capacity_new); - if (rc) m_ptr = nullptr; + int rc = ::posix_memalign(&ptr, 64, capacity_new); + if (rc) { + OPENVINO_ASSERT(false, "PlainTensor call posix_memalign failed: ", rc); + } #endif + m_ptr = std::shared_ptr(static_cast(ptr), [](uint8_t* ptr) { + #ifdef _WIN32 + _aligned_free(ptr); + #else + ::free(ptr); + #endif + }); m_capacity = capacity_new; + m_offset = 0; } } else { // m_capacity is zero to indicate that we don't own the memory m_capacity = 0; - m_ptr = data; + m_ptr = std::shared_ptr(static_cast(data), [](uint8_t*) {}); } } @@ -369,9 +366,26 @@ struct 
PlainTensor { resize(new_dims, sizeof(DT), precision_of
::value, data, strides); } - template - DT* data() const { - return reinterpret_cast(m_ptr); + template + int64_t offset() const { + return m_offset; + } + template + int64_t offset(I i) const { + return m_offset + i * m_strides[dim]; + } + template + int64_t offset(I i, Is... indices) const { + return i * m_strides[dim] + offset(indices...); + } + template + DT* ptr(Is... indices) const { + return reinterpret_cast(m_ptr.get()) + offset<0>(indices...); + } + + template + void* ptr_v(Is... indices) const { + return reinterpret_cast(m_ptr.get() + offset<0>(indices...) * m_element_size); } // when allow_broadcast is true, index to size-1 dim will always access 0. @@ -389,14 +403,14 @@ struct PlainTensor { } off += m_strides[i] * coordinate; } - return (reinterpret_cast(reinterpret_cast(m_ptr) + off * m_element_size))[0]; + return (reinterpret_cast(m_ptr.get() + (off + m_offset) * m_element_size))[0]; } template PlainTensor& operator=(const DT& value) { // assign every element to value std::vector index(m_rank, 0); - auto* dst = reinterpret_cast(m_ptr); + auto* dst = reinterpret_cast(m_ptr.get() + m_offset * m_element_size); while (1) { size_t off = 0; for (int i = m_rank - 1; i >= 0; i--) { @@ -490,17 +504,17 @@ struct PlainTensor { // display current element if we still have buget if (cur_row_lines_left > 0) { if (m_dt == ov::element::Type_t::f32) - ss << reinterpret_cast(m_ptr)[i] << ","; + ss << (ptr())[i] << ","; else if (m_dt == ov::element::Type_t::bf16) - ss << reinterpret_cast(m_ptr)[i] << ","; + ss << (ptr())[i] << ","; else if (m_dt == ov::element::Type_t::f16) - ss << reinterpret_cast(m_ptr)[i] << ","; + ss << (ptr())[i] << ","; else if (m_dt == ov::element::Type_t::i32) - ss << reinterpret_cast(m_ptr)[i] << ","; + ss << (ptr())[i] << ","; else if (m_dt == ov::element::Type_t::i8) - ss << static_cast(reinterpret_cast(m_ptr)[i]) << ","; + ss << (ptr())[i] << ","; else if (m_dt == ov::element::Type_t::u8) - ss << static_cast(reinterpret_cast(m_ptr)[i]) << 
","; + ss << (ptr())[i] << ","; else ss << "?,"; cur_line_elecnt++; diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/export_import.cpp b/src/plugins/intel_cpu/tests/functional/custom/behavior/export_import.cpp index 93739427d89d0d..17ef4ac956d94d 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/behavior/export_import.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/behavior/export_import.cpp @@ -7,6 +7,7 @@ #include "common_test_utils/test_common.hpp" #include "common_test_utils/node_builders/eltwise.hpp" #include "common_test_utils/node_builders/constant.hpp" +#include "functional_test_utils/skip_tests_config.hpp" #include @@ -33,6 +34,7 @@ std::shared_ptr MakeMatMulModel() { } TEST_P(ExportOptimalNumStreams, OptimalNumStreams) { + SKIP_IF_CURRENT_TEST_IS_DISABLED(); auto original_model = MakeMatMulModel(); ov::Core core; std::string device_name; diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/include/fuse_transpose_reorder.hpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/include/fuse_transpose_reorder.hpp index 2dcfbcda3cb6f9..ae97903d87c64b 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/include/fuse_transpose_reorder.hpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/include/fuse_transpose_reorder.hpp @@ -54,5 +54,11 @@ class FuseTransposeAndReorderTest4 : public FuseTransposeAndReorderTest { protected: void create_model() override; }; + +class FuseTransposeAndReorderTest5 : public FuseTransposeAndReorderTest { +protected: + void create_model() override; +}; + } // namespace test } // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/fuse_transpose_reorder.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/fuse_transpose_reorder.cpp index 0610dcda662bcd..e13d8783b10d23 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/fuse_transpose_reorder.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/fuse_transpose_reorder.cpp @@ -354,6 +354,52 @@ TEST_P(FuseTransposeAndReorderTest4, CompareWithRefs) { INSTANTIATE_TEST_SUITE_P(smoke_Basic, FuseTransposeAndReorderTest4, convSumTranposeParams, FuseTransposeAndReorderTest::getTestCaseName); +void FuseTransposeAndReorderTest5::create_model() { + OPENVINO_ASSERT(input_shape.size() == 4); + const ov::Shape kernel = {1, 1}; + const ov::Shape stride = {1, 1}; + const ov::Shape dilation = {1, 1}; + const std::vector padBegin = {0, 0}; + const std::vector padEnd = {0, 0}; + const size_t convOutChannels = 4; + auto memFmt = nhwc; + + ov::ParameterVector inputParams{std::make_shared(in_prec, ov::Shape(input_shape))}; + const auto relu = std::make_shared(inputParams[0]); + const auto transposeOrder = ov::op::v0::Constant::create(ov::element::i32, {4}, {0, 3, 1, 2}); + const auto transpose_shared = std::make_shared(relu, transposeOrder); + const auto conv1 = ov::test::utils::make_convolution(transpose_shared, + in_prec, + kernel, + stride, + padBegin, + padEnd, + dilation, + ov::op::PadType::AUTO, + convOutChannels); + conv1->get_rt_info() = makeCPUInfo({memFmt}, {memFmt}, {}); + const auto conv2 = ov::test::utils::make_convolution(transpose_shared, + in_prec, + kernel, + stride, + padBegin, + padEnd, + dilation, + ov::op::PadType::AUTO, + convOutChannels); + conv2->get_rt_info() = makeCPUInfo({memFmt}, {memFmt}, {}); + const auto add = std::make_shared(conv1, conv2); + + ov::ResultVector results{std::make_shared(add->output(0))}; + function = std::make_shared(results, inputParams, "TransposeReorder"); +} + +TEST_P(FuseTransposeAndReorderTest5, CompareWithRefs) { + run(); + check_transpose_count(0); +} +INSTANTIATE_TEST_SUITE_P(smoke_Basic, FuseTransposeAndReorderTest5, convSumTranposeParams, FuseTransposeAndReorderTest::getTestCaseName); + TEST(smoke_Basic, FuseDynamicTransposeAndReorderTest) { auto model = 
ov::builder::preprocess::create_preprocess_1input(ov::element::u8, ov::PartialShape{1, 3, 224, 224}); auto p = ov::preprocess::PrePostProcessor(model); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 08805d4e586c51..4822b3d6a23fff 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -373,6 +373,8 @@ std::vector disabledTestPatterns() { retVector.emplace_back(R"(smoke_MM_Brgemm_Amx_.*/MatMulLayerCPUTest.*TS=\(\(55\.12\)\).*bf16.*_primitive=brgemm_avx512_amx.*)"); // Issue: 130471 retVector.emplace_back(R"(smoke_JIT_AVX512_DW_GroupConv/GroupConvolutionLayerCPUTest.*inFmts=nCdhw16c.*INFERENCE_PRECISION_HINT=bf16.*)"); + // Issue: 131475 + retVector.emplace_back(R"(smoke_ExportImportTest/ExportOptimalNumStreams.OptimalNumStreams/.*)"); } if (ov::with_cpu_x86_avx512_core_fp16()) { diff --git a/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp b/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp index ba576d51a7a8ca..dec7eb101b3fb7 100644 --- a/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/dnnl_memory_test.cpp @@ -4,10 +4,11 @@ #include -#include -#include -#include #include +#include + +#include "cpu_memory.h" +#include "memory_desc/cpu_blocked_memory_desc.h" using namespace ov::intel_cpu; diff --git a/src/plugins/intel_cpu/tests/unit/graph/merge_transpose_reorder_test.cpp b/src/plugins/intel_cpu/tests/unit/graph/merge_transpose_reorder_test.cpp index 1dd1c177e16813..731b3dd91208f4 100644 --- a/src/plugins/intel_cpu/tests/unit/graph/merge_transpose_reorder_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/graph/merge_transpose_reorder_test.cpp @@ -17,22 +17,7 @@ using namespace ov::intel_cpu; -/* - * MergeTransposeReorderIsOptimizedCPUTest to 
test the CPU plugin-in MergeTransposeReorder graph optimizer - * under the circumstance that the upstream node or downstream node is inPlaced thereby the inserted Reorder - * cannot be optimized. - */ -class MergeTransposeReorderIsOptimizedCPUTest : public ::testing::Test { -public: - void Validate() const { - CheckTransposeCount(0); - CheckReorderOptimized(std::string("_fake"), false); // the fused node is of name "reshape_abcd_acdb_fake" - } - - void SetUp() override { - CreateGraph(); - } - +class MergeTransposeReordersCPUTest : public ::testing::Test { protected: /* graph typology --------- @@ -61,7 +46,7 @@ class MergeTransposeReorderIsOptimizedCPUTest : public ::testing::Test { |Output | --------- */ - void CreateGraph() { + void CreateGraph(int num_consumers, int consumer_in_place_direction) { // Config conf; conf.rtCacheCapacity = 100; @@ -75,7 +60,9 @@ class MergeTransposeReorderIsOptimizedCPUTest : public ::testing::Test { auto order = std::vector{0, 3, 1, 2}; auto constOrder = ov::test::utils::deprecated::make_constant(ov::element::i32, {order.size()}, order); auto transpose = std::make_shared(params[0], constOrder); - ov::ResultVector results{std::make_shared(transpose)}; + ov::ResultVector results; + for (int i = 0; i < num_consumers; i++) + results.push_back(std::make_shared(transpose)); // Replicate auto replicate = [&](std::vector &nodes, std::vector &edges) -> void { @@ -99,18 +86,28 @@ class MergeTransposeReorderIsOptimizedCPUTest : public ::testing::Test { auto transposeNode = std::make_shared(transpose, context); transposeNode->filterSupportedPrimitiveDescriptors(); - // dummy nspc + inPlace LOOK_DOWN - const ov::Shape shape_tranpose{testShape[0], testShape[3], testShape[1], testShape[2]}; // shape after transpose - auto dummyNode2 = std::make_shared( - shape_tranpose, testPrec, "multiply", "DummyNode", context, LayoutType::nspc, Edge::LOOK::LOOK_DOWN); - - auto outputNode = std::make_shared(results[0], context); addEdge(inputNode, dummyNode1, 
0, 0); addEdge(dummyNode1, transposeNode, 0, 0); addEdge(orderNode, transposeNode, 0, 1); - addEdge(transposeNode, dummyNode2, 0, 0); - addEdge(dummyNode2, outputNode, 0, 0); + + // dummy nspc + inPlace LOOK_DOWN + const ov::Shape shape_tranpose{testShape[0], + testShape[3], + testShape[1], + testShape[2]}; // shape after transpose + for (int i = 0; i < num_consumers; i++) { + auto dummyConsumer = std::make_shared(shape_tranpose, + testPrec, + "multiply", + "DummyNode", + context, + LayoutType::nspc, + consumer_in_place_direction); + auto outputNode = std::make_shared(results[i], context); + addEdge(transposeNode, dummyConsumer, 0, 0); + addEdge(dummyConsumer, outputNode, 0, 0); + } for (auto &node : nodesSet) nodes.emplace_back(node); }; @@ -150,13 +147,43 @@ class MergeTransposeReorderIsOptimizedCPUTest : public ::testing::Test { ASSERT_EQ(1, actualCount); } -private: +protected: const ov::element::Type_t testPrec = ov::element::Type_t::f32; const ov::Shape testShape{1, 3, 8, 16}; std::unique_ptr m_graph; -}; // class MergeTransposeReorderIsOptimizedCPUTest +}; // class MergeTransposeReordersCPUTest + +// upstream node or downstream node is inPlaced thereby the inserted Reorder cannot be optimized. 
+TEST_F(MergeTransposeReordersCPUTest, smoke_Run_MergeTransposeReorders_isOptimized) { + CreateGraph(1, Edge::LOOK::LOOK_DOWN); + CheckTransposeCount(0); + CheckReorderOptimized(std::string("_fake"), false); // the fused node is of name "reshape_abcd_acdb_fake" +} + +// 3 non-inplace consumers share a single optimized reorder fused with Transpose +TEST_F(MergeTransposeReordersCPUTest, smoke_Run_MergeTransposeReorders_shared) { + CreateGraph(3, 0); + CheckTransposeCount(0); + CheckReorderOptimized(std::string("_fake"), true); +} + +// 3 inplace consumers cannot share reorders thus transpose is not fused with reorders +// there will be also 3 reorders between 3 dummyNode-consumers and 3 Result nodes +TEST_F(MergeTransposeReordersCPUTest, smoke_Run_MergeTransposeReorders_notFused) { + CreateGraph(3, Edge::LOOK::LOOK_DOWN); + CheckTransposeCount(1); + size_t reorderCount = 0; + for (auto& node : m_graph->GetNodes()) { + auto reorder_node = std::dynamic_pointer_cast(node); + if (reorder_node) { + // there should be no "_fake" reorders generated by merging transpose + reorder + ASSERT_EQ(node->getName().find("_fake"), std::string::npos); + reorderCount++; + } + } -TEST_F(MergeTransposeReorderIsOptimizedCPUTest, smoke_Run_MergeTransposeReorder_isOptimized) { - Validate(); + // 3 for layout conflist between [transpose => dummyConsumer] + // 3 for layout conflist between [dummyConsumer => result] + ASSERT_EQ(6, reorderCount); } diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp index dc756006c18264..947cdf06553417 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/format.hpp @@ -241,6 +241,8 @@ struct format { os_zy_is_x_osv8_isv2, os_zy_is_x_osv8_isv4, os_is_yx_osv4_isv16, + os_is_yx_osv4_isv2, + os_is_yx_osv8_isv16, os_is_yx_osv2_isv4, os_is_yx_osv2_isv16, os_is_yx_osv2_isv32, diff --git 
a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp index db9216202cfb55..64ed3013efe97a 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp @@ -693,6 +693,14 @@ void prepare_buffer_fusing::run(program& p) { if (gather_prim) { update_dep(gather_prim); } + + // Fallback to ocl impl since oneDNN doesn't support dynamic paddings + for (auto user : node.get_users()) { + if (user->get_preferred_impl_type() == impl_types::onednn) { + GPU_DEBUG_TRACE_DETAIL << user->id() << ": change impl to ocl because of dynamic input paddings\n"; + user->set_preferred_impl_type(impl_types::ocl); + } + } } }); program_helpers::do_for_types(*node, [](read_value_node& node) { diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp index fff672088f233e..5030b1e2cfa0e4 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.cpp @@ -502,6 +502,10 @@ kernel_selector::weights_layout to_weights_layout(format f, bool is_grouped) { return kernel_selector::weights_layout::os_is_yx_osv8_isv4; case format::os_is_yx_osv4_isv16: return kernel_selector::weights_layout::os_is_yx_osv4_isv16; + case format::os_is_yx_osv8_isv16: + return kernel_selector::weights_layout::os_is_yx_osv8_isv16; + case format::os_is_yx_osv4_isv2: + return kernel_selector::weights_layout::os_is_yx_osv4_isv2; case format::os_is_yx_osv2_isv4: return kernel_selector::weights_layout::os_is_yx_osv2_isv4; case format::os_is_yx_osv2_isv16: @@ -908,6 +912,10 @@ cldnn::format::type from_weights_layout(kernel_selector::weights_layout l) { return cldnn::format::os_is_yx_osv8_isv4; case kernel_selector::weights_layout::os_is_yx_osv4_isv16: 
return cldnn::format::os_is_yx_osv4_isv16; + case kernel_selector::weights_layout::os_is_yx_osv8_isv16: + return cldnn::format::os_is_yx_osv8_isv16; + case kernel_selector::weights_layout::os_is_yx_osv4_isv2: + return cldnn::format::os_is_yx_osv4_isv2; case kernel_selector::weights_layout::os_is_yx_osv2_isv4: return cldnn::format::os_is_yx_osv2_isv4; case kernel_selector::weights_layout::os_is_yx_osv2_isv16: diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/strided_slice.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/strided_slice.cpp index df46aa9799f8f3..7fee15c92a271c 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/strided_slice.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/strided_slice.cpp @@ -9,6 +9,7 @@ #include "strided_slice/strided_slice_kernel_ref.h" #include "strided_slice/strided_slice_kernel_selector.h" + namespace { template ::value>::type> std::vector& pad_vector_to_size(std::vector& data, size_t size, DT value) { @@ -141,6 +142,7 @@ struct strided_slice_impl : typed_primitive_impl_ocl { std::vector out_shape; for (const auto& dim : logical_dims) out_shape.push_back(static_cast(dim)); + if (params.striding_params.size() == 3) { // If the ith bit of begin_mask is not set, begin[i] is ignored and the range of the appropriate dimension starts from 0. vector_assign_if_not_mask(params.striding_params[0], 0, params.begin_mask); @@ -148,44 +150,53 @@ struct strided_slice_impl : typed_primitive_impl_ocl { // instead. 
vector_assign_if_not_mask(params.striding_params[1], out_shape, params.end_mask); for (size_t dim = 0; dim < params.striding_params[2].size(); dim++) { - auto begin_org = params.striding_params[0][dim]; - auto end_org = params.striding_params[1][dim]; - if (params.striding_params[0][dim] < 0) - params.striding_params[0][dim] = std::max(out_shape[dim] + params.striding_params[0][dim], (int32_t)0); - if (params.striding_params[1][dim] < 0) - params.striding_params[1][dim] = std::max(out_shape[dim] + params.striding_params[1][dim], (int32_t)0); - - params.striding_params[0][dim] = std::min(params.striding_params[0][dim], out_shape[dim]); - params.striding_params[1][dim] = std::min(params.striding_params[1][dim], out_shape[dim]); - - auto& begin = params.striding_params[0][dim]; - auto& end = params.striding_params[1][dim]; - auto& stride = params.striding_params[2][dim]; - bool is_clamp_begin = begin_org != begin; - bool is_clamp_end = end_org != end; - bool is_reverse = stride < 0; - // If begin > end && is_reverse, then we don't need to adjust begin/end values, the kernel will process it correctly - // However, in case of out-of-bounds begin/end values, it will be clamped, so we subtract 1 from each of them manually - // E.g. out_shape[dim] = 100; begin=10000; end=-10000; stride=-1 - // clamp: begin=100; end=0; - // sub: begin=99; end=-1; - // If begin <= end, then we swap begin/end values and subtruct 1 from each of them - // E.g. out_shape[dim] = 100; begin=0; end=100; stride=-1 - // swap: begin=100; end=0; - // sub: begin=99; end=-1; - // So the kernel will put the slices [99, 0] in reversed order as expected. 
- if (is_reverse) { - if (begin <= end) { - std::swap(begin, end); + auto begin = params.striding_params[0][dim]; + auto end = params.striding_params[1][dim]; + auto stride = params.striding_params[2][dim]; + + // Check out of bounds values for Clamping + auto check_out_of_bounds = [&](int32_t value) -> bool { + auto size = out_shape[dim]; + if (value >= size || value < (size * -1)) + return true; + else + return false; + }; + bool should_clamp_begin = check_out_of_bounds(begin); + bool should_clamp_end = check_out_of_bounds(end); + + // Convert a negative value which means reverse indexing from the end + if (begin < 0) + begin += out_shape[dim]; // converted value can be negative if the original one was out of bounds + if (end < 0) + end += out_shape[dim]; + bool is_stride_reverse = (stride < 0) ? true : false; + + // Clamping + begin = std::min(std::max(begin, (int32_t)0), out_shape[dim]); + end = std::min(std::max(end, (int32_t)0), out_shape[dim]); + + if (is_stride_reverse) { + // If begin > end && is_reverse, then we don't need to adjust begin/end values, the kernel will process it correctly + // However, in case of out-of-bounds begin/end values, it will be clamped, so we subtract 1 from each of them manually + // E.g. out_shape[dim] = 100; begin=10000; end=-10000; stride=-1 + // clamp: begin=100; end=0; + // sub: begin=99; end=-1; + // If begin <= end, then we swap begin/end values and subtruct 1 from each of them + // E.g. out_shape[dim] = 100; begin=-100; end=100; stride=-1 + // sub: begin=-1; end=100; + // swap: begin=100; end=-1; + // So the kernel will put the slices [99, 0] in reversed order as expected. 
+ if (should_clamp_begin) begin--; + if (should_clamp_end) end--; - } else if (begin_org != -1) { // If begin is -1 with negative stride, clamping begin is already expected value - if (is_clamp_begin) - begin--; - if (is_clamp_end) - end--; - } + if (begin <= end) + std::swap(begin, end); } + + params.striding_params[0][dim] = begin; + params.striding_params[1][dim] = end; } } return {params, op_params}; diff --git a/src/plugins/intel_gpu/src/graph/loop.cpp b/src/plugins/intel_gpu/src/graph/loop.cpp index 36ca523093f595..159023105d5194 100644 --- a/src/plugins/intel_gpu/src/graph/loop.cpp +++ b/src/plugins/intel_gpu/src/graph/loop.cpp @@ -357,63 +357,61 @@ event::ptr loop_inst::set_output_memory(memory::ptr mem, bool check, size_t idx) } loop_inst::concatenated_memory_mapping::ptr loop_inst::create_concat_memory_map(const cldnn::loop::io_primitive_map& io_prim_map, - memory::ptr mem_ptr, + memory::ptr extern_mem_ptr, const int64_t num_iterations) { OPENVINO_ASSERT(io_prim_map.axis >= 0, "axis should not be negative"); const auto& external_id = io_prim_map.external_id; const auto& internal_id = io_prim_map.internal_id; auto& engine = body_network->get_engine(); auto& stream = body_network->get_stream(); - auto prim = body_network->get_primitive(internal_id.pid); + auto intern_prim = body_network->get_primitive(internal_id.pid); + auto extern_prim = get_network().get_primitive(external_id.pid); std::vector sliced_mems; // if memory is nullptr, that means memory is not allocated yet because current network is dynamic shape model. // In dynamic model, we can't calculate num_element_iteration, start, and sliced_layout. // will recalculate that parameters in backedge preprocessing map after first execution. 
- if (mem_ptr != nullptr) { - layout sliced_layout = prim->get_output_layout(internal_id.idx); - auto out_mem_ptr = prim->output_memory_ptr(internal_id.idx); - if (out_mem_ptr != nullptr) { - sliced_layout = out_mem_ptr->get_layout(); - } else { - // if inner body prim has no output memory because it has dynamic shape, - // calculate inner body prim layout using concat_mem's layout. + if (extern_mem_ptr != nullptr) { + layout sliced_layout = intern_prim->get_output_layout(internal_id.idx); + auto inter_mem_ptr = intern_prim->output_memory_ptr(internal_id.idx); + if (inter_mem_ptr == nullptr) { + // if inner body intern_prim has no output memory because it has dynamic shape, + // calculate inner body intern_prim layout using concat_mem's layout. auto updated_sliced_layout = sliced_layout.get_partial_shape(); OPENVINO_ASSERT(updated_sliced_layout[io_prim_map.axis].is_static() || num_iterations > 0, "Not allowed dynamic dimension for axis when num_iteraiont is negative"); - auto concat_mem_pshape = mem_ptr->get_layout().get_partial_shape(); - const auto shape_size = concat_mem_pshape.size(); + auto concat_pshape = extern_prim->get_output_layout().get_partial_shape(); + const auto shape_size = concat_pshape.size(); for (size_t i = 0; i < shape_size; i++) { if (updated_sliced_layout[i].is_dynamic()) { - updated_sliced_layout[i] = concat_mem_pshape[i]; + updated_sliced_layout[i] = concat_pshape[i]; } } - GPU_DEBUG_LOG << "output pshape for [" << prim->id() << "] is changed from " + GPU_DEBUG_LOG << "output pshape for [" << intern_prim->id() << "] is changed from " << sliced_layout.get_partial_shape().to_string() << " to " << updated_sliced_layout.to_string() << std::endl; sliced_layout.set_partial_shape(updated_sliced_layout); - out_mem_ptr = engine.allocate_memory(sliced_layout); - prim->set_output_layout(sliced_layout, internal_id.idx); + inter_mem_ptr = engine.allocate_memory(sliced_layout); + intern_prim->set_output_layout(sliced_layout, internal_id.idx); } // When 
num_iterations is -1, allocate first sliced_mem and allocate sliced memory if additional sliced mem is required if (num_iterations < 0) { - sliced_mems.push_back(out_mem_ptr); + sliced_mems.push_back(inter_mem_ptr); } else { sliced_mems.reserve(num_iterations); - sliced_mems.push_back(out_mem_ptr); + sliced_mems.push_back(inter_mem_ptr); for (int j=1; j < num_iterations; ++j) { memory::ptr sliced_mem = engine.allocate_memory(sliced_layout); sliced_mems.push_back(sliced_mem); } } } - auto sliced_data_prim = body_network->get_primitive(internal_id.pid); auto concat_data_prim = get_network().get_primitive(external_id.pid); auto concat_data_id = external_id; - return std::make_shared(mem_ptr, sliced_mems, stream, engine, + return std::make_shared(extern_mem_ptr, sliced_mems, stream, engine, concat_data_prim, sliced_data_prim, io_prim_map); } diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_weights.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_weights.cl index 492086141aea7d..bdd92a6085ea49 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_weights.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/fetch_weights.cl @@ -1538,6 +1538,22 @@ inline uint get_g_os_is_yx_osv_isv(uint g, uint o, uint i, uint y, uint x, CAT(prefix, _SIZE_X), \ CAT(prefix, _SIZE_Y), 4, 16) +#define GET_FILTER_OS_IS_YX_OSV8_ISV16_INDEX(prefix, o, i, y, x) \ + get_g_os_is_yx_osv_isv( \ + 0, o, i, y, x, \ + CAT(prefix, _IFM_NUM), \ + CAT(prefix, _OFM_NUM), \ + CAT(prefix, _SIZE_X), \ + CAT(prefix, _SIZE_Y), 8, 16) + +#define GET_FILTER_OS_IS_YX_OSV4_ISV2_INDEX(prefix, o, i, y, x) \ + get_g_os_is_yx_osv_isv( \ + 0, o, i, y, x, \ + CAT(prefix, _IFM_NUM), \ + CAT(prefix, _OFM_NUM), \ + CAT(prefix, _SIZE_X), \ + CAT(prefix, _SIZE_Y), 4, 2) + #define GET_FILTER_OS_IS_YX_OSV8_ISV2_INDEX(prefix, o, i, y, x) \ get_g_os_is_yx_osv_isv( \ 0, o, i, y, 
x, \ diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl index dbc584eba22714..30d06e2e7ca9a0 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/reorder_weights.cl @@ -448,6 +448,10 @@ inline uint FUNC(get_input_index)(uint g, uint o, uint i, uint z, uint y, uint x return GET_FILTER_OS_IS_YX_OSV2_ISV32_INDEX(INPUT0, o, i, y, x); #elif defined INPUT0_LAYOUT_OS_IS_YX_OSV4_ISV16 return GET_FILTER_OS_IS_YX_OSV4_ISV16_INDEX(INPUT0, o, i, y, x); +#elif defined INPUT0_LAYOUT_OS_IS_YX_OSV8_ISV16 + return GET_FILTER_OS_IS_YX_OSV8_ISV16_INDEX(INPUT0, o, i, y, x); +#elif defined INPUT0_LAYOUT_OS_IS_YX_OSV4_ISV2 + return GET_FILTER_OS_IS_YX_OSV4_ISV2_INDEX(INPUT0, o, i, y, x); #elif defined INPUT0_LAYOUT_G_OS_IS_ZYX_OSV16_ISV16 return GET_FILTER_G_OS_IS_ZYX_OSV16_ISV16_INDEX(INPUT0, g, o, i, z, y, x); #elif defined INPUT0_LAYOUT_OS_IS_ZYX_OSV32_ISV16 @@ -567,6 +571,10 @@ inline uint FUNC(get_output_index)(uint g, uint o, uint i, uint z, uint y, uint return GET_FILTER_OS_IS_YX_OSV2_ISV32_INDEX(OUTPUT, o, i, y, x); #elif defined OUTPUT_LAYOUT_OS_IS_YX_OSV4_ISV16 return GET_FILTER_OS_IS_YX_OSV4_ISV16_INDEX(OUTPUT, o, i, y, x); +#elif defined OUTPUT_LAYOUT_OS_IS_YX_OSV8_ISV16 + return GET_FILTER_OS_IS_YX_OSV8_ISV16_INDEX(OUTPUT, o, i, y, x); +#elif defined OUTPUT_LAYOUT_OS_IS_YX_OSV4_ISV2 + return GET_FILTER_OS_IS_YX_OSV4_ISV2_INDEX(OUTPUT, o, i, y, x); #elif defined OUTPUT_LAYOUT_OS_IS_YX_OSV32_ISV4_SWIZZLED_BY_2 return GET_FILTER_OS_IS_YX_OSV32_ISV4_SWIZZLED_BY_2_INDEX(OUTPUT, o, i, y, x); #elif defined OUTPUT_LAYOUT_OS_IS_YX_OSV32_ISV4 diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp index a56f49b468b7fd..6caa5e75a474b7 100644 --- 
a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_common.cpp @@ -385,6 +385,8 @@ std::string toString(WeightsLayout layout) { case WeightsLayout::os_is_yx_osv2_isv16: return "OS_IS_YX_OSV2_ISV16"; case WeightsLayout::os_is_yx_osv2_isv32: return "OS_IS_YX_OSV2_ISV32"; case WeightsLayout::os_is_yx_osv4_isv16: return "OS_IS_YX_OSV4_ISV16"; + case WeightsLayout::os_is_yx_osv8_isv16: return "OS_IS_YX_OSV8_ISV16"; + case WeightsLayout::os_is_yx_osv4_isv2: return "OS_IS_YX_OSV4_ISV2"; case WeightsLayout::os_is_zyx_osv8_isv2: return "OS_IS_ZYX_OSV8_ISV2"; case WeightsLayout::goiyx: return "GOIYX"; case WeightsLayout::gioyx: return "GIOYX"; diff --git a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp index f6ca2a01254f27..24d79936715b14 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.cpp @@ -141,6 +141,8 @@ WeightsTensor::WeightsChannelArray WeightsTensor::weightsChannelArray {{ { WeightsLayout::os_is_yx_osv2_isv16, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_yx_osv2_isv32, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_yx_osv4_isv16, { 0, 1, -1, 2, 3, -1 } }, + { WeightsLayout::os_is_yx_osv8_isv16, { 0, 1, -1, 2, 3, -1 } }, + { WeightsLayout::os_is_yx_osv4_isv2, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_yx_osv8_isv4, { 0, 1, -1, 2, 3, -1 } }, { WeightsLayout::os_is_zyx_osv8_isv4, { 0, 1, 2, 3, 4, -1 } }, { WeightsLayout::os_is_yx_osv8_isv2, { 0, 1, -1, 2, 3, -1 } }, @@ -871,6 +873,16 @@ NDims WeightsTensor::GetSimpleDims(const std::vector& d, WeightsLayout l newDims[2] = RoundUp(newDims[2], 16); newDims[3] = RoundUp(newDims[3], 4); break; + case os_is_yx_osv8_isv16: + assert(newDims.size() == 4); + newDims[2] = RoundUp(newDims[2], 16); + newDims[3] = RoundUp(newDims[3], 8); + break; + case os_is_yx_osv4_isv2: + 
assert(newDims.size() == 4); + newDims[2] = RoundUp(newDims[2], 2); + newDims[3] = RoundUp(newDims[3], 4); + break; case os_is_yx_osv8_isv4: assert(newDims.size() == 4); newDims[2] = RoundUp(newDims[2], 4); diff --git a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h index d16dee409c91b4..6a70cff8deb861 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h +++ b/src/plugins/intel_gpu/src/kernel_selector/tensor_type.h @@ -191,6 +191,8 @@ enum WeightsLayout { os_is_yx_osv2_isv16, os_is_yx_osv2_isv32, os_is_yx_osv4_isv16, + os_is_yx_osv8_isv16, + os_is_yx_osv4_isv2, oizyx, iozyx, os_is_osv32_isv32_swizzled_by_4, // for weights for 1x1 IMAD convolution diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index f6b3e9ffda7830..4b0dafa847cc1d 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -56,6 +56,7 @@ #include "transformations/common_optimizations/broadcast_elementwise_fusion.hpp" #include "transformations/common_optimizations/broadcast_transition.hpp" #include "transformations/common_optimizations/lin_op_sequence_fusion.hpp" +#include "transformations/common_optimizations/lstm_cell_fusion.hpp" #include "transformations/common_optimizations/weights_dequantize_to_fake_quantize.hpp" #include "transformations/common_optimizations/convert_quantize_dequantize.hpp" #include "transformations/common_optimizations/wrap_interpolate_into_transposes.hpp" @@ -450,6 +451,11 @@ void TransformationsPipeline::apply(std::shared_ptr func) { return isCellPrimitiveSupported(node); }); + pass_config->set_callback( + [isCellPrimitiveSupported](const_node_ptr &node) -> bool { + return !isCellPrimitiveSupported(node); + }); + if (unroll_loop) { pass_config->set_callback func) { }); } + pass_config->set_callback( + 
[isSequencePrimitiveSupported](const_node_ptr &node) -> bool { + return !isSequencePrimitiveSupported(node); + }); pass_config->set_callback( [](const_node_ptr &node) -> bool { diff --git a/src/plugins/intel_gpu/src/runtime/format.cpp b/src/plugins/intel_gpu/src/runtime/format.cpp index 5a4872c19200bd..f7f1eb94b2501c 100644 --- a/src/plugins/intel_gpu/src/runtime/format.cpp +++ b/src/plugins/intel_gpu/src/runtime/format.cpp @@ -123,6 +123,8 @@ static const std::map format_traits_map { FMT_TRAITS(os_is_yx_osv2_isv16, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 2}, {1, 16}}), FMT_TRAITS(os_is_yx_osv2_isv32, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 2}, {1, 32}}), FMT_TRAITS(os_is_yx_osv4_isv16, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 4}, {1, 16}}), + FMT_TRAITS(os_is_yx_osv8_isv16, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 8}, {1, 16}}), + FMT_TRAITS(os_is_yx_osv4_isv2, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 4}, {1, 2}}), FMT_TRAITS(os_is_yx_osv16_isv4, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 16}, {1, 4}}), FMT_TRAITS(os_is_yx_osv8_isv4, 1, 1, 2, 0, {0, 1, 2, 3}, "oiyx", "oixy", {{0, 8}, {1, 4}}), FMT_TRAITS(os_is_zyx_osv8_isv4, 1, 1, 3, 0, {0, 1, 2, 3, 4}, "oizyx", "oixyz", {{0, 8}, {1, 4}}), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index 5cbe29b4241bc0..a680a2bc65974d 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -682,7 +682,6 @@ TEST_P(OVGetMetricPropsTest_CACHING_PROPERTIES, GetMetricAndPrintNoThrow) { const std::vector expected_properties = { ov::device::architecture.name(), ov::intel_gpu::execution_units_count.name(), - ov::intel_gpu::driver_version.name(), 
ov::hint::inference_precision.name(), ov::hint::execution_mode.name(), }; @@ -700,8 +699,6 @@ TEST_P(OVGetMetricPropsTest_CACHING_PROPERTIES, GetMetricAndPrintNoThrow) { ASSERT_TRUE(std::find(caching_properties.begin(), caching_properties.end(), property_name) != caching_properties.end()); } - - OV_ASSERT_PROPERTY_SUPPORTED(ov::internal::caching_properties); } INSTANTIATE_TEST_SUITE_P(nightly_OVGetMetricPropsTest, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp index 98384ef39cfd7b..be437518430e99 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/strided_slice.cpp @@ -64,6 +64,14 @@ std::vector ss_only_test_cases_fp32 = { { 1, 12, 100 }})), { 0, -6, 0 }, { 0, -8, 0 }, { -1, -2, -1 }, { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, 6, 0 }, { 0, 4, 0 }, { -1, -2, -1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 1, 12, 100 }})), + { 0, -8, 0 }, { 0, -4, 0 }, { -1, 2, -1 }, + { 1, 0, 1 }, { 1, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 } }, StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ { 1, 12, 100, 1, 1 }})), { 0, -1, 0, 0 }, { 0, 0, 0, 0 }, { 1, 1, 1, 1 }, @@ -109,8 +117,28 @@ std::vector ss_only_test_cases_fp32 = { { 0, 0, 0, 0, 0 }, { 0, 0, 29, 29, 29 }, { 1, 1, 1, 1, 1 }, {1, 1, 1, 1, 1}, {1, 1, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0} }, StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ - { 10, 12 
}})), - { -1, 1 }, { -9999, 0 }, { -1, 1 }, + { 10, 10 }})), + { 0, 0 }, { 1000, 2 }, { 1, 1 }, + { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 10, 10 }})), + { -1000, 0 }, { 1000, 2 }, { 1, 1 }, + { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 10, 10 }})), + { 1000, 1 }, { -1000, 2 }, { -1, 1 }, + { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 10, 10 }})), + { -1, 1 }, { -1000, 2 }, { -1, 1 }, + { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 10, 10 }})), + { -1, 1 }, { 0, 2 }, { -1, 1 }, + { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, + StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ + { 10, 10 }})), + { -4, 1 }, { -8, 0 }, { -1, 1 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }, StridedSliceSpecificParams{ ov::test::static_shapes_to_test_representation(std::vector({ { 5, 5, 5, 5 }})), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index aac54a9c72f723..87fd919febed9f 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -21,6 +21,8 @@ std::vector disabledTestPatterns() { // Unknown issues R"(.*(LSTMSequence).*mode=.*_RAND_SEQ_LEN_CONST.*)", R"(.*(smoke_DetectionOutput5In).*)", + + // TODO: Issue: 47773 R"(.*(ProposalLayerTest).*)", // TODO: Issue: 54194 @@ -35,12 +37,14 @@ std::vector disabledTestPatterns() { 
R"(.*Behavior.*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)", // Not implemented yet: R"(.*Behavior.*ExecutableNetworkBaseTest.*canSetConfigToExecNet.*)", + // Issue: 122177 + R"(.*LSTMSequenceCommon.*LSTMSequenceTest.Inference.*CONVERT_TO_TI.*)", // TODO: Issue 67408 R"(.*smoke_LSTMSequenceCommonClip.*LSTMSequenceTest.*Inference.*)", // TODO: Issue 114262 R"(LSTMSequenceCommonZeroClipNonConstantWRB/LSTMSequenceTest.Inference/mode=PURE_SEQ_seq_lengths=2_batch=10_hidden_size=1_.*relu.*)", // Expected behavior. GPU plugin doesn't support i64 for eltwise power operation. - R"(.*EltwiseLayerTest.*OpType=Pow.*NetType=i64.*)", + R"(.*EltwiseLayerTest.*eltwise_op_type=Pow.*model_type=i64.*)", // TODO: Issue: 68712 R"(.*.MatMul.*CompareWithRefs.*IS0=\(1.5\)_IS1=\(1.5\).*transpose_a=0.*transpose_b=1.*CONSTANT.*FP16.*UNSPECIFIED.*UNSPECIFIED.*ANY.*)", // Unsupported @@ -66,6 +70,9 @@ std::vector disabledTestPatterns() { R"(.*CachingSupportCase.*GPU.*CompileModelCacheTestBase.*CompareWithRefImpl.*)", // unsupported metrics R"(.*nightly_HeteroAutoBatchOVGetMetricPropsTest.*OVGetMetricPropsTest.*(FULL_DEVICE_NAME_with_DEVICE_ID|AVAILABLE_DEVICES|DEVICE_UUID|OPTIMIZATION_CAPABILITIES|MAX_BATCH_SIZE|DEVICE_GOPS|DEVICE_TYPE|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", + // Issue: 131699 SUPPORTED_PROPERTIES in BATCH issue. DEVICE_ID in BATCH, HETERO issue. 
+ R"(.*OVClassCompiledModelGetPropertyTest/OVClassCompiledModelGetPropertyTest.GetMetricNoThrow_SUPPORTED_CONFIG_KEYS/2)", + R"(.*nightly_HeteroAutoBatchOVCheckChangePropComplieModleGetPropTests_DEVICE_ID.*)", // Issue: 111437 R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.Inference.*)", R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.Inference.*)", diff --git a/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h b/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h index e3a0c41fe8853a..623b506285e3a5 100644 --- a/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h +++ b/src/plugins/intel_gpu/tests/unit/test_utils/test_utils.h @@ -591,7 +591,7 @@ inline cldnn::network::ptr get_network(cldnn::engine& engine, std::ostream out_mem(&mem_buf); cldnn::BinaryOutputBuffer ob = cldnn::BinaryOutputBuffer(out_mem); ob.set_stream(stream.get()); - cldnn::program::build_program(engine, topology, config, nullptr, false)->save(ob); + cldnn::program::build_program(engine, topology, config)->save(ob); } { std::istream in_mem(&mem_buf); diff --git a/src/plugins/template/tests/functional/skip_tests_config.cpp b/src/plugins/template/tests/functional/skip_tests_config.cpp index 5d9e369cde953a..0743b5837f2dab 100644 --- a/src/plugins/template/tests/functional/skip_tests_config.cpp +++ b/src/plugins/template/tests/functional/skip_tests_config.cpp @@ -121,8 +121,6 @@ std::vector disabledTestPatterns() { R"(.*eltwiseOpType=Mod_secondaryInputType=PARAMETER_opType=VECTOR_NetType=(f16|f32).*)", // Interpreter backend doesn't implement evaluate method for OP Multiply (by GroupNormalizationDecomposition) R"(.*ReferenceGroupNormalization.*_f64*)", - // CVS-131733 - R"(.*smoke_BehaviorTests/InferRequestPropertiesTest.ReusableCPUStreamsExecutor/*)", }; #ifdef _WIN32 diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_consistency.hpp 
b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_consistency.hpp index e69960fd493a2c..35f9e44222be77 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_consistency.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_consistency.hpp @@ -13,7 +13,6 @@ namespace ov { namespace test { namespace behavior { - // for deviceConfigs, the deviceConfigs[0] is target device which need to be tested. // deviceConfigs[1], deviceConfigs[2],deviceConfigs[n] are the devices which will // be compared with target device, the result of target should be in one of the compared diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp index aa5d03b6e93d8f..ac191f2966bbd8 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/properties_tests.hpp @@ -103,7 +103,8 @@ TEST_P(InferRequestPropertiesTest, withoutExclusiveAsyncRequests) { } } -TEST_P(InferRequestPropertiesTest, DISABLED_ReusableCPUStreamsExecutor) { +TEST_P(InferRequestPropertiesTest, ReusableCPUStreamsExecutor) { + ov::threading::executor_manager()->clear(); ASSERT_EQ(0u, ov::threading::executor_manager()->get_executors_number()); ASSERT_EQ(0u, ov::threading::executor_manager()->get_idle_cpu_streams_executors_number()); diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration_sw.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration_sw.hpp index a505c6fa46b457..b6a56c63e283c6 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration_sw.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration_sw.hpp @@ -98,7 +98,7 @@ 
TEST_P(OVClassSeveralDevicesTestQueryModel, QueryModelActualSeveralDevicesNoThro clear_target_device = target_devices.begin()->substr(0, pos); } auto deviceIDs = ie.get_property(clear_target_device, ov::available_devices); - ASSERT_LT(deviceIDs.size(), target_devices.size()); + ASSERT_LE(deviceIDs.size(), target_devices.size()); std::string multi_target_device = ov::test::utils::DEVICE_MULTI + std::string(":"); for (auto& dev_name : target_devices) { diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp index 0879ae91c2562c..6da4d0f86fd173 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/add_transformation.cpp @@ -51,8 +51,6 @@ std::string AddTransformation::getTestCaseName(const testing::TestParamInfo< Add } void AddTransformation::SetUp() { - abs_threshold = 1.1; - rel_threshold = 3; ov::element::Type precision; ov::PartialShape inputShape; AddTestValues param; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp index 6bd189263a8f11..1ba23576aa88aa 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/batch_to_space_transformation.cpp @@ -24,8 +24,6 @@ std::string BatchToSpaceTransformation::getTestCaseName(const testing::TestParam } void BatchToSpaceTransformation::SetUp() { - abs_threshold = 1.1; - ov::element::Type input_type; BatchToSpaceTransformationParam param; std::tie(input_type, targetDevice, param) = this->GetParam(); diff --git 
a/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp index 5a97477cc9abae..2cf6ee1d670730 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/clamp_transformation.cpp @@ -28,8 +28,6 @@ std::string ClampTransformation::getTestCaseName(const testing::TestParamInfo inputShapeAndHandling; ov::Shape outputShape; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp index f0eaa0b10b1131..62818c22a05557 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_qdq_transformation.cpp @@ -31,9 +31,6 @@ std::string ConvolutionQDqTransformation::getTestCaseName(const testing::TestPar } void ConvolutionQDqTransformation::SetUp() { - rel_threshold = 0.1; - abs_threshold = 12.8; - ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp index 4c87d697a6bdcc..90ba837d86ae01 100755 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_transformation.cpp @@ -34,9 +34,6 @@ std::string ConvolutionTransformation::getTestCaseName(const testing::TestParamI } void ConvolutionTransformation::SetUp() { - 
rel_threshold = 1.0e+10; - abs_threshold = 1.4; - ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp index fc74c3d260c635..a799cd8fc2929a 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/convolution_with_incorrect_weights.cpp @@ -34,9 +34,6 @@ std::string ConvolutionWIthIncorrectWeightsTransformation::getTestCaseName(const } void ConvolutionWIthIncorrectWeightsTransformation::SetUp() { - rel_threshold = 0.1; - abs_threshold = 16.1; - ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp index e35d184ba71c7b..ed2d115e1155c1 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/eliminate_fake_quantize_transformation.cpp @@ -49,7 +49,6 @@ void EliminateFakeQuantizeTransformation::SetUp() { TEST_P(EliminateFakeQuantizeTransformation, CompareWithRefImpl) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - abs_threshold = 2.3; run(); EliminateFakeQuantizeTransformationTestValues testValues; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp 
b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp index fd766959b0f383..8b9b70481d5880 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_avg_pool_transformation.cpp @@ -27,8 +27,6 @@ std::string FakeQuantizeAndAvgPoolTransformation::getTestCaseName(const testing: } void FakeQuantizeAndAvgPoolTransformation::SetUp() { - rel_threshold = 0.5f; - abs_threshold = 1.0; ov::element::Type precision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp index a641d08caed2aa..f447002a927de7 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_max_pool_transformation.cpp @@ -27,7 +27,6 @@ std::string FakeQuantizeAndMaxPoolTransformation::getTestCaseName(const testing: } void FakeQuantizeAndMaxPoolTransformation::SetUp() { - abs_threshold = 1.0; ov::element::Type precision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp index 11c5a55d6e2f39..449110e7fe55e1 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp 
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -33,9 +33,6 @@ std::string FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation::getTe } void FakeQuantizeAndTwoOutputBranchesWithConvolutionTransformation::SetUp() { - rel_threshold = 0.1; - abs_threshold = 0.1; - ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp index de67a53328239e..8b81da2d6b6070 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_precision_selection_transformation.cpp @@ -28,8 +28,6 @@ std::string FakeQuantizePrecisionSelectionTransformation::getTestCaseName(const } void FakeQuantizePrecisionSelectionTransformation::SetUp() { - abs_threshold = 0.01; - ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp index fdf773a9a8c784..6d6751db46d5b1 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_transformation.cpp @@ -32,8 +32,6 @@ std::string FakeQuantizeTransformation::getTestCaseName(const testing::TestParam } void FakeQuantizeTransformation::SetUp() { - abs_threshold = 1.0e-3; 
- ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp index 8ed160a3ec2b3f..969b137dcb540a 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp @@ -28,8 +28,6 @@ std::string FakeQuantizeWithNotOptimalTransformation::getTestCaseName(const test } void FakeQuantizeWithNotOptimalTransformation::SetUp() { - abs_threshold = 4; - rel_threshold = 2778; SKIP_IF_CURRENT_TEST_IS_DISABLED(); ov::PartialShape inputShape; ov::element::Type netPrecision; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp index 27773b7b8f5d4c..694962036010b1 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fully_connected_transformation.cpp @@ -35,8 +35,6 @@ std::string FullyConnectedTransformation::getTestCaseName(const testing::TestPar } void FullyConnectedTransformation::SetUp() { - abs_threshold = 0.6; - ov::element::Type precision; MatMulShapes shapes; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp index 
d93f82b3a9aa38..09be58373119de 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_convert_transformation.cpp @@ -34,7 +34,6 @@ std::string FuseConvertTransformation::getTestCaseName(const testing::TestParamI } void FuseConvertTransformation::SetUp() { - abs_threshold = 0.01; ov::PartialShape shape; ov::element::Type precision; ov::builder::subgraph::DequantizationOperations deqOperations; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp index 4ed6f51df2cec2..fac36d8f56b863 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp @@ -33,8 +33,6 @@ std::string FuseDequantizeToFakeQuantizeTransformation::getTestCaseName(const te } void FuseDequantizeToFakeQuantizeTransformation::SetUp() { - abs_threshold = 0.1; - FuseDequantizeToFakeQuantizeTransformationTestValues testValues; std::tie(targetDevice, testValues) = this->GetParam(); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp index 1ebafccd1a21d8..1a428dce08778e 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_fake_quantize_and_scale_shift_transformation.cpp @@ -27,7 +27,6 @@ std::string 
FuseFakeQuantizeAndScaleShiftTransformation::getTestCaseName(const t } void FuseFakeQuantizeAndScaleShiftTransformation::SetUp() { - abs_threshold = 1.8; ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp index af7f587f6d6700..b915fe21762141 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/gemm_transformation.cpp @@ -28,8 +28,6 @@ std::string GemmTransformation::getTestCaseName(const testing::TestParamInfo inputShapes; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp index e288a75d361144..57c92ecd4c411d 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/groupconvolution_qdq_transformation.cpp @@ -31,9 +31,6 @@ std::string GroupConvolutionQDqTransformation::getTestCaseName(const testing::Te } void GroupConvolutionQDqTransformation::SetUp() { - abs_threshold = 153.7; - - ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp index 8151bc84410211..a6e9f54178775c 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp +++ 
b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_transformation.cpp @@ -37,8 +37,6 @@ std::string MatMulTransformation::getTestCaseName(const testing::TestParamInfoGetParam(); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp index 68edab74b7c4f1..f5981ec8e77f74 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mat_mul_with_optimized_constant_fq.cpp @@ -37,9 +37,6 @@ std::string MatMulWithOptimizedConstantFq::getTestCaseName( } void MatMulWithOptimizedConstantFq::SetUp() { - rel_threshold = 0.01; - abs_threshold = 2.1; - ov::element::Type precision; std::pair shapes; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp index 6ace6bde3d6fa2..0767ffb272b00f 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/move_fake_quantize_transformation.cpp @@ -35,8 +35,6 @@ std::string MoveFakeQuantizeTransformation::getTestCaseName(testing::TestParamIn } void MoveFakeQuantizeTransformation::SetUp() { - abs_threshold = 1.1; - ov::element::Type netPrecision; std::vector inputShapes; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp index 
44e0a99c3c4452..a6d5d3b83b3c7a 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_transformation.cpp @@ -49,8 +49,6 @@ std::string MultiplyTransformation::getTestCaseName(const testing::TestParamInfo } void MultiplyTransformation::SetUp() { - abs_threshold = 0.1; - ov::element::Type precision; ov::PartialShape inputShape; MultiplyTestValues param; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp index f7ec4d8d4afcae..d8b961f6accd95 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp @@ -28,8 +28,6 @@ std::string MultiplyWithOneParentTransformation::getTestCaseName(const testing:: } void MultiplyWithOneParentTransformation::SetUp() { - rel_threshold = 0.01f; - ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp index 9c39f710965a52..38d001149ea0e3 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/mvn_transformation.cpp @@ -34,7 +34,6 @@ std::string MVNTransformation::getTestCaseName(const testing::TestParamInfo shapes; ov::element::Type precision; std::vector axes; diff --git 
a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp index 28aa6b8cf7b179..f216fbe18e16d3 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp @@ -43,8 +43,6 @@ std::string OutputLayersConcat::getTestCaseName(const testing::TestParamInfo activations_shapes; std::vector weights_shapes; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp index 67e232a4a4f77a..1fa455b5da8673 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp @@ -30,7 +30,6 @@ std::string ReduceMaxTransformation::getTestCaseName(const testing::TestParamInf } void ReduceMaxTransformation::SetUp() { - abs_threshold = 1.1; ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp index b28c4128686593..3a4611f2288100 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp @@ -42,7 +42,6 @@ std::string ReduceMeanTransformation::getTestCaseName(const testing::TestParamIn } void ReduceMeanTransformation::SetUp() { - abs_threshold = 4.1; ov::element::Type netPrecision; 
ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp index 52cdf578ab8d67..9c84ef2c860e29 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp @@ -30,7 +30,6 @@ std::string ReduceMinTransformation::getTestCaseName(const testing::TestParamInf } void ReduceMinTransformation::SetUp() { - abs_threshold = 0.1; ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp index 171fbfefa28e67..4cf3f75185b9e7 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp @@ -30,8 +30,6 @@ std::string ReduceSumTransformation::getTestCaseName(const testing::TestParamInf } void ReduceSumTransformation::SetUp() { - abs_threshold = 4.1; - ov::element::Type netPrecision; ov::PartialShape inputShape; ov::pass::low_precision::LayerTransformation::Params params; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp index b4bb254e3d4b71..603349350cdeb0 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp +++ 
b/src/tests/functional/plugin/shared/src/low_precision_transformations/relu_transformation.cpp @@ -32,7 +32,6 @@ std::string ReluTransformation::getTestCaseName(const testing::TestParamInfoGetParam(); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp index ee5eb94eeb970d..d014d159ab4c70 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/split_transformation.cpp @@ -30,7 +30,6 @@ std::string SplitTransformation::getTestCaseName(const testing::TestParamInfo getOriginal( const ov::element::Type precision, const ov::Shape& inputShape, - const ov::builder::subgraph::DequantizationOperations dequantization); + const ov::builder::subgraph::DequantizationOperations dequantization, + const bool typeRelaxed = true); static std::shared_ptr getReference( const ov::element::Type precision, const ov::Shape& inputShape, const ov::builder::subgraph::DequantizationOperations dequantizationBefore, const ov::element::Type precisionAfterOperation, - const ov::builder::subgraph::DequantizationOperations dequantizationAfter); + const ov::builder::subgraph::DequantizationOperations dequantizationAfter, + const bool typeRelaxed = true); }; } // namespace subgraph diff --git a/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp b/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp index ba01fe7d965261..a0ef6e1acbee10 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp @@ -16,16 +16,26 @@ namespace subgraph { std::shared_ptr MoveDequantizationAfterFunction::getOriginal( const ov::element::Type precision, const ov::Shape& inputShape, - const 
ov::builder::subgraph::DequantizationOperations dequantization) { + const ov::builder::subgraph::DequantizationOperations dequantization, + const bool typeRelaxed) { const auto input = std::make_shared(precision, inputShape); const auto deq = makeDequantization(input, dequantization); - const auto op = - ov::opset1::MaxPool(deq, Strides{1, 1}, Shape{1, 1}, Shape{0, 0}, Shape{2, 2}, ov::op::RoundingType::FLOOR); - const auto targetOp = std::make_shared>( - op, - std::vector{ov::element::f32, ov::element::f32}, - std::vector{}); + const auto targetOp = typeRelaxed ? std::make_shared>( + std::vector{ov::element::f32, ov::element::f32}, + std::vector{}, + deq, + Strides{1, 1}, + Shape{1, 1}, + Shape{0, 0}, + Shape{2, 2}, + ov::op::RoundingType::FLOOR) + : std::make_shared(deq, + Strides{1, 1}, + Shape{1, 1}, + Shape{0, 0}, + Shape{2, 2}, + ov::op::RoundingType::FLOOR); auto& rtInfo = targetOp->get_rt_info(); rtInfo["Variant::std::string"] = "targetOp"; @@ -40,21 +50,26 @@ namespace subgraph { const ov::Shape& inputShape, const ov::builder::subgraph::DequantizationOperations dequantizationBefore, const ov::element::Type precisionAfterOperation, - const ov::builder::subgraph::DequantizationOperations dequantizationAfter) { + const ov::builder::subgraph::DequantizationOperations dequantizationAfter, + const bool typeRelaxed) { const auto input = std::make_shared(precision, inputShape); const auto deqBefore = makeDequantization(input, dequantizationBefore); - const auto op = ov::opset1::MaxPool(deqBefore, - Strides{1, 1}, - Shape{1, 1}, - Shape{0, 0}, - Shape{2, 2}, - ov::op::RoundingType::FLOOR); - const auto targetOp = std::make_shared>( - op, - std::vector{ov::element::f32, ov::element::f32}, - std::vector{}); - ov::pass::low_precision::NetworkHelper::setOutDataPrecisionForTypeRelaxed(targetOp, precisionAfterOperation); + const auto targetOp = typeRelaxed ? 
std::make_shared>( + std::vector{ov::element::f32, ov::element::f32}, + std::vector{precisionAfterOperation}, + deqBefore, + Strides{1, 1}, + Shape{1, 1}, + Shape{0, 0}, + Shape{2, 2}, + ov::op::RoundingType::FLOOR) + : std::make_shared(deqBefore, + Strides{1, 1}, + Shape{1, 1}, + Shape{0, 0}, + Shape{2, 2}, + ov::op::RoundingType::FLOOR); auto& rtInfo = targetOp->get_rt_info(); rtInfo["Variant::std::string"] = "targetOp"; diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/common_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/common_utils.hpp index 600a3c62eb9b1b..972d0c84bfe639 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/common_utils.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/common_utils.hpp @@ -36,6 +36,21 @@ inline std::string vec2str(const std::vector& vec) { } return std::string("()"); } + +template <> +inline std::string vec2str(const std::vector& vec) { + if (!vec.empty()) { + std::ostringstream result; + result << "("; + std::copy(vec.begin(), vec.end() - 1, std::ostream_iterator(result, ".")); + result << vec.back() << ")"; + auto ret = result.str(); + std::replace(ret.begin(), ret.end(), '-', '_'); + return ret; + } + return std::string("()"); +} + inline void replaceSubstringInString(std::string& str, const std::string& from, const std::string& to) { size_t pos; while ((pos = str.find(from)) != std::string::npos) { diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp index f4d62900dec31b..14aff0703fab29 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp @@ -15,6 +15,7 @@ #include "openvino/op/util/framework_node.hpp" #include "openvino/op/util/sub_graph_base.hpp" 
#include "openvino/runtime/aligned_buffer.hpp" +#include "openvino/runtime/string_aligned_buffer.hpp" class FunctionsComparator { public: @@ -470,7 +471,8 @@ class Storage : private AttributeStorage, private AttributeStorage, private AttributeStorage>, private AttributeStorage, - private AttributeStorage { + private AttributeStorage, + private AttributeStorage> { public: template const AttributeStorage& storage() const { @@ -504,7 +506,8 @@ class Storage : private AttributeStorage, storage().get_attributes_number() + storage().get_attributes_number() + storage>().get_attributes_number() + - storage().get_attributes_number() + storage().get_attributes_number(); + storage().get_attributes_number() + storage().get_attributes_number() + + storage>().get_attributes_number(); } }; @@ -966,6 +969,7 @@ class ReadAndCompareAttributes : public ov::AttributeVisitor { void verify(const std::string& name, const AttrValue& attr_value); void verify_mem_buf(const std::string& name, const std::shared_ptr& buffer); + void verify_string_aligned_buffer(const std::string& name, const std::shared_ptr& buffer); using ModelAccessor = ov::ValueAccessor>; diff --git a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp index b5e4c2d116b528..5c57b171dc5fda 100644 --- a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp +++ b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp @@ -12,6 +12,7 @@ #include "openvino/op/tensor_iterator.hpp" #include "openvino/op/util/op_types.hpp" #include "openvino/op/util/sub_graph_base.hpp" +#include "openvino/runtime/string_aligned_buffer.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "precomp.hpp" @@ -916,6 +917,9 @@ void ReadAndStoreAttributes::on_adapter(const std::string& name, ov::ValueAccess insert(name, shape_ptr->get()); } else if (auto dim_ptr = ov::as_type>(&adapter)) { insert(name, dim_ptr->get()); + } else if (auto string_aligned_buffer 
= + ov::as_type>>(&adapter)) { + insert(name, string_aligned_buffer->get()); } else { m_read_result += "store attr [ ERR ]: " + name + " [drop `void` comparison which is '" + adapter.get_type_info().name + "']"; @@ -958,6 +962,32 @@ void ReadAndCompareAttributes::verify_mem_buf(const std::string& name, } } +void ReadAndCompareAttributes::verify_string_aligned_buffer(const std::string& name, + const std::shared_ptr& buffer) { + if (should_return()) { + return; + } + m_visited_attributes.insert(name); + const auto ref_value = *(m_attr_ref.get>(name)); + if (!ref_value) { + m_cmp_result += "missing attribute name: '" + name + "'"; + return; + } + auto num_elements = buffer->get_num_elements(); + if (num_elements != buffer->get_num_elements()) { + m_cmp_result += "number of string elements mismatch"; + return; + } + std::string* ref_strings = static_cast(ref_value->get_ptr()); + std::string* cmp_strings = static_cast(buffer->get_ptr()); + for (size_t ind = 0; ind < num_elements; ++ind) { + if (ref_strings[ind].compare(cmp_strings[ind])) { + m_cmp_result += "string elements mismatch"; + return; + } + } +} + void ReadAndCompareAttributes::verify_function(const std::string& name, ModelAccessor& adapter) { if (should_return()) { return; @@ -994,6 +1024,9 @@ void ReadAndCompareAttributes::verify_others(const std::string& name, ov::ValueA verify(name, shape_ptr->get()); } else if (auto dim_ptr = ov::as_type>(&adapter)) { verify(name, dim_ptr->get()); + } else if (auto string_aligned_buffer_ptr = + ov::as_type>>(&adapter)) { + verify_string_aligned_buffer(name, string_aligned_buffer_ptr->get()); } else { m_cmp_result += "compare attr [ ERR ]: " + name + " [drop `void` comparison which is '" + adapter.get_type_info().name + "']"; diff --git a/tests/layer_tests/ovc_python_api_tests/telemetry/test_pytorch_telemetry.py b/tests/layer_tests/ovc_python_api_tests/telemetry/test_pytorch_telemetry.py new file mode 100644 index 00000000000000..c1b6d4ae6d4b68 --- /dev/null +++ 
b/tests/layer_tests/ovc_python_api_tests/telemetry/test_pytorch_telemetry.py @@ -0,0 +1,47 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import unittest +from unittest.mock import MagicMock, patch + +import openvino_telemetry as tm +import torch +from openvino.tools.ovc import convert_model + + +def arange_pt_model(): + + class aten_arange_end_dtype(torch.nn.Module): + def __init__(self, dtype) -> None: + super(aten_arange_end_dtype, self).__init__() + self.dtype = dtype + + def forward(self, x): + return torch.arange(x, dtype=self.dtype) + + return aten_arange_end_dtype(torch.float32) + + +def mocked_inputs(self): + # This line returns incorrect inputs and causes exception raise in translator + if hasattr(self, "graph_element") and hasattr(self.graph_element, "kind") and self.graph_element.kind() == "aten::arange": + return [0] + + return [x.unique() for x in self.raw_inputs] + + +@patch('openvino.frontend.pytorch.ts_decoder.TorchScriptPythonDecoder.inputs', mocked_inputs) +class TestGeneralTelemetrySending(unittest.TestCase): + def test_general_telemetry_sending(self): + tm.Telemetry.send_event = MagicMock() + + # Create PyTorch model with Arange + model = torch.jit.script(arange_pt_model()) + + try: + _ = convert_model(model, input=[torch.float32]) + except: + pass + + tm.Telemetry.send_event.assert_any_call('ovc', 'error_info', '[PyTorch Frontend] Not expected number of inputs for aten::arange\n', 1) + tm.Telemetry.send_event.reset_mock()