diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 3f2178a1c681e5..3727c4d88f6e8b 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -94,6 +94,7 @@ /tests/layer_tests/tensorflow_tests @openvinotoolkit/openvino-tf-frontend-maintainers /tests/layer_tests/jax_tests @openvinotoolkit/openvino-tf-frontend-maintainers /tests/model_hub_tests @openvinotoolkit/openvino-tf-frontend-maintainers +/tests/model_hub_tests/torch_tests @openvinotoolkit/openvino-pytorch-frontend-maintainers # Tools: /tools/ @openvinotoolkit/openvino-tools-maintainers diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 733dfed4c09d14..639eca9957928d 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -760,6 +760,18 @@ jobs: ${INSTALL_TEST_DIR}/ov_cpu_unit_tests --gtest_print_time=1 \ --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-CPUUnitTests.xml + - name: SubgraphsDumper tests + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/subgraphsDumperTests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-SubgraphsDumperTests.xml + + - name: Template OpImpl tests + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/conformanceTests --gtest_print_time=1 --device=TEMPLATE --gtest_filter=*OpImpl*\ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpImplTests.xml + - name: AUTO unit tests run: | source ${INSTALL_DIR}/setupvars.sh diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index d5084d7a5d19c6..487536f615a8a6 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -1,6 +1,9 @@ -name: macOS (macOS 12, Python 3.11) +name: macOS (Python 3.11) on: workflow_dispatch: + schedule: + # at 00:00 on workdays + - cron: '0 0 * * 1,2,3,4,5' # pull_request: # paths-ignore: # - '**/docs/**' @@ -9,17 +12,17 @@ on: # - '**.md' # - '**/layer_tests_summary/**' # - '**/conformance/**' - push: - paths-ignore: - - '**/docs/**' - - 'docs/**' - - '**/**.md' - - '**.md' - - 
'**/layer_tests_summary/**' - - '**/conformance/**' - branches: - - master - - 'releases/**' +# push: +# paths-ignore: +# - '**/docs/**' +# - 'docs/**' +# - '**/**.md' +# - '**.md' +# - '**/layer_tests_summary/**' +# - '**/conformance/**' +# branches: +# - master +# - 'releases/**' concurrency: # github.ref is not unique in post-commit @@ -34,11 +37,22 @@ jobs: defaults: run: shell: bash - runs-on: macos-12-large + strategy: + max-parallel: 2 + fail-fast: false + matrix: + include: + - arhitecture: 'x86_64' + machine: 'macos-13-large' + macos_deployment_target: '10.12' + - arhitecture: 'arm64' + machine: 'macos-13-xlarge' + macos_deployment_target: '11.0' + runs-on: ${{ matrix.machine }} env: CMAKE_BUILD_TYPE: 'Release' CMAKE_GENERATOR: 'Ninja Multi-Config' - MACOSX_DEPLOYMENT_TARGET: '10.12' + MACOSX_DEPLOYMENT_TARGET: ${{ matrix.macos_deployment_target }} CMAKE_CXX_COMPILER_LAUNCHER: ccache CMAKE_C_COMPILER_LAUNCHER: ccache OPENVINO_REPO: ${{ github.workspace }}/openvino @@ -100,9 +114,9 @@ jobs: # github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push save: ${{ github.ref_name == 'master' && 'true' || 'false' }} verbose: 2 - key: ${{ runner.os }}-main + key: ${{ runner.os }}-${{ matrix.arhitecture }}-main restore-keys: | - ${{ runner.os }}-main + ${{ runner.os }}-${{ matrix.arhitecture }}-main - name: CMake configure run: | @@ -144,6 +158,7 @@ jobs: run: | cmake \ -DBUILD_nvidia_plugin=OFF \ + -DBUILD_java_api=OFF \ -DCUSTOM_OPERATIONS="calculate_grid;complex_mul;fft;grid_sample;sparse_conv;sparse_conv_transpose" \ -DOPENVINO_EXTRA_MODULES=${{ env.OPENVINO_CONTRIB_REPO }}/modules \ -S ${{ env.OPENVINO_REPO }} \ @@ -158,7 +173,7 @@ jobs: if: ${{ always() }} uses: actions/upload-artifact@v3 with: - name: openvino_package + name: openvino_package_${{ matrix.arhitecture }} path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz if-no-files-found: 'error' @@ -166,7 +181,7 @@ jobs: if: ${{ always() }} uses: actions/upload-artifact@v3 with: 
- name: openvino_tests + name: openvino_tests_${{ matrix.arhitecture }} path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz if-no-files-found: 'error' @@ -175,7 +190,16 @@ jobs: defaults: run: shell: bash - runs-on: macos-12 + strategy: + max-parallel: 2 + fail-fast: false + matrix: + include: + - arhitecture: 'x86_64' + machine: 'macos-13' + - arhitecture: 'arm64' + machine: 'macos-13-xlarge' + runs-on: ${{ matrix.machine }} env: INSTALL_DIR: ${{ github.workspace }}/install INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests @@ -189,13 +213,13 @@ jobs: - name: Download OpenVINO package uses: actions/download-artifact@v3 with: - name: openvino_package + name: openvino_package_${{ matrix.arhitecture }} path: ${{ env.INSTALL_DIR }} - name: Download OpenVINO tests package uses: actions/download-artifact@v3 with: - name: openvino_tests + name: openvino_tests_${{ matrix.arhitecture }} path: ${{ env.INSTALL_TEST_DIR }} - name: Extract OpenVINO packages @@ -248,7 +272,7 @@ jobs: uses: actions/upload-artifact@v3 if: ${{ !cancelled() }} with: - name: test-results-samples + name: test-results-samples-${{ matrix.arhitecture }} path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml if-no-files-found: 'error' @@ -258,7 +282,16 @@ jobs: defaults: run: shell: bash - runs-on: macos-12 + strategy: + max-parallel: 2 + fail-fast: false + matrix: + include: + - arhitecture: 'x86_64' + machine: 'macos-13' + - arhitecture: 'arm64' + machine: 'macos-13-xlarge' + runs-on: ${{ matrix.machine }} env: INSTALL_DIR: ${{ github.workspace }}/install INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests @@ -271,13 +304,13 @@ jobs: - name: Download OpenVINO package uses: actions/download-artifact@v3 with: - name: openvino_package + name: openvino_package_${{ matrix.arhitecture }} path: ${{ env.INSTALL_DIR }} - name: Download OpenVINO tests package uses: actions/download-artifact@v3 with: - name: openvino_tests + name: openvino_tests_${{ matrix.arhitecture }} path: ${{ env.INSTALL_TEST_DIR }} - name: 
Extract OpenVINO packages @@ -314,7 +347,11 @@ jobs: - name: Low Precision Transformations Tests run: | source ${{ env.INSTALL_DIR }}/setupvars.sh - ${{ env.INSTALL_TEST_DIR }}/ov_lp_transformations_tests --gtest_print_time=1 \ + + # Skips under Ticket: 122660 + skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*smoke_LPT/FoldFakeQuantizeInTransformations.CompareFunctions*' || '' }} + + ${{ env.INSTALL_TEST_DIR }}/ov_lp_transformations_tests --gtest_print_time=1 "$skip_filter" \ --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LpTransformations.xml - name: OpenVINO Conditional compilation tests @@ -337,8 +374,10 @@ jobs: --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml - name: ONNX frontend tests + if: ${{ matrix.arhitecture == 'x86_64' }} # Ticket for ARM64: 122663 run: | source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \ --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml @@ -351,7 +390,11 @@ jobs: - name: TensorFlow frontend tests run: | source ${{ env.INSTALL_DIR }}/setupvars.sh - ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 \ + + # Skips under Ticket: 122666 + skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*CompileModelsTests.ModelWithSplitConvConcat*:*NgramCompilation*' || '' }} + + ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 "$skip_filter" \ --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowFrontend.xml - name: TensorFlow Lite frontend tests @@ -363,7 +406,11 @@ jobs: - name: Transformations func tests run: | source ${{ env.INSTALL_DIR }}/setupvars.sh - ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 \ + + # Skips under Ticket: 122668 + skip_filter=${{ matrix.arhitecture == 'arm64' && 
'--gtest_filter=-*TransformationTestsF.CompressQuantizeWeights*:*TransformationTests/CompressQuantizeWeightsTests.FusionTest*' || '' }} + + ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 "$skip_filter" \ --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml - name: Common test utils tests @@ -384,6 +431,18 @@ jobs: ${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 \ --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml + - name: SubgraphsDumper tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/subgraphsDumperTests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-SubgraphsDumperTests.xml + + - name: Template OpImpl tests + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/conformanceTests --gtest_print_time=1 --device=TEMPLATE --gtest_filter="*OpImpl*" \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TemplateOpImplTests.xml + - name: AUTO unit tests run: | source ${{ env.INSTALL_DIR }}/setupvars.sh @@ -444,7 +503,7 @@ jobs: uses: actions/upload-artifact@v3 if: ${{ always() }} with: - name: test-results-cpp + name: test-results-cpp-${{ matrix.arhitecture }} path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml if-no-files-found: 'error' @@ -454,7 +513,16 @@ jobs: defaults: run: shell: bash - runs-on: macos-12 + strategy: + max-parallel: 2 + fail-fast: false + matrix: + include: + - arhitecture: 'x86_64' + machine: 'macos-13' + - arhitecture: 'arm64' + machine: 'macos-13-xlarge' + runs-on: ${{ matrix.machine }} env: OPENVINO_REPO: ${{ github.workspace }}/openvino OPENVINO_CONTRIB_REPO: ${{ github.workspace }}/openvino_contrib @@ -479,13 +547,13 @@ jobs: - name: Download OpenVINO package uses: actions/download-artifact@v3 with: - name: openvino_package + name: openvino_package_${{ matrix.arhitecture }} path: ${{ env.INSTALL_DIR }} - name: Download OpenVINO tests package uses: actions/download-artifact@v3 with: - 
name: openvino_tests + name: openvino_tests_${{ matrix.arhitecture }} path: ${{ env.INSTALL_TEST_DIR }} - name: Extract OpenVINO packages @@ -511,10 +579,16 @@ jobs: # Install the core OV wheel python3 -m pip install ${{ env.INSTALL_DIR }}/tools/openvino-*.whl + # mxnet is only available on x86_64 + extras_to_install="caffe,kaldi,onnx,tensorflow2,pytorch" + if [[ "${{ matrix.arhitecture }}" == "x86_64" ]]; then + extras_to_install="mxnet,$extras_to_install" + fi + # Find and install OV dev wheel pushd ${{ env.INSTALL_DIR }}/tools ov_dev_wheel_name=$(find . -name 'openvino_dev*.whl') - python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2,pytorch] + python3 -m pip install $ov_dev_wheel_name[$extras_to_install] popd - name: Python API 1.0 Tests @@ -597,6 +671,7 @@ jobs: TEST_DEVICE: CPU - name: TensorFlow 2 Layer Tests - TF FE + if: ${{ 'false' }} # Ticket: 123322 run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt export PYTHONPATH=${{ env.OPENVINO_REPO }}/tools/mo/:${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH @@ -634,6 +709,7 @@ jobs: TEST_PRECISION: FP16 - name: Python ONNX operators tests + if: ${{ 'false' }} # Ticket: 123325 run: | # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately python3 -m pytest -sv ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests -k 'not cuda' \ @@ -657,18 +733,27 @@ jobs: uses: actions/upload-artifact@v3 if: ${{ always() }} with: - name: test-results-python + name: test-results-python-${{ matrix.arhitecture }} path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml if-no-files-found: 'error' CPU_Functional_Tests: name: CPU functional tests - if: ${{ 'false' }} # Ticket: 122001 needs: Build defaults: run: shell: bash - runs-on: macos-12 + strategy: + max-parallel: 2 + fail-fast: false + matrix: + include: + # ticket: 122001 + # - arhitecture: 'x86_64' + # machine: 'macos-13' + - arhitecture: 'arm64' + machine: 
'macos-13-xlarge' + runs-on: ${{ matrix.machine }} env: INSTALL_DIR: ${{ github.workspace }}/install INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests @@ -680,33 +765,37 @@ jobs: - name: Download OpenVINO package uses: actions/download-artifact@v3 with: - name: openvino_package + name: openvino_package_${{ matrix.arhitecture }} path: ${{ env.INSTALL_DIR }} - name: Download OpenVINO tests package uses: actions/download-artifact@v3 with: - name: openvino_tests + name: openvino_tests_${{ matrix.arhitecture }} path: ${{ env.INSTALL_TEST_DIR }} - name: Extract OpenVINO packages run: | pushd ${{ env.INSTALL_DIR }} - tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_package.tar.gz || exit 1 + tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_package.tar.gz popd pushd ${{ env.INSTALL_TEST_DIR }} - tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_tests.tar.gz || exit 1 + tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_tests.tar.gz popd - - name: Intel CPU plugin func tests + - name: CPU plugin func tests run: | source ${{ env.INSTALL_DIR }}/setupvars.sh - ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:"${{ env.INSTALL_TEST_DIR }}/TEST-CPUFuncTests.xml" + + # Skips under Ticket: 122769 + skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*smoke_nonzero/NonZeroLayerTest.Inference/IS*:*smoke_NormalizeL2_*:*Extension.XmlModelWithExtensionFromDSO*:*Extension.OnnxModelWithExtensionFromDSO*:*ONNXQuantizedModels/QuantizedModelsTests.MaxPool*:*ONNXQuantizedModels/QuantizedModelsTests.Convolution*' || '' }} + + ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests --gtest_print_time=1 --gtest_filter=*smoke* "$skip_filter" --gtest_output=xml:"${{ env.INSTALL_TEST_DIR }}/TEST-CPUFuncTests.xml" - name: Upload Test Results uses: actions/upload-artifact@v3 if: ${{ always() }} with: - name: test-results-functional-cpu + 
name: test-results-functional-cpu-${{ matrix.arhitecture }} path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml if-no-files-found: 'error' diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 6ce891e6767698..e6763d2a696377 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -564,6 +564,14 @@ jobs: run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml + - name: SubgraphsDumper tests + run: | + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/subgraphsDumperTests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SubgraphsDumperTests.xml + + - name: Template OpImpl tests + run: | + call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/conformanceTests --gtest_print_time=1 --device=TEMPLATE --gtest_filter="*OpImpl*" --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateOpImplTests.xml + - name: GNA plugin unit tests shell: cmd run: | diff --git a/cmake/developer_package/frontends/frontends.cmake b/cmake/developer_package/frontends/frontends.cmake index a86c57c6c87845..a20b1665fb7d29 100644 --- a/cmake/developer_package/frontends/frontends.cmake +++ b/cmake/developer_package/frontends/frontends.cmake @@ -125,17 +125,24 @@ macro(ov_add_frontend) source_group("public include" FILES ${LIBRARY_PUBLIC_HEADERS}) # Generate protobuf file on build time for each '.proto' file in src/proto - file(GLOB proto_files ${frontend_root_dir}/src/proto/*.proto) + set(protofiles_root_dir "${frontend_root_dir}/src/proto") + file(GLOB_RECURSE proto_files ${protofiles_root_dir}/*.proto) foreach(proto_file IN LISTS proto_files) + # filter out standard google proto files + if(proto_file MATCHES ".*google.*") + continue() + endif() + file(RELATIVE_PATH proto_file_relative "${CMAKE_SOURCE_DIR}" "${proto_file}") - 
get_filename_component(FILE_DIR ${proto_file} DIRECTORY) get_filename_component(FILE_WE ${proto_file} NAME_WE) - set(OUTPUT_PB_SRC ${CMAKE_CURRENT_BINARY_DIR}/${FILE_WE}.pb.cc) - set(OUTPUT_PB_HEADER ${CMAKE_CURRENT_BINARY_DIR}/${FILE_WE}.pb.h) + file(RELATIVE_PATH relative_path ${protofiles_root_dir} ${proto_file}) + get_filename_component(relative_path ${relative_path} DIRECTORY) + set(OUTPUT_PB_SRC ${CMAKE_CURRENT_BINARY_DIR}/${relative_path}/${FILE_WE}.pb.cc) + set(OUTPUT_PB_HEADER ${CMAKE_CURRENT_BINARY_DIR}/${relative_path}/${FILE_WE}.pb.h) add_custom_command( OUTPUT "${OUTPUT_PB_SRC}" "${OUTPUT_PB_HEADER}" - COMMAND ${PROTOC_EXECUTABLE} ARGS --cpp_out ${CMAKE_CURRENT_BINARY_DIR} -I ${FILE_DIR} ${FILE_WE}.proto + COMMAND ${PROTOC_EXECUTABLE} ARGS --cpp_out ${CMAKE_CURRENT_BINARY_DIR} -I ${protofiles_root_dir} ${proto_file} DEPENDS ${PROTOC_DEPENDENCY} ${proto_file} COMMENT "Running C++ protocol buffer compiler (${PROTOC_EXECUTABLE}) on ${proto_file_relative}" VERBATIM diff --git a/docs/requirements.txt b/docs/requirements.txt index 69433a40eb64ff..2e643842f24861 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -44,6 +44,6 @@ sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 toml==0.10.2 -urllib3==1.26.17 +urllib3==1.26.18 zipp==3.4.1 docs/openvino_custom_sphinx_sitemap diff --git a/src/bindings/python/src/compatibility/ngraph/opset3/ops.py b/src/bindings/python/src/compatibility/ngraph/opset3/ops.py index 82846826111751..7d7c757d9cd5dc 100644 --- a/src/bindings/python/src/compatibility/ngraph/opset3/ops.py +++ b/src/bindings/python/src/compatibility/ngraph/opset3/ops.py @@ -550,9 +550,9 @@ def shuffle_channels(data: Node, axis: int, group: int, name: Optional[str] = No `data_reshaped` = reshape(`data`, [N, group, C / group, H * W]) - `data_trnasposed` = transpose(`data_reshaped`, [0, 2, 1, 3]) + `data_transposed` = transpose(`data_reshaped`, [0, 2, 1, 3]) - `output` = reshape(`data_trnasposed`, [N, 
C, H, W]) + `output` = reshape(`data_transposed`, [N, C, H, W]) For example: diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/op_support.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/op_support.py index 726f3b598bc15e..4a76d90b160553 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/op_support.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/op_support.py @@ -41,6 +41,7 @@ def __init__(self): "torch.ops.aten.arange.default": None, "torch.ops.aten.argmax.default": None, "torch.ops.aten.avg_pool2d.default": None, + "torch.ops.aten.baddbmm.default": None, "torch.ops.aten.bitwise_and.Tensor": None, "torch.ops.aten.bmm.default": None, "torch.ops.aten.cat.default": None, @@ -67,6 +68,7 @@ def __init__(self): "torch.ops.aten.hardswish_.default": None, "torch.ops.aten.hardtanh_.default": None, "torch.ops.aten.index.Tensor": None, + "torch.ops.aten.leaky_relu_.default": None, "torch.ops.aten.lift_fresh_copy.default": None, "torch.ops.aten.linalg_vector_norm.default": None, "torch.ops.aten.lt.Tensor": None, @@ -89,6 +91,7 @@ def __init__(self): "torch.ops.aten.relu.default": None, "torch.ops.aten.relu_.default": None, "torch.ops.aten.rsub.Scalar": None, + "torch.ops.aten._scaled_dot_product_flash_attention.default": None, "torch.ops.aten.select.int": None, "torch.ops.aten.sigmoid.default": None, "torch.ops.aten.silu.default": None, diff --git a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py index 11d5991e700c42..f7a398bf67e519 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py @@ -107,9 +107,10 @@ def _get_scripted_model(self, pt_module, example_inputs=None, skip_freeze=False) gptq.unpatch_model(pt_module) if not skip_freeze: + ops_kind_no_freeze = ["quantize", "aten::as_strided"] for n in 
scripted.inlined_graph.nodes(): # TODO: switch off freezing for all traced models - if "quantize" in n.kind(): + if any(kind in n.kind() for kind in ops_kind_no_freeze): # do not freeze quantized models skip_freeze = True break @@ -150,6 +151,16 @@ def get_input_shape(self, index: int): raw_input = self._raw_input(index) return self.get_shape_for_value(raw_input) + def get_input_strides(self, index: int) -> typing.List[int]: + raw_input = self._raw_input(index) + if isinstance(raw_input, torch.Value): + inp_type = raw_input.type() + if isinstance(inp_type, torch.TensorType): + strides = inp_type.strides() + if strides: + return strides + return [] + def get_input_type(self, index: int): raw_input = self._raw_input(index) return self.get_type_for_value(raw_input) diff --git a/src/bindings/python/src/openvino/runtime/opset3/ops.py b/src/bindings/python/src/openvino/runtime/opset3/ops.py index 979fda8a782a02..8a1d81d9703ffb 100644 --- a/src/bindings/python/src/openvino/runtime/opset3/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset3/ops.py @@ -575,9 +575,9 @@ def shuffle_channels(data: Node, axis: int, group: int, name: Optional[str] = No `data_reshaped` = reshape(`data`, [N, group, C / group, H * W]) - `data_trnasposed` = transpose(`data_reshaped`, [0, 2, 1, 3]) + `data_transposed` = transpose(`data_reshaped`, [0, 2, 1, 3]) - `output` = reshape(`data_trnasposed`, [N, C, H, W]) + `output` = reshape(`data_transposed`, [N, C, H, W]) For example: diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp index a1136e4cda6f66..024b03b2ff4cd9 100644 --- a/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp +++ b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.hpp @@ -34,6 +34,10 @@ class PyDecoder : public ov::frontend::pytorch::TorchDecoder { PYBIND11_OVERRIDE_PURE(ov::PartialShape, TorchDecoder, get_input_shape, index); } + const std::vector& 
get_input_strides(size_t index) const override { + PYBIND11_OVERRIDE_PURE(const std::vector&, TorchDecoder, get_input_strides, index); + } + ov::Any get_input_type(size_t index) const override { PYBIND11_OVERRIDE_PURE(ov::Any, TorchDecoder, get_input_type, index); } diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_onnx_external_data.py b/src/bindings/python/tests_compatibility/test_onnx/test_onnx_external_data.py index ec8f6c49e7ffb6..025c438fedf5d2 100644 --- a/src/bindings/python/tests_compatibility/test_onnx/test_onnx_external_data.py +++ b/src/bindings/python/tests_compatibility/test_onnx/test_onnx_external_data.py @@ -1,15 +1,19 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform import os import numpy as np import ngraph as ng +import pytest from openvino.inference_engine import IECore from tests_compatibility.runtime import get_runtime +@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122712') def test_import_onnx_with_external_data(): model_path = os.path.join(os.path.dirname(__file__), "models/external_data.onnx") ie = IECore() diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_nonlinear.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_nonlinear.py index 60ab593d097250..7b1ebc7295ce96 100644 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_nonlinear.py +++ b/src/bindings/python/tests_compatibility/test_onnx/test_ops_nonlinear.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import onnx import pytest @@ -45,6 +47,8 @@ def relu(x): assert_onnx_import_equals_callable("Relu", relu, [[-3, -2, -1], [1, 2, 3]]) +@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122712') def test_leaky_relu(): def leaky_relu(x, alpha=0.01): return 
np.maximum(alpha * x, x) @@ -79,6 +83,8 @@ def parametic_relu(x, slope): assert np.allclose(output, expected_output) +@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122712') def test_selu(): # f(x) = gamma * (alpha * exp(x) - alpha) for x <= 0, y = gamma * x for x > 0 def selu(x, alpha=1.67326319217681884765625, gamma=1.05070102214813232421875): diff --git a/src/bindings/python/tests_compatibility/test_onnx/test_ops_unary.py b/src/bindings/python/tests_compatibility/test_onnx/test_ops_unary.py index ddbd8dd53e4a4a..ad7b8e8ffbaf85 100644 --- a/src/bindings/python/tests_compatibility/test_onnx/test_ops_unary.py +++ b/src/bindings/python/tests_compatibility/test_onnx/test_ops_unary.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import onnx import onnx.mapping @@ -210,6 +212,8 @@ def hardmax_2d(data): assert np.allclose(ng_results, [expected]) +@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122712') def test_hardsigmoid(): def hardsigmoid(data, alpha=0.2, beta=0.5): return np.clip(alpha * data + beta, 0, 1) @@ -447,6 +451,8 @@ def test_cast_errors(): @pytest.mark.parametrize("value_type", [pytest.param(np.float64), pytest.param(np.float32)]) +@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122712') def test_constant(value_type): values = np.random.randn(5, 5).astype(value_type) node = onnx.helper.make_node( diff --git a/src/core/include/openvino/op/constant.hpp b/src/core/include/openvino/op/constant.hpp index e122d36a8223d1..a482fd12bb4c68 100644 --- a/src/core/include/openvino/op/constant.hpp +++ b/src/core/include/openvino/op/constant.hpp @@ -485,9 +485,19 @@ class OPENVINO_API Constant : public Op { if (!std::is_same::value) { OPENVINO_ASSERT( !std::numeric_limits::is_signed || 
std::numeric_limits::lowest() <= c, - "Cannot cast vector from constant. Some values are outside the range."); + "Cannot cast vector from ", + Type, + " constant to ", + element::from(), + ". Some values are outside the range. Example: ", + c); OPENVINO_ASSERT(std::numeric_limits::max() >= c, - "Cannot cast vector from constant. Some values are outside the range."); + "Cannot cast vector from ", + Type, + " constant to ", + element::from(), + ". Some values are outside the range. Example: ", + c); } #if defined(__clang__) # pragma clang diagnostic pop diff --git a/src/frontends/common/src/manager.cpp b/src/frontends/common/src/manager.cpp index 35df484c2cab26..6194fca7583937 100644 --- a/src/frontends/common/src/manager.cpp +++ b/src/frontends/common/src/manager.cpp @@ -20,21 +20,6 @@ class FrontEndManager::Impl { std::mutex m_loading_mutex; std::vector m_plugins; - // Note, static methods below are required to create an order of initialization of static variables - // e.g. if users (not encouraged) created ov::Model globally, we need to ensure proper order of initialization - - /// \return map of shared object per frontend - static std::unordered_map>& get_shared_objects_map() { - static std::unordered_map> shared_objects_map; - return shared_objects_map; - } - - /// \return Mutex to guard access the shared object map - static std::mutex& get_shared_objects_mutex() { - static std::mutex shared_objects_map_mutex; - return shared_objects_map_mutex; - } - public: Impl() { search_all_plugins(); @@ -46,10 +31,6 @@ class FrontEndManager::Impl { auto fe_obj = std::make_shared(); fe_obj->m_shared_object = std::make_shared(plugin.get_so_pointer()); fe_obj->m_actual = plugin.get_creator().m_creator(); - - std::lock_guard guard(get_shared_objects_mutex()); - get_shared_objects_map().emplace(plugin.get_creator().m_name, fe_obj->m_shared_object); - return fe_obj; } @@ -164,6 +145,7 @@ class FrontEndManager::Impl { {".xml", {"ir", "ir"}}, {".onnx", {"onnx", "onnx"}}, {".pb", 
{"tf", "tensorflow"}}, + {".pbtxt", {"tf", "tensorflow"}}, {".tflite", {"tflite", "tensorflow_lite"}}, {".pdmodel", {"paddle", "paddle"}}, // {".ts", {"pytorch", "pytorch"}}, diff --git a/src/frontends/common/src/plugin_loader.cpp b/src/frontends/common/src/plugin_loader.cpp index a044152d8d590d..a98eff766bbc0d 100644 --- a/src/frontends/common/src/plugin_loader.cpp +++ b/src/frontends/common/src/plugin_loader.cpp @@ -16,17 +16,32 @@ #include -#include #include #include #include "openvino/util/file_util.hpp" +#include "openvino/util/log.hpp" #include "openvino/util/shared_object.hpp" #include "plugin_loader.hpp" using namespace ov; using namespace ov::frontend; +// Note, static methods below are required to create an order of initialization of static variables +// e.g. if users (not encouraged) created ov::Model globally, we need to ensure proper order of initialization + +/// \return map of shared object per frontend +std::unordered_map>& ov::frontend::get_shared_objects_map() { + static std::unordered_map> shared_objects_map; + return shared_objects_map; +} + +/// \return Mutex to guard access the shared object map +std::mutex& ov::frontend::get_shared_objects_mutex() { + static std::mutex shared_objects_map_mutex; + return shared_objects_map_mutex; +} + #ifdef OPENVINO_STATIC_LIBRARY # include "ov_frontends.hpp" @@ -131,6 +146,10 @@ bool PluginInfo::load() { m_load_failed = true; return false; } + + std::lock_guard guard(get_shared_objects_mutex()); + get_shared_objects_map().emplace(get_creator().m_name, get_so_pointer()); + return true; } diff --git a/src/frontends/common/src/plugin_loader.hpp b/src/frontends/common/src/plugin_loader.hpp index 93e6a5cc2eb5a3..dccf8ddf7a39f3 100644 --- a/src/frontends/common/src/plugin_loader.hpp +++ b/src/frontends/common/src/plugin_loader.hpp @@ -4,7 +4,12 @@ #pragma once -#include +#include +#include +#include +#include + +#include "openvino/frontend/manager.hpp" #ifdef _WIN32 static const char PathSeparator[] = ";"; @@ 
-15,6 +20,9 @@ static const char PathSeparator[] = ":"; namespace ov { namespace frontend { +std::unordered_map>& get_shared_objects_map(); +std::mutex& get_shared_objects_mutex(); + /// \brief Internal data structure holding by each frontend. Includes library handle and extensions. class FrontEndSharedData { friend inline void add_extension_to_shared_data(std::shared_ptr& obj, diff --git a/src/frontends/onnx/tests/__init__.py b/src/frontends/onnx/tests/__init__.py index 857c3853cf8fd2..87220792d2d349 100644 --- a/src/frontends/onnx/tests/__init__.py +++ b/src/frontends/onnx/tests/__init__.py @@ -127,6 +127,7 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): "Not equal to tolerance") xfail_issue_58033 = xfail_test(reason="Einsum operation misses support for complex ellipsis equations") xfail_issue_58676 = xfail_test(reason="AssertionError: Not equal to tolerance rtol=0.001, atol=1e-07") +skip_issue_58676 = pytest.mark.skip(reason="AssertionError: Not equal to tolerance rtol=0.001, atol=1e-07") xfail_issue_onnx_models_140 = xfail_test(reason="https://github.com/onnx/models/issues/140") xfail_issue_63033 = xfail_test(reason="BatchNormalization: Training mode is not supported") diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py index d1ef686bdd4124..14034898b7c693 100644 --- a/src/frontends/onnx/tests/tests_python/test_backend.py +++ b/src/frontends/onnx/tests/tests_python/test_backend.py @@ -2,6 +2,7 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform import logging import onnx.backend.test @@ -24,6 +25,7 @@ xfail_issue_38735, skip_issue_39658, skip_issue_39658, + skip_issue_58676, xfail_issue_44858, xfail_issue_44965, xfail_issue_45180, @@ -683,6 +685,22 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None ), ] +if platform.system() == 'Darwin': + tests_expected_to_fail.extend([ + ( + skip_issue_58676, + 
"OnnxBackendNodeModelTest.test_mish_expanded_cpu" + ), + ( + skip_issue_58676, + "OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_cpu" + ), + ( + skip_issue_58676, + "OnnxBackendNodeModelTest.test_div_uint8_cpu" + )] + ) + for test_group in tests_expected_to_fail: for test_case in test_group[1:]: expect_fail(f"{test_case}", test_group[0]) diff --git a/src/frontends/paddle/src/decoder_proto.cpp b/src/frontends/paddle/src/decoder_proto.cpp index e25437fcbf4a2e..f286bfcf1f81fc 100644 --- a/src/frontends/paddle/src/decoder_proto.cpp +++ b/src/frontends/paddle/src/decoder_proto.cpp @@ -19,9 +19,9 @@ namespace ov { namespace frontend { namespace paddle { -using namespace ::ov_paddle::framework; +using namespace ::paddle::framework; -ov::element::Type get_ov_type(const ::ov_paddle::framework::proto::VarType_Type& type) { +ov::element::Type get_ov_type(const ::paddle::framework::proto::VarType_Type& type) { static const std::map type_map{ {proto::VarType_Type::VarType_Type_BOOL, ov::element::boolean}, {proto::VarType_Type::VarType_Type_INT16, ov::element::i16}, @@ -189,7 +189,7 @@ std::vector DecoderProto::decode_attribute_helper(const std: namespace { inline std::map map_for_each_input_impl( - const google::protobuf::RepeatedPtrField<::ov_paddle::framework::proto::OpDesc_Var>& c, + const google::protobuf::RepeatedPtrField<::paddle::framework::proto::OpDesc_Var>& c, const std::function(const std::string&, size_t)>& func) { size_t idx = 0; std::map res; diff --git a/src/frontends/paddle/src/decoder_proto.hpp b/src/frontends/paddle/src/decoder_proto.hpp index 652b03fd3ea76b..11627c6fba6ab9 100644 --- a/src/frontends/paddle/src/decoder_proto.hpp +++ b/src/frontends/paddle/src/decoder_proto.hpp @@ -23,7 +23,7 @@ namespace ov { namespace frontend { namespace paddle { -ov::element::Type get_ov_type(const ::ov_paddle::framework::proto::VarType_Type& type); +ov::element::Type get_ov_type(const ::paddle::framework::proto::VarType_Type& type); class DecoderProto : 
public paddle::DecoderBase { public: @@ -56,7 +56,7 @@ class DecoderProto : public paddle::DecoderBase { const std::function(const std::string&, size_t)>& func) const; private: - std::vector<::ov_paddle::framework::proto::OpDesc_Attr> decode_attribute_helper(const std::string& name) const; + std::vector<::paddle::framework::proto::OpDesc_Attr> decode_attribute_helper(const std::string& name) const; std::weak_ptr op_place; const std::shared_ptr get_place() const { diff --git a/src/frontends/paddle/src/frontend.cpp b/src/frontends/paddle/src/frontend.cpp index 2bc0ba333bb241..9582fccf6c447f 100644 --- a/src/frontends/paddle/src/frontend.cpp +++ b/src/frontends/paddle/src/frontend.cpp @@ -393,7 +393,7 @@ bool FrontEnd::supported_impl(const std::vector& variants) const { else if (variants[0].is()) { // Validating first stream, it must contain a model auto p_model_stream = variants[0].as(); - ::ov_paddle::framework::proto::ProgramDesc fw; + ::paddle::framework::proto::ProgramDesc fw; return fw.ParseFromIstream(p_model_stream); } return false; diff --git a/src/frontends/paddle/src/input_model.cpp b/src/frontends/paddle/src/input_model.cpp index 1264d983965e5d..287fa5e54ad743 100644 --- a/src/frontends/paddle/src/input_model.cpp +++ b/src/frontends/paddle/src/input_model.cpp @@ -21,7 +21,7 @@ namespace ov { namespace frontend { namespace paddle { -using namespace ::ov_paddle::framework::proto; +using namespace ::paddle::framework::proto; class InputModel::InputModelImpl { public: @@ -279,7 +279,7 @@ void InputModel::InputModelImpl::load_consts(const std::basic_string& folder_ if (!var_desc.persistable()) continue; - FRONT_END_GENERAL_CHECK(var_desc.type().type() == ::ov_paddle::framework::proto::VarType::LOD_TENSOR); + FRONT_END_GENERAL_CHECK(var_desc.type().type() == ::paddle::framework::proto::VarType::LOD_TENSOR); const auto& tensor = var_desc.type().lod_tensor().tensor(); Shape shape(tensor.dims().cbegin(), tensor.dims().cend()); const auto& type = 
get_ov_type(tensor.data_type()); @@ -324,7 +324,7 @@ void InputModel::InputModelImpl::load_consts(std::istream* weight_stream) { if (!var_desc.persistable()) continue; - FRONT_END_GENERAL_CHECK(var_desc.type().type() == ::ov_paddle::framework::proto::VarType::LOD_TENSOR); + FRONT_END_GENERAL_CHECK(var_desc.type().type() == ::paddle::framework::proto::VarType::LOD_TENSOR); FRONT_END_GENERAL_CHECK(weight_stream != nullptr && weight_stream->peek() != EOF, "PaddlePaddle *.pdiparams format weight file doesn't exist!"); /* @@ -350,8 +350,8 @@ void InputModel::InputModelImpl::load_consts(std::istream* weight_stream) { std::unique_ptr buf(new char[size]); weight_stream->read(reinterpret_cast(buf.get()), size); - std::unique_ptr<::ov_paddle::framework::proto::VarType_TensorDesc> tensor_desc( - new ::ov_paddle::framework::proto::VarType_TensorDesc()); + std::unique_ptr<::paddle::framework::proto::VarType_TensorDesc> tensor_desc( + new ::paddle::framework::proto::VarType_TensorDesc()); tensor_desc->ParseFromArray(buf.get(), size); Shape shape(tensor_desc->dims().cbegin(), tensor_desc->dims().cend()); const auto& type = get_ov_type(tensor_desc->data_type()); diff --git a/src/frontends/paddle/src/place.cpp b/src/frontends/paddle/src/place.cpp index ab5232018a6a9f..7af2bc07bbf5c5 100644 --- a/src/frontends/paddle/src/place.cpp +++ b/src/frontends/paddle/src/place.cpp @@ -29,12 +29,12 @@ bool Place::is_output() const { } OpPlace::OpPlace(const ov::frontend::InputModel& input_model, - const ::ov_paddle::framework::proto::OpDesc& op_desc, + const ::paddle::framework::proto::OpDesc& op_desc, const std::vector& names) : Place(input_model, names), m_op_desc(op_desc) {} -OpPlace::OpPlace(const ov::frontend::InputModel& input_model, const ::ov_paddle::framework::proto::OpDesc& op_desc) +OpPlace::OpPlace(const ov::frontend::InputModel& input_model, const ::paddle::framework::proto::OpDesc& op_desc) : OpPlace(input_model, op_desc, {}) {} const std::map>>& OpPlace::get_output_ports() const 
{ @@ -58,7 +58,7 @@ std::shared_ptr OpPlace::get_input_port_paddle(const std::string& i return m_input_ports.at(inputName)[inputPortIndex]; } -const ::ov_paddle::framework::proto::OpDesc& OpPlace::get_desc() const { +const ::paddle::framework::proto::OpDesc& OpPlace::get_desc() const { return m_op_desc; } @@ -207,11 +207,11 @@ Place::Ptr OpPlace::get_target_tensor(int outputPortIndex) const { TensorPlace::TensorPlace(const ov::frontend::InputModel& input_model, const std::vector& names, - const ::ov_paddle::framework::proto::VarDesc& var_desc) + const ::paddle::framework::proto::VarDesc& var_desc) : Place(input_model, names), m_var_desc(var_desc) { const auto& var_type = var_desc.type(); - if (var_type.type() == ::ov_paddle::framework::proto::VarType::LOD_TENSOR) { + if (var_type.type() == ::paddle::framework::proto::VarType::LOD_TENSOR) { const auto& tensor_desc = var_type.lod_tensor().tensor(); m_type = get_ov_type(tensor_desc.data_type()); m_pshape = PartialShape(std::vector(tensor_desc.dims().begin(), tensor_desc.dims().end())); @@ -219,7 +219,7 @@ TensorPlace::TensorPlace(const ov::frontend::InputModel& input_model, } TensorPlace::TensorPlace(const ov::frontend::InputModel& input_model, - const ::ov_paddle::framework::proto::VarDesc& var_desc) + const ::paddle::framework::proto::VarDesc& var_desc) : TensorPlace(input_model, {var_desc.name()}, var_desc) {} std::vector TensorPlace::get_consuming_ports() const { @@ -250,7 +250,7 @@ void TensorPlace::add_consuming_port(const std::shared_ptr& in_port m_consuming_ports.push_back(in_port); } -const ::ov_paddle::framework::proto::VarDesc& TensorPlace::get_desc() const { +const ::paddle::framework::proto::VarDesc& TensorPlace::get_desc() const { return m_var_desc; } diff --git a/src/frontends/paddle/src/place.hpp b/src/frontends/paddle/src/place.hpp index e09112dd42f295..fc2fe9eb29efe0 100644 --- a/src/frontends/paddle/src/place.hpp +++ b/src/frontends/paddle/src/place.hpp @@ -7,7 +7,7 @@ #include "input_model.hpp" 
#include "openvino/frontend/manager.hpp" -namespace ov_paddle { +namespace paddle { namespace framework { namespace proto { class OpDesc; @@ -15,7 +15,7 @@ class VarDesc; } // namespace proto } // namespace framework -} // namespace ov_paddle +} // namespace paddle namespace ov { namespace frontend { @@ -101,10 +101,10 @@ class OutPortPlace : public Place { class OpPlace : public Place { public: OpPlace(const ov::frontend::InputModel& input_model, - const ::ov_paddle::framework::proto::OpDesc& op_desc, + const ::paddle::framework::proto::OpDesc& op_desc, const std::vector& names); - OpPlace(const ov::frontend::InputModel& input_model, const ::ov_paddle::framework::proto::OpDesc& op_desc); + OpPlace(const ov::frontend::InputModel& input_model, const ::paddle::framework::proto::OpDesc& op_desc); void add_in_port(const std::shared_ptr& input, const std::string& name); void add_out_port(const std::shared_ptr& output, const std::string& name); @@ -114,7 +114,7 @@ class OpPlace : public Place { const std::map>>& get_input_ports() const; std::shared_ptr get_output_port_paddle(const std::string& outputName, int outputPortIndex) const; std::shared_ptr get_input_port_paddle(const std::string& inputName, int inputPortIndex) const; - const ::ov_paddle::framework::proto::OpDesc& get_desc() const; + const ::paddle::framework::proto::OpDesc& get_desc() const; const std::shared_ptr get_decoder() const; void set_decoder(const std::shared_ptr op_decoder); @@ -152,7 +152,7 @@ class OpPlace : public Place { Ptr get_target_tensor(const std::string& outputName, int outputPortIndex) const override; private: - const ::ov_paddle::framework::proto::OpDesc& m_op_desc; // TODO: to conceal it behind decoder. + const ::paddle::framework::proto::OpDesc& m_op_desc; // TODO: to conceal it behind decoder. 
std::shared_ptr m_op_decoder; std::map>> m_input_ports; std::map>> m_output_ports; @@ -162,9 +162,9 @@ class TensorPlace : public Place { public: TensorPlace(const ov::frontend::InputModel& input_model, const std::vector& names, - const ::ov_paddle::framework::proto::VarDesc& var_desc); + const ::paddle::framework::proto::VarDesc& var_desc); - TensorPlace(const ov::frontend::InputModel& input_model, const ::ov_paddle::framework::proto::VarDesc& var_desc); + TensorPlace(const ov::frontend::InputModel& input_model, const ::paddle::framework::proto::VarDesc& var_desc); void add_producing_port(const std::shared_ptr& out_port); void add_consuming_port(const std::shared_ptr& in_port); @@ -182,7 +182,7 @@ class TensorPlace : public Place { void set_element_type(const element::Type& type) { m_type = type; } - const ::ov_paddle::framework::proto::VarDesc& get_desc() const; + const ::paddle::framework::proto::VarDesc& get_desc() const; // External usage Ptr get_producing_operation() const override; @@ -192,7 +192,7 @@ class TensorPlace : public Place { bool is_equal_data(const Ptr& another) const override; private: - const ::ov_paddle::framework::proto::VarDesc& m_var_desc; + const ::paddle::framework::proto::VarDesc& m_var_desc; PartialShape m_pshape; element::Type m_type; diff --git a/src/frontends/paddle/src/proto/framework.proto b/src/frontends/paddle/src/proto/framework.proto index 4fc9c26c47e9ca..22112cba29667d 100644 --- a/src/frontends/paddle/src/proto/framework.proto +++ b/src/frontends/paddle/src/proto/framework.proto @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto2"; -package ov_paddle.framework.proto; +package paddle.framework.proto; option optimize_for = LITE_RUNTIME; // Added by Intel Corporation 2021-2022 diff --git a/src/frontends/pytorch/README.md b/src/frontends/pytorch/README.md new file mode 100644 index 00000000000000..92a38d693d9b21 --- /dev/null +++ b/src/frontends/pytorch/README.md @@ -0,0 +1,141 @@ +# OpenVINO PyTorch Frontend + +The PyTorch Frontend (PT FE) is a C++ based OpenVINO Frontend component that is +responsible for reading and converting a PyTorch model to an `ov::Model` object +that can be further serialized into the Intermediate Representation (IR) format. + +## Key Contacts + +People from the [openvino-pytorch-frontend-maintainers](https://github.com/orgs/openvinotoolkit/teams/openvino-pytorch-frontend-maintainers) +have the rights to approve and merge PRs to the PyTorch Frontend component. +They can assist with any questions about the component. + +## Components + +The structure of OpenVINO PyTorch Frontend sources includes the following +directories: + +* [include](./include) is a public frontend API. +* [src](./src/) folder contains the sources of the component. + +## Architecture + +OpenVINO PyTorch Frontend is a C++ component that uses [TorchScriptPythonDecoder](../../bindings/python/src/openvino/frontend/pytorch/ts_decoder.py) +in Python code to parse a PyTorch model from a Python object. Usually, the frontend is +used inside [openvino.convert_model](../../../tools/ovc) in Python code or inside +openvino backend in `torch.compile_model`, in which case `TorchFXPythonDecoder` +is used to decode `torch.fx.graph`. The entire model conversion workflow can be +represented by the following diagram. 
+ +```mermaid +flowchart TD + A[(torch.nn.Module)] --> torch.compile + subgraph torch.compile + subgraph TorchFXPythonDecoder + torch.fx.graph_module.GraphModule + end + TorchFXPythonDecoder --> E("pytorch::FrontEnd::load()") + E -->|ov::InputModel| F("pytorch::FrontEnd::convert()") + F --> G[(ov::Model)] + end + A[(torch.nn.Module)] --> openvino.convert_model + subgraph openvino.convert_model + subgraph TorchScriptPythonDecoder + torch.jit.trace ~~~ torch.jit.script + end + TorchScriptPythonDecoder --> B("pytorch::FrontEnd::load()") + B -->|ov::InputModel| C("pytorch::FrontEnd::convert()") + end + openvino.convert_model --> D[(ov::Model)] +``` + +OpenVINO PyTorch Frontend supports extensions. To add an extension, use +`ov::frontend::pytorch::Frontend::add_extension()` API. +The following extension types are supported: + +* `ov::frontend::tensorflow::ConversionExtension` or `ov::frontend::ConversionExtension` - add a new Loader into the conversion pipeline. +* `ov::TelemetryExtension` - enable telemetry for the frontend. +* `ov::BaseOpExtension` - enable support for a custom operation. +* `ov::detail::SOExtension` - allow support for `ov::BaseOpExtension` extensions loaded from an external library. + +## How to Implement Support for a New PyTorch Operation + +PyTorch conversion into the OpenVINO opset operations consists of two stages: +1. Conversion of PyTorch operations to OpenVINO opset using [translators](./src/op/), + which directly transforms a PyTorch operation into a sub-graph of the OpenVINO + opset. This is a 1->N conversion. +2. [Internal Transformations](./src/transforms) that transform a sub-graph of + operations into a sub-graph of the OpenVINO opset. This is an N->N conversion. + +### Operation Translation + +Most PyTorch operations can be converted by a single `translator`. 
The +dictionary of `translators` is placed in the [op_table.cpp](./src/op_table.cpp) +file and each translator is located in the [op](../tensorflow_common/src/op/) +directory: + +https://github.com/openvinotoolkit/openvino/blob/491454103ea2f29b242587c6084c19868a879a82/src/frontends/pytorch/src/op_table.cpp#L222-L227 + +The main rules for translator implementation: +1. Support dynamic shapes and ranks, undefined types, including future support of new types, such as strings and complex numbers. +2. Try to maintain the same algorithmic complexity of the decomposition. Fewer operations are usually better. +3. Use the latest OpenVINO opset version for the translation. +4. Use helper routines for operation checks and graph construction from `utils.hpp`. +5. Call `NodeContext::mark_mode()` for each created node. + +#### Inplace and Mutable Operations + +Some PyTorch operations modify the input tensor rather than the output. For example, +`aten::add` writes the result of addition to the output, but `aten::add_` writes the result +to its first input. To correctly convert such an operation: +* Ensure that the output tensor produced by the translation has the same type and shape as the initial input. +* Call `NodeContext::mutate_input()` to change the input tensor with the new value. + +#### PtFrameworkNode Primitive + +`PtFrameworkNode` is used to represent unconverted operation from the original +model. You can use `FrontEnd::convert_partially()` instead of `Frontend::convert()` +to get an `ov::Model` containing unconverted operations. + +#### Operations Accepting Strings + +At the moment, OpenVINO core does not support strings. However, since strings in models are usually constants, you can extract them as `std::string` directly from Python using `NodeContext::const_input()`. + +#### Operations with lists, tuples, dicts + +These types are also not supported by OpenVINO core and generally require +implementing transformation for N->N conversion. 
However, in some simple cases, lists +and tuples can be processed. Helpers for working with lists can be found in `utils.hpp`. +For example, `get_list_as_outputs` enables you to get list elements to work with them +in the translator or transformation. + +### Internal Transformations + +In rare cases, converting PyTorch operations requires transformation. The main +difference between transformation and translation is that transformation works on the graph rather +than on the `NodeContext` of a single operation. This means that some functionality +provided by `NodeContext` is not accessible in transformation and usually +requires working with `PtFramworkNode` directly. [General rules](https://docs.openvino.ai/2023.1/openvino_docs_transformations.html) +for writing transformations also apply to PT FE transformations. + +### PyTorch Frontend Layer Tests + +The layer tests are Python-based tests that check if a PyTorch operation is +supported by PT FE. The testing pipeline of the layer tests consists of four +steps: +1. Create a simple model containing the PyTorch operation to be tested. +2. Convert this model into an OpenVINO Model. +3. Infer the original model using PyTorch and infer the OpenVINO Model. +4. Compare the inference results between both frameworks. + +To set up the environment for running the layer tests, follow these [instructions](../../../tests/layer_tests/README.md). 
+ +To test the entire suite of the PyTorch operation set support, run the following command: +```bash +python -m pytest layer_tests/pytorch_tests +``` + +## See Also + * [OpenVINO README](../../../README.md) + * [OpenVINO Core Components](../../README.md) + * [Developer documentation](../../../docs/dev/index.md) diff --git a/src/frontends/pytorch/include/openvino/frontend/pytorch/decoder.hpp b/src/frontends/pytorch/include/openvino/frontend/pytorch/decoder.hpp index 066c203e3a1938..d5878783c314af 100644 --- a/src/frontends/pytorch/include/openvino/frontend/pytorch/decoder.hpp +++ b/src/frontends/pytorch/include/openvino/frontend/pytorch/decoder.hpp @@ -40,6 +40,9 @@ class TorchDecoder : public IDecoder { // Return shape if inputs has torch::Tensor type in the original model, otherwise returns the shape [] of a scalar virtual PartialShape get_input_shape(size_t index) const = 0; + // Return strides if inputs has torch::Tensor type in original model, otherwise return []. + virtual const std::vector& get_input_strides(size_t index) const = 0; + // Return element::Type when it the original type can be represented, otherwise returns PT-specific data type object // (see custom_type.hpp) virtual Any get_input_type(size_t index) const = 0; diff --git a/src/frontends/pytorch/src/op/as_strided.cpp b/src/frontends/pytorch/src/op/as_strided.cpp new file mode 100644 index 00000000000000..5d1dfe38bdaa17 --- /dev/null +++ b/src/frontends/pytorch/src/op/as_strided.cpp @@ -0,0 +1,106 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/scatter_update.hpp" +#include "openvino/op/tile.hpp" +#include "openvino/op/transpose.hpp" +#include "utils.hpp" + 
+namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov::op; +bool compare_strides(const std::tuple& a, const std::tuple& b) { + return std::get<0>(a) > std::get<0>(b); +} +OutputVector translate_as_strided(const NodeContext& context) { + // "aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)" + num_inputs_check(context, 3, 4); + auto decoder = context.get_decoder(); + auto input = context.get_input(0); + auto const_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1})); + auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + auto input_strides = decoder->get_input_strides(0); + FRONT_END_OP_CONVERSION_CHECK(input_strides.size() != 0, + "aten::as_strided: Couldn't retrive input stride information from torchscript."); + + std::vector idxs(input_strides.size()); + iota(idxs.begin(), idxs.end(), 0); + std::vector> stride_idxs(idxs.size()); + std::for_each(idxs.rbegin(), idxs.rend(), [&](size_t& idx) { + stride_idxs[idx] = {input_strides[idx], idx}; + }); + + std::sort(stride_idxs.begin(), stride_idxs.end(), compare_strides); + std::vector transpose_idx(idxs.size()); + int transpose_counter = 0; + std::for_each(stride_idxs.begin(), stride_idxs.end(), [&](std::tuple& pair) { + transpose_idx[transpose_counter] = uint64_t(std::get<1>(pair)); + transpose_counter++; + }); + auto transpose_idx_const = + context.mark_node(v0::Constant::create(element::i32, Shape{transpose_idx.size()}, transpose_idx)); + auto transposed_input = context.mark_node(std::make_shared(input, transpose_idx_const)); + auto flat_input = context.mark_node(std::make_shared(transposed_input, const_neg_1, false)); + std::deque> sizes; + std::deque> strides; + if (std::dynamic_pointer_cast(context.get_input_from_visible_context(1).get_node_shared_ptr())) { + auto 
input_vector = context.const_input>(1); + std::for_each(input_vector.rbegin(), input_vector.rend(), [&](int64_t input_val) { + auto const_input = context.mark_node(v0::Constant::create(element::i32, Shape{}, {input_val})); + sizes.push_front(const_input); + }); + } else { + sizes = get_list_as_outputs(context.get_input(1)); + } + if (std::dynamic_pointer_cast(context.get_input_from_visible_context(2).get_node_shared_ptr())) { + auto input_vector = context.const_input>(2); + std::for_each(input_vector.rbegin(), input_vector.rend(), [&](int64_t input_val) { + auto const_input = context.mark_node(v0::Constant::create(element::i32, Shape{}, {input_val})); + strides.push_front(const_input); + }); + } else { + strides = get_list_as_outputs(context.get_input(2)); + } + auto offset = const_0->output(0); + if (!context.input_is_none(3)) { + offset = context.get_input(3); + } + FRONT_END_OP_CONVERSION_CHECK(sizes.size() == strides.size(), + "aten::as_strided: Vector for strides and sizes need to have equal length."); + auto strides_size = strides.size() - 1; + auto i = 0; + auto strides_length_const = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {strides.size()})); + auto ones_strides_len = context.mark_node(std::make_shared(const_1, strides_length_const)); + auto indices = const_0; + std::for_each(strides.rbegin(), strides.rend(), [&](Output& stride) { + auto const_num_iter = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {strides_size - i})); + stride = context.mark_node(std::make_shared(stride, element::i32)); + auto size = sizes.at(strides_size - i); + auto range = context.mark_node(std::make_shared(const_0, size, const_1, element::i32)); + range = context.mark_node(std::make_shared(range, stride)); + auto iteration_shape = context.mark_node( + std::make_shared(ones_strides_len, const_num_iter, const_neg_1, const_0)); + range = context.mark_node(std::make_shared(range, iteration_shape, false)); + indices = 
context.mark_node(std::make_shared(indices, range)); + i++; + }); + indices = context.mark_node(std::make_shared(indices, offset)); + auto gather = context.mark_node(std::make_shared(flat_input, indices, const_0)); + return {gather}; +}; +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/bitwise.cpp b/src/frontends/pytorch/src/op/bitwise.cpp index 6e3b1fe5f49ee4..8cbae192ca6bef 100644 --- a/src/frontends/pytorch/src/op/bitwise.cpp +++ b/src/frontends/pytorch/src/op/bitwise.cpp @@ -17,7 +17,7 @@ OutputVector translate_bitwise_not(const NodeContext& context) { num_inputs_check(context, 1, 2); auto x = context.get_input(0); FRONT_END_OP_CONVERSION_CHECK(x.get_element_type().compatible(element::boolean), - "aten::bitwise_not suppored only for boolean input"); + "aten::bitwise_not supported only for boolean input"); auto not_x = context.mark_node(std::make_shared(x)); if (!context.input_is_none(1)) { context.mutate_input(1, not_x); @@ -30,7 +30,7 @@ OutputVector translate_bitwise_and(const NodeContext& context) { auto x = context.get_input(0); auto y = context.get_input(1); FRONT_END_OP_CONVERSION_CHECK(x.get_element_type().compatible(element::boolean), - "aten::bitwise_not suppored only for boolean input"); + "aten::bitwise_not supported only for boolean input"); auto and_x = context.mark_node(std::make_shared(x, y)); return {and_x}; }; @@ -40,7 +40,7 @@ OutputVector translate_bitwise_or(const NodeContext& context) { auto x = context.get_input(0); auto y = context.get_input(1); FRONT_END_OP_CONVERSION_CHECK(x.get_element_type().compatible(element::boolean), - "aten::bitwise_not suppored only for boolean input"); + "aten::bitwise_not supported only for boolean input"); auto or_x = context.mark_node(std::make_shared(x, y)); return {or_x}; }; diff --git a/src/frontends/pytorch/src/op/scaled_dot_product_attention.cpp b/src/frontends/pytorch/src/op/scaled_dot_product_attention.cpp index 
735324405d1f11..82231472e401be 100644 --- a/src/frontends/pytorch/src/op/scaled_dot_product_attention.cpp +++ b/src/frontends/pytorch/src/op/scaled_dot_product_attention.cpp @@ -15,6 +15,7 @@ #include "openvino/op/matmul.hpp" #include "openvino/op/multiply.hpp" #include "openvino/op/range.hpp" +#include "openvino/op/reshape.hpp" #include "openvino/op/select.hpp" #include "openvino/op/shape_of.hpp" #include "openvino/op/softmax.hpp" @@ -22,6 +23,7 @@ #include "openvino/op/squeeze.hpp" #include "openvino/op/transpose.hpp" #include "openvino/op/unsqueeze.hpp" +#include "openvino/op/util/framework_node.hpp" #include "utils.hpp" namespace ov { @@ -31,10 +33,7 @@ namespace op { using namespace ov::op; -OutputVector translate_scaled_dot_product_attention(const NodeContext& context) { - // aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float - // dropout_p=0., bool is_causal=False) - num_inputs_check(context, 6, 6); +std::shared_ptr translate_scaled_dot_product_attention_common(const NodeContext& context) { auto query = context.get_input(0); auto key = context.get_input(1); auto value = context.get_input(2); @@ -68,7 +67,10 @@ OutputVector translate_scaled_dot_product_attention(const NodeContext& context) minus_inf = context.mark_node(std::make_shared(minus_inf, scaled_atten)); // two types of masks are supported. A boolean mask where a value of True indicates that the element should take // part in attention. A float mask of the same type as query, key, value that is added to the attention score. 
- auto is_causal = context.const_input(5); + auto is_causal = false; + if (!context.input_is_none(5)) { + is_causal = context.const_input(5); + } if (is_causal || !context.input_is_none(3)) { Output mask; Output atten_mask; @@ -100,10 +102,30 @@ OutputVector translate_scaled_dot_product_attention(const NodeContext& context) scaled_atten = context.mark_node(std::make_shared(scaled_atten, atten_mask)); } scaled_atten = context.mark_node(std::make_shared(scaled_atten, -1)); - return {context.mark_node(std::make_shared(scaled_atten, value))}; + return context.mark_node(std::make_shared(scaled_atten, value)); +}; + +OutputVector translate_scaled_dot_product_attention(const NodeContext& context) { + // aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float + // dropout_p=0., bool is_causal=False) + num_inputs_check(context, 6, 6); + return {translate_scaled_dot_product_attention_common(context)}; +}; + +OutputVector translate_scaled_dot_product_attention_fx(const NodeContext& context) { + // aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float + // dropout_p=0., bool is_causal=False) + num_inputs_check(context, 3, 6); + auto output = translate_scaled_dot_product_attention_common(context); + // TODO: scaled_dot_product_flash_attention has 9 outputs but for most cases only + // the first input is used. Rest of the outputs should be returned properly as + // needed. 
+ ov::OutputVector out_vec; + out_vec.push_back(output); + return {context.mark_node(make_list_construct(out_vec))}; }; } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 75665ffe8d4d14..d9ac0aff6af2dc 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -34,6 +34,7 @@ OP_CONVERTER(translate_argmax); OP_CONVERTER(translate_argsort); OP_CONVERTER(translate_argmax); OP_CONVERTER(translate_argmin); +OP_CONVERTER(translate_as_strided); OP_CONVERTER(translate_as_tensor); OP_CONVERTER(translate_avg_poolnd); OP_CONVERTER(translate_bool); @@ -213,6 +214,7 @@ OP_CONVERTER(translate_group_norm_fx); OP_CONVERTER(translate_index_fx); OP_CONVERTER(translate_layer_norm_fx); OP_CONVERTER(translate_max_poolnd_fx); +OP_CONVERTER(translate_scaled_dot_product_attention_fx); OP_CONVERTER(translate_slice_fx); OP_CONVERTER(translate_softmax_fx); OP_CONVERTER(translate_transpose_fx); @@ -255,6 +257,7 @@ const std::map get_supported_ops_ts() { {"aten::argmax", op::translate_argmax}, {"aten::argmin", op::translate_argmin}, {"aten::argsort", op::translate_argsort}, + {"aten::as_strided", op::translate_as_strided}, {"aten::as_tensor", op::translate_as_tensor}, {"aten::asin", op::translate_1to1_match_1_inputs_with_fp32_type_alignment}, {"aten::asin_", op::inplace_op>}, @@ -555,6 +558,7 @@ const std::map get_supported_ops_fx() { {"aten.arange.default", op::translate_arange_fx}, {"aten.argmax.default", op::translate_argmax}, {"aten.avg_pool2d.default", op::translate_avg_poolnd}, + {"aten.baddbmm.default", op::translate_addmm}, {"aten.bitwise_and.Tensor", op::translate_bitwise_and}, {"aten.bmm.default", op::translate_1to1_match_2_inputs_align_types}, {"aten.cat.default", op::translate_cat_fx}, @@ -581,6 +585,7 @@ const std::map get_supported_ops_fx() { 
{"aten.hardswish_.default", op::inplace_op>}, {"aten.hardtanh_.default", op::inplace_op}, {"aten.index.Tensor", op::translate_index_fx}, + {"aten.leaky_relu_.default", op::inplace_op>}, {"aten.lift_fresh_copy.default", op::skip_node}, {"aten.linalg_vector_norm.default", op::translate_linalg_vector_norm}, {"aten.log.default", op::translate_log}, @@ -603,6 +608,7 @@ const std::map get_supported_ops_fx() { {"aten.relu.default", op::translate_1to1_match_1_inputs}, {"aten.relu_.default", op::inplace_op>}, {"aten.rsub.Scalar", op::translate_rsub}, + {"aten._scaled_dot_product_flash_attention.default", op::translate_scaled_dot_product_attention_fx}, {"aten.select.int", op::translate_select}, {"aten.sigmoid.default", op::translate_1to1_match_1_inputs}, {"aten.silu.default", op::translate_1to1_match_1_inputs}, diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index 1635296e612dff..b4a37118961ab7 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -158,6 +158,9 @@ class DummyDecoder : public TorchDecoder { virtual PartialShape get_input_shape(size_t index) const override { FRONT_END_NOT_IMPLEMENTED(get_input_shape); } + virtual const std::vector& get_input_strides(size_t index) const override { + FRONT_END_NOT_IMPLEMENTED(get_input_strides); + } virtual Any get_input_type(size_t index) const override { FRONT_END_NOT_IMPLEMENTED(get_input_type); } diff --git a/src/frontends/tensorflow/README.md b/src/frontends/tensorflow/README.md index 7fc421cd92c7f4..4a48203a2b41dc 100644 --- a/src/frontends/tensorflow/README.md +++ b/src/frontends/tensorflow/README.md @@ -205,6 +205,7 @@ py.test tensorflow_tests/test_tf_Unique.py --use_new_frontend ``` ## See also + * [Supported Operations](./docs/supported_ops.md) * [OpenVINO README](../../../README.md) * [OpenVINO Core Components](../../README.md) * [Developer documentation](../../../docs/dev/index.md) diff --git 
a/src/frontends/tensorflow/docs/supported_ops.md b/src/frontends/tensorflow/docs/supported_ops.md new file mode 100644 index 00000000000000..5794e3f16653fd --- /dev/null +++ b/src/frontends/tensorflow/docs/supported_ops.md @@ -0,0 +1,1406 @@ +# TensorFlow Operations Supported by OpenVINO TensorFlow Frontend + +Here is a table of operations supported by the TensorFlow Frontend from [tf.raw_ops](https://www.tensorflow.org/api_docs/python/tf/raw_ops). +A "supported operation" is one that TensorFlow Frontend can convert to the OpenVINO representation. + +| Operation Name | Supported | Limitation | +|---------------------------------------------------------|-------------------------------|-------------------------------| +| Abort | NO | | +| Abs | YES | | +| AccumulateNV2 | NO | | +| AccumulatorApplyGradient | NO | | +| AccumulatorNumAccumulated | NO | | +| AccumulatorSetGlobalStep | NO | | +| AccumulatorTakeGradient | NO | | +| Acos | YES | | +| Acosh | YES | | +| Add | YES | | +| AddManySparseToTensorsMap | NO | | +| AddN | YES | | +| AddSparseToTensorsMap | NO | | +| AddV2 | YES | | +| AdjustContrast | NO | | +| AdjustContrastv2 | YES | | +| AdjustHue | NO | | +| AdjustSaturation | NO | | +| All | YES | | +| AllCandidateSampler | NO | | +| AllToAll | NO | | +| Angle | NO | | +| AnonymousHashTable | NO | | +| AnonymousIterator | NO | | +| AnonymousIteratorV2 | NO | | +| AnonymousIteratorV3 | NO | | +| AnonymousMemoryCache | NO | | +| AnonymousMultiDeviceIterator | NO | | +| AnonymousMultiDeviceIteratorV3 | NO | | +| AnonymousMutableDenseHashTable | NO | | +| AnonymousMutableHashTable | NO | | +| AnonymousMutableHashTableOfTensors | NO | | +| AnonymousRandomSeedGenerator | NO | | +| AnonymousSeedGenerator | NO | | +| Any | YES | | +| ApplyAdaMax | NO | | +| ApplyAdadelta | NO | | +| ApplyAdagrad | NO | | +| ApplyAdagradDA | NO | | +| ApplyAdagradV2 | NO | | +| ApplyAdam | NO | | +| ApplyAddSign | NO | | +| ApplyCenteredRMSProp | NO | | +| ApplyFtrl | NO | | +| 
ApplyFtrlV2 | NO | | +| ApplyGradientDescent | NO | | +| ApplyMomentum | NO | | +| ApplyPowerSign | NO | | +| ApplyProximalAdagrad | NO | | +| ApplyProximalGradientDescent | NO | | +| ApplyRMSProp | NO | | +| ApproxTopK | NO | | +| ApproximateEqual | NO | | +| ArgMax | YES | | +| ArgMin | YES | | +| AsString | NO | | +| Asin | YES | | +| Asinh | YES | | +| Assert | YES | | +| AssertCardinalityDataset | NO | | +| AssertNextDataset | NO | | +| AssertPrevDataset | NO | | +| Assign | YES | | +| AssignAdd | NO | | +| AssignAddVariableOp | NO | | +| AssignSub | NO | | +| AssignSubVariableOp | NO | | +| AssignVariableOp | YES | | +| AssignVariableXlaConcatND | NO | | +| Atan | YES | | +| Atan2 | NO | | +| Atanh | YES | | +| AudioSpectrogram | NO | | +| AudioSummary | NO | | +| AudioSummaryV2 | NO | | +| AutoShardDataset | NO | | +| AvgPool | YES | | +| AvgPool3D | YES | | +| AvgPool3DGrad | NO | | +| AvgPoolGrad | NO | | +| BandedTriangularSolve | NO | | +| Barrier | NO | | +| BarrierClose | NO | | +| BarrierIncompleteSize | NO | | +| BarrierInsertMany | NO | | +| BarrierReadySize | NO | | +| BarrierTakeMany | NO | | +| Batch | NO | | +| BatchCholesky | NO | | +| BatchCholeskyGrad | NO | | +| BatchDataset | NO | | +| BatchDatasetV2 | NO | | +| BatchFFT | NO | | +| BatchFFT2D | NO | | +| BatchFFT3D | NO | | +| BatchFunction | NO | | +| BatchIFFT | NO | | +| BatchIFFT2D | NO | | +| BatchIFFT3D | NO | | +| BatchMatMul | YES | | +| BatchMatMulV2 | YES | | +| BatchMatMulV3 | YES | | +| BatchMatrixBandPart | NO | | +| BatchMatrixDeterminant | NO | | +| BatchMatrixDiag | NO | | +| BatchMatrixDiagPart | NO | | +| BatchMatrixInverse | NO | | +| BatchMatrixSetDiag | NO | | +| BatchMatrixSolve | NO | | +| BatchMatrixSolveLs | NO | | +| BatchMatrixTriangularSolve | NO | | +| BatchNormWithGlobalNormalization | NO | | +| BatchNormWithGlobalNormalizationGrad | NO | | +| BatchSelfAdjointEig | NO | | +| BatchSelfAdjointEigV2 | NO | | +| BatchSvd | NO | | +| BatchToSpace | NO | | +| 
BatchToSpaceND | YES | | +| BesselI0 | NO | | +| BesselI0e | NO | | +| BesselI1 | NO | | +| BesselI1e | NO | | +| BesselJ0 | NO | | +| BesselJ1 | NO | | +| BesselK0 | NO | | +| BesselK0e | NO | | +| BesselK1 | NO | | +| BesselK1e | NO | | +| BesselY0 | NO | | +| BesselY1 | NO | | +| Betainc | NO | | +| BiasAdd | YES | | +| BiasAddGrad | NO | | +| BiasAddV1 | NO | | +| Bincount | NO | | +| Bitcast | NO | | +| BitwiseAnd | NO | | +| BitwiseOr | NO | | +| BitwiseXor | NO | | +| BlockLSTM | YES | | +| BlockLSTMGrad | NO | | +| BlockLSTMGradV2 | NO | | +| BlockLSTMV2 | NO | | +| BoostedTreesAggregateStats | NO | | +| BoostedTreesBucketize | NO | | +| BoostedTreesCalculateBestFeatureSplit | NO | | +| BoostedTreesCalculateBestFeatureSplitV2 | NO | | +| BoostedTreesCalculateBestGainsPerFeature | NO | | +| BoostedTreesCenterBias | NO | | +| BoostedTreesCreateEnsemble | NO | | +| BoostedTreesCreateQuantileStreamResource | NO | | +| BoostedTreesDeserializeEnsemble | NO | | +| BoostedTreesEnsembleResourceHandleOp | NO | | +| BoostedTreesExampleDebugOutputs | NO | | +| BoostedTreesFlushQuantileSummaries | NO | | +| BoostedTreesGetEnsembleStates | NO | | +| BoostedTreesMakeQuantileSummaries | NO | | +| BoostedTreesMakeStatsSummary | NO | | +| BoostedTreesPredict | NO | | +| BoostedTreesQuantileStreamResourceAddSummaries | NO | | +| BoostedTreesQuantileStreamResourceDeserialize | NO | | +| BoostedTreesQuantileStreamResourceFlush | NO | | +| BoostedTreesQuantileStreamResourceGetBucketBoundaries | NO | | +| BoostedTreesQuantileStreamResourceHandleOp | NO | | +| BoostedTreesSerializeEnsemble | NO | | +| BoostedTreesSparseAggregateStats | NO | | +| BoostedTreesSparseCalculateBestFeatureSplit | NO | | +| BoostedTreesTrainingPredict | NO | | +| BoostedTreesUpdateEnsemble | NO | | +| BoostedTreesUpdateEnsembleV2 | NO | | +| BroadcastArgs | YES | | +| BroadcastGradientArgs | NO | | +| BroadcastTo | YES | | +| Bucketize | YES | | +| BytesProducedStatsDataset | NO | | +| 
CSRSparseMatrixComponents | NO | | +| CSRSparseMatrixToDense | NO | | +| CSRSparseMatrixToSparseTensor | NO | | +| CSVDataset | NO | | +| CSVDatasetV2 | NO | | +| CTCBeamSearchDecoder | NO | | +| CTCGreedyDecoder | YES | | +| CTCLoss | YES | | +| CTCLossV2 | NO | | +| CacheDataset | NO | | +| CacheDatasetV2 | NO | | +| Case | NO | | +| Cast | YES | | +| Ceil | YES | | +| CheckNumerics | YES | | +| CheckNumericsV2 | YES | | +| Cholesky | NO | | +| CholeskyGrad | NO | | +| ChooseFastestBranchDataset | NO | | +| ChooseFastestDataset | NO | | +| ClipByValue | YES | | +| CloseSummaryWriter | NO | | +| CollectiveAllToAllV2 | NO | | +| CollectiveAllToAllV3 | NO | | +| CollectiveAssignGroupV2 | NO | | +| CollectiveBcastRecv | NO | | +| CollectiveBcastRecvV2 | NO | | +| CollectiveBcastSend | NO | | +| CollectiveBcastSendV2 | NO | | +| CollectiveGather | NO | | +| CollectiveGatherV2 | NO | | +| CollectiveInitializeCommunicator | NO | | +| CollectivePermute | NO | | +| CollectiveReduce | NO | | +| CollectiveReduceScatterV2 | NO | | +| CollectiveReduceV2 | NO | | +| CollectiveReduceV3 | NO | | +| CombinedNonMaxSuppression | NO | | +| Complex | NO | | +| ComplexAbs | NO | | +| CompositeTensorVariantFromComponents | NO | | +| CompositeTensorVariantToComponents | NO | | +| CompressElement | NO | | +| ComputeAccidentalHits | NO | | +| ComputeBatchSize | NO | | +| Concat | YES | | +| ConcatOffset | NO | | +| ConcatV2 | YES | | +| ConcatenateDataset | NO | | +| ConditionalAccumulator | NO | | +| ConfigureDistributedTPU | NO | | +| ConfigureTPUEmbedding | NO | | +| Conj | NO | | +| ConjugateTranspose | NO | | +| Const | YES | | +| ConsumeMutexLock | NO | | +| ControlTrigger | NO | | +| Conv | NO | | +| Conv2D | YES | | +| Conv2DBackpropFilter | NO | | +| Conv2DBackpropFilterV2 | NO | | +| Conv2DBackpropInput | YES | | +| Conv2DBackpropInputV2 | NO | | +| Conv3D | YES | | +| Conv3DBackpropFilter | NO | | +| Conv3DBackpropFilterV2 | NO | | +| Conv3DBackpropInput | NO | | +| 
Conv3DBackpropInputV2 | YES | | +| Copy | NO | | +| CopyHost | NO | | +| Cos | YES | | +| Cosh | YES | | +| CountUpTo | NO | | +| CreateSummaryDbWriter | NO | | +| CreateSummaryFileWriter | NO | | +| CropAndResize | YES | | +| CropAndResizeGradBoxes | NO | | +| CropAndResizeGradImage | NO | | +| Cross | NO | | +| CrossReplicaSum | NO | | +| CudnnRNN | NO | | +| CudnnRNNBackprop | NO | | +| CudnnRNNBackpropV2 | NO | | +| CudnnRNNBackpropV3 | NO | | +| CudnnRNNCanonicalToParams | NO | | +| CudnnRNNCanonicalToParamsV2 | NO | | +| CudnnRNNParamsSize | NO | | +| CudnnRNNParamsToCanonical | NO | | +| CudnnRNNParamsToCanonicalV2 | NO | | +| CudnnRNNV2 | NO | | +| CudnnRNNV3 | NO | | +| Cumprod | NO | | +| Cumsum | YES | | +| CumulativeLogsumexp | NO | | +| DataFormatDimMap | NO | | +| DataFormatVecPermute | NO | | +| DataServiceDataset | NO | | +| DataServiceDatasetV2 | NO | | +| DataServiceDatasetV3 | NO | | +| DataServiceDatasetV4 | NO | | +| DatasetCardinality | NO | | +| DatasetFromGraph | NO | | +| DatasetToGraph | NO | | +| DatasetToGraphV2 | NO | | +| DatasetToSingleElement | NO | | +| DatasetToTFRecord | NO | | +| Dawsn | NO | | +| DebugGradientIdentity | NO | | +| DebugGradientRefIdentity | NO | | +| DebugIdentity | NO | | +| DebugIdentityV2 | NO | | +| DebugIdentityV3 | NO | | +| DebugNanCount | NO | | +| DebugNumericSummary | NO | | +| DebugNumericSummaryV2 | NO | | +| DecodeAndCropJpeg | NO | | +| DecodeBase64 | NO | | +| DecodeBmp | NO | | +| DecodeCSV | NO | | +| DecodeCompressed | NO | | +| DecodeGif | NO | | +| DecodeImage | NO | | +| DecodeJSONExample | NO | | +| DecodeJpeg | NO | | +| DecodePaddedRaw | NO | | +| DecodePng | NO | | +| DecodeProtoV2 | NO | | +| DecodeRaw | NO | | +| DecodeWav | NO | | +| DeepCopy | NO | | +| DeleteIterator | NO | | +| DeleteMemoryCache | NO | | +| DeleteMultiDeviceIterator | NO | | +| DeleteRandomSeedGenerator | NO | | +| DeleteSeedGenerator | NO | | +| DeleteSessionTensor | NO | | +| DenseBincount | NO | | +| 
DenseCountSparseOutput | NO | | +| DenseToCSRSparseMatrix | NO | | +| DenseToDenseSetOperation | NO | | +| DenseToSparseBatchDataset | NO | | +| DenseToSparseSetOperation | NO | | +| DepthToSpace | YES | | +| DepthwiseConv2dNative | YES | | +| DepthwiseConv2dNativeBackpropFilter | NO | | +| DepthwiseConv2dNativeBackpropInput | NO | | +| Dequantize | NO | | +| DeserializeIterator | NO | | +| DeserializeManySparse | NO | | +| DeserializeSparse | NO | | +| DestroyResourceOp | NO | | +| DestroyTemporaryVariable | NO | | +| DeviceIndex | NO | | +| Diag | NO | | +| DiagPart | NO | | +| Digamma | NO | | +| Dilation2D | NO | | +| Dilation2DBackpropFilter | NO | | +| Dilation2DBackpropInput | NO | | +| DirectedInterleaveDataset | NO | | +| DisableCopyOnRead | NO | | +| DistributedSave | NO | | +| Div | NO | | +| DivNoNan | YES | | +| DrawBoundingBoxes | NO | | +| DrawBoundingBoxesV2 | NO | | +| DummyIterationCounter | NO | | +| DummyMemoryCache | NO | | +| DummySeedGenerator | NO | | +| DynamicEnqueueTPUEmbeddingArbitraryTensorBatch | NO | | +| DynamicPartition | YES | | +| DynamicStitch | YES | | +| EagerPyFunc | NO | | +| EditDistance | NO | | +| Eig | NO | | +| Einsum | YES | | +| Elu | YES | | +| EluGrad | NO | | +| Empty | NO | | +| EmptyTensorList | YES | | +| EncodeBase64 | NO | | +| EncodeJpeg | NO | | +| EncodeJpegVariableQuality | NO | | +| EncodePng | NO | | +| EncodeProto | NO | | +| EncodeWav | NO | | +| EnqueueTPUEmbeddingArbitraryTensorBatch | NO | | +| EnqueueTPUEmbeddingIntegerBatch | NO | | +| EnqueueTPUEmbeddingRaggedTensorBatch | NO | | +| EnqueueTPUEmbeddingSparseBatch | NO | | +| EnqueueTPUEmbeddingSparseTensorBatch | NO | | +| EnsureShape | YES | | +| Enter | YES | | +| Equal | YES | | +| Erf | YES | | +| Erfc | NO | | +| Erfinv | NO | | +| EuclideanNorm | YES | | +| Exit | YES | | +| Exp | YES | | +| ExpandDims | YES | | +| ExperimentalAssertNextDataset | NO | | +| ExperimentalAutoShardDataset | NO | | +| ExperimentalBytesProducedStatsDataset | NO | 
| +| ExperimentalCSVDataset | NO | | +| ExperimentalChooseFastestDataset | NO | | +| ExperimentalDatasetCardinality | NO | | +| ExperimentalDatasetToTFRecord | NO | | +| ExperimentalDenseToSparseBatchDataset | NO | | +| ExperimentalDirectedInterleaveDataset | NO | | +| ExperimentalGroupByReducerDataset | NO | | +| ExperimentalGroupByWindowDataset | NO | | +| ExperimentalIgnoreErrorsDataset | NO | | +| ExperimentalIteratorGetDevice | NO | | +| ExperimentalLMDBDataset | NO | | +| ExperimentalLatencyStatsDataset | NO | | +| ExperimentalMapAndBatchDataset | NO | | +| ExperimentalMapDataset | NO | | +| ExperimentalMatchingFilesDataset | NO | | +| ExperimentalMaxIntraOpParallelismDataset | NO | | +| ExperimentalNonSerializableDataset | NO | | +| ExperimentalParallelInterleaveDataset | NO | | +| ExperimentalParseExampleDataset | NO | | +| ExperimentalPrivateThreadPoolDataset | NO | | +| ExperimentalRandomDataset | NO | | +| ExperimentalRebatchDataset | NO | | +| ExperimentalScanDataset | NO | | +| ExperimentalSetStatsAggregatorDataset | NO | | +| ExperimentalSleepDataset | NO | | +| ExperimentalSlidingWindowDataset | NO | | +| ExperimentalSqlDataset | NO | | +| ExperimentalStatsAggregatorHandle | NO | | +| ExperimentalStatsAggregatorSummary | NO | | +| ExperimentalTakeWhileDataset | NO | | +| ExperimentalThreadPoolDataset | NO | | +| ExperimentalThreadPoolHandle | NO | | +| ExperimentalUnbatchDataset | NO | | +| ExperimentalUniqueDataset | NO | | +| Expint | NO | | +| Expm1 | NO | | +| ExtractGlimpse | NO | | +| ExtractGlimpseV2 | NO | | +| ExtractImagePatches | YES | | +| ExtractJpegShape | NO | | +| ExtractVolumePatches | NO | | +| FFT | NO | | +| FFT2D | NO | | +| FFT3D | NO | | +| FIFOQueue | YES | | +| FIFOQueueV2 | YES | | +| Fact | NO | | +| FakeParam | NO | | +| FakeQuantWithMinMaxArgs | YES | | +| FakeQuantWithMinMaxArgsGradient | NO | | +| FakeQuantWithMinMaxVars | YES | | +| FakeQuantWithMinMaxVarsGradient | NO | | +| FakeQuantWithMinMaxVarsPerChannel | YES | | 
+| FakeQuantWithMinMaxVarsPerChannelGradient | NO | | +| FakeQueue | NO | | +| Fill | YES | | +| FilterByLastComponentDataset | NO | | +| FilterDataset | NO | | +| FinalizeDataset | NO | | +| Fingerprint | NO | | +| FixedLengthRecordDataset | NO | | +| FixedLengthRecordDatasetV2 | NO | | +| FixedLengthRecordReader | NO | | +| FixedLengthRecordReaderV2 | NO | | +| FixedUnigramCandidateSampler | NO | | +| FlatMapDataset | NO | | +| Floor | YES | | +| FloorDiv | YES | | +| FloorMod | YES | | +| FlushSummaryWriter | NO | | +| For | NO | | +| FractionalAvgPool | NO | | +| FractionalAvgPoolGrad | NO | | +| FractionalMaxPool | NO | | +| FractionalMaxPoolGrad | NO | | +| FresnelCos | NO | | +| FresnelSin | NO | | +| FusedBatchNorm | YES | | +| FusedBatchNormGrad | NO | | +| FusedBatchNormGradV2 | NO | | +| FusedBatchNormGradV3 | NO | | +| FusedBatchNormV2 | YES | | +| FusedBatchNormV3 | YES | | +| FusedPadConv2D | NO | | +| FusedResizeAndPadConv2D | NO | | +| GRUBlockCell | YES | | +| GRUBlockCellGrad | NO | | +| Gather | YES | | +| GatherNd | YES | | +| GatherV2 | YES | | +| GenerateBoundingBoxProposals | NO | | +| GenerateVocabRemapping | NO | | +| GeneratorDataset | NO | | +| GetElementAtIndex | NO | | +| GetOptions | NO | | +| GetSessionHandle | NO | | +| GetSessionHandleV2 | NO | | +| GetSessionTensor | NO | | +| Greater | YES | | +| GreaterEqual | YES | | +| GroupByReducerDataset | NO | | +| GroupByWindowDataset | NO | | +| GuaranteeConst | NO | | +| HSVToRGB | NO | | +| HashTable | YES | | +| HashTableV2 | YES | | +| HistogramFixedWidth | NO | | +| HistogramSummary | NO | | +| IFFT | NO | | +| IFFT2D | NO | | +| IFFT3D | NO | | +| IRFFT | NO | | +| IRFFT2D | NO | | +| IRFFT3D | NO | | +| Identity | YES | | +| IdentityN | YES | | +| IdentityReader | NO | | +| IdentityReaderV2 | NO | | +| If | YES | | +| Igamma | NO | | +| IgammaGradA | NO | | +| Igammac | NO | | +| IgnoreErrorsDataset | NO | | +| Imag | NO | | +| ImageProjectiveTransformV2 | NO | | +| 
ImageProjectiveTransformV3 | NO | | +| ImageSummary | NO | | +| ImmutableConst | NO | | +| ImportEvent | NO | | +| InTopK | NO | | +| InTopKV2 | NO | | +| InfeedDequeue | NO | | +| InfeedDequeueTuple | NO | | +| InfeedEnqueue | NO | | +| InfeedEnqueuePrelinearizedBuffer | NO | | +| InfeedEnqueueTuple | NO | | +| InitializeTable | NO | | +| InitializeTableFromDataset | NO | | +| InitializeTableFromTextFile | NO | | +| InitializeTableFromTextFileV2 | NO | | +| InitializeTableV2 | NO | | +| InplaceAdd | NO | | +| InplaceSub | NO | | +| InplaceUpdate | NO | | +| InterleaveDataset | NO | | +| Inv | NO | | +| InvGrad | NO | | +| Invert | NO | | +| InvertPermutation | YES | | +| IsBoostedTreesEnsembleInitialized | NO | | +| IsBoostedTreesQuantileStreamResourceInitialized | NO | | +| IsFinite | YES | | +| IsInf | YES | | +| IsNan | YES | | +| IsTPUEmbeddingInitialized | NO | | +| IsVariableInitialized | YES | | +| IsotonicRegression | NO | | +| Iterator | YES | | +| IteratorFromStringHandle | NO | | +| IteratorFromStringHandleV2 | NO | | +| IteratorGetDevice | NO | | +| IteratorGetNext | YES | | +| IteratorGetNextAsOptional | NO | | +| IteratorGetNextSync | NO | | +| IteratorToStringHandle | NO | | +| IteratorV2 | YES | | +| L2Loss | YES | | +| LMDBDataset | NO | | +| LMDBReader | NO | | +| LRN | YES | | +| LRNGrad | NO | | +| LSTMBlockCell | NO | | +| LSTMBlockCellGrad | NO | | +| LatencyStatsDataset | NO | | +| LeakyRelu | YES | | +| LeakyReluGrad | NO | | +| LearnedUnigramCandidateSampler | NO | | +| LeftShift | NO | | +| LegacyParallelInterleaveDatasetV2 | NO | | +| Less | YES | | +| LessEqual | YES | | +| Lgamma | NO | | +| LinSpace | YES | | +| ListDataset | NO | | +| ListDiff | YES | | +| LoadAndRemapMatrix | NO | | +| LoadDataset | NO | | +| LoadTPUEmbeddingADAMParameters | NO | | +| LoadTPUEmbeddingAdadeltaParameters | NO | | +| LoadTPUEmbeddingAdagradMomentumParameters | NO | | +| LoadTPUEmbeddingAdagradParameters | NO | | +| 
LoadTPUEmbeddingCenteredRMSPropParameters | NO | | +| LoadTPUEmbeddingFTRLParameters | NO | | +| LoadTPUEmbeddingFrequencyEstimatorParameters | NO | | +| LoadTPUEmbeddingMDLAdagradLightParameters | NO | | +| LoadTPUEmbeddingMomentumParameters | NO | | +| LoadTPUEmbeddingProximalAdagradParameters | NO | | +| LoadTPUEmbeddingProximalYogiParameters | NO | | +| LoadTPUEmbeddingRMSPropParameters | NO | | +| LoadTPUEmbeddingStochasticGradientDescentParameters | NO | | +| Log | YES | | +| Log1p | YES | | +| LogMatrixDeterminant | NO | | +| LogSoftmax | YES | | +| LogUniformCandidateSampler | NO | | +| LogicalAnd | YES | | +| LogicalNot | YES | | +| LogicalOr | YES | | +| LookupTableExport | NO | | +| LookupTableExportV2 | NO | | +| LookupTableFind | NO | | +| LookupTableFindV2 | NO | | +| LookupTableImport | NO | | +| LookupTableImportV2 | NO | | +| LookupTableInsert | YES | | +| LookupTableInsertV2 | YES | | +| LookupTableRemoveV2 | NO | | +| LookupTableSize | NO | | +| LookupTableSizeV2 | NO | | +| LoopCond | YES | | +| LowerBound | NO | | +| Lu | NO | | +| MakeIterator | NO | | +| MapAndBatchDataset | NO | | +| MapClear | NO | | +| MapDataset | NO | | +| MapDefun | NO | | +| MapIncompleteSize | NO | | +| MapPeek | NO | | +| MapSize | NO | | +| MapStage | NO | | +| MapUnstage | NO | | +| MapUnstageNoKey | NO | | +| MatMul | YES | | +| MatchingFiles | NO | | +| MatchingFilesDataset | NO | | +| MatrixBandPart | NO | | +| MatrixDeterminant | NO | | +| MatrixDiag | YES | | +| MatrixDiagPart | NO | | +| MatrixDiagPartV2 | NO | | +| MatrixDiagPartV3 | NO | | +| MatrixDiagV2 | NO | | +| MatrixDiagV3 | NO | | +| MatrixExponential | NO | | +| MatrixInverse | NO | | +| MatrixLogarithm | NO | | +| MatrixSetDiag | NO | | +| MatrixSetDiagV2 | NO | | +| MatrixSetDiagV3 | NO | | +| MatrixSolve | NO | | +| MatrixSolveLs | NO | | +| MatrixSquareRoot | NO | | +| MatrixTriangularSolve | NO | | +| Max | YES | | +| MaxIntraOpParallelismDataset | NO | | +| MaxPool | YES | | +| MaxPool3D | 
YES | | +| MaxPool3DGrad | NO | | +| MaxPool3DGradGrad | NO | | +| MaxPoolGrad | NO | | +| MaxPoolGradGrad | NO | | +| MaxPoolGradGradV2 | NO | | +| MaxPoolGradGradWithArgmax | NO | | +| MaxPoolGradV2 | NO | | +| MaxPoolGradWithArgmax | NO | | +| MaxPoolV2 | YES | | +| MaxPoolWithArgmax | YES | | +| Maximum | YES | | +| Mean | YES | | +| Merge | YES | | +| MergeSummary | NO | | +| MergeV2Checkpoints | YES | | +| Mfcc | NO | | +| Min | YES | | +| Minimum | YES | | +| MirrorPad | YES | | +| MirrorPadGrad | NO | | +| Mod | YES | | +| ModelDataset | NO | | +| Mul | YES | | +| MulNoNan | NO | | +| MultiDeviceIterator | NO | | +| MultiDeviceIteratorFromStringHandle | NO | | +| MultiDeviceIteratorGetNextFromShard | NO | | +| MultiDeviceIteratorInit | NO | | +| MultiDeviceIteratorToStringHandle | NO | | +| Multinomial | NO | | +| MutableDenseHashTable | NO | | +| MutableDenseHashTableV2 | NO | | +| MutableHashTable | YES | | +| MutableHashTableOfTensors | NO | | +| MutableHashTableOfTensorsV2 | NO | | +| MutableHashTableV2 | YES | | +| MutexLock | NO | | +| MutexV2 | NO | | +| NcclAllReduce | NO | | +| NcclBroadcast | NO | | +| NcclReduce | NO | | +| Ndtri | NO | | +| Neg | YES | | +| NextAfter | NO | | +| NextIteration | YES | | +| NoOp | YES | | +| NonDeterministicInts | NO | | +| NonMaxSuppression | YES | | +| NonMaxSuppressionV2 | YES | | +| NonMaxSuppressionV3 | YES | | +| NonMaxSuppressionV4 | YES | | +| NonMaxSuppressionV5 | YES | | +| NonMaxSuppressionWithOverlaps | NO | | +| NonSerializableDataset | NO | | +| NotEqual | YES | | +| NthElement | NO | | +| OneHot | YES | | +| OneShotIterator | YES | | +| OnesLike | YES | | +| OptimizeDataset | NO | | +| OptimizeDatasetV2 | NO | | +| OptionalFromValue | NO | | +| OptionalGetValue | NO | | +| OptionalHasValue | NO | | +| OptionalNone | NO | | +| OptionsDataset | NO | | +| OrderedMapClear | NO | | +| OrderedMapIncompleteSize | NO | | +| OrderedMapPeek | NO | | +| OrderedMapSize | NO | | +| OrderedMapStage | NO | | +| 
OrderedMapUnstage | NO | | +| OrderedMapUnstageNoKey | NO | | +| OutfeedDequeue | NO | | +| OutfeedDequeueTuple | NO | | +| OutfeedDequeueTupleV2 | NO | | +| OutfeedDequeueV2 | NO | | +| OutfeedEnqueue | NO | | +| OutfeedEnqueueTuple | NO | | +| Pack | YES | | +| Pad | YES | | +| PadV2 | YES | | +| PaddedBatchDataset | NO | | +| PaddedBatchDatasetV2 | NO | | +| PaddingFIFOQueue | NO | | +| PaddingFIFOQueueV2 | NO | | +| ParallelBatchDataset | NO | | +| ParallelConcat | NO | | +| ParallelDynamicStitch | YES | | +| ParallelFilterDataset | NO | | +| ParallelInterleaveDataset | NO | | +| ParallelInterleaveDatasetV2 | NO | | +| ParallelInterleaveDatasetV3 | NO | | +| ParallelInterleaveDatasetV4 | NO | | +| ParallelMapDataset | NO | | +| ParallelMapDatasetV2 | NO | | +| ParameterizedTruncatedNormal | NO | | +| ParseExample | NO | | +| ParseExampleDataset | NO | | +| ParseExampleDatasetV2 | NO | | +| ParseExampleV2 | NO | | +| ParseSequenceExample | NO | | +| ParseSequenceExampleV2 | NO | | +| ParseSingleExample | NO | | +| ParseSingleSequenceExample | NO | | +| ParseTensor | NO | | +| PartitionedCall | YES | | +| Placeholder | YES | | +| PlaceholderV2 | NO | | +| PlaceholderWithDefault | YES | | +| Polygamma | NO | | +| PopulationCount | NO | | +| Pow | YES | | +| PrefetchDataset | NO | | +| Prelinearize | NO | | +| PrelinearizeTuple | NO | | +| PreventGradient | YES | | +| Print | NO | | +| PrintV2 | NO | | +| PriorityQueue | NO | | +| PriorityQueueV2 | NO | | +| PrivateThreadPoolDataset | NO | | +| Prod | YES | | +| PyFunc | NO | | +| PyFuncStateless | NO | | +| Qr | NO | | +| QuantizeAndDequantize | NO | | +| QuantizeAndDequantizeV2 | NO | | +| QuantizeAndDequantizeV3 | NO | | +| QuantizeAndDequantizeV4 | NO | | +| QuantizeAndDequantizeV4Grad | NO | | +| QuantizeDownAndShrinkRange | NO | | +| QuantizeV2 | NO | | +| QuantizedAdd | NO | | +| QuantizedAvgPool | NO | | +| QuantizedBatchNormWithGlobalNormalization | NO | | +| QuantizedBiasAdd | NO | | +| QuantizedConcat | 
NO | | +| QuantizedConv2D | NO | | +| QuantizedConv2DAndRelu | NO | | +| QuantizedConv2DAndReluAndRequantize | NO | | +| QuantizedConv2DAndRequantize | NO | | +| QuantizedConv2DPerChannel | NO | | +| QuantizedConv2DWithBias | NO | | +| QuantizedConv2DWithBiasAndRelu | NO | | +| QuantizedConv2DWithBiasAndReluAndRequantize | NO | | +| QuantizedConv2DWithBiasAndRequantize | NO | | +| QuantizedConv2DWithBiasSignedSumAndReluAndRequantize | NO | | +| QuantizedConv2DWithBiasSumAndRelu | NO | | +| QuantizedConv2DWithBiasSumAndReluAndRequantize | NO | | +| QuantizedDepthwiseConv2D | NO | | +| QuantizedDepthwiseConv2DWithBias | NO | | +| QuantizedDepthwiseConv2DWithBiasAndRelu | NO | | +| QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize | NO | | +| QuantizedInstanceNorm | NO | | +| QuantizedMatMul | NO | | +| QuantizedMatMulWithBias | NO | | +| QuantizedMatMulWithBiasAndDequantize | NO | | +| QuantizedMatMulWithBiasAndRelu | NO | | +| QuantizedMatMulWithBiasAndReluAndRequantize | NO | | +| QuantizedMatMulWithBiasAndRequantize | NO | | +| QuantizedMaxPool | NO | | +| QuantizedMul | NO | | +| QuantizedRelu | NO | | +| QuantizedRelu6 | NO | | +| QuantizedReluX | NO | | +| QuantizedReshape | NO | | +| QuantizedResizeBilinear | NO | | +| QueueClose | NO | | +| QueueCloseV2 | NO | | +| QueueDequeue | YES | | +| QueueDequeueMany | YES | | +| QueueDequeueManyV2 | NO | | +| QueueDequeueUpTo | YES | | +| QueueDequeueUpToV2 | YES | | +| QueueDequeueV2 | YES | | +| QueueEnqueue | NO | | +| QueueEnqueueMany | NO | | +| QueueEnqueueManyV2 | NO | | +| QueueEnqueueV2 | NO | | +| QueueIsClosed | NO | | +| QueueIsClosedV2 | NO | | +| QueueSize | NO | | +| QueueSizeV2 | NO | | +| RFFT | NO | | +| RFFT2D | NO | | +| RFFT3D | NO | | +| RGBToHSV | NO | | +| RaggedBincount | NO | | +| RaggedCountSparseOutput | NO | | +| RaggedCross | NO | | +| RaggedFillEmptyRows | NO | | +| RaggedFillEmptyRowsGrad | NO | | +| RaggedGather | NO | | +| RaggedRange | NO | | +| RaggedTensorFromVariant | NO | | +| 
RaggedTensorToSparse | NO | | +| RaggedTensorToTensor | NO | | +| RaggedTensorToVariant | NO | | +| RaggedTensorToVariantGradient | NO | | +| RandomCrop | NO | | +| RandomDataset | NO | | +| RandomDatasetV2 | NO | | +| RandomGamma | NO | | +| RandomGammaGrad | NO | | +| RandomIndexShuffle | NO | | +| RandomPoisson | NO | | +| RandomPoissonV2 | NO | | +| RandomShuffle | NO | | +| RandomShuffleQueue | NO | | +| RandomShuffleQueueV2 | NO | | +| RandomStandardNormal | NO | | +| RandomUniform | YES | | +| RandomUniformInt | YES | | +| Range | YES | | +| RangeDataset | NO | | +| Rank | YES | | +| ReadFile | NO | | +| ReadVariableOp | YES | | +| ReadVariableXlaSplitND | NO | | +| ReaderNumRecordsProduced | NO | | +| ReaderNumRecordsProducedV2 | NO | | +| ReaderNumWorkUnitsCompleted | NO | | +| ReaderNumWorkUnitsCompletedV2 | NO | | +| ReaderRead | NO | | +| ReaderReadUpTo | NO | | +| ReaderReadUpToV2 | NO | | +| ReaderReadV2 | NO | | +| ReaderReset | NO | | +| ReaderResetV2 | NO | | +| ReaderRestoreState | NO | | +| ReaderRestoreStateV2 | NO | | +| ReaderSerializeState | NO | | +| ReaderSerializeStateV2 | NO | | +| Real | NO | | +| RealDiv | YES | | +| RebatchDataset | NO | | +| RebatchDatasetV2 | NO | | +| Reciprocal | YES | | +| ReciprocalGrad | NO | | +| RecordInput | NO | | +| Recv | NO | | +| RecvTPUEmbeddingActivations | NO | | +| ReduceDataset | NO | | +| ReduceJoin | NO | | +| RefEnter | NO | | +| RefExit | NO | | +| RefIdentity | NO | | +| RefMerge | NO | | +| RefNextIteration | NO | | +| RefSelect | NO | | +| RefSwitch | NO | | +| RegexFullMatch | NO | | +| RegexReplace | NO | | +| RegisterDataset | NO | | +| RegisterDatasetV2 | NO | | +| Relu | YES | | +| Relu6 | YES | | +| Relu6Grad | NO | | +| ReluGrad | NO | | +| RemoteCall | NO | | +| RepeatDataset | NO | | +| RequantizationRange | NO | | +| RequantizationRangePerChannel | NO | | +| Requantize | NO | | +| RequantizePerChannel | NO | | +| Reshape | YES | | +| ResizeArea | NO | | +| ResizeBicubic | NO | | +| 
ResizeBicubicGrad | NO | | +| ResizeBilinear | YES | | +| ResizeBilinearGrad | NO | | +| ResizeNearestNeighbor | YES | | +| ResizeNearestNeighborGrad | NO | | +| ResourceAccumulatorApplyGradient | NO | | +| ResourceAccumulatorNumAccumulated | NO | | +| ResourceAccumulatorSetGlobalStep | NO | | +| ResourceAccumulatorTakeGradient | NO | | +| ResourceApplyAdaMax | NO | | +| ResourceApplyAdadelta | NO | | +| ResourceApplyAdagrad | NO | | +| ResourceApplyAdagradDA | NO | | +| ResourceApplyAdagradV2 | NO | | +| ResourceApplyAdam | NO | | +| ResourceApplyAdamWithAmsgrad | NO | | +| ResourceApplyAddSign | NO | | +| ResourceApplyCenteredRMSProp | NO | | +| ResourceApplyFtrl | NO | | +| ResourceApplyFtrlV2 | NO | | +| ResourceApplyGradientDescent | NO | | +| ResourceApplyKerasMomentum | NO | | +| ResourceApplyMomentum | NO | | +| ResourceApplyPowerSign | NO | | +| ResourceApplyProximalAdagrad | NO | | +| ResourceApplyProximalGradientDescent | NO | | +| ResourceApplyRMSProp | NO | | +| ResourceConditionalAccumulator | NO | | +| ResourceCountUpTo | NO | | +| ResourceGather | YES | | +| ResourceGatherNd | NO | | +| ResourceScatterAdd | NO | | +| ResourceScatterDiv | NO | | +| ResourceScatterMax | NO | | +| ResourceScatterMin | NO | | +| ResourceScatterMul | NO | | +| ResourceScatterNdAdd | NO | | +| ResourceScatterNdMax | NO | | +| ResourceScatterNdMin | NO | | +| ResourceScatterNdSub | NO | | +| ResourceScatterNdUpdate | NO | | +| ResourceScatterSub | NO | | +| ResourceScatterUpdate | NO | | +| ResourceSparseApplyAdadelta | NO | | +| ResourceSparseApplyAdagrad | NO | | +| ResourceSparseApplyAdagradDA | NO | | +| ResourceSparseApplyAdagradV2 | NO | | +| ResourceSparseApplyCenteredRMSProp | NO | | +| ResourceSparseApplyFtrl | NO | | +| ResourceSparseApplyFtrlV2 | NO | | +| ResourceSparseApplyKerasMomentum | NO | | +| ResourceSparseApplyMomentum | NO | | +| ResourceSparseApplyProximalAdagrad | NO | | +| ResourceSparseApplyProximalGradientDescent | NO | | +| 
ResourceSparseApplyRMSProp | NO | | +| ResourceStridedSliceAssign | NO | | +| Restore | NO | | +| RestoreSlice | NO | | +| RestoreV2 | YES | | +| RetrieveTPUEmbeddingADAMParameters | NO | | +| RetrieveTPUEmbeddingAdadeltaParameters | NO | | +| RetrieveTPUEmbeddingAdagradMomentumParameters | NO | | +| RetrieveTPUEmbeddingAdagradParameters | NO | | +| RetrieveTPUEmbeddingCenteredRMSPropParameters | NO | | +| RetrieveTPUEmbeddingFTRLParameters | NO | | +| RetrieveTPUEmbeddingFrequencyEstimatorParameters | NO | | +| RetrieveTPUEmbeddingMDLAdagradLightParameters | NO | | +| RetrieveTPUEmbeddingMomentumParameters | NO | | +| RetrieveTPUEmbeddingProximalAdagradParameters | NO | | +| RetrieveTPUEmbeddingProximalYogiParameters | NO | | +| RetrieveTPUEmbeddingRMSPropParameters | NO | | +| RetrieveTPUEmbeddingStochasticGradientDescentParameters | NO | | +| Reverse | YES | | +| ReverseSequence | YES | | +| ReverseV2 | YES | | +| RewriteDataset | NO | | +| RightShift | NO | | +| Rint | NO | | +| RngReadAndSkip | NO | | +| RngSkip | NO | | +| Roll | YES | | +| Round | YES | | +| Rsqrt | YES | | +| RsqrtGrad | NO | | +| SampleDistortedBoundingBox | NO | | +| SampleDistortedBoundingBoxV2 | NO | | +| SamplingDataset | NO | | +| Save | NO | | +| SaveDataset | NO | | +| SaveDatasetV2 | NO | | +| SaveSlices | NO | | +| SaveV2 | YES | | +| ScalarSummary | NO | | +| ScaleAndTranslate | NO | | +| ScaleAndTranslateGrad | NO | | +| ScanDataset | NO | | +| ScatterAdd | NO | | +| ScatterDiv | NO | | +| ScatterMax | NO | | +| ScatterMin | NO | | +| ScatterMul | NO | | +| ScatterNd | YES | | +| ScatterNdAdd | NO | | +| ScatterNdMax | NO | | +| ScatterNdMin | NO | | +| ScatterNdNonAliasingAdd | NO | | +| ScatterNdSub | NO | | +| ScatterNdUpdate | NO | | +| ScatterSub | NO | | +| ScatterUpdate | NO | | +| SdcaFprint | NO | | +| SdcaOptimizer | NO | | +| SdcaOptimizerV2 | NO | | +| SdcaShrinkL1 | NO | | +| SegmentMax | NO | | +| SegmentMaxV2 | NO | | +| SegmentMean | NO | | +| SegmentMin | NO | | 
+| SegmentMinV2 | NO | | +| SegmentProd | NO | | +| SegmentProdV2 | NO | | +| SegmentSum | YES | | +| SegmentSumV2 | NO | | +| Select | YES | | +| SelectV2 | YES | | +| SelfAdjointEig | NO | | +| SelfAdjointEigV2 | NO | | +| Selu | YES | | +| SeluGrad | NO | | +| Send | NO | | +| SendTPUEmbeddingGradients | NO | | +| SerializeIterator | NO | | +| SerializeManySparse | NO | | +| SerializeSparse | NO | | +| SerializeTensor | NO | | +| SetSize | NO | | +| SetStatsAggregatorDataset | NO | | +| Shape | YES | | +| ShapeN | YES | | +| ShardDataset | NO | | +| ShardedFilename | YES | | +| ShardedFilespec | NO | | +| ShuffleAndRepeatDataset | NO | | +| ShuffleAndRepeatDatasetV2 | NO | | +| ShuffleDataset | NO | | +| ShuffleDatasetV2 | NO | | +| ShuffleDatasetV3 | NO | | +| ShutdownDistributedTPU | NO | | +| Sigmoid | YES | | +| SigmoidGrad | NO | | +| Sign | YES | | +| Sin | YES | | +| Sinh | YES | | +| Size | YES | | +| SkipDataset | NO | | +| SleepDataset | NO | | +| Slice | YES | | +| SlidingWindowDataset | NO | | +| Snapshot | YES | | +| SnapshotChunkDataset | NO | | +| SnapshotDataset | NO | | +| SnapshotDatasetReader | NO | | +| SnapshotDatasetV2 | NO | | +| SnapshotNestedDatasetReader | NO | | +| SobolSample | NO | | +| Softmax | YES | | +| SoftmaxCrossEntropyWithLogits | NO | | +| Softplus | YES | | +| SoftplusGrad | NO | | +| Softsign | YES | | +| SoftsignGrad | NO | | +| SpaceToBatch | NO | | +| SpaceToBatchND | YES | | +| SpaceToDepth | YES | | +| SparseAccumulatorApplyGradient | NO | | +| SparseAccumulatorTakeGradient | NO | | +| SparseAdd | NO | | +| SparseAddGrad | NO | | +| SparseApplyAdadelta | NO | | +| SparseApplyAdagrad | NO | | +| SparseApplyAdagradDA | NO | | +| SparseApplyAdagradV2 | NO | | +| SparseApplyCenteredRMSProp | NO | | +| SparseApplyFtrl | NO | | +| SparseApplyFtrlV2 | NO | | +| SparseApplyMomentum | NO | | +| SparseApplyProximalAdagrad | NO | | +| SparseApplyProximalGradientDescent | NO | | +| SparseApplyRMSProp | NO | | +| SparseBincount | 
NO | | +| SparseConcat | NO | | +| SparseConditionalAccumulator | NO | | +| SparseCountSparseOutput | NO | | +| SparseCross | NO | | +| SparseCrossHashed | NO | | +| SparseCrossV2 | NO | | +| SparseDenseCwiseAdd | NO | | +| SparseDenseCwiseDiv | NO | | +| SparseDenseCwiseMul | NO | | +| SparseFillEmptyRows | YES | | +| SparseFillEmptyRowsGrad | NO | | +| SparseMatMul | NO | | +| SparseMatrixAdd | NO | | +| SparseMatrixMatMul | NO | | +| SparseMatrixMul | NO | | +| SparseMatrixNNZ | NO | | +| SparseMatrixOrderingAMD | NO | | +| SparseMatrixSoftmax | NO | | +| SparseMatrixSoftmaxGrad | NO | | +| SparseMatrixSparseCholesky | NO | | +| SparseMatrixSparseMatMul | NO | | +| SparseMatrixTranspose | NO | | +| SparseMatrixZeros | NO | | +| SparseReduceMax | NO | | +| SparseReduceMaxSparse | NO | | +| SparseReduceSum | NO | | +| SparseReduceSumSparse | NO | | +| SparseReorder | NO | | +| SparseReshape | YES | | +| SparseSegmentMean | NO | | +| SparseSegmentMeanGrad | NO | | +| SparseSegmentMeanGradV2 | NO | | +| SparseSegmentMeanWithNumSegments | NO | | +| SparseSegmentSqrtN | NO | | +| SparseSegmentSqrtNGrad | NO | | +| SparseSegmentSqrtNGradV2 | NO | | +| SparseSegmentSqrtNWithNumSegments | NO | | +| SparseSegmentSum | YES | | +| SparseSegmentSumGrad | NO | | +| SparseSegmentSumGradV2 | NO | | +| SparseSegmentSumWithNumSegments | NO | | +| SparseSlice | NO | | +| SparseSliceGrad | NO | | +| SparseSoftmax | NO | | +| SparseSoftmaxCrossEntropyWithLogits | NO | | +| SparseSparseMaximum | NO | | +| SparseSparseMinimum | NO | | +| SparseSplit | NO | | +| SparseTensorDenseAdd | NO | | +| SparseTensorDenseMatMul | NO | | +| SparseTensorSliceDataset | NO | | +| SparseTensorToCSRSparseMatrix | NO | | +| SparseToDense | YES | | +| SparseToSparseSetOperation | NO | | +| Spence | NO | | +| Split | YES | | +| SplitV | YES | | +| SqlDataset | NO | | +| Sqrt | YES | | +| SqrtGrad | NO | | +| Square | YES | | +| SquaredDifference | YES | | +| Squeeze | YES | | +| Stack | NO | | +| 
StackClose | NO | | +| StackCloseV2 | NO | | +| StackPop | NO | | +| StackPopV2 | NO | | +| StackPush | NO | | +| StackPushV2 | NO | | +| StackV2 | NO | | +| Stage | NO | | +| StageClear | NO | | +| StagePeek | NO | | +| StageSize | NO | | +| StatefulPartitionedCall | YES | | +| StatefulRandomBinomial | NO | | +| StatefulStandardNormal | NO | | +| StatefulStandardNormalV2 | NO | | +| StatefulTruncatedNormal | NO | | +| StatefulUniform | NO | | +| StatefulUniformFullInt | NO | | +| StatefulUniformInt | NO | | +| StatelessCase | NO | | +| StatelessIf | YES | | +| StatelessMultinomial | NO | | +| StatelessParameterizedTruncatedNormal | NO | | +| StatelessRandomBinomial | NO | | +| StatelessRandomGammaV2 | NO | | +| StatelessRandomGammaV3 | NO | | +| StatelessRandomGetAlg | NO | | +| StatelessRandomGetKeyCounter | NO | | +| StatelessRandomGetKeyCounterAlg | NO | | +| StatelessRandomNormal | NO | | +| StatelessRandomNormalV2 | NO | | +| StatelessRandomPoisson | NO | | +| StatelessRandomUniform | NO | | +| StatelessRandomUniformFullInt | NO | | +| StatelessRandomUniformFullIntV2 | NO | | +| StatelessRandomUniformInt | NO | | +| StatelessRandomUniformIntV2 | NO | | +| StatelessRandomUniformV2 | NO | | +| StatelessSampleDistortedBoundingBox | NO | | +| StatelessShuffle | NO | | +| StatelessTruncatedNormal | NO | | +| StatelessTruncatedNormalV2 | NO | | +| StatelessWhile | YES | | +| StaticRegexFullMatch | YES | | +| StaticRegexReplace | NO | | +| StatsAggregatorHandle | NO | | +| StatsAggregatorHandleV2 | NO | | +| StatsAggregatorSetSummaryWriter | NO | | +| StatsAggregatorSummary | NO | | +| StopGradient | YES | | +| StridedSlice | YES | | +| StridedSliceAssign | NO | | +| StridedSliceGrad | NO | | +| StringFormat | NO | | +| StringJoin | YES | | +| StringLength | NO | | +| StringLower | NO | | +| StringNGrams | NO | | +| StringSplit | NO | | +| StringSplitV2 | NO | | +| StringStrip | NO | | +| StringToHashBucket | NO | | +| StringToHashBucketFast | NO | | +| 
StringToHashBucketStrong | NO | | +| StringToNumber | NO | | +| StringUpper | NO | | +| Sub | YES | | +| Substr | NO | | +| Sum | YES | | +| SummaryWriter | NO | | +| Svd | NO | | +| Switch | YES | | +| SymbolicGradient | NO | | +| SyncDevice | NO | | +| TFRecordDataset | NO | | +| TFRecordDatasetV2 | NO | | +| TFRecordReader | NO | | +| TFRecordReaderV2 | NO | | +| TPUCompilationResult | NO | | +| TPUEmbeddingActivations | NO | | +| TPUOrdinalSelector | NO | | +| TPUPartitionedCall | NO | | +| TPUPartitionedInput | NO | | +| TPUPartitionedInputV2 | NO | | +| TPUPartitionedOutput | NO | | +| TPUPartitionedOutputV2 | NO | | +| TPUReplicateMetadata | NO | | +| TPUReplicatedInput | NO | | +| TPUReplicatedOutput | NO | | +| TakeDataset | NO | | +| TakeManySparseFromTensorsMap | NO | | +| TakeWhileDataset | NO | | +| Tan | YES | | +| Tanh | YES | | +| TanhGrad | NO | | +| TemporaryVariable | NO | | +| TensorArray | NO | | +| TensorArrayClose | NO | | +| TensorArrayCloseV2 | NO | | +| TensorArrayCloseV3 | YES | | +| TensorArrayConcat | NO | | +| TensorArrayConcatV2 | NO | | +| TensorArrayConcatV3 | YES | | +| TensorArrayGather | NO | | +| TensorArrayGatherV2 | NO | | +| TensorArrayGatherV3 | YES | | +| TensorArrayGrad | NO | | +| TensorArrayGradV2 | NO | | +| TensorArrayGradV3 | NO | | +| TensorArrayGradWithShape | NO | | +| TensorArrayPack | NO | | +| TensorArrayRead | NO | | +| TensorArrayReadV2 | NO | | +| TensorArrayReadV3 | YES | | +| TensorArrayScatter | NO | | +| TensorArrayScatterV2 | NO | | +| TensorArrayScatterV3 | YES | | +| TensorArraySize | NO | | +| TensorArraySizeV2 | NO | | +| TensorArraySizeV3 | YES | | +| TensorArraySplit | NO | | +| TensorArraySplitV2 | NO | | +| TensorArraySplitV3 | NO | | +| TensorArrayUnpack | NO | | +| TensorArrayV2 | NO | | +| TensorArrayV3 | YES | | +| TensorArrayWrite | NO | | +| TensorArrayWriteV2 | NO | | +| TensorArrayWriteV3 | YES | | +| TensorDataset | NO | | +| TensorListConcat | NO | | +| TensorListConcatLists | NO | | +| 
TensorListConcatV2 | NO | | +| TensorListElementShape | NO | | +| TensorListFromTensor | YES | | +| TensorListGather | NO | | +| TensorListGetItem | YES | | +| TensorListLength | YES | | +| TensorListPopBack | NO | | +| TensorListPushBack | YES | | +| TensorListPushBackBatch | NO | | +| TensorListReserve | YES | | +| TensorListResize | YES | | +| TensorListScatter | NO | | +| TensorListScatterIntoExistingList | NO | | +| TensorListScatterV2 | NO | | +| TensorListSetItem | YES | | +| TensorListSplit | NO | | +| TensorListStack | YES | | +| TensorScatterAdd | NO | | +| TensorScatterMax | NO | | +| TensorScatterMin | NO | | +| TensorScatterSub | NO | | +| TensorScatterUpdate | NO | | +| TensorSliceDataset | NO | | +| TensorStridedSliceUpdate | NO | | +| TensorSummary | NO | | +| TensorSummaryV2 | NO | | +| TextLineDataset | NO | | +| TextLineReader | NO | | +| TextLineReaderV2 | NO | | +| ThreadPoolDataset | NO | | +| ThreadPoolHandle | NO | | +| ThreadUnsafeUnigramCandidateSampler | NO | | +| Tile | YES | | +| TileGrad | NO | | +| Timestamp | NO | | +| ToBool | YES | | +| TopK | YES | | +| TopKV2 | YES | | +| Transpose | YES | | +| TridiagonalMatMul | NO | | +| TridiagonalSolve | NO | | +| TruncateDiv | YES | | +| TruncateMod | YES | | +| TruncatedNormal | NO | | +| Unbatch | NO | | +| UnbatchDataset | NO | | +| UnbatchGrad | NO | | +| UncompressElement | NO | | +| UnicodeDecode | NO | | +| UnicodeDecodeWithOffsets | NO | | +| UnicodeEncode | NO | | +| UnicodeScript | NO | | +| UnicodeTranscode | NO | | +| UniformCandidateSampler | NO | | +| UniformDequantize | NO | | +| UniformQuantize | NO | | +| UniformQuantizedAdd | NO | | +| UniformQuantizedClipByValue | NO | | +| UniformQuantizedConvolution | NO | | +| UniformQuantizedConvolutionHybrid | NO | | +| UniformQuantizedDot | NO | | +| UniformQuantizedDotHybrid | NO | | +| UniformRequantize | NO | | +| Unique | YES | | +| UniqueDataset | NO | | +| UniqueV2 | NO | | +| UniqueWithCounts | NO | | +| UniqueWithCountsV2 | 
NO | | +| Unpack | YES | | +| UnravelIndex | YES | | +| UnsortedSegmentJoin | NO | | +| UnsortedSegmentMax | NO | | +| UnsortedSegmentMin | NO | | +| UnsortedSegmentProd | NO | | +| UnsortedSegmentSum | YES | | +| Unstage | NO | | +| UnwrapDatasetVariant | NO | | +| UpperBound | NO | | +| VarHandleOp | YES | | +| VarIsInitializedOp | YES | | +| Variable | YES | | +| VariableShape | NO | | +| VariableV2 | YES | | +| Where | YES | | +| While | YES | | +| WholeFileReader | NO | | +| WholeFileReaderV2 | NO | | +| WindowDataset | NO | | +| WindowOp | NO | | +| WorkerHeartbeat | NO | | +| WrapDatasetVariant | NO | | +| WriteAudioSummary | NO | | +| WriteFile | NO | | +| WriteGraphSummary | NO | | +| WriteHistogramSummary | NO | | +| WriteImageSummary | NO | | +| WriteRawProtoSummary | NO | | +| WriteScalarSummary | NO | | +| WriteSummary | NO | | +| Xdivy | YES | | +| XlaConcatND | NO | | +| XlaSplitND | NO | | +| Xlog1py | YES | | +| Xlogy | YES | | +| ZerosLike | YES | | +| Zeta | NO | | +| ZipDataset | NO | | diff --git a/src/frontends/tensorflow/src/checkpoint_v1_reader.cpp b/src/frontends/tensorflow/src/checkpoint_v1_reader.cpp index c74173af792d76..185b374e6bc42f 100644 --- a/src/frontends/tensorflow/src/checkpoint_v1_reader.cpp +++ b/src/frontends/tensorflow/src/checkpoint_v1_reader.cpp @@ -7,7 +7,7 @@ #include "checkpoint_utils.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/util/file_util.hpp" -#include "saved_tensor_slice.pb.h" +#include "ov_tensorflow/saved_tensor_slice.pb.h" #include "tf_utils.hpp" #ifdef ENABLE_SNAPPY_COMPRESSION @@ -68,7 +68,7 @@ void CheckpointV1Reader::initialize() { // parse empty index block // This is only present at the first item of each checkpoint file and serves // as a table of contents, listing all the tensor slices saved in this file. 
- ::ov_tensorflow::SavedTensorSlices sts; + ::tensorflow::SavedTensorSlices sts; FRONT_END_GENERAL_CHECK(sts.ParseFromArray(value.data(), static_cast(value.size())), "[TensorFlow Frontend] incorrect input checkpoint file or internal error: cannot parse " "SavedTensorSlices entry"); @@ -254,7 +254,7 @@ void CheckpointV1Reader::read_variable(const std::string& variable_name, ov::Any // This is only present at the first item of each checkpoint file and serves // as a table of contents, listing all the tensor slices saved in this file. - ::ov_tensorflow::SavedTensorSlices sts; + ::tensorflow::SavedTensorSlices sts; FRONT_END_GENERAL_CHECK(sts.ParseFromArray(raw_data.data(), static_cast(raw_data.size())), "[TensorFlow Frontend] incorrect input checkpoint file or internal error: cannot parse " "SavedTensorSlices entry"); diff --git a/src/frontends/tensorflow/src/checkpoint_v1_reader.hpp b/src/frontends/tensorflow/src/checkpoint_v1_reader.hpp index f088ed145f8ff3..1171fd6a682cb1 100644 --- a/src/frontends/tensorflow/src/checkpoint_v1_reader.hpp +++ b/src/frontends/tensorflow/src/checkpoint_v1_reader.hpp @@ -12,17 +12,17 @@ #include "checkpoint_utils.hpp" #include "openvino/core/any.hpp" #include "openvino/frontend/exception.hpp" -#include "saved_tensor_slice.pb.h" -#include "tensor_shape.pb.h" -#include "types.pb.h" +#include "ov_tensorflow/saved_tensor_slice.pb.h" +#include "ov_tensorflow/tensor_shape.pb.h" +#include "ov_tensorflow/types.pb.h" namespace ov { namespace frontend { namespace tensorflow { // stores information about shape, type, and shard id for Variable struct VariableInfo { - ::ov_tensorflow::TensorShapeProto variable_shape; - ::ov_tensorflow::DataType variable_type; + ::tensorflow::TensorShapeProto variable_shape; + ::tensorflow::DataType variable_type; int32_t shard_id; size_t offset; size_t size; diff --git a/src/frontends/tensorflow/src/decoder_argdef.cpp b/src/frontends/tensorflow/src/decoder_argdef.cpp index 3430bcbe5e37aa..af7c1a1cfccd01 100644 --- 
a/src/frontends/tensorflow/src/decoder_argdef.cpp +++ b/src/frontends/tensorflow/src/decoder_argdef.cpp @@ -5,11 +5,11 @@ #include "decoder_argdef.hpp" #include "decoder_proto.hpp" -#include "op_def.pb.h" #include "openvino/frontend/tensorflow/node_context.hpp" #include "openvino/frontend/tensorflow/special_types.hpp" +#include "ov_tensorflow/op_def.pb.h" +#include "ov_tensorflow/types.pb.h" #include "tf_utils.hpp" -#include "types.pb.h" namespace ov { namespace frontend { diff --git a/src/frontends/tensorflow/src/decoder_argdef.hpp b/src/frontends/tensorflow/src/decoder_argdef.hpp index 69f05423f528d6..dfee9b21e1481c 100644 --- a/src/frontends/tensorflow/src/decoder_argdef.hpp +++ b/src/frontends/tensorflow/src/decoder_argdef.hpp @@ -9,11 +9,11 @@ #include "openvino/frontend/tensorflow/decoder.hpp" -namespace ov_tensorflow { +namespace tensorflow { class GraphDef; class FunctionDef; class OpDef_ArgDef; -} // namespace ov_tensorflow +} // namespace tensorflow namespace ov { namespace frontend { @@ -21,18 +21,18 @@ namespace tensorflow { class DecoderArgDef : public ov::frontend::tensorflow::DecoderBase { public: - explicit DecoderArgDef(const ::ov_tensorflow::OpDef_ArgDef* arg_def, - const std::shared_ptr<::ov_tensorflow::GraphDef>& graph_def, - const std::shared_ptr<::ov_tensorflow::FunctionDef>& func_def, + explicit DecoderArgDef(const ::tensorflow::OpDef_ArgDef* arg_def, + const std::shared_ptr<::tensorflow::GraphDef>& graph_def, + const std::shared_ptr<::tensorflow::FunctionDef>& func_def, const std::string& op_type) : m_arg_def(arg_def), m_graph_def(graph_def), m_func_def(func_def), m_op_type(op_type) {} - explicit DecoderArgDef(const ::ov_tensorflow::OpDef_ArgDef* arg_def, - const std::shared_ptr<::ov_tensorflow::GraphDef>& graph_def, - const std::shared_ptr<::ov_tensorflow::FunctionDef>& func_def, + explicit DecoderArgDef(const ::tensorflow::OpDef_ArgDef* arg_def, + const std::shared_ptr<::tensorflow::GraphDef>& graph_def, + const 
std::shared_ptr<::tensorflow::FunctionDef>& func_def, const std::string& op_type, const std::string& producer_name) : m_arg_def(arg_def), @@ -55,13 +55,13 @@ class DecoderArgDef : public ov::frontend::tensorflow::DecoderBase { const std::string& get_op_name() const override; private: - const ::ov_tensorflow::OpDef_ArgDef* m_arg_def; + const ::tensorflow::OpDef_ArgDef* m_arg_def; // For existence of OpDef_ArgDef object corresponding to the main graph node, // GraphDef object must live in the memory - const std::shared_ptr<::ov_tensorflow::GraphDef> m_graph_def; + const std::shared_ptr<::tensorflow::GraphDef> m_graph_def; // For existence of OpDef_ArgDef object corresponding to the body graph node, // both GraphDef and FunctionDef objects must be alive in the memory - const std::shared_ptr<::ov_tensorflow::FunctionDef> m_func_def; + const std::shared_ptr<::tensorflow::FunctionDef> m_func_def; const std::string m_op_type; const std::string m_producer_name; }; diff --git a/src/frontends/tensorflow/src/decoder_proto.cpp b/src/frontends/tensorflow/src/decoder_proto.cpp index 26003336584d1e..9e0a53efb6d09f 100644 --- a/src/frontends/tensorflow/src/decoder_proto.cpp +++ b/src/frontends/tensorflow/src/decoder_proto.cpp @@ -4,12 +4,12 @@ #include "decoder_proto.hpp" -#include "attr_value.pb.h" -#include "node_def.pb.h" #include "openvino/frontend/tensorflow/node_context.hpp" #include "openvino/frontend/tensorflow/special_types.hpp" +#include "ov_tensorflow/attr_value.pb.h" +#include "ov_tensorflow/node_def.pb.h" +#include "ov_tensorflow/types.pb.h" #include "tf_utils.hpp" -#include "types.pb.h" namespace ov { namespace frontend { @@ -38,7 +38,7 @@ void extract_tensor_content(const std::string& tensor_content, ov::Tensor* value # pragma warning(disable : 4267) // possible loss of data #endif template -void extract_compressed_tensor_content(const ::ov_tensorflow::TensorProto& tensor_proto, +void extract_compressed_tensor_content(const ::tensorflow::TensorProto& tensor_proto, 
int64_t val_size, ov::Tensor* values) { auto val_lastsaved = static_cast(0); @@ -90,15 +90,15 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const { } switch (attrs[0].value_case()) { - case ::ov_tensorflow::AttrValue::ValueCase::kB: + case ::tensorflow::AttrValue::ValueCase::kB: return attrs[0].b(); - case ::ov_tensorflow::AttrValue::ValueCase::kF: + case ::tensorflow::AttrValue::ValueCase::kF: return attrs[0].f(); - case ::ov_tensorflow::AttrValue::ValueCase::kS: + case ::tensorflow::AttrValue::ValueCase::kS: return attrs[0].s(); - case ::ov_tensorflow::AttrValue::ValueCase::kI: + case ::tensorflow::AttrValue::ValueCase::kI: return attrs[0].i(); - case ::ov_tensorflow::AttrValue::ValueCase::kShape: { + case ::tensorflow::AttrValue::ValueCase::kShape: { const auto& tf_shape = attrs[0].shape(); if (tf_shape.unknown_rank()) { return ov::PartialShape::dynamic(); @@ -111,16 +111,16 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const { return ov::PartialShape(dims); } - case ::ov_tensorflow::AttrValue::ValueCase::kType: { + case ::tensorflow::AttrValue::ValueCase::kType: { auto atype = attrs[0].type(); - if (atype != ::ov_tensorflow::DT_STRING) { + if (atype != ::tensorflow::DT_STRING) { return get_ov_type(attrs[0].type()); } else { return ov::Any("DT_STRING"); } } - case ::ov_tensorflow::AttrValue::ValueCase::kList: { + case ::tensorflow::AttrValue::ValueCase::kList: { const auto& list = attrs[0].list(); if (list.i_size()) return std::vector(list.i().begin(), list.i().end()); @@ -156,7 +156,7 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const { if (list.type_size()) { std::vector res; for (int idx = 0; idx < list.type_size(); ++idx) { - if (list.type(idx) != ::ov_tensorflow::DataType::DT_STRING) { + if (list.type(idx) != ::tensorflow::DataType::DT_STRING) { res.emplace_back(get_ov_type(list.type(idx))); } else { res.emplace_back(ov::element::dynamic); @@ -176,15 +176,15 @@ ov::Any DecoderProto::get_attribute(const 
std::string& name) const { return EmptyList(); } - case ::ov_tensorflow::AttrValue::ValueCase::kTensor: { + case ::tensorflow::AttrValue::ValueCase::kTensor: { return unpack_tensor_proto(attrs[0].tensor()); } - case ::ov_tensorflow::AttrValue::ValueCase::kPlaceholder: + case ::tensorflow::AttrValue::ValueCase::kPlaceholder: FRONT_END_GENERAL_CHECK(false, "Conversion from Tensorflow to OpenVINO data type failed: Placeholder type for '", name, "' attribute is not supported."); - case ::ov_tensorflow::AttrValue::ValueCase::kFunc: + case ::tensorflow::AttrValue::ValueCase::kFunc: // attrs[0].func() returns NameAttrList object from which // we retrieve the function name // Further, InputModel object is created for FunctionDef with this name @@ -251,7 +251,7 @@ const std::string& DecoderProto::get_op_name() const { return m_node_def->name(); } -std::vector<::ov_tensorflow::AttrValue> DecoderProto::decode_attribute_helper(const std::string& name) const { +std::vector<::tensorflow::AttrValue> DecoderProto::decode_attribute_helper(const std::string& name) const { auto attr_map = m_node_def->attr(); if (attr_map.contains(name)) { auto value = m_node_def->attr().at(name); diff --git a/src/frontends/tensorflow/src/decoder_proto.hpp b/src/frontends/tensorflow/src/decoder_proto.hpp index eab5e10c41c892..9d22e273e1e146 100644 --- a/src/frontends/tensorflow/src/decoder_proto.hpp +++ b/src/frontends/tensorflow/src/decoder_proto.hpp @@ -9,14 +9,14 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/frontend/tensorflow/decoder.hpp" -#include "types.pb.h" +#include "ov_tensorflow/types.pb.h" -namespace ov_tensorflow { +namespace tensorflow { class GraphDef; class FunctionDef; class NodeDef; class AttrValue; -} // namespace ov_tensorflow +} // namespace tensorflow namespace ov { namespace frontend { @@ -29,15 +29,15 @@ void parse_producer_name(const std::string& producer_port_name, class DecoderProto : public ov::frontend::tensorflow::DecoderBase { public: - explicit 
DecoderProto(const ::ov_tensorflow::NodeDef* node_def, - const std::shared_ptr<::ov_tensorflow::GraphDef>& graph_def) + explicit DecoderProto(const ::tensorflow::NodeDef* node_def, + const std::shared_ptr<::tensorflow::GraphDef>& graph_def) : m_node_def(node_def), m_graph_def(graph_def), m_func_def(nullptr) {} - explicit DecoderProto(const ::ov_tensorflow::NodeDef* node_def, - const std::shared_ptr<::ov_tensorflow::GraphDef>& graph_def, - const std::shared_ptr<::ov_tensorflow::FunctionDef>& func_def) + explicit DecoderProto(const ::tensorflow::NodeDef* node_def, + const std::shared_ptr<::tensorflow::GraphDef>& graph_def, + const std::shared_ptr<::tensorflow::FunctionDef>& func_def) : m_node_def(node_def), m_graph_def(graph_def), m_func_def(func_def) {} @@ -56,14 +56,14 @@ class DecoderProto : public ov::frontend::tensorflow::DecoderBase { const std::string& get_op_name() const override; private: - std::vector<::ov_tensorflow::AttrValue> decode_attribute_helper(const std::string& name) const; - const ::ov_tensorflow::NodeDef* m_node_def; + std::vector<::tensorflow::AttrValue> decode_attribute_helper(const std::string& name) const; + const ::tensorflow::NodeDef* m_node_def; // For existence of NodeDef object corresponding to the main graph node, // GraphDef object must live in the memory - const std::shared_ptr<::ov_tensorflow::GraphDef> m_graph_def; + const std::shared_ptr<::tensorflow::GraphDef> m_graph_def; // For existence of NodeDef object corresponding to the body graph node, // both GraphDef and FunctionDef objects must be alive in the memory - const std::shared_ptr<::ov_tensorflow::FunctionDef> m_func_def; + const std::shared_ptr<::tensorflow::FunctionDef> m_func_def; }; } // namespace tensorflow } // namespace frontend diff --git a/src/frontends/tensorflow/src/graph_iterator_meta.cpp b/src/frontends/tensorflow/src/graph_iterator_meta.cpp index 81bd821aadec0e..06f2d31f389a27 100644 --- a/src/frontends/tensorflow/src/graph_iterator_meta.cpp +++ 
b/src/frontends/tensorflow/src/graph_iterator_meta.cpp @@ -10,26 +10,26 @@ #include #include "openvino/core/type/element_type.hpp" -#include "tensor_bundle.pb.h" -#include "trackable_object_graph.pb.h" +#include "ov_tensorflow/tensor_bundle.pb.h" +#include "ov_tensorflow/trackable_object_graph.pb.h" namespace ov { namespace frontend { namespace tensorflow { -bool GraphIteratorMeta::is_valid_signature(const ::ov_tensorflow::SignatureDef& signature) const { - const std::map<::ov_tensorflow::DataType, ov::element::Type> types{ - {::ov_tensorflow::DataType::DT_BOOL, ov::element::boolean}, - {::ov_tensorflow::DataType::DT_INT16, ov::element::i16}, - {::ov_tensorflow::DataType::DT_INT32, ov::element::i32}, - {::ov_tensorflow::DataType::DT_INT64, ov::element::i64}, - {::ov_tensorflow::DataType::DT_HALF, ov::element::f16}, - {::ov_tensorflow::DataType::DT_FLOAT, ov::element::f32}, - {::ov_tensorflow::DataType::DT_DOUBLE, ov::element::f64}, - {::ov_tensorflow::DataType::DT_UINT8, ov::element::u8}, - {::ov_tensorflow::DataType::DT_INT8, ov::element::i8}, - {::ov_tensorflow::DataType::DT_BFLOAT16, ov::element::bf16}, - {::ov_tensorflow::DataType::DT_STRING, ov::element::dynamic}}; +bool GraphIteratorMeta::is_valid_signature(const ::tensorflow::SignatureDef& signature) const { + const std::map<::tensorflow::DataType, ov::element::Type> types{ + {::tensorflow::DataType::DT_BOOL, ov::element::boolean}, + {::tensorflow::DataType::DT_INT16, ov::element::i16}, + {::tensorflow::DataType::DT_INT32, ov::element::i32}, + {::tensorflow::DataType::DT_INT64, ov::element::i64}, + {::tensorflow::DataType::DT_HALF, ov::element::f16}, + {::tensorflow::DataType::DT_FLOAT, ov::element::f32}, + {::tensorflow::DataType::DT_DOUBLE, ov::element::f64}, + {::tensorflow::DataType::DT_UINT8, ov::element::u8}, + {::tensorflow::DataType::DT_INT8, ov::element::i8}, + {::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16}, + {::tensorflow::DataType::DT_STRING, ov::element::dynamic}}; for (const auto& it 
: signature.inputs()) { if (it.second.name().empty() || types.find(it.second.dtype()) == types.end()) diff --git a/src/frontends/tensorflow/src/graph_iterator_meta.hpp b/src/frontends/tensorflow/src/graph_iterator_meta.hpp index 6c14df8ba8fd6b..1e2789227260fb 100644 --- a/src/frontends/tensorflow/src/graph_iterator_meta.hpp +++ b/src/frontends/tensorflow/src/graph_iterator_meta.hpp @@ -27,7 +27,7 @@ std::basic_string get_variables_index_name(const std::wstring // Loads graph from Tensorflow MetaGraph file (*.meta) class GraphIteratorMeta : public GraphIteratorProto { - std::shared_ptr<::ov_tensorflow::MetaGraphDef> m_metagraph_def; + std::shared_ptr<::tensorflow::MetaGraphDef> m_metagraph_def; std::shared_ptr m_variables_index; std::shared_ptr> m_inputs_map; std::shared_ptr> m_outputs_map; @@ -36,7 +36,7 @@ class GraphIteratorMeta : public GraphIteratorProto { public: template GraphIteratorMeta(const std::basic_string& path, const bool mmap_enabled) - : m_metagraph_def(std::make_shared<::ov_tensorflow::MetaGraphDef>()), + : m_metagraph_def(std::make_shared<::tensorflow::MetaGraphDef>()), m_mmap_enabled(mmap_enabled) { this->read_meta(path); } @@ -45,7 +45,7 @@ class GraphIteratorMeta : public GraphIteratorProto { static bool is_supported(const std::basic_string& path) { try { std::ifstream mg_stream(path.c_str(), std::ios::in | std::ifstream::binary); - auto metagraph_def = std::make_shared<::ov_tensorflow::MetaGraphDef>(); + auto metagraph_def = std::make_shared<::tensorflow::MetaGraphDef>(); return mg_stream && mg_stream.is_open() && metagraph_def->ParsePartialFromIstream(&mg_stream) && metagraph_def->has_graph_def() && metagraph_def->graph_def().node_size() > 0; } catch (...) 
{ @@ -66,7 +66,7 @@ class GraphIteratorMeta : public GraphIteratorProto { } private: - bool is_valid_signature(const ::ov_tensorflow::SignatureDef& signature) const; + bool is_valid_signature(const ::tensorflow::SignatureDef& signature) const; template bool read_meta(const std::basic_string& path) { @@ -87,10 +87,10 @@ class GraphIteratorMeta : public GraphIteratorProto { bool res = m_metagraph_def->ParseFromIstream(&mg_stream); FRONT_END_GENERAL_CHECK(res && m_metagraph_def->has_graph_def(), "MetaGraph cannot be parsed"); - std::map validSignatures = {}; + std::map validSignatures = {}; for (const auto& sit : m_metagraph_def->signature_def()) { const std::string& key = sit.first; - const ::ov_tensorflow::SignatureDef& val = sit.second; + const ::tensorflow::SignatureDef& val = sit.second; if (is_valid_signature(val)) { validSignatures[key] = &val; } @@ -114,7 +114,7 @@ class GraphIteratorMeta : public GraphIteratorProto { } } - m_graph_def = std::make_shared<::ov_tensorflow::GraphDef>(m_metagraph_def->graph_def()); + m_graph_def = std::make_shared<::tensorflow::GraphDef>(m_metagraph_def->graph_def()); // Update variables map using information by resolving AssignVariableOp graph nodes std::map var_map; diff --git a/src/frontends/tensorflow/src/graph_iterator_proto.hpp b/src/frontends/tensorflow/src/graph_iterator_proto.hpp index d01e1fec6b7a0c..5ef6d0a5954b41 100644 --- a/src/frontends/tensorflow/src/graph_iterator_proto.hpp +++ b/src/frontends/tensorflow/src/graph_iterator_proto.hpp @@ -10,10 +10,10 @@ #include "checkpoint_v1_reader.hpp" #include "decoder_argdef.hpp" #include "decoder_proto.hpp" -#include "graph.pb.h" #include "openvino/frontend/exception.hpp" #include "openvino/frontend/graph_iterator.hpp" #include "openvino/frontend/tensorflow/decoder.hpp" +#include "ov_tensorflow/graph.pb.h" namespace ov { namespace frontend { @@ -21,8 +21,8 @@ namespace tensorflow { class GraphIteratorProto : public GraphIterator { protected: - 
std::shared_ptr<::ov_tensorflow::GraphDef> m_graph_def; - std::shared_ptr<::ov_tensorflow::FunctionDef> m_func_def; + std::shared_ptr<::tensorflow::GraphDef> m_graph_def; + std::shared_ptr<::tensorflow::FunctionDef> m_func_def; std::shared_ptr m_checkpoint_v1_reader; size_t node_index = 0; @@ -32,7 +32,7 @@ class GraphIteratorProto : public GraphIterator { std::vector m_output_names; GraphIteratorProto() - : m_graph_def(std::make_shared<::ov_tensorflow::GraphDef>()), + : m_graph_def(std::make_shared<::tensorflow::GraphDef>()), m_func_def(nullptr), m_checkpoint_v1_reader(nullptr), m_library_map() {} @@ -62,8 +62,8 @@ class GraphIteratorProto : public GraphIterator { } public: - GraphIteratorProto(const std::shared_ptr<::ov_tensorflow::GraphDef>& graph_def, - const std::shared_ptr<::ov_tensorflow::FunctionDef>& func_def, + GraphIteratorProto(const std::shared_ptr<::tensorflow::GraphDef>& graph_def, + const std::shared_ptr<::tensorflow::FunctionDef>& func_def, const std::unordered_map& library_map, const std::shared_ptr checkpoint_v1_reader) : m_graph_def(graph_def), @@ -105,7 +105,7 @@ class GraphIteratorProto : public GraphIterator { /// \brief Construct GraphIterator for the frozen model without v1 checkpoints template GraphIteratorProto(const std::basic_string& model_path) - : m_graph_def(std::make_shared<::ov_tensorflow::GraphDef>()), + : m_graph_def(std::make_shared<::tensorflow::GraphDef>()), m_func_def(nullptr), m_checkpoint_v1_reader(nullptr) { std::ifstream pb_stream(model_path, std::ios::in | std::ifstream::binary); @@ -119,7 +119,7 @@ class GraphIteratorProto : public GraphIterator { /// \brief Construct GraphIterator for the frozen model with v1 checkpoints template GraphIteratorProto(const std::basic_string& model_path, const std::basic_string& checkpoint_directory) - : m_graph_def(std::make_shared<::ov_tensorflow::GraphDef>()), + : m_graph_def(std::make_shared<::tensorflow::GraphDef>()), m_func_def(nullptr), m_checkpoint_v1_reader(nullptr) { 
std::ifstream pb_stream(model_path, std::ios::in | std::ifstream::binary); @@ -136,7 +136,7 @@ class GraphIteratorProto : public GraphIterator { static bool is_supported(const std::basic_string& path) { try { std::ifstream pb_stream(path, std::ios::in | std::ifstream::binary); - auto graph_def = std::make_shared<::ov_tensorflow::GraphDef>(); + auto graph_def = std::make_shared<::tensorflow::GraphDef>(); return pb_stream && pb_stream.is_open() && graph_def->ParsePartialFromIstream(&pb_stream) && graph_def->node_size() > 0; } catch (...) { @@ -184,7 +184,7 @@ class GraphIteratorProto : public GraphIterator { "[TensorFlow Error] Internal Error: incorrect library map to cache function indices by names."); auto func = m_graph_def->library().function(func_ind); - auto func_ptr = std::make_shared<::ov_tensorflow::FunctionDef>(func); + auto func_ptr = std::make_shared<::tensorflow::FunctionDef>(func); return std::make_shared(m_graph_def, func_ptr, m_library_map, m_checkpoint_v1_reader); } diff --git a/src/frontends/tensorflow/src/graph_iterator_proto_txt.hpp b/src/frontends/tensorflow/src/graph_iterator_proto_txt.hpp index 523d863dbb0bdd..6d5b6494f764c5 100644 --- a/src/frontends/tensorflow/src/graph_iterator_proto_txt.hpp +++ b/src/frontends/tensorflow/src/graph_iterator_proto_txt.hpp @@ -62,7 +62,7 @@ class GraphIteratorProtoTxt : public GraphIteratorProto { if (!input_stream) { return false; } - auto graph_def = std::make_shared<::ov_tensorflow::GraphDef>(); + auto graph_def = std::make_shared<::tensorflow::GraphDef>(); auto is_parsed = ::google::protobuf::TextFormat::Parse(input_stream.get(), graph_def.get()) && graph_def && graph_def->node_size() > 0; return is_parsed; diff --git a/src/frontends/tensorflow/src/graph_iterator_saved_model.cpp b/src/frontends/tensorflow/src/graph_iterator_saved_model.cpp index 7c9af8216a910f..803e7d694bc69a 100644 --- a/src/frontends/tensorflow/src/graph_iterator_saved_model.cpp +++ 
b/src/frontends/tensorflow/src/graph_iterator_saved_model.cpp @@ -10,26 +10,26 @@ #include #include "openvino/core/type/element_type.hpp" -#include "tensor_bundle.pb.h" -#include "trackable_object_graph.pb.h" +#include "ov_tensorflow/tensor_bundle.pb.h" +#include "ov_tensorflow/trackable_object_graph.pb.h" namespace ov { namespace frontend { namespace tensorflow { -bool GraphIteratorSavedModel::is_valid_signature(const ::ov_tensorflow::SignatureDef& signature) const { - const std::map<::ov_tensorflow::DataType, ov::element::Type> types{ - {::ov_tensorflow::DataType::DT_BOOL, ov::element::boolean}, - {::ov_tensorflow::DataType::DT_INT16, ov::element::i16}, - {::ov_tensorflow::DataType::DT_INT32, ov::element::i32}, - {::ov_tensorflow::DataType::DT_INT64, ov::element::i64}, - {::ov_tensorflow::DataType::DT_HALF, ov::element::f16}, - {::ov_tensorflow::DataType::DT_FLOAT, ov::element::f32}, - {::ov_tensorflow::DataType::DT_DOUBLE, ov::element::f64}, - {::ov_tensorflow::DataType::DT_UINT8, ov::element::u8}, - {::ov_tensorflow::DataType::DT_INT8, ov::element::i8}, - {::ov_tensorflow::DataType::DT_BFLOAT16, ov::element::bf16}, - {::ov_tensorflow::DataType::DT_STRING, ov::element::dynamic}}; +bool GraphIteratorSavedModel::is_valid_signature(const ::tensorflow::SignatureDef& signature) const { + const std::map<::tensorflow::DataType, ov::element::Type> types{ + {::tensorflow::DataType::DT_BOOL, ov::element::boolean}, + {::tensorflow::DataType::DT_INT16, ov::element::i16}, + {::tensorflow::DataType::DT_INT32, ov::element::i32}, + {::tensorflow::DataType::DT_INT64, ov::element::i64}, + {::tensorflow::DataType::DT_HALF, ov::element::f16}, + {::tensorflow::DataType::DT_FLOAT, ov::element::f32}, + {::tensorflow::DataType::DT_DOUBLE, ov::element::f64}, + {::tensorflow::DataType::DT_UINT8, ov::element::u8}, + {::tensorflow::DataType::DT_INT8, ov::element::i8}, + {::tensorflow::DataType::DT_BFLOAT16, ov::element::bf16}, + {::tensorflow::DataType::DT_STRING, ov::element::dynamic}}; 
for (const auto& it : signature.inputs()) { if (it.second.name().empty() || types.find(it.second.dtype()) == types.end()) diff --git a/src/frontends/tensorflow/src/graph_iterator_saved_model.hpp b/src/frontends/tensorflow/src/graph_iterator_saved_model.hpp index 52b0ba75137835..4cb385e66f744d 100644 --- a/src/frontends/tensorflow/src/graph_iterator_saved_model.hpp +++ b/src/frontends/tensorflow/src/graph_iterator_saved_model.hpp @@ -8,7 +8,7 @@ #include "graph_iterator_proto.hpp" #include "openvino/util/file_util.hpp" -#include "saved_model.pb.h" +#include "ov_tensorflow/saved_model.pb.h" #include "variables_index.hpp" namespace ov { @@ -34,7 +34,7 @@ std::basic_string get_variables_index_name(); // Loads graph from Tensorflow Saved Model file (saved_model.pb) class GraphIteratorSavedModel : public GraphIteratorProto { - std::shared_ptr<::ov_tensorflow::SavedModel> m_saved_model; + std::shared_ptr<::tensorflow::SavedModel> m_saved_model; std::shared_ptr m_variables_index; std::shared_ptr> m_inputs_map; std::shared_ptr> m_outputs_map; @@ -43,7 +43,7 @@ class GraphIteratorSavedModel : public GraphIteratorProto { public: template GraphIteratorSavedModel(const std::basic_string& path, const std::string& tags, const bool mmap_enabled) - : m_saved_model(std::make_shared<::ov_tensorflow::SavedModel>()), + : m_saved_model(std::make_shared<::tensorflow::SavedModel>()), m_mmap_enabled(mmap_enabled) { this->read_saved_model(path, tags); } @@ -66,7 +66,7 @@ class GraphIteratorSavedModel : public GraphIteratorProto { } private: - bool is_valid_signature(const ::ov_tensorflow::SignatureDef& signature) const; + bool is_valid_signature(const ::tensorflow::SignatureDef& signature) const; template bool read_saved_model(const std::basic_string& path, const std::string& tags) { @@ -141,11 +141,11 @@ class GraphIteratorSavedModel : public GraphIteratorProto { } /// \brief Does a loading of exact meta-graph - bool load_meta_graph(const ::ov_tensorflow::MetaGraphDef& meta_graph) { - 
std::map validSignatures = {}; + bool load_meta_graph(const ::tensorflow::MetaGraphDef& meta_graph) { + std::map validSignatures = {}; for (const auto& sit : meta_graph.signature_def()) { const std::string& key = sit.first; - const ::ov_tensorflow::SignatureDef& val = sit.second; + const ::tensorflow::SignatureDef& val = sit.second; if (is_valid_signature(val)) { validSignatures[key] = &val; } @@ -167,7 +167,7 @@ class GraphIteratorSavedModel : public GraphIteratorProto { } } - m_graph_def = std::make_shared<::ov_tensorflow::GraphDef>(meta_graph.graph_def()); + m_graph_def = std::make_shared<::tensorflow::GraphDef>(meta_graph.graph_def()); // Update variables map using information by resolving AssignVariableOp graph nodes std::map var_map; diff --git a/src/frontends/tensorflow/src/op/var_handle.cpp b/src/frontends/tensorflow/src/op/var_handle.cpp index edca2d2bca8cb0..0c86041440a8ff 100644 --- a/src/frontends/tensorflow/src/op/var_handle.cpp +++ b/src/frontends/tensorflow/src/op/var_handle.cpp @@ -10,7 +10,7 @@ #include "ngraph/runtime/shared_buffer.hpp" #include "openvino/opsets/opset8.hpp" #include "openvino/util/mmap_object.hpp" -#include "tensor_bundle.pb.h" +#include "ov_tensorflow/tensor_bundle.pb.h" using namespace std; using namespace ov::opset8; @@ -26,7 +26,7 @@ template static std::shared_ptr read_variable(std::shared_ptr var_index, const ov::element::Type ov_type, const ov::Shape shape, - const ::ov_tensorflow::BundleEntryProto& entry, + const ::tensorflow::BundleEntryProto& entry, const NodeContext& node) { google::protobuf::int64 size = 1; for (uint64_t i = 0; i < shape.size(); ++i) { @@ -95,7 +95,7 @@ OutputVector translate_varhandle_op(const NodeContext& node) { TENSORFLOW_OP_VALIDATION(node, result, "[TensorFlow Frontend] Internal error: Cannot find requested variable."); - ::ov_tensorflow::BundleEntryProto entry; + ::tensorflow::BundleEntryProto entry; TENSORFLOW_OP_VALIDATION(node, entry.ParseFromArray(entry_data, static_cast(entry_size)), 
"[TensorFlow Frontend] Internal error: Cannot get read bundle entry."); diff --git a/src/frontends/tensorflow/src/op/xla_conv_v2.cpp b/src/frontends/tensorflow/src/op/xla_conv_v2.cpp index 605b2c5f51e209..2d6ecdfa7bfb73 100644 --- a/src/frontends/tensorflow/src/op/xla_conv_v2.cpp +++ b/src/frontends/tensorflow/src/op/xla_conv_v2.cpp @@ -14,13 +14,13 @@ #include "openvino/op/shape_of.hpp" #include "openvino/op/slice.hpp" #include "openvino/op/transpose.hpp" +#include "ov_tensorflow/xla_data.pb.h" #include "utils.hpp" -#include "xla_data.pb.h" using namespace std; using namespace ov; using namespace ov::op; -using namespace ov_xla; +using namespace xla; namespace ov { namespace frontend { diff --git a/src/frontends/tensorflow/src/op/xla_dot.cpp b/src/frontends/tensorflow/src/op/xla_dot.cpp index 00493e1385d7b2..b4c38519ce210c 100644 --- a/src/frontends/tensorflow/src/op/xla_dot.cpp +++ b/src/frontends/tensorflow/src/op/xla_dot.cpp @@ -13,8 +13,8 @@ #include "openvino/op/shape_of.hpp" #include "openvino/op/transpose.hpp" #include "openvino/op/unsqueeze.hpp" +#include "ov_tensorflow/xla_data.pb.h" #include "utils.hpp" -#include "xla_data.pb.h" using namespace std; using namespace ov; @@ -92,7 +92,7 @@ OutputVector translate_xla_dot_op(const NodeContext& node) { auto rhs = node.get_input(1); auto node_name = node.get_name(); auto dimension_numbers_message = node.get_attribute("dimension_numbers"); - ::ov_xla::DotDimensionNumbers dimension_numbers; + ::xla::DotDimensionNumbers dimension_numbers; TENSORFLOW_OP_VALIDATION( node, dimension_numbers.ParseFromArray(dimension_numbers_message.data(), diff --git a/src/frontends/tensorflow/src/op_table.cpp b/src/frontends/tensorflow/src/op_table.cpp index bc1a657faf54fb..149b2d76184497 100644 --- a/src/frontends/tensorflow/src/op_table.cpp +++ b/src/frontends/tensorflow/src/op_table.cpp @@ -278,9 +278,12 @@ const std::map get_supported_ops() { {"TensorListReserve", CreatorFunction(translate_tensor_list_reserve_op)}, 
{"TensorListResize", CreatorFunction(translate_tensor_list_resize_op)}, {"Tile", CreatorFunction(translate_tile_op)}, + {"ToBool", CreatorFunction(translate_tobool_op)}, {"TopK", CreatorFunction(translate_top_k_op)}, {"TopKV2", CreatorFunction(translate_top_k_v2_op)}, {"Transpose", CreatorFunction(translate_transpose_op)}, + {"TruncateDiv", CreatorFunction(translate_truncate_div_op)}, + {"TruncateMod", CreatorFunction(translate_truncate_mod_op)}, {"Unpack", CreatorFunction(translate_unpack_op)}, {"UnravelIndex", CreatorFunction(translate_unravel_index_op)}, {"UnsortedSegmentSum", CreatorFunction(translate_unsorted_segment_sum_op)}, diff --git a/src/frontends/tensorflow/src/proto/allocation_description.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/allocation_description.proto similarity index 98% rename from src/frontends/tensorflow/src/proto/allocation_description.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/allocation_description.proto index 589f1bf597b0fb..8932ca2cb33b33 100644 --- a/src/frontends/tensorflow/src/proto/allocation_description.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/allocation_description.proto @@ -12,7 +12,7 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; option cc_enable_arenas = true; option java_outer_classname = "AllocationDescriptionProtos"; diff --git a/src/frontends/tensorflow/src/proto/api_def.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/api_def.proto similarity index 98% rename from src/frontends/tensorflow/src/proto/api_def.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/api_def.proto index 31139f89855e65..cbb581973d32bb 100644 --- a/src/frontends/tensorflow/src/proto/api_def.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/api_def.proto @@ -15,13 +15,13 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; option cc_enable_arenas = true; option 
java_outer_classname = "ApiDefProtos"; option java_multiple_files = true; option java_package = "org.tensorflow.framework"; option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/api_def_go_proto"; -import "attr_value.proto"; +import "ov_tensorflow/attr_value.proto"; // Used to specify and override the default API & behavior in the // generated code for client languages, from what you would get from diff --git a/src/frontends/tensorflow/src/proto/attr_value.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/attr_value.proto similarity index 95% rename from src/frontends/tensorflow/src/proto/attr_value.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/attr_value.proto index c42f78ac45a42b..b903c30cf99276 100644 --- a/src/frontends/tensorflow/src/proto/attr_value.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/attr_value.proto @@ -12,11 +12,11 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "tensor.proto"; -import "tensor_shape.proto"; -import "types.proto"; +import "ov_tensorflow/tensor.proto"; +import "ov_tensorflow/tensor_shape.proto"; +import "ov_tensorflow/types.proto"; option cc_enable_arenas = true; option java_outer_classname = "AttrValueProtos"; diff --git a/src/frontends/tensorflow/src/proto/cost_graph.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/cost_graph.proto similarity index 97% rename from src/frontends/tensorflow/src/proto/cost_graph.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/cost_graph.proto index db348eb8860847..8e4d9788f49595 100644 --- a/src/frontends/tensorflow/src/proto/cost_graph.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/cost_graph.proto @@ -12,10 +12,10 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "tensor_shape.proto"; -import "types.proto"; +import "ov_tensorflow/tensor_shape.proto"; +import 
"ov_tensorflow/types.proto"; option cc_enable_arenas = true; option java_outer_classname = "CostGraphProtos"; diff --git a/src/frontends/tensorflow/src/proto/dataset_options.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/dataset_options.proto similarity index 99% rename from src/frontends/tensorflow/src/proto/dataset_options.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/dataset_options.proto index be7a0d8efd0c61..dc492a60fe0ebe 100644 --- a/src/frontends/tensorflow/src/proto/dataset_options.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/dataset_options.proto @@ -12,7 +12,7 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow.data; +package tensorflow.data; // Represents the type of auto-sharding we enable. enum AutoShardPolicy { diff --git a/src/frontends/tensorflow/src/proto/device_attributes.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/device_attributes.proto similarity index 98% rename from src/frontends/tensorflow/src/proto/device_attributes.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/device_attributes.proto index bec64f2744124b..92c8a6b2d191bc 100644 --- a/src/frontends/tensorflow/src/proto/device_attributes.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/device_attributes.proto @@ -12,7 +12,7 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; option cc_enable_arenas = true; option java_outer_classname = "DeviceAttributesProtos"; diff --git a/src/frontends/tensorflow/src/proto/function.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/function.proto similarity index 97% rename from src/frontends/tensorflow/src/proto/function.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/function.proto index 271126ac0f4687..9e84731c983bb1 100644 --- a/src/frontends/tensorflow/src/proto/function.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/function.proto @@ -12,11 +12,11 @@ 
limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "attr_value.proto"; -import "node_def.proto"; -import "op_def.proto"; +import "ov_tensorflow/attr_value.proto"; +import "ov_tensorflow/node_def.proto"; +import "ov_tensorflow/op_def.proto"; option cc_enable_arenas = true; option java_outer_classname = "FunctionProtos"; diff --git a/src/frontends/tensorflow/src/proto/graph.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/graph.proto similarity index 95% rename from src/frontends/tensorflow/src/proto/graph.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/graph.proto index 97bf8002700d0a..e047abeafe18b1 100644 --- a/src/frontends/tensorflow/src/proto/graph.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/graph.proto @@ -12,11 +12,11 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "function.proto"; -import "node_def.proto"; -import "versions.proto"; +import "ov_tensorflow/function.proto"; +import "ov_tensorflow/node_def.proto"; +import "ov_tensorflow/versions.proto"; option cc_enable_arenas = true; option java_outer_classname = "GraphProtos"; diff --git a/src/frontends/tensorflow/src/proto/graph_transfer_info.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/graph_transfer_info.proto similarity index 97% rename from src/frontends/tensorflow/src/proto/graph_transfer_info.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/graph_transfer_info.proto index 821e7619cc8488..9e7d598e34a5c1 100644 --- a/src/frontends/tensorflow/src/proto/graph_transfer_info.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/graph_transfer_info.proto @@ -12,9 +12,9 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "types.proto"; +import "ov_tensorflow/types.proto"; option cc_enable_arenas = true; option java_outer_classname = "GraphTransferInfoProto"; diff 
--git a/src/frontends/tensorflow/src/proto/kernel_def.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/kernel_def.proto similarity index 97% rename from src/frontends/tensorflow/src/proto/kernel_def.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/kernel_def.proto index a8d0daeaa9ef20..88142d3de9584d 100644 --- a/src/frontends/tensorflow/src/proto/kernel_def.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/kernel_def.proto @@ -12,9 +12,9 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "attr_value.proto"; +import "ov_tensorflow/attr_value.proto"; option cc_enable_arenas = true; option java_outer_classname = "KernelDefProtos"; diff --git a/src/frontends/tensorflow/src/proto/log_memory.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/log_memory.proto similarity index 97% rename from src/frontends/tensorflow/src/proto/log_memory.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/log_memory.proto index 39ea81659c7eda..62489f0e0b8df4 100644 --- a/src/frontends/tensorflow/src/proto/log_memory.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/log_memory.proto @@ -12,9 +12,9 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "tensor_description.proto"; +import "ov_tensorflow/tensor_description.proto"; option cc_enable_arenas = true; option java_outer_classname = "LogMemoryProtos"; diff --git a/src/frontends/tensorflow/src/proto/meta_graph.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/meta_graph.proto similarity index 97% rename from src/frontends/tensorflow/src/proto/meta_graph.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/meta_graph.proto index 48f4c4b5e83860..255fb6efeb2f9e 100644 --- a/src/frontends/tensorflow/src/proto/meta_graph.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/meta_graph.proto @@ -12,16 +12,16 @@ limitations under the License.*/ 
syntax = "proto3"; -package ov_tensorflow; +package tensorflow; import "google/protobuf/any.proto"; -import "graph.proto"; -import "op_def.proto"; -import "tensor_shape.proto"; -import "types.proto"; -import "saved_object_graph.proto"; -import "saver.proto"; -import "struct.proto"; +import "ov_tensorflow/graph.proto"; +import "ov_tensorflow/op_def.proto"; +import "ov_tensorflow/tensor_shape.proto"; +import "ov_tensorflow/types.proto"; +import "ov_tensorflow/saved_object_graph.proto"; +import "ov_tensorflow/saver.proto"; +import "ov_tensorflow/struct.proto"; option cc_enable_arenas = true; option java_outer_classname = "MetaGraphProtos"; diff --git a/src/frontends/tensorflow/src/proto/model.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/model.proto similarity index 99% rename from src/frontends/tensorflow/src/proto/model.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/model.proto index 1614f284b7fd55..a6567d462b8772 100644 --- a/src/frontends/tensorflow/src/proto/model.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/model.proto @@ -12,7 +12,7 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow.data.model; +package tensorflow.data.model; option cc_enable_arenas = true; diff --git a/src/frontends/tensorflow/src/proto/node_def.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/node_def.proto similarity index 98% rename from src/frontends/tensorflow/src/proto/node_def.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/node_def.proto index 848c27d4c4c1f8..b8f3a017a30fc5 100644 --- a/src/frontends/tensorflow/src/proto/node_def.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/node_def.proto @@ -12,9 +12,9 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "attr_value.proto"; +import "ov_tensorflow/attr_value.proto"; option cc_enable_arenas = true; option java_outer_classname = "NodeProto"; diff --git 
a/src/frontends/tensorflow/src/proto/op_def.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/op_def.proto similarity index 98% rename from src/frontends/tensorflow/src/proto/op_def.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/op_def.proto index d44526f059c548..31493fed26ce55 100644 --- a/src/frontends/tensorflow/src/proto/op_def.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/op_def.proto @@ -12,15 +12,16 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; option cc_enable_arenas = true; option java_outer_classname = "OpDefProtos"; option java_multiple_files = true; option java_package = "org.tensorflow.framework"; option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/op_def_go_proto"; -import "attr_value.proto"; -import "types.proto"; -import "resource_handle.proto"; + +import "ov_tensorflow/attr_value.proto"; +import "ov_tensorflow/types.proto"; +import "ov_tensorflow/resource_handle.proto"; // Defines an operation. A NodeDef in a GraphDef specifies an Op by // using the "op" field which should match the name of a OpDef. 
diff --git a/src/frontends/tensorflow/src/proto/reader_base.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/reader_base.proto similarity index 98% rename from src/frontends/tensorflow/src/proto/reader_base.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/reader_base.proto index e51e3781ddc6d1..0c3536600e6f24 100644 --- a/src/frontends/tensorflow/src/proto/reader_base.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/reader_base.proto @@ -12,7 +12,7 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; option cc_enable_arenas = true; option java_outer_classname = "ReaderBaseProtos"; diff --git a/src/frontends/tensorflow/src/proto/remote_fused_graph_execute_info.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/remote_fused_graph_execute_info.proto similarity index 93% rename from src/frontends/tensorflow/src/proto/remote_fused_graph_execute_info.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/remote_fused_graph_execute_info.proto index b94ee5e6f1b892..3b17878e127cf9 100644 --- a/src/frontends/tensorflow/src/proto/remote_fused_graph_execute_info.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/remote_fused_graph_execute_info.proto @@ -12,11 +12,11 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "graph.proto"; -import "tensor_shape.proto"; -import "types.proto"; +import "ov_tensorflow/graph.proto"; +import "ov_tensorflow/tensor_shape.proto"; +import "ov_tensorflow/types.proto"; option cc_enable_arenas = true; option java_outer_classname = "RemoteFusedGraphExecuteInfoProto"; diff --git a/src/frontends/tensorflow/src/proto/resource_handle.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/resource_handle.proto similarity index 95% rename from src/frontends/tensorflow/src/proto/resource_handle.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/resource_handle.proto index 
55345d0302a428..19b4dcc3b84ded 100644 --- a/src/frontends/tensorflow/src/proto/resource_handle.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/resource_handle.proto @@ -12,10 +12,10 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "tensor_shape.proto"; -import "types.proto"; +import "ov_tensorflow/tensor_shape.proto"; +import "ov_tensorflow/types.proto"; option cc_enable_arenas = true; option java_outer_classname = "ResourceHandle"; diff --git a/src/frontends/tensorflow/src/proto/saved_model.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/saved_model.proto similarity index 95% rename from src/frontends/tensorflow/src/proto/saved_model.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/saved_model.proto index 75a809070b59e0..f8660655229245 100644 --- a/src/frontends/tensorflow/src/proto/saved_model.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/saved_model.proto @@ -12,9 +12,9 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "meta_graph.proto"; +import "ov_tensorflow/meta_graph.proto"; option cc_enable_arenas = true; option java_outer_classname = "SavedModelProtos"; diff --git a/src/frontends/tensorflow/src/proto/saved_object_graph.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/saved_object_graph.proto similarity index 97% rename from src/frontends/tensorflow/src/proto/saved_object_graph.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/saved_object_graph.proto index 9ce18710a14954..d0b2170044966c 100644 --- a/src/frontends/tensorflow/src/proto/saved_object_graph.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/saved_object_graph.proto @@ -12,15 +12,15 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; import "google/protobuf/any.proto"; -import "tensor_shape.proto"; -import "types.proto"; -import "variable.proto"; 
-import "versions.proto"; -import "struct.proto"; -import "trackable_object_graph.proto"; +import "ov_tensorflow/tensor_shape.proto"; +import "ov_tensorflow/types.proto"; +import "ov_tensorflow/variable.proto"; +import "ov_tensorflow/versions.proto"; +import "ov_tensorflow/struct.proto"; +import "ov_tensorflow/trackable_object_graph.proto"; option cc_enable_arenas = true; option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; diff --git a/src/frontends/tensorflow/src/proto/saved_tensor_slice.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/saved_tensor_slice.proto similarity index 94% rename from src/frontends/tensorflow/src/proto/saved_tensor_slice.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/saved_tensor_slice.proto index 6d13b1f27aa455..9e628752bb1f5c 100644 --- a/src/frontends/tensorflow/src/proto/saved_tensor_slice.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/saved_tensor_slice.proto @@ -29,17 +29,17 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; option cc_enable_arenas = true; option java_outer_classname = "SavedTensorSliceProtos"; option java_multiple_files = true; option java_package = "org.tensorflow.util"; -import "tensor_shape.proto"; -import "tensor_slice.proto"; -import "tensor.proto"; -import "types.proto"; -import "versions.proto"; +import "ov_tensorflow/tensor_shape.proto"; +import "ov_tensorflow/tensor_slice.proto"; +import "ov_tensorflow/tensor.proto"; +import "ov_tensorflow/types.proto"; +import "ov_tensorflow/versions.proto"; // Metadata describing the set of slices of the same tensor saved in a // checkpoint file. 
diff --git a/src/frontends/tensorflow/src/proto/saver.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/saver.proto similarity index 99% rename from src/frontends/tensorflow/src/proto/saver.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/saver.proto index 634397b1ee04b3..7834f473e4ccdf 100644 --- a/src/frontends/tensorflow/src/proto/saver.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/saver.proto @@ -12,7 +12,7 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; option cc_enable_arenas = true; option java_outer_classname = "SaverProtos"; diff --git a/src/frontends/tensorflow/src/proto/step_stats.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/step_stats.proto similarity index 96% rename from src/frontends/tensorflow/src/proto/step_stats.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/step_stats.proto index b2524e28a807fa..027a1d79ee22e4 100644 --- a/src/frontends/tensorflow/src/proto/step_stats.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/step_stats.proto @@ -12,10 +12,10 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "allocation_description.proto"; -import "tensor_description.proto"; +import "ov_tensorflow/allocation_description.proto"; +import "ov_tensorflow/tensor_description.proto"; option cc_enable_arenas = true; option java_outer_classname = "StepStatsProtos"; diff --git a/src/frontends/tensorflow/src/proto/struct.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/struct.proto similarity index 93% rename from src/frontends/tensorflow/src/proto/struct.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/struct.proto index 7da836debd7e76..4126bd98c4a3d3 100644 --- a/src/frontends/tensorflow/src/proto/struct.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/struct.proto @@ -12,11 +12,11 @@ limitations under the License.*/ syntax = "proto3"; 
-package ov_tensorflow; +package tensorflow; -import "tensor.proto"; -import "tensor_shape.proto"; -import "types.proto"; +import "ov_tensorflow/tensor.proto"; +import "ov_tensorflow/tensor_shape.proto"; +import "ov_tensorflow/types.proto"; option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; @@ -66,9 +66,9 @@ message StructuredValue { bool bool_value = 14; // Represents a TensorShape. - ov_tensorflow.TensorShapeProto tensor_shape_value = 31; + tensorflow.TensorShapeProto tensor_shape_value = 31; // Represents an enum value for dtype. - ov_tensorflow.DataType tensor_dtype_value = 32; + tensorflow.DataType tensor_dtype_value = 32; // Represents a value for tf.TensorSpec. TensorSpecProto tensor_spec_value = 33; // Represents a value for tf.TypeSpec. @@ -121,17 +121,17 @@ message NamedTupleValue { // A protobuf to represent tf.TensorSpec. message TensorSpecProto { string name = 1; - ov_tensorflow.TensorShapeProto shape = 2; - ov_tensorflow.DataType dtype = 3; + tensorflow.TensorShapeProto shape = 2; + tensorflow.DataType dtype = 3; } // A protobuf to represent tf.BoundedTensorSpec. 
message BoundedTensorSpecProto { string name = 1; - ov_tensorflow.TensorShapeProto shape = 2; - ov_tensorflow.DataType dtype = 3; - ov_tensorflow.TensorProto minimum = 4; - ov_tensorflow.TensorProto maximum = 5; + tensorflow.TensorShapeProto shape = 2; + tensorflow.DataType dtype = 3; + tensorflow.TensorProto minimum = 4; + tensorflow.TensorProto maximum = 5; } // Represents a tf.TypeSpec diff --git a/src/frontends/tensorflow/src/proto/summary.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/summary.proto similarity index 99% rename from src/frontends/tensorflow/src/proto/summary.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/summary.proto index 16bc6235bfb1b3..ce326176947dd4 100644 --- a/src/frontends/tensorflow/src/proto/summary.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/summary.proto @@ -12,9 +12,9 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "tensor.proto"; +import "ov_tensorflow/tensor.proto"; option cc_enable_arenas = true; option java_outer_classname = "SummaryProtos"; diff --git a/src/frontends/tensorflow/src/proto/tensor.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/tensor.proto similarity index 96% rename from src/frontends/tensorflow/src/proto/tensor.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/tensor.proto index 85fd170596eefe..42f063536e09e0 100644 --- a/src/frontends/tensorflow/src/proto/tensor.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/tensor.proto @@ -12,11 +12,11 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "resource_handle.proto"; -import "tensor_shape.proto"; -import "types.proto"; +import "ov_tensorflow/resource_handle.proto"; +import "ov_tensorflow/tensor_shape.proto"; +import "ov_tensorflow/types.proto"; option cc_enable_arenas = true; option java_outer_classname = "TensorProtos"; diff --git 
a/src/frontends/tensorflow/src/proto/tensor_bundle.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_bundle.proto similarity index 93% rename from src/frontends/tensorflow/src/proto/tensor_bundle.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_bundle.proto index 48bf6be520920b..21af38195c4e11 100644 --- a/src/frontends/tensorflow/src/proto/tensor_bundle.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_bundle.proto @@ -12,12 +12,12 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "tensor_shape.proto"; -import "tensor_slice.proto"; -import "types.proto"; -import "versions.proto"; +import "ov_tensorflow/tensor_shape.proto"; +import "ov_tensorflow/tensor_slice.proto"; +import "ov_tensorflow/types.proto"; +import "ov_tensorflow/versions.proto"; option cc_enable_arenas = true; option java_outer_classname = "TensorBundleProtos"; diff --git a/src/frontends/tensorflow/src/proto/tensor_description.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_description.proto similarity index 88% rename from src/frontends/tensorflow/src/proto/tensor_description.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_description.proto index 86ecbe2b3e4047..c03e1311c1f386 100644 --- a/src/frontends/tensorflow/src/proto/tensor_description.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_description.proto @@ -12,11 +12,11 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; -import "allocation_description.proto"; -import "tensor_shape.proto"; -import "types.proto"; +import "ov_tensorflow/allocation_description.proto"; +import "ov_tensorflow/tensor_shape.proto"; +import "ov_tensorflow/types.proto"; option cc_enable_arenas = true; option java_outer_classname = "TensorDescriptionProtos"; diff --git a/src/frontends/tensorflow/src/proto/tensor_shape.proto 
b/src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_shape.proto similarity index 99% rename from src/frontends/tensorflow/src/proto/tensor_shape.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_shape.proto index 48d821006c4989..0a7515def63931 100644 --- a/src/frontends/tensorflow/src/proto/tensor_shape.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_shape.proto @@ -19,7 +19,7 @@ option java_multiple_files = true; option java_package = "org.tensorflow.framework"; option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_shape_go_proto"; -package ov_tensorflow; +package tensorflow; // Dimensions of a tensor. message TensorShapeProto { diff --git a/src/frontends/tensorflow/src/proto/tensor_slice.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_slice.proto similarity index 98% rename from src/frontends/tensorflow/src/proto/tensor_slice.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_slice.proto index b30c61eca33361..415012483056d3 100644 --- a/src/frontends/tensorflow/src/proto/tensor_slice.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/tensor_slice.proto @@ -14,7 +14,7 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; option cc_enable_arenas = true; option java_outer_classname = "TensorSliceProtos"; diff --git a/src/frontends/tensorflow/src/proto/trackable_object_graph.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/trackable_object_graph.proto similarity index 99% rename from src/frontends/tensorflow/src/proto/trackable_object_graph.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/trackable_object_graph.proto index 748be64410c002..f0a9617432f617 100644 --- a/src/frontends/tensorflow/src/proto/trackable_object_graph.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/trackable_object_graph.proto @@ -12,7 +12,7 @@ limitations under the License.*/ syntax = 
"proto3"; -package ov_tensorflow; +package tensorflow; import "google/protobuf/wrappers.proto"; diff --git a/src/frontends/tensorflow/src/proto/types.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/types.proto similarity index 99% rename from src/frontends/tensorflow/src/proto/types.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/types.proto index a50586760a7cdf..0a60332f662397 100644 --- a/src/frontends/tensorflow/src/proto/types.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/types.proto @@ -12,7 +12,7 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; option cc_enable_arenas = true; option java_outer_classname = "TypesProtos"; option java_multiple_files = true; diff --git a/src/frontends/tensorflow/src/proto/variable.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/variable.proto similarity index 99% rename from src/frontends/tensorflow/src/proto/variable.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/variable.proto index 297638e7bc649a..6e9a05d1291b23 100644 --- a/src/frontends/tensorflow/src/proto/variable.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/variable.proto @@ -12,7 +12,7 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; option cc_enable_arenas = true; option java_outer_classname = "VariableProtos"; diff --git a/src/frontends/tensorflow/src/proto/versions.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/versions.proto similarity index 98% rename from src/frontends/tensorflow/src/proto/versions.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/versions.proto index 0fc46788dc2078..31a6623cf71a90 100644 --- a/src/frontends/tensorflow/src/proto/versions.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/versions.proto @@ -12,7 +12,7 @@ limitations under the License.*/ syntax = "proto3"; -package ov_tensorflow; +package tensorflow; option 
cc_enable_arenas = true; option java_outer_classname = "VersionsProtos"; diff --git a/src/frontends/tensorflow/src/proto/xla_data.proto b/src/frontends/tensorflow/src/proto/ov_tensorflow/xla_data.proto similarity index 99% rename from src/frontends/tensorflow/src/proto/xla_data.proto rename to src/frontends/tensorflow/src/proto/ov_tensorflow/xla_data.proto index 42ff8065983f77..95695ba78a2974 100644 --- a/src/frontends/tensorflow/src/proto/xla_data.proto +++ b/src/frontends/tensorflow/src/proto/ov_tensorflow/xla_data.proto @@ -15,7 +15,7 @@ limitations under the License. syntax = "proto3"; -package ov_xla; +package xla; option cc_enable_arenas = true; diff --git a/src/frontends/tensorflow/src/tf_utils.cpp b/src/frontends/tensorflow/src/tf_utils.cpp index 1c7df199a851a6..c72e8e7bb9080a 100644 --- a/src/frontends/tensorflow/src/tf_utils.cpp +++ b/src/frontends/tensorflow/src/tf_utils.cpp @@ -83,7 +83,7 @@ void extract_tensor_content(const string& tensor_content, Tensor* values) { # pragma warning(disable : 4267) // possible loss of data #endif template -void extract_compressed_tensor_content(const ::ov_tensorflow::TensorProto& tensor_proto, +void extract_compressed_tensor_content(const ::tensorflow::TensorProto& tensor_proto, int64_t val_size, Tensor* values) { auto val_lastsaved = static_cast(0); @@ -149,30 +149,30 @@ bool CfMarkerType::is_copyable() const { return false; } -Type get_ov_type(const ::ov_tensorflow::DataType& type) { - static const map<::ov_tensorflow::DataType, Type> type_map{{::ov_tensorflow::DataType::DT_BOOL, boolean}, - {::ov_tensorflow::DataType::DT_INT16, i16}, - {::ov_tensorflow::DataType::DT_INT32, i32}, - {::ov_tensorflow::DataType::DT_INT64, i64}, - {::ov_tensorflow::DataType::DT_HALF, f16}, - {::ov_tensorflow::DataType::DT_FLOAT, f32}, - {::ov_tensorflow::DataType::DT_DOUBLE, f64}, - {::ov_tensorflow::DataType::DT_UINT8, u8}, - {::ov_tensorflow::DataType::DT_INT8, i8}, - {::ov_tensorflow::DataType::DT_BFLOAT16, bf16}}; +Type 
get_ov_type(const ::tensorflow::DataType& type) { + static const map<::tensorflow::DataType, Type> type_map{{::tensorflow::DataType::DT_BOOL, boolean}, + {::tensorflow::DataType::DT_INT16, i16}, + {::tensorflow::DataType::DT_INT32, i32}, + {::tensorflow::DataType::DT_INT64, i64}, + {::tensorflow::DataType::DT_HALF, f16}, + {::tensorflow::DataType::DT_FLOAT, f32}, + {::tensorflow::DataType::DT_DOUBLE, f64}, + {::tensorflow::DataType::DT_UINT8, u8}, + {::tensorflow::DataType::DT_INT8, i8}, + {::tensorflow::DataType::DT_BFLOAT16, bf16}}; auto it = type_map.find(type); // for all unsupported types return dynamic type return it == type_map.end() ? dynamic : it->second; } -Any unpack_tensor_proto(const ::ov_tensorflow::TensorProto& tensor_proto) { +Any unpack_tensor_proto(const ::tensorflow::TensorProto& tensor_proto) { return unpack_tensor_proto(tensor_proto, tensor_proto.tensor_shape(), tensor_proto.dtype()); } -Any unpack_tensor_proto(const ::ov_tensorflow::TensorProto& tensor_proto, - const ::ov_tensorflow::TensorShapeProto& tensor_shape, - const ::ov_tensorflow::DataType& tensor_type) { +Any unpack_tensor_proto(const ::tensorflow::TensorProto& tensor_proto, + const ::tensorflow::TensorShapeProto& tensor_shape, + const ::tensorflow::DataType& tensor_type) { PartialShape pshape; for (int i = 0; i < tensor_shape.dim_size(); i++) { pshape.push_back(tensor_shape.dim(i).size()); @@ -180,7 +180,7 @@ Any unpack_tensor_proto(const ::ov_tensorflow::TensorProto& tensor_proto, FRONT_END_GENERAL_CHECK(pshape.is_static(), "Dynamic shapes are not supported for Tensor attribute."); Type ov_type = get_ov_type(tensor_type); - if (tensor_type != ::ov_tensorflow::DataType::DT_STRING) { + if (tensor_type != ::tensorflow::DataType::DT_STRING) { FRONT_END_GENERAL_CHECK( ov_type.is_static(), "Encountered unknown element type " + DataType_Name(tensor_type) + " on an empty tensor_proto"); diff --git a/src/frontends/tensorflow/src/tf_utils.hpp b/src/frontends/tensorflow/src/tf_utils.hpp index 
286ce1440bc638..861fb56f552685 100644 --- a/src/frontends/tensorflow/src/tf_utils.hpp +++ b/src/frontends/tensorflow/src/tf_utils.hpp @@ -4,8 +4,6 @@ #pragma once -#include "attr_value.pb.h" -#include "node_def.pb.h" #include "openvino/core/node.hpp" #include "openvino/core/partial_shape.hpp" #include "openvino/core/runtime_attribute.hpp" @@ -14,9 +12,11 @@ #include "openvino/frontend/node_context.hpp" #include "openvino/op/loop.hpp" #include "openvino/runtime/tensor.hpp" -#include "tensor.pb.h" -#include "tensor_shape.pb.h" -#include "types.pb.h" +#include "ov_tensorflow/attr_value.pb.h" +#include "ov_tensorflow/node_def.pb.h" +#include "ov_tensorflow/tensor.pb.h" +#include "ov_tensorflow/tensor_shape.pb.h" +#include "ov_tensorflow/types.pb.h" namespace ov { namespace frontend { @@ -24,13 +24,13 @@ namespace tensorflow { #define CF_MARKER_TAG "tf_cf_marker_tag" -ov::element::Type get_ov_type(const ::ov_tensorflow::DataType& type); +ov::element::Type get_ov_type(const ::tensorflow::DataType& type); -ov::Any unpack_tensor_proto(const ::ov_tensorflow::TensorProto& tensor_proto); +ov::Any unpack_tensor_proto(const ::tensorflow::TensorProto& tensor_proto); -ov::Any unpack_tensor_proto(const ::ov_tensorflow::TensorProto& tensor_proto, - const ::ov_tensorflow::TensorShapeProto& tensor_shape, - const ::ov_tensorflow::DataType& tensor_type); +ov::Any unpack_tensor_proto(const ::tensorflow::TensorProto& tensor_proto, + const ::tensorflow::TensorShapeProto& tensor_shape, + const ::tensorflow::DataType& tensor_type); class Switch; using SetOfSwitchNodes = std::unordered_set>; diff --git a/src/frontends/tensorflow/src/variables_index.cpp b/src/frontends/tensorflow/src/variables_index.cpp index cda18ca3ca7c7f..2dcf3faf9e0b0c 100644 --- a/src/frontends/tensorflow/src/variables_index.cpp +++ b/src/frontends/tensorflow/src/variables_index.cpp @@ -11,8 +11,8 @@ #include "graph_iterator_saved_model.hpp" #include "openvino/core/type/element_type.hpp" #include 
"openvino/util/mmap_object.hpp" -#include "tensor_bundle.pb.h" -#include "trackable_object_graph.pb.h" +#include "ov_tensorflow/tensor_bundle.pb.h" +#include "ov_tensorflow/trackable_object_graph.pb.h" #ifdef ENABLE_SNAPPY_COMPRESSION # include "snappy.h" @@ -126,7 +126,7 @@ void VariablesIndex::read_bundle_header() { auto item = m_variables_index.find(""); FRONT_END_GENERAL_CHECK(item != m_variables_index.end(), "Bundle Header isn't found in index"); - ::ov_tensorflow::BundleHeaderProto bundleHeader; + ::tensorflow::BundleHeaderProto bundleHeader; FRONT_END_GENERAL_CHECK(bundleHeader.ParseFromArray(item->second.data(), static_cast(item->second.size())), "Bundle Header: Cannot parse Bundle Header"); FRONT_END_GENERAL_CHECK(bundleHeader.version().producer() == 1, "Bundle Header: Unsupported producer version"); @@ -145,7 +145,7 @@ void VariablesIndex::read_checkpointable_object_graph() { return; } - ::ov_tensorflow::BundleEntryProto entry; + ::tensorflow::BundleEntryProto entry; FRONT_END_GENERAL_CHECK(entry.ParseFromArray(item->second.data(), static_cast(item->second.size())), "CMO: Cannot parse Bundle Entry"); @@ -155,7 +155,7 @@ void VariablesIndex::read_checkpointable_object_graph() { FRONT_END_GENERAL_CHECK(shard != m_data_files.end(), "CMO: data files isn't found"); std::vector data(entry.size()); - ::ov_tensorflow::TrackableObjectGraph tog; + ::tensorflow::TrackableObjectGraph tog; // TODO: have to understand this offset // It looks like reinterpret_cast artifact @@ -244,13 +244,13 @@ bool VariablesIndex::read_variables(std::ifstream& vi_stream, const std::wstring struct PtrNode { using SharedPtrNode = std::shared_ptr; - const ::ov_tensorflow::NodeDef* node; + const ::tensorflow::NodeDef* node; std::vector inputs; std::vector outputs; PtrNode() : node(nullptr), inputs(), outputs() {} - PtrNode(const ::ov_tensorflow::NodeDef& src_node) { + PtrNode(const ::tensorflow::NodeDef& src_node) { node = &src_node; } @@ -308,14 +308,14 @@ struct PtrNode { } }; -static 
void read_stateful_partitioned_call(const std::shared_ptr<::ov_tensorflow::GraphDef> graph_def, - const ::ov_tensorflow::NodeDef& partCall, +static void read_stateful_partitioned_call(const std::shared_ptr<::tensorflow::GraphDef> graph_def, + const ::tensorflow::NodeDef& partCall, std::map& node_dictionary) { FRONT_END_GENERAL_CHECK(partCall.op() == "StatefulPartitionedCall", "Passed node isn't StatefulPartitionedCall"); std::string func_name = partCall.attr().at("f").func().name(); - const ::ov_tensorflow::FunctionDef* func_def = nullptr; + const ::tensorflow::FunctionDef* func_def = nullptr; for (const auto& func : graph_def->library().function()) { if (func.signature().name() == func_name) { func_def = &func; @@ -365,7 +365,7 @@ static void read_stateful_partitioned_call(const std::shared_ptr<::ov_tensorflow } } -void VariablesIndex::map_assignvariable(const std::shared_ptr<::ov_tensorflow::GraphDef> graph_def, +void VariablesIndex::map_assignvariable(const std::shared_ptr<::tensorflow::GraphDef> graph_def, std::map& variables_map) { std::map nodes; diff --git a/src/frontends/tensorflow/src/variables_index.hpp b/src/frontends/tensorflow/src/variables_index.hpp index 2fb517e8e9b2c8..aa805b264bc3d1 100644 --- a/src/frontends/tensorflow/src/variables_index.hpp +++ b/src/frontends/tensorflow/src/variables_index.hpp @@ -9,7 +9,7 @@ #include "graph_iterator_proto.hpp" #include "openvino/util/file_util.hpp" #include "openvino/util/mmap_object.hpp" -#include "saved_model.pb.h" +#include "ov_tensorflow/saved_model.pb.h" namespace ov { namespace frontend { @@ -139,7 +139,7 @@ class VariablesIndex { /// It needs to map VarHandleOp to right place in .index file. 
/// \param[in] graph_def GraphDef object for analysis /// \param[out] variables_map Map of variables found in graph_def - static void map_assignvariable(const std::shared_ptr<::ov_tensorflow::GraphDef> graph_def, + static void map_assignvariable(const std::shared_ptr<::tensorflow::GraphDef> graph_def, std::map& variables_map); private: diff --git a/src/frontends/tensorflow_common/include/common_op_table.hpp b/src/frontends/tensorflow_common/include/common_op_table.hpp index 54f1dff243efd1..75a9bdcafc91ee 100644 --- a/src/frontends/tensorflow_common/include/common_op_table.hpp +++ b/src/frontends/tensorflow_common/include/common_op_table.hpp @@ -142,9 +142,12 @@ OP_CONVERTER(translate_tensor_list_set_item_op); OP_CONVERTER(translate_tensor_list_stack_op); OP_CONVERTER(translate_tensor_list_resize_op); OP_CONVERTER(translate_tile_op); +OP_CONVERTER(translate_tobool_op); OP_CONVERTER_NAMED(translate_top_k_op); OP_CONVERTER_NAMED(translate_top_k_v2_op); OP_CONVERTER(translate_transpose_op); +OP_CONVERTER(translate_truncate_div_op); +OP_CONVERTER(translate_truncate_mod_op); OP_CONVERTER(translate_unpack_op); OP_CONVERTER(translate_unravel_index_op); OP_CONVERTER(translate_unsorted_segment_sum_op); diff --git a/src/frontends/tensorflow_common/src/op/tobool.cpp b/src/frontends/tensorflow_common/src/op/tobool.cpp new file mode 100644 index 00000000000000..a8d595800a4f5c --- /dev/null +++ b/src/frontends/tensorflow_common/src/op/tobool.cpp @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_op_table.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/equal.hpp" +#include "openvino/op/greater.hpp" +#include "openvino/op/logical_and.hpp" +#include "openvino/op/logical_or.hpp" +#include "openvino/op/not_equal.hpp" +#include "openvino/op/reduce_prod.hpp" +#include "openvino/op/select.hpp" +#include "openvino/op/shape_of.hpp" + +using namespace std; +using namespace ov::op; + +namespace ov { 
+namespace frontend { +namespace tensorflow { +namespace op { +OutputVector translate_tobool_op(const NodeContext& node) { + // (rank(x) == 0 && x != 0) || (rank > 0 && ReduceProd(ShapeOf(x))) > 0 + + default_op_checks(node, 1, {"ToBool"}); + auto x = node.get_input(0); + + // prepare auxiliary zero and zero constants of the same type as the inputs + auto zero = create_same_type_const_scalar(x, 0); + auto zero_2 = make_shared(element::i32, Shape{}, 0); + auto true_const = make_shared(element::boolean, Shape{}, true); + auto false_const = make_shared(element::boolean, Shape{}, false); + // compute a mask to get rank(x) == 0 + auto x_rank = compute_subgraph_scalar_rank(x, element::i32); + + // compute rank(x) == 0 + auto is_zero = make_shared(x_rank, zero_2); + + // compute mask to get x != 0 + auto is_not_zero = make_shared(x, zero); + + // compute (rank(x) == 0 && x != 0) + auto logical_and = make_shared(is_zero, is_not_zero); + // compute rank(x) > 0 + auto greater_than_zero = make_shared(x_rank, zero_2); + + // compute ShapeOf(x) + auto cond_shape = make_shared(x, element::i32); + // compute ReduceProd(ShapeOf(x))) and axis + auto axis = make_shared(element::i32, Shape{}, 0); + auto reduce_prod = make_shared(cond_shape, axis); + + // compute ReduceProd(ShapeOf(x))) > 0 + auto greater_than__zero_2 = make_shared(reduce_prod, zero_2); + // compute (rank > 0 && ReduceProd(ShapeOf(x))) > 0 + auto logical_and_2 = make_shared(greater_than_zero, greater_than__zero_2); + + auto logical_or = make_shared(logical_and, logical_and_2); + + auto tobool = make_shared(logical_or, true_const, false_const); + set_node_name(node.get_name(), tobool); + return tobool->outputs(); +} +} // namespace op +} // namespace tensorflow +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/tensorflow_common/src/op/truncate_div.cpp b/src/frontends/tensorflow_common/src/op/truncate_div.cpp new file mode 100644 index 00000000000000..b725bbd76b44a3 --- 
/dev/null +++ b/src/frontends/tensorflow_common/src/op/truncate_div.cpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_op_table.hpp" +#include "openvino/op/ceiling.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/equal.hpp" +#include "openvino/op/floor.hpp" +#include "openvino/op/less.hpp" +#include "openvino/op/mod.hpp" +#include "openvino/op/select.hpp" + +using namespace std; +using namespace ov::opset10; + +namespace ov { +namespace frontend { +namespace tensorflow { +namespace op { +OutputVector translate_truncate_div_op(const NodeContext& node) { + default_op_checks(node, 2, {"TruncateDiv"}); + auto x = node.get_input(0); + auto y = node.get_input(1); + + auto res = make_shared(x, y); + auto is_res_negative = make_shared(res, create_same_type_const_scalar(x, 0)); + auto final_res = make_shared(is_y_negative, negative_y, y); + + // check if floor_mod == zero + auto floor_mod = make_shared(x, y); + auto is_zero = make_shared(floor_mod, create_same_type_const_scalar(floor_mod, 0)); + + // floor_mod - y + auto other_res = make_shared(floor_mod, y); + + // select operation to handle the sign + auto result = make_shared(is_x_negative, other_res, floor_mod)); + + set_node_name(node.get_name(), result); + return result->outputs(); +} +} // namespace op +} // namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/src/frontends/tests/frontend/shared/src/library_extension.cpp b/src/frontends/tests/frontend/shared/src/library_extension.cpp index a2257f8fca116b..8a6bb23d82f0ef 100644 --- a/src/frontends/tests/frontend/shared/src/library_extension.cpp +++ b/src/frontends/tests/frontend/shared/src/library_extension.cpp @@ -9,6 +9,7 @@ #include "common_test_utils/file_utils.hpp" #include "openvino/op/relu.hpp" #include "openvino/op/swish.hpp" +#include "openvino/runtime/core.hpp" #include "utils.hpp" using namespace ov::frontend; @@ -88,3 +89,30 @@ 
TEST_P(FrontendLibraryExtensionTest, verifyFunctions) { nodes.end()); } } + +TEST_P(FrontendLibraryExtensionTest, loadExtensionBeforeFrontend) { + // release all frontends internally + ov::shutdown(); + + const auto& lib_path = get_lib_path("test_builtin_extensions"); + + ov::Core core; + core.add_extension(lib_path); + + auto model = core.read_model(m_param.m_modelName); + ASSERT_NE(nullptr, model); + + const auto nodes = model->get_ops(); + ASSERT_EQ(std::find_if(nodes.begin(), + nodes.end(), + [](const std::shared_ptr& n) { + return ov::is_type(n); + }), + nodes.end()); + ASSERT_NE(std::find_if(nodes.begin(), + nodes.end(), + [](const std::shared_ptr& n) { + return ov::is_type(n); + }), + nodes.end()); +} diff --git a/src/inference/dev_api/ie_icore.hpp b/src/inference/dev_api/ie_icore.hpp index 8852c1f4ecd8c9..2210f26bbfc6ef 100644 --- a/src/inference/dev_api/ie_icore.hpp +++ b/src/inference/dev_api/ie_icore.hpp @@ -191,7 +191,7 @@ class ICore : public ov::ICore { virtual InferenceEngine::RemoteContext::Ptr CreateContext(const std::string& deviceName, const ov::AnyMap&) = 0; /** - * @brief Get only configs that are suppored by device + * @brief Get only configs that are supported by device * @param deviceName Name of a device * @param config Map of configs that can contains configs that are not supported by device * @return map of configs that are supported by device diff --git a/src/inference/dev_api/openvino/runtime/icore.hpp b/src/inference/dev_api/openvino/runtime/icore.hpp index e4d0a98f5be968..de2ca2ebf07c57 100644 --- a/src/inference/dev_api/openvino/runtime/icore.hpp +++ b/src/inference/dev_api/openvino/runtime/icore.hpp @@ -222,7 +222,7 @@ class OPENVINO_RUNTIME_API ICore { } /** - * @brief Get only properties that are suppored by specified device + * @brief Get only properties that are supported by specified device * @param full_device_name Name of a device (can be either virtual or hardware) * @param properties Properties that can contains configs 
that are not supported by device * @return map of properties that are supported by device diff --git a/src/inference/dev_api/openvino/runtime/isync_infer_request.hpp b/src/inference/dev_api/openvino/runtime/isync_infer_request.hpp index ed15438de2eb83..938fa8924fbb05 100644 --- a/src/inference/dev_api/openvino/runtime/isync_infer_request.hpp +++ b/src/inference/dev_api/openvino/runtime/isync_infer_request.hpp @@ -124,6 +124,12 @@ class OPENVINO_RUNTIME_API ISyncInferRequest : public IInferRequest { } }; + /** + * @brief Finds input or output port + * @return structure which contains index of Input/Output or report that port wasn't found + */ + FoundPort find_port(const ov::Output& port) const; + /** * @brief Converts batched tensors to tensor */ @@ -157,12 +163,9 @@ class OPENVINO_RUNTIME_API ISyncInferRequest : public IInferRequest { std::shared_ptr m_compiled_model; // Mutable to return reference to ov::Tensor mutable std::unordered_map, ov::SoPtr> m_tensors; - - /** - * @brief Finds input or output port - * @return structure which contains index of Input/Output or report that port wasn't found - */ - FoundPort find_port(const ov::Output& port) const; + // Cache ports + mutable std::unordered_map m_cached_ports; + mutable std::mutex m_cache_mutex; }; }; // namespace ov diff --git a/src/inference/src/dev/isync_infer_request.cpp b/src/inference/src/dev/isync_infer_request.cpp index 8e0f554fedd900..94d714d9f134a5 100644 --- a/src/inference/src/dev/isync_infer_request.cpp +++ b/src/inference/src/dev/isync_infer_request.cpp @@ -4,6 +4,7 @@ #include "openvino/runtime/isync_infer_request.hpp" +#include #include #include @@ -17,6 +18,7 @@ #include "openvino/runtime/make_tensor.hpp" #include "openvino/runtime/plugin_itt.hpp" #include "openvino/runtime/tensor.hpp" +#include "openvino/util/common_util.hpp" namespace { void check_batched_tensors(const ov::Output& input, @@ -93,14 +95,18 @@ ov::IInferRequest::~IInferRequest() = default; 
ov::ISyncInferRequest::ISyncInferRequest(const std::shared_ptr& compiled_model) : m_compiled_model(compiled_model) { OPENVINO_ASSERT(m_compiled_model); - // Create map of empty tensors - for (const auto& input : get_inputs()) { - if (m_tensors.find(input.get_tensor_ptr()) == m_tensors.end()) - m_tensors[input.get_tensor_ptr()] = ov::SoPtr(); - } - for (const auto& output : get_outputs()) { - if (m_tensors.find(output.get_tensor_ptr()) == m_tensors.end()) - m_tensors[output.get_tensor_ptr()] = ov::SoPtr(); + // Create map of empty tensors and cache ports from the compiled model + auto port_type = ov::ISyncInferRequest::FoundPort::Type::INPUT; + for (const auto& ports : {get_inputs(), get_outputs()}) { + for (size_t i = 0; i < ports.size(); i++) { + const auto& port = ports[i]; + if (m_tensors.find(port.get_tensor_ptr()) == m_tensors.end()) + m_tensors[port.get_tensor_ptr()] = ov::SoPtr(); + size_t port_hash = ov::util::hash_combine(std::vector{std::hash()(port.get_node()), + std::hash()(port.get_index())}); + m_cached_ports[port_hash] = {i, port_type}; + } + port_type = ov::ISyncInferRequest::FoundPort::Type::OUTPUT; } } @@ -118,18 +124,30 @@ ov::ISyncInferRequest::FoundPort ov::ISyncInferRequest::find_port(const ov::Outp // This function is hotspot, need optimization. 
auto check_nodes = [](const ov::Node* node1, const ov::Node* node2) { return node1 == node2 || - (node1->get_friendly_name() == node2->get_friendly_name() && - node1->get_type_info() == node2->get_type_info() && - node1->outputs().size() == node2->outputs().size() && node1->inputs().size() == node2->inputs().size()); + (node1->outputs().size() == node2->outputs().size() && + node1->inputs().size() == node2->inputs().size() && node1->get_type_info() == node2->get_type_info() && + node1->get_friendly_name() == node2->get_friendly_name()); }; + // Find port without caching work slow because we need each time iterate over all ports and compare different + // strings So use WA with caching in order to make 2+ calls for the same ports faster. + // Calculate hash for the port + size_t port_hash = ov::util::hash_combine( + std::vector{std::hash()(port.get_node()), std::hash()(port.get_index())}); + { + std::lock_guard lock(m_cache_mutex); + if (m_cached_ports.find(port_hash) != m_cached_ports.end()) { + // Cached port for the hash was found + return m_cached_ports[port_hash]; + } + } ov::ISyncInferRequest::FoundPort::Type type = ov::ISyncInferRequest::FoundPort::Type::INPUT; for (const auto& ports : {get_inputs(), get_outputs()}) { for (size_t i = 0; i < ports.size(); i++) { - // TODO: Fix port comparison - // if (ports[i] == port) { if (ports[i].get_index() == port.get_index() && ports[i].get_names() == port.get_names() && check_nodes(ports[i].get_node(), port.get_node())) { - return {i, type}; + std::lock_guard lock(m_cache_mutex); + m_cached_ports[port_hash] = {i, type}; + return m_cached_ports[port_hash]; } } type = ov::ISyncInferRequest::FoundPort::Type::OUTPUT; @@ -275,10 +293,10 @@ void ov::ISyncInferRequest::allocate_tensor( void ov::ISyncInferRequest::check_tensors() const { const auto& inputs = m_compiled_model->inputs(); for (size_t i = 0; i < inputs.size(); i++) { - check_tensor(inputs[i], get_tensor_ptr(inputs[i])); + check_tensor(inputs[i], 
m_tensors.at(inputs[i].get_tensor_ptr())); } const auto& outputs = m_compiled_model->outputs(); for (size_t i = 0; i < outputs.size(); i++) { - check_tensor(outputs[i], get_tensor_ptr(outputs[i])); + check_tensor(outputs[i], m_tensors.at(outputs[i].get_tensor_ptr())); } } diff --git a/src/inference/src/dev/threading/cpu_streams_executor.cpp b/src/inference/src/dev/threading/cpu_streams_executor.cpp index dba0082d647080..e61893e132dfeb 100644 --- a/src/inference/src/dev/threading/cpu_streams_executor.cpp +++ b/src/inference/src/dev/threading/cpu_streams_executor.cpp @@ -397,7 +397,7 @@ struct CPUStreamsExecutor::Impl { auto numaNodes = get_available_numa_nodes(); if (_config._streams != 0) { std::copy_n(std::begin(numaNodes), - std::min(static_cast(_config._streams), numaNodes.size()), + std::min(_config._streams, numaNodes.size()), std::back_inserter(_usedNumaNodes)); } else { _usedNumaNodes = numaNodes; diff --git a/src/plugins/hetero/src/sync_infer_request.cpp b/src/plugins/hetero/src/sync_infer_request.cpp index 21fbd0b4f2e2a3..0bb4bc4b7e9a4f 100644 --- a/src/plugins/hetero/src/sync_infer_request.cpp +++ b/src/plugins/hetero/src/sync_infer_request.cpp @@ -49,20 +49,15 @@ ov::hetero::InferRequest::InferRequest(const std::shared_ptr ov::hetero::InferRequest::get_request(const ov::Output& port) const { - auto check_nodes = [](const ov::Node* node1, const ov::Node* node2) { - return node1 == node2 || - (node1->get_friendly_name() == node2->get_friendly_name() && - node1->get_type_info() == node2->get_type_info() && - node1->outputs().size() == node2->outputs().size() && node1->inputs().size() == node2->inputs().size()); - }; - - for (const auto& kvp : m_port_to_subrequest_idx) { - if (kvp.first.get_index() == port.get_index() && kvp.first.get_names() == port.get_names() && - check_nodes(kvp.first.get_node(), port.get_node())) { - return m_subrequests[kvp.second]; - } + auto found_port = find_port(port); + ov::Output internal_port; + 
OPENVINO_ASSERT(found_port.found(), "Cannot find infer request for port ", port); + if (found_port.is_input()) { + internal_port = get_inputs().at(found_port.idx); + } else { + internal_port = get_outputs().at(found_port.idx); } - OPENVINO_THROW("Cannot find infer request for port ", port); + return m_subrequests[m_port_to_subrequest_idx.at(internal_port)]; } ov::SoPtr ov::hetero::InferRequest::get_tensor(const ov::Output& port) const { diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py index 6eab63bf682bd0..e437209cde9c9b 100644 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py +++ b/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 import unittest +import platform from typing import Tuple import numpy as np @@ -1236,6 +1237,8 @@ class TestPrecisionSensitive(): @pytest.mark.parametrize("create_model", test_data) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122714') def test_precision_sensitive(self, create_model, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): import numpy.testing as npt from pathlib import Path diff --git a/tests/layer_tests/onnx_tests/test_reduce_lp.py b/tests/layer_tests/onnx_tests/test_reduce_lp.py index e64929a680c20d..73cd86a2bbbc6f 100644 --- a/tests/layer_tests/onnx_tests/test_reduce_lp.py +++ b/tests/layer_tests/onnx_tests/test_reduce_lp.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest from common.layer_test_class import check_ir_version @@ -232,6 +234,8 @@ def create_reduce_lp_const(self, shape, axes, keep_dims, reduce_p, ir_version): @pytest.mark.parametrize("keep_dims", [True, False]) 
@pytest.mark.parametrize("reduce_p", [1, 2]) @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122846') def test_reduce_lp_precommit(self, params, keep_dims, reduce_p, ie_device, precision, ir_version, temp_dir, use_old_api): self._test(*self.create_reduce_lp(**params, keep_dims=keep_dims, reduce_p=reduce_p, diff --git a/tests/layer_tests/onnx_tests/test_roi_align.py b/tests/layer_tests/onnx_tests/test_roi_align.py index 13663808a3acd3..a29ddc4c1d1213 100644 --- a/tests/layer_tests/onnx_tests/test_roi_align.py +++ b/tests/layer_tests/onnx_tests/test_roi_align.py @@ -136,6 +136,8 @@ def create_net(self, input_shape, rois_shape, indices_shape, output_shape, @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.xfail(condition=platform.system() == 'Windows', reason="Ticket - 122731") + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122846') def test_roi_alignv10(self, params, ie_device, precision, ir_version, temp_dir, use_old_api): # TODO: ticket for investigating GPU failures: CVS-86300 if ie_device != "GPU": diff --git a/tests/layer_tests/pytorch_tests/test_adaptive_max_pool.py b/tests/layer_tests/pytorch_tests/test_adaptive_max_pool.py index c01e58c2107eec..09be641a0fb96e 100644 --- a/tests/layer_tests/pytorch_tests/test_adaptive_max_pool.py +++ b/tests/layer_tests/pytorch_tests/test_adaptive_max_pool.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -49,6 +51,8 @@ def forward(self, input_tensor): @pytest.mark.precommit @pytest.mark.precommit_ts_backend @pytest.mark.precommit_fx_backend + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_adaptive_max_pool3d(self, ie_device, precision, ir_version, input_tensor, 
output_size, return_indices): self.input_tensor = input_tensor self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version) @@ -94,6 +98,8 @@ def forward(self, input_tensor): @pytest.mark.precommit @pytest.mark.precommit_ts_backend @pytest.mark.precommit_fx_backend + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_adaptive_max_pool2d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices): self.input_tensor = input_tensor self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version) @@ -139,6 +145,8 @@ def forward(self, input_tensor): @pytest.mark.precommit @pytest.mark.precommit_ts_backend @pytest.mark.precommit_fx_backend + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_adaptive_max_pool1d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices): self.input_tensor = input_tensor self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version) \ No newline at end of file diff --git a/tests/layer_tests/pytorch_tests/test_all.py b/tests/layer_tests/pytorch_tests/test_all.py index b5255f197cfef0..ca9b734c1ad1dd 100644 --- a/tests/layer_tests/pytorch_tests/test_all.py +++ b/tests/layer_tests/pytorch_tests/test_all.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -68,6 +70,8 @@ def test_all_noparams(self, input_tensor, ie_device, precision, ir_version): ]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_all(self, input_tensor, keepdim, ie_device, precision, ir_version): self.input_tensor = input_tensor for dim in 
range(len(input_tensor.shape)): diff --git a/tests/layer_tests/pytorch_tests/test_argmax_argmin.py b/tests/layer_tests/pytorch_tests/test_argmax_argmin.py index 05abf128da400d..80ed6fcb872b5f 100644 --- a/tests/layer_tests/pytorch_tests/test_argmax_argmin.py +++ b/tests/layer_tests/pytorch_tests/test_argmax_argmin.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from pytorch_layer_test_class import PytorchLayerTest @@ -71,6 +73,8 @@ def forward(self, x): @pytest.mark.parametrize("dtype", ["float32", "int32", "int64"]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_argmin_argmax(self, axes, keep_dims, op_type, dtype, ie_device, precision, ir_version): self._test(*self.create_model(op_type, axes, keep_dims), ie_device, precision, ir_version, trace_model=True, diff --git a/tests/layer_tests/pytorch_tests/test_as_strided.py b/tests/layer_tests/pytorch_tests/test_as_strided.py new file mode 100644 index 00000000000000..9bfaa66d3a7f6b --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_as_strided.py @@ -0,0 +1,125 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestAsStrided(PytorchLayerTest): + def _prepare_input(self): + return (np.random.randn(8, 8).astype(np.float32),) + + def create_model(self, size, stride, offset): + class aten_as_strided(torch.nn.Module): + def __init__(self, size, stride, offset): + super().__init__() + self.size = size + self.stride = stride + self.offset = offset + + def forward(self, x): + return torch.as_strided(x, self.size, self.stride, self.offset) + + ref_net = None + + return aten_as_strided(size, stride, offset), ref_net, "aten::as_strided" + + 
@pytest.mark.parametrize( + "size,stride", + [ + ([1], [1]), + ([2, 2], [1, 1]), + ([5, 4, 3], [1, 3, 7]), + ([5, 5, 5], [5, 0, 5]), + ([1, 2, 3, 4], [4, 3, 2, 1]), + ], + ) + @pytest.mark.parametrize("offset", [None, 1, 3, 7]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_as_strided(self, size, stride, offset, ie_device, precision, ir_version): + self._test(*self.create_model(size, stride, offset), ie_device, precision, ir_version, trace_model=True) + + +class TestAsStridedListConstruct(PytorchLayerTest): + def _prepare_input(self, size_shape_tensor=[1], stride_shape_tensor=[1]): + return ( + np.random.randn(8, 8).astype(np.float32), + np.ones(size_shape_tensor), + np.ones(stride_shape_tensor), + ) + + def create_model(self, size, stride, offset, mode): + class aten_as_strided(torch.nn.Module): + def __init__(self, size, stride, offset, mode): + super().__init__() + self.size = size + self.stride = stride + self.size_shape_tensor = torch.empty(size) + self.stride_shape_tensor = torch.empty(stride) + self.offset = offset + modes = { + "no_const": self.forward_no_const, + "stride_const": self.forward_stride_const, + "size_const": self.forward_size_const, + } + self.forward = modes.get(mode) + + def forward_no_const(self, x, size_shape_tensor, stride_shape_tensor): + sz1, sz2, sz3 = size_shape_tensor.shape + st1, st2, st3 = stride_shape_tensor.shape + return torch.as_strided(x, [sz1, sz2, sz3], [st1, st2, st3], self.offset) + + def forward_stride_const(self, x, size_shape_tensor, stride_shape_tensor): + sz1, sz2, sz3 = size_shape_tensor.shape + return torch.as_strided(x, [sz1, sz2, sz3], self.stride, self.offset) + + def forward_size_const(self, x, size_shape_tensor, stride_shape_tensor): + st1, st2, st3 = stride_shape_tensor.shape + return torch.as_strided(x, self.size, [st1, st2, st3], self.offset) + + ref_net = None + + return aten_as_strided(size, stride, offset, mode), ref_net, ["aten::as_strided", "prim::ListConstruct"] + + 
@pytest.mark.parametrize("size,stride", [([5, 4, 3], [1, 3, 7]), ([5, 5, 5], [5, 0, 5])]) + @pytest.mark.parametrize("offset", [None, 7]) + @pytest.mark.parametrize("mode", ["no_const", "stride_const", "size_const"]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_as_strided_list_construct(self, size, stride, offset, mode, ie_device, precision, ir_version): + inp_kwargs = {"size_shape_tensor": size, "stride_shape_tensor": stride} + self._test( + *self.create_model(size, stride, offset, mode), + ie_device, + precision, + ir_version, + kwargs_to_prepare_input=inp_kwargs, + trace_model=True + ) + + +class TestAsStridedLongformer(PytorchLayerTest): + def _prepare_input(self): + return (np.random.randn(1, 10, 20, 40).astype(np.float32).transpose([0, 2, 3, 1]),) + + def create_model(self): + class aten_as_strided_lf(torch.nn.Module): + def forward(self, x): + chunk_size = list(x.size()) + chunk_size[1] = chunk_size[1] * 2 - 1 + chunk_stride = list(x.stride()) + chunk_stride[1] = chunk_stride[1] // 2 + return x.as_strided(size=chunk_size, stride=chunk_stride) + + ref_net = None + + return aten_as_strided_lf(), ref_net, "aten::as_strided" + + @pytest.mark.nightly + @pytest.mark.precommit + def test_as_strided_lf(self, ie_device, precision, ir_version): + self._test(*self.create_model(), ie_device, precision, ir_version, trace_model=True, freeze_model=False) diff --git a/tests/layer_tests/pytorch_tests/test_cumsum.py b/tests/layer_tests/pytorch_tests/test_cumsum.py index 926cfe9e95c30a..771eb02768bdf0 100644 --- a/tests/layer_tests/pytorch_tests/test_cumsum.py +++ b/tests/layer_tests/pytorch_tests/test_cumsum.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from pytorch_layer_test_class import PytorchLayerTest @@ -69,5 +71,7 @@ def forward_out_prim_dtype(self, x, y): @pytest.mark.parametrize("out,dtype_from_input", [(False, False), (True, False), (True, True)]) 
@pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_cumsum(self, axis, dtype, out, dtype_from_input, ie_device, precision, ir_version): self._test(*self.create_model(axis, dtype, out, dtype_from_input), ie_device, precision, ir_version, kwargs_to_prepare_input={"out": out, "out_dtype": dtype}) diff --git a/tests/layer_tests/pytorch_tests/test_distance.py b/tests/layer_tests/pytorch_tests/test_distance.py index 1c76a7243b47e3..f8cec6998ca7b6 100644 --- a/tests/layer_tests/pytorch_tests/test_distance.py +++ b/tests/layer_tests/pytorch_tests/test_distance.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from pytorch_layer_test_class import PytorchLayerTest @@ -29,6 +31,8 @@ def forward(self, x, y): @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.parametrize("p", [2., 4., 6., 8.,]) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_cdist(self, p, ie_device, precision, ir_version): self._test(*self.create_model(p), ie_device, precision, ir_version) @@ -61,5 +65,7 @@ def forward(self, x, y): @pytest.mark.parametrize("p", [2., 4., 6., 8.,]) @pytest.mark.parametrize("eps", [1e-06, 0.00001, 1e-07]) @pytest.mark.parametrize("keepdim", [True, False]) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_cdist(self, p, eps, keepdim, ie_device, precision, ir_version): self._test(*self.create_model(p, eps, keepdim), ie_device, precision, ir_version) \ No newline at end of file diff --git a/tests/layer_tests/pytorch_tests/test_div.py b/tests/layer_tests/pytorch_tests/test_div.py index d6e696b62882d5..8b7dad351817d4 100644 --- a/tests/layer_tests/pytorch_tests/test_div.py +++ 
b/tests/layer_tests/pytorch_tests/test_div.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -116,6 +118,8 @@ def forward3(self, lhs, rhs): ])) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_div_types(self, ie_device, precision, ir_version, lhs_type, lhs_shape, rhs_type, rhs_shape, rounding_mode): self.lhs_type = lhs_type self.lhs_shape = lhs_shape diff --git a/tests/layer_tests/pytorch_tests/test_embedding_bag.py b/tests/layer_tests/pytorch_tests/test_embedding_bag.py index 2595b2269316fd..d0c6d0c532856f 100644 --- a/tests/layer_tests/pytorch_tests/test_embedding_bag.py +++ b/tests/layer_tests/pytorch_tests/test_embedding_bag.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from pytorch_layer_test_class import PytorchLayerTest @@ -42,6 +44,8 @@ def forward_offsets_per_sample_weights(self, indicies, weight, offsets, per_samp @pytest.mark.precommit @pytest.mark.parametrize("indicies_dtype", ["int", "int32"]) @pytest.mark.parametrize("per_sample_weights", [True, False]) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_embedding_bag(self, ie_device, precision, ir_version, indicies_dtype, per_sample_weights): self._test(*self.create_model(per_sample_weights), ie_device, precision, ir_version, kwargs_to_prepare_input={"indicies_dtype": indicies_dtype, "per_sample_weights": per_sample_weights}, @@ -85,6 +89,8 @@ def forward_per_sample_weights(self, indicies, weight, per_sample_wights): @pytest.mark.parametrize("indicies_size", [[1, 1], [2, 5], [3, 10], [4, 7]]) @pytest.mark.parametrize("indicies_dtype", ["int", "int32"]) 
@pytest.mark.parametrize("per_sample_weights", [True, False]) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_embedding_bag(self, ie_device, precision, ir_version, indicies_dtype, indicies_size, per_sample_weights): self._test(*self.create_model(per_sample_weights), ie_device, precision, ir_version, kwargs_to_prepare_input={"indicies_size": indicies_size, "indicies_dtype": indicies_dtype, "per_sample_weights": per_sample_weights}, diff --git a/tests/layer_tests/pytorch_tests/test_fake_quantize.py b/tests/layer_tests/pytorch_tests/test_fake_quantize.py index 6bb1d6601cb43b..3146ac87b90087 100644 --- a/tests/layer_tests/pytorch_tests/test_fake_quantize.py +++ b/tests/layer_tests/pytorch_tests/test_fake_quantize.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -46,6 +48,8 @@ def forward(self, x): (1.0, 0, 0, 127), ], ) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_fake_quantize_per_tensor_affine( self, ie_device, precision, ir_version, scale, zero_point, quant_min, quant_max ): @@ -96,6 +100,8 @@ def forward(self, x): (torch.tensor([-0.005, -0.7, 0.1]), torch.tensor([1, 0, 1], dtype=torch.int32), 0, 0, 255), ], ) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_fake_quantize_per_channel_affine( self, ie_device, precision, ir_version, scale, zero_point, axis, quant_min, quant_max ): diff --git a/tests/layer_tests/pytorch_tests/test_floor_divide.py b/tests/layer_tests/pytorch_tests/test_floor_divide.py index cd427acb3dba56..44c1eadc3ce542 100644 --- a/tests/layer_tests/pytorch_tests/test_floor_divide.py +++ b/tests/layer_tests/pytorch_tests/test_floor_divide.py @@ -1,6 +1,8 @@ # Copyright (C) 
2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest @@ -52,6 +54,8 @@ def forward(self, input_tensor, other_tensor): ])) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_floor_divide(self, input_tensor, other_tensor, ie_device, precision, ir_version): self.input_tensor = input_tensor self.other_tensor = other_tensor diff --git a/tests/layer_tests/pytorch_tests/test_full.py b/tests/layer_tests/pytorch_tests/test_full.py index c564b1bb3731b9..52b5b2e3e58bd1 100644 --- a/tests/layer_tests/pytorch_tests/test_full.py +++ b/tests/layer_tests/pytorch_tests/test_full.py @@ -1,5 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 + +import platform + import numpy as np import pytest @@ -144,6 +147,8 @@ def forward(self, input_t: torch.Tensor, x:float): @pytest.mark.parametrize("mode", ["", "inplace", "out"]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_fill(self, shape, value, input_dtype, value_dtype, mode, ie_device, precision, ir_version): self._test(*self.create_model(mode), ie_device, precision, ir_version, kwargs_to_prepare_input={ @@ -183,6 +188,8 @@ def forward(self, x:torch.Tensor, y:float): @pytest.mark.parametrize("wrap", [True, False]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_fill_diagonal(self, shape, value, input_dtype, value_dtype, wrap, ie_device, precision, ir_version): self._test(*self.create_model(shape, wrap), ie_device, precision, ir_version, kwargs_to_prepare_input={ diff --git a/tests/layer_tests/pytorch_tests/test_grid_sampler.py 
b/tests/layer_tests/pytorch_tests/test_grid_sampler.py index b142544c3b6e62..7b55862e2f0c2d 100644 --- a/tests/layer_tests/pytorch_tests/test_grid_sampler.py +++ b/tests/layer_tests/pytorch_tests/test_grid_sampler.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from pytorch_layer_test_class import PytorchLayerTest @@ -35,6 +37,8 @@ def forward(self, input, grid): @pytest.mark.parametrize("align_corners", [True, False, None]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_grid_sampler(self, h_in, w_in, h_out, w_out, mode, padding_mode, align_corners, ie_device, precision, ir_version): self._test(*self.create_model(mode, padding_mode, align_corners), ie_device, precision, ir_version, kwargs_to_prepare_input={ "h_in": h_in, "w_in": w_in, "h_out": h_out, "w_out": w_out diff --git a/tests/layer_tests/pytorch_tests/test_instance_norm.py b/tests/layer_tests/pytorch_tests/test_instance_norm.py index 2fe3f5e13e066a..3ec2dd0144573d 100644 --- a/tests/layer_tests/pytorch_tests/test_instance_norm.py +++ b/tests/layer_tests/pytorch_tests/test_instance_norm.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from pytorch_layer_test_class import PytorchLayerTest @@ -60,6 +62,8 @@ def forward(self, x): ]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_group_norm(self, params, ie_device, precision, ir_version, kwargs_to_prepare_input): self._test(*self.create_model(**params), ie_device, precision, ir_version, kwargs_to_prepare_input=kwargs_to_prepare_input, diff --git a/tests/layer_tests/pytorch_tests/test_linspace.py 
b/tests/layer_tests/pytorch_tests/test_linspace.py index aa6f70d3d71c89..4cf623e55fafad 100644 --- a/tests/layer_tests/pytorch_tests/test_linspace.py +++ b/tests/layer_tests/pytorch_tests/test_linspace.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -63,6 +65,8 @@ def forward(self, start, end, steps, d): @pytest.mark.parametrize( "start,end,steps", [(0, 1, 5), (-2, 1, 5), (1, -5, 7), (1, 10, 2), (-1, -5, 2), (-1, -5, 1), (1.25, -5.5, 5)] ) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_linspace_with_prim_dtype(self, dtype, end, start, steps, ie_device, precision, ir_version): self._test( *self.create_model(dtype, ref_dtype=True), @@ -79,6 +83,8 @@ def test_linspace_with_prim_dtype(self, dtype, end, start, steps, ie_device, pre "start,end,steps", [(0, 1, 5), (-2, 1, 5), (1, -5, 7), (1, 10, 2), (-1, -5, 2), (-1, -5, 1), (1.25, -5.5, 5)] ) @pytest.mark.parametrize("use_out", [False, True]) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_linspace_with_out(self, dtype, use_out, end, start, steps, ie_device, precision, ir_version): self._test( *self.create_model(dtype=dtype, use_out=use_out), diff --git a/tests/layer_tests/pytorch_tests/test_native_multi_head_attention.py b/tests/layer_tests/pytorch_tests/test_native_multi_head_attention.py index 41e737dba6221d..26b7cdbd14812b 100644 --- a/tests/layer_tests/pytorch_tests/test_native_multi_head_attention.py +++ b/tests/layer_tests/pytorch_tests/test_native_multi_head_attention.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -74,6 +76,8 @@ def _prepare_input(self): ["need_weights", "average_attn_weights"], [[False, 
False], [True, False], [True, True]] ) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_native_multi_head_attention(self, ie_device, precision, ir_version, mask, need_weights, average_attn_weights): self._test(aten_native_multi_head_attention(mask, need_weights, average_attn_weights), None, "aten::_native_multi_head_attention", ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_nms.py b/tests/layer_tests/pytorch_tests/test_nms.py index ae09726a23b8f9..b703e98ccaffe9 100644 --- a/tests/layer_tests/pytorch_tests/test_nms.py +++ b/tests/layer_tests/pytorch_tests/test_nms.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from pytorch_layer_test_class import PytorchLayerTest import numpy as np @@ -35,6 +37,8 @@ def forward(self, boxes, scores): @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_nms(self, ie_device, precision, ir_version, boxes_num): self.boxes_num = boxes_num self._test(*self.create_model(), ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_norm.py b/tests/layer_tests/pytorch_tests/test_norm.py index aef0a074059950..9422c170401702 100644 --- a/tests/layer_tests/pytorch_tests/test_norm.py +++ b/tests/layer_tests/pytorch_tests/test_norm.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -245,6 +247,8 @@ def forward_out(self, x, y): @pytest.mark.parametrize("dtype", ["float32", "float64", None]) @pytest.mark.parametrize("out", [True, False]) @pytest.mark.parametrize("prim_dtype", [True, False]) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 
'arm64', + reason='Ticket - 122715') def test_linalg_matrix_norm(self, p, dim, keepdim, dtype, out, prim_dtype, ie_device, precision, ir_version): self._test(*self.create_model(p, dim, keepdim, dtype, out, prim_dtype), ie_device, precision, ir_version, diff --git a/tests/layer_tests/pytorch_tests/test_pooling.py b/tests/layer_tests/pytorch_tests/test_pooling.py index 3f4c94db6d45d0..f54902282ece1b 100644 --- a/tests/layer_tests/pytorch_tests/test_pooling.py +++ b/tests/layer_tests/pytorch_tests/test_pooling.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from pytorch_layer_test_class import PytorchLayerTest @@ -133,6 +135,8 @@ def forward(self, x): @pytest.mark.parametrize("count_include_pad", [True, False]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_avg_pool1d(self, params, ceil_mode, count_include_pad, ie_device, precision, ir_version): self._test(*self.create_model("avg_pool1d", **params, ceil_mode=ceil_mode, count_include_pad=count_include_pad), ie_device, precision, ir_version, kwargs_to_prepare_input={'ndim': 3}, trace_model=True, @@ -151,6 +155,8 @@ def test_avg_pool1d(self, params, ceil_mode, count_include_pad, ie_device, preci @pytest.mark.parametrize("count_include_pad", [True, False]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_avg_pool2d(self, params, ceil_mode, count_include_pad, ie_device, precision, ir_version): self._test(*self.create_model("avg_pool2d", **params, ceil_mode=ceil_mode, count_include_pad=count_include_pad), ie_device, precision, ir_version, trace_model=True, dynamic_shapes=False) @@ -160,6 +166,8 @@ def test_avg_pool2d(self, params, ceil_mode, count_include_pad, ie_device, preci 
@pytest.mark.parametrize("count_include_pad", [True, False]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_avg_pool3d(self, params, ceil_mode, count_include_pad, ie_device, precision, ir_version): self._test(*self.create_model("avg_pool3d", **params, ceil_mode=ceil_mode, count_include_pad=count_include_pad), ie_device, precision, ir_version, kwargs_to_prepare_input={'ndim': 5}, trace_model=True, @@ -170,6 +178,8 @@ def test_avg_pool3d(self, params, ceil_mode, count_include_pad, ie_device, preci @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_max_pool1d(self, params, ceil_mode, dilation, ie_device, precision, ir_version): self._test(*self.create_model("max_pool1d", **params, ceil_mode=ceil_mode, dilation=dilation), ie_device, precision, ir_version, kwargs_to_prepare_input={'ndim': 3}, dynamic_shapes=False) @@ -179,6 +189,8 @@ def test_max_pool1d(self, params, ceil_mode, dilation, ie_device, precision, ir_ @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_max_pool2d(self, params, ceil_mode, dilation, ie_device, precision, ir_version): to_trace = False if params["stride"] == []: @@ -191,6 +203,8 @@ def test_max_pool2d(self, params, ceil_mode, dilation, ie_device, precision, ir @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_max_pool3d(self, params, ceil_mode, dilation, ie_device, precision, ir_version): 
self._test(*self.create_model("max_pool3d", **params, ceil_mode=ceil_mode, dilation=dilation), ie_device, precision, ir_version, kwargs_to_prepare_input={'ndim': 5}, dynamic_shapes=False) @@ -200,6 +214,8 @@ def test_max_pool3d(self, params, ceil_mode, dilation, ie_device, precision, ir_ @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_max_pool1d_indices(self, params, ceil_mode, dilation, ie_device, precision, ir_version): if ceil_mode and (np.array(params["padding"]).any() != 0): pytest.skip("ticket 122418") @@ -211,6 +227,8 @@ def test_max_pool1d_indices(self, params, ceil_mode, dilation, ie_device, precis @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_max_pool2d_indices(self, params, ceil_mode, dilation, ie_device, precision, ir_version): if ceil_mode and (np.array(params["padding"]).any() != 0): pytest.skip("ticket 122418") @@ -225,6 +243,8 @@ def test_max_pool2d_indices(self, params, ceil_mode, dilation, ie_device, preci @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_max_pool3d_indices(self, params, ceil_mode, dilation, ie_device, precision, ir_version): if ceil_mode and (np.array(params["padding"]).any() != 0): pytest.skip("ticket 122418") diff --git a/tests/layer_tests/pytorch_tests/test_quantize.py b/tests/layer_tests/pytorch_tests/test_quantize.py index f1a7522159090e..600821fa16204c 100644 --- a/tests/layer_tests/pytorch_tests/test_quantize.py +++ b/tests/layer_tests/pytorch_tests/test_quantize.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 
Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -49,6 +51,8 @@ def _prepare_input(self): ]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_quantize_per_tensor_dequantize(self, scale, zero_point, dtype, ie_device, precision, ir_version): if dtype == torch.quint8: zero_point = abs(zero_point) self._test(aten_quantize_per_tensor_aten_dequantize(scale, zero_point, dtype), None, ["aten::quantize_per_tensor", "aten::dequantize"], @@ -88,6 +92,8 @@ def _prepare_input(self): ]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_quantize_per_channel_dequantize(self, scale, zero_point, dtype, axis, ie_device, precision, ir_version): np.random.shuffle(scale), np.random.shuffle(zero_point) if dtype == torch.quint8: zero_point = abs(zero_point) diff --git a/tests/layer_tests/pytorch_tests/test_quantized_add.py b/tests/layer_tests/pytorch_tests/test_quantized_add.py index 960d3b4cca7aef..59a992fc088d5a 100644 --- a/tests/layer_tests/pytorch_tests/test_quantized_add.py +++ b/tests/layer_tests/pytorch_tests/test_quantized_add.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -38,6 +40,8 @@ def _prepare_input(self): ]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_quantized_add(self, scale, zero_point, dtype, ie_device, precision, ir_version): if dtype == torch.quint8: zero_point = abs(zero_point) self._test(quantized_add(scale, zero_point, dtype), None, ["quantized::add"], diff --git 
a/tests/layer_tests/pytorch_tests/test_quantized_add_relu.py b/tests/layer_tests/pytorch_tests/test_quantized_add_relu.py index 4a0dd797e3525c..6cb64dfab053d6 100644 --- a/tests/layer_tests/pytorch_tests/test_quantized_add_relu.py +++ b/tests/layer_tests/pytorch_tests/test_quantized_add_relu.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -38,6 +40,8 @@ def _prepare_input(self): ]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_quantized_add_relu(self, scale, zero_point, dtype, ie_device, precision, ir_version): if dtype == torch.quint8: zero_point = abs(zero_point) self._test(quantized_add_relu(scale, zero_point, dtype), None, ["quantized::add_relu"], diff --git a/tests/layer_tests/pytorch_tests/test_quantized_cat.py b/tests/layer_tests/pytorch_tests/test_quantized_cat.py index db6e5278bb5c50..ce0bc880e78f66 100644 --- a/tests/layer_tests/pytorch_tests/test_quantized_cat.py +++ b/tests/layer_tests/pytorch_tests/test_quantized_cat.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -73,6 +75,8 @@ def _prepare_input(self): @pytest.mark.parametrize("dtype", [torch.quint8, torch.qint8]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_quantized_cat(self, scale, zero_point, dtype, ie_device, precision, ir_version): self._test( aten_quantized_cat(scale, zero_point, dtype), @@ -91,6 +95,8 @@ def test_quantized_cat(self, scale, zero_point, dtype, ie_device, precision, ir_ @pytest.mark.parametrize("dtype", [torch.quint8, torch.qint8]) @pytest.mark.nightly @pytest.mark.precommit + 
@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_append_quantized_cat(self, scale, zero_point, dtype, ie_device, precision, ir_version): self._test( aten_append_quantized_cat(scale, zero_point, dtype), @@ -130,6 +136,8 @@ def test_loop_append_quantized_cat(self, scale, zero_point, dtype, ie_device, pr @pytest.mark.parametrize("dtype", [torch.quint8, torch.qint8]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_add_quantized_cat(self, scale, zero_point, dtype, ie_device, precision, ir_version): self._test( aten_add_quantized_cat(scale, zero_point, dtype), diff --git a/tests/layer_tests/pytorch_tests/test_quantized_convnd.py b/tests/layer_tests/pytorch_tests/test_quantized_convnd.py index cf3ec0142cf46b..bc4ac9e1788b34 100644 --- a/tests/layer_tests/pytorch_tests/test_quantized_convnd.py +++ b/tests/layer_tests/pytorch_tests/test_quantized_convnd.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest import numpy as np import torch @@ -78,6 +80,8 @@ def forward(self, x): @pytest.mark.parametrize("zero_point", [0, 1]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_quantized_conv2d(self, params, bias, relu, scale, zero_point, ie_device, precision, ir_version): self._test( *self.create_model(**params, bias=bias, relu=relu, diff --git a/tests/layer_tests/pytorch_tests/test_quantized_hardswish.py b/tests/layer_tests/pytorch_tests/test_quantized_hardswish.py index a0b40783c4e98d..4508bbcb266ab6 100644 --- a/tests/layer_tests/pytorch_tests/test_quantized_hardswish.py +++ b/tests/layer_tests/pytorch_tests/test_quantized_hardswish.py @@ -1,6 +1,8 @@ # Copyright 
(C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -36,6 +38,8 @@ def _prepare_input(self): ]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_quantized_hardswish(self, scale, zero_point, dtype, ie_device, precision, ir_version): if dtype == torch.quint8: zero_point = abs(zero_point) self._test(quantized_hardswish(scale, zero_point, dtype), None, ["quantized::hardswish"], diff --git a/tests/layer_tests/pytorch_tests/test_quantized_linear.py b/tests/layer_tests/pytorch_tests/test_quantized_linear.py index 1ded932f234055..bd89ea48303f25 100644 --- a/tests/layer_tests/pytorch_tests/test_quantized_linear.py +++ b/tests/layer_tests/pytorch_tests/test_quantized_linear.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest import torch import numpy as np @@ -73,6 +75,8 @@ def forward(self, inp): @pytest.mark.parametrize("trace", [True, False]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_quantized_linear(self, params, scale, zero_point, trace, ie_device, precision, ir_version): input_shape = params.get("input_shape") weight_shape = params.get("weight_shape") @@ -84,6 +88,8 @@ def test_quantized_linear(self, params, scale, zero_point, trace, ie_device, pre @pytest.mark.parametrize("inplace", [True, False]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_quantized_hardtanh_linear(self, trace, inplace, ie_device, precision, ir_version): self._test(*self.create_hardtanh_model([10, 9], True, 1, 0.3, inplace), ie_device, precision, 
ir_version, kwargs_to_prepare_input={"input_shape": [2, 3, 9]}, trace_model=trace, freeze_model=False, quantized_ops=True, quant_size=0.3) diff --git a/tests/layer_tests/pytorch_tests/test_quantized_mul.py b/tests/layer_tests/pytorch_tests/test_quantized_mul.py index cc877daa919b5b..d170d70308b6a5 100644 --- a/tests/layer_tests/pytorch_tests/test_quantized_mul.py +++ b/tests/layer_tests/pytorch_tests/test_quantized_mul.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import torch @@ -38,6 +40,8 @@ def _prepare_input(self): ]) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_quantized_mul(self, scale, zero_point, dtype, ie_device, precision, ir_version): if dtype == torch.quint8: zero_point = abs(zero_point) self._test(quantized_mul(scale, zero_point, dtype), None, ["quantized::mul"], diff --git a/tests/layer_tests/pytorch_tests/test_scaled_dot_product_attention.py b/tests/layer_tests/pytorch_tests/test_scaled_dot_product_attention.py index 22ed325471823b..69c600a0b7562d 100644 --- a/tests/layer_tests/pytorch_tests/test_scaled_dot_product_attention.py +++ b/tests/layer_tests/pytorch_tests/test_scaled_dot_product_attention.py @@ -36,6 +36,7 @@ def forward(self, query, key, value): @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.precommit_fx_backend @pytest.mark.parametrize(['mask', "is_causal"], [(False, False), (False, True), (True, True), (True, False)]) def test_scaled_dot_product_atten(self, ie_device, precision, ir_version, mask, is_causal): self._test(*self.create_model(mask, is_causal),ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_var_mean.py b/tests/layer_tests/pytorch_tests/test_var_mean.py index 6ce85988e9edfb..bd8a5a10617eb4 100644 --- 
a/tests/layer_tests/pytorch_tests/test_var_mean.py +++ b/tests/layer_tests/pytorch_tests/test_var_mean.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from pytorch_layer_test_class import PytorchLayerTest @@ -52,6 +54,8 @@ def forward(self, x): @pytest.mark.precommit @pytest.mark.parametrize("unbiased", [True, False]) @pytest.mark.parametrize("op_type", ["var", "var_mean", "std", "std_mean"]) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_var2args(self, unbiased, op_type, ie_device, precision, ir_version): self._test(*self.create_model(unbiased, op_type=op_type), ie_device, precision, ir_version) @@ -61,5 +65,7 @@ def test_var2args(self, unbiased, op_type, ie_device, precision, ir_version): @pytest.mark.parametrize("dim", [None, 0, 1, 2, 3, -1, -2, (0, 1), (-1, -2), (0, 1, -1), (0, 1, 2, 3)]) @pytest.mark.parametrize("keepdim", [True, False]) @pytest.mark.parametrize("op_type", ["var", "var_mean", "std", "std_mean"]) + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122715') def test_var(self, unbiased, dim, keepdim, op_type, ie_device, precision, ir_version): self._test(*self.create_model(unbiased, dim, keepdim, two_args_case=False, op_type=op_type), ie_device, precision, ir_version) \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_lite_tests/test_tfl_BroadcastTo.py b/tests/layer_tests/tensorflow_lite_tests/test_tfl_BroadcastTo.py index bae3f51ce97ff0..6f3eb1b70ed2f2 100644 --- a/tests/layer_tests/tensorflow_lite_tests/test_tfl_BroadcastTo.py +++ b/tests/layer_tests/tensorflow_lite_tests/test_tfl_BroadcastTo.py @@ -1,3 +1,5 @@ +import platform + import pytest import tensorflow as tf @@ -29,5 +31,7 @@ def make_model(self, params): @pytest.mark.parametrize("params", test_params) @pytest.mark.nightly + 
@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 123324') def test_broadcast_to(self, params, ie_device, precision, temp_dir): self._test(ie_device, precision, temp_dir, params) diff --git a/tests/layer_tests/tensorflow_lite_tests/test_tfl_RFFT2D.py b/tests/layer_tests/tensorflow_lite_tests/test_tfl_RFFT2D.py index b534878970ac59..1ae3464c207b34 100644 --- a/tests/layer_tests/tensorflow_lite_tests/test_tfl_RFFT2D.py +++ b/tests/layer_tests/tensorflow_lite_tests/test_tfl_RFFT2D.py @@ -1,3 +1,5 @@ +import platform + import pytest import tensorflow as tf @@ -30,5 +32,7 @@ def make_model(self, params): @pytest.mark.parametrize("params", test_params) @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 123324') def test_rfft2d(self, params, ie_device, precision, temp_dir): self._test(ie_device, precision, temp_dir, params) diff --git a/tests/layer_tests/tensorflow_lite_tests/test_tfl_SegmentSum.py b/tests/layer_tests/tensorflow_lite_tests/test_tfl_SegmentSum.py index a5ce2d314aee0b..c7339efaf7f55e 100644 --- a/tests/layer_tests/tensorflow_lite_tests/test_tfl_SegmentSum.py +++ b/tests/layer_tests/tensorflow_lite_tests/test_tfl_SegmentSum.py @@ -1,3 +1,5 @@ +import platform + import pytest import tensorflow as tf @@ -40,5 +42,7 @@ def make_model(self, params): @pytest.mark.parametrize("params", test_params) @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 123324') def test_segment_sum(self, params, ie_device, precision, temp_dir): self._test(ie_device, precision, temp_dir, params) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_AdjustContrastv2.py b/tests/layer_tests/tensorflow_tests/test_tf_AdjustContrastv2.py index 88944c50a38091..896e1789111eaa 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_AdjustContrastv2.py +++ 
b/tests/layer_tests/tensorflow_tests/test_tf_AdjustContrastv2.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -38,6 +40,8 @@ def create_adjust_contrast_net(self, input_shape, input_type): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_adjust_contrast_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_adjust_contrast_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_BinaryOps.py b/tests/layer_tests/tensorflow_tests/test_tf_BinaryOps.py index 62689f5609cc12..7e3964e68c9c35 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_BinaryOps.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_BinaryOps.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest @@ -126,6 +128,8 @@ def create_add_placeholder_const_net(self, x_shape, y_shape, ir_version, op_type 'Xdivy']) @pytest.mark.nightly @pytest.mark.precommit + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_binary_op(self, params, ie_device, precision, ir_version, temp_dir, op_type, use_new_frontend, use_old_api): if precision == "FP16": diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Bucketize.py b/tests/layer_tests/tensorflow_tests/test_tf_Bucketize.py index d981b2997542b5..8ab60f9ac65beb 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Bucketize.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Bucketize.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + 
import numpy as np import pytest import tensorflow as tf @@ -40,6 +42,8 @@ def create_bucketize_net(self, input_shape, input_type, boundaries_size): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_bucketize_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_bucketize_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_CropAndResize.py b/tests/layer_tests/tensorflow_tests/test_tf_CropAndResize.py index 92ef18ff5aba98..30cefc07c942d2 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_CropAndResize.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_CropAndResize.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -53,6 +55,8 @@ def create_crop_and_resize_net(self, image_shape, num_boxes, crop_size_value, me @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_crop_and_resize_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_crop_and_resize_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_DivNoNan.py b/tests/layer_tests/tensorflow_tests/test_tf_DivNoNan.py index 58db73ece154e1..5a6f3883185f23 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_DivNoNan.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_DivNoNan.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -41,6 
+43,8 @@ def create_div_no_nan_net(self, input_shape, input_type): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_div_no_nan_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_div_no_nan_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py b/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py index 43d8da8e38019d..191b46e035a376 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_FakeQuantWithMinMaxVars.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -42,6 +44,8 @@ def create_fake_quant_with_min_max_vars_net(self, inputs_shape, min_value, max_v ]) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_fake_quant_with_min_max_vars_basic(self, params, fake_quant_op, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): diff --git a/tests/layer_tests/tensorflow_tests/test_tf_If.py b/tests/layer_tests/tensorflow_tests/test_tf_If.py index 0e4e7a6fb249e5..20085e6ac86672 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_If.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_If.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -67,6 +69,8 @@ def else_branch(): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + 
@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_if_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): if ie_device == 'GPU': @@ -137,6 +141,8 @@ def else_branch(): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_if_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): if ie_device == 'GPU': @@ -215,6 +221,8 @@ def else_branch(): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_if_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): if ie_device == 'GPU': @@ -305,6 +313,8 @@ def else_branch(): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_if_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): if ie_device == 'GPU': diff --git a/tests/layer_tests/tensorflow_tests/test_tf_LeakyRelu.py b/tests/layer_tests/tensorflow_tests/test_tf_LeakyRelu.py index 1504ae706a9b19..ea672ac144d987 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_LeakyRelu.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_LeakyRelu.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest import tensorflow as tf from common.tf_layer_test_class import CommonTFLayerTest @@ -31,6 +33,8 @@ def create_leaky_relu_net(self, 
x_shape, alpha_value): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_leaky_relu_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_leaky_relu_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_LinSpace.py b/tests/layer_tests/tensorflow_tests/test_tf_LinSpace.py index c696eaaa0355e5..216fe7b7816de4 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_LinSpace.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_LinSpace.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest import tensorflow as tf from common.tf_layer_test_class import CommonTFLayerTest @@ -28,6 +30,8 @@ def create_lin_space_net(self, num_value): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_lin_space_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_lin_space_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_LogSoftmax.py b/tests/layer_tests/tensorflow_tests/test_tf_LogSoftmax.py index bef52905aa3159..063e310dd8174a 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_LogSoftmax.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_LogSoftmax.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -39,6 +41,8 @@ def create_log_softmax_net(self, logits_shape): @pytest.mark.precommit @pytest.mark.precommit_tf_fe @pytest.mark.nightly + 
@pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_log_softmax_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_log_softmax_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_MaxPoolWithArgmax.py b/tests/layer_tests/tensorflow_tests/test_tf_MaxPoolWithArgmax.py index 4d1fed5747ba11..f08995f3c09d11 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_MaxPoolWithArgmax.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_MaxPoolWithArgmax.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -59,6 +61,8 @@ def create_max_pool_with_argmax_net(self, input_shape, ksize, strides, input_typ ]) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_max_pool_with_argmax_basic(self, params, input_type, padding, targmax, include_batch_in_index, with_second_output, ie_device, precision, ir_version, temp_dir, diff --git a/tests/layer_tests/tensorflow_tests/test_tf_NormalizeL2.py b/tests/layer_tests/tensorflow_tests/test_tf_NormalizeL2.py index 51a1b322af6541..5de76778d1d837 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_NormalizeL2.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_NormalizeL2.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from common.tf_layer_test_class import CommonTFLayerTest @@ -30,6 +32,8 @@ def create_normalize_l2_net(shape, axes): @pytest.mark.precommit @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def 
test_normalize_l2_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_normalize_l2_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py b/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py index eb3ac133b3687d..7c523740d79f96 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Pooling.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from common.layer_test_class import check_ir_version from common.tf_layer_test_class import CommonTFLayerTest @@ -145,6 +147,8 @@ def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, me @pytest.mark.parametrize("params", test_data_4D) @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_pool_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_pooling_net(**params, ir_version=ir_version, @@ -227,6 +231,8 @@ def test_pool_4D(self, params, ie_device, precision, ir_version, temp_dir, use_n @pytest.mark.parametrize("params", test_data_5D) @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_pool_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): if ie_device == 'GPU': diff --git a/tests/layer_tests/tensorflow_tests/test_tf_RandomUniform.py b/tests/layer_tests/tensorflow_tests/test_tf_RandomUniform.py index 0006afd9ab9eca..1f5f778db3ac2f 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_RandomUniform.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_RandomUniform.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 
+import platform + import pytest import tensorflow as tf from common.layer_test_class import check_ir_version @@ -88,6 +90,8 @@ def create_tf_random_uniform_net(self, global_seed, op_seed, x_shape, min_val, m @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.precommit_tf_fe + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_random_uniform_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): if ie_device == 'GPU': diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Resize.py b/tests/layer_tests/tensorflow_tests/test_tf_Resize.py index 184a8115772128..c62492c7a76196 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Resize.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Resize.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -60,6 +62,8 @@ def create_resize_net(self, images_shape, images_type, size_value, align_corners @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_resize_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_resize_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ScatterND.py b/tests/layer_tests/tensorflow_tests/test_tf_ScatterND.py index dac986b96c281e..26ddcfdd53bcc2 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_ScatterND.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_ScatterND.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from common.tf_layer_test_class import CommonTFLayerTest @@ -69,6 +71,8 @@ def 
create_tf_scatternd_placeholder_const_net(self, x_shape, indices, updates, i @pytest.mark.parametrize("params", test_data) @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_tf_scatter_nd(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_tf_scatternd_placeholder_const_net(**params, ir_version=ir_version, diff --git a/tests/layer_tests/tensorflow_tests/test_tf_SegmentSum.py b/tests/layer_tests/tensorflow_tests/test_tf_SegmentSum.py index 5d74c361f51c20..f0f99d4b9cf95f 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_SegmentSum.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_SegmentSum.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -41,6 +43,8 @@ def create_segment_sum_net(self, data_shape, segment_ids_shape, data_type, segme @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_segment_sum_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): if not use_new_frontend: diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Softmax.py b/tests/layer_tests/tensorflow_tests/test_tf_Softmax.py index fc9391feaae3e8..574fe3d32949f7 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Softmax.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Softmax.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -34,6 +36,8 @@ def create_softmax_net(self, input_shape): @pytest.mark.parametrize("params", test_data_basic) 
@pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_softmax_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_softmax_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatch.py b/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatch.py index b0f24322b01041..03e83dc39e9c8d 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatch.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_SpaceToBatch.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import pytest from common.tf_layer_test_class import CommonTFLayerTest @@ -33,6 +35,8 @@ def create_space_to_batch_net(self, in_shape, pads_value, block_shape_value): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_space_to_batch_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_space_to_batch_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ToBool.py b/tests/layer_tests/tensorflow_tests/test_tf_ToBool.py new file mode 100644 index 00000000000000..74da79c36d52a1 --- /dev/null +++ b/tests/layer_tests/tensorflow_tests/test_tf_ToBool.py @@ -0,0 +1,43 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import tensorflow as tf +from common.tf_layer_test_class import CommonTFLayerTest + + +class TestToBool(CommonTFLayerTest): + def _prepare_input(self, inputs_info): + assert 'x' in inputs_info + x_shape = inputs_info['x'] + inputs_data = {} + inputs_data['x'] = 
np.random.randint(-10, 10, x_shape).astype(np.float32) + + return inputs_data + + def create_tobool_net(self, input_shape, input_type): + self.input_type = input_type + tf.compat.v1.reset_default_graph() + # Create the graph and model + with tf.compat.v1.Session() as sess: + x = tf.compat.v1.placeholder(input_type, input_shape, 'x') + tf.raw_ops.ToBool(input=x) + tf.compat.v1.global_variables_initializer() + tf_net = sess.graph_def + + return tf_net, None + + test_data_basic = [ + dict(input_shape=[10, 20], input_type=np.float32), + dict(input_shape=[2, 3, 4], input_type=np.float32), + ] + + @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.precommit_tf_fe + @pytest.mark.nightly + def test_to_bool_basic(self, params, ie_device, precision, ir_version, temp_dir, + use_new_frontend, use_old_api): + self._test(*self.create_tobool_net(**params), + ie_device, precision, ir_version, temp_dir=temp_dir, + use_new_frontend=use_new_frontend, use_old_api=use_old_api) \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py b/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py index ece6f08471a643..73efaf490b23dd 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TopKV2.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -46,6 +48,8 @@ def create_topk_v2_net(self, input_shape, input_type, k, sorted, is_first_output @pytest.mark.parametrize("params", test_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_topk_v2_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_topk_v2_net(**params), diff --git 
a/tests/layer_tests/tensorflow_tests/test_tf_TruncateDiv.py b/tests/layer_tests/tensorflow_tests/test_tf_TruncateDiv.py new file mode 100644 index 00000000000000..18440dbcd7f44a --- /dev/null +++ b/tests/layer_tests/tensorflow_tests/test_tf_TruncateDiv.py @@ -0,0 +1,53 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import platform + +import numpy as np +import pytest +import tensorflow as tf +from common.tf_layer_test_class import CommonTFLayerTest + + +class TestTruncateDiv(CommonTFLayerTest): + def _prepare_input(self, inputs_info): + assert 'x' in inputs_info + assert 'y' in inputs_info + x_shape = inputs_info['x'] + y_shape = inputs_info['y'] + inputs_data = {} + # generate x and y to ensure truncation + inputs_data['x'] = np.random.randint(-10, 10, x_shape).astype(self.input_type) + inputs_data['y'] = np.random.randint(1, 10, y_shape).astype(self.input_type) + return inputs_data + + def create_truncate_div_net(self, input_shape, input_type): + self.input_type = input_type + tf.compat.v1.reset_default_graph() + # Create the graph and model + with tf.compat.v1.Session() as sess: + x = tf.compat.v1.placeholder(input_type, input_shape, 'x') + y = tf.compat.v1.placeholder(input_type, input_shape, 'y') + tf.raw_ops.TruncateDiv(x=x, y=y) + tf.compat.v1.global_variables_initializer() + tf_net = sess.graph_def + + return tf_net, None + + test_data_basic = [ + dict(input_shape=[10, 20], input_type=np.float32), + dict(input_shape=[8, 5], input_type=np.float32), + dict(input_shape=[5, 3], input_type=np.int32), + dict(input_shape=[6, 4], input_type=np.int32), + ] + + @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.precommit_tf_fe + @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') + def test_truncate_div_basic(self, params, ie_device, precision, ir_version, temp_dir, + use_new_frontend, use_old_api): + 
self._test(*self.create_truncate_div_net(**params), + ie_device, precision, ir_version, temp_dir=temp_dir, + use_new_frontend=use_new_frontend, use_old_api=use_old_api) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TruncateMod.py b/tests/layer_tests/tensorflow_tests/test_tf_TruncateMod.py new file mode 100644 index 00000000000000..48b738095c8bb0 --- /dev/null +++ b/tests/layer_tests/tensorflow_tests/test_tf_TruncateMod.py @@ -0,0 +1,49 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import tensorflow as tf +from common.tf_layer_test_class import CommonTFLayerTest + + +class TestTruncateMod(CommonTFLayerTest): + def _prepare_input(self, inputs_info): + assert 'x' in inputs_info + assert 'y' in inputs_info + x_shape = inputs_info['x'] + y_shape = inputs_info['y'] + inputs_data = {} + # generate x and y to ensure truncation + inputs_data['x'] = np.random.randint(-10, 10, x_shape).astype(self.input_type) + inputs_data['y'] = np.random.randint(1, 10, y_shape).astype(self.input_type) + return inputs_data + + def create_truncate_mod_net(self, input_shape, input_type): + self.input_type = input_type + tf.compat.v1.reset_default_graph() + # Create the graph and model + with tf.compat.v1.Session() as sess: + x = tf.compat.v1.placeholder(input_type, input_shape, 'x') + y = tf.compat.v1.placeholder(input_type, input_shape, 'y') + tf.raw_ops.TruncateMod(x=x, y=y) + tf.compat.v1.global_variables_initializer() + tf_net = sess.graph_def + + return tf_net, None + + test_data_basic = [ + dict(input_shape=[10, 20], input_type=np.float32), + dict(input_shape=[8, 5], input_type=np.float32), + dict(input_shape=[5, 3], input_type=np.int32), + dict(input_shape=[6, 4], input_type=np.int32), + ] + + @pytest.mark.parametrize("params", test_data_basic) + @pytest.mark.precommit_tf_fe + @pytest.mark.nightly + def test_truncate_mod_basic(self, params, ie_device, precision, ir_version, temp_dir, + 
use_new_frontend, use_old_api): + self._test(*self.create_truncate_mod_net(**params), + ie_device, precision, ir_version, temp_dir=temp_dir, + use_new_frontend=use_new_frontend, use_old_api=use_old_api) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_UnsortedSegmentSum.py b/tests/layer_tests/tensorflow_tests/test_tf_UnsortedSegmentSum.py index 09afd6f26330ca..f7dcf2eeb324f2 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_UnsortedSegmentSum.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_UnsortedSegmentSum.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -55,6 +57,8 @@ def create_unsorted_segment_sum_net(self, data_shape, segment_ids_shape, num_seg ]) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_unsorted_segment_sum_basic(self, params, data_type, segment_ids_type, num_segments_type, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Xlog1py.py b/tests/layer_tests/tensorflow_tests/test_tf_Xlog1py.py index 7c80fbdad88b09..4da47e7b5356c4 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Xlog1py.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Xlog1py.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -42,6 +44,8 @@ def create_xlog1py_net(self, input_shape, input_type): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_xlog1py_basic(self, params, ie_device, precision, ir_version, temp_dir, 
use_new_frontend, use_old_api): self._test(*self.create_xlog1py_net(**params), diff --git a/tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py b/tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py index 6ecddeb439aed3..911c3b0eea2154 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_Xlogy.py @@ -1,6 +1,8 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import platform + import numpy as np import pytest import tensorflow as tf @@ -42,6 +44,8 @@ def create_xlogy_net(self, input_shape, input_type): @pytest.mark.parametrize("params", test_data_basic) @pytest.mark.precommit_tf_fe @pytest.mark.nightly + @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64', + reason='Ticket - 122716') def test_xlogy_basic(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api): self._test(*self.create_xlogy_net(**params), diff --git a/tests/model_hub_tests/torch_tests/hf_transformers_models b/tests/model_hub_tests/torch_tests/hf_transformers_models index 0618d98a4d9f31..31a24b681eb4c5 100644 --- a/tests/model_hub_tests/torch_tests/hf_transformers_models +++ b/tests/model_hub_tests/torch_tests/hf_transformers_models @@ -10,7 +10,6 @@ albert-base-v2,albert AlekseyKorshuk/test_reward_model,reward_model,skip,Load problem alibaba-damo/mgp-str-base,mgp-str,xfail,Compile error: unsupported Einsum allenai/hvila-block-layoutlm-finetuned-docbank,hierarchical_model,skip,Load problem -allenai/longformer-base-4096,longformer,xfail,Unsupported op aten::as_strided ameya772/sentence-t5-base-atis-fine-tuned,T5,skip,Load problem andreasmadsen/efficient_mlm_m0.40,roberta-prelayernorm anton-l/emformer-base-librispeech,emformer,skip,Load problem @@ -68,7 +67,7 @@ facebook/detr-resnet-50,detr facebook/dinov2-base,dinov2,skip,Load problem facebook/dpr-question_encoder-single-nq-base,dpr facebook/encodec_24khz,encodec,xfail,Unsupported op 
aten::lstm -facebook/esm2_t6_8M_UR50D,esm,xfail,Tracing error: The values for attribute 'shape' do not match +facebook/esm2_t6_8M_UR50D,esm facebook/flava-full,flava,xfail,Tracing problem facebook/flava-image-codebook,flava_image_codebook,skip,Load problem facebook/m2m100_418M,m2m_100 @@ -123,10 +122,10 @@ hf-internal-testing/tiny-random-Data2VecAudioModel,data2vec-audio,skip,Load prob hf-internal-testing/tiny-random-Data2VecTextModel,data2vec-text hf-internal-testing/tiny-random-Data2VecVisionModel,data2vec-vision hf-internal-testing/tiny-random-DeiTModel,deit -hf-internal-testing/tiny-random-DonutSwinModel,donut-swin,xfail,Unsupported op aten::adaptive_avg_pool1d +hf-internal-testing/tiny-random-DonutSwinModel,donut-swin hf-internal-testing/tiny-random-EfficientFormerForImageClassification,efficientformer hf-internal-testing/tiny-random-flaubert,flaubert -hf-internal-testing/tiny-random-FocalNetModel,focalnet,xfail,Unsupported op aten::adaptive_avg_pool1d +hf-internal-testing/tiny-random-FocalNetModel,focalnet hf-internal-testing/tiny-random-GPTBigCodeForCausalLM,gpt_bigcode,xfail,Conversion is failed for: aten::mul hf-internal-testing/tiny-random-GPTJModel,gptj hf-internal-testing/tiny-random-groupvit,groupvit @@ -155,7 +154,7 @@ hf-internal-testing/tiny-random-Speech2TextModel,speech_to_text,skip,Load proble hf-internal-testing/tiny-random-speech-encoder-decoder,speech-encoder-decoder,skip,Load problem hf-internal-testing/tiny-random-SplinterModel,splinter hf-internal-testing/tiny-random-SqueezeBertModel,squeezebert -hf-internal-testing/tiny-random-SwinModel,swin,xfail,Unsupported op aten::adaptive_avg_pool1d +hf-internal-testing/tiny-random-SwinModel,swin hf-internal-testing/tiny-random-unispeech,unispeech,skip,Load problem hf-internal-testing/tiny-random-UniSpeechSatModel,unispeech-sat,skip,Load problem hf-internal-testing/tiny-random-vision_perceiver_conv,perceiver @@ -247,7 +246,7 @@ microsoft/markuplm-base,markuplm microsoft/resnet-50,resnet 
microsoft/speecht5_hifigan,hifigan,skip,Load problem microsoft/speecht5_tts,speecht5,skip,Load problem -microsoft/swinv2-tiny-patch4-window8-256,swinv2,xfail,Unsupported op aten::adaptive_avg_pool1d +microsoft/swinv2-tiny-patch4-window8-256,swinv2 microsoft/table-transformer-detection,table-transformer microsoft/wavlm-large,wavlm,skip,Load problem microsoft/xclip-base-patch32,xclip,skip,Load problem @@ -301,7 +300,6 @@ pie/example-re-textclf-tacred,TransformerTextClassificationModel,skip,Load probl pleisto/yuren-baichuan-7b,multimodal_llama,skip,Load problem predictia/europe_reanalysis_downscaler_convbaseline,convbilinear,skip,Load problem predictia/europe_reanalysis_downscaler_convswin2sr,conv_swin2sr,skip,Load problem -pszemraj/led-large-book-summary,led,xfail,Unsupported op aten::as_strided qmeeus/whisper-small-ner-combined,whisper_for_slu,skip,Load problem raman-ai/pcqv2-tokengt-lap16,tokengt,skip,Load problem range3/pegasus-gpt2-medium,pegasusgpt2,skip,Load problem @@ -330,8 +328,8 @@ sheonhan/ict-imagenet-256,ict,skip,Load problem shibing624/text2vec-base-chinese-paraphrase,ernie shikhartuli/flexibert-mini,flexibert,skip,Load problem shikras/shikra-7b-delta-v1-0708,shikra,skip,Load problem -shi-labs/dinat-mini-in1k-224,dinat,xfail,Unsupported op aten::adaptive_avg_pool1d -shi-labs/nat-mini-in1k-224,nat,xfail,Unsupported op aten::adaptive_avg_pool1d +shi-labs/dinat-mini-in1k-224,dinat,xfail,Accuracy validation failed +shi-labs/nat-mini-in1k-224,nat,xfail,Accuracy validation failed shi-labs/oneformer_ade20k_swin_large,oneformer,skip,Load problem shuqi/seed-encoder,seed_encoder,skip,Load problem sijunhe/nezha-cn-base,nezha diff --git a/tests/model_hub_tests/torch_tests/test_hf_transformers.py b/tests/model_hub_tests/torch_tests/test_hf_transformers.py index 184e725a04f9b9..caeb2e0ff2a01d 100644 --- a/tests/model_hub_tests/torch_tests/test_hf_transformers.py +++ b/tests/model_hub_tests/torch_tests/test_hf_transformers.py @@ -292,7 +292,8 @@ def 
teardown_method(self): cleanup_dir(hf_hub_cache_dir) super().teardown_method() - @pytest.mark.parametrize("name,type", [("bert-base-uncased", "bert"), + @pytest.mark.parametrize("name,type", [("allenai/led-base-16384", "led"), + ("bert-base-uncased", "bert"), ("facebook/bart-large-mnli", "bart"), ("google/flan-t5-base", "t5"), ("google/tapas-large-finetuned-wtq", "tapas"), diff --git a/thirdparty/dependencies.cmake b/thirdparty/dependencies.cmake index fac4752c318250..4eed13c9a79af6 100644 --- a/thirdparty/dependencies.cmake +++ b/thirdparty/dependencies.cmake @@ -414,14 +414,14 @@ if(ENABLE_OV_PADDLE_FRONTEND OR ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_TF_FRONTEND if(CMAKE_VERBOSE_MAKEFILE) set(Protobuf_DEBUG ON) endif() - if(OV_VCPKG_BUILD) - set(protobuf_config CONFIG) - endif() # try to find newer version first (major is changed) # see https://protobuf.dev/support/version-support/ and # https://github.com/protocolbuffers/protobuf/commit/d61f75ff6db36b4f9c0765f131f8edc2f86310fa - find_package(Protobuf 4.22.0 QUIET ${protobuf_config}) + find_package(Protobuf 4.22.0 QUIET CONFIG) if(NOT Protobuf_FOUND) + if(OV_VCPKG_BUILD) + set(protobuf_config CONFIG) + endif() # otherwise, fallback to existing default find_package(Protobuf 3.20.3 REQUIRED ${protobuf_config}) endif() diff --git a/thirdparty/fluid/modules/gapi/include/opencv2/gapi/imgproc.hpp b/thirdparty/fluid/modules/gapi/include/opencv2/gapi/imgproc.hpp index 5c4c6f7031e6d8..5a6f237b27cda1 100644 --- a/thirdparty/fluid/modules/gapi/include/opencv2/gapi/imgproc.hpp +++ b/thirdparty/fluid/modules/gapi/include/opencv2/gapi/imgproc.hpp @@ -1214,7 +1214,7 @@ or column if there are N channels, or have N columns if there is a single channe @param src Input set of 2D points stored in one of possible containers: Mat, std::vector, std::vector, std::vector. @param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER -and @ref DIST_C are not suppored. +and @ref DIST_C are not supported. 
@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value is chosen. @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the @@ -1286,7 +1286,7 @@ or column if there are N channels, or have N columns if there is a single channe @param src Input set of 3D points stored in one of possible containers: Mat, std::vector, std::vector, std::vector. @param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER -and @ref DIST_C are not suppored. +and @ref DIST_C are not supported. @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value is chosen. @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the diff --git a/thirdparty/open_model_zoo b/thirdparty/open_model_zoo index e0e434f64a4da0..bb98fe444c84d6 160000 --- a/thirdparty/open_model_zoo +++ b/thirdparty/open_model_zoo @@ -1 +1 @@ -Subproject commit e0e434f64a4da07274c31c1aae48fbdcfa087fb0 +Subproject commit bb98fe444c84d67fd67ee7ec15a340722c652053