diff --git a/CMakeLists.txt b/CMakeLists.txt index d478d76028f0d9..13c0eda419b905 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -71,7 +71,6 @@ function(build_ngraph) ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE OFF) endif() ngraph_set(NGRAPH_INTERPRETER_ENABLE ON) - ngraph_set(NGRAPH_TF_FRONTEND_ENABLE OFF) if(NOT (ANDROID OR WIN32 OR ARM OR AARCH64) ) ngraph_set(NGRAPH_PDPD_FRONTEND_ENABLE ON) #TODO: make it OFF by default else() diff --git a/model-optimizer/mo/front_ng/extractor.py b/model-optimizer/mo/front_ng/extractor.py index a1b0e8ff94f464..2be013fc9670c6 100644 --- a/model-optimizer/mo/front_ng/extractor.py +++ b/model-optimizer/mo/front_ng/extractor.py @@ -11,7 +11,7 @@ from mo.utils.error import Error -def fe_decodeNameWithPort (inputModel, node_name: str): +def fe_decodeNameWithPort(inputModel, node_name: str): """ Decode name with optional port specification w/o traversing all the nodes in the graph :param inputModel: Input Model @@ -39,15 +39,17 @@ def fe_decodeNameWithPort (inputModel, node_name: str): def fe_input_user_data_repack(inputModel, input_user_shapes: [None, list, dict, np.ndarray], - freeze_placeholder: dict, input_user_data_types = dict()): + freeze_placeholder: dict, input_user_data_types=dict()): """ - Restructures user input cutting request. Splits ports out of node names. Transforms node names to node ids. + Restructures user input cutting request. Splits ports out of node names. + Transforms node names to node ids. :param graph: graph to operate on :param input_user_shapes: data structure representing user input cutting request. It may be: # None value if user did not provide neither --input nor --input_shape keys - # list instance which contains input layer names with or without ports if user provided only --input key - # dict instance which contains input layer names with or without ports as keys and shapes as values if user - provided both --input and --input_shape + # list instance which contains input layer names with or without ports if user provided + only --input key + # dict instance which contains input layer names with or without ports as keys and shapes as + values if user provided both --input and --input_shape # np.ndarray if user provided only --input_shape key :param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values :param input_user_data_types: dictionary with input nodes and its data types @@ -136,7 +138,7 @@ def fe_output_user_data_repack(inputModel, outputs: list): def fe_user_data_repack(inputModel, input_user_shapes: [None, list, dict, np.array], - input_user_data_types: dict, outputs: list, freeze_placeholder: dict): + input_user_data_types: dict, outputs: list, freeze_placeholder: dict): """ :param inputModel: Input Model to operate on :param input_user_shapes: data structure representing user input cutting request @@ -144,8 +146,8 @@ def fe_user_data_repack(inputModel, input_user_shapes: [None, list, dict, np.arr :param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values :return: restructured input, output and freeze placeholder dictionaries or None values """ - _input_shapes, _freeze_placeholder = fe_input_user_data_repack(inputModel, input_user_shapes, freeze_placeholder, - input_user_data_types=input_user_data_types) + _input_shapes, _freeze_placeholder = fe_input_user_data_repack( + inputModel, input_user_shapes, freeze_placeholder, input_user_data_types=input_user_data_types) _outputs = fe_output_user_data_repack(inputModel, outputs) print('---------- 
Inputs/outputs/freezePlaceholder -----------')
diff --git a/model-optimizer/mo/front_ng/frontendmanager_wrapper.py b/model-optimizer/mo/front_ng/frontendmanager_wrapper.py
index cb1e0786017ad1..b8c2961ef64ad5 100644
--- a/model-optimizer/mo/front_ng/frontendmanager_wrapper.py
+++ b/model-optimizer/mo/front_ng/frontendmanager_wrapper.py
@@ -10,7 +10,7 @@ def create_fem():
     fem = None
     try:
-        from ngraph.frontend import FrontEndManager # pylint: disable=no-name-in-module,import-error
+        from ngraph.frontend import FrontEndManager  # pylint: disable=no-name-in-module,import-error
         fem = FrontEndManager()
     except Exception:
         print("nGraph FrontEndManager is not initialized")
diff --git a/model-optimizer/mo/front_ng/pipeline.py b/model-optimizer/mo/front_ng/pipeline.py
index f27c2186b2b270..669445083c072a 100644
--- a/model-optimizer/mo/front_ng/pipeline.py
+++ b/model-optimizer/mo/front_ng/pipeline.py
@@ -81,7 +81,7 @@ def compare_nodes(old, new):
         for i in range(oldPartShape.rank.get_length()):
             # Assume batch size is always 1-st dimension in shape
             # Keep other dimensions unchanged
-            newshape.append(Dimension(argv.batch) if i is 0 else oldPartShape.get_dimension(i))
+            newshape.append(Dimension(argv.batch) if i == 0 else oldPartShape.get_dimension(i))
             oldshape_converted.append(oldPartShape.get_dimension(i))

         validate_batch_in_shape(oldshape_converted, joinedName)
diff --git a/model-optimizer/mo/front_ng/serialize.py b/model-optimizer/mo/front_ng/serialize.py
index 1c19a4b6ef9022..48ff887ad70912 100644
--- a/model-optimizer/mo/front_ng/serialize.py
+++ b/model-optimizer/mo/front_ng/serialize.py
@@ -8,7 +8,7 @@
 def ngraph_emit_ir(nGraphFunction, argv: argparse.Namespace):
     output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()

-    from ngraph import function_to_cnn # pylint: disable=no-name-in-module,import-error
+    from ngraph import function_to_cnn  # pylint: disable=no-name-in-module,import-error
     network = function_to_cnn(nGraphFunction)

     orig_model_name = os.path.normpath(os.path.join(output_dir, argv.model_name))
diff --git a/ngraph/CMakeLists.txt b/ngraph/CMakeLists.txt
index 190a2ef8970119..07148b5e0d92c8 100644
--- a/ngraph/CMakeLists.txt
+++ b/ngraph/CMakeLists.txt
@@ -88,7 +88,6 @@ option(NGRAPH_INTERPRETER_ENABLE "Control the building of the INTERPRETER backen
 option(NGRAPH_DEBUG_ENABLE "Enable output for NGRAPH_DEBUG statements" OFF)
 option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" OFF)
 option(NGRAPH_ONNX_EDITOR_ENABLE "Enable ONNX Editor" OFF)
-option(NGRAPH_TF_FRONTEND_ENABLE "Enable TensorFlow FrontEnd" OFF)
 option(NGRAPH_PDPD_FRONTEND_ENABLE "Enable PaddlePaddle FrontEnd" OFF)
 option(NGRAPH_LIB_VERSIONING_ENABLE "Enable shared library versioning" OFF)
 option(NGRAPH_PYTHON_BUILD_ENABLE "Enable build nGraph python package wheel" OFF)
@@ -99,7 +98,7 @@ option(NGRAPH_THREAD_SANITIZER_ENABLE "Compiles and links with Thread Sanitizer"
 option(NGRAPH_UB_SANITIZER_ENABLE "Compiles and links with Undefined Behavior Sanitizer" OFF)
 option(NGRAPH_USE_PROTOBUF_LITE "Compiles and links with protobuf-lite" OFF)

-if (NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_TF_FRONTEND_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE)
+if (NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE)
     option(NGRAPH_USE_SYSTEM_PROTOBUF "Use system provided Protobuf shared object" OFF)
 endif()
 if(NGRAPH_ONNX_EDITOR_ENABLE AND NOT NGRAPH_ONNX_IMPORT_ENABLE)
@@ -114,7 +113,6 @@ message(STATUS "NGRAPH_INTERPRETER_ENABLE:            ${NGRAPH_INTERPRETER_ENABL
 message(STATUS "NGRAPH_LIB_VERSIONING_ENABLE:         ${NGRAPH_LIB_VERSIONING_ENABLE}")
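A side note on the `i is 0` → `i == 0` fix in the pipeline.py hunk above: `is` tests object identity, not numeric equality, so it only appears to work for integers that CPython happens to cache, and CPython 3.8+ flags literal comparisons like `i is 0` with a `SyntaxWarning`. A minimal standalone sketch of the pitfall (illustration only, not part of the patch):

```python
# `is` tests object identity, not numeric equality; it only appears to
# work for ints that CPython caches (-5..256).
a = 256
b = 256
print(a is b)           # True, but only thanks to the small-int cache
c = int("10000000000")  # built at runtime to defeat constant folding
d = int("10000000000")
print(c == d)           # True: value comparison, the correct operator
print(c is d)           # False: two distinct int objects
```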
message(STATUS "NGRAPH_ONNX_IMPORT_ENABLE: ${NGRAPH_ONNX_IMPORT_ENABLE}") message(STATUS "NGRAPH_ONNX_EDITOR_ENABLE: ${NGRAPH_ONNX_EDITOR_ENABLE}") -message(STATUS "NGRAPH_TF_FRONTEND_ENABLE: ${NGRAPH_TF_FRONTEND_ENABLE}") message(STATUS "NGRAPH_PDPD_FRONTEND_ENABLE: ${NGRAPH_PDPD_FRONTEND_ENABLE}") message(STATUS "NGRAPH_PYTHON_BUILD_ENABLE: ${NGRAPH_PYTHON_BUILD_ENABLE}") message(STATUS "NGRAPH_THREAD_SANITIZER_ENABLE: ${NGRAPH_THREAD_SANITIZER_ENABLE}") @@ -295,19 +293,15 @@ if (NGRAPH_EXPORT_TARGETS_ENABLE) COMPONENT ngraph_dev) endif() -set(USE_PROTOBUF OFF) -if (NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_TF_FRONTEND_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE) - set(USE_PROTOBUF ON) +set(USE_STATIC_PROTOBUF OFF) +if (NGRAPH_PDPD_FRONTEND_ENABLE) # add more frontends here + set(USE_STATIC_PROTOBUF ON) endif() -if (USE_PROTOBUF) - message("Add PROTOBUF dependency") - set(BEFORE_ONNX_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS}) - set(BUILD_SHARED_LIBS OFF) # TODO: it was ON for ONNX_IMPORT, but should be fixed - +if (NGRAPH_ONNX_IMPORT_ENABLE OR USE_STATIC_PROTOBUF) if (MSVC) - # When we build dll libraries. These flags make sure protobuf build with /MD, not /MT. - # These two options can't be mixed, because they requires link two incompatible runtime. + # When we build dll libraries. These flags make sure onnx and protobuf build with /MD, not /MT. + # These two options can't be mixed, because they requires link two imcompatiable runtime. set(protobuf_WITH_ZLIB OFF CACHE BOOL "" FORCE) if(NOT DEFINED ONNX_USE_MSVC_STATIC_RUNTIME) @@ -318,6 +312,10 @@ if (USE_PROTOBUF) endif() endif() + set(BEFORE_ONNX_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS}) + set(BUILD_SHARED_LIBS ON) + set(BUILD_STANDALONE_STATIC OFF) + if (NOT NGRAPH_USE_SYSTEM_PROTOBUF) include(cmake/external_protobuf.cmake) else() @@ -336,6 +334,7 @@ if (USE_PROTOBUF) unset(BEFORE_ONNX_BUILD_SHARED_LIBS) endif() + add_subdirectory(frontend) if(NGRAPH_UNIT_TEST_ENABLE) diff --git a/ngraph/cmake/external_protobuf.cmake b/ngraph/cmake/external_protobuf.cmake index eb96667622a3ba..72604fc77e0801 100644 --- a/ngraph/cmake/external_protobuf.cmake +++ b/ngraph/cmake/external_protobuf.cmake @@ -64,25 +64,45 @@ if(PROTOC_VERSION VERSION_LESS "3.9" AND NGRAPH_USE_PROTOBUF_LITE) message(FATAL_ERROR "Minimum supported version of protobuf-lite library is 3.9.0") else() if(PROTOC_VERSION VERSION_GREATER_EQUAL "3.0") - FetchContent_Declare( - ext_protobuf - GIT_REPOSITORY ${NGRAPH_PROTOBUF_GIT_REPO_URL} - GIT_TAG ${NGRAPH_PROTOBUF_GIT_TAG} - GIT_SHALLOW TRUE - ) - - FetchContent_GetProperties(ext_protobuf) - if(NOT ext_protobuf_POPULATED) - FetchContent_Populate(ext_protobuf) - set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build tests") - set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build with zlib support") - add_subdirectory(${ext_protobuf_SOURCE_DIR}/cmake ${ext_protobuf_BINARY_DIR} EXCLUDE_FROM_ALL) + if (NOT BUILD_STANDALONE_STATIC) + FetchContent_Declare( + ext_protobuf + GIT_REPOSITORY ${NGRAPH_PROTOBUF_GIT_REPO_URL} + GIT_TAG ${NGRAPH_PROTOBUF_GIT_TAG} + GIT_SHALLOW TRUE + ) + FetchContent_GetProperties(ext_protobuf) + if(NOT ext_protobuf_POPULATED) + FetchContent_Populate(ext_protobuf) + set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build tests") + set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build with zlib support") + add_subdirectory(${ext_protobuf_SOURCE_DIR}/cmake ${ext_protobuf_BINARY_DIR} EXCLUDE_FROM_ALL) + endif() + endif() + if (USE_STATIC_PROTOBUF) + FetchContent_Declare( + ext_protobuf_static + GIT_REPOSITORY ${NGRAPH_PROTOBUF_GIT_REPO_URL} + GIT_TAG 
${NGRAPH_PROTOBUF_GIT_TAG} + GIT_SHALLOW TRUE + ) + FetchContent_GetProperties(ext_protobuf_static) + if(NOT ext_protobuf_static_POPULATED AND BUILD_STANDALONE_STATIC) + FetchContent_Populate(ext_protobuf_static) + set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build tests") + set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build with zlib support") + add_subdirectory(${ext_protobuf_static_SOURCE_DIR}/cmake ${ext_protobuf_static_BINARY_DIR} EXCLUDE_FROM_ALL) + endif() endif() else() message(FATAL_ERROR "Minimum supported version of protobuf library is 3.0.0") endif() - set(Protobuf_INCLUDE_DIRS ${ext_protobuf_SOURCE_DIR}/src) + if (BUILD_STANDALONE_STATIC) + set(Protobuf_INCLUDE_DIRS ${ext_protobuf_static_SOURCE_DIR}/src) + else() + set(Protobuf_INCLUDE_DIRS ${ext_protobuf_SOURCE_DIR}/src) + endif() if(NGRAPH_USE_PROTOBUF_LITE) set(Protobuf_LIBRARIES libprotobuf-lite) else() @@ -117,6 +137,7 @@ endif() # Now make sure we restore the original flags set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE "${PUSH_CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE}") +message("NGRAPH_INSTALL_LIB = ${NGRAPH_INSTALL_LIB}") install(TARGETS ${Protobuf_LIBRARIES} RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph @@ -125,9 +146,6 @@ if (NGRAPH_EXPORT_TARGETS_ENABLE) export(TARGETS ${Protobuf_LIBRARIES} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") endif() -message(${ext_protobuf_BINARY_DIR}) -#include("${ext_protobuf_BINARY_DIR}/lib/cmake/protobuf/protobuf-module.cmake") - #TODO: ---Find out the way to reuse these function from Protobuf modules --- function(protobuf_generate) diff --git a/ngraph/frontend/CMakeLists.txt b/ngraph/frontend/CMakeLists.txt index 431a2693c3fd4b..fe6d34404840ef 100644 --- a/ngraph/frontend/CMakeLists.txt +++ b/ngraph/frontend/CMakeLists.txt @@ -2,6 +2,53 @@ # SPDX-License-Identifier: Apache-2.0 # +message(${CMAKE_CURRENT_SOURCE_DIR}/cmake_static_protobuf) +message(BINARY ${CMAKE_CURRENT_BINARY_DIR}) + +## DEBUG - print all variables +# get_cmake_property(_variableNames VARIABLES) +# set(ALL_VARS "") +# foreach (_variableName ${_variableNames}) +# set(ALL_VARS ${ALL_VARS} -D${_variableName}=${${_variableName}}\ ) +# endforeach() +# message(---------------------ALL VARS: ${ALL_VARS}-------) + +# There seems no suitable other way to identify exact output binary name +if(CMAKE_BUILD_TYPE STREQUAL "Debug") + set(PROTOBUF_STATIC_LIB_OUTPUT ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/libprotobufd.a) +else(CMAKE_BUILD_TYPE STREQUAL "Debug") + set(PROTOBUF_STATIC_LIB_OUTPUT ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/libprotobuf.a) +endif() + +message("Static protobuf lib: ${PROTOBUF_STATIC_LIB_OUTPUT}") +add_custom_command( + OUTPUT + ${PROTOBUF_STATIC_LIB_OUTPUT} + COMMAND ${CMAKE_COMMAND} ${CMAKE_CURRENT_SOURCE_DIR}/cmake_static_protobuf + -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} + -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY} + -DCMAKE_COMPILE_PDB_OUTPUT_DIRECTORY=${CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY} + -DCMAKE_PDB_OUTPUT_DIRECTORY=${CMAKE_PDB_OUTPUT_DIRECTORY} + -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} + -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} + -DCMAKE_CXX_VISIBILITY_PRESET=${CMAKE_CXX_VISIBILITY_PRESET} + -DNGRAPH_INSTALL_LIB=${NGRAPH_INSTALL_LIB} + ${NGRAPH_FORWARD_CMAKE_ARGS} + COMMAND ${CMAKE_COMMAND} --build . 
--target libprotobuf + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Build Protobuf Static Library" + # TODO: add DEPENDS +) + +# Import targets + +add_custom_target(libprotobuf_static + DEPENDS + ${PROTOBUF_STATIC_LIB_OUTPUT} + ) + + add_subdirectory(frontend_manager) if (NGRAPH_PDPD_FRONTEND_ENABLE) diff --git a/ngraph/frontend/cmake_static_protobuf/CMakeLists.txt b/ngraph/frontend/cmake_static_protobuf/CMakeLists.txt new file mode 100644 index 00000000000000..6df263046380e5 --- /dev/null +++ b/ngraph/frontend/cmake_static_protobuf/CMakeLists.txt @@ -0,0 +1,23 @@ +# Copyright (C) 2018-2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +cmake_minimum_required(VERSION 3.13) + +project(libprotobuf_static) + +# DEBUG - print all defined variables +# get_cmake_property(_variableNames VARIABLES) +# set(ALL_VARS "") +# foreach (_variableName ${_variableNames}) +# set(ALL_VARS ${ALL_VARS} -D${_variableName}=${${_variableName}}\n ) +# endforeach() +# message("---------------------${ALL_VARS}-------") + +message("Add PROTOBUF dependency - static") + +set(BUILD_SHARED_LIBS OFF) +set(BUILD_STANDALONE_STATIC ON) +set(USE_STATIC_PROTOBUF ON) + +include(../../cmake/external_protobuf.cmake) diff --git a/ngraph/frontend/paddlepaddle/CMakeLists.txt b/ngraph/frontend/paddlepaddle/CMakeLists.txt index 9c3bfaad1f53ef..8ae6982b277182 100644 --- a/ngraph/frontend/paddlepaddle/CMakeLists.txt +++ b/ngraph/frontend/paddlepaddle/CMakeLists.txt @@ -29,11 +29,12 @@ include_directories(${Protobuf_INCLUDE_DIRS} ${paddlepaddle_ngraph_frontend_INCL add_library(paddlepaddle_ngraph_frontend SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS} ${PROTO_SRCS} ${PROTO_HDRS}) add_library(ngraph::paddlepaddle_ngraph_frontend ALIAS paddlepaddle_ngraph_frontend) -# TODO: fix relative include directory by moving +add_dependencies(paddlepaddle_ngraph_frontend libprotobuf_static) + target_include_directories(paddlepaddle_ngraph_frontend PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src - ${CMAKE_CURRENT_SOURCE_DIR}/../frontend_manager/include + ${FRONTEND_INCLUDE_PATH} ${CMAKE_CURRENT_BINARY_DIR}) if(COMMAND ie_add_vs_version_file) @@ -41,7 +42,7 @@ if(COMMAND ie_add_vs_version_file) FILEDESCRIPTION "FrontEnd to load and convert PaddlePaddle file format") endif() -target_link_libraries(paddlepaddle_ngraph_frontend PRIVATE ${Protobuf_LIBRARIES} PUBLIC ngraph PRIVATE ngraph::builder) +target_link_libraries(paddlepaddle_ngraph_frontend PRIVATE ${PROTOBUF_STATIC_LIB_OUTPUT} PUBLIC ngraph PRIVATE ngraph::builder) target_link_libraries(paddlepaddle_ngraph_frontend PRIVATE frontend_manager) add_clang_format_target(paddlepaddle_ngraph_frontend_clang FOR_TARGETS paddlepaddle_ngraph_frontend diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp index b2e042acc8b45f..19dcc61d24a4cf 100644 --- a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/utility.hpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #pragma once diff --git a/ngraph/frontend/paddlepaddle/src/decoder.hpp b/ngraph/frontend/paddlepaddle/src/decoder.hpp index 3bd04304aca2e4..12a6c69b699ae5 100644 --- a/ngraph/frontend/paddlepaddle/src/decoder.hpp +++ b/ngraph/frontend/paddlepaddle/src/decoder.hpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #pragma once diff --git a/ngraph/frontend/paddlepaddle/src/node_context.hpp b/ngraph/frontend/paddlepaddle/src/node_context.hpp index a9281fddee6572..8fc77debfb6dc1 100644 --- a/ngraph/frontend/paddlepaddle/src/node_context.hpp +++ b/ngraph/frontend/paddlepaddle/src/node_context.hpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #pragma once #include diff --git a/ngraph/frontend/paddlepaddle/src/op/cast.cpp b/ngraph/frontend/paddlepaddle/src/op/cast.cpp index bcabbfc80941f8..2cb181f0b24158 100644 --- a/ngraph/frontend/paddlepaddle/src/op/cast.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/cast.cpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #include "cast.hpp" #include diff --git a/ngraph/frontend/paddlepaddle/src/op/cast.hpp b/ngraph/frontend/paddlepaddle/src/op/cast.hpp index af321b7e443ad2..c9a575a3e470b2 100644 --- a/ngraph/frontend/paddlepaddle/src/op/cast.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/cast.hpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #pragma once #include "node_context.hpp" diff --git a/ngraph/frontend/paddlepaddle/src/op/concat.cpp b/ngraph/frontend/paddlepaddle/src/op/concat.cpp index aa68a2c784f789..a9c6fa6388d848 100644 --- a/ngraph/frontend/paddlepaddle/src/op/concat.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/concat.cpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #include "concat.hpp" #include diff --git a/ngraph/frontend/paddlepaddle/src/op/concat.hpp b/ngraph/frontend/paddlepaddle/src/op/concat.hpp index d21d874a8f11b1..5cf14fb15c6f42 100644 --- a/ngraph/frontend/paddlepaddle/src/op/concat.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/concat.hpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #pragma once #include "node_context.hpp" diff --git a/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp index 8d0b3d60c8f258..ec6498ebb0a58c 100644 --- a/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.cpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #include "leakyrelu.hpp" #include diff --git a/ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp index f85bdc38fef5c6..7bb181c8eeb5d3 100644 --- a/ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/leakyrelu.hpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-//***************************************************************************** #pragma once #include "node_context.hpp" diff --git a/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp index f9234edfe3154f..e99ad8e3e103f4 100644 --- a/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.cpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #include "multiclass_nms.hpp" #include diff --git a/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp index 3f5c1c9083b4ce..956d7fa72a2bd6 100644 --- a/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/multiclass_nms.hpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #pragma once #include "node_context.hpp" diff --git a/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp b/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp index d083a30301f20f..b4565f86de8248 100644 --- a/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/pool2d.cpp @@ -68,7 +68,6 @@ namespace ngraph default: throw std::runtime_error("Unsupported pooling paddings " + std::to_string(paddings.size())); - break; } } diff --git a/ngraph/frontend/paddlepaddle/src/op/relu.cpp b/ngraph/frontend/paddlepaddle/src/op/relu.cpp index a60238b3ae5300..68d1cca3203cd3 100644 --- a/ngraph/frontend/paddlepaddle/src/op/relu.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/relu.cpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #include "relu.hpp" #include diff --git a/ngraph/frontend/paddlepaddle/src/op/relu.hpp b/ngraph/frontend/paddlepaddle/src/op/relu.hpp index fcfc5ba1dc571a..7a63e7f89d8317 100644 --- a/ngraph/frontend/paddlepaddle/src/op/relu.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/relu.hpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #pragma once #include "node_context.hpp" diff --git a/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp b/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp index 1662a6486fac24..e944bc89396125 100644 --- a/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp +++ b/ngraph/frontend/paddlepaddle/src/op/transpose2.cpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #include "transpose2.hpp" #include diff --git a/ngraph/frontend/paddlepaddle/src/op/transpose2.hpp b/ngraph/frontend/paddlepaddle/src/op/transpose2.hpp index 31b6af6944ef14..d33fe0a1089679 100644 --- a/ngraph/frontend/paddlepaddle/src/op/transpose2.hpp +++ b/ngraph/frontend/paddlepaddle/src/op/transpose2.hpp @@ -1,18 +1,6 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** #pragma once #include "node_context.hpp" diff --git a/ngraph/frontend/tensorflow/CMakeLists.txt b/ngraph/frontend/tensorflow/CMakeLists.txt deleted file mode 100644 index be9b3a163fd37c..00000000000000 --- a/ngraph/frontend/tensorflow/CMakeLists.txt +++ /dev/null @@ -1,66 +0,0 @@ -# ****************************************************************************** -# Copyright 2017-2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ****************************************************************************** - -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cc) -file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp ${CMAKE_CURRENT_SOURCE_DIR}/src/*.h) -file(GLOB_RECURSE LIBRARY_PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) - -find_package(Protobuf REQUIRED IMPORTED) -include_directories(${Protobuf_INCLUDE_DIRS}) - -set(TENSORFLOW_FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) - -# Create named folders for the sources within the .vcproj -# Empty name lists them directly under the .vcproj - -source_group("src" FILES ${LIBRARY_SRC}) -source_group("include" FILES ${LIBRARY_HEADERS}) -source_group("public include" FILES ${LIBRARY_PUBLIC_HEADERS}) - -set(PROTOBUF_GENERATE_CPP_APPEND_PATH ON) -file(GLOB proto_files ${CMAKE_CURRENT_SOURCE_DIR}/src/proto/*.proto) -protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS ${proto_files}) - -include_directories(${Protobuf_INCLUDE_DIRS}) - -# Create shared library -add_library(tensorflow_frontend SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS} ${PROTO_SRCS} ${PROTO_HDRS}) -add_library(ngraph::tensorflow_frontend ALIAS tensorflow_frontend) - -target_include_directories(tensorflow_frontend PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../generic/include ${CMAKE_CURRENT_BINARY_DIR}) - -if(COMMAND ie_add_vs_version_file) - ie_add_vs_version_file(NAME tensorflow_frontend - FILEDESCRIPTION "FrontEnd to load and convert Tensorflow file format") -endif() - -target_link_libraries(tensorflow_frontend PRIVATE ${Protobuf_LIBRARIES} PUBLIC ngraph) - -# TODO: Consider to remove the following block (inherited from onnx_import just in case). 
-if (CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$") - target_compile_options(tensorflow_frontend PRIVATE -Wno-undef -Wno-reserved-id-macro -Wno-switch-enum - -Wno-invalid-offsetof -Wno-shorten-64-to-32 -Wno-unused-macros -Wno-missing-variable-declarations - -Wno-unused-private-field -Wno-shadow -Wno-deprecated PUBLIC -Wno-undefined-func-template) -endif() - -install(TARGETS tensorflow_frontend EXPORT ngraphTargets - RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph - LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph) - -if (NGRAPH_EXPORT_TARGETS_ENABLE) - export(TARGETS tensorflow_frontend NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}") -endif() diff --git a/ngraph/frontend/tensorflow/include/tensorflow_frontend/tensorflow.hpp b/ngraph/frontend/tensorflow/include/tensorflow_frontend/tensorflow.hpp deleted file mode 100644 index 33e7e06c32e983..00000000000000 --- a/ngraph/frontend/tensorflow/include/tensorflow_frontend/tensorflow.hpp +++ /dev/null @@ -1,75 +0,0 @@ -//***************************************************************************** -// Copyright 2017-2021 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -//***************************************************************************** - -#pragma once - -// TODO: include it by just frontend_manager.hpp without path -//#include "../../include/frontend_manager/frontend_manager.hpp" -#include "frontend_manager/frontend_manager.hpp" - -namespace tensorflow { class GraphDef; } - -namespace ngraph -{ - namespace frontend - { - class PlaceTensorflow : public Place - { - public: - - std::string name; - enum Kind { PORT_INPUT, PORT_OUTPUT, TENSOR, OP } kind; - size_t port; - - PlaceTensorflow (const std::string& _name, Kind _kind = OP, size_t _port = 0) : name(_name), kind(_kind), port(_port) {} - }; - - class NGRAPH_API InputModelTensorflow : public InputModel - { - public: - - std::shared_ptr graph_def; - std::string path; - - // TODO: map from PlaceTensorflow, not from name string - std::map partialShapes; - - InputModelTensorflow (const std::string& _path); - - std::vector getInputs () const override; - - void setPartialShape (Place::Ptr place, const ngraph::PartialShape& pshape) override; - }; - - class NGRAPH_API FrontEndTensorflow : public FrontEnd - { - public: - - FrontEndTensorflow () - { - } - - virtual InputModel::Ptr loadFromFile (const std::string& path) const override - { - return std::make_shared(path); - } - - virtual std::shared_ptr convert (InputModel::Ptr model) const override; - }; - - } // namespace frontend - -} // namespace ngraph diff --git a/ngraph/frontend/tensorflow/src/default_opset.h b/ngraph/frontend/tensorflow/src/default_opset.h deleted file mode 100644 index bcd360e2261ef5..00000000000000 --- a/ngraph/frontend/tensorflow/src/default_opset.h +++ /dev/null @@ -1,34 +0,0 @@ -/******************************************************************************* - * Copyright 2017-2020 Intel Corporation - 
* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *******************************************************************************/ - -#ifndef NGRAPH_TF_BRIDGE_DEFAULT_OPSET_H_ -#define NGRAPH_TF_BRIDGE_DEFAULT_OPSET_H_ -#pragma once - -#include "ngraph/opsets/opset5.hpp" - -namespace tensorflow { -namespace ngraph_bridge { - -namespace opset = ngraph::opset5; -namespace default_opset = ngraph::opset5; - -#define NGRAPH_TF_FE_NOT_IMPLEMENTED { std::cerr << "[ NOT IMPLEMENTED ] source: " << __FILE__ << ":" << __LINE__ << "\n"; throw "NOT IMPLEMENTED"; } - -} // namespace ngraph_bridge -} // namespace tensorflow - -#endif \ No newline at end of file diff --git a/ngraph/frontend/tensorflow/src/ngraph_builder.cpp b/ngraph/frontend/tensorflow/src/ngraph_builder.cpp deleted file mode 100644 index 57c4e49223822c..00000000000000 --- a/ngraph/frontend/tensorflow/src/ngraph_builder.cpp +++ /dev/null @@ -1,3216 +0,0 @@ -/******************************************************************************* - * Copyright 2017-2020 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *******************************************************************************/ - -#include -#include "graph.pb.h" -#include "tensor.pb.h" - -#include "ngraph/op/util/logical_reduction.hpp" -#include "ngraph/pass/constant_folding.hpp" -#include "ngraph/pass/manager.hpp" -#include "ngraph/pass/pass_config.hpp" -#include "ngraph/slice_plan.hpp" - -#include "ngraph_builder.h" -#include "ngraph_conversions.h" -#include "default_opset.h" - - -using namespace std; -namespace ng = ngraph; - -namespace tensorflow { - -namespace ngraph_bridge { - -static bool VecStrCmp(const std::vector& a, - const std::vector& b) { - return a == b; -} - -static Status ValidateInputCount(const TFNodeDecoder* op, int32_t count) { - if (op->num_inputs() != count) { - std::ostringstream buf; - buf << "\"" << op->name() << "\" requires " << count << - " input(s), got " << op->num_inputs() << - " instead"; - return errors::InvalidArgument(buf.str()); - } - return Status::OK(); -} - -static Status ValidateInputCountMin(const TFNodeDecoder* op, int32_t count) { - if (op->num_inputs() < count) { - std::ostringstream buf; - buf << "\"" << op->name() << "\" requires at least " << - count << " input(s), got " << op->num_inputs() << - " instead"; - return errors::InvalidArgument(buf.str()); - } - return Status::OK(); -} - -// Check to make sure the axis dimension for reduction are in within range. 
-// Returns error if axis is out of range. Otherwise returns Status::OK(). -static Status CheckAxisDimInRange(std::vector axes, size_t rank) { - for (auto i : axes) { - if (i < (int)-rank || i >= (int)rank) { - std::ostringstream buf; - buf << "Axis Dimension is out of range. Got " << i << - ", should be in range [-" << rank << ", " << - rank << ")"; - return errors::InvalidArgument(buf.str()); - } - } - return Status::OK(); -} - -// -// Helper for storing ops in ng_op_map. -// For most of the cases, op would have one output so -// vector ng_op_map[op_name] would contain one element. -// -// If storing more than one output_nodes, make sure it's in -// the same order as tensorflow would do that. -// -// Parameters: -// Builder::OpMap& ng_op_map - The TF-to-nGraph op map. -// std::string op_name - Name of the op. -// -// ng::Output output_node - ng::Node to store -// - -static void SaveNgOp(Builder::OpMap& ng_op_map, const std::string& op_name, - ng::Output output_node) { - // no need to try-catch, map[key] will create vector object - // if not exists - ng_op_map[op_name].push_back(output_node); -} - -void Builder::SetTracingInfo(const std::string& op_name, - const ng::Output ng_node) { - auto node = ng_node.get_node_shared_ptr(); - node->set_friendly_name(op_name + "/" + node->get_name()); - node->add_provenance_tag(op_name); -#if 0 - if (api::IsLoggingPlacement()) { - cout << "TF_to_NG: " << op_name << " --> " << node << "\n"; - } -#endif -} - -template -ng::Output ConstructNgNode(const std::string& op_name, - TArg&&... Args) { - auto ng_node = std::make_shared(std::forward(Args)...); - Builder::SetTracingInfo(op_name, ng_node); - return ng_node; -} - -// Helper for fetching correct input node from ng_op_map. -// Handles edge checking to make sure correct input node is -// fetched. -// -// Reduces some boilerplate code (incorrect from now) like this: -// -// TFNodeDecoder* tf_input; -// TF_RETURN_IF_ERROR(op->input_node(0, &tf_input)); -// -// ng::Output ng_input; -// try { -// ng_input = ng_op_map.at(tf_input->name()); -// } catch (const std::out_of_range&) { -// return errors::NotFound(tf_input->name(), -// " is not found in the ng_op_map"); -// } -// -// Into 2 lines: -// -// ng::Output ng_input; -// TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, &ng_input)) -// -// -// -// Parameters: -// Builder::OpMap& ng_op_map - The TF-to-nGraph op map. -// TFNodeDecoder* op - TF op being translated. -// input_idx - index of input -// -// ng::Output *result - ng::Node pointer where result -// will be written -// -// - -static Status GetInputNode(const Builder::OpMap& ng_op_map, const TFNodeDecoder* op, - size_t input_idx, ng::Output& result) { - // Stub - #if 0 - // input op may have resulted in more than one ng::Node (eg. 
Split) - // we need to look at Edge to check index of the input op - std::vector edges; - TF_RETURN_IF_ERROR(op->input_edges(&edges)); - size_t src_output_idx; - try { - src_output_idx = edges.at(input_idx)->src_output(); - } catch (const out_of_range&) { - return Status(error::NOT_FOUND, "Edge not found"); - } - -#endif - - const TFNodeDecoder* tf_input; - size_t src_output_idx; - TF_RETURN_IF_ERROR(op->input_node(input_idx, &tf_input, &src_output_idx)); - std::vector> ng_op; - try { - ng_op = ng_op_map.at(tf_input->name()); - } catch (const out_of_range&) { - return Status(string("Ngraph op not found for ") + tf_input->name()); - } - try { - result = ng_op.at(src_output_idx); - } catch (const out_of_range&) { - return Status(string("Input node not found at index ") + - to_string(src_output_idx)); - } - return Status::OK(); - - - - NGRAPH_TF_FE_NOT_IMPLEMENTED -} - -namespace detail { -static Status GetInputNodes(const Builder::OpMap&, const TFNodeDecoder*, size_t) { - return Status::OK(); -} - -template -static Status GetInputNodes(const Builder::OpMap& ng_op_map, const TFNodeDecoder* op, - size_t index, ng::Output& result, - Arguments&... remaining) { - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, index, result)); - return GetInputNodes(ng_op_map, op, index + 1, remaining...); -} -} // namespace detail - -template -static Status GetInputNodes(const Builder::OpMap& ng_op_map, const TFNodeDecoder* op, - Arguments&... remaining) { - constexpr size_t args_len = sizeof...(Arguments); - TF_RETURN_IF_ERROR(ValidateInputCount(op, args_len)); - return detail::GetInputNodes(ng_op_map, op, 0, remaining...); -} - -static Status GetStaticNodeTensor( - const TFNodeDecoder* node, const std::vector& static_input_map, - TensorWrapper* result) { - if (node->IsArg()) { - int arg_index; - TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "index", &arg_index)); - const TensorWrapper* source_tensor = static_input_map[arg_index]; - if (source_tensor == nullptr) { - return errors::Internal( - "GetStaticNodeTensor called on _Arg but input tensor is missing from " - "static input map"); - } - *result = *source_tensor; - return Status::OK(); - } else if (node->type_string() == "Const") { - if (GetNodeAttr(node->attrs(), "value", &result).status != 0) { - return errors::Internal( - "GetStaticNodeTensor: Const tensor proto parsing failed"); - } - return Status::OK(); - } else { - return errors::Internal("GetStaticNodeTensor called on node with type " + - node->type_string() + "; _Arg or Const expected"); - } -} - -template -static void ConvertTensorDataToVector(const TensorWrapper& tensor, - std::vector* vector) { - const Ttensor* data = tensor.flat().data(); - vector->resize(tensor.NumElements()); - for (int64_t i = 0; i < tensor.NumElements(); i++) { - (*vector)[i] = Tvector(data[i]); - } -} - -template -static Status TensorDataToVector(const TensorWrapper& tensor, std::vector* vector) { -// stub - #if 0 - DataType dt = tensor.dtype(); - - // If dt and T match, we can just copy. - if (dt == DataTypeToEnum::value) { - *vector = std::vector(tensor.flat().data(), - tensor.flat().data() + tensor.NumElements()); - } - // Else we have to convert. 
- else { - switch (dt) { - case DT_FLOAT: - ConvertTensorDataToVector(tensor, vector); - break; - case DT_DOUBLE: - ConvertTensorDataToVector(tensor, vector); - break; - case DT_INT8: - ConvertTensorDataToVector(tensor, vector); - break; - case DT_INT16: - ConvertTensorDataToVector(tensor, vector); - break; - case DT_INT32: - ConvertTensorDataToVector(tensor, vector); - break; - case DT_INT64: - ConvertTensorDataToVector(tensor, vector); - break; - case DT_UINT8: - ConvertTensorDataToVector(tensor, vector); - break; - case DT_UINT16: - ConvertTensorDataToVector(tensor, vector); - break; - case DT_UINT32: - ConvertTensorDataToVector(tensor, vector); - break; - case DT_UINT64: - ConvertTensorDataToVector(tensor, vector); - break; - case DT_BOOL: - ConvertTensorDataToVector(tensor, vector); - break; - default: - return errors::Internal("TensorDataToVector: tensor has element type ", - DataType_Name(dt), ", vector has type ", - DataType_Name(DataTypeToEnum::value), - "; don't know how to convert"); - } - } - return Status::OK(); - #endif - - NGRAPH_TF_FE_NOT_IMPLEMENTED -} - -template -static Status GetStaticInputVector( - Builder::OpMap& ng_op_map, - const TFNodeDecoder* op, int64_t input_index, - const std::vector& static_input_map, - std::vector* vector) { - ng::Output ng_input; - GetInputNode(ng_op_map, op, input_index, ng_input); - if(auto constant = std::dynamic_pointer_cast(ng_input.get_node_shared_ptr())) - { - *vector = constant->cast_vector(); - return Status::OK(); - } - - NGRAPH_TF_FE_NOT_IMPLEMENTED; -/* - TFNodeDecoder* input_node; - TF_RETURN_IF_ERROR(op->input_node(input_index, &input_node)); - TensorWrapper* input_tensor; - TF_RETURN_IF_ERROR( - GetStaticNodeTensor(input_node, static_input_map, &input_tensor)); - TF_RETURN_IF_ERROR(TensorDataToVector(input_tensor, vector)); - return Status::OK();*/ -} - -#if 0 -template -static Status GetStaticInputVector( - const TFNodeDecoder* op, int64_t input_index, - const std::vector& static_input_map, - std::vector* vector) { - TFNodeDecoder* input_node; - TF_RETURN_IF_ERROR(op->input_node(input_index, &input_node)); - TensorWrapper* input_tensor; - TF_RETURN_IF_ERROR( - GetStaticNodeTensor(input_node, static_input_map, &input_tensor)); - TF_RETURN_IF_ERROR(TensorDataToVector(input_tensor, vector)); - return Status::OK(); -} - -static Status GetStaticInputNode( - const TFNodeDecoder* op, int64_t input_index, - const std::vector& static_input_map, DataType dt, - ng::Output& node_) { - ng::element::Type type; - TF_RETURN_IF_ERROR(TFDataTypeToNGraphElementType(dt, &type)); - switch (dt) { - case DataType::DT_FLOAT: { - std::vector vec_float; - TF_RETURN_IF_ERROR( - GetStaticInputVector(op, input_index, static_input_map, &vec_float)); - node_ = ConstructNgNode(op->name(), type, ng::Shape{}, - vec_float[0]); - } break; - case DataType::DT_DOUBLE: { - std::vector vec_double; - TF_RETURN_IF_ERROR( - GetStaticInputVector(op, input_index, static_input_map, &vec_double)); - node_ = ConstructNgNode(op->name(), type, ng::Shape{}, - vec_double[0]); - } break; - case DataType::DT_INT32: { - std::vector vec_i32; - TF_RETURN_IF_ERROR( - GetStaticInputVector(op, input_index, static_input_map, &vec_i32)); - node_ = ConstructNgNode(op->name(), type, ng::Shape{}, - vec_i32[0]); - } break; - case DataType::DT_INT64: { - std::vector vec_i64; - TF_RETURN_IF_ERROR( - GetStaticInputVector(op, input_index, static_input_map, &vec_i64)); - node_ = ConstructNgNode(op->name(), type, ng::Shape{}, - vec_i64[0]); - } break; - default: - return 
errors::Internal("GetStaticInputNode: TF data type " + - DataType_Name(dt) + " not supported."); - break; - } - return Status::OK(); -} -#endif - -// Taken from: tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc -// Extract values from a Const op to `values`. Returns true if succeeds. -// -// Modified with an extra `VecT` parameter to handle the case where the type -// in the vector does not match TensorFlow's notion of what the C++ type -// should be (e.g. when T is `bool`, we actually need a vector of `char` for -// compatibility with nGraph). -template -static Status ValuesFromConstNode(const TFNodeDecoder* node, - ngraph::Shape* const_tensor_shape, - std::vector* values) { -#if 1 - - if (node->op() != "Const") { - return errors::InvalidArgument("TFNodeDecoder not a Const"); - } - DataType dt; - node->getAttrValue("dtype", &dt); - - - /* - if (dt != DataTypeToEnum::value) { - std::stringstream ss; - ss << "Invalid data type defined for Const. Defined: " - << node.attr().at("dtype").type(); - return errors::InvalidArgument(ss.str()); - } - */ - - // TensorWrapper represents the content of the tensor in either _val or - // tensor_content. - TensorWrapper* tensor; - node->getAttrValue("value", &tensor); - //typename checkpoint::SaveTypeTraits::RepeatedField* tensor_values = - // checkpoint::MutableTensorProtoData(const_cast(&tensor)); - - const TensorShapeProto& shape = tensor->tensor_def->tensor_shape(); - ngraph::PartialShape pshape; - TFTensorShapeToNGraphShape(shape, &pshape); - *const_tensor_shape = pshape.get_shape(); - if(pshape.is_dynamic()) - NGRAPH_TF_FE_NOT_IMPLEMENTED; - auto tensor_content = tensor->tensor_def->tensor_content(); - std::vector tensor_values_plain(tensor_content.begin(), tensor_content.end()); - const T* tensor_values = reinterpret_cast(tensor_values_plain.data()); - - if (!tensor_values_plain.empty() && tensor->tensor_def->has_tensor_shape()) { - // When tensor_shape is set, theoretically the representation of the data - // could be compressed. So, before copying values to the returned vector, - // make sure no compression happens. - //if (shape.dim_size() == 1 && shape.dim(0).size() == tensor_values_plain.size()/sizeof(T)) { - values->insert(values->end(), tensor_values, - tensor_values + tensor_values_plain.size()/sizeof(T)); - return Status::OK(); - //} - } - - const auto tensor_content_size = tensor->tensor_def->tensor_content().size(); - if(tensor_content_size % sizeof(VecT)) { - std::cerr << "[ ERROR ] tensor_content_size (" << tensor_content_size - << ") is not a multiple of " << sizeof(VecT); - } - - // If tensor_content_size is zero, we'll have to take the values from - // int_val, float_val, etc. 
- if (tensor_content_size == 0) { - int64_t n_elements = 1; - for (auto i = 0; i < shape.dim_size(); i++) { - if (shape.dim(i).size() < 0) { - return errors::InvalidArgument( - "Const node has empty tensor and an unknown dimension size"); - } - n_elements *= shape.dim(i).size(); - } - values->resize(n_elements); - - auto val_lastsaved = (T)0; // cast - - for (auto i = 0; i < n_elements; i++) { - auto& tensor_proto = *tensor->tensor_def; - int64_t val_size = 0; - auto val_i = (T)0; // cast - switch (dt) { - // TODO(amprocte/NGRAPH-2502): there are more element types to support - // here - case DT_INT32: - val_size = tensor_proto.int_val_size(); - if (val_size > 0) val_i = tensor_proto.int_val()[i]; - break; - case DT_INT64: - val_size = tensor_proto.int64_val_size(); - if (val_size > 0) val_i = tensor_proto.int64_val()[i]; - break; - case DT_FLOAT: - val_size = tensor_proto.float_val_size(); - if (val_size > 0) val_i = tensor_proto.float_val()[i]; - break; - case DT_BOOL: - val_size = tensor_proto.bool_val_size(); - if (val_size > 0) val_i = tensor_proto.bool_val()[i]; - break; - case DT_DOUBLE: - val_size = tensor_proto.double_val_size(); - if (val_size > 0) val_i = tensor_proto.double_val()[i]; - break; - default: - NGRAPH_VLOG(0) - << "Const node has empty tensor_proto and we don't know how to " - "handle this element type"; - NGRAPH_VLOG(0) << node->DebugString(); - NGRAPH_VLOG(0) << shape.DebugString(); - return errors::Unimplemented("Encountered unknown element type " + - DataType_Name(dt) + - " on an empty tensor_proto"); - } - if (val_size == 0) { - return errors::InvalidArgument("Empty values vector"); - } else if (i < val_size) { - (*values)[i] = val_i; - val_lastsaved = val_i; - } else { - (*values)[i] = val_lastsaved; - } - } - } else { - - return Status::OK(); - //values->resize(tensor_content_size / sizeof(VecT)); - //port::CopyToArray(tensor.tensor_content(), - // reinterpret_cast(values->data())); - } - - return Status::OK(); -#endif - - -} - -// Helper for Builder::TranslateGraph ("Const" op) -template -static Status MakeConstOp(const TFNodeDecoder* op, ng::element::Type et, - ng::Output& ng_node) { - vector const_values; - ngraph::Shape ng_shape; - - TF_RETURN_IF_ERROR( - (ValuesFromConstNode(op, &ng_shape, &const_values))); - - ng_node = - ConstructNgNode(op->name(), et, ng_shape, const_values); - return Status::OK(); -} - -const Builder::ConstMap& Builder::TF_NGRAPH_CONST_MAP() { - static const Builder::ConstMap the_map = { - {DataType::DT_FLOAT, make_pair(MakeConstOp, ng::element::f32)}, - {DataType::DT_DOUBLE, make_pair(MakeConstOp, ng::element::f64)}, - {DataType::DT_INT8, make_pair(MakeConstOp, ng::element::i8)}, - {DataType::DT_INT16, make_pair(MakeConstOp, ng::element::i16)}, -#if 0 - {DataType::DT_QINT8, make_pair(MakeConstOp, ng::element::i8)}, - {DataType::DT_QUINT8, make_pair(MakeConstOp, ng::element::u8)}, - {DataType::DT_QUINT16, make_pair(MakeConstOp, ng::element::u16)}, -#endif - {DataType::DT_INT32, make_pair(MakeConstOp, ng::element::i32)}, - {DataType::DT_INT64, make_pair(MakeConstOp, ng::element::i64)}, - {DataType::DT_UINT8, make_pair(MakeConstOp, ng::element::u8)}, - {DataType::DT_UINT16, make_pair(MakeConstOp, ng::element::u16)}, - {DataType::DT_BOOL, - make_pair(MakeConstOp, ng::element::boolean)}}; - return the_map; -} - -// Helper function to translate a unary op. -// -// Parameters: -// -// TFNodeDecoder* op - TF op being translated. Must have one input. 
-// const std::vector& static_input_map -// - the static input map -// Builder::OpMap& ng_op_map - The TF-to-nGraph op map. -// -// std::function(ng::Output> -// create_unary_op - Function to construct the graph implementing -// the unary op, given the input to the unop -// as an argument. -// -// Example usage: -// -// if (n->type_string == "Square") { -// TF_RETURN_IF_ERROR(TranslateUnaryOp(n, static_input_map, ng_op_map, -// [] (ng::Output n) { -// return -// (ng::Output(n,n)); -// }); -// } -static Status TranslateUnaryOp( - const TFNodeDecoder* op, const std::vector&, - Builder::OpMap& ng_op_map, - std::function(ng::Output)> create_unary_op) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input)); - auto ng_node = create_unary_op(ng_input); - if (ng_node != ng_input) { - Builder::SetTracingInfo(op->name(), ng_node); - } - SaveNgOp(ng_op_map, op->name(), ng_node); - return Status::OK(); -} - -// Helper function to translate a unary op in cases where there is a one-to-one -// mapping from TensorFlow ops to nGraph ops. -// -// Example usage: -// -// if (n->type_string == "Abs") { -// TF_RETURN_IF_ERROR(TranslateUnaryOp(n, static_input_map, -// ng_op_map)); -// } -// -template -static Status TranslateUnaryOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - return TranslateUnaryOp(op, static_input_map, ng_op_map, - [&op](ng::Output n) { - return ConstructNgNode(op->name(), n); - }); -} - -// Helper function to translate a binary op -// Parameters: -// -// TFNodeDecoder* op - TF op being translated. Must have only two -// inputs. -// const std::vector& static_input_map - the static input map -// Builder::OpMap& ng_op_map - The TF-to-nGraph op map. -// std::function(ng::Output, -// ng::Output)> -// create_binary_op - Function to construct the graph implementing -// the binary op, given the 2 ng_inputs to the -// binaryop -// Example Usage: -// -// if (op->type_string() == "SquaredDifference") { -// TF_RETURN_IF_ERROR(TranslateBinaryOp(op, ng_op_map, -// [](ng::Output ng_input1, ng::Output -// ng_input2) { -// auto ng_diff = ng::Output(input1, -// input2); -// return ng::Output(ng_diff,ng_diff); -// })); -// } -// - -static Status TranslateBinaryOp( - const TFNodeDecoder* op, const std::vector&, - Builder::OpMap& ng_op_map, - std::function(ng::Output&, - ng::Output&)> - create_binary_op) { - ng::Output ng_lhs, ng_rhs; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_lhs, ng_rhs)); - auto ng_node = create_binary_op(ng_lhs, ng_rhs); - if (ng_node != ng_lhs && ng_node != ng_rhs) { - Builder::SetTracingInfo(op->name(), ng_node); - } - SaveNgOp(ng_op_map, op->name(), ng_node); - return Status::OK(); -} - -// Helper function to translate a binary op in cases where there is a one-to-one -// mapping from TensorFlow ops to nGraph ops. 
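Editor's note: the SquaredDifference example in the comment above lost its
template arguments in transit; with them restored it plausibly reads as follows
(a sketch using this file's ConstructNgNode helper, not the verbatim patch text):

// if (op->type_string() == "SquaredDifference") {
//   TF_RETURN_IF_ERROR(TranslateBinaryOp(
//       op, static_input_map, ng_op_map,
//       [&op](ng::Output<ng::Node>& input1, ng::Output<ng::Node>& input2) {
//         auto ng_diff =
//             ConstructNgNode<opset::Subtract>(op->name(), input1, input2);
//         return ConstructNgNode<opset::Multiply>(op->name(), ng_diff, ng_diff);
//       }));
// }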
-// -// Example usage: -// -// if (n->type_string == "Add") { -// TF_RETURN_IF_ERROR(TranslateBinaryOp(op, -// static_input_map, -// ng_op_map)); -// } -// -template -static Status TranslateBinaryOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - return TranslateBinaryOp( - op, static_input_map, ng_op_map, - [&op](ng::Output& ng_lhs, ng::Output& ng_rhs) { - return ConstructNgNode(op->name(), ng_lhs, ng_rhs); - }); -} - -static Status TranslateAddNOp(const TFNodeDecoder* op, const std::vector&, - Builder::OpMap& ng_op_map) { - std::vector> ng_arg_vec(op->num_inputs()); - - for (int inp_idx = 0; inp_idx < op->num_inputs(); inp_idx++) - TF_RETURN_IF_ERROR( - GetInputNode(ng_op_map, op, inp_idx, ng_arg_vec[inp_idx])); - auto ng_addn = std::accumulate( - std::next(ng_arg_vec.begin()), ng_arg_vec.end(), ng_arg_vec.at(0), - [&op](ng::Output a, ng::Output b) { - return ConstructNgNode(op->name(), a, b); - }); // accumulation: start with - // first element. default op is - // addition - SaveNgOp(ng_op_map, op->name(), ng_addn); - return Status::OK(); -} -static Status TranslateArgMinMax( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map, std::string mode) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input)); - - std::vector tf_dim; - TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 1, static_input_map, &tf_dim)); - - ng::Shape input_shape = ng_input.get_shape(); - size_t input_rank = input_shape.size(); - - if (tf_dim.size() != 1) { - return errors::InvalidArgument( - "ArgMax Op: dimension must be scalar, operates on a single axis"); - } - - // If input dimension is negative, make it positive - if (tf_dim[0] < 0) { - NGRAPH_VLOG(3) << "Input dimension is negative, make it positive " - << tf_dim[0]; - tf_dim[0] = (int64_t)input_rank + tf_dim[0]; - } - NGRAPH_VLOG(3) << "Axis along which to compute " << tf_dim[0]; - size_t k_axis = tf_dim[0]; - - DataType dtype; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "output_type", &dtype)); - - ng::element::Type ng_et; - TF_RETURN_IF_ERROR(TFDataTypeToNGraphElementType(dtype, &ng_et)); - - auto ng_k = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{}, std::vector({1})); - - std::string sort = "none"; - auto ng_topk = - std::make_shared(ng_input, ng_k, k_axis, mode, sort, ng_et); - auto ng_indices = ng_topk->output(1); - int axis = ng_topk->get_axis(); - auto axis_to_remove = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{1}, std::vector({axis})); - auto reshaped_indices = - ConstructNgNode(op->name(), ng_indices, axis_to_remove); - Builder::SetTracingInfo(op->name(), reshaped_indices); - SaveNgOp(ng_op_map, op->name(), reshaped_indices); - return Status::OK(); -} - -static Status TranslateArgMaxOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - return (TranslateArgMinMax(op, static_input_map, ng_op_map, "max")); -} - -static Status TranslateArgMinOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - return (TranslateArgMinMax(op, static_input_map, ng_op_map, "min")); -} - -static Status TranslateAvgPoolOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input)); - - std::vector tf_strides; - std::vector tf_ksize; - std::string tf_padding_type; - std::string tf_data_format; - 
TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "strides", &tf_strides)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "ksize", &tf_ksize)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "padding", &tf_padding_type)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "data_format", &tf_data_format)); - - if (tf_data_format != "NHWC" && tf_data_format != "NCHW") { - return errors::InvalidArgument( - "AvgPool data format is neither NHWC nor NCHW"); - } - - bool is_nhwc = (tf_data_format == "NHWC"); - - NGRAPH_VLOG(3) << ng::join(tf_strides); - NGRAPH_VLOG(3) << ng::join(tf_ksize); - NGRAPH_VLOG(3) << tf_padding_type; - NGRAPH_VLOG(3) << tf_data_format; - - ng::Strides ng_strides(2); - ng::Shape ng_image_shape(2); - ng::Shape ng_kernel_shape(2); - NHWCtoHW(is_nhwc, tf_strides, ng_strides); - NHWCtoHW(is_nhwc, ng_input.get_shape(), ng_image_shape); - NHWCtoHW(is_nhwc, tf_ksize, ng_kernel_shape); - NHWCtoNCHW(op->name(), is_nhwc, ng_input); - NGRAPH_VLOG(3) << "ng_strides: " << ng::join(ng_strides); - NGRAPH_VLOG(3) << "ng_image_shape: " << ng::join(ng_image_shape); - NGRAPH_VLOG(3) << "ng_kernel_shape: " << ng::join(ng_kernel_shape); - - ng::CoordinateDiff padding_below; - ng::CoordinateDiff padding_above; - ng::Shape ng_dilations{1, 1}; - Builder::MakePadding(tf_padding_type, ng_image_shape, ng_kernel_shape, - ng_strides, ng_dilations, padding_below, padding_above); - - // TODO: remove this once nGraph supports negative padding - // (CoordinateDiff) for AvgPool - ng::Shape ng_padding_below(padding_below.begin(), padding_below.end()); - ng::Shape ng_padding_above(padding_above.begin(), padding_above.end()); - - ng::Output ng_avgpool = ConstructNgNode( - op->name(), ng_input, ng_strides, ng_padding_below, ng_padding_above, - ng_kernel_shape, true, ng::op::RoundingType::FLOOR); - - NCHWtoNHWC(op->name(), is_nhwc, ng_avgpool); - NGRAPH_VLOG(3) << "avgpool outshape: {" << ng::join(ng_avgpool.get_shape()) - << "}"; - - SaveNgOp(ng_op_map, op->name(), ng_avgpool); - return Status::OK(); -} - -static Status TranslateBiasAddOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_input, ng_bias; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_bias)); - - std::string tf_data_format; - if (GetNodeAttr(op->attrs(), "data_format", &tf_data_format) != - Status::OK()) { - tf_data_format = "NHWC"; - } - - if (tf_data_format != "NHWC" && tf_data_format != "NCHW") { - return errors::InvalidArgument( - "BiasAdd data format is neither NHWC nor NCHW"); - } - - auto ng_input_shape = ng_input.get_shape(); - auto ng_bias_shape = ng_bias.get_shape(); - if (ng_bias_shape.size() != 1) { - return errors::InvalidArgument( - "Bias argument to BiasAdd does not have one dimension"); - } - - // We'll choose reshape over broadcast - // Reshape the bias to (1, C, 1, ...) if input is channels-first. 
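Editor's note: the reshape below builds a target shape that is all ones except
the channel axis, so the elementwise Add broadcasts the bias across N/H/W. A
minimal sketch of that computation, with a hypothetical BiasTargetShape helper:

#include <cstdint>
#include <cstddef>
#include <vector>

// For input rank `rank` and C channels, produce {1, C, 1, ..., 1}.
std::vector<int64_t> BiasTargetShape(size_t rank, int64_t channels) {
  std::vector<int64_t> shape(rank, 1);  // all ones...
  shape[1] = channels;                  // ...except the channel axis (NCHW)
  return shape;
}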
- ng::Output ng_bias_reshaped = ng_bias; - if (tf_data_format == "NCHW") { - auto channel_dim = ng_input_shape[1]; - std::vector target_shape(ng_input_shape.size()); - for (int64_t i = 0; i < ng_input_shape.size(); i++) { - if (i == 1) { - target_shape[i] = channel_dim; - } else { - target_shape[i] = 1; - } - } - auto target_shape_node = make_shared( - ng::element::i64, ng::Shape{ng_input_shape.size()}, target_shape); - ng_bias_reshaped = ConstructNgNode( - op->name(), ng_bias, target_shape_node, false); - } - - ng::Output ng_add = - ConstructNgNode(op->name(), ng_input, ng_bias_reshaped); - - SaveNgOp(ng_op_map, op->name(), ng_add); - return Status::OK(); -} - -static Status TranslateCastOp(const TFNodeDecoder* op, const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input)); - - DataType dtype; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "DstT", &dtype)); - - ng::element::Type ng_et; - TF_RETURN_IF_ERROR(TFDataTypeToNGraphElementType(dtype, &ng_et)); - - try { - SaveNgOp(ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_input, ng_et)); - } catch (const std::out_of_range&) { - return errors::Unimplemented("Failed to convert TF data type: " + - DataType_Name(dtype)); - } - return Status::OK(); -} - -static Status TranslateConcatV2Op( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - TF_RETURN_IF_ERROR(ValidateInputCountMin(op, 2)); - - std::vector tf_concat_axis_vec; - TF_RETURN_IF_ERROR(GetStaticInputVector( - ng_op_map, op, op->num_inputs() - 1, static_input_map, &tf_concat_axis_vec)); - - int64_t concat_axis = tf_concat_axis_vec[0]; - - if (concat_axis < 0) { - ng::Output ng_first_arg; - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_first_arg)); - - concat_axis += int64_t(ng_first_arg.get_shape().size()); - } - - ng::OutputVector ng_args; - - for (int i = 0; i < op->num_inputs() - 1; i++) { - ng::Output ng_arg; - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, i, ng_arg)); - ng_args.push_back(ng_arg); - } - - SaveNgOp( - ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_args, size_t(concat_axis))); - return Status::OK(); -} - -static Status TranslateConstOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - DataType dtype; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "dtype", &dtype)); - - ng::Output ng_node; - - // For some reason the following do not work (no specialization of - // tensorflow::checkpoint::SavedTypeTraits...) 
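Editor's note: the try/catch around TF_NGRAPH_CONST_MAP().at(dtype) in the
function below could equally be written with find(), avoiding the exception; a
sketch under that assumption, reusing the surrounding function's names:

// auto it = Builder::TF_NGRAPH_CONST_MAP().find(dtype);
// if (it == Builder::TF_NGRAPH_CONST_MAP().end()) {
//   return errors::Unimplemented("Failed to translate Constant with TF type: " +
//                                DataType_Name(dtype));
// }
// TF_RETURN_IF_ERROR(it->second.first(op, it->second.second, ng_node));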
- // case DataType::DT_UINT32: - // TF_RETURN_IF_ERROR(MakeConstOp(op, ng::element::u32, - // &ng_node)); - // break; - // case DataType::DT_UINT64: - // TF_RETURN_IF_ERROR(MakeConstOp(op, ng::element::u64, - // &ng_node)); - // break; - try { - const auto& func_param = Builder::TF_NGRAPH_CONST_MAP().at(dtype); - TF_RETURN_IF_ERROR(func_param.first(op, func_param.second, ng_node)); - } catch (const std::out_of_range&) { - return errors::Unimplemented("Failed to translate Constant with TF type:" + - DataType_Name(dtype)); - } - - SaveNgOp(ng_op_map, op->name(), ng_node); - return Status::OK(); -} - -static Status TranslateConv2DOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input, ng_filter; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_filter)); - - std::vector tf_strides; - std::vector tf_dilations; - std::string tf_padding_type; - std::string tf_data_format; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "strides", &tf_strides)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "dilations", &tf_dilations)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "padding", &tf_padding_type)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "data_format", &tf_data_format)); - - if (tf_data_format != "NHWC" && tf_data_format != "NCHW") { - return errors::InvalidArgument( - "Conv2D data format is neither NHWC nor NCHW"); - } - - bool is_nhwc = (tf_data_format == "NHWC"); - - // TF Kernel Test Checks - // Strides in the batch and depth dimension is not supported - if (tf_strides[0] != 1 || tf_strides[is_nhwc ? 3 : 1] != 1) { - return errors::InvalidArgument( - "Strides in batch and depth dimensions is not supported: " + - op->type_string()); - } - - NGRAPH_VLOG(3) << ng::join(tf_strides); - NGRAPH_VLOG(3) << ng::join(tf_dilations); - NGRAPH_VLOG(3) << tf_padding_type; - NGRAPH_VLOG(3) << tf_data_format; - - ng::Strides ng_strides(2); - ng::Strides ng_dilations(2); - ng::Shape ng_image_shape(2); - ng::Shape ng_kernel_shape(2); - - NHWCtoHW(is_nhwc, tf_strides, ng_strides); - NHWCtoHW(is_nhwc, ng_input.get_shape(), ng_image_shape); - NHWCtoHW(is_nhwc, tf_dilations, ng_dilations); - NHWCtoNCHW(op->name(), is_nhwc, ng_input); - - NGRAPH_VLOG(3) << "ng_strides: " << ng::join(ng_strides); - NGRAPH_VLOG(3) << "ng_dilations: " << ng::join(ng_dilations); - NGRAPH_VLOG(3) << "ng_image_shape: " << ng::join(ng_image_shape); - - auto& ng_filter_shape = ng_filter.get_shape(); - ng_kernel_shape[0] = ng_filter_shape[0]; - ng_kernel_shape[1] = ng_filter_shape[1]; - Transpose<3, 2, 0, 1>(ng_filter); - Builder::SetTracingInfo(op->name(), ng_filter); - - NGRAPH_VLOG(3) << "ng_kernel_shape: " << ng::join(ng_kernel_shape); - - ng::CoordinateDiff ng_padding_below; - ng::CoordinateDiff ng_padding_above; - Builder::MakePadding(tf_padding_type, ng_image_shape, ng_kernel_shape, - ng_strides, ng_dilations, ng_padding_below, - ng_padding_above); - - ng::Output ng_conv = ConstructNgNode( - op->name(), ng_input, ng_filter, ng_strides, ng_padding_below, - ng_padding_above, ng_dilations); - - NCHWtoNHWC(op->name(), is_nhwc, ng_conv); - SaveNgOp(ng_op_map, op->name(), ng_conv); - return Status::OK(); -} - -static Status TranslateConv2DBackpropInputOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_filter, ng_out_backprop, ng_unused; - TF_RETURN_IF_ERROR( - GetInputNodes(ng_op_map, op, ng_unused, ng_filter, ng_out_backprop)); - - // TODO: refactor me to be less redundant with other convolution ops - std::vector 
tf_strides; - std::vector tf_dilations; - std::string tf_padding_type; - std::string tf_data_format; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "strides", &tf_strides)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "dilations", &tf_dilations)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "padding", &tf_padding_type)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "data_format", &tf_data_format)); - - if (tf_data_format != "NHWC" && tf_data_format != "NCHW") { - return errors::InvalidArgument( - "Conv2DBackpropInput data format is neither NHWC nor NCHW: %s" + - tf_data_format); - } - - std::vector tf_input_sizes; - TF_RETURN_IF_ERROR( - GetStaticInputVector(ng_op_map, op, 0, static_input_map, &tf_input_sizes)); - - if (std::any_of(tf_input_sizes.begin(), tf_input_sizes.end(), - [](int32_t size) { return size <= 0; })) { - return errors::InvalidArgument( - "Conv2DBackpropInput input sizes must be positive integers"); - } - - bool is_nhwc = (tf_data_format == "NHWC"); - - NGRAPH_VLOG(3) << ng::join(tf_strides); - NGRAPH_VLOG(3) << ng::join(tf_dilations); - NGRAPH_VLOG(3) << tf_padding_type; - NGRAPH_VLOG(3) << tf_data_format; - - ng::Strides ng_strides(2); - ng::Strides ng_dilations(2); - ng::Shape ng_image_shape(2); - ng::Shape ng_kernel_shape(2); - ng::Shape ng_batch_shape(4); - - NHWCtoHW(is_nhwc, tf_strides, ng_strides); - NHWCtoHW(is_nhwc, tf_dilations, ng_dilations); - NHWCtoHW(is_nhwc, tf_input_sizes, ng_image_shape); - NHWCtoNCHW(op->name(), is_nhwc, ng_out_backprop); - if (is_nhwc) { - ng_batch_shape = {static_cast(tf_input_sizes[0]), - static_cast(tf_input_sizes[3]), - static_cast(tf_input_sizes[1]), - static_cast(tf_input_sizes[2])}; - } else { - ng_batch_shape = {static_cast(tf_input_sizes[0]), - static_cast(tf_input_sizes[1]), - static_cast(tf_input_sizes[2]), - static_cast(tf_input_sizes[3])}; - } - - NGRAPH_VLOG(3) << "ng_strides: " << ng::join(ng_strides); - NGRAPH_VLOG(3) << "ng_dilations: " << ng::join(ng_dilations); - NGRAPH_VLOG(3) << "ng_image_shape: " << ng::join(ng_image_shape); - - auto& ng_filter_shape = ng_filter.get_shape(); - ng_kernel_shape[0] = ng_filter_shape[0]; - ng_kernel_shape[1] = ng_filter_shape[1]; - Transpose<3, 2, 0, 1>(ng_filter); - Builder::SetTracingInfo(op->name(), ng_filter); - - NGRAPH_VLOG(3) << "ng_kernel_shape: " << ng::join(ng_kernel_shape); - - ng::CoordinateDiff ng_padding_below; - ng::CoordinateDiff ng_padding_above; - Builder::MakePadding(tf_padding_type, ng_image_shape, ng_kernel_shape, - ng_strides, ng_dilations, ng_padding_below, - ng_padding_above); - - auto ng_output_shape = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{ng_batch_shape.size() - 2}, - vector(ng_batch_shape.begin() + 2, ng_batch_shape.end())); - - auto ng_data = ConstructNgNode( - op->name(), ng_out_backprop, ng_filter, ng_output_shape, ng_strides, - ng_padding_below, ng_padding_above, ng_dilations); - - NCHWtoNHWC(op->name(), is_nhwc, ng_data); - SaveNgOp(ng_op_map, op->name(), ng_data); - return Status::OK(); -} - -// Translate Conv3D Op -static Status TranslateConv3DOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input, ng_filter; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_filter)); - - std::vector tf_strides; - std::vector tf_dilations; - std::string tf_padding_type; - std::string tf_data_format; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "strides", &tf_strides)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "dilations", &tf_dilations)); - 
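Editor's note: the ng_batch_shape branches in TranslateConv2DBackpropInputOp
above implement a fixed index permutation. A standalone sketch, with a
hypothetical ToNCHW helper that is not part of the patch:

#include <array>
#include <cstddef>

// {N, H, W, C} -> {N, C, H, W}
std::array<size_t, 4> ToNCHW(const std::array<size_t, 4>& nhwc) {
  return {nhwc[0], nhwc[3], nhwc[1], nhwc[2]};
}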
TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "padding", &tf_padding_type)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "data_format", &tf_data_format)); - - if (tf_data_format != "NDHWC" && tf_data_format != "NCDHW") { - return errors::InvalidArgument( - "Conv3D data format is neither NDHWC nor NCDHW"); - } - - bool is_ndhwc = (tf_data_format == "NDHWC"); - - // TODO: in 3D - // TF Kernel Test Checks - // // Strides in the batch and depth dimension is not supported - // if (tf_strides[0] != 1 || tf_strides[is_nhwc ? 3 : 1] != 1) { - // return errors::InvalidArgument( - // "Strides in batch and depth dimensions is not supported: ", - // op->type_string()); - // } - - NGRAPH_VLOG(3) << ng::join(tf_strides); - NGRAPH_VLOG(3) << ng::join(tf_dilations); - NGRAPH_VLOG(3) << tf_padding_type; - NGRAPH_VLOG(3) << tf_data_format; - - ng::Strides ng_strides(3); - ng::Strides ng_dilations(3); - ng::Shape ng_image_shape(3); - ng::Shape ng_kernel_shape(3); - - NHWCtoHW(is_ndhwc, tf_strides, ng_strides); - NHWCtoHW(is_ndhwc, ng_input.get_shape(), ng_image_shape); - NHWCtoHW(is_ndhwc, tf_dilations, ng_dilations); - NHWCtoNCHW(op->name(), is_ndhwc, ng_input); - - NGRAPH_VLOG(3) << "ng_strides: " << ng::join(ng_strides); - NGRAPH_VLOG(3) << "ng_dilations: " << ng::join(ng_dilations); - NGRAPH_VLOG(3) << "ng_image_shape: " << ng::join(ng_image_shape); - - auto& ng_filter_shape = ng_filter.get_shape(); - ng_kernel_shape[0] = ng_filter_shape[0]; - ng_kernel_shape[1] = ng_filter_shape[1]; - ng_kernel_shape[2] = ng_filter_shape[2]; - Transpose3D<4, 3, 0, 1, 2>(ng_filter); - Builder::SetTracingInfo(op->name(), ng_filter); - - NGRAPH_VLOG(3) << "ng_kernel_shape: " << ng::join(ng_kernel_shape); - - ng::CoordinateDiff ng_padding_below; - ng::CoordinateDiff ng_padding_above; - Builder::MakePadding(tf_padding_type, ng_image_shape, ng_kernel_shape, - ng_strides, ng_dilations, ng_padding_below, - ng_padding_above); - - ng::Output ng_conv = ConstructNgNode( - op->name(), ng_input, ng_filter, ng_strides, ng_padding_below, - ng_padding_above, ng_dilations); - - NCHWtoNHWC(op->name(), is_ndhwc, ng_conv); - SaveNgOp(ng_op_map, op->name(), ng_conv); - return Status::OK(); -} - -static Status TranslateCumsumOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_x, ng_axis; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_x, ng_axis)); - bool exclusive, reverse; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "exclusive", &exclusive)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "reverse", &reverse)); - - SaveNgOp(ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_x, ng_axis, exclusive, - reverse)); - return Status::OK(); -} - -// Translate DepthToSpace op -static Status TranslateDepthToSpaceOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input)); - - // Get the attributes - int64_t block_size; - std::string tf_data_format; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "block_size", &block_size)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "data_format", &tf_data_format)); - - if (tf_data_format != "NHWC" && tf_data_format != "NCHW") { - return errors::InvalidArgument( - "DepthToSpace data format is neither NHWC nor NCHW"); - } - - bool is_nhwc = (tf_data_format == "NHWC"); - - NHWCtoNCHW(op->name(), is_nhwc, ng_input); - auto ng_mode = opset::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST; - ng::Output depth_to_space = ConstructNgNode( - op->name(), ng_input, 
ng_mode, block_size); - NCHWtoNHWC(op->name(), is_nhwc, depth_to_space); - SaveNgOp(ng_op_map, op->name(), depth_to_space); - return Status::OK(); -} - -static Status TranslateDepthwiseConv2dNativeOp( - const TFNodeDecoder* op, const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input, ng_filter; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_filter)); - - std::vector tf_strides; - std::vector tf_dilations; - std::string tf_padding_type; - std::string tf_data_format; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "strides", &tf_strides)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "dilations", &tf_dilations)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "padding", &tf_padding_type)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "data_format", &tf_data_format)); - - if (tf_data_format != "NHWC" && tf_data_format != "NCHW") { - return errors::InvalidArgument( - "DepthwiseConv2D data format is neither NHWC nor NCHW"); - } - - bool is_nhwc = (tf_data_format == "NHWC"); - - NGRAPH_VLOG(3) << ng::join(tf_strides); - NGRAPH_VLOG(3) << ng::join(tf_dilations); - NGRAPH_VLOG(3) << tf_padding_type; - NGRAPH_VLOG(3) << tf_data_format; - - ng::Strides ng_strides(2); - ng::Strides ng_dilations(2); - ng::Shape ng_image_shape(2); - ng::Shape ng_kernel_shape(2); - - NHWCtoHW(is_nhwc, ng_input.get_shape(), ng_image_shape); - NHWCtoHW(is_nhwc, tf_strides, ng_strides); - NHWCtoHW(is_nhwc, tf_dilations, ng_dilations); - NHWCtoNCHW(op->name(), is_nhwc, ng_input); - - NGRAPH_VLOG(3) << "ng_strides: " << ng::join(ng_strides); - NGRAPH_VLOG(3) << "ng_dilations: " << ng::join(ng_dilations); - NGRAPH_VLOG(3) << "ng_image_shape: " << ng::join(ng_image_shape); - - auto& ng_filter_shape = ng_filter.get_shape(); - ng_kernel_shape[0] = ng_filter_shape[0]; - ng_kernel_shape[1] = ng_filter_shape[1]; - - NGRAPH_VLOG(3) << "ng_kernel_shape: " << ng::join(ng_kernel_shape); - - ng::CoordinateDiff ng_padding_below; - ng::CoordinateDiff ng_padding_above; - Builder::MakePadding(tf_padding_type, ng_image_shape, ng_kernel_shape, - ng_strides, ng_dilations, ng_padding_below, - ng_padding_above); - - // H W I M -> H W I 1 M - auto filter_shape = ConstructNgNode( - op->name(), ng::element::u64, ng::Shape{5}, - ngraph::Shape{ng_filter_shape[0], ng_filter_shape[1], ng_filter_shape[2], - 1, ng_filter_shape[3]}); - auto reshaped_filter = ConstructNgNode(op->name(), ng_filter, - filter_shape, false); - - // H W I 1 M -> I M 1 H W - auto order = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{5}, vector{2, 4, 3, 0, 1}); - auto transposed_filter = - ConstructNgNode(op->name(), reshaped_filter, order); - - auto ng_conv = ConstructNgNode( - op->name(), ng_input, transposed_filter, ng_strides, ng_padding_below, - ng_padding_above, ng_dilations); - - NCHWtoNHWC(op->name(), is_nhwc, ng_conv); - SaveNgOp(ng_op_map, op->name(), ng_conv); - return Status::OK(); -} - -static Status TranslateExpandDimsOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input)); - std::vector dims; - TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 1, static_input_map, &dims)); - auto ng_dims = ConstructNgNode( - op->name(), ng::element::i64, ngraph::Shape{dims.size()}, dims); - SaveNgOp(ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_input, ng_dims)); - return Status::OK(); -} - -static Status TranslateFillOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - 
Builder::OpMap& ng_op_map) { - ng::Output ng_value, ng_dims; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_dims, ng_value)); - SaveNgOp(ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_value, ng_dims)); - return Status::OK(); -} - -static Status TranslateFloorDivOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - auto floordiv_fn = [&op](ng::Output x, ng::Output y) { - return ConstructNgNode( - op->name(), ConstructNgNode(op->name(), x, y)); - }; - return TranslateBinaryOp(op, static_input_map, ng_op_map, floordiv_fn); -} - -static Status TranslateFusedBatchNormOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_input, ng_scale, ng_offset, ng_mean, ng_variance; - bool is_v3 = op->type_string() == "FusedBatchNormV3"; - - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_scale, ng_offset, - ng_mean, ng_variance)); - - std::string tf_data_format; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "data_format", &tf_data_format)); - - if (tf_data_format != "NHWC" && tf_data_format != "NCHW") { - return errors::InvalidArgument( - "Conv2D data format is neither NHWC nor NCHW"); - } - - bool is_nhwc = (tf_data_format == "NHWC"); - - NGRAPH_VLOG(3) << "data_format: " << tf_data_format; - - float tf_epsilon; - if (GetNodeAttr(op->attrs(), "epsilon", &tf_epsilon) != Status::OK()) { - NGRAPH_VLOG(3) << "epsilon attribute not present, setting to 0.0001"; - // TensorFlow default - tf_epsilon = 0.0001; - } - - NGRAPH_VLOG(3) << "epsilon: " << tf_epsilon; - - NHWCtoNCHW(op->name(), is_nhwc, ng_input); - - auto ng_batch_norm = ConstructNgNode( - op->name(), ng_input, ng_scale, ng_offset, ng_mean, ng_variance, - tf_epsilon); - NCHWtoNHWC(op->name(), is_nhwc, ng_batch_norm); - SaveNgOp(ng_op_map, op->name(), ng_batch_norm); - SaveNgOp(ng_op_map, op->name(), ng_mean); - SaveNgOp(ng_op_map, op->name(), ng_variance); - SaveNgOp(ng_op_map, op->name(), ng_mean); // reserve_space_1 - SaveNgOp(ng_op_map, op->name(), ng_variance); // reserve_space_2 - if (is_v3) { - // FusedBatchNormV3 has 6 outputs - SaveNgOp(ng_op_map, op->name(), ng_mean); // reserve_space_3 - } - return Status::OK(); -} - -static Status TranslateFusedMatMulOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - int num_args; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "num_args", &num_args)); - - std::vector fused_ops; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "fused_ops", &fused_ops)); - - // Transpose arguments if requested. 
- bool transpose_a = false; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "transpose_a", &transpose_a)); - - bool transpose_b = false; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "transpose_b", &transpose_b)); - - ng::Output ng_lhs, ng_rhs, ng_bias, ng_matmul; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_lhs, ng_rhs, ng_bias)); - ng_matmul = ConstructNgNode(op->name(), ng_lhs, ng_rhs, - transpose_a, transpose_b); - - auto ng_matmul_shape = ng_matmul.get_shape(); - auto ng_bias_shape = ng_bias.get_shape(); - - if (ng_bias_shape.size() != 1) { - return errors::InvalidArgument( - "Bias argument to BiasAdd does not have one dimension"); - } - - auto ng_add = ConstructNgNode(op->name(), ng_matmul, ng_bias); - if (fused_ops.size() == 1) { // Only fusing BiasAdd - SaveNgOp(ng_op_map, op->name(), ng_add); - } else if (fused_ops.size() == 2) { // Also has activation - if (fused_ops[1] == "Relu") { - SaveNgOp(ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_add)); - } else if (fused_ops[1] == "Relu6") { - SaveNgOp(ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_add, 0, 6)); - } else { - return errors::Internal( - "Expected activation to be Relu or Relu6 but got " + fused_ops[1]); - } - } else { - // Adding this here to catch future changes in _FusedMatMul - return errors::Internal("Unsupported combination"); - } - - return Status::OK(); -} - -// See .../tensorflow/include/tensorflow/cc/ops/array_ops.h -// and .../openvino/ngraph/core/include/ngraph/op/gather.hpp -static Status TranslateGatherOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_input, ng_input_indices; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_input_indices)); - - auto ng_axis = ConstructNgNode(op->name(), ng::element::i64, - ng::Shape{}, 0); - - auto gather_op = ConstructNgNode(op->name(), ng_input, - ng_input_indices, ng_axis); - - SaveNgOp(ng_op_map, op->name(), gather_op); - return Status::OK(); -} - -static Status TranslateGatherV2Op( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_input, ng_input_coords, ng_unused; - TF_RETURN_IF_ERROR( - GetInputNodes(ng_op_map, op, ng_input, ng_input_coords, ng_unused)); - - std::vector tf_axis; - TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 2, static_input_map, &tf_axis)); - - if (tf_axis.size() > 1) { - std::ostringstream buf; - buf << "Found axis in GatherV2 op (" << op->name() << - ") translation to be non scalar, of size " << - tf_axis.size(); - return errors::Internal(buf.str()); - } - - // Negative axis is supported. 
Accounting for that
-  auto ng_input_shape = ng_input.get_shape();
-  size_t ng_input_rank = ng_input_shape.size();
-  int axis;
-  if (tf_axis[0] >= 0) {
-    axis = tf_axis[0];
-  } else {
-    axis = tf_axis[0] + ng_input_rank;
-  }
-  if (axis < 0 || axis >= ng_input_rank) {
-    std::ostringstream buf;
-    buf << "Expected axis in the range [-" << ng_input_rank << ", "
-        << ng_input_rank << "), but got " << tf_axis[0];
-    return errors::InvalidArgument(buf.str());
-  }
-
-  auto ng_axis = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{tf_axis.size()}, tf_axis);
-
-  auto gather_op = ConstructNgNode<opset::Gather>(op->name(), ng_input,
-                                                  ng_input_coords, ng_axis);
-
-  SaveNgOp(ng_op_map, op->name(), gather_op);
-  return Status::OK();
-}
-
-static Status TranslateFusedConv2DOp(const TFNodeDecoder* op,
-                                     const std::vector<const TensorWrapper*>&,
-                                     Builder::OpMap& ng_op_map) {
-  int num_args;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "num_args", &num_args));
-
-  std::vector<string> fused_ops;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "fused_ops", &fused_ops));
-
-  std::string tf_data_format;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "data_format", &tf_data_format));
-  bool is_nhwc = (tf_data_format == "NHWC");
-
-  auto CreateNgConv = [&](ng::Output<ng::Node>& ng_input,
-                          ng::Output<ng::Node>& ng_filter,
-                          ng::Output<ng::Node>& ng_conv) {
-    std::vector<int32_t> tf_strides;
-    std::vector<int32_t> tf_dilations;
-    std::string tf_padding_type;
-    TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "strides", &tf_strides));
-    TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "dilations", &tf_dilations));
-    TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "padding", &tf_padding_type));
-
-    if (tf_data_format != "NHWC" && tf_data_format != "NCHW") {
-      return errors::InvalidArgument(
-          "Conv2D data format is neither NHWC nor NCHW");
-    }
-
-    // TF Kernel Test Checks
-    // Strides in the batch and depth dimension is not supported
-    if (tf_strides[0] != 1 || tf_strides[is_nhwc ?
3 : 1] != 1) { - return errors::InvalidArgument( - "Strides in batch and depth dimensions is not supported: " + - op->type_string()); - } - - NGRAPH_VLOG(3) << ng::join(tf_strides); - NGRAPH_VLOG(3) << ng::join(tf_dilations); - NGRAPH_VLOG(3) << tf_padding_type; - NGRAPH_VLOG(3) << tf_data_format; - - ng::Strides ng_strides(2); - ng::Strides ng_dilations(2); - ng::Shape ng_image_shape(2); - ng::Shape ng_kernel_shape(2); - - NHWCtoHW(is_nhwc, tf_strides, ng_strides); - NHWCtoHW(is_nhwc, ng_input.get_shape(), ng_image_shape); - NHWCtoHW(is_nhwc, tf_dilations, ng_dilations); - NHWCtoNCHW(op->name(), is_nhwc, ng_input); - - NGRAPH_VLOG(3) << "ng_strides: " << ng::join(ng_strides); - NGRAPH_VLOG(3) << "ng_dilations: " << ng::join(ng_dilations); - NGRAPH_VLOG(3) << "ng_image_shape: " << ng::join(ng_image_shape); - - auto& ng_filter_shape = ng_filter.get_shape(); - ng_kernel_shape[0] = ng_filter_shape[0]; - ng_kernel_shape[1] = ng_filter_shape[1]; - Transpose<3, 2, 0, 1>(ng_filter); - Builder::SetTracingInfo(op->name(), ng_filter); - - NGRAPH_VLOG(3) << "ng_kernel_shape: " << ng::join(ng_kernel_shape); - - ng::CoordinateDiff ng_padding_below; - ng::CoordinateDiff ng_padding_above; - Builder::MakePadding(tf_padding_type, ng_image_shape, ng_kernel_shape, - ng_strides, ng_dilations, ng_padding_below, - ng_padding_above); - - ng_conv = ConstructNgNode( - op->name() + "_FusedConv2D_Conv", ng_input, ng_filter, ng_strides, - ng_padding_below, ng_padding_above, ng_dilations); - - return Status::OK(); - }; - - if (VecStrCmp(fused_ops, {"BiasAdd"}) || - VecStrCmp(fused_ops, {"BiasAdd", "Relu"}) || - VecStrCmp(fused_ops, {"BiasAdd", "Relu6"})) { - if (num_args != 1) { - return errors::InvalidArgument( - "FusedConv2DBiasAdd has incompatible num_args"); - } - - ng::Output ng_input, ng_filter, ng_bias, ng_conv; - TF_RETURN_IF_ERROR( - GetInputNodes(ng_op_map, op, ng_input, ng_filter, ng_bias)); - - TF_RETURN_IF_ERROR(CreateNgConv(ng_input, ng_filter, ng_conv)); - - auto ng_conv_shape = ng_conv.get_shape(); - auto ng_bias_shape = ng_bias.get_shape(); - if (ng_bias_shape.size() != 1) { - return errors::InvalidArgument( - "Bias argument to BiasAdd does not have one dimension"); - } - - std::vector reshape_pattern_values(ng_conv_shape.size(), 1U); - reshape_pattern_values[1] = ng_bias.get_shape().front(); - auto reshape_pattern = make_shared( - ng::element::u64, ng::Shape{reshape_pattern_values.size()}, - reshape_pattern_values); - auto ng_bias_reshaped = ConstructNgNode( - op->name(), ng_bias, reshape_pattern, false); - - auto ng_add = ConstructNgNode( - op->name() + "_FusedConv2D_BiasAdd", ng_conv, ng_bias_reshaped); - - if (VecStrCmp(fused_ops, {"BiasAdd", "Relu"})) { - auto ng_relu = ConstructNgNode( - op->name() + "_FusedConv2D_Relu", ng_add); - NCHWtoNHWC(op->name(), is_nhwc, ng_relu); - SaveNgOp(ng_op_map, op->name(), ng_relu); - } else if (VecStrCmp(fused_ops, {"BiasAdd", "Relu6"})) { - auto ng_relu6 = ConstructNgNode( - op->name() + "_FusedConv2D_Relu6", ng_add, 0, 6); - NCHWtoNHWC(op->name(), is_nhwc, ng_relu6); - SaveNgOp(ng_op_map, op->name(), ng_relu6); - } else { - NCHWtoNHWC(op->name(), is_nhwc, ng_add); - SaveNgOp(ng_op_map, op->name(), ng_add); - } - } else if (VecStrCmp(fused_ops, {"FusedBatchNorm"}) || - VecStrCmp(fused_ops, {"FusedBatchNorm", "Relu"}) || - VecStrCmp(fused_ops, {"FusedBatchNorm", "Relu6"})) { - if (num_args != 4) { - return errors::InvalidArgument( - "FusedConv2D with FusedBatchNorm has incompatible num_args"); - } - - ng::Output ng_input, ng_filter, ng_conv, ng_scale, 
ng_offset, - ng_mean, ng_variance; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_filter, - ng_scale, ng_offset, ng_mean, - ng_variance)); - TF_RETURN_IF_ERROR(CreateNgConv(ng_input, ng_filter, ng_conv)); - - float tf_epsilon; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "epsilon", &tf_epsilon)); - - auto ng_batch_norm = ConstructNgNode( - op->name() + "_FusedConv2D_BatchNorm", ng_conv, ng_scale, ng_offset, - ng_mean, ng_variance, tf_epsilon); - - if (VecStrCmp(fused_ops, {"FusedBatchNorm", "Relu"})) { - auto ng_relu = ConstructNgNode( - op->name() + "_FusedConv2D_BatchNormRelu", ng_batch_norm); - NCHWtoNHWC(op->name(), is_nhwc, ng_relu); - SaveNgOp(ng_op_map, op->name(), ng_relu); - } else if (VecStrCmp(fused_ops, {"FusedBatchNorm", "Relu6"})) { - auto ng_relu6 = ConstructNgNode( - op->name() + "_FusedConv2D_BatchNormRelu", ng_batch_norm, 0, 6); - NCHWtoNHWC(op->name(), is_nhwc, ng_relu6); - SaveNgOp(ng_op_map, op->name(), ng_relu6); - } else { - NCHWtoNHWC(op->name(), is_nhwc, ng_batch_norm); - SaveNgOp(ng_op_map, op->name(), ng_batch_norm); - } - } else { - return errors::Unimplemented("Unsupported _FusedConv2D " + - StrJoin(fused_ops, ",")); - } - return Status::OK(); -} - -static Status TranslateIdentityOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_arg; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_arg)); - SaveNgOp(ng_op_map, op->name(), ng_arg); - return Status::OK(); -} - -static Status TranslateIsFiniteOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - // Implemented tf.is_finite by checking: - // (in != inf) && (in != -inf) && (in == in) - // ^^^^^^^^ checks for NaN's - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input)); - - auto const_inf = ConstructNgNode( - op->name(), ng_input.get_element_type(), ng::Shape{}, - std::vector{std::numeric_limits::infinity()}); - - auto const_neg_inf = ConstructNgNode( - op->name(), ng_input.get_element_type(), ng::Shape{}, - std::vector{-std::numeric_limits::infinity()}); - - auto neq_inf = - ConstructNgNode(op->name(), ng_input, const_inf); - auto neq_neg_inf = - ConstructNgNode(op->name(), ng_input, const_neg_inf); - auto eq_nan = ConstructNgNode(op->name(), ng_input, ng_input); - - auto neq_inf_and_neq_neg_inf = - ConstructNgNode(op->name(), neq_inf, neq_neg_inf); - auto is_finite = ConstructNgNode( - op->name(), neq_inf_and_neq_neg_inf, eq_nan); - - SaveNgOp(ng_op_map, op->name(), is_finite); - return Status::OK(); -} - -static Status TranslateL2LossOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input)); - - std::vector val; - val.push_back(2.0); - auto const_2 = ConstructNgNode( - op->name(), ng_input.get_element_type(), ng::Shape{}, val[0]); - - auto ng_pow = - ConstructNgNode(op->name(), ng_input, ng_input); - - size_t input_rank = ng_input.get_shape().size(); - std::vector axes; - for (size_t i = 0; i < input_rank; ++i) { - axes.push_back(i); - } - - auto ng_reduction_axes = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{axes.size()}, axes); - auto ng_sum = - ConstructNgNode(op->name(), ng_pow, ng_reduction_axes); - auto ng_l2loss = ConstructNgNode(op->name(), ng_sum, const_2); - SaveNgOp(ng_op_map, op->name(), ng_l2loss); - return Status::OK(); -} - -static Status TranslateLog1pOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& 
ng_op_map) {
-  return TranslateUnaryOp(
-      op, static_input_map, ng_op_map, [&op](ng::Output<ng::Node> n) {
-        auto et = n.get_element_type();
-        auto shape = n.get_shape();
-        std::vector<std::string> val_1(ng::shape_size(shape), "1");
-        auto ng_const1 =
-            ConstructNgNode<opset::Constant>(op->name(), et, shape, val_1);
-        auto ng_add = ConstructNgNode<opset::Add>(op->name(), ng_const1, n);
-        return ConstructNgNode<opset::Log>(op->name(), ng_add);
-      });
-}
-
-static Status TranslateLRNOp(const TFNodeDecoder* op,
-                             const std::vector<const TensorWrapper*>& static_input_map,
-                             Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_inp;
-  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_inp));
-
-  float alpha;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "alpha", &alpha));
-  float beta;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "beta", &beta));
-  float bias;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "bias", &bias));
-  int64_t depth_radius;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "depth_radius", &depth_radius));
-
-  // OV: Each input value is divided by (bias+(alpha/size)*sum(xi^2 for every xi
-  //     in the local region))^beta
-  // TF: sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d +
-  //     depth_radius + 1] ** 2)
-  //     output = input / (bias + alpha * sqr_sum) ** beta
-  int64_t size = depth_radius * 2 + 1;
-  alpha = alpha * size;
-  // nGraph expects the input to be in NCHW format
-  NHWCtoNCHW(op->name(), true, ng_inp);
-  auto ng_output = ConstructNgNode<opset::LRN>(op->name(), ng_inp, alpha, beta,
-                                               bias, (size_t)size);
-  NCHWtoNHWC(op->name(), true, ng_output);
-  SaveNgOp(ng_op_map, op->name(), ng_output);
-  return Status::OK();
-}
-
-static Status TranslateLogSoftmaxOp(const TFNodeDecoder* op,
-                                    const std::vector<const TensorWrapper*>&,
-                                    Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_inp;
-  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_inp));
-  auto inp_shape = ng_inp.get_shape();
-  size_t rank = inp_shape.size();
-  int64_t axes = rank - 1;
-
-  auto ng_output = ConstructNgNode<opset::LogSoftmax>(op->name(), ng_inp, axes);
-  SaveNgOp(ng_op_map, op->name(), ng_output);
-  return Status::OK();
-}
-
-static Status TranslateMatMulOp(const TFNodeDecoder* op,
-                                const std::vector<const TensorWrapper*>&,
-                                Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_lhs, ng_rhs;
-  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_lhs, ng_rhs));
-
-  // Transpose arguments if requested.
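Editor's note: for the MatMul translation here, transpose_a/transpose_b mean
C = op(A) * op(B) with op(X) = X^T when the flag is set. A tiny standalone
sketch of the transposed-A case as an outer product:

#include <cstdio>

int main() {
  double A[2] = {1.0, 2.0};  // stored 1x2; transpose_a makes it 2x1
  double B[2] = {3.0, 4.0};  // 1x2
  double C[2][2];
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 2; ++j) C[i][j] = A[i] * B[j];  // (A^T)B
  // Prints "3 4 6 8", the 2x2 product.
  std::printf("%g %g %g %g\n", C[0][0], C[0][1], C[1][0], C[1][1]);
}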
- bool transpose_a = false; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "transpose_a", &transpose_a)); - - bool transpose_b = false; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "transpose_b", &transpose_b)); - - SaveNgOp(ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_lhs, ng_rhs, - transpose_a, transpose_b)); - return Status::OK(); -} - -template -static Status TranslateMaxPoolOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input)); - - std::vector tf_strides; - std::vector tf_ksize; - std::string tf_padding_type; - std::string tf_data_format; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "strides", &tf_strides)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "ksize", &tf_ksize)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "padding", &tf_padding_type)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "data_format", &tf_data_format)); - - bool is_nhwc = (tf_data_format == "NHWC") || (tf_data_format == "NDHWC"); - - NGRAPH_VLOG(3) << ng::join(tf_strides); - NGRAPH_VLOG(3) << ng::join(tf_ksize); - NGRAPH_VLOG(3) << tf_padding_type; - NGRAPH_VLOG(3) << tf_data_format; - - ng::Strides ng_strides(N); - ng::Shape ng_image_shape(N); - ng::Shape ng_kernel_shape(N); - ng::Shape ng_dilations(N, 1); - - NHWCtoHW(is_nhwc, tf_strides, ng_strides); - NHWCtoHW(is_nhwc, ng_input.get_shape(), ng_image_shape); - NHWCtoHW(is_nhwc, tf_ksize, ng_kernel_shape); - NHWCtoNCHW(op->name(), is_nhwc, ng_input); - NGRAPH_VLOG(3) << "ng_strides: " << ng::join(ng_strides); - NGRAPH_VLOG(3) << "ng_image_shape: " << ng::join(ng_image_shape); - NGRAPH_VLOG(3) << "ng_kernel_shape: " << ng::join(ng_kernel_shape); - - ng::CoordinateDiff padding_below; - ng::CoordinateDiff padding_above; - Builder::MakePadding(tf_padding_type, ng_image_shape, ng_kernel_shape, - ng_strides, ng_dilations, padding_below, padding_above); - - // TODO: remove this once nGraph supports negative padding - // (CoordinateDiff) for MaxPool - ng::Shape ng_padding_below(padding_below.begin(), padding_below.end()); - ng::Shape ng_padding_above(padding_above.begin(), padding_above.end()); - - auto ng_maxpool = ConstructNgNode( - op->name(), ng_input, ng_strides, ng_padding_below, ng_padding_above, - ng_kernel_shape, ng::op::RoundingType::FLOOR); - - NCHWtoNHWC(op->name(), is_nhwc, ng_maxpool); - - NGRAPH_VLOG(3) << "maxpool outshape: {" << ng::join(ng_maxpool.get_shape()) - << "}"; - - SaveNgOp(ng_op_map, op->name(), ng_maxpool); - return Status::OK(); -} - -static Status TranslateNonMaxSuppressionV2Op( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_boxes, ng_scores, ng_unused, ng_iou_threshold; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_boxes, ng_scores, - ng_unused, ng_iou_threshold)); - - auto ng_axis_boxes = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{1}, std::vector({0})); - auto ng_boxes_unsqueezed = - ConstructNgNode(op->name(), ng_boxes, ng_axis_boxes); - - auto ng_axis_scores = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{1}, std::vector({0})); - auto ng_scores_unsqueezed1 = - ConstructNgNode(op->name(), ng_scores, ng_axis_scores); - auto ng_scores_unsqueezed2 = ConstructNgNode( - op->name(), ng_scores_unsqueezed1, ng_axis_scores); - - std::vector max_output_size; - TF_RETURN_IF_ERROR( - GetStaticInputVector(ng_op_map, op, 2, static_input_map, &max_output_size)); - - // max_output_size must be scalar - if (max_output_size.size() 
!= 1) { - return errors::InvalidArgument( - "NonMaxSuppression Op: max_output_size of nms must be scalar " + - to_string(max_output_size.size())); - } - - auto ng_max_output_size = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{}, max_output_size[0]); - NGRAPH_VLOG(5) << "ng_max_output_size " << max_output_size[0]; - - auto ng_nmsv = ConstructNgNode( - op->name(), ng_boxes_unsqueezed, ng_scores_unsqueezed2, - ng_max_output_size, ng_iou_threshold, - opset::NonMaxSuppression::BoxEncodingType::CORNER, false, - ngraph::element::Type_t::i32); - - auto begin = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{2}, std::vector({0, 2})); - auto end = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{2}, - std::vector({max_output_size[0], 3})); - auto ng_nmsv_slice = ConstructNgNode( - op->name(), ng_nmsv, begin, end, std::vector{0, 0}, - std::vector{0, 0}, std::vector{0, 0}, - std::vector{0, 1}); - - Builder::SetTracingInfo(op->name(), ng_nmsv_slice); - SaveNgOp(ng_op_map, op->name(), ng_nmsv_slice); - return Status::OK(); -} - -static Status TranslateReduceOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map, - std::function(ng::Output, - ng::Output, const bool)> - create_ng_node) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input)); - bool tf_keep_dims; - if (GetNodeAttr(op->attrs(), "keep_dims", &tf_keep_dims) != Status::OK()) { - tf_keep_dims = false; - } - - std::vector axes; - TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 1, static_input_map, &axes)); - - ng::Shape input_shape = ng_input.get_shape(); - size_t input_rank = input_shape.size(); - - TF_RETURN_IF_ERROR(CheckAxisDimInRange(axes, input_rank)); - - std::vector ng_reduction_axes_vect(axes.size()); - std::transform( - axes.begin(), axes.end(), ng_reduction_axes_vect.begin(), - [input_rank](int idx) { return idx + (idx < 0 ? 
(int)input_rank : 0); }); - auto ng_reduction_axes = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{ng_reduction_axes_vect.size()}, - ng_reduction_axes_vect); - - ng::Output ng_node = - create_ng_node(ng_input, ng_reduction_axes, tf_keep_dims); - - SaveNgOp(ng_op_map, op->name(), ng_node); - return Status::OK(); -} - -template -static Status TranslateDirectReduceOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - // ensure its either an arithmetic or a logical reduction - if (!(std::is_base_of::value || - std::is_base_of::value)) { - return errors::InvalidArgument( - "Expected node to be either a valid logical or arithmetic reduction " - "type"); - } - return TranslateReduceOp( - op, static_input_map, ng_op_map, - [&op](ng::Output ng_input, - ng::Output ng_reduction_axes, const bool keep_dims) { - return ConstructNgNode(op->name(), ng_input, ng_reduction_axes, - keep_dims); - }); -} - -static Status TranslateOneHotOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_features, ng_unused, ng_on, ng_off, ng_depth; - TF_RETURN_IF_ERROR( - GetInputNodes(ng_op_map, op, ng_features, ng_unused, ng_on, ng_off)); - - auto ng_features_shape = ng_features.get_shape(); - std::vector depth; - TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 1, static_input_map, &depth)); - - // Depth must be scalar - if (depth.size() != 1) { - return errors::InvalidArgument( - "OneHot Op: depth of one hot dimension must be scalar " + to_string(depth.size())); - } - - auto const_depth = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{}, depth); - - int one_hot_axis; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "axis", &one_hot_axis)); - - auto ng_onehot = ConstructNgNode( - op->name(), ng_features, const_depth, ng_on, ng_off, one_hot_axis); - SaveNgOp(ng_op_map, op->name(), ng_onehot); - return Status::OK(); -} - -static Status TranslatePackOp(const TFNodeDecoder* op, const std::vector&, - Builder::OpMap& ng_op_map) { - TF_RETURN_IF_ERROR(ValidateInputCountMin(op, 1)); - - int32_t tf_axis; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "axis", &tf_axis)); - auto ng_axis = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{1}, - std::vector({tf_axis})); - - ng::OutputVector ng_concat_inputs; - for (int32_t i = 0; i < op->num_inputs(); ++i) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, i, ng_input)); - auto unsqueezed_input = - ConstructNgNode(op->name(), ng_input, ng_axis); - ng_concat_inputs.push_back(unsqueezed_input); - } - - // if inputs shape is (2, 3, 4), and axis is 1, then we want - // to create output_shape (2, num_inputs, 3, 4) - SaveNgOp(ng_op_map, op->name(), ConstructNgNode( - op->name(), ng_concat_inputs, tf_axis)); - return Status::OK(); -} - -// 3 different Pad Ops: Pad, PadV2, MirrorPad -// See https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/pad -// See https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/pad-v2 -// See https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/mirror-pad -static Status TranslatePadOp(const TFNodeDecoder* op, - const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_input, ng_paddings_op, pad_val_op, result_pad_op; - - // Set inputs and pad_val_op - if (op->type_string() == "Pad" || op->type_string() == "MirrorPad") { - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_paddings_op)); - pad_val_op = ConstructNgNode( - op->name(), 
ng_input.get_element_type(), ng::Shape(), - std::vector({0})); - } else if (op->type_string() == "PadV2") { - TF_RETURN_IF_ERROR( - GetInputNodes(ng_op_map, op, ng_input, ng_paddings_op, pad_val_op)); - } else { - return errors::InvalidArgument("Incorrect TF Pad OpType: " + - op->type_string()); - } - - // Set pad_mode - auto pad_mode = ng::op::PadMode::CONSTANT; - if (op->type_string() == "MirrorPad") { - std::string pad_mode_str; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "mode", &pad_mode_str)); - if (pad_mode_str == "REFLECT") { - pad_mode = ng::op::PadMode::REFLECT; - } else if (pad_mode_str == "SYMMETRIC") { - pad_mode = ng::op::PadMode::SYMMETRIC; - } else { - return errors::InvalidArgument(pad_mode_str + - " is not an allowed padding mode."); - } - } - - // Set pads_begin & pads_end (from the pad_val_op) - std::vector paddings; - TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 1, static_input_map, &paddings)); - NGRAPH_VLOG(3) << op->name() << " pads {" << ng::join(paddings) << "}"; - if (paddings.size() % 2 != 0) { - return errors::InvalidArgument( - "Constant node for paddings does not have an even number of " - "elements"); - } - std::vector pad_begin(paddings.size() / 2); - std::vector pad_end(paddings.size() / 2); - for (size_t i = 0; i < paddings.size() / 2; i++) { - pad_begin[i] = paddings[2 * i]; - pad_end[i] = paddings[2 * i + 1]; - } - auto pads_begin_node = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{pad_begin.size()}, pad_begin); - auto pads_end_node = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{pad_end.size()}, pad_end); - - // Create final Op - result_pad_op = - ConstructNgNode(op->name(), ng_input, pads_begin_node, - pads_end_node, pad_val_op, pad_mode); - - SaveNgOp(ng_op_map, op->name(), result_pad_op); - return Status::OK(); -} - -static Status TranslateRangeOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_start, ng_stop, ng_step; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_start, ng_stop, ng_step)); - - //DataType start_type = op->input_type(0); - //DataType stop_type = op->input_type(1); - //DataType step_type = op->input_type(2); - ng::element::Type out_type; - TF_RETURN_IF_ERROR( - TFDataTypeToNGraphElementType(op->output_type(0), &out_type)); - //ng::Output start_node, stop_node, step_node; - //TF_RETURN_IF_ERROR( - // GetStaticInputNode(op, 0, static_input_map, start_type, start_node)); - //TF_RETURN_IF_ERROR( - // GetStaticInputNode(op, 1, static_input_map, stop_type, stop_node)); - //TF_RETURN_IF_ERROR( - // GetStaticInputNode(op, 2, static_input_map, step_type, step_node)); - auto ng_range = ConstructNgNode(op->name(), ng_start, - ng_stop, ng_step, out_type); - - SaveNgOp(ng_op_map, op->name(), ng_range); - return Status::OK(); -} - -static Status TranslateRankOp(const TFNodeDecoder* op, const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input)); - - ng::Shape input_shape = ng_input.get_shape(); - auto input_rank = static_cast(input_shape.size()); - - auto ng_rank = ConstructNgNode( - op->name(), ng::element::i32, ng::Shape(), - std::vector({input_rank})); - - SaveNgOp(ng_op_map, op->name(), ng_rank); - return Status::OK(); -} - -static Status TranslateReciprocalOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - return TranslateUnaryOp( - op, static_input_map, ng_op_map, [&op](ng::Output n) { - // Create a constant 
tensor populated with the value -1. - // (1/x = x^(-1)) - auto et = n.get_element_type(); - auto shape = n.get_shape(); - std::vector constant_values(ng::shape_size(shape), "-1"); - auto ng_exponent = ConstructNgNode( - op->name(), et, shape, constant_values); - - // Raise each element of the input to the power -1. - return ConstructNgNode(op->name(), n, ng_exponent); - }); -} - -static Status TranslateRelu6Op(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input)); - SaveNgOp(ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_input, 0, 6)); - return Status::OK(); -} - -static Status TranslateReshapeOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_input, ng_shape_op; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_shape_op)); - - NGRAPH_VLOG(3) << "Input shape: " << ng::join(ng_input.get_shape()); - - std::vector shape; - TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 1, static_input_map, &shape)); - - NGRAPH_VLOG(3) << "Requested result shape: " << ng::join(shape); - - auto ng_shape = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{shape.size()}, shape); - SaveNgOp(ng_op_map, op->name(), ConstructNgNode( - op->name(), ng_input, ng_shape, false)); - return Status::OK(); -} - -static Status TranslateRsqrtOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - return TranslateUnaryOp( - op, static_input_map, ng_op_map, [&op](ng::Output n) { - // Create a constant tensor populated with the value -1/2. - // (1/sqrt(x) = x^(-1/2)) - auto et = n.get_element_type(); - auto shape = n.get_shape(); - std::vector constant_values(ng::shape_size(shape), "-0.5"); - auto ng_exponent = ConstructNgNode( - op->name(), et, shape, constant_values); - - // Raise each element of the input to the power -0.5. 
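Editor's note: both the Reciprocal and Rsqrt translations in this hunk lower to
Power with a constant exponent (-1 and -0.5 respectively); the return just
below applies the -0.5 case. As a scalar sanity check of that identity:

#include <cmath>
#include <cstdio>

int main() {
  // x^(-1/2) == 1/sqrt(x): both expressions print 0.5 for x = 4.
  std::printf("%g %g\n", std::pow(4.0, -0.5), 1.0 / std::sqrt(4.0));
}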
-        return ConstructNgNode<opset::Power>(op->name(), n, ng_exponent);
-      });
-}
-
-static Status TranslateShapeOp(const TFNodeDecoder* op,
-                               const std::vector<const TensorWrapper*>&,
-                               Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input;
-  TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input));
-
-  DataType dtype;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "out_type", &dtype));
-
-  ng::element::Type type;
-  TF_RETURN_IF_ERROR(TFDataTypeToNGraphElementType(dtype, &type));
-
-  // default output_type = element::i64
-  SaveNgOp(ng_op_map, op->name(),
-           ConstructNgNode<opset::ShapeOf>(op->name(), ng_input, type));
-  return Status::OK();
-}
-
-static Status TranslateSizeOp(const TFNodeDecoder* op,
-                              const std::vector<const TensorWrapper*>&,
-                              Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input;
-  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input));
-
-  DataType dtype;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "out_type", &dtype));
-
-  // Size has an attribute to specify output, int32_t or int64_t
-  ng::element::Type type;
-  TF_RETURN_IF_ERROR(TFDataTypeToNGraphElementType(dtype, &type));
-
-  auto ng_input_shape = ng_input.get_shape();
-  int64_t result = 1;
-  for (auto dim : ng_input_shape) {
-    result *= dim;
-  }
-
-  // make a scalar whose value equals the element count
-  auto ng_result = ConstructNgNode<opset::Constant>(
-      op->name(), type, ng::Shape(0), std::vector<int64_t>({result}));
-
-  SaveNgOp(ng_op_map, op->name(), ng_result);
-  return Status::OK();
-}
-
-static Status TranslateSliceOp(
-    const TFNodeDecoder* op, const std::vector<const TensorWrapper*>& static_input_map,
-    Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input, ng_begin, ng_size;
-  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_begin, ng_size));
-
-  std::vector<int64_t> begin_vec;
-  std::vector<int64_t> size_vec;
-  TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 1, static_input_map, &begin_vec));
-  TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 2, static_input_map, &size_vec));
-
-  if (begin_vec.size() != size_vec.size())
-    return errors::InvalidArgument(
-        "Cannot translate slice op: size of begin = " + to_string(begin_vec.size()) +
-        ", size of size_vec = " + to_string(size_vec.size()) + ". Expected them to match.");
-
-  NGRAPH_VLOG(3) << "Begin input for Slice: " << ng::join(begin_vec);
-  NGRAPH_VLOG(3) << "Size input for Slice: " << ng::join(size_vec);
-
-  std::vector<int64_t> end_vec(begin_vec.size());
-  const auto ng_input_shape = ng_input.get_shape();
-  stringstream err_stream;
-  string err_msg;
-  for (size_t i = 0; i < size_vec.size(); i++) {
-    if (size_vec[i] != -1) {
-      end_vec[i] = begin_vec[i] + size_vec[i];
-    } else {
-      // size_vec[i] == -1 means "slice to the end of the tensor"
-      end_vec[i] = ng_input_shape[i];
-    }
-
-    // check for this condition: 0 <= begin[i] <= begin[i] + size[i] <= Di
-    if (0 > begin_vec[i])
-      err_stream << "lower < 0: " << begin_vec[i]
-                 << ". It should have been non-negative.\n";
-    if (begin_vec[i] > end_vec[i])
-      err_stream << "upper < lower: upper = " << end_vec[i]
-                 << ", lower = " << begin_vec[i] << "\n";
-    if (end_vec[i] > ng_input_shape[i])
-      err_stream << "upper > dim: upper = " << end_vec[i]
-                 << ", dim = " << ng_input_shape[i] << "\n";
-
-    err_msg = err_stream.str();
-    if (!err_msg.empty())
-      return errors::InvalidArgument("Cannot translate slice op at position " +
-                                     to_string(i) + " of " + to_string(size_vec.size()) +
-                                     ". The reasons are:\n" + err_msg);
-  }
-
-  auto begin = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{begin_vec.size()}, begin_vec);
-  auto end = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i64, ng::Shape{end_vec.size()}, end_vec);
-
-  SaveNgOp(ng_op_map, op->name(),
-           ConstructNgNode<opset::StridedSlice>(op->name(), ng_input, begin,
-                                                end, std::vector<int64_t>{},
-                                                std::vector<int64_t>{}));
-  return Status::OK();
-}
-
-static Status TranslateSoftmaxOp(const TFNodeDecoder* op,
-                                 const std::vector<const TensorWrapper*>&,
-                                 Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input;
-  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input));
-
-  auto input_shape = ng_input.get_shape();
-  auto rank = input_shape.size();
-  if (rank < 1) {
-    return errors::InvalidArgument("TF Softmax logits must be >=1 dimension");
-  }
-
-  SaveNgOp(ng_op_map, op->name(),
-           ConstructNgNode<opset::Softmax>(op->name(), ng_input, rank - 1));
-  return Status::OK();
-}
-
-// Translate SpaceToDepthOp
-static Status TranslateSpaceToDepthOp(const TFNodeDecoder* op,
-                                      const std::vector<const TensorWrapper*>&,
-                                      Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input;
-  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input));
-
-  // Get the attributes
-  int64_t block_size;
-  std::string tf_data_format;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "block_size", &block_size));
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "data_format", &tf_data_format));
-
-  if (tf_data_format != "NHWC" && tf_data_format != "NCHW") {
-    return errors::InvalidArgument(
-        "SpaceToDepth data format is neither NHWC nor NCHW");
-  }
-
-  bool is_nhwc = (tf_data_format == "NHWC");
-
-  NHWCtoNCHW(op->name(), is_nhwc, ng_input);
-  auto ng_mode = opset::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
-  auto space_to_depth = ConstructNgNode<opset::SpaceToDepth>(
-      op->name(), ng_input, ng_mode, block_size);
-  NCHWtoNHWC(op->name(), is_nhwc, space_to_depth);
-  SaveNgOp(ng_op_map, op->name(), space_to_depth);
-  return Status::OK();
-}
-
-static Status TranslateSplitOp(
-    const TFNodeDecoder* op, const std::vector<const TensorWrapper*>& static_input_map,
-    Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input;
-  TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 1, ng_input));
-  // num_split : The number of ways to split. Must evenly divide
-  // value.shape[split_dim]
-  int32_t num_split;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "num_split", &num_split));
-
-  ng::Shape shape = ng_input.get_shape();
-  int rank = shape.size();
-
-  std::vector<int64_t> split_dim_vec;
-  TF_RETURN_IF_ERROR(
-      GetStaticInputVector(ng_op_map, op, 0, static_input_map, &split_dim_vec));
-  int split_dim = split_dim_vec[0] + (split_dim_vec[0] < 0 ? (int64_t)rank : 0);
-  auto ng_split_dim = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::u64, ng::Shape{}, split_dim);
-  auto ng_split = make_shared<opset::Split>(ng_input, ng_split_dim, num_split);
-
-  for (int i = 0; i < num_split; ++i) {
-    auto out = ng_split->output(i);
-    Builder::SetTracingInfo(op->name(), out);
-    SaveNgOp(ng_op_map, op->name(), out);
-  }
-  return Status::OK();
-}
-
-static Status TranslateSplitVOp(
-    const TFNodeDecoder* op, const std::vector<const TensorWrapper*>& static_input_map,
-    Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input, ng_split_length, ng_split_dim;
-
-  TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input));
-
-  ng::Shape shape = ng_input.get_shape();
-  int rank = shape.size();
-
-  std::vector<int64_t> split_dim_vec;
-  TF_RETURN_IF_ERROR(
-      GetStaticInputVector(ng_op_map, op, 2, static_input_map, &split_dim_vec));
-  // there should be at least one element specified as axis and not more than
-  // one as axis is 0-D
-  if (split_dim_vec.size() != 1) {
-    return errors::InvalidArgument(
-        "split_dim_tensor must have "
-        "exactly one element.");
-  }
-  TF_RETURN_IF_ERROR(CheckAxisDimInRange(split_dim_vec, rank));
-  int split_dim = split_dim_vec[0] + (split_dim_vec[0] < 0 ? (int64_t)rank : 0);
-  ng_split_dim = ConstructNgNode<opset::Constant>(op->name(), ng::element::i32,
-                                                  ng::Shape{}, split_dim);
-
-  std::vector<int> split_lengths_vec;
-  TF_RETURN_IF_ERROR(
-      GetStaticInputVector(ng_op_map, op, 1, static_input_map, &split_lengths_vec));
-
-  // length: Length of size_splits
-  int length = 0;
-  int idx = -1;
-
-  // Find out the total length of the splits and locate -1's index, if any
-  bool has_one_neg = false;
-  for (size_t i = 0; i < split_lengths_vec.size(); ++i) {
-    if (split_lengths_vec[i] != -1) {
-      length += split_lengths_vec[i];
-    } else {
-      if (has_one_neg) {
-        return errors::InvalidArgument("size_splits can only have one -1");
-      } else {
-        idx = i;
-        has_one_neg = true;
-      }
-    }
-  }
-
-  // Size splits must sum to the dimension of value along split_dim;
-  // a single -1 (which may appear at index 0) absorbs the remainder
-  if (idx >= 0) {
-    split_lengths_vec[idx] = shape[split_dim] - length;
-  }
-
-  if ((!has_one_neg && length != shape[split_dim]) ||
-      (has_one_neg && split_lengths_vec[idx] < 0)) {
-    return errors::InvalidArgument(
-        "The length of size_splits must sum to the value of the dimension "
-        "along split_dim");
-  }
-
-  ng_split_length = ConstructNgNode<opset::Constant>(
-      op->name(), ng::element::i32, ng::Shape{split_lengths_vec.size()},
-      split_lengths_vec);
-
-  if (split_lengths_vec.size() != 1) {
-    auto ng_split = make_shared<opset::VariadicSplit>(ng_input, ng_split_dim,
-                                                      ng_split_length);
-    for (size_t i = 0; i < split_lengths_vec.size(); ++i) {
-      auto out = ng_split->output(i);
-      Builder::SetTracingInfo(op->name(), out);
-      SaveNgOp(ng_op_map, op->name(), out);
-    }
-  } else {
-    SaveNgOp(ng_op_map, op->name(), ng_input);
-  }
-
-  return Status::OK();
-}
-
-static Status TranslateSquareOp(
-    const TFNodeDecoder* op, const std::vector<const TensorWrapper*>& static_input_map,
-    Builder::OpMap& ng_op_map) {
-  return TranslateUnaryOp(
-      op, static_input_map, ng_op_map, [&op](ng::Output<ng::Node> n) {
-        return ConstructNgNode<opset::Multiply>(op->name(), n, n);
-      });
-}
-
-static Status TranslateSqueezeOp(const TFNodeDecoder* op,
-                                 const std::vector<const TensorWrapper*>&,
-                                 Builder::OpMap& ng_op_map) {
-  ng::Output<ng::Node> ng_input;
-  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input));
-  size_t input_dims = ng_input.get_shape().size();
-
-  std::vector<int32_t> tf_axis;
-  TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "squeeze_dims", &tf_axis));
-
-  // Negative axes count from the end; make them positive
-  for (size_t i = 0; i < tf_axis.size(); i++) {
-    tf_axis[i] = tf_axis[i] < 0 ?
(int32_t)(input_dims) + tf_axis[i] : tf_axis[i]; - } - - auto ng_const = ConstructNgNode( - op->name(), ng::element::i32, ng::Shape{tf_axis.size()}, tf_axis); - - SaveNgOp(ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_input, ng_const)); - return Status::OK(); -} - -static Status TranslateStridedSliceOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input)); - - int32_t begin_mask, end_mask, new_axis_mask, shrink_axis_mask, ellipsis_mask; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "begin_mask", &begin_mask)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "end_mask", &end_mask)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "new_axis_mask", &new_axis_mask)); - TF_RETURN_IF_ERROR( - GetNodeAttr(op->attrs(), "shrink_axis_mask", &shrink_axis_mask)); - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "ellipsis_mask", &ellipsis_mask)); - - NGRAPH_VLOG(5) << "strided slice attributes: " - << " begin mask: " << begin_mask << " end mask: " << end_mask - << " new axis mask: " << new_axis_mask - << " shrink axis mask: " << shrink_axis_mask - << " ellipsis mask: " << ellipsis_mask; - - std::vector begin_vec; - TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 1, static_input_map, &begin_vec)); - std::vector end_vec; - TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 2, static_input_map, &end_vec)); - std::vector stride_vec; - TF_RETURN_IF_ERROR( - GetStaticInputVector(ng_op_map, op, 3, static_input_map, &stride_vec)); - - auto begin = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{begin_vec.size()}, begin_vec); - auto end = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{end_vec.size()}, end_vec); - auto strides = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{stride_vec.size()}, stride_vec); - - auto mask_to_vec = [](int32_t mask) { - auto length = sizeof(mask) * CHAR_BIT; - std::vector vec(length, 0); - if (mask == 0) { - return vec; - } - for (auto i = 0; i < length; ++i) { - if ((unsigned char)(mask >> i & 0x01) == 1) { - vec[i] = 1; - } - } - return vec; - }; - - SaveNgOp( - ng_op_map, op->name(), - ConstructNgNode( - op->name(), ng_input, begin, end, strides, mask_to_vec(begin_mask), - mask_to_vec(end_mask), mask_to_vec(new_axis_mask), - mask_to_vec(shrink_axis_mask), mask_to_vec(ellipsis_mask))); - return Status::OK(); -} - -static Status TranslateTileOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_input, ng_multiples; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_multiples)); - - std::vector multiples; - TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 1, static_input_map, &multiples)); - - auto ng_repeats = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{multiples.size()}, multiples); - SaveNgOp(ng_op_map, op->name(), - ConstructNgNode(op->name(), ng_input, ng_repeats)); - return Status::OK(); -} - -// Translate TopKV2 Op using ngraph core op TopK -static Status TranslateTopKV2Op( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_input; - - TF_RETURN_IF_ERROR(ValidateInputCount(op, 2)); - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input)); - - // axis along which to compute top k indices - int64_t k_axis = ng_input.get_shape().size() - 1; - - // scalar input tensor specifying how many max/min elts should be computed - // CPU backend only supports 
element type i64 - std::vector ng_k_vec; - TF_RETURN_IF_ERROR(GetStaticInputVector(ng_op_map, op, 1, static_input_map, &ng_k_vec)); - auto ng_k = ConstructNgNode(op->name(), ng::element::i64, - ng::Shape{}, ng_k_vec[0]); - - std::string mode = "max"; - - std::string sort = "value"; - bool sorted = true; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "sorted", &sorted)); - if (!sorted) { - sort = "index"; - } - - auto ng_result = - std::make_shared(ng_input, ng_k, k_axis, mode, sort); - - ng::Output ng_values = ng_result->output(0); - Builder::SetTracingInfo(op->name(), ng_values); - ng::Output ng_indices = ng_result->output(1); - Builder::SetTracingInfo(op->name(), ng_indices); - - SaveNgOp(ng_op_map, op->name(), ng_values); - SaveNgOp(ng_op_map, op->name(), ng_indices); - - return Status::OK(); -} - -static Status TranslateTransposeOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_input, ng_permutation; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input, ng_permutation)); - SaveNgOp(ng_op_map, op->name(), ConstructNgNode( - op->name(), ng_input, ng_permutation)); - return Status::OK(); -} - -static Status TranslateUnpackOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - TF_RETURN_IF_ERROR(ValidateInputCount(op, 1)); - - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, op, 0, ng_input)); - int32_t tf_axis; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "axis", &tf_axis)); - int32_t num_outputs; - TF_RETURN_IF_ERROR(GetNodeAttr(op->attrs(), "num", &num_outputs)); - - auto input_shape = ng_input.get_shape(); - auto rank = input_shape.size(); - for (int i = 0; i < num_outputs; ++i) { - std::vector begin(rank, 0); - std::vector end(rank, 0); - begin[tf_axis] = i; - end[tf_axis] = i + 1; - auto ng_begin = ConstructNgNode( - op->name(), ng::element::i64, ng::Shape{begin.size()}, begin); - auto ng_end = ConstructNgNode(op->name(), ng::element::i64, - ng::Shape{end.size()}, end); - std::vector begin_mask(rank, 1); - begin_mask[tf_axis] = 0; - std::vector end_mask(rank, 1); - end_mask[tf_axis] = 0; - std::vector new_axis_mask(rank, 0); - std::vector shrink_axis_mask(rank, 0); - shrink_axis_mask[tf_axis] = 1; - auto slice = ConstructNgNode( - op->name(), ng_input, ng_begin, ng_end, begin_mask, end_mask, - new_axis_mask, shrink_axis_mask); - SaveNgOp(ng_op_map, op->name(), slice); - } - return Status::OK(); -} - -static Status TranslateXdivyOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& ng_op_map) { - ng::Output ng_x, ng_y; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_x, ng_y)); - auto zero = - ConstructNgNode(op->name(), ng_x.get_element_type(), - ngraph::Shape{}, std::vector({0})); - auto x_is_zero = ConstructNgNode(op->name(), ng_x, zero); - auto ng_xdivy = ConstructNgNode(op->name(), ng_x, ng_y); - SaveNgOp(ng_op_map, op->name(), ConstructNgNode( - op->name(), x_is_zero, ng_x, ng_xdivy)); - return Status::OK(); -} - -static Status TranslateSelectOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input1, ng_input2, ng_input3; - TF_RETURN_IF_ERROR( - GetInputNodes(ng_op_map, op, ng_input1, ng_input2, ng_input3)); - auto ng_select = ConstructNgNode(op->name(), ng_input1, - ng_input2, ng_input3); - SaveNgOp(ng_op_map, op->name(), ng_select); - return Status::OK(); -} - -static Status TranslateWhereOp( - const TFNodeDecoder* op, const std::vector& static_input_map, - Builder::OpMap& 
ng_op_map) { - ng::Output ng_cond; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_cond)); - auto non_zero = ConstructNgNode(op->name(), ng_cond); - auto transpose_order = ConstructNgNode( - op->name(), ngraph::element::i64, ngraph::Shape{2}, - std::vector({1, 0})); - SaveNgOp(ng_op_map, op->name(), ConstructNgNode( - op->name(), non_zero, transpose_order)); - return Status::OK(); -} - -static Status TranslateZerosLikeOp(const TFNodeDecoder* op, - const std::vector&, - Builder::OpMap& ng_op_map) { - ng::Output ng_input; - TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, ng_input)); - - ng::Shape input_shape = ng_input.get_shape(); - std::vector const_values(ng::shape_size(input_shape), "0"); - auto ng_result = ConstructNgNode( - op->name(), ng_input.get_element_type(), input_shape, const_values); - SaveNgOp(ng_op_map, op->name(), ng_result); - return Status::OK(); -} - -const static std::map< - const string, - const function&, - Builder::OpMap&)>> - TRANSLATE_OP_MAP{ - {"Abs", TranslateUnaryOp}, - {"Acos", TranslateUnaryOp}, - {"Acosh", TranslateUnaryOp}, - {"Add", TranslateBinaryOp}, - {"AddN", TranslateAddNOp}, - {"AddV2", TranslateBinaryOp}, - {"Any", TranslateDirectReduceOp}, - {"All", TranslateDirectReduceOp}, - {"ArgMax", TranslateArgMaxOp}, - {"ArgMin", TranslateArgMinOp}, - {"Asin", TranslateUnaryOp}, - {"Asinh", TranslateUnaryOp}, - {"Atan", TranslateUnaryOp}, - {"Atanh", TranslateUnaryOp}, - {"AvgPool", TranslateAvgPoolOp}, - {"BiasAdd", TranslateBiasAddOp}, - {"Cast", TranslateCastOp}, - {"Ceil", TranslateUnaryOp}, - {"ConcatV2", TranslateConcatV2Op}, - {"Const", TranslateConstOp}, - {"Conv2D", TranslateConv2DOp}, - {"Conv2DBackpropInput", TranslateConv2DBackpropInputOp}, - {"Conv3D", TranslateConv3DOp}, - {"Cos", TranslateUnaryOp}, - {"Cosh", TranslateUnaryOp}, - {"Cumsum", TranslateCumsumOp}, - {"DepthToSpace", TranslateDepthToSpaceOp}, - {"DepthwiseConv2dNative", TranslateDepthwiseConv2dNativeOp}, - {"Equal", TranslateBinaryOp}, - {"Exp", TranslateUnaryOp}, - {"ExpandDims", TranslateExpandDimsOp}, - {"Fill", TranslateFillOp}, - {"Floor", TranslateUnaryOp}, - {"FloorDiv", TranslateFloorDivOp}, - {"FloorMod", TranslateBinaryOp}, - {"FusedBatchNorm", TranslateFusedBatchNormOp}, - {"FusedBatchNormV2", TranslateFusedBatchNormOp}, - {"FusedBatchNormV3", TranslateFusedBatchNormOp}, - {"Gather", TranslateGatherOp}, - {"GatherV2", TranslateGatherV2Op}, - {"_FusedConv2D", TranslateFusedConv2DOp}, - {"_FusedMatMul", TranslateFusedMatMulOp}, - {"Greater", TranslateBinaryOp}, - {"GreaterEqual", TranslateBinaryOp}, - {"Identity", TranslateIdentityOp}, - {"IsFinite", TranslateIsFiniteOp}, - {"L2Loss", TranslateL2LossOp}, - {"LogSoftmax", TranslateLogSoftmaxOp}, - {"Less", TranslateBinaryOp}, - {"LessEqual", TranslateBinaryOp}, - {"Log", TranslateUnaryOp}, - {"Log1p", TranslateLog1pOp}, - {"LogicalAnd", TranslateBinaryOp}, - {"LogicalNot", TranslateUnaryOp}, - {"LogicalOr", TranslateBinaryOp}, - {"LRN", TranslateLRNOp}, - {"MatMul", TranslateMatMulOp}, - {"Max", TranslateDirectReduceOp}, - {"Maximum", TranslateBinaryOp}, - {"MaxPool", TranslateMaxPoolOp<2>}, - {"MaxPool3D", TranslateMaxPoolOp<3>}, - {"NonMaxSuppressionV2", TranslateNonMaxSuppressionV2Op}, - {"Mean", TranslateDirectReduceOp}, - {"Min", TranslateDirectReduceOp}, - {"Minimum", TranslateBinaryOp}, - {"MirrorPad", TranslatePadOp}, - {"Mul", TranslateBinaryOp}, - {"Mod", TranslateBinaryOp}, - {"Neg", TranslateUnaryOp}, - {"NotEqual", TranslateBinaryOp}, - // Do nothing! 
NoOps sometimes get placed on nGraph for bureaucratic - // reasons, but they have no data flow inputs or outputs. - {"NoOp", [](const TFNodeDecoder*, const std::vector&, - Builder::OpMap&) { return Status::OK(); }}, - {"OneHot", TranslateOneHotOp}, - {"Pack", TranslatePackOp}, - {"Pad", TranslatePadOp}, - {"PadV2", TranslatePadOp}, - {"Pow", TranslateBinaryOp}, - // PreventGradient is just Identity in dataflow terms, so reuse that. - {"PreventGradient", TranslateIdentityOp}, - {"Prod", TranslateDirectReduceOp}, - {"Range", TranslateRangeOp}, - {"Rank", TranslateRankOp}, - {"RealDiv", TranslateBinaryOp}, - {"Reciprocal", TranslateReciprocalOp}, - {"Relu", TranslateUnaryOp}, - {"Relu6", TranslateRelu6Op}, - {"Reshape", TranslateReshapeOp}, - {"Rsqrt", TranslateRsqrtOp}, - {"Select", TranslateSelectOp}, - {"SelectV2", TranslateSelectOp}, - {"Shape", TranslateShapeOp}, - {"Sigmoid", TranslateUnaryOp}, - {"Sin", TranslateUnaryOp}, - {"Sinh", TranslateUnaryOp}, - {"Size", TranslateSizeOp}, - {"Sign", TranslateUnaryOp}, - {"Slice", TranslateSliceOp}, - {"Snapshot", TranslateIdentityOp}, - {"Softmax", TranslateSoftmaxOp}, - {"Softplus", TranslateUnaryOp}, - {"SpaceToDepth", TranslateSpaceToDepthOp}, - {"Split", TranslateSplitOp}, - {"SplitV", TranslateSplitVOp}, - {"Sqrt", TranslateUnaryOp}, - {"Square", TranslateSquareOp}, - {"SquaredDifference", TranslateBinaryOp}, - {"Squeeze", TranslateSqueezeOp}, - {"StridedSlice", TranslateStridedSliceOp}, - {"Sub", TranslateBinaryOp}, - {"Sum", TranslateDirectReduceOp}, - {"Tan", TranslateUnaryOp}, - {"Tanh", TranslateUnaryOp}, - {"Tile", TranslateTileOp}, - {"TopKV2", TranslateTopKV2Op}, - {"Transpose", TranslateTransposeOp}, - {"Unpack", TranslateUnpackOp}, - {"Where", TranslateWhereOp}, - {"Xdivy", TranslateXdivyOp}, - {"ZerosLike", TranslateZerosLikeOp}}; - - - -class NodeProtoWrapper : public TFNodeDecoder -{ - const NodeDef* node_def; - const GraphDef* graph_def; - std::vector* nodes; -public: - - NodeProtoWrapper(const NodeDef* _node_def, const GraphDef* _graph_def, std::vector* _nodes) : - node_def(_node_def), graph_def(_graph_def), nodes(_nodes) {} - -#define GET_ATTR_VALUE(TYPE, FIELD) virtual void getAttrValue (const char* name, TYPE* x) const override \ - { *x = node_def->attr().at(name).FIELD(); } -#define GET_ATTR_VALUE_VECTOR(TYPE, FIELD) virtual void getAttrValue (const char* name, std::vector* x) const override \ - {\ - const auto& list = node_def->attr().at(name).list();\ - x->reserve(/*node_def->attr().at(name).FIELD##_size()*/list.FIELD##_size());\ - for(size_t i = 0; i < list.FIELD##_size(); ++i)\ - {\ - x->push_back(list.FIELD(i));\ - }\ - } - - GET_ATTR_VALUE_VECTOR(int32_t, i) - GET_ATTR_VALUE_VECTOR(float, f) - //virtual void getAttrValue (const char* name, std::vector* x) const override { NGRAPH_TF_FE_NOT_IMPLEMENTED; } - //virtual void getAttrValue (const char* name, std::vector* x) const override { NGRAPH_TF_FE_NOT_IMPLEMENTED; } - GET_ATTR_VALUE(int32_t, i) - - virtual void getAttrValue (const char* name, DataType* x) const override - { - *x = node_def->attr().at(name).type(); - } - - virtual void getAttrValue (const char* name, ngraph::PartialShape* x) const override { - TFTensorShapeToNGraphShape(node_def->attr().at(name).shape(), x); - } - - GET_ATTR_VALUE(std::string, s) - GET_ATTR_VALUE(bool, b) - GET_ATTR_VALUE(long int, i) - GET_ATTR_VALUE(float, f) - - virtual void getAttrValue (const char* name, std::vector* x) const override { NGRAPH_TF_FE_NOT_IMPLEMENTED; } - - // a way to read Const value as a tensor - virtual void 
getAttrValue (const char* name, TensorWrapper** x) const override - { - // TODO: use std::shared_ptr! memory is lost! - *x = new TensorWrapper(&node_def->attr().at(name).tensor()); - } - - virtual std::string op () const override - { - return node_def->op(); - } - - virtual unsigned int num_inputs () const override { return node_def->input_size(); } - - virtual std::string name () const override - { - return node_def->name(); - } - - virtual std::string type_string () const override - { - return node_def->op(); - } - - virtual Status input_node (size_t index, TFNodeDecoder const * *) const override { NGRAPH_TF_FE_NOT_IMPLEMENTED; } - - virtual Status input_node (size_t index, TFNodeDecoder const * * retnode, size_t* outputPortIndex) const override - { - std::string input_name = node_def->input(index); - if(input_name.find(':') != std::string::npos) { - NGRAPH_TF_FE_NOT_IMPLEMENTED; - } - // TODO: don't search linearly every time!!! - for(auto node: *nodes) - { - if(node->name() == input_name) - { - *retnode = node; - *outputPortIndex = 0; - return Status::OK(); - } - } - return Status("Node is not found " + input_name + " when searched as an input for node " + name()); - } - - virtual DataType input_type (size_t index) const override { NGRAPH_TF_FE_NOT_IMPLEMENTED; } - virtual DataType output_type (size_t index) const override { NGRAPH_TF_FE_NOT_IMPLEMENTED; } - - virtual bool IsSink () const override - { - // TODO: recognize special op in TF runtime; don't know similar node for proto graph representation - return false; - } - - virtual bool IsSource () const override - { - // TODO: populate with other source operation types - return node_def->op() == "Placeholder"; - } - - virtual bool IsControlFlow () const override - { - // TODO - return false; - } - - virtual std::string DebugString () const override - { - return node_def->op() + "(with name " + node_def->name() + ")"; - } - - virtual bool IsArg () const override - { - // TODO - return IsSource(); - } - - virtual bool IsRetval () const override - { - // TODO - return IsSink(); - } -}; - -void PopulateNodesTopologicallySorted (const GraphDef* input_graph, std::vector* result) -{ - // WARNING! We suppose that input_graph contains nodes in topologically sorted order - // TODO: sort it if it is not the case - - result->reserve(input_graph->node_size()); - for(int i = 0; i < input_graph->node_size(); ++i) - { - result->push_back(new NodeProtoWrapper(&input_graph->node(i), input_graph, result)); - } -} - -Status Builder::TranslateGraph( - const std::map& inputs, - const std::vector& static_input_map, const GraphDef* input_graph, - const std::string name, std::shared_ptr& ng_function) { - // - // We will visit ops in topological order. - // - // ought to be `const TFNodeDecoder*`, but GetReversePostOrder doesn't use `const` - - std::vector ordered; - //GetReversePostOrder(*input_graph, &ordered, NodeComparatorName()); - PopulateNodesTopologicallySorted(input_graph, &ordered); - - // - // Split ops into params, retvals, and all others. 
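 // (Illustration, assuming a three-node GraphDef
 //    Placeholder(x) -> Relu -> Identity(y):
 //  the loop below puts x into tf_params, because NodeProtoWrapper::IsArg()
 //  holds only for Placeholder nodes, and Relu/Identity into tf_ops;
 //  tf_ret_vals stays empty for proto-backed graphs, so outputs are
 //  recovered later from terminal nodes that have no consumers.)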
-  //
-  vector<const TFNodeDecoder*> tf_params;
-  vector<const TFNodeDecoder*> tf_ret_vals;
-  vector<const TFNodeDecoder*> tf_ops;
-
-  for (const auto n : ordered) {
-#if 0
-    // TODO: Investigate why we need it
-    if (n->IsSink() || n->IsSource()) {
-      continue;
-    }
-#endif
-
-    if (n->IsControlFlow()) {
-      return errors::Unimplemented(
-          "Encountered a control flow op in the nGraph bridge: " +
-          n->DebugString());
-    }
-
-    if (n->IsArg()) {
-      tf_params.push_back(n);
-    } else if (n->IsRetval()) {
-      tf_ret_vals.push_back(n);
-    } else {
-      tf_ops.push_back(n);
-    }
-  }
-
-  //
-  // The op map holds a mapping from TensorFlow op names (strings) to
-  // vectors of generated nGraph Outputs.
-  //
-  Builder::OpMap ng_op_map;
-
-  //
-  // Populate the parameter list, and also put parameters into the op map.
-  //
-  std::cerr << "[ INFO ] Detected " << tf_params.size() << " parameters\n";
-  ng::ParameterVector ng_parameter_list(tf_params.size());
-  // Placeholders are enumerated in whatever order they appear; the running
-  // counter is used as the parameter index.
-  int index = 0;
-
-  for (auto parm : tf_params) {
-    DataType dtype;
-    // TODO: replace dtype by T when converting Arg
-    if (GetNodeAttr(parm->attrs(), "dtype", &dtype) != Status::OK()) {
-      return errors::InvalidArgument("No data type defined for _Arg");
-    }
-
-    // TODO: use this code for Arg
-    //if (GetNodeAttr(parm->attrs(), "index", &index) != Status::OK()) {
-    //  return errors::InvalidArgument("No index defined for _Arg");
-    //}
-
-    ng::element::Type ng_et;
-    TF_RETURN_IF_ERROR(TFDataTypeToNGraphElementType(dtype, &ng_et));
-
-    ng::PartialShape ng_shape;
-    auto overridenInputShape = inputs.find(parm->name());
-    if (overridenInputShape == inputs.end()) {
-      try {
-        GetNodeAttr(parm->attrs(), "shape", &ng_shape);
-      }
-      catch (const google::protobuf::FatalException&) {
-        // assume there is no shape attribute
-        // TODO: handle this in a cleaner way
-      }
-    } else {
-      ng_shape = overridenInputShape->second;
-    }
-
-#if 0
-    string prov_tag;
-    GetNodeAttr(parm->attrs(), "_prov_tag", &prov_tag);
-#endif
-    auto ng_param =
-        ConstructNgNode<opset::Parameter>(parm->name(), ng_et, ng_shape);
-    SaveNgOp(ng_op_map, parm->name(), ng_param);
-    ng_parameter_list[index] =
-        ngraph::as_type_ptr<opset::Parameter>(ng_param.get_node_shared_ptr());
-
-    index++;
-  }
-
-  //
-  // Now create the nGraph ops from TensorFlow ops.
-  //
-  for (auto op : tf_ops) {
-    NGRAPH_VLOG(2) << "Constructing op " << op->name() << " which is "
-                   << op->type_string() << "\n";
-
-    const function<Status(const TFNodeDecoder*,
-                          const std::vector<const TensorWrapper*>&,
-                          Builder::OpMap&)>* op_fun;
-
-    try {
-      op_fun = &(TRANSLATE_OP_MAP.at(op->type_string()));
-    } catch (const std::out_of_range&) {
-      // -----------------------------
-      // Catch-all for unsupported ops
-      // -----------------------------
-      NGRAPH_VLOG(3) << "No translation handler registered for op: "
-                     << op->name() << " (" << op->type_string() << ")";
-      NGRAPH_VLOG(3) << op->DebugString();
-      return errors::InvalidArgument(
-          "No translation handler registered for op: " + op->name() + " (" +
-          op->type_string() + ")\n" + op->DebugString());
-    }
-
-    try {
-      TF_RETURN_IF_ERROR((*op_fun)(op, static_input_map, ng_op_map));
-    } catch (const std::exception& e) {
-      return errors::Internal("Unhandled exception in op handler: " + op->name() +
-                              " (" + op->type_string() + ")\n" +
-                              op->DebugString() + "\n" + "what(): " +
-                              e.what());
-    }
-  }
-
-  //
-  // Populate the result list.
-  //
-  ng::ResultVector ng_result_list(tf_ret_vals.size());
-
-  for (auto n : tf_ret_vals) {
-    // Make sure that this _Retval only has one input node.
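    // (Example of the expected layout: a well-formed _Retval consumes exactly
    //  one tensor, e.g. a single Softmax output, and carries an "index" attr;
    //  with index = 0 its Result node lands in slot 0 of ng_result_list.)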
- if (n->num_inputs() != 1) { - return errors::InvalidArgument("_Retval has " + to_string(n->num_inputs()) + - " inputs, should have 1"); - } - - int index; - if (GetNodeAttr(n->attrs(), "index", &index) != Status::OK()) { - return errors::InvalidArgument("No index defined for _Retval"); - } - - ng::Output result; - TF_RETURN_IF_ERROR(GetInputNode(ng_op_map, n, 0, result)); - auto ng_result = ConstructNgNode(n->name(), result); - ng_result_list[index] = - ngraph::as_type_ptr(ng_result.get_node_shared_ptr()); - } - - // Find all terminal nodes in ngraph graph to complete list of results - for(auto op: tf_ops) - { - auto p = ng_op_map.find(op->name()); - if(p != ng_op_map.end()) - { - for(auto output: p->second) - { - if(output.get_target_inputs().empty()) - ng_result_list.push_back(std::make_shared(output)); - } - } - } - - // - // Create the nGraph function. - // - ng_function = - make_shared(ng_result_list, ng_parameter_list, name); - - // - // Apply additional passes on the nGraph function here. - // - { -#if 0 - ngraph::pass::Manager passes; - if (util::GetEnv("NGRAPH_TF_CONSTANT_FOLDING") == "1") { - passes.register_pass(); - } - if (util::GetEnv("NGRAPH_TF_TRANSPOSE_SINKING") != "0") { - passes.register_pass(); - } - passes.run_passes(ng_function); -#endif - } - NGRAPH_VLOG(5) << "Done with passes"; - // - // Request row-major layout on results. - // - for (auto result : ng_function->get_results()) { - result->set_needs_default_layout(true); - } - NGRAPH_VLOG(5) << "Done with translations"; - return Status::OK(); -} - -} // namespace ngraph_bridge -} // namespace tensorflow diff --git a/ngraph/frontend/tensorflow/src/ngraph_builder.h b/ngraph/frontend/tensorflow/src/ngraph_builder.h deleted file mode 100644 index a8997c07c7e329..00000000000000 --- a/ngraph/frontend/tensorflow/src/ngraph_builder.h +++ /dev/null @@ -1,218 +0,0 @@ -/******************************************************************************* - * Copyright 2017-2020 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- *******************************************************************************/ -#ifndef NGRAPH_TF_BRIDGE_BUILDER_H_ -#define NGRAPH_TF_BRIDGE_BUILDER_H_ - -#include -#include -#include -#include -#include -#include - -// TODO: remove explicit proto dependency from this common header -#include "graph.pb.h" - -#include "ngraph/ngraph.hpp" - -namespace tensorflow { - -// Stub for TF class -class Status -{ -public: - int status = 0; - std::string message; - - static Status OK () { return Status(); } - - Status (const std::string& x) : message(x), status(1) {} - Status () {} -}; - -inline bool operator!= (const Status& x, const Status& y) -{ - return x.status != y.status; -} - -inline std::ostream& operator<< (std::ostream& out, const Status& s) -{ - return out << s.message; -} - -#define TF_RETURN_IF_ERROR(S) if((S).status != 0)return S; - -// Stub for tf error system -class errors -{ -public: - - static Status InvalidArgument (const std::string& x) - { - return Status("InvalidArgument: " + x); - } - - static Status Internal (const std::string& x) - { - return Status("Internal: " + x); - } - - static Status Unimplemented (const std::string& x) - { - return Status("Unimplemented: " + x); - } -}; - - -namespace ngraph_bridge { - - class TensorWrapper; - -// ABI-free wrapper for TF node -class TFNodeDecoder -{ -public: - - // a hack to minimize amount of code - TFNodeDecoder& attrs () const { return const_cast(*this); } - virtual void getAttrValue (const char* name, std::vector* x) const = 0; - virtual void getAttrValue (const char* name, std::vector* x) const = 0; - virtual void getAttrValue (const char* name, int32_t* x) const = 0; - virtual void getAttrValue (const char* name, DataType* x) const = 0; - virtual void getAttrValue (const char* name, std::string* x) const = 0; - virtual void getAttrValue (const char* name, bool* x) const = 0; - virtual void getAttrValue (const char* name, long int* x) const = 0; - virtual void getAttrValue (const char* name, float* x) const = 0; - virtual void getAttrValue (const char* name, std::vector* x) const = 0; - virtual void getAttrValue (const char* name, ngraph::PartialShape* x) const = 0; - - virtual std::string op () const = 0; - - // a way to read Const value as a tensor - virtual void getAttrValue (const char* name, TensorWrapper** x) const = 0; - - virtual unsigned int num_inputs () const = 0; - virtual std::string name () const = 0; - virtual bool IsArg () const = 0; - virtual std::string type_string () const = 0; - - virtual Status input_node (size_t index, TFNodeDecoder const * *) const = 0; - virtual Status input_node (size_t index, TFNodeDecoder const * *, size_t* outputPortIndex) const = 0; - virtual DataType input_type (size_t index) const = 0; - virtual DataType output_type (size_t index) const = 0; - - virtual bool IsSink () const = 0; - virtual bool IsSource () const = 0; - virtual bool IsControlFlow () const = 0; - virtual std::string DebugString () const = 0; - virtual bool IsRetval () const = 0; -}; - -// TODO: separate interface from proto implementation; here is a proto implementation -class TensorWrapper -{ -public: - - const TensorProto* tensor_def; - - TensorWrapper (const TensorProto* _tensor_def) : tensor_def(_tensor_def) {} - - // a hack to minimize amount of code - TensorWrapper &attrs() const { return const_cast(*this); } - - //virtual void getAttrValue(const char *name, std::vector &x) = 0; - - template - std::vector flat () const; - - size_t NumElements () const; - - DataType dtype () const; -}; - -template -Status 
GetNodeAttr (TFNodeDecoder& attrs, const char* attr_name, T* result) -{ - attrs.getAttrValue(attr_name, result); - return Status::OK(); -} - -#if 0 -#define NGRAPH_VLOG(I) std::cerr -#else -#define NGRAPH_VLOG(I) std::ostringstream() -#endif - - - class Builder { - public: - static Status TranslateGraph( - const std::map& inputs, - const std::vector& static_input_map, const GraphDef* tf_graph, - const std::string name, std::shared_ptr& ng_function); - - using OpMap = std::unordered_map>>; - using ConstMap = std::map< - DataType, - std::pair&)>, - const ngraph::element::Type>>; - static const Builder::ConstMap& TF_NGRAPH_CONST_MAP(); - - template - static void MakePadding(const std::string& tf_padding_type, - const ngraph::Shape& ng_image_shape, - const ngraph::Shape& ng_kernel_shape, - const ngraph::Strides& ng_strides, - const ngraph::Shape& ng_dilations, - T& ng_padding_below, T& ng_padding_above) { - if (tf_padding_type == "SAME") { - ngraph::Shape img_shape = {0, 0}; - img_shape.insert(img_shape.end(), ng_image_shape.begin(), - ng_image_shape.end()); - ngraph::infer_auto_padding(img_shape, ng_kernel_shape, ng_strides, - ng_dilations, ngraph::op::PadType::SAME_UPPER, - ng_padding_above, ng_padding_below); - } else if (tf_padding_type == "VALID") { - ng_padding_below.assign(ng_image_shape.size(), 0); - ng_padding_above.assign(ng_image_shape.size(), 0); - } - } - - // This function is used to trace which ng node came from which tf node - // It does 3 things: - // 1. Attaches provenance tags. This is guaranteed to propagate the tag info - // to all nodes. - // The next 2 are not guaranteed to be present for all nodes. - // But when present they are correct and agree with provenance tags - // 2. Attaches friendly names. - // 3. Prints a log if NGRAPH_TF_LOG_PLACEMENT=1 - static void SetTracingInfo(const std::string& op_name, - const ngraph::Output ng_node); -}; - -inline std::string StrJoin (const std::vector& strs, const char* sep) -{ - std::ostringstream str; - std::copy(strs.begin(), strs.end(), std::ostream_iterator(str, sep)); - return str.str(); -} - -} // namespace ngraph_bridge -} // namespace tensorflow - -#endif diff --git a/ngraph/frontend/tensorflow/src/ngraph_conversions.cpp b/ngraph/frontend/tensorflow/src/ngraph_conversions.cpp deleted file mode 100644 index b018041bf5e1e5..00000000000000 --- a/ngraph/frontend/tensorflow/src/ngraph_conversions.cpp +++ /dev/null @@ -1,115 +0,0 @@ -/******************************************************************************* - * Copyright 2017-2020 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- *******************************************************************************/ - -#include "ngraph_conversions.h" - -namespace tensorflow { -namespace ngraph_bridge { - -void NHWCtoNCHW(const std::string& op_name, bool is_nhwc, - ngraph::Output& node) { - if (is_nhwc) { - auto rank = node.get_shape().size(); - if (rank == 4) { - Transpose<0, 3, 1, 2>(node); - } else if (rank == 5) { - Transpose3D<0, 4, 1, 2, 3>(node); - } - Builder::SetTracingInfo(op_name, node); - } -} - -void NCHWtoNHWC(const std::string& op_name, bool is_nhwc, - ngraph::Output& node) { - if (is_nhwc) { - auto rank = node.get_shape().size(); - if (rank == 4) { - Transpose<0, 2, 3, 1>(node); - } else if (rank == 5) { - Transpose3D<0, 2, 3, 4, 1>(node); - } - Builder::SetTracingInfo(op_name, node); - } -} - - - Status TFDataTypeToNGraphElementType(DataType tf_dt, - ngraph::element::Type* ng_et) { - switch (tf_dt) { - case DataType::DT_FLOAT: - *ng_et = ngraph::element::f32; - break; - case DataType::DT_DOUBLE: - *ng_et = ngraph::element::f64; - break; - case DataType::DT_INT32: - *ng_et = ngraph::element::i32; - break; - case DataType::DT_UINT8: - *ng_et = ngraph::element::u8; - break; - case DataType::DT_INT8: - *ng_et = ngraph::element::i8; - break; - case DataType::DT_UINT16: - *ng_et = ngraph::element::u16; - break; - case DataType::DT_INT64: - *ng_et = ngraph::element::i64; - break; - case DataType::DT_UINT32: - *ng_et = ngraph::element::u32; - break; - case DataType::DT_UINT64: - *ng_et = ngraph::element::u64; - break; - case DataType::DT_BOOL: - *ng_et = ngraph::element::boolean; - break; - case DataType::DT_QINT8: - *ng_et = ngraph::element::i8; - break; - case DataType::DT_QUINT8: - *ng_et = ngraph::element::u8; - break; - case DataType::DT_QINT32: - *ng_et = ngraph::element::i32; - break; - case DataType::DT_BFLOAT16: - *ng_et = ngraph::element::bf16; - break; - case DataType::DT_HALF: - *ng_et = ngraph::element::f16; - break; - default: - return errors::Unimplemented("Unsupported TensorFlow data type: " + - DataType_Name(tf_dt)); - } - return Status::OK(); - } - - Status TFTensorShapeToNGraphShape(const ::tensorflow::TensorShapeProto& tf_shape, - ngraph::PartialShape* ng_shape) { - std::vector dims; - for (int i = 0; i < tf_shape.dim_size(); i++) { - dims.push_back(tf_shape.dim(i).size()); - } - *ng_shape = ngraph::PartialShape(dims); - return Status::OK(); - } - -} // namespace ngraph_bridge -} // namespace tensorflow diff --git a/ngraph/frontend/tensorflow/src/ngraph_conversions.h b/ngraph/frontend/tensorflow/src/ngraph_conversions.h deleted file mode 100644 index b7f456b66f4c3f..00000000000000 --- a/ngraph/frontend/tensorflow/src/ngraph_conversions.h +++ /dev/null @@ -1,129 +0,0 @@ -/******************************************************************************* - * Copyright 2017-2020 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- *******************************************************************************/ - -#ifndef NGRAPH_TF_BRIDGE_CONVERSIONS_H_ -#define NGRAPH_TF_BRIDGE_CONVERSIONS_H_ -#pragma once - -#include - -//#include "logging/ngraph_log.h" -#include "default_opset.h" -#include "ngraph_builder.h" - -namespace tensorflow { -namespace ngraph_bridge { - - // Converts a TensorFlow DataType to an nGraph element::Type. Returns -// errors::Unimplemented if the element type is not supported by nGraph -// Core. Otherwise returns Status::OK(). - Status TFDataTypeToNGraphElementType(DataType tf_dt, - ngraph::element::Type* ng_et); - - Status TFTensorShapeToNGraphShape(const ::tensorflow::TensorShapeProto& tf_shape, - ngraph::PartialShape* ng_shape); - -template -void Transpose(ngraph::Output& node) { - static_assert(a < 4 && b < 4 && c < 4 && d < 4, - "Number of dimensions cannot exceed 4"); - static_assert(a != b && a != c && a != d && b != c && b != d && c != d, - "Dimensions indices cannot be equal"); - auto& s = node.get_shape(); - ngraph::Shape reshaped_shape{s[a], s[b], s[c], s[d]}; - ngraph::Shape transpose_order{a, b, c, d}; - NGRAPH_VLOG(3) << "transposing " << ngraph::join(s) << " to " - << ngraph::join(reshaped_shape) << " axis-order " - << ngraph::join(transpose_order); - auto input_order = std::make_shared( - ngraph::element::u64, ngraph::Shape{transpose_order.size()}, - transpose_order); - node = std::make_shared(node, input_order); -} - -template -void Transpose(std::shared_ptr& node) { - Transpose(node->get_default_output()); -} - -template -void Transpose3D(ngraph::Output& node) { - static_assert(a < 5 && b < 5 && c < 5 && d < 5 && e < 5, - "Number of dimensions cannot exceed 5"); - static_assert(a != b && a != c && a != d && a != e && b != c && b != d && - b != e && c != d && c != e && d != e, - "Dimensions indices cannot be equal"); - auto& s = node.get_shape(); - ngraph::Shape reshaped_shape{s[a], s[b], s[c], s[d], s[e]}; - ngraph::Shape transpose_order{a, b, c, d, e}; - NGRAPH_VLOG(3) << "transposing " << ngraph::join(s) << " to " - << ngraph::join(reshaped_shape) << "axis-order " - << ngraph::join(transpose_order); - auto input_order = std::make_shared( - ngraph::element::u64, ngraph::Shape{transpose_order.size()}, - transpose_order); - node = std::make_shared(node, input_order); -} - -template -void Transpose3D(std::shared_ptr& node) { - Transpose3D(node->get_default_output()); -} - -namespace detail { -template -void NHWCtoHW(const std::vector& src, std::vector& dst) { - if (dst.size() >= 2) { - dst[0] = src[1]; - dst[1] = src[2]; - } - if (dst.size() >= 3) { - dst[2] = src[3]; - } -} - -template -void NCHWtoHW(const std::vector& src, std::vector& dst) { - if (dst.size() >= 2) { - dst[0] = src[2]; - dst[1] = src[3]; - } - if (dst.size() >= 3) { - dst[2] = src[4]; - } -} -} - -void NHWCtoNCHW(const std::string& op_name, bool is_nhwc, - ngraph::Output& ng_input); - -void NCHWtoNHWC(const std::string& op_name, bool is_nhwc, - ngraph::Output& ng_node); - -template -void NHWCtoHW(bool is_nhwc, const std::vector& src, - std::vector& dst) { - if (is_nhwc) { - detail::NHWCtoHW(src, dst); - } else { - detail::NCHWtoHW(src, dst); - } -} - -} // namespace ngraph_bridge -} // namespace tensorflow - -#endif // NGRAPH_TF_BRIDGE_CONVERSIONS_H_ diff --git a/ngraph/frontend/tensorflow/src/proto/allocation_description.proto b/ngraph/frontend/tensorflow/src/proto/allocation_description.proto deleted file mode 100644 index f18caa40b2bde7..00000000000000 --- 
a/ngraph/frontend/tensorflow/src/proto/allocation_description.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package tensorflow; - -option cc_enable_arenas = true; -option java_outer_classname = "AllocationDescriptionProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; -option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/allocation_description_go_proto"; - -message AllocationDescription { - // Total number of bytes requested - int64 requested_bytes = 1; - - // Total number of bytes allocated if known - int64 allocated_bytes = 2; - - // Name of the allocator used - string allocator_name = 3; - - // Identifier of the allocated buffer if known - int64 allocation_id = 4; - - // Set if this tensor only has one remaining reference - bool has_single_reference = 5; - - // Address of the allocation. - uint64 ptr = 6; -} diff --git a/ngraph/frontend/tensorflow/src/proto/api_def.proto b/ngraph/frontend/tensorflow/src/proto/api_def.proto deleted file mode 100644 index 4d5fedd74ad5c1..00000000000000 --- a/ngraph/frontend/tensorflow/src/proto/api_def.proto +++ /dev/null @@ -1,136 +0,0 @@ -// Defines the text format for including per-op API definition and -// overrides for client language op code generators. - -syntax = "proto3"; - -package tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "ApiDefProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; -option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/api_def_go_proto"; -import "attr_value.proto"; - -// Used to specify and override the default API & behavior in the -// generated code for client languages, from what you would get from -// the OpDef alone. There will be a set of ApiDefs that are common -// to all client languages, and another set per client language. -// The per-client-language ApiDefs will inherit values from the -// common ApiDefs which it can either replace or modify. -// -// We separate the API definition from the OpDef so we can evolve the -// API while remaining backwards compatible when interpretting old -// graphs. Overrides go in an "api_def.pbtxt" file with a text-format -// ApiDefs message. -// -// WARNING: Be *very* careful changing the API for any existing op -- -// you can change the semantics of existing code. These changes may -// need to wait until a major release of TensorFlow to avoid breaking -// our compatibility promises. -message ApiDef { - // Name of the op (in the OpDef) to specify the API for. - string graph_op_name = 1; - // If this op is deprecated, set deprecation message to the message - // that should be logged when this op is used. - // The message should indicate alternative op to use, if any. - string deprecation_message = 12; - // Major version when the op will be deleted. For e.g. set this - // value to 2 if op API should be removed in TensorFlow 2.0 and - // deprecated in versions before that. - int32 deprecation_version = 13; - - enum Visibility { - // Normally this is "VISIBLE" unless you are inheriting a - // different value from another ApiDef. - DEFAULT_VISIBILITY = 0; - // Publicly visible in the API. - VISIBLE = 1; - // Do not include this op in the generated API. If visibility is - // set to 'SKIP', other fields are ignored for this op. - SKIP = 2; - // Hide this op by putting it into an internal namespace (or whatever - // is appropriate in the target language). 
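// (Illustrative use, assuming a text-format api_def.pbtxt entry:
//    op { graph_op_name: "MyOp" visibility: HIDDEN }
//  hides MyOp from the generated client API while leaving it callable
//  through hand-written wrappers.)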
- HIDDEN = 3; - } - Visibility visibility = 2; - - // If you specify any endpoint, this will replace all of the - // inherited endpoints. The first endpoint should be the - // "canonical" endpoint, and should not be deprecated (unless all - // endpoints are deprecated). - message Endpoint { - // Name should be either like "CamelCaseName" or - // "Package.CamelCaseName". Client-language-specific ApiDefs may - // use a snake_case convention instead of CamelCase. - string name = 1; - - // Set if this endpoint is deprecated. If set to true, a message suggesting - // to use a non-deprecated endpoint instead will be printed. If all - // endpoints are deprecated, set deprecation_message in ApiDef instead. - bool deprecated = 3; - - // Major version when an endpoint will be deleted. For e.g. set this - // value to 2 if endpoint should be removed in TensorFlow 2.0 and - // deprecated in versions before that. - int32 deprecation_version = 4; - } - repeated Endpoint endpoint = 3; - - message Arg { - string name = 1; - - // Change the name used to access this arg in the API from what - // is used in the GraphDef. Note that these names in `backticks` - // will also be replaced in the summary & description fields. - string rename_to = 2; - - // Note: this will replace any inherited arg doc. There is no - // current way of modifying arg descriptions (other than replacing - // them entirely) as can be done with op descriptions. - string description = 3; - } - repeated Arg in_arg = 4; - repeated Arg out_arg = 5; - // List of original in_arg names to specify new argument order. - // Length of arg_order should be either empty to keep current order - // or match size of in_arg. - repeated string arg_order = 11; - - // Description of the graph-construction-time configuration of this - // Op. That is to say, this describes the attr fields that will - // be specified in the NodeDef. - message Attr { - string name = 1; - - // Change the name used to access this attr in the API from what - // is used in the GraphDef. Note that these names in `backticks` - // will also be replaced in the summary & description fields. - string rename_to = 2; - - // Specify a new default value to use for this attr. This default - // will be used when creating new graphs, as opposed to the - // default in the OpDef, which will be used when interpreting old - // GraphDefs. - AttrValue default_value = 3; - - // Note: this will replace any inherited attr doc, there is no current - // way of modifying attr descriptions as can be done with op descriptions. - string description = 4; - } - repeated Attr attr = 6; - - // One-line human-readable description of what the Op does. - string summary = 7; - - // Additional, longer human-readable description of what the Op does. - string description = 8; - - // Modify an existing/inherited description by adding text to the beginning - // or end. 
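  // (For example, a hypothetical entry with
  //    description_prefix: "Prefer NewOp."
  //  prepends that sentence to the inherited description rather than
  //  replacing it.)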
- string description_prefix = 9; - string description_suffix = 10; -} - -message ApiDefs { - repeated ApiDef op = 1; -} diff --git a/ngraph/frontend/tensorflow/src/proto/attr_value.proto b/ngraph/frontend/tensorflow/src/proto/attr_value.proto deleted file mode 100644 index ddf134b239cb70..00000000000000 --- a/ngraph/frontend/tensorflow/src/proto/attr_value.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto3"; - -package tensorflow; - -import "tensor.proto"; -import "tensor_shape.proto"; -import "types.proto"; - -option cc_enable_arenas = true; -option java_outer_classname = "AttrValueProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; -option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/attr_value_go_proto"; - -// Protocol buffer representing the value for an attr used to configure an Op. -// Comment indicates the corresponding attr type. Only the field matching the -// attr type may be filled. -message AttrValue { - // LINT.IfChange - message ListValue { - repeated bytes s = 2; // "list(string)" - repeated int64 i = 3 [packed = true]; // "list(int)" - repeated float f = 4 [packed = true]; // "list(float)" - repeated bool b = 5 [packed = true]; // "list(bool)" - repeated DataType type = 6 [packed = true]; // "list(type)" - repeated TensorShapeProto shape = 7; // "list(shape)" - repeated TensorProto tensor = 8; // "list(tensor)" - repeated NameAttrList func = 9; // "list(attr)" - } - // LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/c/c_api.cc) - - oneof value { - bytes s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - DataType type = 6; // "type" - TensorShapeProto shape = 7; // "shape" - TensorProto tensor = 8; // "tensor" - ListValue list = 1; // any "list(...)" - - // "func" represents a function. func.name is a function's name or - // a primitive op's name. func.attr.first is the name of an attr - // defined for that function. func.attr.second is the value for - // that attr in the instantiation. - NameAttrList func = 10; - - // This is a placeholder only used in nodes defined inside a - // function. It indicates the attr value will be supplied when - // the function is instantiated. For example, let us suppose a - // node "N" in function "FN". "N" has an attr "A" with value - // placeholder = "foo". When FN is instantiated with attr "foo" - // set to "bar", the instantiated node N's attr A will have been - // given the value "bar". - string placeholder = 9; - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. -message NameAttrList { - string name = 1; - map attr = 2; -} diff --git a/ngraph/frontend/tensorflow/src/proto/cost_graph.proto b/ngraph/frontend/tensorflow/src/proto/cost_graph.proto deleted file mode 100644 index 166c130df5fcbc..00000000000000 --- a/ngraph/frontend/tensorflow/src/proto/cost_graph.proto +++ /dev/null @@ -1,89 +0,0 @@ -syntax = "proto3"; - -package tensorflow; - -import "tensor_shape.proto"; -import "types.proto"; - -option cc_enable_arenas = true; -option java_outer_classname = "CostGraphProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; -option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/cost_graph_go_proto"; - -message CostGraphDef { - message Node { - // The name of the node. Names are globally unique. - string name = 1; - - // The device of the node. 
Can be empty if the node is mapped to the - // default partition or partitioning hasn't been run yet. - string device = 2; - - // The id of the node. Node ids are only unique inside a partition. - int32 id = 3; - - // Inputs of this node. They must be executed before this node can be - // executed. An input is a particular output of another node, specified - // by the node id and the output index. - message InputInfo { - int32 preceding_node = 1; - int32 preceding_port = 2; - } - repeated InputInfo input_info = 4; - - // Outputs of this node. - message OutputInfo { - int64 size = 1; - // If >= 0, the output is an alias of an input. Note that an alias input - // may itself be an alias. The algorithm will therefore need to follow - // those pointers. - int64 alias_input_port = 2; - TensorShapeProto shape = 3; - DataType dtype = 4; - } - repeated OutputInfo output_info = 5; - - // Temporary memory used by this node. - int64 temporary_memory_size = 6; - - // Persistent memory used by this node. - int64 persistent_memory_size = 12; - - int64 host_temp_memory_size = 10 [deprecated = true]; - int64 device_temp_memory_size = 11 [deprecated = true]; - int64 device_persistent_memory_size = 16 [deprecated = true]; - - // Estimate of the computational cost of this node, in microseconds. - int64 compute_cost = 9; - - // Analytical estimate of the computational cost of this node, in - // microseconds. - int64 compute_time = 14; - - // Analytical estimate of the memory access cost of this node, in - // microseconds. - int64 memory_time = 15; - - // If true, the output is permanent: it can't be discarded, because this - // node is part of the "final output". Nodes may depend on final nodes. - bool is_final = 7; - - // Ids of the control inputs for this node. - repeated int32 control_input = 8; - - // Are the costs inaccurate? - bool inaccurate = 17; - } - repeated Node node = 1; - - // Total cost of this graph, typically used for balancing decisions. - message AggregatedCost { - // Aggregated cost value. - float cost = 1; - - // Aggregated cost dimension (e.g. 'memory', 'compute', 'network'). - string dimension = 2; - } - repeated AggregatedCost cost = 2; -} diff --git a/ngraph/frontend/tensorflow/src/proto/dataset_options.proto b/ngraph/frontend/tensorflow/src/proto/dataset_options.proto deleted file mode 100644 index 05e15e156254e7..00000000000000 --- a/ngraph/frontend/tensorflow/src/proto/dataset_options.proto +++ /dev/null @@ -1,179 +0,0 @@ -syntax = "proto3"; - -package tensorflow.data; - -// Represents the type of auto-sharding we enable. -enum AutoShardPolicy { - AUTO = 0; - FILE = 1; - DATA = 2; - OFF = -1; -} - -message DistributeOptions { - // The type of sharding that auto-shard should attempt. If this is set to - // FILE, then we will attempt to shard by files (each worker will get a set of - // files to process). If we cannot find a set of files to shard for at least - // one file per worker, we will error out. When this option is selected, make - // sure that you have enough files so that each worker gets at least one file. - // There will be a runtime error thrown if there are insufficient files. If - // this is set to DATA, then we will shard by elements produced by the - // dataset, and each worker will process the whole dataset and discard the - // portion that is not for itself. If this is set to OFF, then we will not - // autoshard, and each worker will receive a copy of the full dataset. 
-  // This option is set to AUTO by default; AUTO will attempt to first shard
-  // by FILE, and fall back to sharding by DATA if we cannot find a set of
-  // files to shard.
-  AutoShardPolicy auto_shard_policy = 1;
-  // The number of devices attached to this input pipeline.
-  oneof optional_num_devices {
-    int32 num_devices = 2;
-  }
-}
-
-message MapVectorization {
-  // Whether to vectorize map transformations.
-  oneof optional_enabled {
-    bool enabled = 1;
-  }
-  // Whether to use ChooseFastestBranchDataset with this transformation. If
-  // True, the pipeline picks between the vectorized and original segment at
-  // runtime based on their iterations speed.
-  oneof optional_use_choose_fastest {
-    bool use_choose_fastest = 2;
-  }
-}
-
-message OptimizationOptions {
-  // Whether to apply default graph optimizations. If False, only graph
-  // optimizations that have been explicitly enabled will be applied.
-  oneof optional_apply_default_optimizations {
-    bool apply_default_optimizations = 1;
-  }
-  // Whether to automatically tune performance knobs.
-  oneof optional_autotune {
-    bool autotune = 2;
-  }
-  // When autotuning is enabled (through autotune), determines whether to also
-  // autotune buffer sizes for datasets with parallelism.
-  oneof optional_autotune_buffers {
-    bool autotune_buffers = 3;
-  }
-  // When autotuning is enabled (through autotune), determines the CPU budget
-  // to use. Values greater than the number of schedulable CPU cores are
-  // allowed but may result in CPU contention.
-  oneof optional_autotune_cpu_budget {
-    int32 autotune_cpu_budget = 4;
-  }
-  // When autotuning is enabled (through autotune), determines the RAM budget
-  // to use. Values greater than the available RAM in bytes may result in
-  // OOM. If 0, defaults to half of the available RAM in bytes.
-  oneof optional_autotune_ram_budget {
-    int32 autotune_ram_budget = 5;
-  }
-  // Whether to fuse filter transformations.
-  oneof optional_filter_fusion {
-    bool filter_fusion = 6;
-  }
-  // Whether to fuse filter dataset that predicts random_uniform < rate into
-  // a sampling dataset.
-  oneof optional_filter_with_random_uniform_fusion {
-    bool filter_with_random_uniform_fusion = 7;
-  }
-  // Whether to hoist tf.random_uniform() ops out of map transformations.
-  oneof optional_hoist_random_uniform {
-    bool hoist_random_uniform = 8;
-  }
-  // Whether to fuse map and batch transformations.
-  oneof optional_map_and_batch_fusion {
-    bool map_and_batch_fusion = 9;
-  }
-  // Whether to fuse map and filter transformations.
-  oneof optional_map_and_filter_fusion {
-    bool map_and_filter_fusion = 10;
-  }
-  // Whether to fuse map transformations.
-  oneof optional_map_fusion {
-    bool map_fusion = 11;
-  }
-  // Whether to parallelize stateless map transformations.
-  oneof optional_map_parallelization {
-    bool map_parallelization = 12;
-  }
-  // The map vectorization options associated with the dataset.
-  MapVectorization map_vectorization = 13;
-  // Whether to eliminate no-op transformations.
-  oneof optional_noop_elimination {
-    bool noop_elimination = 14;
-  }
-  // Whether to parallelize copying of batch elements. This optimization is
-  // highly experimental and can cause performance degradation (e.g. when the
-  // parallelization overhead exceeds the benefits of performing the data
-  // copies in parallel). You should only enable this optimization if a) your
-  // input pipeline is bottlenecked on batching and b) you have validated
-  // that this optimization improves performance.
-  oneof optional_parallel_batch {
-    bool parallel_batch = 15;
-  }
-  // Whether to reorder ops that will discard data to the front of unary
-  // cardinality preserving transformations, e.g. dataset.map(...).take(3)
-  // will be optimized to dataset.take(3).map(...). For now this optimization
-  // will move `skip`, `shard` and `take` to the front of `map` and
-  // `prefetch`. This optimization is only for performance; it will not
-  // affect the output of the dataset.
-  oneof optional_reorder_data_discarding_ops {
-    bool reorder_data_discarding_ops = 16;
-  }
-  // Whether to fuse shuffle and repeat transformations.
-  oneof optional_shuffle_and_repeat_fusion {
-    bool shuffle_and_repeat_fusion = 17;
-  }
-}
-
-message ThreadingOptions {
-  // If set, it overrides the maximum degree of intra-op parallelism.
-  oneof optional_max_intra_op_parallelism {
-    int32 max_intra_op_parallelism = 1;
-  }
-  // If set, the dataset will use a private threadpool of the given size.
-  oneof optional_private_threadpool_size {
-    int32 private_threadpool_size = 2;
-  }
-}
-
-// Represents how to handle external state during serialization.
-enum ExternalStatePolicy {
-  WARN = 0;
-  IGNORE = 1;
-  FAIL = 2;
-}
-
-// Message stored with Dataset objects to control how datasets are processed
-// and optimized.
-message Options {
-  // Whether the outputs need to be produced in deterministic order.
-  oneof optional_deterministic {
-    bool deterministic = 1;
-  }
-  // The distribution strategy options associated with the dataset.
-  DistributeOptions distribute_options = 2;
-  // The optimization options associated with the dataset.
-  OptimizationOptions optimization_options = 3;
-  // Whether to introduce 'slack' in the last `prefetch` of the input
-  // pipeline, if it exists. This may reduce CPU contention with accelerator
-  // host-side activity at the start of a step. The slack frequency is
-  // determined by the number of devices attached to this input pipeline.
-  oneof optional_slack {
-    bool slack = 4;
-  }
-  // The threading options associated with the dataset.
-  ThreadingOptions threading_options = 5;
-  // This option can be used to override the default policy for how to handle
-  // external state when serializing a dataset or checkpointing its iterator.
-  // There are three settings available - IGNORE: External state is ignored
-  // without a warning; WARN: External state is ignored and a warning is
-  // logged; FAIL: External state results in an error.
-  oneof optional_external_state_policy {
-    ExternalStatePolicy external_state_policy = 6;
-  }
-}
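These knobs surface in Python through `tf.data.Options`; a small sketch, assuming a TF 2.x release where the attribute names below exist (they are marked experimental and have moved between releases), of selecting a sharding policy:

    import tensorflow as tf

    opts = tf.data.Options()
    opts.experimental_distribute.auto_shard_policy = (
        tf.data.experimental.AutoShardPolicy.DATA)
    ds = tf.data.Dataset.range(8).with_options(opts)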
diff --git a/ngraph/frontend/tensorflow/src/proto/device_attributes.proto b/ngraph/frontend/tensorflow/src/proto/device_attributes.proto
deleted file mode 100644
index 4c7a2b87e4f5d7..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/device_attributes.proto
+++ /dev/null
@@ -1,53 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-option cc_enable_arenas = true;
-option java_outer_classname = "DeviceAttributesProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/device_attributes_go_proto";
-
-message InterconnectLink {
-  int32 device_id = 1;
-  string type = 2;
-  int32 strength = 3;
-}
-
-message LocalLinks {
-  repeated InterconnectLink link = 1;
-}
-
-message DeviceLocality {
-  // Optional bus locality of device. Default value of 0 means
-  // no specific locality. Specific localities are indexed from 1.
-  int32 bus_id = 1;
-
-  // Optional NUMA locality of device.
-  int32 numa_node = 2;
-
-  // Optional local interconnect links to other devices.
-  LocalLinks links = 3;
-}
-
-message DeviceAttributes {
-  // Fully specified name of the device within a cluster.
-  string name = 1;
-
-  // String representation of device_type.
-  string device_type = 2;
-
-  // Memory capacity of device in bytes.
-  int64 memory_limit = 4;
-
-  // Platform-specific data about device that may be useful
-  // for supporting efficient data transfers.
-  DeviceLocality locality = 5;
-
-  // A device is assigned a globally unique number each time it is
-  // initialized. "incarnation" should never be 0.
-  fixed64 incarnation = 6;
-
-  // String representation of the physical device that this device maps to.
-  string physical_device_desc = 7;
-}
diff --git a/ngraph/frontend/tensorflow/src/proto/function.proto b/ngraph/frontend/tensorflow/src/proto/function.proto
deleted file mode 100644
index 8502ae5c494b0b..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/function.proto
+++ /dev/null
@@ -1,126 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "attr_value.proto";
-import "node_def.proto";
-import "op_def.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "FunctionProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/function_go_proto";
-
-// A library is a set of named functions.
-message FunctionDefLibrary {
-  repeated FunctionDef function = 1;
-  repeated GradientDef gradient = 2;
-}
-
-// A function can be instantiated when the runtime can bind every attr
-// with a value. When a GraphDef has a call to a function, it must
-// have binding for every attr defined in the signature.
-//
-// TODO(zhifengc):
-//   * device spec, etc.
-message FunctionDef {
-  // The definition of the function's name, arguments, return values,
-  // attrs etc.
-  OpDef signature = 1;
-
-  // Attributes specific to this function definition.
-  map<string, AttrValue> attr = 5;
-
-  // Attributes for function arguments. These attributes are the same set of
-  // valid attributes as to _Arg nodes.
-  message ArgAttrs {
-    map<string, AttrValue> attr = 1;
-  }
-  map<uint32, ArgAttrs> arg_attr = 7;
-
-  // Unique IDs for each resource argument, used to track aliasing resources.
-  // If Argument A and Argument B alias each other, then
-  // resource_arg_unique_ids[A.index] == resource_arg_unique_ids[B.index].
-  //
-  // If this field is empty, none of the arguments could alias; otherwise,
-  // every resource argument should have an entry in this field.
-  //
-  // When instantiated, the unique IDs will be attached to the _Arg nodes'
-  // "_resource_arg_unique_id" attribute.
-  map<uint32, uint32> resource_arg_unique_id = 8;
-
-  // NOTE: field id 2 deleted on Jan 11, 2017, GraphDef version 21.
-  reserved 2;
-
-  // In both of the following fields, there is the need to specify an
-  // output that is used as either the input to another node (in
-  // `node_def`) or as a return value of the function (in `ret`).
-  // Unlike the NodeDefs in GraphDef, we need to be able to specify a
-  // list in some cases (instead of just single outputs). Also, we
-  // need to be able to deal with lists of unknown length (so the
-  // output index may not be known at function definition time). So
-  // we use the following format instead:
-  // * "fun_in" where "fun_in" is the name of a function input arg in
-  //   the `signature` field above. This represents that input, whether
-  //   it is a single tensor or a list.
-  // * "fun_in:0" gives the first element of a function input arg (a
-  //   non-list input is considered a list of length 1 for these
-  //   purposes).
-  // * "node:out" where "node" is the name of a node in `node_def` and
-  //   "out" is the name of one of its op's output arguments (the name
-  //   comes from the OpDef of the node's op). This represents that
-  //   node's output, whether it is a single tensor or a list.
-  //   Note: We enforce that an op's output arguments are never
-  //   renamed in the backwards-compatibility test.
-  // * "node:out:0" gives the first element of a node output arg (a
-  //   non-list output is considered a list of length 1 for these
-  //   purposes).
-  //
-  // NOT CURRENTLY SUPPORTED (but may be in the future):
-  // * "node:out:-1" gives last element in a node output list
-  // * "node:out:1:" gives a list with all but the first element in a
-  //   node output list
-  // * "node:out::-1" gives a list with all but the last element in a
-  //   node output list
-
-  // The body of the function. Unlike the NodeDefs in a GraphDef, attrs
-  // may have values of type `placeholder` and the `input` field uses
-  // the "output" format above.
-
-  // By convention, "op" in node_def is resolved by consulting with a
-  // user-defined library first. If not resolved, "func" is assumed to
-  // be a builtin op.
-  repeated NodeDef node_def = 3;
-
-  // A mapping from the output arg names from `signature` to the
-  // outputs from `node_def` that should be returned by the function.
-  map<string, string> ret = 4;
-
-  // A mapping from control output names from `signature` to node names in
-  // `node_def` which should be control outputs of this function.
-  map<string, string> control_ret = 6;
-}
-
-// GradientDef defines the gradient function of a function defined in
-// a function library.
-//
-// A gradient function g (specified by gradient_func) for a function f
-// (specified by function_name) must follow the following:
-//
-// The function 'f' must be a numerical function which takes N inputs
-// and produces M outputs. Its gradient function 'g' is a function
-// taking N + M inputs and producing N outputs.
-//
-// I.e. if we have
-//   (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),
-// then, g is
-//   (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,
-//                                      dL/dy1, dL/dy2, ..., dL/dy_M),
-// where L is a scalar-value function of (x1, x2, ..., x_N) (e.g., the
-// loss function). dL/dx_i is the partial derivative of L with respect
-// to x_i.
-message GradientDef {
-  string function_name = 1;  // The function name.
-  string gradient_func = 2;  // The gradient function's name.
-}
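A hedged sketch of building a FunctionDef by hand with the generated Python bindings, illustrating the "node:out:0" ret format described above (the function name "Square" and node name "sq" are illustrative only):

    from tensorflow.core.framework import function_pb2, types_pb2

    fdef = function_pb2.FunctionDef()
    fdef.signature.name = "Square"
    fdef.signature.input_arg.add(name="x", type=types_pb2.DT_FLOAT)
    fdef.signature.output_arg.add(name="y", type=types_pb2.DT_FLOAT)
    node = fdef.node_def.add(name="sq", op="Mul", input=["x", "x"])
    node.attr["T"].type = types_pb2.DT_FLOAT
    # "node:out:0" form: Mul's OpDef names its output arg "z".
    fdef.ret["y"] = "sq:z:0"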
diff --git a/ngraph/frontend/tensorflow/src/proto/graph.proto b/ngraph/frontend/tensorflow/src/proto/graph.proto
deleted file mode 100644
index 76bdf43c02ae83..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/graph.proto
+++ /dev/null
@@ -1,56 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "function.proto";
-import "node_def.proto";
-import "versions.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "GraphProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/graph_go_proto";
-
-// Represents the graph of operations
-message GraphDef {
-  repeated NodeDef node = 1;
-
-  // Compatibility versions of the graph. See core/public/version.h for
-  // version history. The GraphDef version is distinct from the TensorFlow
-  // version, and each release of TensorFlow will support a range of GraphDef
-  // versions.
-  VersionDef versions = 4;
-
-  // Deprecated single version field; use versions above instead. Since all
-  // GraphDef changes before "versions" was introduced were forward
-  // compatible, this field is entirely ignored.
-  int32 version = 3 [deprecated = true];
-
-  // "library" provides user-defined functions.
-  //
-  // Naming:
-  //   * library.function.name are in a flat namespace.
-  //     NOTE: We may need to change it to be hierarchical to support
-  //     different orgs. E.g.,
-  //     { "/google/nn", { ... }},
-  //     { "/google/vision", { ... }}
-  //     { "/org_foo/module_bar", { ... }}
-  //     map<string, FunctionDefLib> named_lib;
-  //   * If node[i].op is the name of one function in "library",
-  //     node[i] is deemed as a function call. Otherwise, node[i].op
-  //     must be a primitive operation supported by the runtime.
-  //
-  // Function call semantics:
-  //
-  //   * The callee may start execution as soon as some of its inputs
-  //     are ready. The caller may want to use Tuple() mechanism to
-  //     ensure all inputs are ready at the same time.
-  //
-  //   * The consumer of return values may start executing as soon as
-  //     the return values the consumer depends on are ready. The
-  //     consumer may want to use Tuple() mechanism to ensure the
-  //     consumer does not start until all return values of the callee
-  //     function are ready.
-  FunctionDefLibrary library = 2;
-}
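A minimal sketch of assembling a two-node GraphDef with the generated bindings (the node names "c" and "i" are illustrative; an empty tensor_shape on the Const value denotes a scalar):

    from tensorflow.core.framework import graph_pb2, types_pb2

    gdef = graph_pb2.GraphDef()
    c = gdef.node.add(name="c", op="Const")
    c.attr["dtype"].type = types_pb2.DT_FLOAT
    c.attr["value"].tensor.dtype = types_pb2.DT_FLOAT
    c.attr["value"].tensor.float_val.append(1.0)
    i = gdef.node.add(name="i", op="Identity", input=["c"])
    i.attr["T"].type = types_pb2.DT_FLOAT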
diff --git a/ngraph/frontend/tensorflow/src/proto/graph_transfer_info.proto b/ngraph/frontend/tensorflow/src/proto/graph_transfer_info.proto
deleted file mode 100644
index bb6af6e990c3f8..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/graph_transfer_info.proto
+++ /dev/null
@@ -1,71 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "types.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "GraphTransferInfoProto";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/graph_transfer_info_go_proto";
-
-message GraphTransferNodeInput {
-  int32 node_id = 1;
-  int32 output_port = 2;
-}
-message GraphTransferNodeInfo {
-  string name = 1;
-  int32 node_id = 2;
-  string type_name = 3;
-  int32 soc_op_id = 4;
-  int32 padding_id = 5;
-  int32 input_count = 6;
-  int32 output_count = 7;
-}
-message GraphTransferConstNodeInfo {
-  string name = 1;
-  int32 node_id = 2;
-  repeated int64 shape = 3;
-  bytes data = 4;
-  DataType dtype = 5;
-}
-message GraphTransferNodeInputInfo {
-  int32 node_id = 1;
-  repeated GraphTransferNodeInput node_input = 2;
-}
-message GraphTransferNodeOutputInfo {
-  int32 node_id = 1;
-  repeated int32 max_byte_size = 2;
-}
-message GraphTransferGraphInputNodeInfo {
-  string name = 1;
-  repeated int64 shape = 2;
-  DataType dtype = 3;
-}
-
-message GraphTransferGraphOutputNodeInfo {
-  string name = 1;
-  repeated int64 shape = 2;
-  DataType dtype = 3;
-}
-
-// Protocol buffer representing a handle to a tensorflow resource. Handles are
-// not valid across executions, but can be serialized back and forth from
-// within a single run.
-message GraphTransferInfo {
-  enum Destination {
-    NOP = 0;
-    HEXAGON = 1;
-  }
-
-  repeated GraphTransferNodeInfo node_info = 1;
-  repeated GraphTransferConstNodeInfo const_node_info = 2;
-  repeated GraphTransferNodeInputInfo node_input_info = 3;
-  repeated GraphTransferNodeOutputInfo node_output_info = 4;
-  // Input Node parameters of transferred graph
-  repeated GraphTransferGraphInputNodeInfo graph_input_node_info = 5;
-  repeated GraphTransferGraphOutputNodeInfo graph_output_node_info = 6;
-  // Destination of graph transfer
-  Destination destination = 7;
-}
diff --git a/ngraph/frontend/tensorflow/src/proto/kernel_def.proto b/ngraph/frontend/tensorflow/src/proto/kernel_def.proto
deleted file mode 100644
index de76a496d8cf19..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/kernel_def.proto
+++ /dev/null
@@ -1,48 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "attr_value.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "KernelDefProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/kernel_def_go_proto";
-
-message KernelDef {
-  // Must match the name of an Op.
-  string op = 1;
-
-  // Type of device this kernel runs on.
-  string device_type = 2;
-
-  message AttrConstraint {
-    // Name of an attr from the Op.
-    string name = 1;
-
-    // A list of values that this kernel supports for this attr.
-    // Like OpDef.AttrDef.allowed_values, except for kernels instead of Ops.
-    AttrValue allowed_values = 2;
-  }
-  repeated AttrConstraint constraint = 3;
-
-  // Names of the Op's input_/output_args that reside in host memory
-  // instead of device memory.
-  repeated string host_memory_arg = 4;
-
-  // This allows experimental kernels to be registered for an op that
-  // won't be used unless the user specifies a "_kernel" attr with
-  // value matching this.
-  string label = 5;
-
-  // Prioritization of kernel amongst different devices. By default we assume
-  // priority is 0. The higher the priority the better. By default (i.e. if
-  // this is not set), we prefer GPU kernels over CPU.
-  int32 priority = 6;
-}
-
-// A collection of KernelDefs
-message KernelList {
-  repeated KernelDef kernel = 1;
-}
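A small sketch of a kernel registration record with a type constraint, using the generated bindings (the allowed values chosen here are illustrative):

    from tensorflow.core.framework import kernel_def_pb2, types_pb2

    kdef = kernel_def_pb2.KernelDef(op="MatMul", device_type="GPU")
    c = kdef.constraint.add(name="T")  # constrain the op's "T" attr
    c.allowed_values.list.type.extend([types_pb2.DT_FLOAT, types_pb2.DT_HALF])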
diff --git a/ngraph/frontend/tensorflow/src/proto/log_memory.proto b/ngraph/frontend/tensorflow/src/proto/log_memory.proto
deleted file mode 100644
index aa30cef2aeaf4c..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/log_memory.proto
+++ /dev/null
@@ -1,95 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "tensor_description.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "LogMemoryProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/log_memory_go_proto";
-
-message MemoryLogStep {
-  // Process-unique step id.
-  int64 step_id = 1;
-
-  // Handle describing the feeds and fetches of the step.
-  string handle = 2;
-}
-
-message MemoryLogTensorAllocation {
-  // Process-unique step id.
-  int64 step_id = 1;
-
-  // Name of the kernel making the allocation as set in GraphDef,
-  // e.g., "affine2/weights/Assign".
-  string kernel_name = 2;
-
-  // Allocated tensor details.
-  TensorDescription tensor = 3;
-}
-
-message MemoryLogTensorDeallocation {
-  // Id of the tensor buffer being deallocated, used to match to a
-  // corresponding allocation.
-  int64 allocation_id = 1;
-
-  // Name of the allocator used.
-  string allocator_name = 2;
-}
-
-message MemoryLogTensorOutput {
-  // Process-unique step id.
-  int64 step_id = 1;
-
-  // Name of the kernel producing an output as set in GraphDef, e.g.,
-  // "affine2/weights/Assign".
-  string kernel_name = 2;
-
-  // Index of the output being set.
-  int32 index = 3;
-
-  // Output tensor details.
-  TensorDescription tensor = 4;
-}
-
-message MemoryLogRawAllocation {
-  // Process-unique step id.
-  int64 step_id = 1;
-
-  // Name of the operation making the allocation.
-  string operation = 2;
-
-  // Number of bytes in the allocation.
-  int64 num_bytes = 3;
-
-  // Address of the allocation.
-  uint64 ptr = 4;
-
-  // Id of the tensor buffer being allocated, used to match to a
-  // corresponding deallocation.
-  int64 allocation_id = 5;
-
-  // Name of the allocator used.
-  string allocator_name = 6;
-}
-
-message MemoryLogRawDeallocation {
-  // Process-unique step id.
-  int64 step_id = 1;
-
-  // Name of the operation making the deallocation.
-  string operation = 2;
-
-  // Id of the tensor buffer being deallocated, used to match to a
-  // corresponding allocation.
-  int64 allocation_id = 3;
-
-  // Name of the allocator used.
-  string allocator_name = 4;
-
-  // True if the deallocation is queued and will be performed later,
-  // e.g. for GPU lazy freeing of buffers.
-  bool deferred = 5;
-}
diff --git a/ngraph/frontend/tensorflow/src/proto/model.proto b/ngraph/frontend/tensorflow/src/proto/model.proto
deleted file mode 100644
index ba74d7a2b7ee19..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/model.proto
+++ /dev/null
@@ -1,130 +0,0 @@
-syntax = "proto3";
-
-package tensorflow.data.model;
-
-option cc_enable_arenas = true;
-
-// Class of a node in the performance model.
-enum NodeClass {
-  UNKNOWN = 0;
-  INTERLEAVE_MANY = 1;
-  ASYNC_INTERLEAVE_MANY = 2;
-  KNOWN_RATIO = 3;
-  ASYNC_KNOWN_RATIO = 4;
-  UNKNOWN_RATIO = 5;
-}
-
-// Algorithm used for model autotuning optimization.
-enum AutotuneAlgorithm {
-  HILL_CLIMB = 0;
-  GRADIENT_DESCENT = 1;
-}
-
-// Protocol buffer representing the data used by the autotuning modeling
-// framework.
-message ModelProto {
-  // General representation of a node in the model.
-  message Node {
-    // Unique node ID.
-    int64 id = 1;
-
-    // Human-readable name of the node.
-    string name = 2;
-
-    // An indication whether autotuning is enabled for this node.
-    bool autotune = 3;
-
-    // The number of bytes stored in this node's buffer.
-    int64 buffered_bytes = 4;
-
-    // The number of elements stored in this node's buffer.
-    int64 buffered_elements = 5;
-
-    // The number of bytes consumed by the node.
-    int64 bytes_consumed = 6;
-
-    // The number of bytes produced by the node.
-    int64 bytes_produced = 7;
-
-    // The number of elements produced by the node.
-    int64 num_elements = 8;
-
-    // The aggregate processing time spent in this node.
-    int64 processing_time = 9;
-
-    // An indication whether this node records metrics about produced and
-    // consumed elements.
-    bool record_metrics = 10;
-
-    // Represents a node parameter.
-    message Parameter {
-      // Human-readable name of the parameter.
-      string name = 1;
-
-      // Identifies the model value of the parameter. This can be different
-      // from the actual value (e.g. during optimization search).
-      double value = 2;
-
-      // The actual value of the parameter.
-      double state_value = 3;
-
-      // Minimum value of the parameter.
-      double min = 4;
-
-      // Maximum value of the parameter.
-      double max = 5;
-
-      // Identifies whether the parameter should participate in autotuning.
-      bool tunable = 6;
-    }
-
-    // Parameters of this node.
-    repeated Parameter parameters = 11;
-
-    // Statistic of inputs processing time history.
-    double input_processing_time_sum = 12;
-    int64 input_processing_time_count = 13;
-
-    // Inputs of this node.
-    repeated Node inputs = 14;
-
-    // Class of this node.
-    NodeClass node_class = 15;
-
-    // Ratio of input to output elements. This is only used by KNOWN_RATIO
-    // and ASYNC_KNOWN_RATIO nodes.
-    double ratio = 16;
-
-    // Ratio identifies how many parallelism calls are introduced by one
-    // buffered element. This is only used by ASYNC_KNOWN_RATIO nodes.
-    double memory_ratio = 17;
-  }
-
-  // Output node of this model.
-  Node output = 1;
-
-  // Counter for node IDs of this model.
-  int64 id_counter = 2;
-
-  // Indicates whether the modeling framework should collect resource usage,
-  // e.g. CPU, memory.
-  bool collect_resource_usage = 3;
-
-  // Contains parameters of the model autotuning optimization.
-  message OptimizationParams {
-    // Algorithm used for autotuning optimization.
-    AutotuneAlgorithm algorithm = 1;
-
-    // Number of available logical threads.
-    int64 cpu_budget = 2;
-
-    // Amount of available memory in bytes.
-    int64 ram_budget = 3;
-
-    // Time between two consecutive `GetNext` calls to the iterator
-    // represented by the output node.
-    double model_input_time = 4;
-  }
-
-  OptimizationParams optimization_params = 4;
-}
diff --git a/ngraph/frontend/tensorflow/src/proto/node_def.proto b/ngraph/frontend/tensorflow/src/proto/node_def.proto
deleted file mode 100644
index 17d8ecf684b77d..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/node_def.proto
+++ /dev/null
@@ -1,88 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "attr_value.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "NodeProto";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/node_def_go_proto";
-
-message NodeDef {
-  // The name given to this operator. Used for naming inputs,
-  // logging, visualization, etc. Unique within a single GraphDef.
-  // Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_>./]*".
-  string name = 1;
-
-  // The operation name. There may be custom parameters in attrs.
-  // Op names starting with an underscore are reserved for internal use.
-  string op = 2;
-
-  // Each input is "node:src_output" with "node" being a string name and
-  // "src_output" indicating which output tensor to use from "node". If
-  // "src_output" is 0 the ":0" suffix can be omitted. Regular inputs
-  // may optionally be followed by control inputs that have the format
-  // "^node".
-  repeated string input = 3;
-
-  // A (possibly partial) specification for the device on which this
-  // node should be placed.
-  // The expected syntax for this string is as follows:
-  //
-  // DEVICE_SPEC ::= PARTIAL_SPEC
-  //
-  // PARTIAL_SPEC ::= ("/" CONSTRAINT) *
-  // CONSTRAINT ::= ("job:" JOB_NAME)
-  //              | ("replica:" [1-9][0-9]*)
-  //              | ("task:" [1-9][0-9]*)
-  //              | ("device:" [A-Za-z]* ":" ([1-9][0-9]* | "*") )
-  //
-  // Valid values for this string include:
-  // * "/job:worker/replica:0/task:1/device:GPU:3"  (full specification)
-  // * "/job:worker/device:GPU:3"                   (partial specification)
-  // * ""                                           (no specification)
-  //
-  // If the constraints do not resolve to a single device (or if this
-  // field is empty or not present), the runtime will attempt to
-  // choose a device automatically.
-  string device = 4;
-
-  // Operation-specific graph-construction-time configuration.
-  // Note that this should include all attrs defined in the
-  // corresponding OpDef, including those with a value matching
-  // the default -- this allows the default to change and makes
-  // NodeDefs easier to interpret on their own. However, if
-  // an attr with a default is not specified in this list, the
-  // default will be used.
-  // The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and
-  // one of the names from the corresponding OpDef's attr field).
-  // The values must have a type matching the corresponding OpDef
-  // attr's type field.
-  // TODO(josh11b): Add some examples here showing best practices.
-  map<string, AttrValue> attr = 5;
-
-  message ExperimentalDebugInfo {
-    // Opaque string inserted into error messages created by the runtime.
-    //
-    // This is intended to store the list of names of the nodes from the
-    // original graph that this node was derived. For example if this node,
-    // say C, was result of a fusion of 2 nodes A and B, then 'original_node'
-    // would be {A, B}. This information can be used to map errors
-    // originating at the current node to some top level source code.
-    repeated string original_node_names = 1;
-
-    // This is intended to store the list of names of the functions from the
-    // original graph that this node was derived. For example if this node,
-    // say C, was result of a fusion of node A in function FA and node B in
-    // function FB, then `original_funcs` would be {FA, FB}. If the node is
-    // in the top level graph, the `original_func` is empty. This
-    // information, with the `original_node_names` can be used to map errors
-    // originating at the current node to some top level source code.
-    repeated string original_func_names = 2;
-  }
-
-  // This stores debug information associated with the node.
-  ExperimentalDebugInfo experimental_debug_info = 6;
-}
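A minimal sketch showing the input and device string conventions above (node names "a", "b", "init", and "matmul_1" are illustrative):

    from tensorflow.core.framework import node_def_pb2, types_pb2

    n = node_def_pb2.NodeDef(
        name="matmul_1", op="MatMul",
        input=["a", "b:0", "^init"],  # two regular inputs, then a control input
        device="/job:worker/replica:0/task:1/device:GPU:3")
    n.attr["T"].type = types_pb2.DT_FLOAT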
diff --git a/ngraph/frontend/tensorflow/src/proto/op_def.proto b/ngraph/frontend/tensorflow/src/proto/op_def.proto
deleted file mode 100644
index 5e5412103a7e7b..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/op_def.proto
+++ /dev/null
@@ -1,174 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-option cc_enable_arenas = true;
-option java_outer_classname = "OpDefProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/op_def_go_proto";
-import "attr_value.proto";
-import "types.proto";
-import "resource_handle.proto";
-
-// Defines an operation. A NodeDef in a GraphDef specifies an Op by
-// using the "op" field which should match the name of an OpDef.
-// LINT.IfChange
-message OpDef {
-  // Op names starting with an underscore are reserved for internal use.
-  // Names should be CamelCase and match the regexp "[A-Z][a-zA-Z0-9>_]*".
-  string name = 1;
-
-  // For describing inputs and outputs.
-  message ArgDef {
-    // Name for the input/output. Should match the regexp "[a-z][a-z0-9_]*".
-    string name = 1;
-
-    // Human readable description.
-    string description = 2;
-
-    // Describes the type of one or more tensors that are accepted/produced
-    // by this input/output arg. The only legal combinations are:
-    // * For a single tensor: either the "type" field is set or the
-    //   "type_attr" field is set to the name of an attr with type "type".
-    // * For a sequence of tensors with the same type: the "number_attr"
-    //   field will be set to the name of an attr with type "int", and
-    //   either the "type" or "type_attr" field will be set as for
-    //   single tensors.
-    // * For a sequence of tensors, the "type_list_attr" field will be set
-    //   to the name of an attr with type "list(type)".
-    DataType type = 3;
-    string type_attr = 4;    // if specified, attr must have type "type"
-    string number_attr = 5;  // if specified, attr must have type "int"
-    // If specified, attr must have type "list(type)", and none of
-    // type, type_attr, and number_attr may be specified.
-    string type_list_attr = 6;
-
-    // The handle data for resource inputs.
-    repeated ResourceHandleProto.DtypeAndShape handle_data = 7;
-
-    // For inputs: if true, the inputs are required to be refs.
-    //   By default, inputs can be either refs or non-refs.
-    // For outputs: if true, outputs are refs, otherwise they are not.
-    bool is_ref = 16;
-  };
-
-  // Description of the input(s).
-  repeated ArgDef input_arg = 2;
-
-  // Description of the output(s).
-  repeated ArgDef output_arg = 3;
-
-  // Named control outputs for this operation. Useful only for composite
-  // operations (i.e. functions) which want to name different control
-  // outputs.
-  repeated string control_output = 20;
-
-  // Description of the graph-construction-time configuration of this
-  // Op. That is to say, this describes the attr fields that will
-  // be specified in the NodeDef.
-  message AttrDef {
-    // A descriptive name for the argument. May be used, e.g. by the
-    // Python client, as a keyword argument name, and so should match
-    // the regexp "[a-z][a-z0-9_]+".
-    string name = 1;
-
-    // One of the type names from attr_value.proto ("string", "list(string)",
-    // "int", etc.).
-    string type = 2;
-
-    // A reasonable default for this attribute if the user does not supply
-    // a value. If not specified, the user must supply a value.
-    AttrValue default_value = 3;
-
-    // Human-readable description.
-    string description = 4;
-
-    // TODO(josh11b): bool is_optional?
-
-    // --- Constraints ---
-    // These constraints are only in effect if specified. Default is no
-    // constraints.
-
-    // For type == "int", this is a minimum value. For "list(___)"
-    // types, this is the minimum length.
-    bool has_minimum = 5;
-    int64 minimum = 6;
-
-    // The set of allowed values. Has type that is the "list" version
-    // of the "type" field above (uses the "list" field of AttrValue).
-    // If type == "type" or "list(type)" above, then the "type" field
-    // of "allowed_values.list" has the set of allowed DataTypes.
-    // If type == "string" or "list(string)", then the "s" field of
-    // "allowed_values.list" has the set of allowed strings.
-    AttrValue allowed_values = 7;
-  }
-  repeated AttrDef attr = 4;
-
-  // Optional deprecation based on GraphDef versions.
-  OpDeprecation deprecation = 8;
-
-  // One-line human-readable description of what the Op does.
-  string summary = 5;
-
-  // Additional, longer human-readable description of what the Op does.
-  string description = 6;
-
-  // -------------------------------------------------------------------------
-  // Which optimizations this operation can participate in.
-
-  // True if the operation is commutative ("op(a,b) == op(b,a)" for all inputs)
-  bool is_commutative = 18;
-
-  // If is_aggregate is true, then this operation accepts N >= 2
-  // inputs and produces 1 output all of the same type. Should be
-  // associative and commutative, and produce output with the same
-  // shape as the input. The optimizer may replace an aggregate op
-  // taking input from multiple devices with a tree of aggregate ops
-  // that aggregate locally within each device (and possibly within
-  // groups of nearby devices) before communicating.
-  // TODO(josh11b): Implement that optimization.
-  bool is_aggregate = 16;  // for things like add
-
-  // Other optimizations go here, like
-  // can_alias_input, rewrite_when_output_unused, partitioning_strategy, etc.
-
-  // -------------------------------------------------------------------------
-  // Optimization constraints.
-
-  // Ops are marked as stateful if their behavior depends on some state
-  // beyond their input tensors (e.g. variable reading op) or if they have
-  // a side-effect (e.g. printing or asserting ops). Equivalently, stateless
-  // ops must always produce the same output for the same input and have
-  // no side-effects.
-  //
-  // By default Ops may be moved between devices. Stateful ops should
-  // either not be moved, or should only be moved if that state can also
-  // be moved (e.g. via some sort of save / restore).
-  // Stateful ops are guaranteed to never be optimized away by Common
-  // Subexpression Elimination (CSE).
-  bool is_stateful = 17;  // for things like variables, queue
-
-  // -------------------------------------------------------------------------
-  // Non-standard options.
-
-  // By default, all inputs to an Op must be initialized Tensors. Ops
-  // that may initialize tensors for the first time should set this
-  // field to true, to allow the Op to take an uninitialized Tensor as
-  // input.
-  bool allows_uninitialized_input = 19;  // for Assign, etc.
-};
-// LINT.ThenChange(
-//     https://www.tensorflow.org/code/tensorflow/core/framework/op_def_util.cc)
-
-// Information about version-dependent deprecation of an op
-message OpDeprecation {
-  // First GraphDef version at which the op is disallowed.
-  int32 version = 1;
-
-  // Explanation of why it was deprecated and what to use instead.
-  string explanation = 2;
-};
-
-// A collection of OpDefs
-message OpList {
-  repeated OpDef op = 1;
-};
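One way to see a populated OpDef is to query the op registry; the sketch below uses `tensorflow.python.framework.op_def_registry`, a private module whose `get()` accessor exists in current 2.x releases but is not a stable API:

    from tensorflow.python.framework import op_def_registry

    op = op_def_registry.get("MatMul")       # returns an OpDef, or None
    print([a.name for a in op.input_arg])    # e.g. ['a', 'b']
    print([a.name for a in op.attr])         # e.g. ['transpose_a', 'transpose_b', 'T']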
diff --git a/ngraph/frontend/tensorflow/src/proto/reader_base.proto b/ngraph/frontend/tensorflow/src/proto/reader_base.proto
deleted file mode 100644
index 6fae310248dd40..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/reader_base.proto
+++ /dev/null
@@ -1,18 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-option cc_enable_arenas = true;
-option java_outer_classname = "ReaderBaseProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/reader_base_go_proto";
-
-// For serializing and restoring the state of ReaderBase, see
-// reader_base.h for details.
-message ReaderBaseState {
-  int64 work_started = 1;
-  int64 work_finished = 2;
-  int64 num_records_produced = 3;
-  bytes current_work = 4;
-}
diff --git a/ngraph/frontend/tensorflow/src/proto/remote_fused_graph_execute_info.proto b/ngraph/frontend/tensorflow/src/proto/remote_fused_graph_execute_info.proto
deleted file mode 100644
index 2cdaa6719e0aab..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/remote_fused_graph_execute_info.proto
+++ /dev/null
@@ -1,48 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "graph.proto";
-import "tensor_shape.proto";
-import "types.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "RemoteFusedGraphExecuteInfoProto";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/remote_fused_graph_execute_info_go_proto";
-
-// Protocol buffer representing a handle to a tensorflow resource. Handles are
-// not valid across executions, but can be serialized back and forth from
-// within a single run.
-message RemoteFusedGraphExecuteInfo {
-  message TensorShapeTypeProto {
-    DataType dtype = 1;
-    TensorShapeProto shape = 2;
-  }
-
-  // Definition of remote graph
-  GraphDef remote_graph = 1;
-
-  // Remote fused graph input node name
-  repeated string graph_input_node_name = 2;
-
-  // Remote fused graph output node name
-  repeated string graph_output_node_name = 3;
-
-  // Executor's name
-  string executor_name = 4;
-
-  // Optional: Parameters given to the executor
-  bytes serialized_executor_parameters = 5;
-
-  // Optional: Default graph input tensor shape used to allocate memory
-  // before executing op
-  repeated TensorShapeTypeProto default_graph_input_tensor_shape = 6;
-
-  // Optional: Default graph output tensor shape used to allocate memory
-  // before executing op
-  // TODO(satok): Remove output tensor shape once shape information is
-  // stored in NodeDef
-  repeated TensorShapeTypeProto default_graph_output_tensor_shape = 7;
-}
diff --git a/ngraph/frontend/tensorflow/src/proto/resource_handle.proto b/ngraph/frontend/tensorflow/src/proto/resource_handle.proto
deleted file mode 100644
index e2bce956547810..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/resource_handle.proto
+++ /dev/null
@@ -1,45 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "tensor_shape.proto";
-import "types.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "ResourceHandle";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/resource_handle_go_proto";
-
-// Protocol buffer representing a handle to a tensorflow resource. Handles
-// are not valid across executions, but can be serialized back and forth from
-// within a single run.
-message ResourceHandleProto {
-  // Unique name for the device containing the resource.
-  string device = 1;
-
-  // Container in which this resource is placed.
-  string container = 2;
-
-  // Unique name of this resource.
-  string name = 3;
-
-  // Hash code for the type of the resource. Is only valid in the same device
-  // and in the same execution.
-  uint64 hash_code = 4;
-
-  // For debug-only, the name of the type pointed to by this handle, if
-  // available.
-  string maybe_type_name = 5;
-
-  // Protocol buffer representing a pair of (data type, tensor shape).
-  message DtypeAndShape {
-    DataType dtype = 1;
-    TensorShapeProto shape = 2;
-  }
-
-  // Data types and shapes for the underlying resource.
-  repeated DtypeAndShape dtypes_and_shapes = 6;
-
-  reserved 7;
-}
diff --git a/ngraph/frontend/tensorflow/src/proto/step_stats.proto b/ngraph/frontend/tensorflow/src/proto/step_stats.proto
deleted file mode 100644
index 7b97c3a38325de..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/step_stats.proto
+++ /dev/null
@@ -1,88 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "allocation_description.proto";
-import "tensor_description.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "StepStatsProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/step_stats_go_proto";
-
-// An allocation/de-allocation operation performed by the allocator.
-message AllocationRecord {
-  // The timestamp of the operation.
-  int64 alloc_micros = 1;
-  // Number of bytes allocated, or de-allocated if negative.
-  int64 alloc_bytes = 2;
-}
-
-message AllocatorMemoryUsed {
-  string allocator_name = 1;
-  // These are per-node allocator memory stats.
-  int64 total_bytes = 2;
-  int64 peak_bytes = 3;
-  // The bytes that are not deallocated.
-  int64 live_bytes = 4;
-  // The allocation and deallocation timeline.
-  repeated AllocationRecord allocation_records = 6;
-
-  // These are snapshots of the overall allocator memory stats.
-  // The number of live bytes currently allocated by the allocator.
-  int64 allocator_bytes_in_use = 5;
-}
-
-// Output sizes recorded for a single execution of a graph node.
-message NodeOutput {
-  int32 slot = 1;
-  TensorDescription tensor_description = 3;
-}
-
-// For memory tracking.
-message MemoryStats {
-  int64 temp_memory_size = 1;
-  int64 persistent_memory_size = 3;
-  repeated int64 persistent_tensor_alloc_ids = 5;
-
-  int64 device_temp_memory_size = 2 [deprecated = true];
-  int64 device_persistent_memory_size = 4 [deprecated = true];
-  repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];
-}
-
-// Time/size stats recorded for a single execution of a graph node.
-message NodeExecStats {
-  // TODO(tucker): Use some more compact form of node identity than
-  // the full string name. Either all processes should agree on a
-  // global id (cost_id?) for each node, or we should use a hash of
-  // the name.
-  string node_name = 1;
-  int64 all_start_micros = 2;
-  int64 op_start_rel_micros = 3;
-  int64 op_end_rel_micros = 4;
-  int64 all_end_rel_micros = 5;
-  repeated AllocatorMemoryUsed memory = 6;
-  repeated NodeOutput output = 7;
-  string timeline_label = 8;
-  int64 scheduled_micros = 9;
-  uint32 thread_id = 10;
-  repeated AllocationDescription referenced_tensor = 11;
-  MemoryStats memory_stats = 12;
-  int64 all_start_nanos = 13;
-  int64 op_start_rel_nanos = 14;
-  int64 op_end_rel_nanos = 15;
-  int64 all_end_rel_nanos = 16;
-  int64 scheduled_nanos = 17;
-}
-
-message DeviceStepStats {
-  string device = 1;
-  repeated NodeExecStats node_stats = 2;
-  // Its key is thread id.
-  map<uint32, string> thread_names = 3;
-}
-
-message StepStats {
-  repeated DeviceStepStats dev_stats = 1;
-}
diff --git a/ngraph/frontend/tensorflow/src/proto/summary.proto b/ngraph/frontend/tensorflow/src/proto/summary.proto
deleted file mode 100644
index ef1aa2e0d1c587..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/summary.proto
+++ /dev/null
@@ -1,149 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "tensor.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "SummaryProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/summary_go_proto";
-
-// Metadata associated with a series of Summary data
-message SummaryDescription {
-  // Hint on how plugins should process the data in this series.
-  // Supported values include "scalar", "histogram", "image", "audio"
-  string type_hint = 1;
-}
-
-// Serialization format for histogram module in
-// core/lib/histogram/histogram.h
-message HistogramProto {
-  double min = 1;
-  double max = 2;
-  double num = 3;
-  double sum = 4;
-  double sum_squares = 5;
-
-  // Parallel arrays encoding the bucket boundaries and the bucket values.
-  // bucket(i) is the count for the bucket i. The range for
-  // a bucket is:
-  //   i == 0:  -DBL_MAX .. bucket_limit(0)
-  //   i != 0:  bucket_limit(i-1) .. bucket_limit(i)
-  repeated double bucket_limit = 6 [packed = true];
-  repeated double bucket = 7 [packed = true];
-}
-
-// A SummaryMetadata encapsulates information on which plugins are able to
-// make use of a certain summary value.
-message SummaryMetadata {
-  message PluginData {
-    // The name of the plugin this data pertains to.
-    string plugin_name = 1;
-
-    // The content to store for the plugin. The best practice is for this to
-    // be a binary serialized protocol buffer.
-    bytes content = 2;
-  }
-
-  // Data that associates a summary with a certain plugin.
-  PluginData plugin_data = 1;
-
-  // Display name for viewing in TensorBoard.
-  string display_name = 2;
-
-  // Longform readable description of the summary sequence. Markdown
-  // supported.
-  string summary_description = 3;
-
-  // Class of data stored in this time series. Required for compatibility
-  // with TensorBoard's generic data facilities (`DataProvider`, et al.).
-  // This value imposes constraints on the dtype and shape of the
-  // corresponding tensor values. See `DataClass` docs for details.
-  DataClass data_class = 4;
-}
-
-enum DataClass {
-  // Unknown data class, used (implicitly) for legacy data. Will not be
-  // processed by data ingestion pipelines.
-  DATA_CLASS_UNKNOWN = 0;
-  // Scalar time series. Each `Value` for the corresponding tag must have
-  // `tensor` set to a rank-0 tensor of type `DT_FLOAT` (float32).
-  DATA_CLASS_SCALAR = 1;
-  // Tensor time series. Each `Value` for the corresponding tag must have
-  // `tensor` set. The tensor value is arbitrary, but should be small to
-  // accommodate direct storage in database backends: an upper bound of a
-  // few kilobytes is a reasonable rule of thumb.
-  DATA_CLASS_TENSOR = 2;
-  // Blob sequence time series. Each `Value` for the corresponding tag must
-  // have `tensor` set to a rank-1 tensor of bytestring dtype.
-  DATA_CLASS_BLOB_SEQUENCE = 3;
-}
-
-// A Summary is a set of named values to be displayed by the
-// visualizer.
-//
-// Summaries are produced regularly during training, as controlled by
-// the "summary_interval_secs" attribute of the training operation.
-// Summaries are also produced at the end of an evaluation.
-message Summary {
-  message Image {
-    // Dimensions of the image.
-    int32 height = 1;
-    int32 width = 2;
-    // Valid colorspace values are
-    //   1 - grayscale
-    //   2 - grayscale + alpha
-    //   3 - RGB
-    //   4 - RGBA
-    //   5 - DIGITAL_YUV
-    //   6 - BGRA
-    int32 colorspace = 3;
-    // Image data in encoded format. All image formats supported by
-    // image_codec::CoderUtil can be stored here.
-    bytes encoded_image_string = 4;
-  }
-
-  message Audio {
-    // Sample rate of the audio in Hz.
-    float sample_rate = 1;
-    // Number of channels of audio.
-    int64 num_channels = 2;
-    // Length of the audio in frames (samples per channel).
-    int64 length_frames = 3;
-    // Encoded audio data and its associated RFC 2045 content type (e.g.
-    // "audio/wav").
-    bytes encoded_audio_string = 4;
-    string content_type = 5;
-  }
-
-  message Value {
-    // This field is deprecated and will not be set.
-    string node_name = 7;
-
-    // Tag name for the data. Used by TensorBoard plugins to organize data.
-    // Tags are often organized by scope (which contains slashes to convey
-    // hierarchy). For example: foo/bar/0
-    string tag = 1;
-
-    // Contains metadata on the summary value such as which plugins may use
-    // it. Take note that many summary values may lack a metadata field.
-    // This is because the FileWriter only keeps a metadata object on the
-    // first summary value with a certain tag for each tag. TensorBoard then
-    // remembers which tags are associated with which plugins. This saves
-    // space.
-    SummaryMetadata metadata = 9;
-
-    // Value associated with the tag.
-    oneof value {
-      float simple_value = 2;
-      bytes obsolete_old_style_histogram = 3;
-      Image image = 4;
-      HistogramProto histo = 5;
-      Audio audio = 6;
-      TensorProto tensor = 8;
-    }
-  }
-
-  // Set of values for the summary.
-  repeated Value value = 1;
-}
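A minimal sketch of a scalar Summary, the kind of record that ends up in TensorBoard event files (the tag "train/loss" is illustrative):

    from tensorflow.core.framework import summary_pb2

    s = summary_pb2.Summary()
    s.value.add(tag="train/loss", simple_value=0.25)  # the simple_value oneof case
    payload = s.SerializeToString()                   # ready for an event writer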
diff --git a/ngraph/frontend/tensorflow/src/proto/tensor.proto b/ngraph/frontend/tensorflow/src/proto/tensor.proto
deleted file mode 100644
index 7a25c446e68772..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/tensor.proto
+++ /dev/null
@@ -1,96 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "resource_handle.proto";
-import "tensor_shape.proto";
-import "types.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "TensorProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_go_proto";
-
-// Protocol buffer representing a tensor.
-message TensorProto {
-  DataType dtype = 1;
-
-  // Shape of the tensor. TODO(touts): sort out the 0-rank issues.
-  TensorShapeProto tensor_shape = 2;
-
-  // Only one of the representations below is set, one of "tensor_contents"
-  // and the "xxx_val" attributes. We are not using oneof because, as oneofs
-  // cannot contain repeated fields, it would require another extra set of
-  // messages.
-
-  // Version number.
-  //
-  // In version 0, if the "repeated xxx" representations contain only one
-  // element, that element is repeated to fill the shape. This makes it easy
-  // to represent a constant Tensor with a single value.
-  int32 version_number = 3;
-
-  // Serialized raw tensor content from either Tensor::AsProtoTensorContent
-  // or memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This
-  // representation can be used for all tensor types. The purpose of this
-  // representation is to reduce serialization overhead during RPC calls by
-  // avoiding serialization of many repeated small items.
-  bytes tensor_content = 4;
-
-  // Type specific representations that make it easy to create tensor protos
-  // in all languages. Only the representation corresponding to "dtype" can
-  // be set. The values hold the flattened representation of the tensor in
-  // row major order.
-
-  // DT_HALF, DT_BFLOAT16. Note that since protobuf has no int16 type, we'll
-  // have some pointless zero padding for each value here.
-  repeated int32 half_val = 13 [packed = true];
-
-  // DT_FLOAT.
-  repeated float float_val = 5 [packed = true];
-
-  // DT_DOUBLE.
-  repeated double double_val = 6 [packed = true];
-
-  // DT_INT32, DT_INT16, DT_INT8, DT_UINT8.
-  repeated int32 int_val = 7 [packed = true];
-
-  // DT_STRING
-  repeated bytes string_val = 8;
-
-  // DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real
-  // and imaginary parts of i-th single precision complex.
-  repeated float scomplex_val = 9 [packed = true];
-
-  // DT_INT64
-  repeated int64 int64_val = 10 [packed = true];
-
-  // DT_BOOL
-  repeated bool bool_val = 11 [packed = true];
-
-  // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real
-  // and imaginary parts of i-th double precision complex.
-  repeated double dcomplex_val = 12 [packed = true];
-
-  // DT_RESOURCE
-  repeated ResourceHandleProto resource_handle_val = 14;
-
-  // DT_VARIANT
-  repeated VariantTensorDataProto variant_val = 15;
-
-  // DT_UINT32
-  repeated uint32 uint32_val = 16 [packed = true];
-
-  // DT_UINT64
-  repeated uint64 uint64_val = 17 [packed = true];
-}
-
-// Protocol buffer representing the serialization format of DT_VARIANT
-// tensors.
-message VariantTensorDataProto {
-  // Name of the type of objects being serialized.
-  string type_name = 1;
-  // Portions of the object that are not Tensors.
-  bytes metadata = 2;
-  // Tensors contained within objects being serialized.
-  repeated TensorProto tensors = 3;
-}
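TensorFlow's public helpers round-trip this message; a small sketch:

    import numpy as np
    import tensorflow as tf

    tp = tf.make_tensor_proto(np.arange(6, dtype=np.float32).reshape(2, 3))
    # Dense payloads travel in tensor_content; tiny constants may instead
    # use the typed float_val list.
    roundtrip = tf.make_ndarray(tp)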
diff --git a/ngraph/frontend/tensorflow/src/proto/tensor_description.proto b/ngraph/frontend/tensorflow/src/proto/tensor_description.proto
deleted file mode 100644
index c162c0e2385533..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/tensor_description.proto
+++ /dev/null
@@ -1,24 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-import "allocation_description.proto";
-import "tensor_shape.proto";
-import "types.proto";
-
-option cc_enable_arenas = true;
-option java_outer_classname = "TensorDescriptionProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_description_go_proto";
-
-message TensorDescription {
-  // Data type of tensor elements
-  DataType dtype = 1;
-
-  // Shape of the tensor.
-  TensorShapeProto shape = 2;
-
-  // Information about the size and allocator used for the data
-  AllocationDescription allocation_description = 4;
-}
diff --git a/ngraph/frontend/tensorflow/src/proto/tensor_shape.proto b/ngraph/frontend/tensorflow/src/proto/tensor_shape.proto
deleted file mode 100644
index 45d5b78ecbbc4c..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/tensor_shape.proto
+++ /dev/null
@@ -1,46 +0,0 @@
-// Protocol buffer representing the shape of tensors.
-
-syntax = "proto3";
-option cc_enable_arenas = true;
-option java_outer_classname = "TensorShapeProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_shape_go_proto";
-
-package tensorflow;
-
-// Dimensions of a tensor.
-message TensorShapeProto {
-  // One dimension of the tensor.
-  message Dim {
-    // Size of the tensor in that dimension.
-    // This value must be >= -1, but values of -1 are reserved for "unknown"
-    // shapes (values of -1 mean "unknown" dimension). Certain wrappers
-    // that work with TensorShapeProto may fail at runtime when deserializing
-    // a TensorShapeProto containing a dim value of -1.
-    int64 size = 1;
-
-    // Optional name of the tensor dimension.
-    string name = 2;
-  };
-
-  // Dimensions of the tensor, such as {"input", 30}, {"output", 40}
-  // for a 30 x 40 2D tensor. If an entry has size -1, this
-  // corresponds to a dimension of unknown size. The names are
-  // optional.
-  //
-  // The order of entries in "dim" matters: It indicates the layout of the
-  // values in the tensor in-memory representation.
-  //
-  // The first entry in "dim" is the outermost dimension used to layout the
-  // values, the last entry is the innermost dimension. This matches the
-  // in-memory layout of RowMajor Eigen tensors.
-  //
-  // If "dim.size()" > 0, "unknown_rank" must be false.
-  repeated Dim dim = 2;
-
-  // If true, the number of dimensions in the shape is unknown.
-  //
-  // If true, "dim.size()" must be 0.
-  bool unknown_rank = 3;
-};
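The -1/unknown_rank conventions above are visible through `tf.TensorShape`; a short sketch:

    import tensorflow as tf

    p = tf.TensorShape([None, 3]).as_proto()
    assert p.dim[0].size == -1 and p.dim[1].size == 3    # -1 encodes "unknown"
    assert tf.TensorShape(None).as_proto().unknown_rank  # rank itself unknown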
diff --git a/ngraph/frontend/tensorflow/src/proto/tensor_slice.proto b/ngraph/frontend/tensorflow/src/proto/tensor_slice.proto
deleted file mode 100644
index 4463658d391e2f..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/tensor_slice.proto
+++ /dev/null
@@ -1,39 +0,0 @@
-// Protocol buffer representing slices of a tensor
-
-syntax = "proto3";
-
-package tensorflow;
-
-option cc_enable_arenas = true;
-option java_outer_classname = "TensorSliceProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_slice_go_proto";
-
-// Can only be interpreted if you know the corresponding TensorShape.
-message TensorSliceProto {
-  // Extent of the slice in one dimension.
-  message Extent {
-    // Either both or no attributes must be set. When no attribute is set
-    // means: All data in that dimension.
-
-    // Start index of the slice, starting at 0.
-    int64 start = 1;
-
-    // Length of the slice: if the length is missing or -1 we will
-    // interpret this as "everything in this dimension". We use
-    // "oneof" to preserve information about whether the length is
-    // present without changing the serialization format from the
-    // prior proto2 version of this proto.
-    oneof has_length {
-      int64 length = 2;
-    }
-  }
-
-  // Extent of the slice in all tensor dimensions.
-  //
-  // Must have one entry for each of the dimension of the tensor that this
-  // slice belongs to. The order of sizes is the same as the order of
-  // dimensions in the TensorShape.
-  repeated Extent extent = 1;
-}
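The `oneof has_length` trick in the removed TensorSliceProto is subtle: it keeps the old proto2 wire format while letting a reader distinguish an absent length from a zero length. A sketch, under the same `tensorflow` package assumption:

from tensorflow.core.framework import tensor_slice_pb2

# Rows [2, 5) of dimension 0; everything in dimension 1 (no attributes set).
slice_proto = tensor_slice_pb2.TensorSliceProto(extent=[
    tensor_slice_pb2.TensorSliceProto.Extent(start=2, length=3),
    tensor_slice_pb2.TensorSliceProto.Extent(),
])
assert slice_proto.extent[0].HasField("length")
assert not slice_proto.extent[1].HasField("length")  # "all data" in this dim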
diff --git a/ngraph/frontend/tensorflow/src/proto/types.proto b/ngraph/frontend/tensorflow/src/proto/types.proto
deleted file mode 100644
index e5f33036dcde55..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/types.proto
+++ /dev/null
@@ -1,89 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-option cc_enable_arenas = true;
-option java_outer_classname = "TypesProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/types_go_proto";
-
-// (== suppress_warning documentation-presence ==)
-// LINT.IfChange
-enum DataType {
-  // Not a legal value for DataType. Used to indicate a DataType field
-  // has not been set.
-  DT_INVALID = 0;
-
-  // Data types that all computation devices are expected to be
-  // capable to support.
-  DT_FLOAT = 1;
-  DT_DOUBLE = 2;
-  DT_INT32 = 3;
-  DT_UINT8 = 4;
-  DT_INT16 = 5;
-  DT_INT8 = 6;
-  DT_STRING = 7;
-  DT_COMPLEX64 = 8;  // Single-precision complex
-  DT_INT64 = 9;
-  DT_BOOL = 10;
-  DT_QINT8 = 11;     // Quantized int8
-  DT_QUINT8 = 12;    // Quantized uint8
-  DT_QINT32 = 13;    // Quantized int32
-  DT_BFLOAT16 = 14;  // Float32 truncated to 16 bits. Only for cast ops.
-  DT_QINT16 = 15;    // Quantized int16
-  DT_QUINT16 = 16;   // Quantized uint16
-  DT_UINT16 = 17;
-  DT_COMPLEX128 = 18;  // Double-precision complex
-  DT_HALF = 19;
-  DT_RESOURCE = 20;
-  DT_VARIANT = 21;  // Arbitrary C++ data types
-  DT_UINT32 = 22;
-  DT_UINT64 = 23;
-
-  // Do not use!  These are only for parameters. Every enum above
-  // should have a corresponding value below (verified by types_test).
-  DT_FLOAT_REF = 101;
-  DT_DOUBLE_REF = 102;
-  DT_INT32_REF = 103;
-  DT_UINT8_REF = 104;
-  DT_INT16_REF = 105;
-  DT_INT8_REF = 106;
-  DT_STRING_REF = 107;
-  DT_COMPLEX64_REF = 108;
-  DT_INT64_REF = 109;
-  DT_BOOL_REF = 110;
-  DT_QINT8_REF = 111;
-  DT_QUINT8_REF = 112;
-  DT_QINT32_REF = 113;
-  DT_BFLOAT16_REF = 114;
-  DT_QINT16_REF = 115;
-  DT_QUINT16_REF = 116;
-  DT_UINT16_REF = 117;
-  DT_COMPLEX128_REF = 118;
-  DT_HALF_REF = 119;
-  DT_RESOURCE_REF = 120;
-  DT_VARIANT_REF = 121;
-  DT_UINT32_REF = 122;
-  DT_UINT64_REF = 123;
-}
-// LINT.ThenChange(
-//     https://www.tensorflow.org/code/tensorflow/c/tf_datatype.h,
-//     https://www.tensorflow.org/code/tensorflow/go/tensor.go,
-//     https://www.tensorflow.org/code/tensorflow/core/framework/tensor.cc,
-//     https://www.tensorflow.org/code/tensorflow/core/framework/types.h,
-//     https://www.tensorflow.org/code/tensorflow/core/framework/types.cc,
-//     https://www.tensorflow.org/code/tensorflow/python/framework/dtypes.py,
-//     https://www.tensorflow.org/code/tensorflow/python/framework/function.py)
-
-// For identifying the underlying type of a variant. For variants, the types
-// listed here are a subset of the types in the variant type registry,
-// corresponding to commonly used variants which must occasionally be
-// special-cased.
-enum SpecializedType {
-  // Invalid/unknown specialized type.
-  ST_INVALID = 0;
-  // "tensorflow::TensorList" in the variant type registry.
-  ST_TENSOR_LIST = 1;
-  // "tensorflow::data::Optional" in the variant type registry.
-  ST_OPTIONAL = 2;
-}
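The invariant stated in the removed DataType enum (every value type has a reference twin at value + 100, verified upstream by types_test) can be checked directly, assuming the `tensorflow` pip package:

from tensorflow.core.framework import types_pb2

assert types_pb2.DT_FLOAT == 1 and types_pb2.DT_FLOAT_REF == 101
for name in ("DT_FLOAT", "DT_INT32", "DT_STRING", "DT_VARIANT"):
    assert types_pb2.DataType.Value(name + "_REF") == types_pb2.DataType.Value(name) + 100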
diff --git a/ngraph/frontend/tensorflow/src/proto/variable.proto b/ngraph/frontend/tensorflow/src/proto/variable.proto
deleted file mode 100644
index 09d7fb3d45c95a..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/variable.proto
+++ /dev/null
@@ -1,84 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-option cc_enable_arenas = true;
-option java_outer_classname = "VariableProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/variable_go_proto";
-
-// Indicates when a distributed variable will be synced.
-enum VariableSynchronization {
-  // `AUTO`: Indicates that the synchronization will be determined by the
-  // current `DistributionStrategy` (eg. With `MirroredStrategy` this would be
-  // `ON_WRITE`).
-  VARIABLE_SYNCHRONIZATION_AUTO = 0;
-  // `NONE`: Indicates that there will only be one copy of the variable, so
-  // there is no need to sync.
-  VARIABLE_SYNCHRONIZATION_NONE = 1;
-  // `ON_WRITE`: Indicates that the variable will be updated across devices
-  // every time it is written.
-  VARIABLE_SYNCHRONIZATION_ON_WRITE = 2;
-  // `ON_READ`: Indicates that the variable will be aggregated across devices
-  // when it is read (eg. when checkpointing or when evaluating an op that uses
-  // the variable).
-  VARIABLE_SYNCHRONIZATION_ON_READ = 3;
-}
-
-// Indicates how a distributed variable will be aggregated.
-enum VariableAggregation {
-  // `NONE`: This is the default, giving an error if you use a
-  // variable-update operation with multiple replicas.
-  VARIABLE_AGGREGATION_NONE = 0;
-  // `SUM`: Add the updates across replicas.
-  VARIABLE_AGGREGATION_SUM = 1;
-  // `MEAN`: Take the arithmetic mean ("average") of the updates across
-  // replicas.
-  VARIABLE_AGGREGATION_MEAN = 2;
-  // `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same
-  // update, but we only want to perform the update once. Used, e.g., for the
-  // global step counter.
-  VARIABLE_AGGREGATION_ONLY_FIRST_REPLICA = 3;
-}
-
-// Protocol buffer representing a Variable.
-message VariableDef {
-  // Name of the variable tensor.
-  string variable_name = 1;
-
-  // Name of the tensor holding the variable's initial value.
-  string initial_value_name = 6;
-
-  // Name of the initializer op.
-  string initializer_name = 2;
-
-  // Name of the snapshot tensor.
-  string snapshot_name = 3;
-
-  // Support for saving variables as slices of a larger variable.
-  SaveSliceInfoDef save_slice_info_def = 4;
-
-  // Whether to represent this as a ResourceVariable.
-  bool is_resource = 5;
-
-  // Whether this variable should be trained.
-  bool trainable = 7;
-
-  // Indicates when a distributed variable will be synced.
-  VariableSynchronization synchronization = 8;
-
-  // Indicates how a distributed variable will be aggregated.
-  VariableAggregation aggregation = 9;
-}
-
-message SaveSliceInfoDef {
-  // Name of the full variable of which this is a slice.
-  string full_name = 1;
-  // Shape of the full variable.
-  repeated int64 full_shape = 2;
-  // Offset of this variable into the full variable.
-  repeated int64 var_offset = 3;
-  // Shape of this variable.
-  repeated int64 var_shape = 4;
-}
diff --git a/ngraph/frontend/tensorflow/src/proto/versions.proto b/ngraph/frontend/tensorflow/src/proto/versions.proto
deleted file mode 100644
index 2cca6e37d325dc..00000000000000
--- a/ngraph/frontend/tensorflow/src/proto/versions.proto
+++ /dev/null
@@ -1,33 +0,0 @@
-syntax = "proto3";
-
-package tensorflow;
-
-option cc_enable_arenas = true;
-option java_outer_classname = "VersionsProtos";
-option java_multiple_files = true;
-option java_package = "org.tensorflow.framework";
-option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/versions_go_proto";
-
-// Version information for a piece of serialized data
-//
-// There are different types of versions for each type of data
-// (GraphDef, etc.), but they all have the same common shape
-// described here.
-//
-// Each consumer has "consumer" and "min_producer" versions (specified
-// elsewhere). A consumer is allowed to consume this data if
-//
-//   producer >= min_producer
-//   consumer >= min_consumer
-//   consumer not in bad_consumers
-//
-message VersionDef {
-  // The version of the code that produced this data.
-  int32 producer = 1;
-
-  // Any consumer below this version is not allowed to consume this data.
-  int32 min_consumer = 2;
-
-  // Specific consumer versions which are disallowed (e.g. due to bugs).
-  repeated int32 bad_consumers = 3;
-}
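The consumption rule quoted in the removed VersionDef comment translates almost verbatim into code. `can_consume` below is a hypothetical helper for illustration, not a TensorFlow API; `consumer` and `min_producer` come from the consumer's own build:

from tensorflow.core.framework import versions_pb2

def can_consume(version, consumer, min_producer):
    # producer >= min_producer, consumer >= min_consumer,
    # and consumer not in bad_consumers.
    return (version.producer >= min_producer
            and consumer >= version.min_consumer
            and consumer not in version.bad_consumers)

v = versions_pb2.VersionDef(producer=27, min_consumer=12, bad_consumers=[15])
assert can_consume(v, consumer=20, min_producer=9)
assert not can_consume(v, consumer=15, min_producer=9)  # disallowed consumer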
diff --git a/ngraph/frontend/tensorflow/src/tensorflow.cpp b/ngraph/frontend/tensorflow/src/tensorflow.cpp
deleted file mode 100644
index 5ee5b7b3240d3b..00000000000000
--- a/ngraph/frontend/tensorflow/src/tensorflow.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2021 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-
-#include <fstream>
-
-#include <ngraph/ngraph.hpp>
-#include <ngraph/pass/constant_folding.hpp>
-#include <ngraph/pass/manager.hpp>
-
-#include "graph.pb.h"
-
-#include "../include/tensorflow_frontend/tensorflow.hpp"
-
-#include "ngraph_builder.h"
-
-using namespace google;
-
-using namespace ngraph::frontend;
-
-InputModelTensorflow::InputModelTensorflow (const std::string& _path) : path(_path)
-{
-    std::ifstream pb_stream(path, std::ios::binary);
-    graph_def = std::make_shared<tensorflow::GraphDef>();
-    std::cout << "[ INFO ] Model Parsed: " << graph_def->ParseFromIstream(&pb_stream) << std::endl;
-    std::cout << "[ INFO ] Loaded model contains " << graph_def->node_size() << " nodes." << std::endl;
-}
-
-std::vector<Place::Ptr> InputModelTensorflow::getInputs () const {
-// TODO: Cache results
-    std::vector<Place::Ptr> result;
-    for (size_t i = 0; i < graph_def->node_size(); ++i) {
-        if (graph_def->node(i).op() == "Placeholder")
-            result.push_back(std::make_shared<PlaceTensorflow>(graph_def->node(i).name()));
-    }
-    return result;
-}
-
-void InputModelTensorflow::setPartialShape (Place::Ptr place, const ngraph::PartialShape& pshape) {
-    auto place_tf = std::dynamic_pointer_cast<PlaceTensorflow>(place);
-    partialShapes[place_tf->name] = pshape;
-}
-
-std::shared_ptr<ngraph::Function> ngraph::frontend::FrontEndTensorflow::convert (InputModel::Ptr model) const
-{
-    auto model_tf = std::dynamic_pointer_cast<InputModelTensorflow>(model);
-    std::cerr << "[ INFO ] FrontEndTensorflow::convert invoked\n";
-
-    std::shared_ptr<ngraph::Function> f;
-    std::cerr << "[ STATUS ] TranslateGraph return: " << tensorflow::ngraph_bridge::Builder::TranslateGraph(
-        model_tf->partialShapes, {}, model_tf->graph_def.get(), "here_should_be_a_graph_name", f) << "\n";
-    std::cerr << "[ INFO ] Resulting nGraph function contains " << f->get_ops().size() << " nodes." << std::endl;
-    std::cerr << "[ STATUS ] Running Transpose Sinking transformation\n";
-
-    ngraph::pass::Manager manager;
-    manager.register_pass<ngraph::pass::TransposeSinking>();
-    manager.register_pass<ngraph::pass::ConstantFolding>();
-    manager.run_passes(f);
-
-    std::cerr << "[ INFO ] Resulting nGraph function contains " << f->get_ops().size() << " nodes." << std::endl;
-    return f;
-}
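What the deleted loader did is compact enough to restate as a Python sketch: parse a frozen GraphDef from disk and treat every Placeholder node as a model input. Assumes the `tensorflow` pip package; the function name and path are illustrative:

from tensorflow.core.framework import graph_pb2

def placeholder_names(path):
    graph_def = graph_pb2.GraphDef()
    with open(path, "rb") as f:
        graph_def.ParseFromString(f.read())
    # Mirrors InputModelTensorflow::getInputs(): Placeholders are the inputs.
    return [node.name for node in graph_def.node if node.op == "Placeholder"]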
diff --git a/ngraph/python/CMakeLists.txt b/ngraph/python/CMakeLists.txt
index d876ccd3182988..d51d836f57e249 100644
--- a/ngraph/python/CMakeLists.txt
+++ b/ngraph/python/CMakeLists.txt
@@ -82,10 +82,7 @@ file(GLOB_RECURSE SOURCES src/pyngraph/*.cpp)
 pybind11_add_module(_${PROJECT_NAME} MODULE ${SOURCES})
 
 target_include_directories(_${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src")
-target_include_directories(_${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../frontend/frontend_manager/include")
-if (NGRAPH_TF_FRONTEND_ENABLE)
-    target_link_libraries(_${PROJECT_NAME} PRIVATE ngraph::tensorflow_frontend)
-endif()
+target_include_directories(_${PROJECT_NAME} PRIVATE "${FRONTEND_INCLUDE_PATH}")
 
 if (NGRAPH_ONNX_IMPORT_ENABLE)
     target_link_libraries(_${PROJECT_NAME} PRIVATE ngraph::onnx_importer)
diff --git a/ngraph/python/setup.py b/ngraph/python/setup.py
index dfbb03fad98c00..e90cc66d6d173d 100644
--- a/ngraph/python/setup.py
+++ b/ngraph/python/setup.py
@@ -39,6 +39,7 @@
     "ngraph.impl.op",
     "ngraph.impl.op.util",
     "ngraph.impl.passes",
+    "ngraph.frontend",
 ]
 
 data_files = []
diff --git a/ngraph/python/src/ngraph/__init__.py b/ngraph/python/src/ngraph/__init__.py
index 518c12c94bc262..f51c5cea130510 100644
--- a/ngraph/python/src/ngraph/__init__.py
+++ b/ngraph/python/src/ngraph/__init__.py
@@ -11,25 +11,23 @@
 except DistributionNotFound:
     __version__ = "0.0.0.dev0"
 
-from ngraph.impl import Node
-from ngraph.impl import PartialShape
+
 from ngraph.impl import Dimension
 from ngraph.impl import Function
-
+from ngraph.impl import Node
+from ngraph.impl import PartialShape
+from ngraph.frontend import FrontEnd
+from ngraph.frontend import FrontEndCapabilities
+from ngraph.frontend import FrontEndManager
+from ngraph.frontend import GeneralFailure
 from ngraph.frontend import NotImplementedFailure
 from ngraph.frontend import InitializationFailure
+from ngraph.frontend import InputModel
 from ngraph.frontend import OpConversionFailure
 from ngraph.frontend import OpValidationFailure
-from ngraph.frontend import GeneralFailure
-from ngraph.frontend import FrontEndManager
-from ngraph.frontend import FrontEndCapabilities
-from ngraph.frontend import FrontEnd
-from ngraph.frontend import InputModel
 from ngraph.frontend import Place
-
 from ngraph.helpers import function_from_cnn
 from ngraph.helpers import function_to_cnn
-
 from ngraph.opset7 import absolute
 from ngraph.opset7 import absolute as abs
 from ngraph.opset7 import acos
diff --git a/ngraph/python/src/ngraph/frontend/__init__.py b/ngraph/python/src/ngraph/frontend/__init__.py
index f8d2deb98a717a..0ea21ad7c8827b 100644
--- a/ngraph/python/src/ngraph/frontend/__init__.py
+++ b/ngraph/python/src/ngraph/frontend/__init__.py
@@ -6,6 +6,8 @@
 Low level wrappers for the FrontEnd c++ api.
 """
 
+# flake8: noqa
+
 # main classes
 from _pyngraph import FrontEndManager
 from _pyngraph import FrontEnd
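Taken together, the packaging and import changes above expose one public entry point for frontends. The intended call sequence, mirrored by the tests below, looks roughly like this ("mock_py" is the test-only frontend and is only present where the mock plugin is built):

from ngraph.frontend import FrontEndManager

fem = FrontEndManager()
if "mock_py" in fem.get_available_front_ends():
    fe = fem.load_by_framework(framework="mock_py")
    model = fe.load_from_file("abc.bin")
    function = fe.convert(model)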
diff --git a/ngraph/python/tests/test_ngraph/test_frontendmanager.py b/ngraph/python/tests/test_ngraph/test_frontendmanager.py
index 2406f6ff4b7962..d90c5924816ea3 100644
--- a/ngraph/python/tests/test_ngraph/test_frontendmanager.py
+++ b/ngraph/python/tests/test_ngraph/test_frontendmanager.py
@@ -2,22 +2,33 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import numpy as np
-from ngraph.frontend import FrontEndManager, FrontEndCapabilities, InitializationFailure
+import pytest
+
 from ngraph import PartialShape
+from ngraph.frontend import FrontEndCapabilities, FrontEndManager, InitializationFailure
 from ngraph.utils.types import get_element_type
-from pybind_mock_frontend import get_fe_stat, get_mdl_stat, get_place_stat
+
+mock_available = True
+try:
+    from pybind_mock_frontend import get_fe_stat, get_mdl_stat, get_place_stat
+except Exception:
+    print("No mock frontend available")
+    mock_available = False
 
 # FrontEndManager shall be initialized and destroyed after all tests finished
 # This is because destroy of FrontEndManager will unload all plugins, no objects shall exist after this
 fem = FrontEndManager()
 
+mock_needed = pytest.mark.skipif(not mock_available,
+                                 reason="mock fe is not available")
+
 # ---------- FrontEnd tests ---------------
+@mock_needed
 def test_load_by_framework_caps():
     frontEnds = fem.get_available_front_ends()
     assert frontEnds is not None
-    assert 'mock_py' in frontEnds
+    assert "mock_py" in frontEnds
     caps = [FrontEndCapabilities.DEFAULT,
             FrontEndCapabilities.CUT,
             FrontEndCapabilities.NAMES,
@@ -28,7 +39,7 @@ def test_load_by_framework_caps():
         stat = get_fe_stat(fe)
         assert cap == stat.load_flags
     for i in range(len(caps) - 1):
-        for j in range(i+1, len(caps)):
+        for j in range(i + 1, len(caps)):
             assert caps[i] != caps[j]
 
 
@@ -40,18 +51,20 @@ def test_load_by_unknown_framework():
     except InitializationFailure as exc:
         print(exc)
     else:
-        assert False
+        raise AssertionError("Unexpected exception.")
 
 
+@mock_needed
 def test_load_from_file():
     fe = fem.load_by_framework(framework="mock_py")
     assert fe is not None
     model = fe.load_from_file("abc.bin")
     assert model is not None
     stat = get_fe_stat(fe)
-    assert 'abc.bin' in stat.load_paths
+    assert "abc.bin" in stat.load_paths
 
 
+@mock_needed
 def test_convert_model():
     fe = fem.load_by_framework(framework="mock_py")
     assert fe is not None
@@ -62,6 +75,7 @@ def test_convert_model():
     assert stat.convert_model == 1
 
 
+@mock_needed
 def test_convert_partially():
     fe = fem.load_by_framework(framework="mock_py")
     assert fe is not None
@@ -74,6 +88,7 @@ def test_convert_partially():
     assert stat.convert == 1
 
 
+@mock_needed
 def test_decode_and_normalize():
     fe = fem.load_by_framework(framework="mock_py")
     assert fe is not None
@@ -88,12 +103,14 @@ def test_decode_and_normalize():
 
 
 # --------InputModel tests-----------------
+@mock_needed
 def init_model():
     fe = fem.load_by_framework(framework="mock_py")
     model = fe.load_from_file(path="")
     return model
 
 
+@mock_needed
 def test_model_get_inputs():
     model = init_model()
     for i in range(1, 10):
@@ -102,6 +119,7 @@ def test_model_get_inputs():
     assert stat.get_inputs == i
 
 
+@mock_needed
 def test_model_get_outputs():
     model = init_model()
     for i in range(1, 10):
@@ -110,6 +128,7 @@ def test_model_get_outputs():
     assert stat.get_outputs == i
 
 
+@mock_needed
 def test_model_get_place_by_tensor_name():
     model = init_model()
     for i in range(1, 10):
@@ -120,6 +139,7 @@ def test_model_get_place_by_tensor_name():
     assert stat.lastArgString == name
 
 
+@mock_needed
 def test_model_get_place_by_operation_name():
     model = init_model()
     for i in range(1, 10):
@@ -130,28 +150,31 @@ def test_model_get_place_by_operation_name():
 
 
+@mock_needed
 def test_model_get_place_by_operation_and_input_port():
     model = init_model()
     for i in range(1, 10):
         name = str(i)
-        model.get_place_by_operation_and_input_port(operationName=name, inputPortIndex=i*2)
+        model.get_place_by_operation_and_input_port(operationName=name, inputPortIndex=i * 2)
         stat = get_mdl_stat(model)
         assert stat.get_place_by_operation_and_input_port == i
         assert stat.lastArgString == name
         assert stat.lastArgInt == i * 2
 
 
+@mock_needed
 def test_model_get_place_by_operation_and_output_port():
     model = init_model()
     for i in range(1, 10):
         name = str(i)
-        model.get_place_by_operation_and_output_port(operationName=name, outputPortIndex=i*2)
+        model.get_place_by_operation_and_output_port(operationName=name, outputPortIndex=i * 2)
         stat = get_mdl_stat(model)
         assert stat.get_place_by_operation_and_output_port == i
         assert stat.lastArgString == name
         assert stat.lastArgInt == i * 2
 
 
+@mock_needed
 def test_model_set_name_for_tensor():
     model = init_model()
     place = model.get_place_by_tensor_name(tensorName="")
@@ -162,6 +185,7 @@ def test_model_set_name_for_tensor():
     assert stat.lastArgPlace == place
 
 
+@mock_needed
 def test_model_add_name_for_tensor():
     model = init_model()
     place = model.get_place_by_tensor_name(tensorName="")
@@ -172,6 +196,7 @@ def test_model_add_name_for_tensor():
     assert stat.lastArgPlace == place
 
 
+@mock_needed
 def test_model_set_name_for_operation():
     model = init_model()
     place = model.get_place_by_operation_name(operationName="")
@@ -182,6 +207,7 @@ def test_model_set_name_for_operation():
     assert stat.lastArgPlace == place
 
 
+@mock_needed
 def test_model_free_name_for_tensor():
     model = init_model()
     model.free_name_for_tensor(name="2222")
@@ -190,6 +216,7 @@ def test_model_free_name_for_tensor():
     assert stat.lastArgString == "2222"
 
 
+@mock_needed
 def test_model_free_name_for_operation():
     model = init_model()
     model.free_name_for_operation(name="3333")
@@ -198,6 +225,7 @@ def test_model_free_name_for_operation():
     assert stat.lastArgString == "3333"
 
 
+@mock_needed
 def test_model_set_name_for_dimension():
     model = init_model()
     place = model.get_place_by_operation_name(operationName="")
@@ -209,6 +237,7 @@ def test_model_set_name_for_dimension():
     assert stat.lastArgPlace == place
 
 
+@mock_needed
 def test_model_cut_and_add_new_input():
     model = init_model()
     place = model.get_place_by_operation_name("")
@@ -224,6 +253,7 @@ def test_model_cut_and_add_new_input():
     assert stat.lastArgPlace == place
 
 
+@mock_needed
 def test_model_cut_and_add_new_output():
     model = init_model()
     place = model.get_place_by_operation_name("")
@@ -239,6 +269,7 @@ def test_model_cut_and_add_new_output():
     assert stat.lastArgPlace == place
 
 
+@mock_needed
 def test_model_add_output():
     model = init_model()
     place = model.get_place_by_operation_name("")
@@ -249,6 +280,7 @@ def test_model_add_output():
     assert stat.lastArgPlace == place
 
 
+@mock_needed
 def test_model_remove_output():
     model = init_model()
     place = model.get_place_by_operation_name("")
@@ -258,6 +290,7 @@ def test_model_remove_output():
     assert stat.lastArgPlace == place
 
 
+@mock_needed
 def test_model_set_partial_shape():
     model = init_model()
     place = model.get_place_by_tensor_name(tensorName="")
@@ -269,6 +302,7 @@ def test_model_set_partial_shape():
     assert stat.lastArgPartialShape == test_shape
 
 
+@mock_needed
 def test_model_get_partial_shape():
     model = init_model()
     place = model.get_place_by_tensor_name(tensorName="")
@@ -279,6 +313,7 @@ def test_model_get_partial_shape():
     assert stat.lastArgPlace == place
 
 
+@mock_needed
 def test_model_override_all_inputs():
     model = init_model()
     place1 = model.get_place_by_tensor_name(tensorName="p1")
@@ -291,6 +326,7 @@ def test_model_override_all_inputs():
     assert stat.lastArgInputPlaces[1] == place2
 
 
+@mock_needed
 def test_model_override_all_outputs():
     model = init_model()
     place1 = model.get_place_by_tensor_name(tensorName="p1")
@@ -303,6 +339,7 @@ def test_model_override_all_outputs():
     assert stat.lastArgOutputPlaces[1] == place2
 
 
+@mock_needed
 def test_model_extract_subgraph():
     model = init_model()
     place1 = model.get_place_by_tensor_name(tensorName="p1")
@@ -320,6 +357,7 @@ def test_model_extract_subgraph():
     assert stat.lastArgOutputPlaces[1] == place4
 
 
+@mock_needed
 def test_model_set_element_type():
     model = init_model()
     place = model.get_place_by_tensor_name(tensorName="")
@@ -331,6 +369,7 @@ def test_model_set_element_type():
 
 
 # ----------- Place test ------------
+@mock_needed
 def init_place():
     fe = fem.load_by_framework(framework="mock_py")
     model = fe.load_from_file(path="")
@@ -338,6 +377,7 @@ def init_place():
     return model, place
 
 
+@mock_needed
 def test_place_is_input():
     _, place = init_place()
     assert place.is_input() is not None
@@ -345,6 +385,7 @@ def test_place_is_input():
     assert stat.is_input == 1
 
 
+@mock_needed
 def test_place_is_output():
     _, place = init_place()
     assert place.is_output() is not None
@@ -352,6 +393,7 @@ def test_place_is_output():
     assert stat.is_output == 1
 
 
+@mock_needed
 def test_place_get_names():
     _, place = init_place()
     assert place.get_names() is not None
@@ -359,6 +401,7 @@ def test_place_get_names():
     assert stat.get_names == 1
 
 
+@mock_needed
 def test_place_is_equal():
     model, place = init_place()
     place2 = model.get_place_by_tensor_name("2")
@@ -368,6 +411,7 @@ def test_place_is_equal():
     assert stat.lastArgPlace == place2
 
 
+@mock_needed
 def test_place_is_equal_data():
     model, place = init_place()
     place2 = model.get_place_by_tensor_name("2")
@@ -377,15 +421,7 @@ def test_place_is_equal_data():
     assert stat.lastArgPlace == place2
 
 
-def test_place_is_equal_data():
-    model, place = init_place()
-    place2 = model.get_place_by_tensor_name("2")
-    assert place.is_equal_data(other=place2) is not None
-    stat = get_place_stat(place)
-    assert stat.is_equal_data == 1
-    assert stat.lastArgPlace == place2
-
-
+@mock_needed
 def test_place_get_consuming_operations():
     _, place = init_place()
     assert place.get_consuming_operations(outputPortIndex=22) is not None
@@ -398,6 +434,7 @@ def test_place_get_consuming_operations():
     assert stat.lastArgInt == -1
 
 
+@mock_needed
 def test_place_get_target_tensor():
     _, place = init_place()
     assert place.get_target_tensor(outputPortIndex=22) is not None
@@ -410,6 +447,7 @@ def test_place_get_target_tensor():
     assert stat.lastArgInt == -1
 
 
+@mock_needed
 def test_place_get_producing_operation():
     _, place = init_place()
     assert place.get_producing_operation(inputPortIndex=22) is not None
@@ -422,6 +460,7 @@ def test_place_get_producing_operation():
     assert stat.lastArgInt == -1
 
 
+@mock_needed
 def test_place_get_producing_port():
     _, place = init_place()
     assert place.get_producing_port() is not None
@@ -429,6 +468,7 @@ def test_place_get_producing_port():
     assert stat.get_producing_port == 1
 
 
+@mock_needed
 def test_place_get_input_port():
     _, place = init_place()
     assert place.get_input_port() is not None
@@ -441,6 +481,7 @@ def test_place_get_input_port():
     assert stat.lastArgInt == 22
 
 
+@mock_needed
 def test_place_get_input_port2():
     _, place = init_place()
    assert place.get_input_port(inputName="abc") is not None
@@ -455,6 +496,7 @@ def test_place_get_input_port2():
     assert stat.lastArgString == "abcd"
 
 
+@mock_needed
 def test_place_get_output_port():
     _, place = init_place()
     assert place.get_output_port() is not None
@@ -467,6 +509,7 @@ def test_place_get_output_port():
     assert stat.lastArgInt == 22
 
 
+@mock_needed
 def test_place_get_output_port2():
     _, place = init_place()
     assert place.get_output_port(outputName="abc") is not None
@@ -481,6 +524,7 @@ def test_place_get_output_port2():
     assert stat.lastArgString == "abcd"
 
 
+@mock_needed
 def test_place_get_consuming_ports():
     _, place = init_place()
     assert place.get_consuming_ports() is not None
@@ -488,6 +532,7 @@ def test_place_get_consuming_ports():
     assert stat.get_consuming_ports == 1
 
 
+@mock_needed
 def test_place_get_source_tensor():
     _, place = init_place()
     assert place.get_source_tensor() is not None