From 5f8de7ef12e89873708ed0647c8bee03dbe69b4a Mon Sep 17 00:00:00 2001 From: Evgenya Stepyreva Date: Wed, 24 May 2023 11:24:56 +0400 Subject: [PATCH 1/5] Custom attribute reading and While operation support --- .../frontend/tensorflow_lite/node_context.hpp | 65 +++++++-- .../tensorflow_lite/src/CMakeLists.txt | 1 + .../src/decoder_flatbuffer.cpp | 43 +++++- .../tensorflow_lite/src/decoder_flatbuffer.h | 36 ++++- .../tensorflow_lite/src/frontend.cpp | 21 ++- .../src/graph_iterator_flatbuffer.cpp | 132 +++++++++++------- .../src/graph_iterator_flatbuffer.hpp | 17 ++- .../tensorflow_lite/src/input_model.cpp | 61 +++++--- .../tensorflow_lite/src/input_model.hpp | 3 + .../tensorflow_lite/src/op/while.cpp | 105 ++++++++++++++ .../tensorflow_lite/src/op_table.cpp | 2 +- .../tensorflow_lite/src/op_table.hpp | 30 ++-- .../tensorflow_lite/src/tensor_lite_place.hpp | 6 +- .../tensorflow_lite_tests/test_tfl_While.py | 30 ++++ 14 files changed, 441 insertions(+), 111 deletions(-) create mode 100644 src/frontends/tensorflow_lite/src/op/while.cpp create mode 100644 tests/layer_tests/tensorflow_lite_tests/test_tfl_While.py diff --git a/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/node_context.hpp b/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/node_context.hpp index 0d165ea0ec0738..95baea1cdc9830 100644 --- a/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/node_context.hpp +++ b/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/node_context.hpp @@ -12,6 +12,9 @@ namespace ov { namespace frontend { namespace tensorflow_lite { +using SubGraphFuncs = std::vector()>>; +using SubGraphFuncsPtr = std::shared_ptr; + /// Keep necessary data for a single node in the original FW graph to facilitate /// conversion process in the rules code. 
class TENSORFLOW_LITE_API NodeContext : public ov::frontend::NodeContext { @@ -20,28 +23,68 @@ class TENSORFLOW_LITE_API NodeContext : public ov::frontend::NodeContext { NodeContext(const std::shared_ptr& decoder, const OutputVector& inputs) : ov::frontend::NodeContext(decoder->get_op_type()), m_decoder(decoder), - m_inputs(inputs) {} + m_inputs(inputs), + m_subgraph_functions(nullptr) {} + + NodeContext(const std::shared_ptr& decoder, + const OutputVector& inputs, + const SubGraphFuncsPtr& subgraph_functions) + : ov::frontend::NodeContext(decoder->get_op_type()), + m_decoder(decoder), + m_inputs(inputs), + m_subgraph_functions(subgraph_functions) {} + + /// \brief Returns a number of inputs + size_t get_input_size() const override { + return m_inputs.size(); + } + + /// \brief Returns exactly one input with a given idx; throws if there is no inputs or + /// there are more than one input + Output get_input(int port_index) const override { + return m_inputs.at(port_index); + } /// Detects if there is at least one input attached with a given name bool has_input(const size_t& port_index) const { return port_index < m_inputs.size(); } - Output get_input(int port_index) const override { - return m_inputs.at(port_index); + /// \brief Get a node name + const std::string& get_name() const override { + return m_decoder->get_op_name(); } OutputVector get_inputs() const { return m_inputs; } - size_t get_input_size() const override { - return m_inputs.size(); + /// \brief Returns node attribute by name as ov::Any. 
+ ov::Any get_attribute_as_any(const std::string& name) const override { + return m_decoder->get_attribute(name); } - /// \brief Get a node name - const std::string& get_name() const override { - return m_decoder->get_op_name(); + /// \brief Returns the number of sub-graphs that can be enumerated with get_subgraph + size_t get_subgraph_size() const override { + if (!m_subgraph_functions) + return 0; + return m_subgraph_functions->size(); + } + + /// \brief Returns subgraph converted on demand by the first access + /// If there is no query for specific sub-graph it shouldn't be converted + /// idx should be in range 0..get_subgraph_size()-1 + std::shared_ptr get_subgraph(int idx) const override { + FRONT_END_GENERAL_CHECK(m_subgraph_functions != nullptr, + "Requested subgraph while subgraphs are not configured"); + int size = static_cast(get_subgraph_size()); + FRONT_END_GENERAL_CHECK(idx >= 0 && idx < size, + "Incorrect subgraph idx ", + idx, + ". There are only ", + get_subgraph_size(), + "subgraphs currently"); + return m_subgraph_functions->operator[](idx)(); } /// \brief Get a decoder @@ -49,14 +92,10 @@ class TENSORFLOW_LITE_API NodeContext : public ov::frontend::NodeContext { return m_decoder; } - ov::Any get_attribute_as_any(const std::string& name) const override { - auto res = m_decoder->get_attribute(name); - return res; - } - private: std::shared_ptr m_decoder; const OutputVector& m_inputs; + SubGraphFuncsPtr m_subgraph_functions; }; using CreatorFunction = std::function; diff --git a/src/frontends/tensorflow_lite/src/CMakeLists.txt b/src/frontends/tensorflow_lite/src/CMakeLists.txt index 9658844f9b5709..5841829e65a74d 100644 --- a/src/frontends/tensorflow_lite/src/CMakeLists.txt +++ b/src/frontends/tensorflow_lite/src/CMakeLists.txt @@ -1,6 +1,7 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # +add_definitions(-DFLATBUFFERS_LOCALE_INDEPENDENT=0) ov_add_frontend(NAME tensorflow_lite LINKABLE_FRONTEND diff --git 
a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp index 74a7398c28fd34..75b253820c654c 100644 --- a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp +++ b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp @@ -87,9 +87,46 @@ std::shared_ptr DecoderFlatBuffe ov::frontend::tensorflow_lite::get_ov_type(tensor->type()), names, ov::frontend::tensorflow_lite::get_quantization(tensor->quantization()), - tensor_info.input_idx, - tensor_info.output_idx, - (tensor_info.buffer->data() ? tensor_info.buffer->data()->data() : nullptr)); + (tensor_info.buffer && tensor_info.buffer->data() ? tensor_info.buffer->data()->data() : nullptr)); +} + +ov::Any get_value_as_ov_any(const flexbuffers::Reference& value) { +#define CASE_MACRO(fbt, as_stmt) \ + case flexbuffers::fbt: \ + return {value.as_stmt()}; + switch (value.GetType()) { + CASE_MACRO(FBT_INT, AsInt32) + CASE_MACRO(FBT_INDIRECT_INT, AsInt32) + CASE_MACRO(FBT_UINT, AsUInt32) + CASE_MACRO(FBT_INDIRECT_UINT, AsUInt32) + CASE_MACRO(FBT_FLOAT, AsFloat) + CASE_MACRO(FBT_INDIRECT_FLOAT, AsFloat) + CASE_MACRO(FBT_STRING, AsString) + CASE_MACRO(FBT_BOOL, AsBool) + case flexbuffers::FBT_NULL: + case flexbuffers::FBT_MAP: + case flexbuffers::FBT_KEY: + case flexbuffers::FBT_VECTOR: + case flexbuffers::FBT_VECTOR_INT: + case flexbuffers::FBT_VECTOR_UINT: + case flexbuffers::FBT_VECTOR_FLOAT: + case flexbuffers::FBT_VECTOR_KEY: + case flexbuffers::FBT_VECTOR_STRING_DEPRECATED: + case flexbuffers::FBT_VECTOR_INT2: + case flexbuffers::FBT_VECTOR_UINT2: + case flexbuffers::FBT_VECTOR_FLOAT2: + case flexbuffers::FBT_VECTOR_INT3: + case flexbuffers::FBT_VECTOR_UINT3: + case flexbuffers::FBT_VECTOR_FLOAT3: + case flexbuffers::FBT_VECTOR_INT4: + case flexbuffers::FBT_VECTOR_UINT4: + case flexbuffers::FBT_VECTOR_FLOAT4: + case flexbuffers::FBT_BLOB: + case flexbuffers::FBT_VECTOR_BOOL: + case flexbuffers::FBT_MAX_TYPE: + return {}; + } + return {}; } } // namespace 
tensorflow_lite diff --git a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h index 65381fcbad3af6..2c605e6d7469af 100644 --- a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h +++ b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h @@ -11,6 +11,7 @@ #include "graph_iterator_flatbuffer.hpp" #include "openvino/frontend/tensorflow_lite/visibility.hpp" #include "openvino/frontend/decoder.hpp" +#include "flatbuffers/flexbuffers.h" namespace ov { namespace frontend { @@ -19,6 +20,8 @@ namespace tensorflow_lite { class TensorLitePlace; struct TensorInfo; +ov::Any get_value_as_ov_any(const flexbuffers::Reference& value); + class DecoderFlatBuffer : public ov::frontend::DecoderBase { public: explicit DecoderFlatBuffer(const tflite::Operator* node_def, @@ -44,7 +47,17 @@ class DecoderFlatBuffer : public ov::frontend::DecoderBase { } ov::Any get_attribute(const std::string& name) const override { - return {}; + const auto opts = m_node_def->custom_options(); + if (opts == nullptr) + return {}; + const flexbuffers::Map& m = flexbuffers::GetRoot(opts->Data(), opts->size()).AsMap(); + flexbuffers::Reference value; + try { + value = m[name]; + } catch (...) 
{ + return {}; + } + return get_value_as_ov_any(value); } size_t get_input_size() const override; @@ -68,15 +81,32 @@ class DecoderFlatBuffer : public ov::frontend::DecoderBase { std::shared_ptr decode_output_tensor(size_t idx, const ov::frontend::InputModel& model) const; -private: +protected: std::shared_ptr decode_tensor( - const ov::frontend::tensorflow_lite::TensorInfo& tensor_info, const InputModel& model) const; + const ov::frontend::tensorflow_lite::TensorInfo& tensor_info, const ov::frontend::InputModel& model) const; const tflite::Operator* m_node_def; std::string m_type, m_name; std::map m_input_info, m_output_info; }; +class DecoderFlatBufferTensors : public DecoderFlatBuffer { +public: + DecoderFlatBufferTensors(const TensorInfo &info, int64_t input_idx, int64_t output_idx) : + DecoderFlatBuffer(nullptr, "", "", {}, {}), m_info{info}, m_input_idx(input_idx), m_output_idx(output_idx) {}; + + std::shared_ptr decode_tensor(const ov::frontend::InputModel& model) const { + auto tensor = DecoderFlatBuffer::decode_tensor(m_info, model); + tensor->set_input_index(m_input_idx); + tensor->set_output_index(m_output_idx); + return tensor; + } + +private: + TensorInfo m_info; + int64_t m_input_idx, m_output_idx; +}; + } // namespace tensorflow_lite } // namespace frontend } // namespace ov diff --git a/src/frontends/tensorflow_lite/src/frontend.cpp b/src/frontends/tensorflow_lite/src/frontend.cpp index 52931f8986104a..4dd9826b7dafd4 100644 --- a/src/frontends/tensorflow_lite/src/frontend.cpp +++ b/src/frontends/tensorflow_lite/src/frontend.cpp @@ -168,6 +168,22 @@ void FrontEnd::translate_graph(const InputModel::Ptr& model, const auto& model_lite = std::dynamic_pointer_cast(model); FRONT_END_GENERAL_CHECK(model_lite, "nullptr for InputModel is given for translation into OV Model"); + auto subgraphs_as_input_models = model_lite->get_subgraphs(); + auto input_to_ov_model = [&](const std::shared_ptr& in_model) { + auto simple_lambda = [&]() -> std::shared_ptr { + 
std::shared_ptr model; + if (in_model) + translate_graph(in_model, fail_fast, no_conversion, model); + return model; + }; + return simple_lambda; + }; + auto submodel_translation_functions = std::make_shared()>>>(); + submodel_translation_functions->reserve(subgraphs_as_input_models.size()); + for (const auto& subgraph : subgraphs_as_input_models) { + submodel_translation_functions->emplace_back(input_to_ov_model(subgraph)); + } + const auto& translate_map = no_conversion ? ov::frontend::tensorflow_lite::TranslatorDictionaryType{} : m_op_translators; @@ -220,7 +236,7 @@ void FrontEnd::translate_graph(const InputModel::Ptr& model, FRONT_END_OP_CONVERSION_CHECK(translate_map.count(decoder->get_op_type()), "No translator found for " + decoder->get_op_type() + " node."); auto op_fun = &(translate_map.at(decoder->get_op_type())); - ov::frontend::tensorflow_lite::NodeContext node_context(decoder, inputs); + ov::frontend::tensorflow_lite::NodeContext node_context(decoder, inputs, submodel_translation_functions); ov_outputs = (*op_fun)(node_context); } catch (...) 
{ if (fail_fast) { @@ -250,6 +266,9 @@ void FrontEnd::translate_graph(const InputModel::Ptr& model, tensor != nullptr, "Inputs of ov::frontend::tensorflow_lite::InputModel must be TensorLitePlace instances"); const auto name = tensor->get_names()[0]; + if (!all_tensor_values.count(name)) { + continue; + } const auto& output_value = all_tensor_values[name]; const auto& result = std::make_shared(output_value); auto input = result->output(0); diff --git a/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp b/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp index 5bdba29f355c6d..b8cb4c39f0053b 100644 --- a/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp +++ b/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.cpp @@ -25,63 +25,91 @@ GraphIteratorFlatBuffer::GraphIteratorFlatBuffer(const std::string& path) { model_file.close(); m_model = tflite::GetModel(m_data.data()); - const auto subgraphs = m_model->subgraphs(); - FRONT_END_GENERAL_CHECK(subgraphs->size() == 1, - "Number of sub-graphs in the model is ", - subgraphs->size(), - ". 
Supported number of sub-graphs is 1."); - const auto graph = *subgraphs->begin(); - const auto operators = graph->operators(); - m_nodes = {operators->begin(), operators->end()}; + auto sub_graphs = m_model->subgraphs(); + m_subgraphs = {sub_graphs->begin(), sub_graphs->end()}; + m_graph = m_subgraphs[0]; + const auto operators = m_graph->operators(); + auto operators_vec = std::vector{operators->begin(), operators->end()}; + + m_nodes.assign(operators_vec.begin(), operators_vec.end()); + auto outputs = m_graph->outputs(); + auto inputs = m_graph->inputs(); + m_nodes.insert(m_nodes.begin(), outputs->begin(), outputs->end()); + m_nodes.insert(m_nodes.begin(), inputs->begin(), inputs->end()); +} + +size_t GraphIteratorFlatBuffer::get_subgraph_size() const { + return m_subgraphs.size(); +} + +std::shared_ptr GraphIteratorFlatBuffer::get_subgraph(const size_t& idx) const { + FRONT_END_GENERAL_CHECK(m_subgraphs.size() > idx, "There is no subgraph with idx ", idx); + auto iterator = std::make_shared(); + iterator->node_index = 0; + iterator->m_model = m_model; + iterator->m_subgraphs = {}; // TODO: check if we need to pass all sub-graphs here (while in a while situation) + iterator->m_graph = m_subgraphs[idx]; + const auto operators = iterator->m_graph->operators(); + auto operators_vec = std::vector{operators->begin(), operators->end()}; + iterator->m_nodes.assign(operators_vec.begin(), operators_vec.end()); + auto outputs = iterator->m_graph->outputs(); + auto inputs = iterator->m_graph->inputs(); + iterator->m_nodes.insert(iterator->m_nodes.begin(), outputs->begin(), outputs->end()); + iterator->m_nodes.insert(iterator->m_nodes.begin(), inputs->begin(), inputs->end()); + return iterator; } std::shared_ptr GraphIteratorFlatBuffer::get_decoder() const { - auto inputs_vec = (*m_model->subgraphs()->begin())->inputs(); - auto outputs_vec = (*m_model->subgraphs()->begin())->outputs(); - auto inputs = std::set{inputs_vec->begin(), inputs_vec->end()}; - auto outputs = 
std::set{outputs_vec->begin(), outputs_vec->end()}; - - auto buffers = m_model->buffers(); - auto tensors = m_model->subgraphs()->begin()->tensors(); - - std::map input_info = {}, output_info = {}; - size_t i = 0; - for (auto input : *m_nodes[node_index]->inputs()) { - if (input == -1) { - continue; + auto any_item = m_nodes[node_index]; + bool is_op = any_item.is(); + FRONT_END_GENERAL_CHECK(is_op || any_item.is()); + auto tensors = m_graph->tensors(); + + if (is_op) { + auto node = m_nodes[node_index].as(); + auto buffers = m_model->buffers(); + + std::map input_info = {}, output_info = {}; + size_t i = 0; + for (auto input : *node->inputs()) { + if (input == -1) + continue; + auto buffer = (*buffers)[(*tensors)[input]->buffer()]; + auto tensor = (*tensors)[input]; + input_info[i++] = TensorInfo{tensor, buffer}; } - auto buffer = (*buffers)[(*tensors)[input]->buffer()]; - auto is_input = inputs.find(input) != inputs.end(); - int64_t input_idx = - !is_input ? -1 : std::find(inputs_vec->begin(), inputs_vec->end(), input) - inputs_vec->begin(); - auto is_output = outputs.find(input) != outputs.end(); - int64_t output_idx = - !is_output ? -1 : std::find(outputs_vec->begin(), outputs_vec->end(), input) - outputs_vec->begin(); - input_info[i++] = TensorInfo{input_idx, output_idx, (*tensors)[input], buffer}; - } - i = 0; - // If we have any m_nodes[node_index]->intermediates() than trigger internal smth? no - // put all the info in Decoder as a sub-graph! 
+ i = 0; + for (auto output : *node->outputs()) { + auto buffer = (*buffers)[(*tensors)[output]->buffer()]; + auto tensor = (*tensors)[output]; + output_info[i++] = TensorInfo{tensor, buffer}; + } + auto op_codes = m_model->operator_codes(); + auto operator_code = (*op_codes)[node->opcode_index()]; + std::string type; + if (operator_code->deprecated_builtin_code() < + tflite::BuiltinOperator::BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES) { + type = tflite::EnumNamesBuiltinOperator()[operator_code->deprecated_builtin_code()]; + } else { + type = tflite::EnumNamesBuiltinOperator()[operator_code->builtin_code()]; + } + if (type == "CUSTOM") { + type = operator_code->custom_code()->str(); + } + return std::make_shared(node, type, std::to_string(node_index), input_info, output_info); + } else { + auto tensor_id = m_nodes[node_index].as(); + auto tensor = (*tensors)[tensor_id]; + auto info = TensorInfo{tensor, nullptr}; + auto inputs = m_graph->inputs(); + auto outputs = m_graph->outputs(); - for (auto output : *m_nodes[node_index]->outputs()) { - auto buffer = (*buffers)[(*tensors)[output]->buffer()]; - auto is_output = outputs.find(output) != outputs.end(); + auto input_it = std::find(inputs->begin(), inputs->end(), tensor_id); + auto output_it = std::find(outputs->begin(), outputs->end(), tensor_id); + int64_t input_idx = + input_it == inputs->end() ? -1 : static_cast(std::distance(inputs->begin(), input_it)); int64_t output_idx = - !is_output ? 
-1 : std::find(outputs_vec->begin(), outputs_vec->end(), output) - outputs_vec->begin(); - output_info[i++] = TensorInfo{-1, output_idx, (*tensors)[output], buffer}; - } - auto op_codes = m_model->operator_codes(); - auto operator_code = (*op_codes)[m_nodes[node_index]->opcode_index()]; - std::string type; - if (operator_code->deprecated_builtin_code() < - tflite::BuiltinOperator::BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES) { - type = tflite::EnumNamesBuiltinOperator()[operator_code->deprecated_builtin_code()]; - } else { - type = tflite::EnumNamesBuiltinOperator()[operator_code->builtin_code()]; + output_it == outputs->end() ? -1 : static_cast(std::distance(outputs->begin(), output_it)); + return std::make_shared(info, input_idx, output_idx); } - return std::make_shared(m_nodes[node_index], - type, - std::to_string(node_index), - input_info, - output_info); } diff --git a/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.hpp b/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.hpp index 9430bc158d7ef7..51689c997260d4 100644 --- a/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.hpp +++ b/src/frontends/tensorflow_lite/src/graph_iterator_flatbuffer.hpp @@ -6,6 +6,7 @@ #include +#include "openvino/core/any.hpp" #include "openvino/frontend/exception.hpp" #include "openvino/util/file_util.hpp" #include "schema_generated.h" @@ -16,7 +17,6 @@ namespace tensorflow_lite { class DecoderFlatBuffer; struct TensorInfo { - int64_t input_idx, output_idx; const tflite::Tensor* tensor; const tflite::Buffer* buffer; }; @@ -24,10 +24,13 @@ struct TensorInfo { class GraphIteratorFlatBuffer { size_t node_index = 0; std::vector m_data; - std::vector m_nodes; - const tflite::Model* m_model; + std::vector m_nodes; + const tflite::Model* m_model{}; + std::vector m_subgraphs; + const tflite::SubGraph* m_graph{}; public: + GraphIteratorFlatBuffer() = default; explicit GraphIteratorFlatBuffer(const std::string& path); #ifdef 
OPENVINO_ENABLE_UNICODE_PATH_SUPPORT @@ -58,6 +61,14 @@ class GraphIteratorFlatBuffer { /// Return Decoder for the current node that iterator points to std::shared_ptr get_decoder() const; + + /// \brief Returns the number of sub-graphs that can be enumerated with get_subgraph + size_t get_subgraph_size() const; + + /// \brief Returns iterator for a subgraph created on demand + /// If there is no query for specific sub-graph iterator shouldn't be created + /// idx should be in range 0..get_subgraph_size()-1 + std::shared_ptr get_subgraph(const size_t& idx) const; }; } // namespace tensorflow_lite diff --git a/src/frontends/tensorflow_lite/src/input_model.cpp b/src/frontends/tensorflow_lite/src/input_model.cpp index 274b573fab9e2c..d3b1317e8d554d 100644 --- a/src/frontends/tensorflow_lite/src/input_model.cpp +++ b/src/frontends/tensorflow_lite/src/input_model.cpp @@ -59,6 +59,8 @@ class InputModel::InputModelTFLiteImpl { void extract_subgraph(const std::vector& inputs, const std::vector& outputs); + std::vector> get_subgraphs(); + private: void load_model(); void clean_up(); @@ -72,7 +74,7 @@ class InputModel::InputModelTFLiteImpl { std::shared_ptr m_graph_iterator; const ov::frontend::InputModel& m_input_model; - + std::vector> m_subgraphs; std::shared_ptr m_telemetry; }; @@ -82,6 +84,20 @@ void InputModel::InputModelTFLiteImpl::load_model() { m_op_places.reserve(m_graph_iterator->size()); for (; !m_graph_iterator->is_end(); m_graph_iterator->next()) { const auto& decoder = m_graph_iterator->get_decoder(); + + if (auto tensor_decoder = std::dynamic_pointer_cast(decoder)) { + auto tensor_place = tensor_decoder->decode_tensor(m_input_model); + FRONT_END_GENERAL_CHECK(tensor_place->is_input() || tensor_place->is_output()); + auto name = tensor_place->get_names()[0]; + if (m_tensor_places.count(name) == 0) { + m_tensor_places[name] = tensor_place; + if (tensor_place->is_input()) + m_inputs.push_back(tensor_place); + if (tensor_place->is_output()) + 
m_outputs.push_back(tensor_place); + } + continue; + } m_op_places.push_back(std::make_shared(m_input_model, decoder)); if (m_telemetry) { @@ -91,12 +107,9 @@ void InputModel::InputModelTFLiteImpl::load_model() { for (size_t i = 0; i < decoder->get_input_size(); ++i) { auto place = decoder->decode_input_tensor(i, m_input_model); auto name = place->get_names()[0]; - if (m_tensor_places.find(name) == m_tensor_places.end()) { + if (m_tensor_places.count(name) == 0) { m_tensor_places[name] = place; - if (place->is_input()) { - // will reorder by index later - m_inputs.push_back(place); - } else if (auto data = place->get_data()) { + if (auto data = place->get_data()) { auto constant = ov::op::v0::Constant::create(place->get_element_type(), place->get_partial_shape().to_shape(), data); @@ -124,13 +137,8 @@ void InputModel::InputModelTFLiteImpl::load_model() { for (size_t i = 0; i < decoder->get_output_size(); ++i) { auto place = decoder->decode_output_tensor(i, m_input_model); auto name = place->get_names()[0]; - if (m_tensor_places.find(name) == m_tensor_places.end()) { + if (m_tensor_places.count(name) == 0) m_tensor_places[name] = place; - if (place->is_output()) { - // will reorder by index later - m_outputs.push_back(place); - } - } } } @@ -162,6 +170,17 @@ void InputModel::InputModelTFLiteImpl::load_model() { m_telemetry->send_event("op_count", "tflite_" + op.first, static_cast(op.second)); } } + + size_t subgraph_size = m_graph_iterator->get_subgraph_size(); + if (subgraph_size > 1) { + m_subgraphs.reserve(subgraph_size); + m_subgraphs.push_back(nullptr); // no main graph + for (size_t i = 1; i < subgraph_size; ++i) { + m_subgraphs.push_back( + std::make_shared(m_graph_iterator->get_subgraph(i), + m_telemetry)); + } + } } InputModel::InputModelTFLiteImpl::InputModelTFLiteImpl(const GraphIteratorFlatBuffer::Ptr& graph_iterator, @@ -292,10 +311,11 @@ void InputModel::InputModelTFLiteImpl::override_all_outputs(const std::vector& inputs, const std::vector& outputs) { 
- for (const auto& input_place : m_inputs) { - auto input_lite_place = std::dynamic_pointer_cast(input_place); - FRONT_END_GENERAL_CHECK(input_lite_place != nullptr, "Input Model has unexpected place as input"); - input_lite_place->set_input_index(-1); + for (const auto& output_place : m_outputs) { + auto output_lite_place = + std::dynamic_pointer_cast(output_place); + FRONT_END_GENERAL_CHECK(output_lite_place != nullptr, "Input Model has unexpected place as output"); + output_lite_place->set_output_index(-1); } m_inputs.clear(); for (const auto& input_place : inputs) { @@ -318,6 +338,11 @@ void InputModel::InputModelTFLiteImpl::clean_up() { // TODO: remove all the unnecessary tensors and operations. Could be postponed as TF Lite is OOB type of FrontEnd } +std::vector> +InputModel::InputModelTFLiteImpl::get_subgraphs() { + return m_subgraphs; +} + InputModel::InputModel(const GraphIteratorFlatBuffer::Ptr& graph_iterator, const std::shared_ptr& telemetry) : _impl{std::make_shared(graph_iterator, *this, telemetry)} {} @@ -392,6 +417,10 @@ void InputModel::extract_subgraph(const std::vector& i _impl->extract_subgraph(inputs, outputs); } +std::vector> InputModel::get_subgraphs() const { + return _impl->get_subgraphs(); +} + } // namespace tensorflow_lite } // namespace frontend } // namespace ov diff --git a/src/frontends/tensorflow_lite/src/input_model.hpp b/src/frontends/tensorflow_lite/src/input_model.hpp index 990cf41efc7aae..792267dcdbe7a6 100644 --- a/src/frontends/tensorflow_lite/src/input_model.hpp +++ b/src/frontends/tensorflow_lite/src/input_model.hpp @@ -24,6 +24,9 @@ class InputModel : public ov::frontend::InputModel { std::map> get_tensor_places() const; std::map> get_tensor_values() const; + ////// Subgraph Handling ///// + std::vector> get_subgraphs() const; + public: explicit InputModel(const ov::frontend::tensorflow_lite::GraphIteratorFlatBuffer::Ptr& graph_iterator, const std::shared_ptr& telemetry = {}); diff --git 
a/src/frontends/tensorflow_lite/src/op/while.cpp b/src/frontends/tensorflow_lite/src/op/while.cpp new file mode 100644 index 00000000000000..0d671f2c1d6dfb --- /dev/null +++ b/src/frontends/tensorflow_lite/src/op/while.cpp @@ -0,0 +1,105 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "common_op_table.hpp" +#include "op_translation_utils.hpp" +#include "utils.hpp" + +using namespace std; +using namespace ov::opset11; + +namespace ov { +namespace frontend { +namespace tensorflow_lite { +namespace op { + +OutputVector while_op(const ov::frontend::tensorflow_lite::NodeContext& node) { + const auto& decoder = get_decoder(node); + int32_t cond_idx = decoder->get_attribute(&tflite::WhileOptions::cond_subgraph_index); + int32_t body_idx = decoder->get_attribute(&tflite::WhileOptions::body_subgraph_index); + + auto condition = node.get_subgraph(cond_idx); + auto body = node.get_subgraph(body_idx); + + FRONT_END_GENERAL_CHECK(condition, "Incorrect model: condition graph was not read properly"); + FRONT_END_GENERAL_CHECK(body, "Incorrect model: body graph was not read properly"); + + // insert condition before loop + ov::OutputVector ov_inputs = node.get_inputs(); + auto condition_prior_loop = condition->clone(); + auto condition_parameters = condition_prior_loop->get_parameters(); + for (size_t param_ind = 0; param_ind < condition_parameters.size(); ++param_ind) + condition_parameters[param_ind]->output(0).replace(ov_inputs[param_ind]); + ov::OutputVector ov_outputs; + for (const auto& result_node : condition_prior_loop->get_results()) + ov_outputs.push_back(result_node->input_value(0)); + + TENSORFLOW_OP_VALIDATION(node, + ov_outputs.size() == 1, + "[TensorFlow Lite Frontend] Internal error or inconsistent model: condition body must " + "contain one Result node."); + + auto exec_cond = ov_outputs[0]; + auto trip_count = make_shared(element::i32, Shape{}, -1); + auto loop = make_shared(trip_count, 
exec_cond); + + // prepare body model to be set for the Loop node + // note that condition should be computed on the updated input + // because this is while(cond) {} construction, + // that is why condition graph is stitched to the body results + auto body_params = body->get_parameters(); + auto body_results = body->get_results(); + auto cond_results = condition->get_results(); + condition_parameters = condition->get_parameters(); + auto cond_params_size = condition_parameters.size(); + TENSORFLOW_OP_VALIDATION(node, + body_params.size() == node.get_input_size(), + "[TensorFlow Lite Frontend] Internal error or inconsistent model: body graph " + " must have the same number of Parameter nodes as a number of inputs to While."); + TENSORFLOW_OP_VALIDATION(node, + body_results.size() == node.get_input_size(), + "[TensorFlow Lite Frontend] Internal error or inconsistent model: body graphs " + " must have the same number of Result nodes as a number of inputs to While."); + TENSORFLOW_OP_VALIDATION(node, + condition_parameters.size() == node.get_input_size(), + "[TensorFlow Lite Frontend] Internal error or inconsistent model: condition graph " + " must have the same number of Parameter nodes as a number of inputs to While."); + for (size_t param_ind = 0; param_ind < cond_params_size; ++param_ind) { + condition_parameters[param_ind]->output(0).replace(body_results[param_ind]->input_value(0)); + } + + // update body model with the new result that corresponds to execution condition + TENSORFLOW_OP_VALIDATION(node, + cond_results.size() == 1 && cond_results[0], + "[TensorFlow Lite Frontend] Internal error or inconsistent model: condition body must " + "contain one Result node."); + auto body_condition_output_idx = static_cast(body_results.size()); + body->add_results(cond_results); + + // set data for the Loop node + loop->set_function(body); + + for (int input_ind = 0; input_ind < static_cast(node.get_input_size()); ++input_ind) { + 
loop->set_merged_input(body_params[input_ind], + node.get_input(input_ind), + body_results[input_ind]->input_value(0)); + } + loop->set_special_body_ports({-1, body_condition_output_idx}); + + // set external outputs for Loop node + // do not get execution condition outside the Loop node + for (size_t output_ind = 0; output_ind < node.get_input_size(); ++output_ind) { + loop->get_iter_value(body_results[output_ind]); + } + loop->validate_and_infer_types(); + loop->set_friendly_name(node.get_name()); + return loop->outputs(); +} + +} // namespace op +} // namespace tensorflow_lite +} // namespace frontend +} // namespace ov diff --git a/src/frontends/tensorflow_lite/src/op_table.cpp b/src/frontends/tensorflow_lite/src/op_table.cpp index 8a5d4c3cb1239c..96738f5332195e 100644 --- a/src/frontends/tensorflow_lite/src/op_table.cpp +++ b/src/frontends/tensorflow_lite/src/op_table.cpp @@ -193,7 +193,7 @@ std::map get_supported_ops() { // UNSORTED_SEGMENT_SUM // VAR_HANDLE {"WHERE", OP_CONVERT_TYPE_RENAME(translate_where_op, "Where")}, - // WHILE + {"WHILE", while_op}, {"ZEROS_LIKE", DEQUANTIZE_INPUTS(translate_zeros_like_op)}, }; } diff --git a/src/frontends/tensorflow_lite/src/op_table.hpp b/src/frontends/tensorflow_lite/src/op_table.hpp index 797e02c7722249..b7afec427f92b6 100644 --- a/src/frontends/tensorflow_lite/src/op_table.hpp +++ b/src/frontends/tensorflow_lite/src/op_table.hpp @@ -25,16 +25,17 @@ std::map get_supported_ops(); #define TFL_OP_CONVERTER(op) OutputVector op(const ov::frontend::tensorflow_lite::NodeContext& node) +/// built-in ops TFL_OP_CONVERTER(arg_max); TFL_OP_CONVERTER(arg_min); TFL_OP_CONVERTER(avg_pool_2d); TFL_OP_CONVERTER(batch_matmul); TFL_OP_CONVERTER(cast); +TFL_OP_CONVERTER(complex_abs); TFL_OP_CONVERTER(concatenation); TFL_OP_CONVERTER(conv2d); -TFL_OP_CONVERTER(complex_abs); -TFL_OP_CONVERTER(depthwise_conv2d); TFL_OP_CONVERTER(depth_to_space); +TFL_OP_CONVERTER(depthwise_conv2d); TFL_OP_CONVERTER(dequantize); 
TFL_OP_CONVERTER(fully_connected); TFL_OP_CONVERTER(gather); @@ -42,24 +43,25 @@ TFL_OP_CONVERTER(l2_normalization); TFL_OP_CONVERTER(leaky_relu); TFL_OP_CONVERTER(max_pool_2d); TFL_OP_CONVERTER(mirror_pad); -TFL_OP_CONVERTER(reshape); +TFL_OP_CONVERTER(one_hot); TFL_OP_CONVERTER(pack); -TFL_OP_CONVERTER(softmax); -TFL_OP_CONVERTER(resize_nearest_neightbor); +TFL_OP_CONVERTER(quantize); +TFL_OP_CONVERTER(range); +TFL_OP_CONVERTER(reshape); TFL_OP_CONVERTER(resize_bilinear); -TFL_OP_CONVERTER(squeeze); -TFL_OP_CONVERTER(split); +TFL_OP_CONVERTER(resize_nearest_neightbor); +TFL_OP_CONVERTER(reverse_sequence); +TFL_OP_CONVERTER(rfft2d); TFL_OP_CONVERTER(shape); -TFL_OP_CONVERTER(range); -TFL_OP_CONVERTER(strided_slice); +TFL_OP_CONVERTER(softmax); TFL_OP_CONVERTER(space_to_depth); -TFL_OP_CONVERTER(one_hot); -TFL_OP_CONVERTER(reverse_sequence); +TFL_OP_CONVERTER(split); +TFL_OP_CONVERTER(squeeze); +TFL_OP_CONVERTER(strided_slice); +TFL_OP_CONVERTER(transpose_conv); TFL_OP_CONVERTER(unique); TFL_OP_CONVERTER(unpack); -TFL_OP_CONVERTER(transpose_conv); -TFL_OP_CONVERTER(rfft2d); -TFL_OP_CONVERTER(quantize); +TFL_OP_CONVERTER(while_op); template OutputVector translate_binary_op_with_activation(const ov::frontend::tensorflow_lite::NodeContext& node); diff --git a/src/frontends/tensorflow_lite/src/tensor_lite_place.hpp b/src/frontends/tensorflow_lite/src/tensor_lite_place.hpp index 324d008d80a501..6f809bd55707a5 100644 --- a/src/frontends/tensorflow_lite/src/tensor_lite_place.hpp +++ b/src/frontends/tensorflow_lite/src/tensor_lite_place.hpp @@ -22,13 +22,9 @@ class TensorLitePlace : public ov::frontend::tensorflow::TensorPlace { ov::element::Type type, const std::vector& names, std::shared_ptr quantization, - int64_t input_idx, - int64_t output_idx, const void* data) : ov::frontend::tensorflow::TensorPlace(input_model, pshape, type, names), m_quantization(quantization), - m_input_idx(input_idx), - m_output_idx(output_idx), m_data(data){}; void translate(ov::Output& output, 
bool convert_tensor_attrs_to_nodes = false); @@ -60,7 +56,7 @@ class TensorLitePlace : public ov::frontend::tensorflow::TensorPlace { protected: std::shared_ptr m_quantization; - int64_t m_input_idx, m_output_idx; + int64_t m_input_idx = -1, m_output_idx = -1; const void* m_data; }; diff --git a/tests/layer_tests/tensorflow_lite_tests/test_tfl_While.py b/tests/layer_tests/tensorflow_lite_tests/test_tfl_While.py new file mode 100644 index 00000000000000..96d952b833aaf6 --- /dev/null +++ b/tests/layer_tests/tensorflow_lite_tests/test_tfl_While.py @@ -0,0 +1,30 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import tensorflow as tf + +from common.tflite_layer_test_class import TFLiteLayerTest + + +class TestTFLiteWhileLayerTest(TFLiteLayerTest): + inputs = ["Input"] + outputs = ["While"] + allowed_ops = ["ADD", "LESS", "RESHAPE", "WHILE"] + + def make_model(self, params): + tf.compat.v1.reset_default_graph() + with tf.compat.v1.Session() as sess: + i = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1], name=self.inputs[0]) + c = lambda i: tf.less(i, [10]) + b = lambda i: (tf.add(i, [1]), ) + tf.while_loop(c, b, [i], name=self.outputs[0]) + net = sess.graph_def + return net + + @pytest.mark.parametrize("params", [dict()]) + @pytest.mark.nightly + def test_while(self, params, ie_device, precision, temp_dir): + pytest.xfail("CVS-112884") + self._test(ie_device, precision, temp_dir, params) From bb0d62fa1dfc9f940c0a033f895ec2f520f255ad Mon Sep 17 00:00:00 2001 From: Evgenya Stepyreva Date: Thu, 8 Jun 2023 14:02:01 +0400 Subject: [PATCH 2/5] Rearranges FLATBUFFERS_LOCALE_INDEPENDENT setting --- .../tensorflow_lite/src/CMakeLists.txt | 1 - .../src/decoder_flatbuffer.cpp | 20 +++++++++++++++++++ .../tensorflow_lite/src/decoder_flatbuffer.h | 16 +--------------- src/frontends/tensorflow_lite/src/utils.cpp | 1 - 4 files changed, 21 insertions(+), 17 deletions(-) diff --git
a/src/frontends/tensorflow_lite/src/CMakeLists.txt b/src/frontends/tensorflow_lite/src/CMakeLists.txt index 5841829e65a74d..9658844f9b5709 100644 --- a/src/frontends/tensorflow_lite/src/CMakeLists.txt +++ b/src/frontends/tensorflow_lite/src/CMakeLists.txt @@ -1,7 +1,6 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -add_definitions(-DFLATBUFFERS_LOCALE_INDEPENDENT=0) ov_add_frontend(NAME tensorflow_lite LINKABLE_FRONTEND diff --git a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp index 75b253820c654c..5e615c6773627f 100644 --- a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp +++ b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp @@ -4,7 +4,12 @@ #include "decoder_flatbuffer.h" +#ifdef FLATBUFFERS_LOCALE_INDEPENDENT +#undef FLATBUFFERS_LOCALE_INDEPENDENT +#endif +#define FLATBUFFERS_LOCALE_INDEPENDENT 0 #include "schema_generated.h" +#include "flatbuffers/flexbuffers.h" #include "utils.hpp" namespace ov { @@ -129,6 +134,21 @@ ov::Any get_value_as_ov_any(const flexbuffers::Reference& value) { return {}; } + +ov::Any DecoderFlatBuffer::get_attribute(const std::string& name) const { + const auto opts = m_node_def->custom_options(); + if (opts == nullptr) + return {}; + const flexbuffers::Map& m = flexbuffers::GetRoot(opts->Data(), opts->size()).AsMap(); + flexbuffers::Reference value; + try { + value = m[name]; + } catch (...) 
{ + return {}; + } + return get_value_as_ov_any(value); +} + } // namespace tensorflow_lite } // namespace frontend } // namespace ov diff --git a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h index 2c605e6d7469af..de339300892746 100644 --- a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h +++ b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.h @@ -11,7 +11,6 @@ #include "graph_iterator_flatbuffer.hpp" #include "openvino/frontend/tensorflow_lite/visibility.hpp" #include "openvino/frontend/decoder.hpp" -#include "flatbuffers/flexbuffers.h" namespace ov { namespace frontend { @@ -20,7 +19,6 @@ namespace tensorflow_lite { class TensorLitePlace; struct TensorInfo; -ov::Any get_value_as_ov_any(const flexbuffers::Reference& value); class DecoderFlatBuffer : public ov::frontend::DecoderBase { public: @@ -46,19 +44,7 @@ class DecoderFlatBuffer : public ov::frontend::DecoderBase { return (opts->*member)(); } - ov::Any get_attribute(const std::string& name) const override { - const auto opts = m_node_def->custom_options(); - if (opts == nullptr) - return {}; - const flexbuffers::Map& m = flexbuffers::GetRoot(opts->Data(), opts->size()).AsMap(); - flexbuffers::Reference value; - try { - value = m[name]; - } catch (...) 
{ - return {}; - } - return get_value_as_ov_any(value); - } + ov::Any get_attribute(const std::string& name) const override; size_t get_input_size() const override; size_t get_output_size() const; diff --git a/src/frontends/tensorflow_lite/src/utils.cpp b/src/frontends/tensorflow_lite/src/utils.cpp index b89c7e0f659fda..c83a702eaec1e7 100644 --- a/src/frontends/tensorflow_lite/src/utils.cpp +++ b/src/frontends/tensorflow_lite/src/utils.cpp @@ -7,7 +7,6 @@ #include #include -#include "schema_generated.h" #include "tflite_ops/tflite_quantize.hpp" using namespace ov; From 02bf3dd0b232a7b45f95dafc5bee6ee52af37f68 Mon Sep 17 00:00:00 2001 From: Evgenya Stepyreva Date: Thu, 8 Jun 2023 14:06:03 +0400 Subject: [PATCH 3/5] Style --- src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp index 5e615c6773627f..0e114e72ec87d3 100644 --- a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp +++ b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp @@ -5,11 +5,11 @@ #include "decoder_flatbuffer.h" #ifdef FLATBUFFERS_LOCALE_INDEPENDENT -#undef FLATBUFFERS_LOCALE_INDEPENDENT +# undef FLATBUFFERS_LOCALE_INDEPENDENT #endif #define FLATBUFFERS_LOCALE_INDEPENDENT 0 -#include "schema_generated.h" #include "flatbuffers/flexbuffers.h" +#include "schema_generated.h" #include "utils.hpp" namespace ov { @@ -134,7 +134,6 @@ ov::Any get_value_as_ov_any(const flexbuffers::Reference& value) { return {}; } - ov::Any DecoderFlatBuffer::get_attribute(const std::string& name) const { const auto opts = m_node_def->custom_options(); if (opts == nullptr) From dadc49e941e09362df12973932fa7a3f011a9587 Mon Sep 17 00:00:00 2001 From: Evgenya Stepyreva Date: Thu, 8 Jun 2023 14:39:01 +0400 Subject: [PATCH 4/5] Make flatbuffers code as version independent as possible --- .../src/decoder_flatbuffer.cpp | 26 
++----------------- 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp index 0e114e72ec87d3..d2466f84dd95f6 100644 --- a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp +++ b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp @@ -108,27 +108,7 @@ ov::Any get_value_as_ov_any(const flexbuffers::Reference& value) { CASE_MACRO(FBT_INDIRECT_FLOAT, AsFloat) CASE_MACRO(FBT_STRING, AsString) CASE_MACRO(FBT_BOOL, AsBool) - case flexbuffers::FBT_NULL: - case flexbuffers::FBT_MAP: - case flexbuffers::FBT_KEY: - case flexbuffers::FBT_VECTOR: - case flexbuffers::FBT_VECTOR_INT: - case flexbuffers::FBT_VECTOR_UINT: - case flexbuffers::FBT_VECTOR_FLOAT: - case flexbuffers::FBT_VECTOR_KEY: - case flexbuffers::FBT_VECTOR_STRING_DEPRECATED: - case flexbuffers::FBT_VECTOR_INT2: - case flexbuffers::FBT_VECTOR_UINT2: - case flexbuffers::FBT_VECTOR_FLOAT2: - case flexbuffers::FBT_VECTOR_INT3: - case flexbuffers::FBT_VECTOR_UINT3: - case flexbuffers::FBT_VECTOR_FLOAT3: - case flexbuffers::FBT_VECTOR_INT4: - case flexbuffers::FBT_VECTOR_UINT4: - case flexbuffers::FBT_VECTOR_FLOAT4: - case flexbuffers::FBT_BLOB: - case flexbuffers::FBT_VECTOR_BOOL: - case flexbuffers::FBT_MAX_TYPE: + default: return {}; } return {}; @@ -139,13 +119,11 @@ ov::Any DecoderFlatBuffer::get_attribute(const std::string& name) const { if (opts == nullptr) return {}; const flexbuffers::Map& m = flexbuffers::GetRoot(opts->Data(), opts->size()).AsMap(); - flexbuffers::Reference value; try { - value = m[name]; + return get_value_as_ov_any(m[name]); } catch (...) 
{ return {}; } - return get_value_as_ov_any(value); } } // namespace tensorflow_lite From 42ce7fb9d72376253444fc99286a986a4cbf6559 Mon Sep 17 00:00:00 2001 From: Evgenya Stepyreva Date: Fri, 9 Jun 2023 12:32:57 +0400 Subject: [PATCH 5/5] Comments addressed --- .../frontend/tensorflow_lite/node_context.hpp | 16 ++++++---------- .../tensorflow_lite/src/decoder_flatbuffer.cpp | 6 +----- src/frontends/tensorflow_lite/src/frontend.cpp | 6 +++--- src/frontends/tensorflow_lite/src/op/while.cpp | 3 +-- 4 files changed, 11 insertions(+), 20 deletions(-) diff --git a/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/node_context.hpp b/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/node_context.hpp index 95baea1cdc9830..93526670729ada 100644 --- a/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/node_context.hpp +++ b/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/node_context.hpp @@ -13,7 +13,6 @@ namespace frontend { namespace tensorflow_lite { using SubGraphFuncs = std::vector()>>; -using SubGraphFuncsPtr = std::shared_ptr; /// Keep necessary data for a single node in the original FW graph to facilitate /// conversion process in the rules code. 
@@ -24,11 +23,11 @@ class TENSORFLOW_LITE_API NodeContext : public ov::frontend::NodeContext { : ov::frontend::NodeContext(decoder->get_op_type()), m_decoder(decoder), m_inputs(inputs), - m_subgraph_functions(nullptr) {} + m_subgraph_functions(m_empty_vector) {} NodeContext(const std::shared_ptr& decoder, const OutputVector& inputs, - const SubGraphFuncsPtr& subgraph_functions) + const SubGraphFuncs& subgraph_functions) : ov::frontend::NodeContext(decoder->get_op_type()), m_decoder(decoder), m_inputs(inputs), @@ -66,17 +65,13 @@ class TENSORFLOW_LITE_API NodeContext : public ov::frontend::NodeContext { /// \brief Returns the number of sub-graphs that can be enumerated with get_subgraph size_t get_subgraph_size() const override { - if (!m_subgraph_functions) - return 0; - return m_subgraph_functions->size(); + return m_subgraph_functions.size(); } /// \brief Returns subgraph converted on demand by the first access /// If there is no query for specific sub-graph it shouldn't be converted /// idx should be in range 0..get_subgraph_size()-1 std::shared_ptr get_subgraph(int idx) const override { - FRONT_END_GENERAL_CHECK(m_subgraph_functions != nullptr, - "Requested subgraph while subgraphs are not configured"); int size = static_cast(get_subgraph_size()); FRONT_END_GENERAL_CHECK(idx >= 0 && idx < size, "Incorrect subgraph idx ", @@ -84,7 +79,7 @@ class TENSORFLOW_LITE_API NodeContext : public ov::frontend::NodeContext { ". 
There are only ", get_subgraph_size(), "subgraphs currently"); - return m_subgraph_functions->operator[](idx)(); + return m_subgraph_functions[idx](); } /// \brief Get a decoder @@ -95,7 +90,8 @@ class TENSORFLOW_LITE_API NodeContext : public ov::frontend::NodeContext { private: std::shared_ptr m_decoder; const OutputVector& m_inputs; - SubGraphFuncsPtr m_subgraph_functions; + const SubGraphFuncs& m_subgraph_functions; + const SubGraphFuncs m_empty_vector = {}; }; using CreatorFunction = std::function; diff --git a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp index d2466f84dd95f6..b6f28224a6acf4 100644 --- a/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp +++ b/src/frontends/tensorflow_lite/src/decoder_flatbuffer.cpp @@ -119,11 +119,7 @@ ov::Any DecoderFlatBuffer::get_attribute(const std::string& name) const { if (opts == nullptr) return {}; const flexbuffers::Map& m = flexbuffers::GetRoot(opts->Data(), opts->size()).AsMap(); - try { - return get_value_as_ov_any(m[name]); - } catch (...) 
{ - return {}; - } + return get_value_as_ov_any(m[name]); } } // namespace tensorflow_lite diff --git a/src/frontends/tensorflow_lite/src/frontend.cpp b/src/frontends/tensorflow_lite/src/frontend.cpp index 4dd9826b7dafd4..1b7d3615c4b672 100644 --- a/src/frontends/tensorflow_lite/src/frontend.cpp +++ b/src/frontends/tensorflow_lite/src/frontend.cpp @@ -178,10 +178,10 @@ void FrontEnd::translate_graph(const InputModel::Ptr& model, }; return simple_lambda; }; - auto submodel_translation_functions = std::make_shared()>>>(); - submodel_translation_functions->reserve(subgraphs_as_input_models.size()); + std::vector()>> submodel_translation_functions; + submodel_translation_functions.reserve(subgraphs_as_input_models.size()); for (const auto& subgraph : subgraphs_as_input_models) { - submodel_translation_functions->emplace_back(input_to_ov_model(subgraph)); + submodel_translation_functions.push_back(input_to_ov_model(subgraph)); } const auto& translate_map = diff --git a/src/frontends/tensorflow_lite/src/op/while.cpp b/src/frontends/tensorflow_lite/src/op/while.cpp index 0d671f2c1d6dfb..922c552fe03e84 100644 --- a/src/frontends/tensorflow_lite/src/op/while.cpp +++ b/src/frontends/tensorflow_lite/src/op/while.cpp @@ -2,10 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "common_op_table.hpp" #include "op_translation_utils.hpp" +#include "openvino/opsets/opset11.hpp" #include "utils.hpp" using namespace std;