diff --git a/inference-engine/tests/functional/inference_engine/paddle_reader/read_paddle_model_test.cpp b/inference-engine/tests/functional/inference_engine/paddle_reader/read_paddle_model_test.cpp index 77220d86263102..e7a5d8c186a205 100644 --- a/inference-engine/tests/functional/inference_engine/paddle_reader/read_paddle_model_test.cpp +++ b/inference-engine/tests/functional/inference_engine/paddle_reader/read_paddle_model_test.cpp @@ -43,7 +43,7 @@ TEST(PDPD_Reader_Tests, ImportBasicModelToCore) { "RefPDPDFunction"); const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::NAMES); const FunctionsComparator::Result res = func_comparator(function, reference); - ASSERT_TRUE(res.valid); + ASSERT_TRUE(res.valid) << res.message; } #if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) @@ -79,6 +79,6 @@ TEST(PDPD_Reader_Tests, ImportBasicModelToCoreWstring) { "RefPDPDFunction"); const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::NAMES); const FunctionsComparator::Result res = func_comparator(function, reference); - ASSERT_TRUE(res.valid); + ASSERT_TRUE(res.valid) << res.message; } #endif diff --git a/ngraph/frontend/frontend_manager/include/frontend_manager/frontend.hpp b/ngraph/frontend/frontend_manager/include/frontend_manager/frontend.hpp index 34456d4df7f4b9..8af4b93464e79a 100644 --- a/ngraph/frontend/frontend_manager/include/frontend_manager/frontend.hpp +++ b/ngraph/frontend/frontend_manager/include/frontend_manager/frontend.hpp @@ -61,9 +61,7 @@ namespace ngraph /// \brief Completely convert the remaining, not converted part of a function. 
/// \param partiallyConverted partially converted nGraph function - /// \return fully converted nGraph function - virtual std::shared_ptr - convert(std::shared_ptr partially_converted) const; + virtual void convert(std::shared_ptr partially_converted) const; /// \brief Convert only those parts of the model that can be converted leaving others /// as-is. Converted parts are not normalized by additional transformations; normalize diff --git a/ngraph/frontend/frontend_manager/src/frontend_manager.cpp b/ngraph/frontend/frontend_manager/src/frontend_manager.cpp index a37b56586df10a..2f994ffc59285f 100644 --- a/ngraph/frontend/frontend_manager/src/frontend_manager.cpp +++ b/ngraph/frontend/frontend_manager/src/frontend_manager.cpp @@ -147,7 +147,7 @@ std::shared_ptr FrontEnd::convert(InputModel::Ptr model) const FRONT_END_NOT_IMPLEMENTED(convert); } -std::shared_ptr FrontEnd::convert(std::shared_ptr) const +void FrontEnd::convert(std::shared_ptr) const { FRONT_END_NOT_IMPLEMENTED(convert); } diff --git a/ngraph/frontend/onnx/frontend/include/onnx_frontend/frontend.hpp b/ngraph/frontend/onnx/frontend/include/onnx_frontend/frontend.hpp index fdc004365d6672..20dbde0922e863 100644 --- a/ngraph/frontend/onnx/frontend/include/onnx_frontend/frontend.hpp +++ b/ngraph/frontend/onnx/frontend/include/onnx_frontend/frontend.hpp @@ -20,8 +20,7 @@ namespace ngraph { public: std::shared_ptr convert(InputModel::Ptr model) const override; - std::shared_ptr - convert(std::shared_ptr partially_converted) const override; + void convert(std::shared_ptr partially_converted) const override; std::shared_ptr decode(InputModel::Ptr model) const override; protected: diff --git a/ngraph/frontend/onnx/frontend/src/frontend.cpp b/ngraph/frontend/onnx/frontend/src/frontend.cpp index 3caa85db68c365..cad2fa337ce6a6 100644 --- a/ngraph/frontend/onnx/frontend/src/frontend.cpp +++ b/ngraph/frontend/onnx/frontend/src/frontend.cpp @@ -42,10 +42,9 @@ std::shared_ptr FrontEndONNX::convert(InputModel::Ptr 
model) c return model_onnx->convert(); } -std::shared_ptr - FrontEndONNX::convert(std::shared_ptr partially_converted) const +void FrontEndONNX::convert(std::shared_ptr partially_converted) const { - return onnx_import::convert_decoded_function(partially_converted); + onnx_import::convert_decoded_function(partially_converted); } std::shared_ptr FrontEndONNX::decode(InputModel::Ptr model) const diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_import/onnx.hpp b/ngraph/frontend/onnx/onnx_import/include/onnx_import/onnx.hpp index 39b923328c123f..54ee83d67084d6 100644 --- a/ngraph/frontend/onnx/onnx_import/include/onnx_import/onnx.hpp +++ b/ngraph/frontend/onnx/onnx_import/include/onnx_import/onnx.hpp @@ -76,10 +76,8 @@ namespace ngraph /// \brief Converts a nGraph function (onnx model decoded to function with /// ONNXFrameworkNode(s)) /// to a complete function with actual compute operations - /// - /// \return A nGraph function. ONNX_IMPORTER_API - std::shared_ptr convert_decoded_function(std::shared_ptr function); + void convert_decoded_function(std::shared_ptr function); } // namespace onnx_import } // namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_import/include/onnx_import/utils/onnx_internal.hpp b/ngraph/frontend/onnx/onnx_import/include/onnx_import/utils/onnx_internal.hpp index 006fcc561e6c36..6f9adcf5c64887 100644 --- a/ngraph/frontend/onnx/onnx_import/include/onnx_import/utils/onnx_internal.hpp +++ b/ngraph/frontend/onnx/onnx_import/include/onnx_import/utils/onnx_internal.hpp @@ -53,7 +53,7 @@ namespace ngraph decode_to_framework_nodes(std::shared_ptr model_proto, const std::string& model_path); - std::shared_ptr convert_decoded_function(std::shared_ptr function); + void convert_decoded_function(std::shared_ptr function); } // namespace detail } // namespace onnx_import } // namespace ngraph diff --git a/ngraph/frontend/onnx/onnx_import/src/onnx.cpp b/ngraph/frontend/onnx/onnx_import/src/onnx.cpp index 39beac60108b06..35d12346c049ca 100644 
--- a/ngraph/frontend/onnx/onnx_import/src/onnx.cpp +++ b/ngraph/frontend/onnx/onnx_import/src/onnx.cpp @@ -58,9 +58,9 @@ namespace ngraph op_name, version, domain == "ai.onnx" ? "" : domain); } - std::shared_ptr convert_decoded_function(std::shared_ptr function) + void convert_decoded_function(std::shared_ptr function) { - return detail::convert_decoded_function(function); + detail::convert_decoded_function(function); } } // namespace onnx_import diff --git a/ngraph/frontend/onnx/onnx_import/src/utils/onnx_internal.cpp b/ngraph/frontend/onnx/onnx_import/src/utils/onnx_internal.cpp index 689eef00cc3f35..ce1943df013381 100644 --- a/ngraph/frontend/onnx/onnx_import/src/utils/onnx_internal.cpp +++ b/ngraph/frontend/onnx/onnx_import/src/utils/onnx_internal.cpp @@ -61,7 +61,7 @@ namespace ngraph } } - std::shared_ptr convert_decoded_function(std::shared_ptr function) + void convert_decoded_function(std::shared_ptr function) { for (const auto& node : function->get_ordered_ops()) { @@ -87,8 +87,6 @@ namespace ngraph } detail::remove_dangling_parameters(function); detail::remove_dangling_results(function); - - return function; } void apply_transformations(ONNX_NAMESPACE::ModelProto& model_proto, diff --git a/ngraph/frontend/paddlepaddle/CMakeLists.txt b/ngraph/frontend/paddlepaddle/CMakeLists.txt index 198d0f2124351c..ab9c5bcef84da7 100644 --- a/ngraph/frontend/paddlepaddle/CMakeLists.txt +++ b/ngraph/frontend/paddlepaddle/CMakeLists.txt @@ -71,7 +71,7 @@ endif() link_system_libraries(${TARGET_NAME} PRIVATE ${Protobuf_LITE_LIBRARIES}) target_link_libraries(${TARGET_NAME} PRIVATE ngraph::frontend_manager::static - PRIVATE ngraph::builder) + PRIVATE ngraph::builder inference_engine_transformations) add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME} EXCLUDE_PATTERNS ${PROTO_SRCS} ${PROTO_HDRS}) diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp 
index 410068b2e26fcc..d872e5fedf0014 100644 --- a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp @@ -12,6 +12,8 @@ namespace ngraph { namespace frontend { + class OpPlacePDPD; + class PDPD_API FrontEndPDPD : public FrontEnd { public: @@ -22,6 +24,25 @@ namespace ngraph /// \return fully converted nGraph function std::shared_ptr convert(InputModel::Ptr model) const override; + /// \brief Completely convert the remaining, not converted part of a function. + /// \param partiallyConverted partially converted nGraph function + void convert(std::shared_ptr partiallyConverted) const override; + + /// \brief Convert only those parts of the model that can be converted leaving others + /// as-is. Converted parts are not normalized by additional transformations; normalize + /// function or another form of convert function should be called to finalize the + /// conversion process. + /// \param model Input model + /// \return partially converted nGraph function + std::shared_ptr convert_partially(InputModel::Ptr model) const override; + + /// \brief Convert operations with one-to-one mapping with decoding nodes. + /// Each decoding node is an nGraph node representing a single FW operation node with + /// all attributes represented in FW-independent way. 
+ /// \param model Input model + /// \return nGraph function after decoding + std::shared_ptr decode(InputModel::Ptr model) const override; + protected: /// \brief Check if FrontEndPDPD can recognize model from given parts /// \param params Can be path to folder which contains __model__ file or path to @@ -40,7 +61,10 @@ namespace ngraph private: static std::shared_ptr - convert_model(const std::shared_ptr& model); + convert_each_node(const std::shared_ptr& model, + std::function( + const std::map>&, + const std::shared_ptr&)> func); }; } // namespace frontend diff --git a/ngraph/frontend/paddlepaddle/src/decoder.cpp b/ngraph/frontend/paddlepaddle/src/decoder.cpp index 1758893b254c2a..bfe845a4df0039 100644 --- a/ngraph/frontend/paddlepaddle/src/decoder.cpp +++ b/ngraph/frontend/paddlepaddle/src/decoder.cpp @@ -99,6 +99,31 @@ namespace ngraph return output_names; } + size_t DecoderPDPDProto::get_output_size() const + { + size_t res = 0; + for (const auto& output : op_place->get_desc().outputs()) + { + res += output.arguments().size(); + } + return res; + } + + std::map> + DecoderPDPDProto::get_output_type_map() const + { + std::map> output_types; + for (const auto& out_port_pair : op_place->get_output_ports()) + { + for (const auto& p_place : out_port_pair.second) + { + output_types[out_port_pair.first].push_back( + p_place->get_target_tensor_pdpd()->get_element_type()); + } + } + return output_types; + } + ngraph::element::Type DecoderPDPDProto::get_out_port_type(const std::string& port_name) const { @@ -135,5 +160,40 @@ namespace ngraph " Expected number: 0 or 1"); return attrs; } + + namespace + { + inline std::map map_for_each_input_impl( + const google::protobuf::RepeatedPtrField& c, + const std::function(const std::string&, size_t)>& func) + { + size_t idx = 0; + std::map res; + for (const auto& port : c) + { + std::vector> v; + v.reserve(port.arguments_size()); + for (const auto& inp : port.arguments()) + { + v.push_back(func(inp, idx++)); + } + 
res.emplace(std::make_pair(port.parameter(), v)); + } + return res; + } + } // namespace + + std::map DecoderPDPDProto::map_for_each_input( + const std::function(const std::string&, size_t)>& func) const + { + return map_for_each_input_impl(op_place->get_desc().inputs(), func); + } + + std::map DecoderPDPDProto::map_for_each_output( + const std::function(const std::string&, size_t)>& func) const + { + return map_for_each_input_impl(op_place->get_desc().outputs(), func); + } + } // namespace frontend } // namespace ngraph \ No newline at end of file diff --git a/ngraph/frontend/paddlepaddle/src/decoder.hpp b/ngraph/frontend/paddlepaddle/src/decoder.hpp index 67be6694f860a4..e7f6f6f4bb256b 100644 --- a/ngraph/frontend/paddlepaddle/src/decoder.hpp +++ b/ngraph/frontend/paddlepaddle/src/decoder.hpp @@ -40,10 +40,20 @@ namespace ngraph std::vector get_output_names() const override; + size_t get_output_size() const override; + ngraph::element::Type get_out_port_type(const std::string& port_name) const override; std::string get_op_type() const override; + std::map> get_output_type_map() const; + + std::map map_for_each_input( + const std::function(const std::string&, size_t)>& func) const; + + std::map map_for_each_output( + const std::function(const std::string&, size_t)>& func) const; + private: std::vector decode_attribute_helper(const std::string& name) const; diff --git a/ngraph/frontend/paddlepaddle/src/frontend.cpp b/ngraph/frontend/paddlepaddle/src/frontend.cpp index ab7d18765d46f9..231fbb6cb5388f 100644 --- a/ngraph/frontend/paddlepaddle/src/frontend.cpp +++ b/ngraph/frontend/paddlepaddle/src/frontend.cpp @@ -21,10 +21,9 @@ #include "decoder.hpp" #include "node_context.hpp" #include "op_table.hpp" +#include "pdpd_fw_node.hpp" #include "pdpd_utils.hpp" -#include "frontend_manager/frontend_manager.hpp" - using namespace ngraph::opset7; using namespace ngraph; using namespace ngraph::frontend; @@ -35,25 +34,25 @@ namespace ngraph { namespace pdpd { - NamedOutputs 
make_ng_node(std::map>& nodes, + NamedOutputs make_ng_node(const std::map>& nodes, const std::shared_ptr& op_place, const std::map& CREATORS_MAP) { - const auto& op = op_place->get_desc(); + const auto& op_desc = op_place->get_desc(); - FRONT_END_OP_CONVERSION_CHECK(CREATORS_MAP.find(op.type()) != CREATORS_MAP.end(), + auto creator_it = CREATORS_MAP.find(op_desc.type()); + FRONT_END_OP_CONVERSION_CHECK(creator_it != CREATORS_MAP.end(), "No creator found for ", - op.type(), + op_desc.type(), " node."); - pdpd::NamedInputs named_inputs; - const auto& input_ports = op_place->get_input_ports(); - for (const auto& name_to_ports : input_ports) + NamedInputs named_inputs; + for (const auto& input_port : op_desc.inputs()) { - for (const auto& port : name_to_ports.second) + for (const auto& in_tensor_name : input_port.arguments()) { - const auto& var_desc = port->get_source_tensor_pdpd()->get_desc(); - if (nodes.count(var_desc.name())) - named_inputs[name_to_ports.first].push_back(nodes.at(var_desc.name())); + auto node_it = nodes.find(in_tensor_name); + if (node_it != nodes.end()) + named_inputs[input_port.parameter()].push_back(node_it->second); else // return empty map when not all inputs exist. 
It usually means that // these nodes are not used because model inputs were overwritten @@ -61,17 +60,75 @@ namespace ngraph } } - try + return creator_it->second(NodeContext(DecoderPDPDProto(op_place), named_inputs)); + } + + NamedOutputs make_framework_node(const std::map>& nodes, + const std::shared_ptr& op_place) + { + const auto& op_desc = op_place->get_desc(); + + OutputVector inputs_vector; + std::vector inputs_names; + NamedOutputs named_outputs; + for (const auto& input_port : op_desc.inputs()) { - return CREATORS_MAP.at(op.type())( - NodeContext(DecoderPDPDProto(op_place), named_inputs)); + for (const auto& in_tensor_name : input_port.arguments()) + { + auto it = nodes.find(in_tensor_name); + if (it != nodes.end()) + { + inputs_vector.push_back(it->second); + inputs_names.push_back(in_tensor_name); + } + else + { + // return empty map when not all inputs exist. It usually means that + // these nodes are not used because model inputs were overwritten + return named_outputs; + } + } } - catch (...) 
+ + auto node = std::make_shared( + DecoderPDPDProto(op_place), inputs_vector, inputs_names); + + return node->return_named_outputs(); + } + + bool + normalize_framework_node(const std::shared_ptr& node, + const std::map& CREATORS_MAP) + { + auto type = node->get_op_type(); + auto creator_it = CREATORS_MAP.find(type); + FRONT_END_OP_CONVERSION_CHECK( + creator_it != CREATORS_MAP.end(), "No creator found for ", type, " node."); + + auto new_node_outputs = + creator_it->second(NodeContext(node->get_decoder(), node->get_named_inputs())); + auto new_node = new_node_outputs.begin()->second[0].get_node_shared_ptr(); + new_node->set_friendly_name(node->get_friendly_name()); + auto node_outputs = node->return_named_outputs(); + + auto new_ports = new_node_outputs.begin(); + auto old_ports = node_outputs.begin(); + for (; new_ports != new_node_outputs.end() && old_ports != node_outputs.end(); + ++new_ports, ++old_ports) { - // TODO: define exception types - // In case of partial conversion we need to create generic ngraph op here - return NamedOutputs(); + FRONT_END_OP_CONVERSION_CHECK(new_ports->first == old_ports->first, + "Node outputs inconsistent after normalization: ", + node->get_friendly_name()); + auto new_output = new_ports->second.begin(); + auto old_output = old_ports->second.begin(); + for (; new_output != new_ports->second.end() && + old_output != old_ports->second.end(); + ++old_output, ++new_output) + { + old_output->replace(*new_output); + } } + return true; } std::istream* variant_to_stream_ptr(const std::shared_ptr& variant, @@ -104,16 +161,16 @@ namespace ngraph } // namespace pdpd - std::shared_ptr - FrontEndPDPD::convert_model(const std::shared_ptr& model) + std::shared_ptr FrontEndPDPD::convert_each_node( + const std::shared_ptr& model, + std::function( + const std::map>&, const std::shared_ptr&)> + func) { - // std::cout << "Convert Model Start" << std::endl; - - std::map> nodes_dict(model->getTensorValues()); + auto 
nodes_dict(model->getTensorValues()); ParameterVector parameter_nodes; ResultVector result_nodes; - std::map CREATORS_MAP = pdpd::get_supported_ops(); for (const auto& _inp_place : model->get_inputs()) { const auto& inp_place = std::dynamic_pointer_cast(_inp_place); @@ -130,45 +187,44 @@ namespace ngraph const auto& op_places = model->getOpPlaces(); for (const auto& op_place : op_places) { - const auto& op_type = op_place->get_desc().type(); - if (op_type == "feed" || op_type == "fetch") + const auto& op_desc = op_place->get_desc(); + if (op_desc.type() == "feed" || op_desc.type() == "fetch") { // inputs and outputs are stored in the model already continue; } else { - const auto& named_outputs = - pdpd::make_ng_node(nodes_dict, op_place, CREATORS_MAP); + pdpd::NamedOutputs named_outputs = func(nodes_dict, op_place); - // set layer name by the name of first output var if (!named_outputs.empty()) { - const auto& first_output_var = op_place->get_output_ports() - .begin() - ->second.at(0) - ->get_target_tensor_pdpd() - ->get_desc(); + // set layer name by the name of first output var + const auto& tensor_name = op_desc.outputs().begin()->arguments()[0]; auto node = named_outputs.begin()->second[0].get_node_shared_ptr(); - node->set_friendly_name(first_output_var.name()); - } + node->set_friendly_name(tensor_name); - const auto& out_ports = op_place->get_output_ports(); - for (const auto& name_to_outputs : named_outputs) - { - const auto& ports = out_ports.at(name_to_outputs.first); - FRONT_END_OP_CONVERSION_CHECK( - ports.size() == name_to_outputs.second.size(), - "The number of output tensors must be equal to " - "the number of outputs of the ngraph node."); - for (size_t idx = 0; idx < ports.size(); ++idx) + const auto& out_ports = op_desc.outputs(); + for (const auto& port : out_ports) { - const auto& var = ports[idx]->get_target_tensor_pdpd()->get_desc(); - name_to_outputs.second[idx].get_tensor().set_names({var.name()}); - // if nodes_dict already has node mapped 
to this tensor name it usually - // means that it was overwritten using setTensorValue - if (!nodes_dict.count(var.name())) - nodes_dict[var.name()] = name_to_outputs.second[idx]; + // TODO: figure a way to safely handle unused outputs + if (named_outputs.count(port.parameter())) + { + const auto& ng_outputs = named_outputs.at(port.parameter()); + FRONT_END_OP_CONVERSION_CHECK( + ng_outputs.size() == port.arguments_size(), + "The number of output tensors must be equal to " + "the number of outputs of the ngraph node."); + for (size_t idx = 0; idx < ng_outputs.size(); ++idx) + { + const auto& var_name = port.arguments()[idx]; + ng_outputs[idx].get_tensor().set_names({var_name}); + // if nodes_dict already has node mapped to this tensor name it + // usually means that it was overwritten using setTensorValue + if (!nodes_dict.count(var_name)) + nodes_dict[var_name] = ng_outputs[idx]; + } + } } } } @@ -288,10 +344,63 @@ namespace ngraph std::shared_ptr FrontEndPDPD::convert(InputModel::Ptr model) const { auto pdpd_model = std::dynamic_pointer_cast(model); - auto f = convert_model(pdpd_model); + std::map CREATORS_MAP = pdpd::get_supported_ops(); + auto f = + convert_each_node(pdpd_model, + [&](const std::map>& nodes_dict, + const std::shared_ptr& op_place) { + return pdpd::make_ng_node(nodes_dict, op_place, CREATORS_MAP); + }); + return f; + } + + void FrontEndPDPD::convert(std::shared_ptr partiallyConverted) const + { + for (const auto& node : partiallyConverted->get_ordered_ops()) + { + if (is_type(node)) + { + pdpd::normalize_framework_node( + std::dynamic_pointer_cast(node), + pdpd::get_supported_ops()); + } + } + for (auto result : partiallyConverted->get_results()) + { + result->validate_and_infer_types(); + } + } + + std::shared_ptr + FrontEndPDPD::convert_partially(InputModel::Ptr model) const + { + auto pdpd_model = std::dynamic_pointer_cast(model); + std::map CREATORS_MAP = pdpd::get_supported_ops(); + auto f = convert_each_node( + pdpd_model, + [&](const 
std::map>& nodes_dict, + const std::shared_ptr& op_place) { + pdpd::NamedOutputs named_outputs; + try + { + named_outputs = pdpd::make_ng_node(nodes_dict, op_place, CREATORS_MAP); + } + catch (const OpConversionFailure&) + { + named_outputs = pdpd::make_framework_node(nodes_dict, op_place); + } + return named_outputs; + }); return f; } + std::shared_ptr FrontEndPDPD::decode(InputModel::Ptr model) const + { + auto pdpd_model = std::dynamic_pointer_cast(model); + std::map CREATORS_MAP = pdpd::get_supported_ops(); + auto f = convert_each_node(pdpd_model, pdpd::make_framework_node); + return f; + } } // namespace frontend } // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/node_context.hpp b/ngraph/frontend/paddlepaddle/src/node_context.hpp index 3cee4812d712e5..21201003c345f2 100644 --- a/ngraph/frontend/paddlepaddle/src/node_context.hpp +++ b/ngraph/frontend/paddlepaddle/src/node_context.hpp @@ -54,6 +54,8 @@ namespace ngraph virtual std::vector get_output_names() const = 0; + virtual size_t get_output_size() const = 0; + /// \brief Get output port type /// /// Current API assumes that output port has only one output type. @@ -141,6 +143,18 @@ namespace ngraph return name_map.at(name); } + /// Returns all inputs in order they appear in map. 
This is used for FrameworkNode + /// creation + OutputVector get_all_ng_inputs() const + { + OutputVector res; + for (const auto& entry : name_map) + { + res.insert(res.end(), entry.second.begin(), entry.second.end()); + } + return res; + } + std::vector get_output_names() const { return decoder.get_output_names(); diff --git a/ngraph/frontend/paddlepaddle/src/pdpd_fw_node.cpp b/ngraph/frontend/paddlepaddle/src/pdpd_fw_node.cpp new file mode 100644 index 00000000000000..aa55ca4be88935 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/pdpd_fw_node.cpp @@ -0,0 +1,48 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +namespace ngraph +{ + namespace frontend + { + NGRAPH_RTTI_DEFINITION(PDPDFrameworkNode, "PDPDFrameworkNode", 1); + + void PDPDFrameworkNode::validate_and_infer_types() + { + FrameworkNode::validate_and_infer_types(); + size_t idx = 0; + for (const auto& port_pair : m_decoder.get_output_type_map()) + { + for (const auto& p_type : port_pair.second) + { + set_output_type(idx++, p_type, PartialShape::dynamic()); + } + } + } + + std::map PDPDFrameworkNode::get_named_inputs() const + { + return m_decoder.map_for_each_input([&](const std::string& name, size_t) { + auto it = std::find(m_inputs_names.begin(), m_inputs_names.end(), name); + if (it != m_inputs_names.end()) + { + return input(it - m_inputs_names.begin()).get_source_output(); + } + else + { + return Output(); + } + }); + } + + std::map PDPDFrameworkNode::return_named_outputs() + { + return m_decoder.map_for_each_output( + [&](const std::string&, size_t idx) { return output(idx); }); + } + + } // namespace frontend +} // namespace ngraph diff --git a/ngraph/frontend/paddlepaddle/src/pdpd_fw_node.hpp b/ngraph/frontend/paddlepaddle/src/pdpd_fw_node.hpp new file mode 100644 index 00000000000000..967b17e77c2d30 --- /dev/null +++ b/ngraph/frontend/paddlepaddle/src/pdpd_fw_node.hpp @@ -0,0 +1,53 @@ +// Copyright (C) 2018-2021 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include "decoder.hpp" + +namespace ngraph +{ + namespace frontend + { + class PDPDFrameworkNode : public op::FrameworkNode + { + public: + NGRAPH_RTTI_DECLARATION; + + PDPDFrameworkNode(const DecoderPDPDProto& decoder, + const OutputVector& inputs, + const std::vector& inputs_names) + : FrameworkNode(inputs, decoder.get_output_size()) + , m_decoder{decoder} + , m_inputs_names{inputs_names} + { + op::FrameworkNodeAttrs attrs; + attrs.set_type_name(m_decoder.get_op_type()); + set_attrs(attrs); + + validate_and_infer_types(); + } + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override + { + return std::make_shared(m_decoder, inputs, m_inputs_names); + } + + std::string get_op_type() const { return m_decoder.get_op_type(); } + + const DecoderPDPDProto& get_decoder() const { return m_decoder; } + + std::map get_named_inputs() const; + + std::map return_named_outputs(); + + private: + const DecoderPDPDProto m_decoder; + std::vector m_inputs_names; + }; + } // namespace frontend +} // namespace ngraph \ No newline at end of file diff --git a/ngraph/python/src/pyngraph/frontend/frontend.cpp b/ngraph/python/src/pyngraph/frontend/frontend.cpp index eb723aded423d8..dd98869488c9fa 100644 --- a/ngraph/python/src/pyngraph/frontend/frontend.cpp +++ b/ngraph/python/src/pyngraph/frontend/frontend.cpp @@ -55,11 +55,12 @@ void regclass_pyngraph_FrontEnd(py::module m) Fully converted nGraph function. )"); - fem.def("convert", - static_cast (ngraph::frontend::FrontEnd::*)( - std::shared_ptr) const>(&ngraph::frontend::FrontEnd::convert), - py::arg("function"), - R"( + fem.def( + "convert", + static_cast) const>( + &ngraph::frontend::FrontEnd::convert), + py::arg("function"), + R"( Completely convert the remaining, not converted part of a function. 
Parameters diff --git a/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp b/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp index abab82b6bb0abc..624a8ee48da6b6 100644 --- a/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp +++ b/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp @@ -515,11 +515,7 @@ class MOCK_API FrontEndMockPy : public FrontEnd return std::make_shared(NodeVector{}, ParameterVector{}); } - std::shared_ptr convert(std::shared_ptr func) const override - { - m_stat.m_convert++; - return func; - } + void convert(std::shared_ptr func) const override { m_stat.m_convert++; } std::shared_ptr convert_partially(InputModel::Ptr model) const override { diff --git a/ngraph/python/tests/test_frontend/test_frontend_onnx.py b/ngraph/python/tests/test_frontend/test_frontend_onnx.py index 1dbe6a34ae637c..e55f665b883bbc 100644 --- a/ngraph/python/tests/test_frontend/test_frontend_onnx.py +++ b/ngraph/python/tests/test_frontend/test_frontend_onnx.py @@ -86,12 +86,12 @@ def test_decode_and_convert(): assert op.get_type_name() in ["Parameter", "Constant", "ONNXFrameworkNode", "ONNXSubgraphFrameworkNode", "Result"] - function = fe.convert(decoded_function) - assert function - for op in function.get_ordered_ops(): + fe.convert(decoded_function) + assert decoded_function + for op in decoded_function.get_ordered_ops(): assert op.get_type_name() not in ["ONNXFrameworkNode", "ONNXSubgraphFrameworkNode"] a = np.array([[1, 2], [3, 4]], dtype=np.float32) b = np.array([[2, 3], [4, 5]], dtype=np.float32) expected = np.array([[1.5, 5], [10.5, 18]], dtype=np.float32) - run_function(function, a, b, expected=[expected]) + run_function(decoded_function, a, b, expected=[expected]) diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index 7f802bbc53d5f1..304522e46f760d 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -651,7 +651,7 @@ install(TARGETS unit-test 
############ FRONTEND ############ target_include_directories(unit-test PRIVATE ${FRONTEND_INCLUDE_PATH} frontend/shared/include) -target_link_libraries(unit-test PRIVATE frontend_manager cnpy) +target_link_libraries(unit-test PRIVATE frontend_manager cnpy commonTestUtils) add_subdirectory(frontend) ### END FRONTEND ### diff --git a/ngraph/test/files/paddlepaddle/gen_scripts/generate_unsupported_relu.py b/ngraph/test/files/paddlepaddle/gen_scripts/generate_unsupported_relu.py new file mode 100644 index 00000000000000..ef70895ed38a5d --- /dev/null +++ b/ngraph/test/files/paddlepaddle/gen_scripts/generate_unsupported_relu.py @@ -0,0 +1,105 @@ +# +# relu paddle model generator +# +import os.path + +import sys + +import os +import numpy as np +import paddle as pdpd + + +# print numpy array like C structure +def print_alike(arr): + shape = arr.shape + rank = len(shape) + + # print("shape: ", shape, "rank: %d" %(rank)) + + # for idx, value in np.ndenumerate(arr): + # print(idx, value) + + def print_array(arr, end=' '): + shape = arr.shape + rank = len(arr.shape) + if rank > 1: + line = "{" + for i in range(arr.shape[0]): + line += print_array(arr[i, :], end="},\n" if i < arr.shape[0] - 1 else "}") + line += end + return line + else: + line = "{" + for i in range(arr.shape[0]): + line += "{:.2f}".format(arr[i]) # str(arr[i]) + line += ", " if i < shape[0] - 1 else ' ' + line += end + # print(line) + return line + + print(print_array(arr, "}")) + + +def saveModel(name, exe, feedkeys: list, fetchlist: list, inputs: list, outputs: list, target_dir: str): + model_dir = os.path.join(target_dir, name) + if not os.path.exists(model_dir): + os.makedirs(model_dir) + + print("\n\n------------- %s -----------\n" % (name)) + for i, input in enumerate(inputs): + print("INPUT %s :" % (feedkeys[i]), input.shape, input.dtype, "\n") + print_alike(input) + np.save(os.path.join(model_dir, "input{}".format(i)), input) + np.save(os.path.join(model_dir, "input{}.{}.{}".format(i, feedkeys[i], 
input.dtype)), input) + print("\n") + + for i, output in enumerate(outputs): + print("OUTPUT %s :" % (fetchlist[i]), output.shape, output.dtype, "\n") + print_alike(output) + np.save(os.path.join(model_dir, "output{}".format(i)), output) + + # composited model + scattered model + pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe) + pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name + ".pdmodel", + params_filename=name + ".pdiparams") + + +def relu(name: str, x): + import paddle as pdpd + pdpd.enable_static() + + node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + out = pdpd.nn.functional.relu(node_x) + + cpu = pdpd.static.cpu_places(1) + exe = pdpd.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(pdpd.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], + inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + data = np.array([-2, 0, 1]).astype('float32') + + relu("relu_unsupported", data) + + with open(os.path.join(sys.argv[1], "relu_unsupported", "relu_unsupported.pdmodel"), mode='rb') as file: + modelContent = file.read() + + modelContent = modelContent.replace(b"relu", b"rxyz") + + with open(os.path.join(sys.argv[1], "relu_unsupported", "relu_unsupported.pdmodel"), mode='wb') as file: + file.write(modelContent) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ngraph/test/frontend/paddlepaddle/convert_model.cpp b/ngraph/test/frontend/paddlepaddle/convert_model.cpp new file mode 100644 index 00000000000000..1fc0ba8f6d895e --- /dev/null +++ b/ngraph/test/frontend/paddlepaddle/convert_model.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "convert_model.hpp" +#include "paddle_utils.hpp" + +using 
namespace ngraph;
+using namespace ngraph::frontend;
+
+using PDPDConvertModelTest = FrontEndConvertModelTest;
+
+static const std::vector<std::string> models{
+    std::string("conv2d"),
+    std::string("conv2d_s/conv2d.pdmodel"),
+    std::string("conv2d_relu/conv2d_relu.pdmodel"),
+    std::string("2in_2out/2in_2out.pdmodel"),
+    std::string("multi_tensor_split/multi_tensor_split.pdmodel"),
+    std::string("2in_2out_dynbatch/2in_2out_dynbatch.pdmodel"),
+};
+
+INSTANTIATE_TEST_SUITE_P(
+    PDPDConvertModelTest,
+    FrontEndConvertModelTest,
+    ::testing::Combine(::testing::Values(PADDLE_FE),
+                       ::testing::Values(std::string(TEST_PADDLE_MODELS_DIRNAME)),
+                       ::testing::ValuesIn(models)),
+    FrontEndConvertModelTest::getTestCaseName);
diff --git a/ngraph/test/frontend/paddlepaddle/convert_unsupported.cpp b/ngraph/test/frontend/paddlepaddle/convert_unsupported.cpp
new file mode 100644
index 00000000000000..ac8c0dc1b252c0
--- /dev/null
+++ b/ngraph/test/frontend/paddlepaddle/convert_unsupported.cpp
@@ -0,0 +1,41 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <frontend_manager/frontend_manager.hpp>
+#include <ngraph/opsets/opset6.hpp>
+
+#include "common_test_utils/ngraph_test_utils.hpp"
+#include "utils.hpp"
+#include "paddle_utils.hpp"
+
+using namespace ngraph;
+using namespace ngraph::frontend;
+
+TEST(FrontEndConvertModelTest, test_unsupported_op)
+{
+    FrontEndManager fem;
+    FrontEnd::Ptr frontEnd;
+    InputModel::Ptr inputModel;
+    ASSERT_NO_THROW(frontEnd = fem.load_by_framework(PADDLE_FE));
+    ASSERT_NE(frontEnd, nullptr);
+    auto model_filename = FrontEndTestUtils::make_model_path(
+        std::string(TEST_PADDLE_MODELS_DIRNAME) +
+        std::string("relu_unsupported/relu_unsupported.pdmodel"));
+    ASSERT_NO_THROW(inputModel = frontEnd->load(model_filename));
+    ASSERT_NE(inputModel, nullptr);
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_THROW(function = frontEnd->convert(inputModel), OpConversionFailure);
+    ASSERT_EQ(function, nullptr);
+    ASSERT_NO_THROW(function = frontEnd->decode(inputModel));
+    
ASSERT_THROW(frontEnd->convert(function), OpConversionFailure);
+    ASSERT_NO_THROW(function = frontEnd->convert_partially(inputModel));
+    ASSERT_THROW(frontEnd->convert(function), OpConversionFailure);
+
+    for (auto& node : function->get_ordered_ops()) {
+        if (node->get_friendly_name() == "rxyz_0.tmp_0") {
+            function->replace_node(node, std::make_shared<ngraph::opset6::Relu>(node->input(0).get_source_output()));
+        }
+    }
+    ASSERT_NO_THROW(frontEnd->convert(function));
+}
diff --git a/ngraph/test/frontend/shared/include/convert_model.hpp b/ngraph/test/frontend/shared/include/convert_model.hpp
new file mode 100644
index 00000000000000..5fef0354671338
--- /dev/null
+++ b/ngraph/test/frontend/shared/include/convert_model.hpp
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <frontend_manager/frontend_manager.hpp>
+
+using ConvertParam = std::tuple<std::string, // FrontEnd name
+                                std::string, // Base path to models
+                                std::string>; // Model name
+
+class FrontEndConvertModelTest : public ::testing::TestWithParam<ConvertParam>
+{
+public:
+    std::string m_feName;
+    std::string m_pathToModels;
+    std::string m_modelFile;
+    ngraph::frontend::FrontEndManager m_fem;
+    ngraph::frontend::FrontEnd::Ptr m_frontEnd;
+    ngraph::frontend::InputModel::Ptr m_inputModel;
+
+    static std::string getTestCaseName(const testing::TestParamInfo<ConvertParam>& obj);
+
+    void SetUp() override;
+
+protected:
+    void initParamTest();
+
+    void doLoadFromFile();
+};
diff --git a/ngraph/test/frontend/shared/src/convert_model.cpp b/ngraph/test/frontend/shared/src/convert_model.cpp
new file mode 100644
index 00000000000000..65a4ac67116c57
--- /dev/null
+++ b/ngraph/test/frontend/shared/src/convert_model.cpp
@@ -0,0 +1,74 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "convert_model.hpp"
+#include "common_test_utils/ngraph_test_utils.hpp"
+#include "utils.hpp"
+
+using namespace ngraph;
+using namespace ngraph::frontend;
+
+std::string
+    FrontEndConvertModelTest::getTestCaseName(const testing::TestParamInfo<ConvertParam>& obj)
+{
+    
std::string fe, path, fileName;
+    std::tie(fe, path, fileName) = obj.param;
+    return fe + "_" + FrontEndTestUtils::fileToTestName(fileName);
+}
+
+void FrontEndConvertModelTest::SetUp()
+{
+    FrontEndTestUtils::setupTestEnv();
+    m_fem = FrontEndManager(); // re-initialize after setting up environment
+    initParamTest();
+}
+
+void FrontEndConvertModelTest::initParamTest()
+{
+    std::tie(m_feName, m_pathToModels, m_modelFile) = GetParam();
+    m_modelFile = FrontEndTestUtils::make_model_path(m_pathToModels + m_modelFile);
+}
+
+void FrontEndConvertModelTest::doLoadFromFile()
+{
+    std::vector<std::string> frontends;
+    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
+    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_feName));
+    ASSERT_NE(m_frontEnd, nullptr);
+    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load(m_modelFile));
+    ASSERT_NE(m_inputModel, nullptr);
+}
+
+TEST_P(FrontEndConvertModelTest, test_convert_partially_equal_convert)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    std::shared_ptr<ngraph::Function> function_ref;
+    ASSERT_NO_THROW(function_ref = m_frontEnd->convert(m_inputModel));
+    ASSERT_NE(function_ref, nullptr);
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->convert_partially(m_inputModel));
+    ASSERT_NE(function, nullptr);
+
+    const FunctionsComparator func_comparator =
+        FunctionsComparator::with_default().enable(FunctionsComparator::NAMES);
+    const FunctionsComparator::Result res = func_comparator(function, function_ref);
+    ASSERT_TRUE(res.valid) << res.message;
+}
+
+TEST_P(FrontEndConvertModelTest, test_decode_convert_equal_convert)
+{
+    ASSERT_NO_THROW(doLoadFromFile());
+    std::shared_ptr<ngraph::Function> function_ref;
+    ASSERT_NO_THROW(function_ref = m_frontEnd->convert(m_inputModel));
+    ASSERT_NE(function_ref, nullptr);
+    std::shared_ptr<ngraph::Function> function;
+    ASSERT_NO_THROW(function = m_frontEnd->decode(m_inputModel));
+    ASSERT_NO_THROW(m_frontEnd->convert(function));
+    ASSERT_NE(function, nullptr);
+
+    const FunctionsComparator func_comparator =
+        
FunctionsComparator::with_default().enable(FunctionsComparator::NAMES); + const FunctionsComparator::Result res = func_comparator(function, function_ref); + ASSERT_TRUE(res.valid) << res.message; +}