From 3121ee54353846521bd2fb468693aa299c854906 Mon Sep 17 00:00:00 2001 From: Ivan Tikhonov Date: Mon, 19 Apr 2021 15:38:38 +0300 Subject: [PATCH] One OutPlace corresponds to one TensorPlace --- .../frontend_manager/frontend_manager.hpp | 6 +++ .../frontend/generic/src/frontend_manager.cpp | 10 ++++ .../include/paddlepaddle_frontend/place.hpp | 41 ++++++++--------- ngraph/frontend/paddlepaddle/src/frontend.cpp | 18 ++++---- ngraph/frontend/paddlepaddle/src/model.cpp | 34 ++++++++------ ngraph/frontend/paddlepaddle/src/place.cpp | 46 ++----------------- 6 files changed, 69 insertions(+), 86 deletions(-) diff --git a/ngraph/frontend/generic/include/frontend_manager/frontend_manager.hpp b/ngraph/frontend/generic/include/frontend_manager/frontend_manager.hpp index 471f25e94b71a6..4618b8c2f715c1 100644 --- a/ngraph/frontend/generic/include/frontend_manager/frontend_manager.hpp +++ b/ngraph/frontend/generic/include/frontend_manager/frontend_manager.hpp @@ -115,9 +115,15 @@ class NGRAPH_API Place /// For operation node returns reference to an input port with specified index virtual Ptr getInputPort (int inputPortIndex = -1) const; + /// For operation node returns reference to an input port with specified name and index + virtual Ptr getInputPort (const std::string& inputName, int inputPortIndex = -1) const; + /// For operation node returns reference to an output port with specified index virtual Ptr getOutputPort (int outputPortIndex = -1) const; + /// For operation node returns reference to an output port with specified name and index + virtual Ptr getOutputPort (const std::string& outputName, int outputPortIndex = -1) const; + /// Returns all input ports that consume data flows through this place virtual std::vector getConsumingPorts () const; diff --git a/ngraph/frontend/generic/src/frontend_manager.cpp b/ngraph/frontend/generic/src/frontend_manager.cpp index 1b7473e1c05e23..9dd82c373298e3 100644 --- a/ngraph/frontend/generic/src/frontend_manager.cpp +++ 
b/ngraph/frontend/generic/src/frontend_manager.cpp @@ -196,11 +196,21 @@ namespace ngraph FRONT_END_NOT_IMPLEMENTED(getInputPort); } + Place::Ptr Place::getInputPort (const std::string& inputName, int inputPortIndex) const + { + FRONT_END_NOT_IMPLEMENTED(getInputPort); + } + Place::Ptr Place::getOutputPort (int outputPortIndex) const { FRONT_END_NOT_IMPLEMENTED(getOutputPort); } + Place::Ptr Place::getOutputPort (const std::string& outputName, int outputPortIndex) const + { + FRONT_END_NOT_IMPLEMENTED(getOutputPort); + } + std::vector Place::getConsumingPorts () const { FRONT_END_NOT_IMPLEMENTED(getConsumingPorts); diff --git a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp index 3312dac4a6829e..368d2d9463f70f 100644 --- a/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp +++ b/ngraph/frontend/paddlepaddle/include/paddlepaddle_frontend/place.hpp @@ -71,19 +71,17 @@ class InPortPlacePDPD : public PlacePDPD { m_op = op; } - void addSourceTensor(const std::weak_ptr& source_tensor) { - m_source_tensors.push_back(source_tensor); + void setSourceTensor(const std::weak_ptr& source_tensor) { + m_source_tensor = source_tensor; } std::vector> getSourceTensors() const; - std::shared_ptr getSourceTensor(int idx) const override; - - std::shared_ptr getSourceTensorPDPD(int idx) const; + std::shared_ptr getSourceTensorPDPD() const; std::shared_ptr getOp(); private: - std::vector> m_source_tensors; + std::weak_ptr m_source_tensor; std::weak_ptr m_op; }; @@ -95,18 +93,15 @@ class OutPortPlacePDPD : public PlacePDPD { void setOp(const std::weak_ptr& op) { m_op = op; } - void addTargetTensor(const std::weak_ptr& target_tensor) { - m_target_tensors.push_back(target_tensor); + void setTargetTensor(const std::weak_ptr& target_tensor) { + m_target_tensor = target_tensor; } - std::shared_ptr getTargetTensor(int idx) const override; - - std::shared_ptr getTargetTensorPDPD(int 
idx) const; + std::shared_ptr getTargetTensorPDPD() const; - std::vector> getTargetTensors() const; private: std::weak_ptr m_op; - std::vector> m_target_tensors; + std::weak_ptr m_target_tensor; }; class OpPlacePDPD : public PlacePDPD { @@ -119,35 +114,35 @@ class OpPlacePDPD : public PlacePDPD { const std::shared_ptr& op_desc); void addInPort(const std::shared_ptr& input, const std::string& name) { - m_input_ports[name] = input; + m_input_ports[name].push_back(input); } void addOutPort(const std::shared_ptr& output, const std::string& name) { - m_output_ports[name] = output; + m_output_ports[name].push_back(output); } - const std::map>& getOutputPorts() const { + const std::map>>& getOutputPorts() const { return m_output_ports; } - const std::map>& getInputPorts() const { + const std::map>>& getInputPorts() const { return m_input_ports; } - std::shared_ptr getOutputPortByName(const std::string& name) { - return m_output_ports[name]; + std::shared_ptr getOutputPortPDPD(const std::string& name, int idx) { + return m_output_ports[name][idx]; } - std::shared_ptr getInputPortByName(const std::string& name) { - return m_input_ports[name]; + std::shared_ptr getInputPortPDPD(const std::string& name, int idx) { + return m_input_ports[name][idx]; } const std::shared_ptr& getDesc() const { return m_op_desc; } private: std::shared_ptr m_op_desc; - std::map> m_input_ports; - std::map> m_output_ports; + std::map>> m_input_ports; + std::map>> m_output_ports; }; class TensorPlacePDPD : public PlacePDPD { diff --git a/ngraph/frontend/paddlepaddle/src/frontend.cpp b/ngraph/frontend/paddlepaddle/src/frontend.cpp index dc4974eac45692..0b7713c09217b7 100644 --- a/ngraph/frontend/paddlepaddle/src/frontend.cpp +++ b/ngraph/frontend/paddlepaddle/src/frontend.cpp @@ -54,11 +54,11 @@ std::shared_ptr make_ng_node(std::map>& MY_ASSERT(CREATORS_MAP.find(op->type()) != CREATORS_MAP.end(), "No creator found"); std::map named_inputs; const auto& input_ports = op_place->getInputPorts(); - for 
(const auto& name_to_port : input_ports) { - for (int idx = 0; idx < name_to_port.second->getSourceTensors().size(); ++idx) { - const auto& var_desc = name_to_port.second->getSourceTensorPDPD(idx)->getDesc(); + for (const auto& name_to_ports : input_ports) { + for (const auto& port : name_to_ports.second) { + const auto& var_desc = port->getSourceTensorPDPD()->getDesc(); if (nodes.count(var_desc->name())) - named_inputs[name_to_port.first].push_back(nodes.at(var_desc->name())); + named_inputs[name_to_ports.first].push_back(nodes.at(var_desc->name())); else return std::shared_ptr(); } @@ -138,14 +138,16 @@ std::shared_ptr const auto& node = pdpd::make_ng_node(nodes_dict, op_place, CREATORS_MAP); if (node) { // set layer name by the name of first output var - const auto& first_output_var = op_place->getOutputPorts().begin()->second->getTargetTensorPDPD(0)->getDesc(); + const auto& first_output_var = op_place->getOutputPorts().begin()->second[0]->getTargetTensorPDPD()->getDesc(); node->set_friendly_name(first_output_var->name()); std::cerr << "Named with " << node->get_friendly_name() << "\n"; - for (const auto &name_to_port : op_place->getOutputPorts()) { - for (size_t idx = 0; idx < name_to_port.second->getTargetTensors().size(); ++idx) { - const auto& var = name_to_port.second->getTargetTensorPDPD(idx)->getDesc(); + for (const auto& name_to_ports : op_place->getOutputPorts()) { + int idx = 0; + for (const auto& port : name_to_ports.second) { + const auto& var = port->getTargetTensorPDPD()->getDesc(); nodes_dict[var->name()] = node->output(idx); + idx++; } } } diff --git a/ngraph/frontend/paddlepaddle/src/model.cpp b/ngraph/frontend/paddlepaddle/src/model.cpp index e4a62cd9bd54e5..b72743f26066c7 100644 --- a/ngraph/frontend/paddlepaddle/src/model.cpp +++ b/ngraph/frontend/paddlepaddle/src/model.cpp @@ -90,40 +90,48 @@ InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::string& _path, op_place_block.push_back(op_place); for (const auto &output : 
op.outputs()) { - auto out_port = std::make_shared(m_input_model); - op_place->addOutPort(out_port, output.parameter()); - out_port->setOp(op_place); for (const auto &var_name : output.arguments()) { + auto out_port = std::make_shared(m_input_model); + + // connect out_port and tensor const auto& tensor = var_place_block.at(var_name); tensor->addProducingPort(out_port); - out_port->addTargetTensor(tensor); + out_port->setTargetTensor(tensor); + + // connect out_port and op + op_place->addOutPort(out_port, output.parameter()); + out_port->setOp(op_place); } } for (const auto &input : op.inputs()) { - auto in_port = std::make_shared(m_input_model); - op_place->addInPort(in_port, input.parameter()); - in_port->setOp(op_place); for (const auto &var_name : input.arguments()) { + auto in_port = std::make_shared(m_input_model); + + // connect in_port and tensor const auto& tensor = var_place_block.at(var_name); tensor->addConsumingPort(in_port); - in_port->addSourceTensor(tensor); + in_port->setSourceTensor(tensor); + + // connect in_port and op + op_place->addInPort(in_port, input.parameter()); + in_port->setOp(op_place); } } // Determine outputs and inputs if (op.type() == "feed") { - const auto& place = op_place->getOutputPortByName("Out")->getTargetTensor(0); - const auto& var_place = std::dynamic_pointer_cast(place); + const auto& place = op_place->getOutputPortPDPD("Out", 0); + const auto& var_place = std::dynamic_pointer_cast(place->getTargetTensorPDPD()); const auto& tensor_desc = var_place->getDesc()->type().lod_tensor().tensor(); const auto& dims = tensor_desc.dims(); var_place->setElementType(TYPE_MAP[tensor_desc.data_type()]); var_place->setPartialShape(PartialShape(std::vector(dims.begin(), dims.end()))); - m_inputs.push_back(place); + m_inputs.push_back(var_place); } else if (op.type() == "fetch") { - auto place = op_place->getInputPortByName("X")->getSourceTensor(0); - m_outputs.push_back(place); + auto place = op_place->getInputPortPDPD("X", 0); + 
m_outputs.push_back(place->getSourceTensorPDPD()); } } } diff --git a/ngraph/frontend/paddlepaddle/src/place.cpp b/ngraph/frontend/paddlepaddle/src/place.cpp index 2f8986a387fb50..a6845fe3c6dbad 100644 --- a/ngraph/frontend/paddlepaddle/src/place.cpp +++ b/ngraph/frontend/paddlepaddle/src/place.cpp @@ -88,15 +88,8 @@ Place::Ptr TensorPlacePDPD::getProducingPort() const { MY_ASSERT(false, "Producing Port has expired."); } -std::shared_ptr InPortPlacePDPD::getSourceTensor(int idx) const { - if (const auto& tensor = m_source_tensors[idx].lock()) { - return tensor; - } - MY_ASSERT(false, "Source Tensor has expired."); -} - -std::shared_ptr InPortPlacePDPD::getSourceTensorPDPD(int idx) const { - if (const auto& tensor = m_source_tensors[idx].lock()) { +std::shared_ptr InPortPlacePDPD::getSourceTensorPDPD() const { + if (const auto& tensor = m_source_tensor.lock()) { return tensor; } MY_ASSERT(false, "Source Tensor has expired."); @@ -109,40 +102,9 @@ std::shared_ptr InPortPlacePDPD::getOp() { MY_ASSERT(false, "Operation has expired."); } -std::vector> InPortPlacePDPD::getSourceTensors() const { - std::vector> source_tensors; - for (const auto & tensor: m_source_tensors) { - if (const auto& locked = tensor.lock()) { - source_tensors.push_back(locked); - } else { - MY_ASSERT(false, "Source Tensor has expired."); - } - } - return source_tensors; -} - -std::shared_ptr OutPortPlacePDPD::getTargetTensor(int idx) const { - if (const auto& target_tensor = m_target_tensors.at(idx).lock()) { +std::shared_ptr OutPortPlacePDPD::getTargetTensorPDPD() const { + if (const auto& target_tensor = m_target_tensor.lock()) { return target_tensor; } MY_ASSERT(false, "Target Tensor has expired."); } - -std::shared_ptr OutPortPlacePDPD::getTargetTensorPDPD(int idx) const { - if (const auto& target_tensor = m_target_tensors.at(idx).lock()) { - return target_tensor; - } - MY_ASSERT(false, "Target Tensor has expired."); -} - -std::vector> OutPortPlacePDPD::getTargetTensors() const { - 
std::vector> target_tensors; - for (const auto & tensor: m_target_tensors) { - if (const auto& locked = tensor.lock()) { - target_tensors.push_back(locked); - } else { - MY_ASSERT(false, "Target Tensor has expired."); - } - } - return target_tensors; -}