One OutPlace corresponds to one TensorPlace
itikhono committed Apr 19, 2021
1 parent ce58d47 commit 3121ee5
Showing 6 changed files with 69 additions and 86 deletions.
@@ -115,9 +115,15 @@ class NGRAPH_API Place
/// For operation node returns reference to an input port with specified index
virtual Ptr getInputPort (int inputPortIndex = -1) const;

/// For operation node returns reference to an input port with specified name and index
virtual Ptr getInputPort (const std::string& inputName, int inputPortIndex = -1) const;

/// For operation node returns reference to an output port with specified index
virtual Ptr getOutputPort (int outputPortIndex = -1) const;

/// For operation node returns reference to an output port with specified name and index
virtual Ptr getOutputPort (const std::string& outputName, int outputPortIndex = -1) const;

/// Returns all input ports that consume data flows through this place
virtual std::vector<Place::Ptr> getConsumingPorts () const;

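A hedged usage sketch of the new name-plus-index overloads (the two getters are from this diff; `op` and how it is obtained are illustrative assumptions):

    // Illustrative only: `op` is assumed to be a Place::Ptr for an operation
    // node obtained from a frontend's input model.
    Place::Ptr x0  = op->getInputPort("X", 0);   // input parameter "X", argument 0
    Place::Ptr out = op->getOutputPort("Out");   // name only; index defaults to -1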
10 changes: 10 additions & 0 deletions ngraph/frontend/generic/src/frontend_manager.cpp
@@ -196,11 +196,21 @@ namespace ngraph
FRONT_END_NOT_IMPLEMENTED(getInputPort);
}

Place::Ptr Place::getInputPort (const std::string& inputName, int inputPortIndex) const
{
FRONT_END_NOT_IMPLEMENTED(getInputPort);
}

Place::Ptr Place::getOutputPort (int outputPortIndex) const
{
FRONT_END_NOT_IMPLEMENTED(getOutputPort);
}

Place::Ptr Place::getOutputPort (const std::string& outputName, int outputPortIndex) const
{
FRONT_END_NOT_IMPLEMENTED(getOutputPort);
}

std::vector<Place::Ptr> Place::getConsumingPorts () const
{
FRONT_END_NOT_IMPLEMENTED(getConsumingPorts);
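These base-class bodies deliberately raise FRONT_END_NOT_IMPLEMENTED; a concrete frontend is expected to override the overloads it supports. A minimal sketch, with a hypothetical subclass name and placeholder body:

    // Hypothetical subclass; only the signature comes from this diff.
    class MyOpPlace : public Place {
    public:
        Ptr getOutputPort(const std::string& outputName,
                          int outputPortIndex = -1) const override {
            return nullptr;  // placeholder: a real frontend returns the port place
        }
    };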
@@ -71,19 +71,17 @@ class InPortPlacePDPD : public PlacePDPD {
m_op = op;
}

void addSourceTensor(const std::weak_ptr<TensorPlacePDPD>& source_tensor) {
m_source_tensors.push_back(source_tensor);
void setSourceTensor(const std::weak_ptr<TensorPlacePDPD>& source_tensor) {
m_source_tensor = source_tensor;
}

std::vector<std::shared_ptr<TensorPlacePDPD>> getSourceTensors() const;

std::shared_ptr<Place> getSourceTensor(int idx) const override;

std::shared_ptr<TensorPlacePDPD> getSourceTensorPDPD(int idx) const;
std::shared_ptr<TensorPlacePDPD> getSourceTensorPDPD() const;

std::shared_ptr<OpPlacePDPD> getOp();
private:
std::vector<std::weak_ptr<TensorPlacePDPD>> m_source_tensors;
std::weak_ptr<TensorPlacePDPD> m_source_tensor;
std::weak_ptr<OpPlacePDPD> m_op;
};

@@ -95,18 +93,15 @@ class OutPortPlacePDPD : public PlacePDPD {

void setOp(const std::weak_ptr<OpPlacePDPD>& op) { m_op = op; }

void addTargetTensor(const std::weak_ptr<TensorPlacePDPD>& target_tensor) {
m_target_tensors.push_back(target_tensor);
void setTargetTensor(const std::weak_ptr<TensorPlacePDPD>& target_tensor) {
m_target_tensor = target_tensor;
}

std::shared_ptr<Place> getTargetTensor(int idx) const override;

std::shared_ptr<TensorPlacePDPD> getTargetTensorPDPD(int idx) const;
std::shared_ptr<TensorPlacePDPD> getTargetTensorPDPD() const;

std::vector<std::shared_ptr<TensorPlacePDPD>> getTargetTensors() const;
private:
std::weak_ptr<OpPlacePDPD> m_op;
std::vector<std::weak_ptr<TensorPlacePDPD>> m_target_tensors;
std::weak_ptr<TensorPlacePDPD> m_target_tensor;
};

class OpPlacePDPD : public PlacePDPD {
@@ -119,35 +114,35 @@ class OpPlacePDPD : public PlacePDPD {
const std::shared_ptr<paddle::framework::proto::OpDesc>& op_desc);

void addInPort(const std::shared_ptr<InPortPlacePDPD>& input, const std::string& name) {
m_input_ports[name] = input;
m_input_ports[name].push_back(input);
}

void addOutPort(const std::shared_ptr<OutPortPlacePDPD>& output, const std::string& name) {
m_output_ports[name] = output;
m_output_ports[name].push_back(output);
}

const std::map<std::string, std::shared_ptr<OutPortPlacePDPD>>& getOutputPorts() const {
const std::map<std::string, std::vector<std::shared_ptr<OutPortPlacePDPD>>>& getOutputPorts() const {
return m_output_ports;
}

const std::map<std::string, std::shared_ptr<InPortPlacePDPD>>& getInputPorts() const {
const std::map<std::string, std::vector<std::shared_ptr<InPortPlacePDPD>>>& getInputPorts() const {
return m_input_ports;
}

std::shared_ptr<OutPortPlacePDPD> getOutputPortByName(const std::string& name) {
return m_output_ports[name];
std::shared_ptr<OutPortPlacePDPD> getOutputPortPDPD(const std::string& name, int idx) {
return m_output_ports[name][idx];
}

std::shared_ptr<InPortPlacePDPD> getInputPortByName(const std::string& name) {
return m_input_ports[name];
std::shared_ptr<InPortPlacePDPD> getInputPortPDPD(const std::string& name, int idx) {
return m_input_ports[name][idx];
}

const std::shared_ptr<paddle::framework::proto::OpDesc>& getDesc() const { return m_op_desc; }

private:
std::shared_ptr<paddle::framework::proto::OpDesc> m_op_desc;
std::map<std::string, std::shared_ptr<InPortPlacePDPD>> m_input_ports;
std::map<std::string, std::shared_ptr<OutPortPlacePDPD>> m_output_ports;
std::map<std::string, std::vector<std::shared_ptr<InPortPlacePDPD>>> m_input_ports;
std::map<std::string, std::vector<std::shared_ptr<OutPortPlacePDPD>>> m_output_ports;
};

class TensorPlacePDPD : public PlacePDPD {
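To make the new container shape concrete, a minimal sketch of walking the per-name port vectors (it mirrors the frontend.cpp loop further down; `op_place` is assumed to be a populated OpPlacePDPD):

    // Each parameter name now maps to a vector of ports, one port per tensor
    // argument, and each port refers to exactly one source tensor.
    for (const auto& name_to_ports : op_place->getInputPorts()) {
        const std::string& param_name = name_to_ports.first;
        for (const auto& port : name_to_ports.second) {
            const auto& tensor = port->getSourceTensorPDPD();
            // ... consume (param_name, tensor) ...
        }
    }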
18 changes: 10 additions & 8 deletions ngraph/frontend/paddlepaddle/src/frontend.cpp
@@ -54,11 +54,11 @@ std::shared_ptr<ngraph::Node> make_ng_node(std::map<std::string, Output<Node>>&
MY_ASSERT(CREATORS_MAP.find(op->type()) != CREATORS_MAP.end(), "No creator found");
std::map<std::string, OutputVector> named_inputs;
const auto& input_ports = op_place->getInputPorts();
for (const auto& name_to_port : input_ports) {
for (int idx = 0; idx < name_to_port.second->getSourceTensors().size(); ++idx) {
const auto& var_desc = name_to_port.second->getSourceTensorPDPD(idx)->getDesc();
for (const auto& name_to_ports : input_ports) {
for (const auto& port : name_to_ports.second) {
const auto& var_desc = port->getSourceTensorPDPD()->getDesc();
if (nodes.count(var_desc->name()))
named_inputs[name_to_port.first].push_back(nodes.at(var_desc->name()));
named_inputs[name_to_ports.first].push_back(nodes.at(var_desc->name()));
else
return std::shared_ptr<ngraph::Node>();
}
@@ -138,14 +138,16 @@ std::shared_ptr<Function>
const auto& node = pdpd::make_ng_node(nodes_dict, op_place, CREATORS_MAP);
if (node) {
// set layer name by the name of first output var
const auto& first_output_var = op_place->getOutputPorts().begin()->second->getTargetTensorPDPD(0)->getDesc();
const auto& first_output_var = op_place->getOutputPorts().begin()->second[0]->getTargetTensorPDPD()->getDesc();
node->set_friendly_name(first_output_var->name());

std::cerr << "Named with " << node->get_friendly_name() << "\n";
for (const auto &name_to_port : op_place->getOutputPorts()) {
for (size_t idx = 0; idx < name_to_port.second->getTargetTensors().size(); ++idx) {
const auto& var = name_to_port.second->getTargetTensorPDPD(idx)->getDesc();
for (const auto& name_to_ports : op_place->getOutputPorts()) {
int idx = 0;
for (const auto& port : name_to_ports.second) {
const auto& var = port->getTargetTensorPDPD()->getDesc();
nodes_dict[var->name()] = node->output(idx);
idx++;
}
}
}
34 changes: 21 additions & 13 deletions ngraph/frontend/paddlepaddle/src/model.cpp
@@ -90,40 +90,48 @@ InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::string& _path,
op_place_block.push_back(op_place);

for (const auto &output : op.outputs()) {
auto out_port = std::make_shared<OutPortPlacePDPD>(m_input_model);
op_place->addOutPort(out_port, output.parameter());
out_port->setOp(op_place);
for (const auto &var_name : output.arguments()) {
auto out_port = std::make_shared<OutPortPlacePDPD>(m_input_model);

// connect out_port and tensor
const auto& tensor = var_place_block.at(var_name);
tensor->addProducingPort(out_port);
out_port->addTargetTensor(tensor);
out_port->setTargetTensor(tensor);

// connect out_port and op
op_place->addOutPort(out_port, output.parameter());
out_port->setOp(op_place);
}
}

for (const auto &input : op.inputs()) {
auto in_port = std::make_shared<InPortPlacePDPD>(m_input_model);
op_place->addInPort(in_port, input.parameter());
in_port->setOp(op_place);
for (const auto &var_name : input.arguments()) {
auto in_port = std::make_shared<InPortPlacePDPD>(m_input_model);

// connect in_port and tensor
const auto& tensor = var_place_block.at(var_name);
tensor->addConsumingPort(in_port);
in_port->addSourceTensor(tensor);
in_port->setSourceTensor(tensor);

// connect in_port and op
op_place->addInPort(in_port, input.parameter());
in_port->setOp(op_place);
}
}

// Determine outputs and inputs
if (op.type() == "feed") {
const auto& place = op_place->getOutputPortByName("Out")->getTargetTensor(0);
const auto& var_place = std::dynamic_pointer_cast<TensorPlacePDPD>(place);
const auto& place = op_place->getOutputPortPDPD("Out", 0);
const auto& var_place = std::dynamic_pointer_cast<TensorPlacePDPD>(place->getTargetTensorPDPD());
const auto& tensor_desc = var_place->getDesc()->type().lod_tensor().tensor();
const auto& dims = tensor_desc.dims();

var_place->setElementType(TYPE_MAP[tensor_desc.data_type()]);
var_place->setPartialShape(PartialShape(std::vector<Dimension>(dims.begin(), dims.end())));
m_inputs.push_back(place);
m_inputs.push_back(var_place);
} else if (op.type() == "fetch") {
auto place = op_place->getInputPortByName("X")->getSourceTensor(0);
m_outputs.push_back(place);
auto place = op_place->getInputPortPDPD("X", 0);
m_outputs.push_back(place->getSourceTensorPDPD());
}
}
}
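Distilled from the hunk above, the wiring discipline this commit establishes for each output argument (a sketch; `model`, `tensor`, and `parameter_name` stand in for the loop variables used above):

    // One OutPortPlacePDPD per output argument: each port is linked to exactly
    // one target tensor and back to its producing op.
    auto out_port = std::make_shared<OutPortPlacePDPD>(model);
    tensor->addProducingPort(out_port);              // tensor -> port
    out_port->setTargetTensor(tensor);               // port -> its single tensor
    op_place->addOutPort(out_port, parameter_name);  // op -> port, keyed by name
    out_port->setOp(op_place);                       // port -> op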
46 changes: 4 additions & 42 deletions ngraph/frontend/paddlepaddle/src/place.cpp
@@ -88,15 +88,8 @@ Place::Ptr TensorPlacePDPD::getProducingPort() const {
MY_ASSERT(false, "Producing Port has expired.");
}

std::shared_ptr<Place> InPortPlacePDPD::getSourceTensor(int idx) const {
if (const auto& tensor = m_source_tensors[idx].lock()) {
return tensor;
}
MY_ASSERT(false, "Source Tensor has expired.");
}

std::shared_ptr<TensorPlacePDPD> InPortPlacePDPD::getSourceTensorPDPD(int idx) const {
if (const auto& tensor = m_source_tensors[idx].lock()) {
std::shared_ptr<TensorPlacePDPD> InPortPlacePDPD::getSourceTensorPDPD() const {
if (const auto& tensor = m_source_tensor.lock()) {
return tensor;
}
MY_ASSERT(false, "Source Tensor has expired.");
@@ -109,40 +102,9 @@ std::shared_ptr<OpPlacePDPD> InPortPlacePDPD::getOp() {
MY_ASSERT(false, "Operation has expired.");
}

std::vector<std::shared_ptr<TensorPlacePDPD>> InPortPlacePDPD::getSourceTensors() const {
std::vector<std::shared_ptr<TensorPlacePDPD>> source_tensors;
for (const auto & tensor: m_source_tensors) {
if (const auto& locked = tensor.lock()) {
source_tensors.push_back(locked);
} else {
MY_ASSERT(false, "Source Tensor has expired.");
}
}
return source_tensors;
}

std::shared_ptr<Place> OutPortPlacePDPD::getTargetTensor(int idx) const {
if (const auto& target_tensor = m_target_tensors.at(idx).lock()) {
std::shared_ptr<TensorPlacePDPD> OutPortPlacePDPD::getTargetTensorPDPD() const {
if (const auto& target_tensor = m_target_tensor.lock()) {
return target_tensor;
}
MY_ASSERT(false, "Target Tensor has expired.");
}

std::shared_ptr<TensorPlacePDPD> OutPortPlacePDPD::getTargetTensorPDPD(int idx) const {
if (const auto& target_tensor = m_target_tensors.at(idx).lock()) {
return target_tensor;
}
MY_ASSERT(false, "Target Tensor has expired.");
}

std::vector<std::shared_ptr<TensorPlacePDPD>> OutPortPlacePDPD::getTargetTensors() const {
std::vector<std::shared_ptr<TensorPlacePDPD>> target_tensors;
for (const auto & tensor: m_target_tensors) {
if (const auto& locked = tensor.lock()) {
target_tensors.push_back(locked);
} else {
MY_ASSERT(false, "Target Tensor has expired.");
}
}
return target_tensors;
}
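
The accessors above all follow the same weak_ptr-and-lock pattern; a self-contained sketch with illustrative types (not from this diff) shows the idea:

    #include <cassert>
    #include <memory>

    struct Tensor {};

    struct Port {
        // weak_ptr avoids a Port <-> Tensor ownership cycle; lock() yields a
        // shared_ptr only while the tensor is still alive.
        std::weak_ptr<Tensor> m_tensor;

        std::shared_ptr<Tensor> tensor() const {
            if (auto locked = m_tensor.lock())
                return locked;
            assert(false && "Tensor has expired.");
            return nullptr;
        }
    };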
