Fix TI deserialization (#4285)
* XmlDeserializer relies on the parameter/result (input/output) index that is recorded when the node is inserted into the Function (see the sketch below)

The XML parameter/result order is now irrelevant.

* drop stack usage - keep a single class member instead

* try to fix CentOS build

* fix after review

* Drop std::map in favor of a custom structure

* reorder layers in the TI body of the test IR that failed on current master

Co-authored-by: Patryk Elszkowski <[email protected]>
pelszkow and Patryk Elszkowski authored Feb 12, 2021
1 parent deaf1f9 commit 3dbecf0
Showing 3 changed files with 91 additions and 50 deletions.
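For orientation, here is a minimal, self-contained sketch of the indexing scheme this commit introduces, using simplified, hypothetical names (IoMap mirrors the structure added to ie_ir_parser.hpp): each Parameter/Result layer's position in the Function is recorded at the moment the node is created, keyed by its XML layer id, so port_map resolution no longer depends on the order in which layers appear in the XML.

// Minimal sketch, not the actual parser code: hypothetical, simplified names.
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

struct IoMap {
    std::unordered_map<size_t, uint64_t> inputs;   // xml layer id -> Parameter index in Function
    std::unordered_map<size_t, uint64_t> outputs;  // xml layer id -> Result index in Function
};

int main() {
    // Body layers may appear in the XML in any order.
    const std::vector<std::pair<size_t, std::string>> xml_layers = {
        {13, "Result"}, {1, "Const"}, {0, "Parameter"}, {9, "Result"}};

    IoMap io_map;
    std::vector<size_t> parameters, results;  // stand-ins for the created nGraph nodes

    for (const auto& [layer_id, type] : xml_layers) {
        if (type == "Parameter") {
            // Record the Parameter's index in the Function at insertion time.
            io_map.inputs.insert({layer_id, parameters.size()});
            parameters.push_back(layer_id);
        } else if (type == "Result") {
            // Same for Results.
            io_map.outputs.insert({layer_id, results.size()});
            results.push_back(layer_id);
        }
    }

    // Later, a port_map entry referring to internal_layer_id=0 resolves to the
    // Parameter's real position in the Function, regardless of XML order.
    std::cout << "Parameter 0 -> body input index " << io_map.inputs.at(0) << "\n";
    std::cout << "Result 13   -> body output index " << io_map.outputs.at(13) << "\n";
}

In the actual change below, this recording happens in parse_function() when Parameter/Result nodes are constructed, and updated_io_map() extends the map with the sub-graph body's own Parameters/Results before the port_map is resolved.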
69 changes: 44 additions & 25 deletions inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
@@ -47,33 +47,34 @@ IRParser::IRParser(size_t version, const std::vector<InferenceEngine::IExtension
}
}

std::map<uint64_t, uint64_t> V10Parser::XmlDeserializer::map_type_in_function(const pugi::xml_node& node,
const std::string map_type) {
std::map<uint64_t, uint64_t> type_id_in_function;
uint64_t map_type_number = 0;
V10Parser::XmlDeserializer::IoMap V10Parser::XmlDeserializer::updated_io_map(const pugi::xml_node& node) {
auto body_node = node.child("body");

if (body_node.empty()) {
THROW_IE_EXCEPTION << "Missing body part.";
}

// Fill map: parameter/result id to parameter/result number in Function

auto extend_io_map = io_map;

FOREACH_CHILD(layer, body_node.child("layers"), "layer") {
auto type = XMLParseUtils::GetStrAttr(layer, "type");

if (type == map_type) {
if (type == "Parameter") {
auto id = XMLParseUtils::GetUIntAttr(layer, "id");
extend_io_map.inputs.insert({id, -1}); // try add as unconnected
} else if (type == "Result") {
auto id = XMLParseUtils::GetUIntAttr(layer, "id");
type_id_in_function.emplace(id, map_type_number);
map_type_number++;
extend_io_map.outputs.insert({id, -1}); // try add as unconnected
}
}
return type_id_in_function;
return extend_io_map;
}


std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> V10Parser::XmlDeserializer::parseInputDescription(const pugi::xml_node& node) {
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> inputs;
std::map<uint64_t, uint64_t> param_id_in_function = map_type_in_function(node, "Parameter");
std::map<uint64_t, uint64_t> result_id_in_function = map_type_in_function(node, "Result");
const auto up_io_map = updated_io_map(node);

// Parse PortMap: external_port_id for inputs does not always appear in consecutive order
std::map<uint64_t, pugi::xml_node> input_map;
@@ -96,9 +97,11 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> V10
int64_t end = XMLParseUtils::GetInt64Attr(xml_input, "end", -1);
int64_t part_size = XMLParseUtils::GetInt64Attr(xml_input, "part_size", 1);

const auto input_index = up_io_map.inputs.at(body_parameter_index);

inputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::SliceInputDescription>
(ti_input_index,
param_id_in_function[body_parameter_index],
input_index,
start,
stride,
part_size,
@@ -112,10 +115,14 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> V10

if (to_layer == body_parameter_index) {
size_t from_layer = XMLParseUtils::GetUIntAttr(xml_edge, "from-layer");

const auto input_index = up_io_map.inputs.at(body_parameter_index);
const auto output_index = up_io_map.outputs.at(from_layer);

inputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::MergedInputDescription>
(ti_input_index,
param_id_in_function[body_parameter_index],
result_id_in_function[from_layer]));
input_index,
output_index));

is_back_edge_exist = true;
break;
@@ -125,9 +132,11 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> V10
// ti_input_index = -1 means that Parameter of the body is not connected to inputs of TensorIterator
// and is used only for internal needs.
if (!is_back_edge_exist && ti_input_index >= 0) {
const auto input_index = up_io_map.inputs.at(body_parameter_index);

inputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::InvariantInputDescription>
(ti_input_index,
param_id_in_function[body_parameter_index]));
input_index));
}
}
}
@@ -136,7 +145,7 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> V10

std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> V10Parser::XmlDeserializer::parseOutputDescription(const pugi::xml_node& node) {
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> outputs;
std::map<uint64_t, uint64_t> result_id_in_function = map_type_in_function(node, "Result");
const auto up_io_map = updated_io_map(node);

// Parse PortMap: outputs
std::map<int64_t, pugi::xml_node> output_map;
@@ -162,8 +171,10 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> V1
int64_t end = XMLParseUtils::GetInt64Attr(xml_output, "end", -1);
int64_t part_size = XMLParseUtils::GetInt64Attr(xml_output, "part_size", 1);

const auto output_index = up_io_map.outputs.at(body_result_index);

outputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::ConcatOutputDescription>
(result_id_in_function[body_result_index],
(output_index,
output_number,
start,
stride,
@@ -172,8 +183,10 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> V1
axis));
} else {
// otherwise create ngraph::TensorIterator::BodyOutput. -1 means last iteration.
const auto output_index = up_io_map.outputs.at(body_result_index);

outputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::BodyOutputDescription>
(result_id_in_function[body_result_index],
(output_index,
output_number,
-1));
}
@@ -185,10 +198,10 @@ std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> V1

ngraph::op::v5::Loop::SpecialBodyPorts V10Parser::XmlDeserializer::parsePurposeAttribute(const pugi::xml_node& node) {
ngraph::op::v5::Loop::SpecialBodyPorts result = {-1, -1};
std::map<uint64_t, uint64_t> params = map_type_in_function(node, "Parameter");
std::map<uint64_t, uint64_t> results = map_type_in_function(node, "Result");
const auto up_io_map = updated_io_map(node);

NGRAPH_CHECK(!params.empty() || !results.empty(), "No parameters or results found in body Function.");
NGRAPH_CHECK(!up_io_map.inputs.empty() || !up_io_map.outputs.empty(),
"No parameters or results found in body Function.");

// Parse PortMap: external_port_id for inputs/outputs does not always appear in consecutive order
std::map<uint64_t, pugi::xml_node> input_map;
@@ -207,7 +220,7 @@ ngraph::op::v5::Loop::SpecialBodyPorts V10Parser::XmlDeserializer::parsePurposeA
auto purpose = XMLParseUtils::GetStrAttr(xml_input, "purpose", "");
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id");
if (purpose == "current_iteration") {
result.current_iteration_input_idx = params[body_parameter_index];
result.current_iteration_input_idx = up_io_map.inputs.at(body_parameter_index);
}
}

@@ -216,7 +229,7 @@ ngraph::op::v5::Loop::SpecialBodyPorts V10Parser::XmlDeserializer::parsePurposeA
auto purpose = XMLParseUtils::GetStrAttr(xml_output, "purpose", "");
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id");
if (purpose == "execution_condition") {
result.body_condition_output_idx = results[body_parameter_index];
result.body_condition_output_idx = up_io_map.outputs.at(body_parameter_index);
}
}

@@ -359,13 +372,18 @@ void V10Parser::XmlDeserializer::on_adapter(const std::string& name, ngraph::Val
adapter.set(ngraph_function);
}


std::shared_ptr<ngraph::Function> V10Parser::XmlDeserializer::parse_function(const pugi::xml_node& root, const Blob::CPtr& weights) {
OV_ITT_TASK_CHAIN(taskChain, itt::domains::V10Reader_RT, "V10Parser", "Parse");

using node_params = struct {
struct edge {
size_t fromLayerId, fromPortId, toPortId;
};
struct node_params {
pugi::xml_node xml;
GenericLayerParams params;
};

std::map<size_t, node_params> params;

std::vector<size_t> outputs;
@@ -383,7 +401,6 @@ std::shared_ptr<ngraph::Function> V10Parser::XmlDeserializer::parse_function(con
}
}

using edge = struct { size_t fromLayerId, fromPortId, toPortId; };
std::map<size_t, std::vector<edge>> edges;
std::map<size_t, std::shared_ptr<ngraph::Node>> id_to_node;

@@ -451,10 +468,12 @@ std::shared_ptr<ngraph::Function> V10Parser::XmlDeserializer::parse_function(con
// }

if (const auto& parameter_node = std::dynamic_pointer_cast<ngraph::op::Parameter>(node)) {
io_map.inputs.insert({layer_id, parameter_nodes.size()});
parameter_nodes.emplace_back(parameter_node);
}

if (const auto& result_node = std::dynamic_pointer_cast<ngraph::op::Result>(node)) {
io_map.outputs.insert({layer_id, result_nodes.size()});
result_nodes.emplace_back(result_node);
}

37 changes: 27 additions & 10 deletions inference-engine/src/readers/ir_reader/ie_ir_parser.hpp
@@ -183,10 +183,14 @@ class V10Parser : public IParser {

class XmlDeserializer : public ngraph::AttributeVisitor {
public:
explicit XmlDeserializer(const pugi::xml_node& node, const Blob::CPtr& weights,
const std::unordered_map<std::string, ngraph::OpSet>& opsets,
std::unordered_map<std::string, std::shared_ptr<ngraph::Variable>>& variables)
: node(node), weights(weights), opsets(opsets), variables(variables) {}
/// TODO: move whole class to src file
explicit XmlDeserializer(
const pugi::xml_node& node,
const Blob::CPtr& weights,
const std::unordered_map<std::string, ngraph::OpSet>& opsets,
std::unordered_map<std::string, std::shared_ptr<ngraph::Variable>>& variables)
: node(node), weights(weights), opsets(opsets), variables(variables) {}

void on_adapter(const std::string& name, ngraph::ValueAccessor<std::string>& value) override {
std::string val;
if (!getStrAttribute(node.child("data"), name, val)) return;
@@ -251,11 +255,26 @@ class V10Parser : public IParser {
adapter.set(value);
}


private:
struct IoMap {
using NodeIdToIoIndex = std::unordered_map<size_t /*xml node id*/, uint64_t /*body io index*/>;
NodeIdToIoIndex inputs;
NodeIdToIoIndex outputs;
};

//TODO move data to the bottom (or top)
const pugi::xml_node node;
const Blob::CPtr& weights;
const std::unordered_map<std::string, ngraph::OpSet>& opsets;
std::unordered_map<std::string, std::shared_ptr<ngraph::Variable>>& variables;

///
/// Stores the parameter/result order recorded during Function creation;
/// used later when building Input/Output Descriptions for SubGraph ops.
///
IoMap io_map;

/// \brief Traverses port_map in order to create vector of InputDescription shared_ptrs.
/// Shall be used only for ops which have port_map attribute.
/// \param node xml op representation
@@ -266,12 +285,10 @@ class V10Parser : public IParser {
/// \param node xml op representation
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> parseOutputDescription(
const pugi::xml_node& node);
/// \brief Traverses nGraph body function for specified op type and creates a map of all
/// op iterations. Map constains type id and assigned to it consecutive number starting from 0.
/// \param node xml op representation
/// \param type op type name to find
/// \return map container
std::map<uint64_t, uint64_t> map_type_in_function(const pugi::xml_node& node, std::string type);

//TODO consider calling this only once per layer/TI-Loop node
IoMap updated_io_map(const pugi::xml_node& node);

/// \brief Traverses xml node representation in order to create nGraph function for it.
/// \param node xml node representation
/// \param weights weights attached to current node
@@ -74,16 +74,11 @@
</back_edges>
<body>
<layers>
<layer id="0" name="20" type="Parameter" version="opset1">
<data element_type="f32" shape="1,1,512"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<!--
***************************************************************************************
* nodes are placed unordered to check XmlDeserializer does not depend on layers order *
***************************************************************************************
-->
<layer id="1" name="7_const" type="Const" version="opset1">
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
@@ -193,6 +188,15 @@
</port>
</output>
</layer>
<layer id="13" name="18/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>1</dim>
<dim>512</dim>
</port>
</input>
</layer>
<layer id="9" name="471/outport/0/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
@@ -236,14 +240,15 @@
</port>
</output>
</layer>
<layer id="13" name="18/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<layer id="0" name="20" type="Parameter" version="opset1">
<data element_type="f32" shape="1,1,512"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>512</dim>
</port>
</input>
</output>
</layer>
</layers>
<edges>
@@ -297,4 +302,4 @@
<edge from-layer="3" from-port="4" to-layer="5" to-port="0"/>
<edge from-layer="3" from-port="5" to-layer="6" to-port="0"/>
</edges>
</net>
</net>
