Skip to content

Commit

Permalink
Merge pull request openvinotoolkit#19 from mvafin/mvafin/pt_fe/fix_if
Browse files Browse the repository at this point in the history
Fix prim::If translator
  • Loading branch information
slyalin authored Oct 19, 2022
2 parents f1aec43 + 6356160 commit 30680e8
Show file tree
Hide file tree
Showing 7 changed files with 110 additions and 98 deletions.
3 changes: 2 additions & 1 deletion src/core/src/op/if.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -166,8 +166,9 @@ void ov::op::v8::If::validate_and_infer_types() {
auto else_node_result =
m_bodies[ELSE_BODY_INDEX]->get_results().at(else_desc->m_body_value_index)->input_value(0);

element::Type merged_type;
NODE_VALIDATION_CHECK(this,
then_node_result.get_element_type() == else_node_result.get_element_type(),
element::Type::merge(merged_type, then_node_result.get_element_type(), else_node_result.get_element_type()),
"type of then_body output is not equal type of else_body output");

// shape inference for output and associated with it body outputs
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ namespace ov {
namespace frontend {
namespace pytorch {

typedef std::map<size_t, Output<Node>> TensorMap;
typedef std::unordered_map<size_t, Output<Node>> TensorMap;

class NodeContext : public frontend::NodeContext {
public:
Expand Down
7 changes: 2 additions & 5 deletions src/frontends/pytorch/src/node_context.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,11 +37,8 @@ std::shared_ptr<ov::Model> NodeContext::convert_subgraph(size_t index) {
// Extend external context with internal tensors except Parameter nodes, because internal Parameters are created to
// link internal context with external
TensorMap ext_map(m_ext_tensor_map);
for (auto tensor : *m_tensor_map) {
auto node = tensor.second.get_node_shared_ptr();
if (!std::dynamic_pointer_cast<opset8::Parameter>(node))
ext_map[tensor.first] = tensor.second;
}
    // map::insert does not overwrite elements whose key is already present; so if real tensors exist in the outer scope, we will not add the Parameters we created in the inner scope.
ext_map.insert(m_tensor_map->begin(), m_tensor_map->end());

auto model = convert_pytorch_model(subgraph_decoder, ext_map);
// Remove unused parameters, they could be created as inputs to the parts of graph that weren't
Expand Down
79 changes: 45 additions & 34 deletions src/frontends/pytorch/src/op/if.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -33,17 +33,19 @@ OutputVector translate_if(NodeContext& context) {

std::map<size_t, ParameterVector> inputs_map;
std::map<size_t, ResultVector> outputs_map;
for (auto param : then_body->get_parameters()) {
for (const auto& param : then_body->get_parameters()) {
auto name = param->get_output_tensor(0).get_any_name();
size_t input_idx = (size_t)std::stoll(name);
FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(input_idx) == 0,
"More then one then_body input with same tensor name: ",
"More than one then_body input with same tensor name: ",
input_idx,
"; existing: ",
inputs_map.at(input_idx)[0],
" adding: ",
param);
inputs_map[input_idx] = {param, nullptr};
}
for (auto param : else_body->get_parameters()) {
for (const auto& param : else_body->get_parameters()) {
auto name = param->get_output_tensor(0).get_any_name();
size_t input_idx = (size_t)std::stoll(name);
if (inputs_map.count(input_idx)) {
Expand All @@ -52,39 +54,48 @@ OutputVector translate_if(NodeContext& context) {
inputs_map[input_idx] = {nullptr, param};
}
}
std::map<size_t, std::shared_ptr<opset8::Result>> then_body_results;
std::map<size_t, std::shared_ptr<opset8::Result>> else_body_results;
std::set<size_t> output_idxs;
for (auto result : then_body->get_results()) {
auto name = result->input(0).get_tensor().get_any_name();
OutputVector res;
const auto num_outs = context.num_of_outputs();
const auto then_results = then_body->get_results();
const auto else_results = else_body->get_results();
FRONT_END_OP_CONVERSION_CHECK(then_results.size() >= num_outs && else_results.size() >= num_outs,
"Else or then body have less outputs than prim::If requires.");
for (int i = 0; i < num_outs; i++) {
res.push_back(if_node->set_output(then_results[i], else_results[i]));
}
std::map<size_t, std::shared_ptr<opset8::Result>> extra_then_body_results;
std::map<size_t, std::shared_ptr<opset8::Result>> extra_else_body_results;
std::set<size_t> extra_output_idxs;
for (int i = num_outs; i < then_results.size(); i++) {
const auto result = then_results[i];
const auto name = result->input(0).get_tensor().get_any_name();
size_t output_idx = (size_t)std::stoll(name);
FRONT_END_OP_CONVERSION_CHECK(then_body_results.count(output_idx) == 0,
"More then one then_body output with same tensor name: ",
then_body_results.at(output_idx),
FRONT_END_OP_CONVERSION_CHECK(extra_then_body_results.count(output_idx) == 0,
"More than one then_body output with same tensor name: ",
output_idx,
"; existing: ",
extra_then_body_results.at(output_idx),
" adding: ",
result);
then_body_results[output_idx] = result;
output_idxs.insert(output_idx);
extra_then_body_results[output_idx] = result;
extra_output_idxs.insert(output_idx);
}
for (auto result : else_body->get_results()) {
auto name = result->input(0).get_tensor().get_any_name();
for (int i = num_outs; i < else_results.size(); i++) {
const auto result = else_results[i];
const auto name = result->input(0).get_tensor().get_any_name();
size_t output_idx = (size_t)std::stoll(name);
FRONT_END_OP_CONVERSION_CHECK(else_body_results.count(output_idx) == 0,
"More then one then_body output with same tensor name: ",
else_body_results.at(output_idx),
FRONT_END_OP_CONVERSION_CHECK(extra_else_body_results.count(output_idx) == 0,
"More than one else_body output with same tensor name: ",
output_idx,
"; existing: ",
extra_else_body_results.at(output_idx),
" adding: ",
result);
then_body_results[output_idx] = result;
output_idxs.insert(output_idx);
}
OutputVector res;
for (int i = 0; i < context.num_of_outputs(); i++) {
res.push_back(if_node->set_output(then_body->get_results()[i], else_body->get_results()[i]));
OV_FRONTEND_REQUIRE(output_idxs.erase(then_decoder->output(i)));
OV_FRONTEND_REQUIRE(output_idxs.erase(else_decoder->output(i)));
extra_else_body_results[output_idx] = result;
extra_output_idxs.insert(output_idx);
}
for (auto output_idx : output_idxs) {
if (!then_body_results.count(output_idx)) {
for (const auto& output_idx : extra_output_idxs) {
if (!extra_then_body_results.count(output_idx)) {
// Need to add Parameter->Result construction in then body
auto new_parameter = std::make_shared<opset8::Parameter>(element::dynamic, PartialShape::dynamic());
new_parameter->get_output_tensor(0).add_names({std::to_string(output_idx)});
Expand All @@ -94,9 +105,9 @@ OutputVector translate_if(NodeContext& context) {
then_body->validate_nodes_and_infer_types();
FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in else body");
inputs_map[output_idx][0] = new_parameter;
then_body_results[output_idx] = new_result;
extra_then_body_results[output_idx] = new_result;
std::cout << "[ WARNING ] Modified then body: " << if_node << std::endl;
} else if (!else_body_results.count(output_idx)) {
} else if (!extra_else_body_results.count(output_idx)) {
// Need to add Parameter->Result construction in else body
auto new_parameter = std::make_shared<opset8::Parameter>(element::dynamic, PartialShape::dynamic());
new_parameter->get_output_tensor(0).add_names({std::to_string(output_idx)});
Expand All @@ -106,12 +117,12 @@ OutputVector translate_if(NodeContext& context) {
else_body->validate_nodes_and_infer_types();
FRONT_END_OP_CONVERSION_CHECK(inputs_map.count(output_idx), "Input must exist in then body");
inputs_map[output_idx][1] = new_parameter;
else_body_results[output_idx] = new_result;
extra_else_body_results[output_idx] = new_result;
std::cout << "[ WARNING ] Modified else body: " << if_node << std::endl;
}
}
// Create prim::If inputs and outputs
for (auto input : inputs_map) {
for (const auto& input : inputs_map) {
if (!input_idxs.count(input.first)) {
auto external_output = context.get_tensor_from_model_or_create_input(input.first);
if_node->set_input(external_output, input.second[0], input.second[1]);
Expand All @@ -122,10 +133,10 @@ OutputVector translate_if(NodeContext& context) {
}
}
}
for (auto output_idx : output_idxs) {
for (const auto& output_idx : extra_output_idxs) {
context.add_tensor_to_context(
output_idx,
if_node->set_output(then_body_results.at(output_idx), else_body_results.at(output_idx)));
if_node->set_output(extra_then_body_results.at(output_idx), extra_else_body_results.at(output_idx)));
}
if_node->validate_and_infer_types();
return res;
Expand Down
2 changes: 1 addition & 1 deletion src/frontends/pytorch/src/op/loop.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ OutputVector translate_loop(NodeContext& context) {
inputs_map[input_idx].push_back(param);
}
}
for (auto input : inputs_map) {
for (const auto& input : inputs_map) {
if (!input_idxs.count(input.first)) {
auto external_output = context.get_tensor_from_model_or_create_input(input.first);
loop->set_invariant_inputs(external_output, input.second);
Expand Down
105 changes: 54 additions & 51 deletions src/frontends/pytorch/src/utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,11 +12,10 @@ namespace frontend {
namespace pytorch {
int COUNTER = 0;

Output<Node> make_optional_bias(Output<Node> base_op,
Output<Node> make_optional_bias(const Output<Node>& base_op,
const NodeContext& context,
size_t bias_input_idx,
std::vector<int> unsqueeze_dims) {
using namespace ngraph;
const std::vector<int>& unsqueeze_dims) {
using std::make_shared;

if (!context.input_is_none(bias_input_idx)) {
Expand All @@ -33,16 +32,15 @@ Output<Node> make_optional_bias(Output<Node> base_op,
}
}

std::shared_ptr<ov::Node> get_rank_node(ov::Output<ov::Node> node) {
std::shared_ptr<Node> get_rank_node(const Output<Node>& node) {
auto shape = std::make_shared<opset8::ShapeOf>(node);
return std::make_shared<opset8::ShapeOf>(shape);
}

Output<Node> reshape_kernel_for_group(const NodeContext& context,
Output<Node> input,
Output<Node> kernel,
const Output<Node>& input,
const Output<Node>& kernel,
int64_t groups) {
using namespace ngraph;
using std::make_shared;

auto in_shape = std::make_shared<opset8::ShapeOf>(input);
Expand Down Expand Up @@ -86,6 +84,51 @@ Output<Node> reshape_kernel_for_group(const NodeContext& context,
return make_shared<opset8::Reshape>(kernel, new_kernel_shape, false);
}

// Fallback conversion path: wraps an op that has no dedicated translator into a
// PtFrameworkNode. Any subgraphs of the op (e.g. prim::If / prim::Loop bodies)
// are converted recursively and attached as functions of the node; their
// Parameters/Results (named with tensor indices) are wired to the enclosing
// graph as invariant inputs and body outputs.
// @param context  translation context of the current PyTorch node (not owned)
// @return outputs of the created framework node, registered in the decoder
OutputVector make_framework_node(NodeContext* context) {
    auto fw_node = std::make_shared<PtFrameworkNode>(context->get_decoder(),
                                                     context->inputs(),
                                                     context->get_decoder()->num_of_outputs());
    // COUNTER makes friendly names unique across all framework nodes
    fw_node->set_friendly_name(context->get_op_type() + ":" + std::to_string(COUNTER++));

    // tensor index -> body Parameters/Results gathered across all subgraphs
    std::map<size_t, ParameterVector> inputs_map;
    std::map<size_t, ResultVector> outputs_map;
    // tensor indices that are explicit inputs of some subgraph decoder;
    // for these an external tensor may legitimately be absent
    std::set<size_t> input_idxs;
    for (size_t i = 0; i < context->get_decoder()->get_subgraph_size(); ++i) {
        auto subgraph_decoder = context->get_decoder()->get_subgraph_decoder(i);
        auto inputs = subgraph_decoder->inputs();
        input_idxs.insert(inputs.begin(), inputs.end());
        auto body = context->convert_subgraph(i);
        fw_node->set_function(i, body);
        for (const auto& param : body->get_parameters()) {
            // Parameter tensor names encode the producing tensor's index
            auto name = param->get_output_tensor(0).get_any_name();
            size_t input_idx = (size_t)std::stoll(name);
            inputs_map[input_idx].push_back(param);
        }
        for (const auto& result : body->get_results()) {
            auto name = result->input(0).get_tensor().get_any_name();
            size_t out_idx = (size_t)std::stoll(name);
            FRONT_END_OP_CONVERSION_CHECK(outputs_map.count(out_idx) == 0,
                                          "More than one body output with same tensor name.");
            outputs_map[out_idx].push_back(result);
        }
    }
    for (const auto& input : inputs_map) {
        if (!input_idxs.count(input.first)) {
            // Tensor captured from the outer scope: must exist (or become a model input)
            auto external_output = context->get_tensor_from_model_or_create_input(input.first);
            fw_node->set_invariant_inputs(external_output, input.second);
        } else {
            // Explicit subgraph input: connect only if the outer tensor is present
            auto external_output = context->get_tensor_from_model(input.first);
            if (external_output.get_node()) {
                fw_node->set_invariant_inputs(external_output, input.second);
            }
        }
    }
    for (const auto& output : outputs_map) {
        // Expose body results through the node and publish them to the outer context
        context->add_tensor_to_context(output.first, fw_node->set_body_outputs(output.second));
    }
    return context->get_decoder()->mark_node(fw_node)->outputs();
}

OutputVector convert_node(NodeContext* context) {
try {
auto CONVERTERS_MAP = get_supported_ops();
Expand Down Expand Up @@ -131,48 +174,8 @@ OutputVector convert_node(NodeContext* context) {
context->get_decoder()->mark_node(fw_node);
return outputs;
}
auto fw_node = std::make_shared<PtFrameworkNode>(context->get_decoder(),
context->inputs(),
context->get_decoder()->num_of_outputs());
fw_node->set_friendly_name(context->get_op_type() + ":" + std::to_string(COUNTER++));

std::map<size_t, ParameterVector> inputs_map;
std::map<size_t, ResultVector> outputs_map;
std::set<size_t> input_idxs;
for (size_t i = 0; i < context->get_decoder()->get_subgraph_size(); ++i) {
auto subgraph_decoder = context->get_decoder()->get_subgraph_decoder(i);
auto inputs = subgraph_decoder->inputs();
input_idxs.insert(inputs.begin(), inputs.end());
auto body = context->convert_subgraph(i);
fw_node->set_function(i, body);
for (auto param : body->get_parameters()) {
auto name = param->get_output_tensor(0).get_any_name();
size_t input_idx = (size_t)std::stoll(name);
inputs_map[input_idx].push_back(param);
}
for (auto result : body->get_results()) {
auto name = result->input(0).get_tensor().get_any_name();
size_t out_idx = (size_t)std::stoll(name);
FRONT_END_OP_CONVERSION_CHECK(outputs_map.count(out_idx) == 0,
"More then one body output with same tensor name.");
outputs_map[out_idx].push_back(result);
}
}
for (auto input : inputs_map) {
if (!input_idxs.count(input.first)) {
auto external_output = context->get_tensor_from_model_or_create_input(input.first);
fw_node->set_invariant_inputs(external_output, input.second);
} else {
auto external_output = context->get_tensor_from_model(input.first);
if (external_output.get_node()) {
fw_node->set_invariant_inputs(external_output, input.second);
}
}
}
for (auto output : outputs_map) {
context->add_tensor_to_context(output.first, fw_node->set_body_outputs(output.second));
}
return context->get_decoder()->mark_node(fw_node)->outputs();

return make_framework_node(context);
}

std::shared_ptr<ov::Model> convert_pytorch_model(std::shared_ptr<Decoder> pytorch_model,
Expand Down Expand Up @@ -295,12 +298,12 @@ std::shared_ptr<ov::Model> convert_pytorch_model(std::shared_ptr<Decoder> pytorc

// Since parameters can be added we need to list all current parameters
std::set<size_t> param_names;
for (auto param : parameters) {
for (const auto& param : parameters) {
auto name = param->get_output_tensor(0).get_any_name();
size_t input_idx = (size_t)std::stoll(name);
param_names.insert(input_idx);
}
for (auto tensor : mutated_tensors) {
for (const auto& tensor : mutated_tensors) {
if (param_names.count(tensor)) {
OV_FRONTEND_REQUIRE(tensor_map.count(tensor));
// model input was mutated we need to make a result for it
Expand Down
10 changes: 5 additions & 5 deletions src/frontends/pytorch/src/utils.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,16 +15,16 @@ class FrameworkNode;
namespace frontend {
namespace pytorch {

Output<Node> make_optional_bias(Output<Node> base_op,
Output<Node> make_optional_bias(const Output<Node>& base_op,
const NodeContext& context,
size_t bias_input_idx,
std::vector<int> unsqueeze_dims = {});
const std::vector<int>& unsqueeze_dims = {});

std::shared_ptr<ov::Node> get_rank_node(ov::Output<ov::Node> node);
std::shared_ptr<ov::Node> get_rank_node(const Output<Node>& node);

Output<Node> reshape_kernel_for_group(const NodeContext& context,
Output<Node> input,
Output<Node> kernel,
const Output<Node>& input,
const Output<Node>& kernel,
int64_t groups);

std::shared_ptr<ov::Model> convert_pytorch_model(std::shared_ptr<Decoder> pytorch_model,
Expand Down

0 comments on commit 30680e8

Please sign in to comment.