
[ONNX] Handle optional outputs for Dropout and MaxPool #4143

Merged

Changes from 13 commits
26 commits
1de9ee0
first version of dropout and maxpool impl, added ignoring optional out…
Feb 1, 2021
e681f32
more tests, impl refactor
Feb 2, 2021
b20a4af
Added tests to dropout in opsets<12
Feb 2, 2021
3ad9eca
added tests for MaxPool
Feb 2, 2021
35dbe66
update xfail list
Feb 2, 2021
d7e4390
move dropout impl to cpp
Feb 2, 2021
9da00f5
fixed is_test bug
Feb 2, 2021
8ef8b5b
added dropout in opset 7
Feb 2, 2021
a767c5d
typo
Feb 2, 2021
f814085
added no const ratio test
Feb 5, 2021
1db819f
Merge remote-tracking branch 'upstream/master' into mbencer/HandleOnn…
Feb 5, 2021
3621d50
remove checking legacy attribute
Feb 8, 2021
7626b23
Merge remote-tracking branch 'upstream/master' into mbencer/HandleOnn…
Feb 8, 2021
ab41cd3
removed not needed code
Feb 8, 2021
053df15
enable default mask path
Feb 8, 2021
cc9a07d
Ignore ratio in training mode
Feb 8, 2021
64a0a3f
update test backend list
Feb 8, 2021
733b3a7
fixed constant bool network, setting precision to output blobs
Feb 10, 2021
7a29227
Merge remote-tracking branch 'upstream/master' into mbencer/HandleOnn…
Feb 10, 2021
869864e
ignore not used test values
Feb 10, 2021
566f7f5
Merge remote-tracking branch 'upstream/master' into mbencer/HandleOnn…
Feb 10, 2021
38f3bb4
Merge remote-tracking branch 'upstream/master' into mbencer/HandleOnn…
Feb 10, 2021
a0478fd
removed check constant->get_output_size()
Feb 10, 2021
0d8c1e9
dropout review remarks
Feb 11, 2021
c316bcc
Merge remote-tracking branch 'upstream/master' into mbencer/HandleOnn…
Feb 11, 2021
c762729
Merge remote-tracking branch 'upstream/master' into mbencer/HandleOnn…
Feb 12, 2021
6 changes: 5 additions & 1 deletion ngraph/frontend/onnx_import/src/core/graph.cpp
@@ -223,7 +223,11 @@ namespace ngraph
OutputVector results;
for (const auto& output : m_graph_proto->output())
{
results.emplace_back(get_ng_node_from_cache(output.name()));
const auto& ng_output = get_ng_node_from_cache(output.name());
if (!ngraph::op::is_null(ng_output)) // ignore optional outputs
{
results.emplace_back(ng_output);
}
}
return results;
}
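
For context, a trailing optional output in ONNX is requested simply by listing it in the node's output list; omitting it (or naming it with an empty string) skips it. A minimal Python sketch, with illustrative tensor names, of the single-output Dropout that this results loop now handles:

from onnx import helper

# Dropout with only the required data output; the optional mask output
# is omitted, so the importer represents it as a NullNode, which the
# loop above filters out of the graph results.
node = helper.make_node("Dropout", inputs=["x"], outputs=["y"])
print(node.output)  # ['y']
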
136 changes: 136 additions & 0 deletions ngraph/frontend/onnx_import/src/op/dropout.cpp
@@ -0,0 +1,136 @@
//*****************************************************************************
// Copyright 2017-2021 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <memory>

#include "core/null_node.hpp"
#include "default_opset.hpp"
#include "exceptions.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "op/dropout.hpp"

namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace
{
OutputVector
build_dropout(const Node& node, float drop_probability, bool training_mode)
{
CHECK_VALID_NODE(
node,
drop_probability == 0 || !training_mode,
"Training mode is not supported for Dropout op if drop_probability is not "
"equal 0");

const auto input_data = node.get_ng_inputs().at(0);
const bool return_mask = node.get_outputs_size() > 1;

if (return_mask)
{
NGRAPH_WARN << "Default mask for Dropout is ignored, "
<< "because of unsupported constant networks";
/*const auto mask = std::make_shared<default_opset::Broadcast>(
default_opset::Constant::create(ngraph::element::boolean,
Shape{}, {true}),
std::make_shared<default_opset::ShapeOf>(input_data));*/
// If constant network is supported mask should be returned instead of
// NullNode (ticket 48055)
mbencer marked this conversation as resolved.
Show resolved Hide resolved
return {input_data, std::make_shared<NullNode>()};
}
else
{
return {input_data};
}
}
}

namespace set_12
{
OutputVector dropout(const Node& node)
{
const auto ng_inputs = node.get_ng_inputs();
// the seed attribute is ignored because training mode is not supported anyway

// default values of inputs
double ratio = 0.5;
bool training_mode = false;

if (ng_inputs.size() > 1)
{
if (!ngraph::op::is_null(ng_inputs.at(1)))
{
CHECK_VALID_NODE(
node,
ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()),
"Not constant (or omitted) ratio input is not supported.");
ratio = as_type_ptr<default_opset::Constant>(
ng_inputs.at(1).get_node_shared_ptr())
->cast_vector<double>()[0];
}
}
if (ng_inputs.size() > 2)
{
if (!ngraph::op::is_null(ng_inputs.at(2)))
{
CHECK_VALID_NODE(
node,
ngraph::op::is_constant(ng_inputs.at(2).get_node_shared_ptr()),
"Not constant (or omitted) training_mode input is not supported.");
training_mode = as_type_ptr<default_opset::Constant>(
ng_inputs.at(2).get_node_shared_ptr())
->cast_vector<bool>()[0];
}
}
return build_dropout(node, ratio, training_mode);
}
} // namespace set_12

namespace set_7
{
OutputVector dropout(const Node& node)
{
// "is_test" attribute was removed
const bool training_mode = false;
const auto ratio = node.get_attribute_value<float>("ratio", 0.5f);

return build_dropout(node, ratio, training_mode);
}
} // namespace set_7

namespace set_1
{
OutputVector dropout(const Node& node)
{
// legacy consumed_inputs attribute ignored
const bool training_mode = !node.get_attribute_value<int64_t>("is_test", 0);
const auto ratio = node.get_attribute_value<float>("ratio", 0.5f);

return build_dropout(node, ratio, training_mode);
}
} // namespace set_1

} // namespace op

} // namespace onnx_import

} // namespace ngraph
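
A note on the three opset namespaces: in ONNX, Dropout-1 carries an is_test attribute, Dropout-7 drops it, and Dropout-12 turns ratio and training_mode into optional inputs, which is why set_12 inspects inputs rather than attributes. A sketch of the opset-12 node form in Python (initializer values are illustrative):

from onnx import helper, TensorProto

# opset-12 Dropout: ratio and training_mode arrive as optional inputs
# rather than attributes; this importer additionally requires both to
# be constants, as enforced by the CHECK_VALID_NODE calls above.
ratio = helper.make_tensor("ratio", TensorProto.FLOAT, [], [0.3])
training_mode = helper.make_tensor("training_mode", TensorProto.BOOL, [], [0])
node = helper.make_node(
    "Dropout",
    inputs=["x", "ratio", "training_mode"],
    outputs=["y", "mask"],
    seed=0,
)
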
22 changes: 11 additions & 11 deletions ngraph/frontend/onnx_import/src/op/dropout.hpp
@@ -16,10 +16,6 @@

#pragma once

#include <memory>

#include "core/null_node.hpp"
#include "ngraph/node.hpp"
#include "onnx_import/core/node.hpp"

namespace ngraph
@@ -28,15 +28,19 @@ namespace ngraph
{
namespace op
{
namespace set_12
{
OutputVector dropout(const Node& node);
} // namespace set_12

namespace set_7
{
OutputVector dropout(const Node& node);
} // namespace set_7

namespace set_1
{
inline OutputVector dropout(const Node& node)
{
// First value is actual output of Dropout,
// the second one is just a placeholder for optional trailing output.
return {node.get_ng_inputs().at(0).get_node_shared_ptr(),
std::make_shared<NullNode>()};
}
OutputVector dropout(const Node& node);
} // namespace set_1

} // namespace op
6 changes: 6 additions & 0 deletions ngraph/frontend/onnx_import/src/op/max_pool.cpp
@@ -17,6 +17,7 @@
#include <memory>

#include "core/null_node.hpp"
#include "ngraph/log.hpp"
#include "ngraph/op/max_pool.hpp"
#include "op/max_pool.hpp"
#include "utils/pooling_factory.hpp"
@@ -31,6 +32,11 @@ namespace ngraph
{
OutputVector max_pool(const Node& node)
{
if (node.get_outputs_size() > 1)
{
NGRAPH_WARN
<< "Indices output is not supported for MaxPooling and was ignored";
}
auto max_pool = pooling::PoolingFactory(node).make_max_pool();
max_pool.emplace_back(std::make_shared<NullNode>()); // Indices (optional)
return max_pool;
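
For reference, the two-output ONNX MaxPool form that triggers this warning; a minimal Python sketch with illustrative names:

from onnx import helper

# MaxPool requesting the optional Indices output; the importer emits
# the warning above and substitutes a NullNode for "indices".
node = helper.make_node(
    "MaxPool",
    inputs=["x"],
    outputs=["y", "indices"],
    kernel_shape=[2, 2],
    strides=[2, 2],
)
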
2 changes: 2 additions & 0 deletions ngraph/frontend/onnx_import/src/ops_bridge.cpp
@@ -338,6 +338,8 @@ namespace ngraph
REGISTER_OPERATOR("Div", 1, div);
REGISTER_OPERATOR("Div", 7, div);
REGISTER_OPERATOR("Dropout", 1, dropout);
REGISTER_OPERATOR("Dropout", 7, dropout);
REGISTER_OPERATOR("Dropout", 12, dropout);
REGISTER_OPERATOR("Elu", 1, elu);
REGISTER_OPERATOR("Equal", 1, equal);
REGISTER_OPERATOR("Erf", 1, erf);
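
The three Dropout registrations dispatch by opset version: ONNX semantics pick the newest operator version that does not exceed the model's opset import. A small Python illustration of that rule (the handler labels are just names):

handlers = {1: "set_1::dropout", 7: "set_7::dropout", 12: "set_12::dropout"}

def resolve(model_opset):
    # newest registered since-version that is <= the model's opset import
    return handlers[max(v for v in handlers if v <= model_opset)]

assert resolve(6) == "set_1::dropout"
assert resolve(11) == "set_7::dropout"
assert resolve(13) == "set_12::dropout"
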
6 changes: 4 additions & 2 deletions ngraph/python/tests/__init__.py
@@ -107,8 +107,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True):
xfail_issue_38699 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"ai.onnx.preview.training.Gradient")
xfail_issue_38701 = xfail_test(reason="RuntimeError: unsupported element type: STRING")
xfail_issue_38705 = xfail_test(reason="IndexError: deque::_M_range_check: __n (which is 0)"
">= this->size() (which is 0)")
xfail_issue_38706 = xfail_test(reason="RuntimeError: output_3.0 has zero dimension which is not allowed")
xfail_issue_38707 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"SoftmaxCrossEntropyLoss")
@@ -158,6 +156,7 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True):
"ai.onnx.preview.training.Adagrad")
xfail_issue_38736 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"NegativeLogLikelihoodLoss")
xfail_issue_48052 = xfail_test(reason="Dropout op is not supported in training mode")
xfail_issue_45177 = xfail_test(reason="RuntimeError: axes has zero dimension which is not allowed")
xfail_issue_45180 = xfail_test(reason="RuntimeError: Unsupported dynamic op: ReduceSum")
xfail_issue_44839 = xfail_test(reason="Huge computation mismatch")
@@ -182,6 +181,9 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True):
xfail_issue_47330 = xfail_test(reason="RuntimeError: Eltwise node with name `[name]` doesn't support "
"FP64 precision.")
xfail_issue_47337 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::OneHot")
xfail_issue_33593 = xfail_test(reason="The current implementation of MaxPool doesn't support the indices output")
xfail_issue_48055 = xfail_test(reason="Dropout doesn't return a mask in non-training mode "
"because of the lack of constant network support")

# Model MSFT issues:
xfail_issue_37957 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
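
These markers are applied as decorators to individual backend tests; a usage sketch (test name and import path assumed from the file location):

from tests import xfail_issue_48052

@xfail_issue_48052
def test_training_dropout():
    # expected to fail (strict xfail) until training-mode Dropout is supported
    ...
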
24 changes: 14 additions & 10 deletions ngraph/python/tests/test_onnx/test_backend.py
@@ -44,7 +44,6 @@
xfail_issue_38701,
xfail_issue_33595,
xfail_issue_33651,
xfail_issue_38705,
xfail_issue_38706,
xfail_issue_38736,
xfail_issue_38707,
@@ -89,7 +88,10 @@
xfail_issue_46765,
xfail_issue_47317,
xfail_issue_47323,
xfail_issue_47330)
xfail_issue_47330,
xfail_issue_48052,
xfail_issue_33593,
xfail_issue_48055)


def expect_fail(test_case_path, xfail): # type: (str) -> None
@@ -287,14 +289,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_only_bigrams_skip0_cpu",
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu",
"OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_skip5_cpu"),
(xfail_issue_38705,
"OnnxBackendNodeModelTest.test_training_dropout_mask_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_default_mask_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_zero_ratio_mask_cpu",
"OnnxBackendNodeModelTest.test_maxpool_with_argmax_2d_precomputed_strides_cpu",
"OnnxBackendNodeModelTest.test_maxpool_with_argmax_2d_precomputed_pads_cpu",
"OnnxBackendNodeModelTest.test_dropout_default_mask_cpu",
"OnnxBackendNodeModelTest.test_dropout_default_mask_ratio_cpu"),
(xfail_issue_38706,
"OnnxBackendNodeModelTest.test_split_zero_size_splits_cpu"),
(xfail_issue_38736,
@@ -618,6 +612,11 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None
(xfail_issue_38735,
"OnnxBackendNodeModelTest.test_adagrad_multiple_cpu",
"OnnxBackendNodeModelTest.test_adagrad_cpu"),
(xfail_issue_48052,
"OnnxBackendNodeModelTest.test_training_dropout_mask_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_default_mask_cpu",
"OnnxBackendNodeModelTest.test_training_dropout_zero_ratio_mask_cpu",
"OnnxBackendNodeModelTest.test_dropout_default_mask_ratio_cpu",),
(xfail_issue_45177,
"OnnxBackendNodeModelTest.test_reduce_sum_default_axes_keepdims_example_cpu",
"OnnxBackendNodeModelTest.test_reduce_sum_default_axes_keepdims_random_cpu",
@@ -682,6 +681,11 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None
"OnnxBackendNodeModelTest.test_squeeze_negative_axes_cpu",),
(xfail_issue_44976,
"OnnxBackendNodeModelTest.test_quantizelinear_axis_cpu",),
(xfail_issue_33593,
"OnnxBackendNodeModelTest.test_maxpool_with_argmax_2d_precomputed_strides_cpu",
"OnnxBackendNodeModelTest.test_maxpool_with_argmax_2d_precomputed_pads_cpu",),
(xfail_issue_48055,
"OnnxBackendNodeModelTest.test_dropout_default_mask_cpu",)
]

for test_group in tests_expected_to_fail:
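
The loop body is elided above; assuming it mirrors expect_fail's signature, it presumably unpacks each group like this:

for test_group in tests_expected_to_fail:
    xfail, test_cases = test_group[0], test_group[1:]
    for test_case in test_cases:
        expect_fail(test_case, xfail)
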
@@ -0,0 +1,56 @@
ir_version: 7
producer_name: "backend-test"
graph {
node {
input: "x"
output: "y"
op_type: "Dropout"
attribute {
name: "seed"
i: 0
type: INT
}
}
name: "test_dropout_default_mask"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 4
}
dim {
dim_value: 5
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 4
}
dim {
dim_value: 5
}
}
}
}
}
}
opset_import {
version: 12
}
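
These .prototxt test models are the human-readable text form of an ONNX ModelProto; a loading sketch in Python (file name hypothetical):

import onnx
from google.protobuf import text_format

with open("dropout_default_mask.prototxt") as f:  # hypothetical path
    model = text_format.Parse(f.read(), onnx.ModelProto())
onnx.checker.check_model(model)
# elem_type 1 in the tensor_type above corresponds to onnx.TensorProto.FLOAT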