Extend dynamic shape support for ops which use auto padding mode (ope…
Mateusz Bencer authored and mryzhov committed Aug 26, 2020
1 parent d01feff commit 5b8fe8c
Showing 7 changed files with 515 additions and 77 deletions.
56 changes: 35 additions & 21 deletions ngraph/src/ngraph/op/avg_pool.cpp
@@ -93,39 +93,53 @@ void op::v1::AvgPool::validate_and_infer_types()
     }
 
     const PartialShape& arg_shape = get_input_partial_shape(0);
 
-    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
+    auto output_shape = PartialShape::dynamic();
+    if (arg_shape.rank().is_static())
     {
-        if (arg_shape.is_static())
+        output_shape = std::vector<Dimension>(arg_shape.rank().get_length(), Dimension::dynamic());
+        if (arg_shape.rank().get_length() > 1)
+        {
+            output_shape[0] = arg_shape[0]; // batch size
+        }
+        if (arg_shape.rank().get_length() > 2)
         {
-            CoordinateDiff pads_end, pads_begin;
-            infer_auto_padding(arg_shape.to_shape(),
-                               m_kernel,
-                               m_strides,
-                               Strides(m_kernel.size(), 1), // No dilation
-                               m_auto_pad,
-                               pads_end,
-                               pads_begin);
-            m_pads_end = Shape(pads_end.begin(), pads_end.end());
-            m_pads_begin = Shape(pads_begin.begin(), pads_begin.end());
+            output_shape[1] = arg_shape[1]; // channel size
         }
     }
 
+    bool update_auto_padding_succeed = true;
+    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
+    {
+        CoordinateDiff pads_end, pads_begin;
+        update_auto_padding_succeed =
+            try_apply_auto_padding(arg_shape,
+                                   m_kernel,
+                                   m_strides,
+                                   Strides(m_kernel.size(), 1), // No dilation
+                                   m_auto_pad,
+                                   pads_end,
+                                   pads_begin);
+        m_pads_end = Shape(pads_end.begin(), pads_end.end());
+        m_pads_begin = Shape(pads_begin.begin(), pads_begin.end());
+    }
+
     // infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
     // now still take Shape (no negative padding).
     CoordinateDiff pads_begin(m_pads_begin.begin(), m_pads_begin.end());
     CoordinateDiff pads_end(m_pads_end.begin(), m_pads_end.end());
 
     set_output_type(0,
                     get_input_element_type(0),
-                    infer_batched_pooling_forward(this,
-                                                  arg_shape,
-                                                  pads_begin,
-                                                  pads_end,
-                                                  m_kernel,
-                                                  m_strides,
-                                                  !m_exclude_pad,
-                                                  m_rounding_type == op::RoundingType::CEIL));
+                    update_auto_padding_succeed
+                        ? infer_batched_pooling_forward(this,
+                                                        arg_shape,
+                                                        pads_begin,
+                                                        pads_end,
+                                                        m_kernel,
+                                                        m_strides,
+                                                        !m_exclude_pad,
+                                                        m_rounding_type == op::RoundingType::CEIL)
+                        : output_shape);
 }
 
 const Shape& op::v1::AvgPool::get_kernel() const
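The change above sets the pattern the rest of the commit follows: build a conservative output shape up front (everything dynamic except the batch and channel information that survives), then let try_apply_auto_padding report whether the SAME_UPPER/SAME_LOWER pads could actually be resolved. Per spatial axis, the arithmetic behind that helper reduces to the standard SAME-padding rule. The sketch below is illustrative only (the function name and signature are not the nGraph API); it also shows why a dynamic spatial extent forces the helper to give up.

#include <algorithm>
#include <cstdint>
#include <utility>

// Illustrative sketch of per-axis SAME padding, not the nGraph API.
// Convention: output = ceil(input / stride); SAME_UPPER puts the odd
// extra pad at the end, SAME_LOWER at the beginning.
std::pair<int64_t, int64_t> same_padding_for_axis(
    int64_t input, int64_t kernel, int64_t stride, int64_t dilation, bool same_upper)
{
    const int64_t dilated_kernel = (kernel - 1) * dilation + 1;
    const int64_t output = (input + stride - 1) / stride; // ceil division
    const int64_t total =
        std::max<int64_t>((output - 1) * stride + dilated_kernel - input, 0);
    const int64_t small_pad = total / 2;
    const int64_t large_pad = total - small_pad;
    // `total` depends on `input`, so an unknown (dynamic) spatial extent is
    // exactly the case in which try_apply_auto_padding must report failure.
    return same_upper ? std::make_pair(small_pad, large_pad)
                      : std::make_pair(large_pad, small_pad);
}

For the 32-wide input, 2x2 kernel, and stride 1 used in the new tests, the total padding per axis is 1; it lands in pads_begin for SAME_LOWER and in pads_end for SAME_UPPER, which matches the assertions in the avg_pool tests below.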
40 changes: 31 additions & 9 deletions ngraph/src/ngraph/op/binary_convolution.cpp
@@ -77,6 +77,23 @@ void op::v1::BinaryConvolution::validate_and_infer_types()
     const PartialShape& filters_shape = get_input_partial_shape(1);
     element::Type filters_et = get_input_element_type(1);
 
+    PartialShape result_shape = PartialShape::dynamic();
+    if (data_batch_shape.rank().is_static())
+    {
+        result_shape =
+            std::vector<Dimension>(data_batch_shape.rank().get_length(), Dimension::dynamic());
+
+        if (data_batch_shape.rank().get_length() > 1)
+        {
+            result_shape[0] = data_batch_shape[0]; // batch size
+        }
+
+        if (filters_shape.rank().is_static() && filters_shape.rank().get_length() > 1)
+        {
+            result_shape[1] = filters_shape[0]; // filter channel size
+        }
+    }
+
     if (m_strides.size() == 0)
     {
         m_strides = conv_default_strides(this, data_batch_shape, filters_shape);
@@ -99,23 +116,28 @@ void op::v1::BinaryConvolution::validate_and_infer_types()
 
     if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
     {
-        if (data_batch_shape.is_static() && filters_shape.is_static())
+        bool auto_padding_applied = false;
+        if (filters_shape.is_static())
         {
             m_pads_begin.clear();
             m_pads_end.clear();
             auto filter_shape = filters_shape.to_shape();
             filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
-            infer_auto_padding(data_batch_shape.to_shape(),
-                               filter_shape,
-                               m_strides,
-                               m_dilations,
-                               m_auto_pad,
-                               m_pads_end,
-                               m_pads_begin);
+            auto_padding_applied = try_apply_auto_padding(data_batch_shape,
+                                                          filter_shape,
+                                                          m_strides,
+                                                          m_dilations,
+                                                          m_auto_pad,
+                                                          m_pads_end,
+                                                          m_pads_begin);
+        }
+        if (!auto_padding_applied)
+        {
+            set_output_type(0, data_batch_et, result_shape);
+            return;
         }
     }
 
-    PartialShape result_shape;
     result_shape = infer_convolution_forward(this,
                                              data_batch_shape,
                                              Strides(data_batch_shape.rank().get_length() - 2, 1),
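All three ops gain the same fallback: when auto padding cannot be applied, the op still publishes a result shape that is dynamic in the spatial dimensions but keeps whatever is known about batch size and output channels. Factored out for clarity, the pattern looks roughly like this (the free function is hypothetical; in the commit the logic is inlined in each validate_and_infer_types):

#include "ngraph/ngraph.hpp"

using namespace ngraph;

// Hypothetical helper mirroring the inlined fallback-shape logic above.
PartialShape rank_only_output_shape(const PartialShape& data_batch_shape,
                                    const PartialShape& filters_shape)
{
    PartialShape result_shape = PartialShape::dynamic();
    if (data_batch_shape.rank().is_static())
    {
        // Start with every dimension dynamic at the known rank...
        result_shape =
            std::vector<Dimension>(data_batch_shape.rank().get_length(), Dimension::dynamic());
        // ...then pin down batch and channels, since auto padding only
        // affects the spatial dimensions.
        if (data_batch_shape.rank().get_length() > 1)
        {
            result_shape[0] = data_batch_shape[0]; // batch size
        }
        if (filters_shape.rank().is_static() && filters_shape.rank().get_length() > 1)
        {
            result_shape[1] = filters_shape[0]; // filter channel size
        }
    }
    return result_shape;
}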
116 changes: 69 additions & 47 deletions ngraph/src/ngraph/op/deformable_convolution.cpp
@@ -71,44 +71,6 @@ void op::v1::DeformableConvolution::validate_and_infer_types()
     element::Type deformable_values_et = get_input_element_type(1);
     element::Type filters_et = get_input_element_type(2);
 
-    if (m_strides.size() == 0)
-    {
-        m_strides = conv_default_strides(this, data_batch_shape, filters_shape);
-    }
-
-    if (m_dilations.size() == 0)
-    {
-        m_dilations = conv_default_strides(this, data_batch_shape, filters_shape);
-    }
-
-    if (m_pads_begin.size() == 0)
-    {
-        m_pads_begin = conv_default_padding(this, data_batch_shape, filters_shape);
-    }
-
-    if (m_pads_end.size() == 0)
-    {
-        m_pads_end = conv_default_padding(this, data_batch_shape, filters_shape);
-    }
-
-    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
-    {
-        if (data_batch_shape.is_static() && filters_shape.is_static())
-        {
-            m_pads_begin.clear();
-            m_pads_end.clear();
-            auto filter_shape = filters_shape.to_shape();
-            filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
-            infer_auto_padding(data_batch_shape.to_shape(),
-                               filter_shape,
-                               m_strides,
-                               m_dilations,
-                               m_auto_pad,
-                               m_pads_end,
-                               m_pads_begin);
-        }
-    }
-
     if (deformable_values_shape.rank().is_static())
     {
         NODE_VALIDATION_CHECK(
@@ -160,15 +122,75 @@ void op::v1::DeformableConvolution::validate_and_infer_types()
         filters_et,
         ").");
 
-    const PartialShape result_shape =
-        infer_convolution_forward(this,
-                                  data_batch_shape,
-                                  Strides(m_strides.size(), 1), // dummy data dilations
-                                  m_pads_begin,
-                                  m_pads_end,
-                                  filters_shape,
-                                  m_strides,
-                                  m_dilations);
+    PartialShape result_shape = PartialShape::dynamic();
+    if (data_batch_shape.rank().is_static())
+    {
+        result_shape =
+            std::vector<Dimension>(data_batch_shape.rank().get_length(), Dimension::dynamic());
+
+        if (data_batch_shape.rank().get_length() > 1)
+        {
+            result_shape[0] = data_batch_shape[0]; // batch size
+        }
+
+        if (filters_shape.rank().is_static() && filters_shape.rank().get_length() > 1)
+        {
+            result_shape[1] = filters_shape[0]; // filter channel size
+        }
+    }
+
+    if (m_strides.size() == 0)
+    {
+        m_strides = conv_default_strides(this, data_batch_shape, filters_shape);
+    }
+
+    if (m_dilations.size() == 0)
+    {
+        m_dilations = conv_default_strides(this, data_batch_shape, filters_shape);
+    }
+
+    if (m_pads_begin.size() == 0)
+    {
+        m_pads_begin = conv_default_padding(this, data_batch_shape, filters_shape);
+    }
+
+    if (m_pads_end.size() == 0)
+    {
+        m_pads_end = conv_default_padding(this, data_batch_shape, filters_shape);
+    }
+
+    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
+    {
+        bool auto_padding_applied = false;
+        if (filters_shape.is_static())
+        {
+            m_pads_begin.clear();
+            m_pads_end.clear();
+            auto filter_shape = filters_shape.to_shape();
+            filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
+            auto_padding_applied = try_apply_auto_padding(data_batch_shape,
+                                                          filter_shape,
+                                                          m_strides,
+                                                          m_dilations,
+                                                          m_auto_pad,
+                                                          m_pads_end,
+                                                          m_pads_begin);
+        }
+        if (!auto_padding_applied)
+        {
+            set_output_type(0, data_batch_et, result_shape);
+            return;
+        }
+    }
+
+    result_shape = infer_convolution_forward(this,
+                                             data_batch_shape,
+                                             Strides(m_strides.size(), 1), // dummy data dilations
+                                             m_pads_begin,
+                                             m_pads_end,
+                                             filters_shape,
+                                             m_strides,
+                                             m_dilations);
 
     set_output_type(0, result_et, result_shape);
 }
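A detail shared by both convolution diffs: try_apply_auto_padding works purely on spatial extents, so the {O, I} (output-channel, input-channel) dimensions are erased from the filters shape before the call. A standalone illustration, with a hypothetical helper name:

#include "ngraph/ngraph.hpp"

using namespace ngraph;

// Illustration only: keep just the spatial part of a filters shape laid
// out as {O, I, spatial...}, e.g. {64, 3, 5, 5} -> {5, 5}.
Shape spatial_filter_shape(Shape filter_shape)
{
    filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
    return filter_shape;
}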
3 changes: 3 additions & 0 deletions ngraph/test/CMakeLists.txt
@@ -91,8 +91,10 @@ set(SRC
     tensor.cpp
     type_prop/any.cpp
     type_prop/assign.cpp
+    type_prop/avg_pool.cpp
     type_prop/batch_norm.cpp
     type_prop/batch_to_space.cpp
+    type_prop/binary_convolution.cpp
     type_prop/binary_elementwise.cpp
     type_prop/broadcast.cpp
     type_prop/bucketize.cpp
@@ -102,6 +104,7 @@ set(SRC
     type_prop/convert.cpp
     type_prop/convolution.cpp
     type_prop/ctc_loss.cpp
+    type_prop/deformable_convolution.cpp
     type_prop/deformable_psroi_pooling.cpp
     type_prop/depth_to_space.cpp
     type_prop/dequantize.cpp
105 changes: 105 additions & 0 deletions ngraph/test/type_prop/avg_pool.cpp
@@ -0,0 +1,105 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"

using namespace std;
using namespace ngraph;

TEST(type_prop, avg_pool_auto_padding)
{
const PartialShape arg_shape{1, 3, 32, 32};
const Strides strides{1, 1};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const bool exclude_pad = false;
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_LOWER;

auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::AvgPool>(
arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad);

ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({1, 3, 32, 32}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}

TEST(type_prop, avg_pool_auto_padding_nc_dims_dynamic_same_lower)
{
const PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32};
const Strides strides{1, 1};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const bool exclude_pad = true;
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_LOWER;

auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::AvgPool>(
arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad);

ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
{Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}

TEST(type_prop, avg_pool_auto_padding_nc_dims_dynamic_same_upper)
{
const PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32};
const Strides strides{1, 1};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const bool exclude_pad = false;
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_UPPER;

auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::AvgPool>(
arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad);

ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
{Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{0, 0}));
ASSERT_EQ(mp->get_pads_end(), (Shape{1, 1}));
}

TEST(type_prop, avg_pool_auto_padding_spatial_dims_dynamic)
{
const PartialShape arg_shape{1, 3, 32, Dimension::dynamic()};
const Strides strides{1, 1};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const bool exclude_pad = true;
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_LOWER;

auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::AvgPool>(
arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad);

ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
{1, 3, Dimension::dynamic(), Dimension::dynamic()}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{}));
ASSERT_EQ(mp->get_pads_end(), (Shape{}));
}