[Relay/Topi][Op] 1D Pooling (apache#4663)
* Added 1D pooling to Topi

* Added 1D pooling relay op and tests.

* Added onnx parsing and tests for maxpool1d and averagepool1d

* formatting

* moved partial import.

* Fixed typo.
jwfromm authored and alexwong committed Feb 28, 2020
1 parent d7319ad commit 43a22c5
Showing 14 changed files with 786 additions and 18 deletions.
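Before the per-file diffs, a minimal usage sketch of the ops this commit adds. This is written against the TVM Python API of early 2020; the IRModule/InferType calls are assumptions for illustration, not part of the diff:

import tvm
from tvm import relay

# NCW input: batch=1, channels=3, width=32
data = relay.var("data", shape=(1, 3, 32), dtype="float32")
# pool_size=3, stride=2, pad 1 on each side:
# out_width = floor((32 + 1 + 1 - 3) / 2) + 1 = 16
out = relay.nn.max_pool1d(data, pool_size=(3,), strides=2, padding=1)

func = relay.Function([data], out)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
print(mod)  # output type: Tensor[(1, 3, 16), float32]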
60 changes: 60 additions & 0 deletions include/tvm/relay/attrs/nn.h
@@ -481,6 +481,66 @@ struct AdaptivePool2DAttrs : public tvm::AttrsNode<AdaptivePool2DAttrs> {
};


/*! \brief Attributes for 1D max pool operator */
struct MaxPool1DAttrs : public tvm::AttrsNode<MaxPool1DAttrs> {
  Array<IndexExpr> pool_size;
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  std::string layout;
  bool ceil_mode;

  TVM_DECLARE_ATTRS(MaxPool1DAttrs, "relay.attrs.MaxPool1DAttrs") {
    TVM_ATTR_FIELD(pool_size)
        .describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides).set_default(Array<IndexExpr>({1}))
        .describe("Specifies the strides of the pooling.");
    TVM_ATTR_FIELD(padding).set_default(Array<IndexExpr>({0}))
        .describe("If padding is non-zero, then the input is implicitly zero-padded. "
                  "Padding supports both symmetric and asymmetric as "
                  "one int : same padding used on both sides, or "
                  "two ints : padding widths in the order of (left, right).");
    TVM_ATTR_FIELD(layout).set_default("NCW")
        .describe("Dimension ordering of data and weight. Can be 'NCW', 'NWC', etc. "
                  "'N', 'C', 'W' stand for batch, channel, and width "
                  "dimensions respectively. Pooling is applied on the 'W' dimension.");
    TVM_ATTR_FIELD(ceil_mode).set_default(false)
        .describe("When true, will use ceil instead of floor to compute the output shape.");
  }
};

/*! \brief Attributes for 1D avg pool operator */
struct AvgPool1DAttrs : public tvm::AttrsNode<AvgPool1DAttrs> {
  Array<IndexExpr> pool_size;
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  std::string layout;
  bool ceil_mode;
  bool count_include_pad;

  TVM_DECLARE_ATTRS(AvgPool1DAttrs, "relay.attrs.AvgPool1DAttrs") {
    TVM_ATTR_FIELD(pool_size)
        .describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides).set_default(Array<IndexExpr>({1}))
        .describe("Specifies the strides of the pooling.");
    TVM_ATTR_FIELD(padding).set_default(Array<IndexExpr>({0}))
        .describe("If padding is non-zero, then the input is implicitly zero-padded. "
                  "Padding supports both symmetric and asymmetric as "
                  "one int : same padding used on both sides, or "
                  "two ints : padding widths in the order of (left, right).");
    TVM_ATTR_FIELD(layout).set_default("NCW")
        .describe("Dimension ordering of data and weight. Can be 'NCW', 'NWC', etc. "
                  "'N', 'C', 'W' stand for batch, channel, and width "
                  "dimensions respectively. Pooling is applied on the 'W' dimension.");
    TVM_ATTR_FIELD(ceil_mode).set_default(false)
        .describe("When true, will use ceil instead of floor to compute the output shape.");
    TVM_ATTR_FIELD(count_include_pad).set_default(false)
        .describe("When true, will include padding to compute the average.");
  }
};
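A quick illustration of the 1D padding convention described above; the shapes and values here are hypothetical:

from tvm import relay

x = relay.var("x", shape=(1, 8, 100))  # NCW layout
sym = relay.nn.max_pool1d(x, pool_size=(4,), padding=1)        # one int: same pad on both sides
asym = relay.nn.max_pool1d(x, pool_size=(4,), padding=(1, 2))  # two ints: (left, right)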


/*! \brief Attributes for 3D max pool operator */
struct MaxPool3DAttrs : public tvm::AttrsNode<MaxPool3DAttrs> {
Array<IndexExpr> pool_size;
60 changes: 43 additions & 17 deletions python/tvm/relay/frontend/onnx.py
@@ -18,6 +18,7 @@
"""ONNX: Open Neural Network Exchange frontend for Relay."""
from __future__ import absolute_import as _abs

from functools import partial
import numpy as np
import tvm
from ... import nd as _nd
@@ -43,12 +44,15 @@ def get_numpy(tensor_proto):


def dimension_picker(prefix, suffix=''):
    """Check that dimensions are supported."""
    def _impl(attr):
        kernel = attr['kernel_shape']
        if len(kernel) == 1:
            return prefix + '1d' + suffix
        if len(kernel) == 2:
            return prefix + '2d' + suffix
-       msg = 'Only 2D kernels are supported for operator {}.'
-       op_name = prefix + '2d'
+       msg = 'Only 1D and 2D kernels are supported for operator {}.'
+       op_name = prefix + '1d/2d'
        raise tvm.error.OpAttributeInvalid(msg.format(op_name))

    return _impl
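For reference, a sketch of how dimension_picker resolves an op name from the kernel rank; the attribute dicts are hypothetical:

picker = dimension_picker('max_pool')
picker({'kernel_shape': [3]})        # -> 'max_pool1d'
picker({'kernel_shape': [3, 3]})     # -> 'max_pool2d'
picker({'kernel_shape': [3, 3, 3]})  # raises OpAttributeInvalid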
@@ -77,21 +81,27 @@ def get_pad_pair(input1d, kernel1d, stride1d):
return [pad_before, pad_after]


- def onnx_storage_order2layout(storage_order):
+ def onnx_storage_order2layout(storage_order, dims=2):
    """Convert the ONNX storage_order parameter to a TVM layout string."""
    if storage_order not in (0, 1):
        raise tvm.error.OpAttributeInvalid('Mode of storage_order must be either 0 or 1')

-   return 'NCHW' if storage_order == 0 else 'NHWC'
+   if dims == 1:
+       return 'NCW' if storage_order == 0 else 'NWC'
+   elif dims == 2:
+       return 'NCHW' if storage_order == 0 else 'NHWC'
+   else:
+       msg = "Only 1d and 2d layouts are currently supported"
+       raise tvm.error.OpAttributeInvalid(msg)
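The resulting mapping, spelled out:

onnx_storage_order2layout(0)          # -> 'NCHW' (dims defaults to 2)
onnx_storage_order2layout(1, dims=2)  # -> 'NHWC'
onnx_storage_order2layout(0, dims=1)  # -> 'NCW'
onnx_storage_order2layout(1, dims=1)  # -> 'NWC'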


def dimension_constraint():
    def _dim_check(attrs):
-       if len(attrs['kernel_shape']) == 2:
+       if len(attrs['kernel_shape']) in (1, 2):
            return True
        return False

-   return _dim_check, "Only 2d kernel supported."
+   return _dim_check, "Only 1d and 2d kernels are supported."


class OnnxOpConverter(object):
@@ -394,17 +404,33 @@ def _impl_v8(cls, inputs, attr, params):

    @classmethod
    def _impl_v10(cls, inputs, attr, params):
-       return AttrCvt(
-           op_name=dimension_picker(cls.name),
-           transforms={
-               'kernel_shape': 'pool_size',
-               'pads': ('padding', (0, 0), revert_caffe2_pad),
-               'storage_order': ('layout', 'NCHW', onnx_storage_order2layout),
-               'ceil_mode': 'ceil_mode'
-           },
-           # very weird attributes here in onnx, force check
-           ignores=['dilations', 'auto_pad'],
-           custom_check=dimension_constraint())(inputs, attr, params)
+       input_shape = infer_shape(inputs[0])
+       # 1D pooling
+       if len(input_shape) == 3:
+           return AttrCvt(
+               op_name="max_pool1d",
+               transforms={
+                   'kernel_shape': 'pool_size',
+                   'pads': ('padding', (0, 0)),
+                   'storage_order': ('layout', 'NCW', partial(onnx_storage_order2layout, dims=1)),
+                   'ceil_mode': 'ceil_mode'
+               },
+               ignores=['dilations', 'auto_pad'])(inputs, attr, params)
+       # 2D pooling
+       if len(input_shape) == 4:
+           return AttrCvt(
+               op_name=dimension_picker(cls.name),
+               transforms={
+                   'kernel_shape': 'pool_size',
+                   'pads': ('padding', (0, 0), revert_caffe2_pad),
+                   'storage_order': ('layout', 'NCHW', onnx_storage_order2layout),
+                   'ceil_mode': 'ceil_mode'
+               },
+               # very weird attributes here in onnx, force check
+               ignores=['dilations', 'auto_pad'],
+               custom_check=dimension_constraint())(inputs, attr, params)
+
+       raise tvm.error.OpAttributeInvalid("Only 1D and 2D max pooling are currently supported.")
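A sketch of the new rank-3 path end to end, building a one-node ONNX model and importing it. The onnx.helper calls are standard ONNX API, not part of this diff, and the exact output shape assumes floor-mode pooling:

import onnx
from onnx import helper, TensorProto
from tvm import relay

node = helper.make_node('MaxPool', inputs=['x'], outputs=['y'],
                        kernel_shape=[3], strides=[2], pads=[1, 1])
graph = helper.make_graph(
    [node], 'maxpool1d_test',
    inputs=[helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 3, 32])],
    outputs=[helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 3, 16])])
model = helper.make_model(graph)

# The rank-3 input routes to relay.nn.max_pool1d in _impl_v10 above.
mod, params = relay.frontend.from_onnx(model, shape={'x': (1, 3, 32)})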

class Mul(Elemwise):
""" Operator converter for Multiply.
24 changes: 24 additions & 0 deletions python/tvm/relay/op/nn/_nn.py
@@ -428,6 +428,18 @@ def schedule_conv1d_transpose(attrs, outs, target):
reg.register_pattern("nn.bias_add", OpPattern.BROADCAST)


# max_pool1d
@reg.register_schedule("nn.max_pool1d")
def schedule_max_pool1d(attrs, outs, target):
    """Schedule definition of max_pool1d"""
    layout = attrs.layout
    with target:
        return topi.generic.schedule_pool(outs, layout)


reg.register_pattern("nn.max_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)


# max_pool2d
@reg.register_schedule("nn.max_pool2d")
def schedule_max_pool2d(attrs, outs, target):
@@ -452,6 +464,18 @@ def schedule_max_pool3d(attrs, outs, target):
reg.register_pattern("nn.max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)


# avg_pool1d
@reg.register_schedule("nn.avg_pool1d")
def schedule_avg_pool1d(attrs, outs, target):
    """Schedule definition of avg_pool1d"""
    layout = attrs.layout
    with target:
        return topi.generic.schedule_pool(outs, layout)


reg.register_pattern("nn.avg_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)


# avg_pool2d
@reg.register_schedule("nn.avg_pool2d")
def schedule_avg_pool2d(attrs, outs, target):
105 changes: 104 additions & 1 deletion python/tvm/relay/op/nn/nn.py
@@ -373,6 +373,55 @@ def log_softmax(data, axis=-1):
return _make.log_softmax(data, axis)


def max_pool1d(data,
               pool_size=(1,),
               strides=(1,),
               padding=(0,),
               layout="NCW",
               ceil_mode=False):
    r"""1D maximum pooling operator.

    This operator takes data as input and does 1D max value calculation
    within a pool_size sized window, striding as defined by strides.

    In the default case, where the data_layout is `NCW`,
    a data Tensor with shape `(batch_size, channels, width)`
    is pooled to produce an output Tensor.

    The ceil_mode is used to take ceil or floor while computing the output shape.
    This operator accepts data layout specification.

    Parameters
    ----------
    data : tvm.relay.Expr
        The input data to the operator.

    pool_size : tuple of int, optional
        The size of the pooling window.

    strides : int or tuple of int, optional
        The strides of pooling.

    padding : int or tuple of int, optional
        The padding for pooling.

    layout : str, optional
        Layout of the input.

    ceil_mode : bool, optional
        To enable or disable ceil while pooling.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    if isinstance(strides, int):
        strides = (strides,)
    if isinstance(padding, int):
        padding = (padding,)
    return _make.max_pool1d(data, pool_size, strides, padding,
                            layout, ceil_mode)
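For reference, a small helper (not part of this diff) reproducing the output-width rule that ceil_mode selects:

import math

def pool1d_out_width(width, pool_size, stride, pad_left, pad_right, ceil_mode=False):
    """Output width of 1D pooling: round the strided span, then add one."""
    rounder = math.ceil if ceil_mode else math.floor
    return int(rounder((width + pad_left + pad_right - pool_size) / stride)) + 1

pool1d_out_width(32, 3, 2, 1, 1)                  # -> 16
pool1d_out_width(32, 3, 2, 1, 1, ceil_mode=True)  # -> 17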


def max_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
@@ -470,6 +519,60 @@ def max_pool3d(data,
return _make.max_pool3d(data, pool_size, strides, padding,
layout, ceil_mode)


def avg_pool1d(data,
               pool_size=(1,),
               strides=(1,),
               padding=(0,),
               layout="NCW",
               ceil_mode=False,
               count_include_pad=False):
    r"""1D average pooling operator.

    This operator takes data as input and does 1D average value calculation
    within a pool_size sized window, striding as defined by strides.

    In the default case, where the data_layout is `NCW`,
    a data Tensor with shape `(batch_size, channels, width)`
    is pooled to produce an output Tensor.

    The ceil_mode is used to take ceil or floor while computing the output shape,
    and count_include_pad indicates whether padded input values are included in the average.
    This operator accepts data layout specification.

    Parameters
    ----------
    data : tvm.relay.Expr
        The input data to the operator.

    pool_size : tuple of int, optional
        The size of the pooling window.

    strides : int or tuple of int, optional
        The strides of pooling.

    padding : int or tuple of int, optional
        The padding for pooling.

    layout : str, optional
        Layout of the input.

    ceil_mode : bool, optional
        To enable or disable ceil while pooling.

    count_include_pad : bool, optional
        To include padding to compute the average.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    if isinstance(strides, int):
        strides = (strides,)
    if isinstance(padding, int):
        padding = (padding,)
    return _make.avg_pool1d(data, pool_size, strides, padding,
                            layout, ceil_mode, count_include_pad)
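A hand-worked example of the count_include_pad semantics (values hypothetical):

import numpy as np

x = np.array([1., 2., 3., 4.])  # width 4
# avg_pool1d with pool_size=2, strides=2, padding=1 pads to [0, 1, 2, 3, 4, 0],
# giving windows [0,1], [2,3], [4,0]:
#   count_include_pad=True  -> divide every window by 2           -> [0.5, 2.5, 2.0]
#   count_include_pad=False -> divide by the count of real values -> [1.0, 2.5, 4.0]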


def avg_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
@@ -541,7 +644,7 @@ def avg_pool3d(data,
In the default case, where the data_layout is `NCDHW`
-    a data Tensor with shape `(batch_size, channels, depthm height, width)`,
+    a data Tensor with shape `(batch_size, channels, depth, height, width)`,
to produce an output Tensor.
The ceil_mode is used to take ceil or floor while computing out shape.
10 changes: 10 additions & 0 deletions python/tvm/relay/op/op_attrs.py
@@ -275,6 +275,16 @@ class AvgPool2DAttrs(Attrs):
"""Attributes used in avg_pool2d operators"""


@register_relay_attr_node
class MaxPool1DAttrs(Attrs):
"""Attributes used in max_pool1d operators"""


@register_relay_attr_node
class AvgPool1DAttrs(Attrs):
"""Attributes used in avg_pool1d operators"""


@register_relay_attr_node
class MaxPool3DAttrs(Attrs):
"""Attributes used in max_pool3d operators"""