[Relay, TF Frontend] Dilation2D operator support (apache#5033)
* update docs for dilation 2d

* dilation2d compute

* dilation2d register

* dilation2d rel compute

* dilation2d strategy

* dilation2d attrs

* dilation2d generic schedule

* dilation2d tf frontend support

* dilation2d tf frontend test case

* dilation2d test cases

* pylint fixes

* add exception for cuda target

* Update docstring

* Update docstring

* change rates to dilations

* removed unused param

* merge master

* Update nn.py

* Update nn.py
maheshambule authored and zhiics committed Apr 17, 2020
1 parent b353710 commit 7626220
Showing 16 changed files with 723 additions and 1 deletion.
2 changes: 2 additions & 0 deletions docs/api/python/topi.rst
@@ -57,6 +57,7 @@ List of operators
topi.nn.relu
topi.nn.leaky_relu
topi.nn.dilate
topi.nn.dilation2d
topi.nn.pool
topi.nn.global_pool
topi.nn.adaptive_pool
@@ -197,6 +198,7 @@ topi.nn
.. autofunction:: topi.nn.upsampling
.. autofunction:: topi.nn.softmax
.. autofunction:: topi.nn.dense
.. autofunction:: topi.nn.dilation2d
.. autofunction:: topi.nn.batch_matmul
.. autofunction:: topi.nn.log_softmax
.. autofunction:: topi.nn.conv2d_nchw
1 change: 1 addition & 0 deletions docs/frontend/tensorflow.rst
@@ -140,6 +140,7 @@ Supported Ops
- DecodeJpeg
- DepthwiseConv2dNative
- DepthToSpace
- Dilation2D
- Equal
- Elu
- Enter
2 changes: 2 additions & 0 deletions docs/langref/relay_op.rst
@@ -70,6 +70,7 @@ This level enables typical convnet models.
tvm.relay.nn.conv2d
tvm.relay.nn.conv2d_transpose
tvm.relay.nn.dense
tvm.relay.nn.dilation2d
tvm.relay.nn.max_pool2d
tvm.relay.nn.max_pool3d
tvm.relay.nn.avg_pool2d
@@ -249,6 +250,7 @@ Level 2 Definitions
.. autofunction:: tvm.relay.nn.conv2d
.. autofunction:: tvm.relay.nn.conv2d_transpose
.. autofunction:: tvm.relay.nn.dense
.. autofunction:: tvm.relay.nn.dilation2d
.. autofunction:: tvm.relay.nn.max_pool2d
.. autofunction:: tvm.relay.nn.max_pool3d
.. autofunction:: tvm.relay.nn.avg_pool2d
36 changes: 36 additions & 0 deletions include/tvm/relay/attrs/nn.h
@@ -156,6 +156,42 @@ struct Conv2DAttrs : public tvm::AttrsNode<Conv2DAttrs> {
};


/*! \brief Attributes used in dilation2d operators */
struct Dilation2DAttrs : public tvm::AttrsNode<Dilation2DAttrs> {
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  Array<IndexExpr> dilations;
  std::string data_layout;
  std::string kernel_layout;
  DataType out_dtype;

  TVM_DECLARE_ATTRS(Dilation2DAttrs, "relay.attrs.Dilation2DAttrs") {
    TVM_ATTR_FIELD(strides).set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the strides of the sliding window. [stride_height, stride_width].");
    TVM_ATTR_FIELD(padding).set_default(Array<IndexExpr>({0, 0}))
        .describe("If padding is non-zero, the input is implicitly zero-padded. "
                  "Both symmetric and asymmetric padding are supported: "
                  "one int : the same padding is used on all sides; "
                  "two ints : bottom and right use the same padding as top and left; "
                  "four ints : padding widths in the order (top, left, bottom, right).");
    TVM_ATTR_FIELD(dilations).set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation rate to use. [dilation_height, dilation_width]");
    TVM_ATTR_FIELD(data_layout).set_default("NCHW")
        .describe("Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
                  "dimensions respectively. The dilation is applied on the 'H' and "
                  "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout).set_default("IHW")
        .describe("Dimension ordering of the weight. Can be 'IHW', 'HWI', etc. "
                  "'I', 'H', 'W' stand for input_channel, height, and width "
                  "dimensions respectively.");
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type; set to an explicit type under mixed-precision settings.");
  }
};
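The one-, two-, and four-int padding forms accepted above normalize to an explicit (top, left, bottom, right) tuple. A minimal sketch of that normalization in Python (normalize_padding is a hypothetical helper, not part of this change):

def normalize_padding(padding):
    # 1 int: same padding on all sides; 2 ints: (top, left) reused for
    # (bottom, right); 4 ints: already (top, left, bottom, right).
    if len(padding) == 1:
        return (padding[0],) * 4
    if len(padding) == 2:
        return (padding[0], padding[1], padding[0], padding[1])
    if len(padding) == 4:
        return tuple(padding)
    raise ValueError("padding must have 1, 2 or 4 elements")

assert normalize_padding([1, 2]) == (1, 2, 1, 2)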


/*! \brief Attributes used in winograd weight transformation operators */
struct Conv2DWinogradWeightTransformAttrs :
    public tvm::AttrsNode<Conv2DWinogradWeightTransformAttrs> {
86 changes: 86 additions & 0 deletions python/tvm/relay/frontend/tensorflow.py
@@ -410,6 +410,91 @@ def _impl(inputs, attr, params):
        return out
    return _impl


# Dilation2d
def _dilation2d():
    def _impl(inputs, attr, params):
        if 'data_format' not in attr:
            attr['data_format'] = 'NHWC'

        input_shape = attr['_input_shapes'][inputs[0]]
        weights_shape = attr['_input_shapes'][inputs[1]]

        if attr['_target_layout'] == "NCHW" and attr['data_format'] == "NHWC":
            input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]
            inputs[0] = _op.transpose(inputs[0], axes=(0, 3, 1, 2))
            weights_shape = [weights_shape[ii] for ii in (2, 0, 1)]
            inputs[1] = _op.transpose(inputs[1], axes=(2, 0, 1))
            attr['data_format'] = "NCHW"

        if attr['data_format'] in ['NHWC', 'NCHW']:
            if 'rates' in attr:
                attr['dilations'] = attr['rates']
            if 'dilations' in attr:
                attr['dilations'] = (attr['dilations'][1], attr['dilations'][2])
            attr['strides'] = (attr['strides'][1], attr['strides'][2])
        else:
            msg = 'Value {} in attribute "data_format" of operator Dilation2D is ' \
                  'not valid.'
            raise tvm.error.OpAttributeInvalid(msg.format(attr['data_format']))

        attr['padding'] = attr['padding'].decode("utf-8")
        if attr['padding'] == 'VALID':
            attr['padding'] = [0, 0]
        elif attr['padding'] == 'SAME':
            stride_h, stride_w = attr['strides']
            if attr['data_format'] == 'NHWC':
                kernel_h, kernel_w = weights_shape[0], weights_shape[1]
                in_h = input_shape[1]
                in_w = input_shape[2]
            else:
                kernel_h, kernel_w = weights_shape[1], weights_shape[2]
                in_h = input_shape[2]
                in_w = input_shape[3]

            dilation_h = attr['dilations'][0]
            dilation_w = attr['dilations'][1]
            dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
            dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
            pad_v = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
            pad_h = _get_pad_pair(in_w, dilated_kernel_w, stride_w)

            if attr['data_format'] == 'NHWC':
                inputs[0] = _op.nn.pad(data=inputs[0],
                                       pad_width=((0, 0),
                                                  (pad_v[0], pad_v[1]),
                                                  (pad_h[0], pad_h[1]),
                                                  (0, 0)))
            else:
                inputs[0] = _op.nn.pad(data=inputs[0],
                                       pad_width=((0, 0),
                                                  (0, 0),
                                                  (pad_v[0], pad_v[1]),
                                                  (pad_h[0], pad_h[1])))

            attr['padding'] = [0, 0]

        else:
            msg = 'Value {} in attribute "padding" of operator Dilation2D is not ' \
                  'valid.'
            raise tvm.error.OpAttributeInvalid(msg.format(attr['padding']))

        attr['kernel_layout'] = 'HWI' if attr['data_format'] == 'NHWC' else 'IHW'
        out = AttrCvt(
            op_name='dilation2d',
            ignores=['explicit_paddings', 'rates'],
            transforms={'data_format': 'data_layout'})([inputs[0], inputs[1]], attr)
        if attr['_target_layout'] == "NCHW":
            out = _op.transpose(out, axes=(0, 2, 3, 1))
        return out

    return _impl
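# For reference, a sketch of the SAME-padding split performed by
# _get_pad_pair (defined earlier in this file), matching TensorFlow's
# SAME semantics, where the extra pixel of an odd total pad goes after.
# (The _sketch suffix marks this helper as illustrative only.)
def _get_pad_pair_sketch(input1d, kernel1d, stride1d):
    if input1d % stride1d == 0:
        pad = max(kernel1d - stride1d, 0)
    else:
        pad = max(kernel1d - (input1d % stride1d), 0)
    pad_before = pad // 2
    pad_after = pad - pad_before
    return [pad_before, pad_after]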


def _conv3d(opname):
    def _impl(inputs, attr, params):
        attr['data_format'] = attr['data_format'].decode("utf-8")
@@ -1550,6 +1635,7 @@ def _impl(inputs, attr, params):
    'DecodeJpeg'                        : _decode_image(),
    'DepthwiseConv2dNative'             : _conv('depthwise'),
    'DepthToSpace'                      : _depth_to_space(),
    'Dilation2D'                        : _dilation2d(),
    'Equal'                             : _broadcast('equal'),
    'Elu'                               : _elu(),
    'Erf'                               : AttrCvt('erf'),
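With the converter in place, a TensorFlow graph containing a Dilation2D node can be imported end to end. A minimal sketch, assuming TF 1.x-style APIs and illustrative shapes:

import numpy as np
import tensorflow.compat.v1 as tf
import tvm
from tvm import relay

tf.disable_eager_execution()
graph = tf.Graph()
with graph.as_default():
    data = tf.placeholder(tf.float32, [1, 28, 28, 3], name="input")
    kernel = tf.constant(np.random.rand(3, 3, 3).astype("float32"))
    # TF carries 4-element strides/rates; the converter keeps only the
    # H/W components, as shown above.
    tf.nn.dilation2d(data, kernel, strides=[1, 1, 1, 1],
                     rates=[1, 2, 2, 1], padding="SAME", name="output")

mod, params = relay.frontend.from_tensorflow(graph.as_graph_def(),
                                             shape={"input": (1, 28, 28, 3)})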
3 changes: 3 additions & 0 deletions python/tvm/relay/op/nn/_nn.py
@@ -186,6 +186,9 @@ def legalize_conv2d_transpose(attrs, inputs, types):
reg.register_strategy("nn.conv3d", strategy.conv3d_strategy)
reg.register_pattern("nn.conv3d", OpPattern.OUT_ELEMWISE_FUSABLE)

# dilation2d
reg.register_strategy("nn.dilation2d", strategy.dilation2d_strategy)
reg.register_pattern("nn.dilation2d", OpPattern.OUT_ELEMWISE_FUSABLE)

# conv1d_transpose
reg.register_strategy("nn.conv1d_transpose", strategy.conv1d_transpose_strategy)
57 changes: 57 additions & 0 deletions python/tvm/relay/op/nn/nn.py
@@ -2463,3 +2463,60 @@ def adaptive_avg_pool3d(data,
"""
output_size = [] or output_size
return _make.adaptive_avg_pool3d(data, output_size, layout)


def dilation2d(data,
               weight,
               strides=(1, 1),
               padding=(0, 0),
               dilations=(1, 1),
               data_layout="NCHW",
               kernel_layout="IHW",
               out_dtype=""):
    r"""Dilation 2D.

    This operator takes the weight as the dilation kernel and dilates it with
    data to produce an output. In the default case, where the data_layout is `NCHW`
    and the kernel_layout is `IHW`, dilation2d takes a data Tensor with shape
    `(batch_size, in_channels, height, width)` and a weight Tensor with shape
    `(channels, kernel_height, kernel_width)` to produce an output Tensor
    according to the following rule:

    .. math::

        \mbox{out}[b, c, y, x] = \max_{dy, dx}
            \mbox{data}[b, c, \mbox{strides}[0] * y + dy, \mbox{strides}[1] * x + dx] +
            \mbox{weight}[c, dy, dx]

    Padding and dilation are applied to data and weight respectively before the
    computation. This operator accepts a data layout specification. Semantically,
    the operator will convert the layout to the canonical layout
    (`NCHW` for data and `IHW` for weight) and perform the computation.

    Parameters
    ----------
    data : tvm.relay.Expr
        The input data to the operator.

    weight : tvm.relay.Expr
        The weight expressions.

    strides : Optional[Tuple[int]]
        The strides of the sliding window. [stride_height, stride_width].

    padding : Optional[Tuple[int]]
        The padding applied to both sides of the input before the computation.

    dilations : Optional[Tuple[int]]
        Specifies the dilation rate to be used. [dilation_height, dilation_width].

    data_layout : Optional[str]
        Layout of the input.

    kernel_layout : Optional[str]
        Layout of the weight.

    out_dtype : Optional[str]
        Specifies the output data type.

    Returns
    -------
    result : tvm.relay.Expr
        The computed result.
    """
    return _make.dilation2d(data, weight, strides, padding, dilations, data_layout,
                            kernel_layout, out_dtype)
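A quick usage sketch (shapes chosen for illustration): with NCHW data of shape (1, 3, 28, 28), an IHW weight of shape (3, 3, 3), unit strides and dilations, and zero padding, type inference gives an output of shape (1, 3, 26, 26):

import tvm
from tvm import relay

data = relay.var("data", shape=(1, 3, 28, 28), dtype="float32")
weight = relay.var("weight", shape=(3, 3, 3), dtype="float32")
out = relay.nn.dilation2d(data, weight, strides=(1, 1), padding=(0, 0),
                          dilations=(1, 1), data_layout="NCHW",
                          kernel_layout="IHW")
mod = tvm.IRModule.from_expr(relay.Function([data, weight], out))
print(relay.transform.InferType()(mod))  # ... Tensor[(1, 3, 26, 26), float32]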
3 changes: 3 additions & 0 deletions python/tvm/relay/op/op_attrs.py
@@ -44,6 +44,9 @@ class Conv2DWinogradWeightTransformAttrs(Attrs):
class Conv2DWinogradNNPACKWeightTransformAttrs(Attrs):
    """Attributes for nn.contrib_conv2d_winograd_nnpack_weight_transform"""

@register_relay_attr_node
class Dilation2DAttrs(Attrs):
    """Attributes for nn.dilation2d"""

@register_relay_attr_node
class GlobalPool2DAttrs(Attrs):
51 changes: 51 additions & 0 deletions python/tvm/relay/op/strategy/generic.py
@@ -442,6 +442,57 @@ def conv1d_transpose_strategy(attrs, inputs, out_type, target):
                                name="conv1d_transpose_ncw.generic")
    return strategy


# dilation2d
def wrap_compute_dilation2d(topi_compute, need_data_layout=False):
    """Wrap dilation2d topi compute"""
    def _compute_dilation2d(attrs, inputs, out_type):
        padding = get_const_tuple(attrs.padding)
        strides = get_const_tuple(attrs.strides)
        dilations = get_const_tuple(attrs.dilations)
        data_layout = attrs.get_str("data_layout")
        out_dtype = attrs.out_dtype
        out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
                     else out_dtype)
        args = [inputs[0], inputs[1], strides, padding, dilations]
        if need_data_layout:
            args.append(data_layout)
        args.append(out_dtype)
        return [topi_compute(*args)]
    return _compute_dilation2d


@override_native_generic_func("dilation2d_strategy")
def dilation2d_strategy(attrs, inputs, out_type, target):
    """dilation2d generic strategy"""
    logger.warning("dilation2d_strategy is not optimized for this platform.")
    strategy = _op.OpStrategy()
    dilations = get_const_tuple(attrs.dilations)
    layout = attrs.data_layout
    kernel_layout = attrs.kernel_layout

    assert layout in ["NCHW", "NHWC"]
    (dilation_h, dilation_w) = dilations
    if dilation_h < 1 or dilation_w < 1:
        raise ValueError("dilation should be a positive value")

    if layout == "NCHW":
        assert kernel_layout == "IHW"
        strategy.add_implementation(
            wrap_compute_dilation2d(topi.nn.dilation2d_nchw),
            wrap_topi_schedule(topi.generic.schedule_dilation2d_nchw),
            name="dilation2d_nchw.generic")
    elif layout == "NHWC":
        assert kernel_layout == "HWI"
        strategy.add_implementation(
            wrap_compute_dilation2d(topi.nn.dilation2d_nhwc),
            wrap_topi_schedule(topi.generic.schedule_dilation2d_nhwc),
            name="dilation2d_nhwc.generic")
    else:
        raise RuntimeError("Unsupported dilation2d layout {}".format(layout))
    return strategy


# dense
def wrap_compute_dense(topi_compute):
    """wrap dense topi compute"""
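The compute implementations wrapped above produce outputs with the usual windowed-operator shape, which the Dilation2D type relation checks. A sketch of that arithmetic (dilation2d_out_dim is a hypothetical helper):

def dilation2d_out_dim(in_size, kernel, stride, pad, dilation):
    # A kernel of size k with dilation d covers (k - 1) * d + 1 input pixels.
    dilated_kernel = (kernel - 1) * dilation + 1
    return (in_size + 2 * pad - dilated_kernel) // stride + 1

assert dilation2d_out_dim(28, 3, 1, 0, 1) == 26
assert dilation2d_out_dim(28, 3, 1, 0, 2) == 24  # dilated kernel spans 5 pixels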
60 changes: 60 additions & 0 deletions src/relay/op/nn/convolution.cc
@@ -1040,6 +1040,66 @@ Expr MakeDeformableConv2D(Expr data,
TVM_REGISTER_GLOBAL("relay.op.nn._make.deformable_conv2d")
.set_body_typed(MakeDeformableConv2D);

// relay.nn.dilation2d
TVM_REGISTER_NODE_TYPE(Dilation2DAttrs);

template <typename T>
Array<Array<Layout> > Dilation2DInferCorrectLayout(
    const Attrs& attrs,
    const Array<Layout>& new_in_layouts,
    const Array<Layout>& old_in_layouts,
    const Array<Array<IndexExpr>>& old_in_shapes) {
  const T* params = attrs.as<T>();

  // We always make other operators fit the layouts of convolution layers,
  // so this inference ignores all inputs.
  return Array<Array<Layout> >{{params->data_layout, params->kernel_layout},
                               {params->data_layout}};
}

// Positional relay function to create the dilation2d operator,
// used by the frontend FFI.
Expr MakeDilation2D(Expr data,
                    Expr weight,
                    Array<IndexExpr> strides,
                    Array<IndexExpr> padding,
                    Array<IndexExpr> dilations,
                    std::string data_layout,
                    std::string kernel_layout,
                    DataType out_dtype) {
  auto attrs = make_object<Dilation2DAttrs>();
  attrs->strides = std::move(strides);
  attrs->padding = std::move(padding);
  attrs->dilations = std::move(dilations);
  attrs->data_layout = std::move(data_layout);
  attrs->kernel_layout = std::move(kernel_layout);
  attrs->out_dtype = std::move(out_dtype);
  static const Op& op = Op::Get("nn.dilation2d");
  return CallNode::make(op, {data, weight}, Attrs(attrs), {});
}


TVM_REGISTER_GLOBAL("relay.op.nn._make.dilation2d")
.set_body_typed(MakeDilation2D);


RELAY_REGISTER_OP("nn.dilation2d")
.describe(R"code(Computes grayscale dilation of a 4D input and a 3D filter.

- **data**: This depends on the `layout` parameter. Input is a 4D array of shape
            (batch_size, in_channels, height, width) if `layout` is `NCHW`.
- **weight**: (in_channels, height, width)
- **out**: This depends on the `layout` parameter. Output is a 4D array of shape
           (batch_size, channels, out_height, out_width) if `layout` is `NCHW`.

)code" TVM_ADD_FILELINE)
.set_attrs_type<Dilation2DAttrs>()
.set_num_inputs(2)
.add_argument("data", "Tensor", "The input tensor.")
.add_argument("weight", "Tensor", "The weight tensor.")
.set_support_level(2)
.add_type_rel("Dilation2D", Dilation2DRel<Dilation2DAttrs>)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
                               Dilation2DInferCorrectLayout<Dilation2DAttrs>);


} // namespace relay
} // namespace tvm
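As a semantic cross-check of the registration above, a NumPy sketch of the grayscale dilation the operator computes (NCHW data, IHW weight; float inputs and the function name are assumed for illustration):

import numpy as np

def dilation2d_nchw_ref(data, weight, strides=(1, 1), dilations=(1, 1)):
    # out[b, c, y, x] = max over (dy, dx) of
    #     data[b, c, y*stride + dy*dilation, x*stride + dx*dilation] + weight[c, dy, dx]
    b, c, h, w = data.shape
    _, kh, kw = weight.shape
    dkh = (kh - 1) * dilations[0] + 1
    dkw = (kw - 1) * dilations[1] + 1
    oh = (h - dkh) // strides[0] + 1
    ow = (w - dkw) // strides[1] + 1
    out = np.full((b, c, oh, ow), -np.inf, dtype=data.dtype)
    for dy in range(kh):
        for dx in range(kw):
            win = data[:, :,
                       dy * dilations[0]:dy * dilations[0] + oh * strides[0]:strides[0],
                       dx * dilations[1]:dx * dilations[1] + ow * strides[1]:strides[1]]
            out = np.maximum(out, win + weight[:, dy, dx].reshape(1, c, 1, 1))
    return out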