[RELAY][DYN] Implementation of the dynamic pad operator #6284

Merged · 22 commits · Aug 20, 2020 · showing changes from 10 commits
22 changes: 11 additions & 11 deletions include/tvm/relay/attrs/nn.h
@@ -85,7 +85,7 @@ struct Conv1DAttrs : public tvm::AttrsNode<Conv1DAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCW")
.describe(
@@ -148,7 +148,7 @@ struct Conv2DAttrs : public tvm::AttrsNode<Conv2DAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCHW")
.describe(
@@ -242,7 +242,7 @@ struct Conv2DWinogradAttrs : public tvm::AttrsNode<Conv2DWinogradAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCHW")
.describe(
@@ -331,7 +331,7 @@ struct Conv3DAttrs : public tvm::AttrsNode<Conv3DAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCDHW")
.describe(
@@ -381,7 +381,7 @@ struct Conv3DTransposeAttrs : public tvm::AttrsNode<Conv3DTransposeAttrs> {
"i.e. the number of output channels in the convolution.");
TVM_ATTR_FIELD(kernel_size)
.describe("The dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(strides)
.set_default(Array<IndexExpr>({1, 1, 1}))
.describe("The strides of the convolution.");
@@ -480,7 +480,7 @@ struct Conv3DWinogradAttrs : public tvm::AttrsNode<Conv3DWinogradAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCDHW")
.describe(
@@ -539,7 +539,7 @@ struct Conv2DTransposeAttrs : public tvm::AttrsNode<Conv2DTransposeAttrs> {
"i.e. the number of output channels in the convolution.");
TVM_ATTR_FIELD(kernel_size)
.describe("The dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(strides)
.set_default(Array<IndexExpr>({1, 1}))
.describe("The strides of the convolution.");
@@ -626,7 +626,7 @@ struct Conv1DTransposeAttrs : public tvm::AttrsNode<Conv1DTransposeAttrs> {
"i.e. the number of output channels in the convolution.");
TVM_ATTR_FIELD(kernel_size)
.describe("The dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(strides)
.set_default(Array<IndexExpr>({1}))
.describe("The strides of the convolution.");
@@ -1016,7 +1016,7 @@ struct UpSampling3DAttrs : public tvm::AttrsNode<UpSampling3DAttrs> {
/*! \brief Attributes used for the padding operator */
struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
double pad_value;
Array<Array<IndexExpr> > pad_width;
Array<Array<Integer>> pad_width;
std::string pad_mode;

TVM_DECLARE_ATTRS(PadAttrs, "relay.attrs.PadAttrs") {
@@ -1037,7 +1037,7 @@ struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
/*! \brief Attributes used for the MirrorPadding operator */
struct MirrorPadAttrs : public tvm::AttrsNode<MirrorPadAttrs> {
std::string mode;
Array<Array<IndexExpr> > pad_width;
Array<Array<IndexExpr>> pad_width;

TVM_DECLARE_ATTRS(MirrorPadAttrs, "relay.attrs.MirrorPadAttrs") {
TVM_ATTR_FIELD(mode)
@@ -1242,7 +1242,7 @@ struct DeformableConv2DAttrs : public tvm::AttrsNode<DeformableConv2DAttrs> {
.set_default(NullValue<IndexExpr>());
TVM_ATTR_FIELD(kernel_size)
.describe("Specifies the dimensions of the convolution window.")
.set_default(NullValue<Array<IndexExpr> >());
.set_default(NullValue<Array<IndexExpr>>());
TVM_ATTR_FIELD(data_layout)
.set_default("NCHW")
.describe(
30 changes: 17 additions & 13 deletions include/tvm/topi/nn.h
@@ -151,30 +151,34 @@ inline tvm::te::Tensor pad(const tvm::te::Tensor& t, const tvm::Array<tvm::PrimExpr>& pad_before,
inline tvm::te::Tensor pad(const tvm::te::Tensor& t, const tvm::Array<tvm::PrimExpr>& pad_before,
tvm::Array<tvm::PrimExpr> pad_after = tvm::Array<tvm::PrimExpr>(),
PrimExpr pad_value = PrimExpr(), std::string name = "T_pad",
std::string tag = kElementWise, std::string pad_mode = "constant") {
if (pad_after.size() < pad_before.size()) {
for (size_t i = pad_after.size(); i < pad_before.size(); ++i) {
pad_after.push_back(pad_before[i]);
}
}
std::string tag = kElementWise, std::string pad_mode = "constant",
const Array<PrimExpr>* dyn_output_shape = nullptr) {
arith::Analyzer analyzer;
CHECK_GE(pad_before.size(), 1);
CHECK_EQ(pad_before.size(), pad_after.size());
tvm::Array<tvm::PrimExpr> output_shape;
tvm::Array<tvm::PrimExpr> pad_before_int32;
tvm::Array<tvm::PrimExpr> pad_after_int32;

for (const auto& ele : pad_before) {
pad_before_int32.push_back(tvm::cast(tvm::DataType::Int(32), ele));
}
for (const auto& ele : pad_after) {
pad_after_int32.push_back(tvm::cast(tvm::DataType::Int(32), ele));
}
for (size_t i = 0; i < t->shape.size(); ++i) {
if (i >= pad_before.size()) {
output_shape.push_back(t->shape[i]);
} else {
output_shape.push_back(
analyzer.Simplify(t->shape[i] + pad_before_int32[i] + pad_after_int32[i]));

tvm::Array<tvm::PrimExpr> output_shape;
if (dyn_output_shape == nullptr) {
for (size_t i = 0; i < t->shape.size(); ++i) {
if (i >= pad_before.size()) {
output_shape.push_back(t->shape[i]);
} else {
output_shape.push_back(
analyzer.Simplify(t->shape[i] + pad_before_int32[i] + pad_after_int32[i]));
}
}
} else {
for (size_t i = 0; i < dyn_output_shape->size(); i++) {
output_shape.push_back((*dyn_output_shape)[i]);
}
}

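Two behavioral notes fall out of this hunk: pad_before and pad_after must now be the same length (the old code extended pad_after implicitly), and the optional dyn_output_shape pointer lets a caller supply a runtime-computed output shape instead of the static per-axis sum. A minimal Python sketch of that shape rule (illustrative, not part of the PR):

def padded_shape(in_shape, pad_before, pad_after, dyn_output_shape=None):
    # Dynamic case: trust the caller's precomputed output shape.
    if dyn_output_shape is not None:
        return list(dyn_output_shape)
    out = []
    for i, dim in enumerate(in_shape):
        # Axes beyond len(pad_before) are left unpadded.
        if i >= len(pad_before):
            out.append(dim)
        else:
            out.append(dim + pad_before[i] + pad_after[i])
    return out

print(padded_shape([1, 3, 32, 32], [0, 0, 1, 1], [0, 0, 1, 1]))  # [1, 3, 34, 34]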
20 changes: 20 additions & 0 deletions python/tvm/relay/op/nn/dyn/__init__.py
@@ -0,0 +1,20 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin, invalid-name
"""The Relay namespace containing dynamic ops."""

from . import _nn
20 changes: 20 additions & 0 deletions python/tvm/relay/op/nn/dyn/_make.py
@@ -0,0 +1,20 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi

tvm._ffi._init_api("relay.op.nn.dyn._make", __name__)
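_init_api binds every packed function registered on the C++ side under the prefix "relay.op.nn.dyn._make" as an attribute of this module, so _nn.py and nn.py can invoke the C++ op constructors directly. A quick sanity check (assuming a TVM build that includes this PR):

from tvm.relay.op.nn.dyn import _make

# Each attribute is a PackedFunc wrapping a C++ registered global function.
print(_make.pad)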
103 changes: 103 additions & 0 deletions python/tvm/relay/op/nn/dyn/_nn.py
@@ -0,0 +1,103 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
"""Backend compiler related feature registration"""

from __future__ import absolute_import

from tvm import topi

from tvm.runtime import convert
from tvm.te.hybrid import script
from ...op import register_shape_func, register_compute
from ...op import register_injective_schedule, register_broadcast_schedule

# upsampling
@register_compute("nn.dyn.upsampling")
def compute_upsampling(attrs, inputs, out_dtype):
data = inputs[0]
scale_h = inputs[1]
scale_w = inputs[2]
layout = attrs.layout
method = attrs.method
align_corners = attrs.align_corners
return [topi.nn.upsampling(data, scale_h, scale_w, layout,
method, align_corners, out_dtype.shape)]

register_injective_schedule("nn.dyn.upsampling")

# pad
register_broadcast_schedule("nn.dyn.pad")

#####################
# Shape functions #
#####################

# upsampling
@script
def _upsampling_nhwc_shape_func(dshape, scale_h, scale_w, ndim):
out = output_tensor((ndim,), "int64")
batch_size = dshape[0]
in_height = dshape[1]
in_width = dshape[2]
channels = dshape[3]
out[0] = int64(batch_size)
out[1] = int64(round(in_height * scale_h[0]))
out[2] = int64(round(in_width * scale_w[0]))
out[3] = int64(channels)
return out

@script
def _upsampling_nchw_shape_func(dshape, scale_h, scale_w, ndim):
out = output_tensor((ndim,), "int64")
batch_size = dshape[0]
channels = dshape[1]
in_height = dshape[2]
in_width = dshape[3]
out[0] = int64(batch_size)
out[1] = int64(channels)
out[2] = int64(round(in_height * scale_h[0]))
out[3] = int64(round(in_width * scale_w[0]))
return out

@register_shape_func("nn.dyn.upsampling", True)
def upsampling_shape_func(attrs, inputs, _):
"""Shape function for upsampling. Supports NCHW and NHWC layouts."""
if attrs.layout == "NHWC":
shape_func = _upsampling_nhwc_shape_func(inputs[0].shape, inputs[1], inputs[2],
convert(len(inputs[0].shape)))
elif attrs.layout == "NCHW":
shape_func = _upsampling_nchw_shape_func(inputs[0].shape, inputs[1], inputs[2],
convert(len(inputs[0].shape)))
else:
assert False, "Layout passed to the upsampling shape func must be NCHW or NHWC"
return [shape_func]
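For intuition, the NCHW rule above boils down to the following numpy arithmetic (illustrative values, not part of the PR):

import numpy as np

dshape = np.array([1, 3, 32, 32], dtype="int64")  # NCHW input shape
scale_h = np.array([2.0])  # rank-1 tensors, as the shape func receives them
scale_w = np.array([1.5])
out = dshape.copy()
out[2] = np.int64(round(dshape[2] * scale_h[0]))  # 64
out[3] = np.int64(round(dshape[3] * scale_w[0]))  # 48
print(out)  # [ 1  3 64 48]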

@script
def _dyn_pad_shape_func(data, pad_width):
ndim = len(data.shape)
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
out[i] = int64(pad_width[i, 0] + pad_width[i, 1] + data.shape[i])
return out

@register_shape_func("nn.dyn.pad", True)
def pad_shape_func(attrs, inputs, data):
"""
Shape function for dynamic pad op.
"""
return [_dyn_pad_shape_func(inputs[0], inputs[1])]
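The dynamic pad shape function is one line of integer arithmetic per axis; in numpy terms (illustrative):

import numpy as np

data_shape = np.array([1, 3, 224, 224], dtype="int64")
# pad_width has shape (ndim, 2): one (before, after) pair per axis.
pad_width = np.array([[0, 0], [0, 0], [3, 3], [3, 3]], dtype="int64")
print(pad_width[:, 0] + pad_width[:, 1] + data_shape)  # [  1   3 230 230]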
29 changes: 22 additions & 7 deletions python/tvm/relay/op/nn/nn.py
@@ -19,7 +19,9 @@
from tvm.relay import expr

from . import _make
from .dyn import _make as _dyn_make
from .util import get_pad_tuple1d, get_pad_tuple2d, get_pad_tuple3d
from ...expr import const, Expr


def conv1d(data,
@@ -1147,13 +1149,13 @@ def upsampling(data,

Parameters
----------
data : tvm.relay.Expr
data : tvm.relay.Expr or tuple<anytype> or list<anytype>
Review comment (Contributor): I don't think data can be a tuple or list?

The input data to the operator.

scale_h : tvm.relay.Expr
scale_h : tvm.relay.Expr or int or float
The scale factor for height upsampling.

scale_w : tvm.relay.Expr
scale_w : tvm.relay.Expr or int or float
The scale factor for width upsampling.

layout : str, optional
@@ -1170,6 +1172,12 @@ def upsampling(data,
result : tvm.relay.Expr
The computed result.
"""
if isinstance(scale_h, Expr) or isinstance(scale_w, Expr):
if not isinstance(scale_h, Expr):
scale_h = const(scale_h, "float64")
if not isinstance(scale_w, Expr):
scale_w = const(scale_w, "float64")
return _dyn_make.upsampling(data, scale_h, scale_w, layout, method, align_corners)
return _make.upsampling(data, scale_h, scale_w, layout, method, align_corners)
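The dispatch rule above: if either scale is already a Relay expression, the other is wrapped in a float64 constant and the call routes to the new dynamic op; otherwise the static path is unchanged. A usage sketch (assuming a build with this PR):

from tvm import relay

x = relay.var("x", shape=(1, 3, 32, 32), dtype="float32")
# Static scales: lowers to the existing nn.upsampling op.
y_static = relay.nn.upsampling(x, scale_h=2.0, scale_w=2.0)
# Relay-expression scales: routes through _dyn_make.upsampling instead.
y_dyn = relay.nn.upsampling(x, scale_h=relay.const(2.0, "float64"),
                            scale_w=relay.const(1.5, "float64"))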


@@ -1410,7 +1418,7 @@ def prelu(data, alpha, axis=1):

def pad(data,
pad_width,
pad_value=0.0,
pad_value=0,
pad_mode='constant'):
r"""Padding

@@ -1421,10 +1429,10 @@ def pad(data,
----------
data: tvm.relay.Expr
The input data to the operator
pad_width: tuple of <tuple of <int>>, required
pad_width: tuple of <tuple of <int>>, or tvm.relay.Expr, required
Number of values padded to the edges of each axis, in the format
of ((before_1, after_1), ..., (before_N, after_N))
pad_value: float, optional, default=0.0
pad_value: float, or tvm.relay.Expr, optional, default=0
The value used for padding
pad_mode: 'constant', 'edge', 'reflect'
'constant' pads with constant_value pad_value
@@ -1435,7 +1443,14 @@ def pad(data,
result : tvm.relay.Expr
The computed result.
"""
return _make.pad(data, pad_width, pad_value, pad_mode)
if isinstance(pad_width, Expr) or isinstance(pad_value, Expr):
if not isinstance(pad_width, Expr):
pad_width = const(list(pad_width))
if not isinstance(pad_value, Expr):
pad_value = const(pad_value)
return _dyn_make.pad(data, pad_width, pad_value, pad_mode)
else:
return _make.pad(data, pad_width, pad_value, pad_mode)
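pad follows the same pattern: a Relay-expression pad_width or pad_value selects the dynamic op, and plain tuples keep the static path. A usage sketch (assuming a build with this PR):

from tvm import relay

x = relay.var("x", shape=(1, 3, 32, 32), dtype="float32")
# Static pad_width: lowers to the existing nn.pad op.
y_static = relay.nn.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1)))
# Relay-expression pad_width: routes through _dyn_make.pad instead.
pw = relay.const([[0, 0], [0, 0], [1, 1], [1, 1]], dtype="int64")
y_dyn = relay.nn.pad(x, pw, pad_value=relay.const(0.0))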


def dilate(data, strides):
2 changes: 1 addition & 1 deletion python/tvm/te/hybrid/calls.py
@@ -73,7 +73,7 @@ def _math_intrin(func_id, args):
from tvm.tir import op
return getattr(op, func_id)(*args)

sqrt = log = exp = tanh = sigmoid = power = popcount = _math_intrin #pylint: disable=invalid-name
sqrt = log = exp = tanh = sigmoid = power = popcount = round = _math_intrin #pylint: disable=invalid-name


def _min_max(func_id, args):
1 change: 1 addition & 0 deletions python/tvm/te/hybrid/runtime.py
@@ -126,6 +126,7 @@ def max_num_threads(allow_none=True):
'exp' : numpy.exp,
'sigmoid' : sigmoid,
'popcount' : popcount,
'round' : round,
'likely' : lambda cond: cond,
'uint8' : numpy.uint8,
'uint16' : numpy.uint16,
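These two one-line registrations are what let the hybrid-script shape functions in _nn.py call round(): calls.py resolves it to the TIR intrinsic when the script is compiled, and runtime.py maps it to Python's builtin when the script is emulated. The compiled path dispatches via getattr(op, "round"), i.e. (illustrative):

from tvm import tir
from tvm.tir import op  # the module calls.py resolves intrinsics from

x = tir.FloatImm("float64", 2.5)
print(op.round(x))  # the TIR round intrinsic that hybrid-script round lowers to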