Move Ops in relay.op.contrib.* (#4942)
* move contrib

* lint

* address comment

* address comment
comaniac authored Feb 27, 2020
1 parent 6b1136d commit 81ff061
Showing 14 changed files with 141 additions and 187 deletions.
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/mxnet.py
@@ -313,7 +313,7 @@ def _pool2d(new_op, is_avg):

 def _mx_adaptive_avg_pooling(inputs, attrs):
     output_size = attrs.get_int_tuple("output_size", [])
-    return _op.contrib.adaptive_avg_pool2d(inputs[0], output_size)
+    return _op.nn.adaptive_avg_pool2d(inputs[0], output_size)


 def _mx_dropout(inputs, attrs):
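A minimal sketch of what the MXNet converter above now builds, assuming a TVM build that includes this commit (shapes are illustrative):

```python
import tvm
from tvm import relay

# Construct the op the converter now emits, via the nn namespace
# rather than the removed relay.op.contrib path.
data = relay.var("data", shape=(1, 3, 224, 224), dtype="float32")
pooled = relay.nn.adaptive_avg_pool2d(data, output_size=(7, 7))
mod = tvm.IRModule.from_expr(relay.Function([data], pooled))
print(mod)  # body should print as nn.adaptive_avg_pool2d(%data, output_size=[7, 7])
```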
4 changes: 2 additions & 2 deletions python/tvm/relay/frontend/pytorch.py
@@ -151,7 +151,7 @@ def _impl(inputs, input_types):
         data = inputs[0]
         output_size = _infer_shape(inputs[1])

-        return _op.contrib.contrib.adaptive_avg_pool2d(
+        return _op.nn.adaptive_avg_pool2d(
             data,
             output_size=output_size)
     return _impl
@@ -161,7 +161,7 @@ def _impl(inputs, input_types):
         data = inputs[0]
         output_size = _infer_shape(inputs[1])

-        return _op.contrib.contrib.adaptive_max_pool2d(
+        return _op.nn.adaptive_max_pool2d(
             data,
             output_size=output_size)
     return _impl
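The max-pool variant follows the same pattern. A sketch using the frontend's shape-inference helper (the `infer_shape` behind `_infer_shape` above); `output_size=(1, 1)` mirrors the global-pooling case PyTorch models commonly hit:

```python
from tvm import relay
from tvm.relay.frontend.common import infer_shape

data = relay.var("data", shape=(1, 64, 56, 56), dtype="float32")
out = relay.nn.adaptive_max_pool2d(data, output_size=(1, 1))
print(infer_shape(out))  # expected: (1, 64, 1, 1)
```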
1 change: 0 additions & 1 deletion python/tvm/relay/op/__init__.py
@@ -32,7 +32,6 @@
 from . import memory
 from . import image
 from . import vision
-from . import contrib
 from . import op_attrs
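With `relay.op.contrib` no longer imported here, downstream callers need a one-line migration; a sketch:

```python
from tvm import relay

x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")
# Before this commit: out = relay.op.contrib.adaptive_avg_pool2d(x, (7, 7))
# After it, the same op lives under the nn namespace:
out = relay.op.nn.adaptive_avg_pool2d(x, (7, 7))
```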
2 changes: 2 additions & 0 deletions python/tvm/relay/op/_tensor.py
@@ -70,6 +70,8 @@
 register_injective_schedule("right_shift")
 register_injective_schedule("left_shift")
 register_injective_schedule("shape_of")
+register_injective_schedule("ndarray_size")
+

 # zeros
 @register_compute("zeros")
2 changes: 0 additions & 2 deletions python/tvm/relay/op/contrib/__init__.py
@@ -17,5 +17,3 @@
 # pylint: disable=wildcard-import
 """Neural network related operators."""
 from __future__ import absolute_import as _abs
-from .contrib import *
-from . import _contrib
36 changes: 0 additions & 36 deletions python/tvm/relay/op/contrib/_contrib.py

This file was deleted.

20 changes: 0 additions & 20 deletions python/tvm/relay/op/contrib/_make.py

This file was deleted.

112 changes: 0 additions & 112 deletions python/tvm/relay/op/contrib/contrib.py
@@ -17,115 +17,3 @@
 #pylint: disable=invalid-name, too-many-lines
 """Contrib operations."""
 from __future__ import absolute_import as _abs
-from . import _make
-
-
-def adaptive_max_pool2d(data,
-                        output_size=None,
-                        layout="NCHW"):
-    r"""2D adaptive max pooling operator. This operator is experimental.
-
-    This operator takes data as input and does 2D max value calculation
-    across each window represented by WxH.
-
-    In the default case, where the data_layout is `NCHW`
-    a data Tensor with shape `(batch_size, in_channels, height, width)`,
-    to produce an output Tensor with shape
-    (batch_size, in_channels, output_height, output_width).
-
-    The pooling kernel and stride sizes are automatically chosen for
-    desired output sizes.
-
-    For output_size:
-        If this argument is not provided, input height and width will be used
-        as output height and width.
-
-        If a single integer is provided for output_size, the output size is
-        (N x C x output_size x output_size) for any input (NCHW).
-
-        If a tuple of integers (height, width) are provided for output_size,
-        the output size is (N x C x height x width) for any input (NCHW).
-
-    Parameters
-    ----------
-    data : tvm.relay.Expr
-        The input data to the operator.
-
-    output_size : tuple of int. optional
-        Output height and width.
-
-    layout : str, optional
-        Layout of the input.
-
-    Returns
-    -------
-    result : tvm.relay.Expr
-        The computed result.
-    """
-    output_size = [] or output_size
-    return _make.adaptive_max_pool2d(data, output_size, layout)
-
-
-def adaptive_avg_pool2d(data,
-                        output_size=None,
-                        layout="NCHW"):
-    r"""2D adaptive average pooling operator. This operator is experimental.
-
-    This operator takes data as input and does 2D average value calculation
-    across each window represented by WxH.
-
-    In the default case, where the data_layout is `NCHW`
-    a data Tensor with shape `(batch_size, in_channels, height, width)`,
-    to produce an output Tensor with shape
-    (batch_size, in_channels, output_height, output_width).
-
-    The pooling kernel and stride sizes are automatically chosen for
-    desired output sizes.
-
-    For output_size:
-        If this argument is not provided, input height and width will be used
-        as output height and width.
-
-        If a single integer is provided for output_size, the output size is
-        (N x C x output_size x output_size) for any input (NCHW).
-
-        If a tuple of integers (height, width) are provided for output_size,
-        the output size is (N x C x height x width) for any input (NCHW).
-
-    Parameters
-    ----------
-    data : tvm.relay.Expr
-        The input data to the operator.
-
-    output_size : tuple of int. optional
-        Output height and width.
-
-    layout : str, optional
-        Layout of the input.
-
-    Returns
-    -------
-    result : tvm.relay.Expr
-        The computed result.
-    """
-    output_size = [] or output_size
-    return _make.adaptive_avg_pool2d(data, output_size, layout)
-
-
-def ndarray_size(data, dtype="int32"):
-    """Get number of elements of input tensor.
-
-    Parameters
-    ----------
-    data : tvm.relay.Expr
-        The input tensor.
-
-    dtype : str, optional
-        The target data type.
-
-    Returns
-    -------
-    result : tvm.relay.Expr
-        The number of elements of input tensor.
-    """
-    return _make.ndarray_size(data, dtype)
10 changes: 10 additions & 0 deletions python/tvm/relay/op/nn/_nn.py
@@ -247,6 +247,16 @@ def legalize_conv2d_transpose(attrs, inputs, types):
 reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)


+# adaptive_max_pool2d
+reg.register_schedule("nn.adaptive_max_pool2d", strategy.schedule_adaptive_pool)
+reg.register_pattern("nn.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
+
+
+# adaptive_avg_pool2d
+reg.register_schedule("nn.adaptive_avg_pool2d", strategy.schedule_adaptive_pool)
+reg.register_pattern("nn.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
+
+
 # leaky_relu
 reg.register_broadcast_schedule("nn.leaky_relu")
 reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
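A sketch of what the `OUT_ELEMWISE_FUSABLE` pattern enables: the fusion pass may group a trailing elementwise op into the pool's compiled kernel (exact grouping is up to `FuseOps`):

```python
import tvm
from tvm import relay

x = relay.var("x", shape=(1, 8, 32, 32), dtype="float32")
# An elementwise op (relu) following the adaptive pool.
y = relay.nn.relu(relay.nn.adaptive_avg_pool2d(x, output_size=(1, 1)))
mod = tvm.IRModule.from_expr(relay.Function([x], y))
mod = relay.transform.InferType()(mod)
mod = relay.transform.FuseOps(fuse_opt_level=2)(mod)
print(mod)  # expect the pool and relu inside one fused primitive function
```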
94 changes: 94 additions & 0 deletions python/tvm/relay/op/nn/nn.py
@@ -2277,3 +2277,97 @@ def space_to_depth(data, block_size, layout='NCHW'):
             in_height / block_size, in_width / block_size]
     """
     return _make.space_to_depth(data, block_size, layout)
+
+
+def adaptive_max_pool2d(data,
+                        output_size=None,
+                        layout="NCHW"):
+    r"""2D adaptive max pooling operator. This operator is experimental.
+
+    This operator takes data as input and does 2D max value calculation
+    across each window represented by WxH.
+
+    In the default case, where the data_layout is `NCHW`, a data Tensor
+    with shape `(batch_size, in_channels, height, width)` produces an
+    output Tensor with shape
+    `(batch_size, in_channels, output_height, output_width)`.
+
+    The pooling kernel and stride sizes are automatically chosen for the
+    desired output size.
+
+    For output_size:
+        If this argument is not provided, input height and width will be used
+        as output height and width.
+
+        If a single integer is provided for output_size, the output size is
+        (N x C x output_size x output_size) for any input (NCHW).
+
+        If a tuple of integers (height, width) is provided for output_size,
+        the output size is (N x C x height x width) for any input (NCHW).
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input data to the operator.
+
+    output_size : tuple of int, optional
+        Output height and width.
+
+    layout : str, optional
+        Layout of the input.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The computed result.
+    """
+    output_size = output_size or []  # None means "use input height/width"
+    return _make.adaptive_max_pool2d(data, output_size, layout)
+
+
+def adaptive_avg_pool2d(data,
+                        output_size=None,
+                        layout="NCHW"):
+    r"""2D adaptive average pooling operator. This operator is experimental.
+
+    This operator takes data as input and does 2D average value calculation
+    across each window represented by WxH.
+
+    In the default case, where the data_layout is `NCHW`, a data Tensor
+    with shape `(batch_size, in_channels, height, width)` produces an
+    output Tensor with shape
+    `(batch_size, in_channels, output_height, output_width)`.
+
+    The pooling kernel and stride sizes are automatically chosen for the
+    desired output size.
+
+    For output_size:
+        If this argument is not provided, input height and width will be used
+        as output height and width.
+
+        If a single integer is provided for output_size, the output size is
+        (N x C x output_size x output_size) for any input (NCHW).
+
+        If a tuple of integers (height, width) is provided for output_size,
+        the output size is (N x C x height x width) for any input (NCHW).
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input data to the operator.
+
+    output_size : tuple of int, optional
+        Output height and width.
+
+    layout : str, optional
+        Layout of the input.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The computed result.
+    """
+    output_size = output_size or []  # None means "use input height/width"
+    return _make.adaptive_avg_pool2d(data, output_size, layout)
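A worked check of the output_size rules in the docstrings above, using type inference only (a sketch; NCHW layout assumed):

```python
import tvm
from tvm import relay

def pooled_shape(output_size):
    x = relay.var("x", shape=(1, 16, 14, 14), dtype="float32")
    y = relay.nn.adaptive_avg_pool2d(x, output_size=output_size)
    mod = relay.transform.InferType()(tvm.IRModule.from_expr(relay.Function([x], y)))
    return mod["main"].ret_type.shape

print(pooled_shape(None))    # [1, 16, 14, 14]: input height/width reused
print(pooled_shape((7, 7)))  # [1, 16, 7, 7]
print(pooled_shape((4, 5)))  # [1, 16, 4, 5]
```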
19 changes: 19 additions & 0 deletions python/tvm/relay/op/tensor.py
@@ -974,3 +974,22 @@ def shape_of(data, dtype="int32"):
         The shape tensor.
     """
     return _make.shape_of(data, dtype)
+
+
+def ndarray_size(data, dtype="int32"):
+    """Get the number of elements of the input tensor.
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input tensor.
+
+    dtype : str, optional
+        The target data type.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The number of elements of the input tensor.
+    """
+    return _make.ndarray_size(data, dtype)
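An end-to-end sketch for the relocated op; the injective schedule registered in _tensor.py above is what lets it compile standalone (assumes an LLVM-enabled TVM build):

```python
import numpy as np
import tvm
from tvm import relay

x = relay.var("x", shape=(2, 3, 4), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], relay.ndarray_size(x, dtype="int32")))
ex = relay.create_executor("graph", mod=mod, target="llvm")
print(ex.evaluate()(np.zeros((2, 3, 4), dtype="float32")))  # 24, as an int32 scalar
```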