Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix padding in pooling op #4738

Merged
merged 1 commit into from
Jan 22, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 12 additions & 12 deletions src/relay/op/nn/pooling.cc
Original file line number Diff line number Diff line change
Expand Up @@ -947,21 +947,21 @@ bool Pool3DRel(const Array<Type>& types,
const auto hidx = layout.IndexOf(LayoutAxis::Get('H'));
const auto widx = layout.IndexOf(LayoutAxis::Get('W'));

IndexExpr pad_d, pad_h, pad_w;
IndexExpr pad[3];
if (param->padding.size() == 1) {
pad_d = param->padding[0] * 2;
pad_h = param->padding[0] * 2;
pad_w = param->padding[0] * 2;
pad[0] = param->padding[0] * 2;
pad[1] = param->padding[0] * 2;
pad[2] = param->padding[0] * 2;
} else if (param->padding.size() == 3) {
// (front, top, left)
pad_d = param->padding[0] * 2;
pad_h = param->padding[1] * 2;
pad_w = param->padding[2] * 2;
pad[0] = param->padding[0] * 2;
pad[1] = param->padding[1] * 2;
pad[2] = param->padding[2] * 2;
} else if (param->padding.size() == 6) {
// (front, top, left, back, bottom, right)
pad_d = param->padding[0] + param->padding[3];
pad_h = param->padding[1] + param->padding[4];
pad_w = param->padding[2] + param->padding[5];
pad[0] = param->padding[0] + param->padding[3];
pad[1] = param->padding[1] + param->padding[4];
pad[2] = param->padding[2] + param->padding[5];
} else {
return false;
}
Expand All @@ -978,10 +978,10 @@ bool Pool3DRel(const Array<Type>& types,
oshape[ii] = dshape[ii];
} else {
if (param->ceil_mode) {
oshape[ii] = ((dshape[ii] + pad_d - param->pool_size[i] +
oshape[ii] = ((dshape[ii] + pad[i] - param->pool_size[i] +
param->strides[i] - 1) / param->strides[i]) + 1;
} else {
oshape[ii] = ((dshape[ii] + pad_d - param->pool_size[i]) / param->strides[i]) + 1;
oshape[ii] = ((dshape[ii] + pad[i] - param->pool_size[i]) / param->strides[i]) + 1;
}
}
}
Expand Down
24 changes: 14 additions & 10 deletions tests/python/relay/test_op_level2.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,16 +21,10 @@
from tvm import autotvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list
from tvm.relay.testing import ctx_list, run_infer_type
from tvm.contrib import util
import topi.testing

def run_infer_type(expr):
    """Run Relay type inference over *expr* and return the typed result.

    The expression is wrapped in a module, the ``InferType`` pass is applied,
    and the inferred ``main`` function is returned whole when *expr* was a
    ``relay.Function``; otherwise only its body expression is returned.
    """
    typed_mod = transform.InferType()(relay.Module.from_expr(expr))
    main_fn = typed_mod["main"]
    if isinstance(expr, relay.Function):
        return main_fn
    return main_fn.body


def test_conv1d_infer_type():
# symbolic in batch dimension
Expand Down Expand Up @@ -768,7 +762,7 @@ def _test_pool1d(opfunc):

def test_pool3d():

def _test_pool3d(opfunc):
def _test_pool3d(opfunc, padding=(0, 0, 0, 0, 0, 0), out_shape=(1, 3, 16, 16, 16)):
n, c, d, h, w = tvm.size_var("n"), 10, 5, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32"))
y = opfunc(x, pool_size=(1, 1, 1))
Expand All @@ -780,18 +774,28 @@ def _test_pool3d(opfunc):
dshape = (1, 3, 32, 32, 32)
x = relay.var("x", shape=dshape)
pool_type = 'max' if 'max' in str(opfunc) else 'avg'
y = opfunc(x, pool_size=(2, 2, 2), strides=(2, 2, 2), padding=(0, 0, 0, 0, 0, 0))
y = opfunc(x, pool_size=(2, 2, 2), strides=(2, 2, 2), padding=padding)
func = relay.Function([x], y)
# check output shape
f_out_shape = tuple(map(lambda x: int(x), run_infer_type(func).ret_type.shape))
assert out_shape == f_out_shape, \
"Output shape mismatch. expected {}, actual {}".format(out_shape, f_out_shape)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = topi.testing.pool3d_ncdhw_python(data, (2, 2, 2), (2, 2, 2),
(0, 0, 0, 0, 0, 0), (1, 3, 16, 16, 16), pool_type, False)
padding, out_shape, pool_type, False)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

_test_pool3d(relay.nn.max_pool3d)
_test_pool3d(relay.nn.max_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16))
_test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16))
_test_pool3d(relay.nn.max_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20))
_test_pool3d(relay.nn.avg_pool3d)
_test_pool3d(relay.nn.avg_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16))
_test_pool3d(relay.nn.avg_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16))
_test_pool3d(relay.nn.avg_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20))


def test_avg_pool2d_no_count_pad():
Expand Down
18 changes: 13 additions & 5 deletions topi/python/topi/testing/pool3d_python.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
"""max_pool3d and avg_pool3d in python"""
import math
import numpy as np
import tvm

def pool3d_ncdhw_python(np_data, kernel,
strides, padding,
Expand All @@ -39,10 +40,17 @@ def pool3d_ncdhw_python(np_data, kernel,
assert out_shape[3] == int(math.floor(float(in_shape[3] - k_h + pt + pb) / s_h) + 1)
assert out_shape[4] == int(math.floor(float(in_shape[4] - k_w + pl + pr) / s_w) + 1)

pad_np = np.zeros(shape=(in_n, in_c,
in_d + pf + pk,
in_h + pt + pb,
in_w + pl + pr)).astype(dtype)
fill_value = tvm.const(0.0, dtype).value
if not(count_include_pad) and pool_type == 'max':
fill_value = tvm.min_value(dtype).value

pad_np = np.full(shape=(in_n, in_c,
in_d + pf + pk,
in_h + pt + pb,
in_w + pl + pr),
fill_value=fill_value,
dtype=dtype)

no_zero = (range(in_n),
range(in_c),
(range(pf, in_d + pf)),
Expand Down Expand Up @@ -81,5 +89,5 @@ def pool3d_ncdhw_python(np_data, kernel,
else:
raise ValueError("pool type {} is not supported".format(pool_type))

ret_np = np.maximum(ret_np, 0.0)
ret_np = np.maximum(ret_np, fill_value)
return ret_np