[TOPI] Fix nn.pool*d issue with 'vectorize' function and add unit tests #8541

Merged · 5 commits · Jul 29, 2021
Changes from 4 commits
58 changes: 56 additions & 2 deletions python/tvm/topi/testing/poolnd_python.py
@@ -18,12 +18,62 @@
 """Ground truth max and average pooling operators in python."""
 import itertools
 import math
-from typing import List, Tuple
+from typing import List, Tuple, Optional

 import numpy as np
 import tvm


+def _get_supported_layout(dims: int):
+    """
+    Returns the layout supported by poolnd_python based on the number of
+    dimensions of the input tensor
+    """
+    assert dims in [3, 4, 5], f"{dims}-dimensional tensor is not supported"
+    if dims == 3:
+        return "NCW"
+    elif dims == 4:
+        return "NCHW"
+    elif dims == 5:
+        return "NCDHW"
+
+
+def _convert_to_layout(
+    input: np.ndarray,
+    layout: str,
+) -> np.ndarray:
+    """
+    Converts the result back to the original layout after the algorithm is finished
+    """
+    supported_layout = _get_supported_layout(input.ndim)
+    if layout is not None and supported_layout != layout:
+        # Generate transpose list
+        transpose_list = []
+        for d in layout:
+            transpose_list.append(supported_layout.index(d))
+        return input.transpose(transpose_list)
+    else:
+        return input
+
+
+def _convert_from_layout(
+    input: np.ndarray,
+    layout: str,
+) -> np.ndarray:
+    """
+    Converts the tensor to one of the supported layouts
+    """
+    supported_layout = _get_supported_layout(input.ndim)
+    if layout is not None and supported_layout != layout:
+        # Generate transpose list
+        transpose_list = []
+        for d in supported_layout:
+            transpose_list.append(layout.index(d))
+        return input.transpose(transpose_list)
+    else:
+        return input
+
+
 def get_slice(
     spatial_dimensions: int,
     pad_np: np.array,
@@ -90,8 +140,12 @@ def poolnd_python(
     count_include_pad: bool = True,
     ceil_mode: bool = False,
     dtype: str = "float32",
+    layout: Optional[str] = None,
 ) -> np.array:
     """Ground truth pooling operator implemented in numpy."""

+    np_data = _convert_from_layout(np_data, layout)
+
     out_shape = [np_data.shape[0], np_data.shape[1]]
     for dim in range(2, len(np_data.shape)):
         i = dim - 2
@@ -158,4 +212,4 @@ def poolnd_python(
     else:
         raise ValueError("Pool type {} is not supported".format(pool_type))

-    return ret_np
+    return _convert_to_layout(ret_np, layout)
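
For context, a minimal round-trip sketch of what the two new helpers do (illustrative only: it imports the private functions directly, and the NHWC shape is made up). _convert_from_layout transposes the caller's tensor into the layout the numpy reference works in; _convert_to_layout transposes the pooled result back.

import numpy as np

# Importing the private helpers purely for illustration.
from tvm.topi.testing.poolnd_python import _convert_from_layout, _convert_to_layout

nhwc = np.zeros((1, 32, 32, 16), dtype="float32")  # hypothetical NHWC input
nchw = _convert_from_layout(nhwc, "NHWC")          # -> (1, 16, 32, 32), the supported NCHW layout
result = _convert_to_layout(nchw, "NHWC")          # -> (1, 32, 32, 16), back to the caller's layout
assert result.shape == nhwc.shape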
6 changes: 3 additions & 3 deletions python/tvm/topi/x86/pooling.py
@@ -26,8 +26,8 @@ def vectorize(fused_axis, num_parallel_axis, vectorize_limit=64):
         reorder_axis = [fused_axis]
         for i in range(num_parallel_axis, len(sch.op.axis) - 1):
             reorder_axis.append(sch.op.axis[i])
-        kw, kh = sch.op.reduce_axis
-        fuse_k = sch.fuse(kw, kh)
+        k = sch.op.reduce_axis
+        fuse_k = sch.fuse(*k)
         c = sch.op.axis[len(sch.op.axis) - 1]
         reorder_axis += [fuse_k, c]
         sch.reorder(*reorder_axis)
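
This is the bug named in the PR title: `kw, kh = sch.op.reduce_axis` hard-codes two reduce axes, which only holds for pool2d; pool1d has one and pool3d has three, so the unpacking raised a ValueError. A minimal sketch of the fixed pattern, assuming a hand-written 3-D max pool in te (the shapes, names, and stride-2 window are illustrative, not taken from the PR):

import tvm
from tvm import te

# Hypothetical 3-D max pool (window 2, stride 2) written directly in te,
# just to show that the number of reduce axes varies with dimensionality.
data = te.placeholder((1, 16, 32, 32, 32), name="data")  # NCDHW
kd = te.reduce_axis((0, 2), name="kd")
kh = te.reduce_axis((0, 2), name="kh")
kw = te.reduce_axis((0, 2), name="kw")
pool = te.compute(
    (1, 16, 16, 16, 16),
    lambda n, c, d, h, w: te.max(
        data[n, c, 2 * d + kd, 2 * h + kh, 2 * w + kw], axis=[kd, kh, kw]
    ),
    name="pool",
)
s = te.create_schedule(pool.op)

# reduce_axis is a list of length 1, 2, or 3 for pool1d/2d/3d; star-unpacking
# into fuse() handles all of them, whereas `kw, kh = ...` only worked for 2-D.
fused = s[pool].fuse(*s[pool].op.reduce_axis)

Star-unpacking makes the schedule dimension-agnostic, which is what lets pool1d and pool3d reuse the same x86 schedule as pool2d.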
@@ -83,7 +83,7 @@ def schedule_pool(outs, layout):
     def _schedule(PaddedInput, Pool):
         if isinstance(PaddedInput.op, te.tensor.ComputeOp):
             s[PaddedInput].compute_inline()
-        do_vectorize = layout[-1] not in "HWhw"
+        do_vectorize = layout[-1] not in "DHWdhw"
         _parallel_sch(s[Pool], outs[0].shape, do_vectorize)

     def traverse(OP):
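
The second change widens the vectorization guard: vectorizing the innermost axis only pays off when that axis is a channel dimension, and the old string "HWhw" did not treat a depth axis as spatial. A trivially runnable illustration of the predicate (the layout strings are hypothetical examples, not an exhaustive list):

# Skip vectorization whenever the innermost axis is spatial (width, height,
# or, after this fix, depth); vectorize when it is a channel axis like "C"
# or a split channel like "16c".
for layout in ["NCW", "NCHW", "NCDHW", "NHWC", "NDHWC", "NCHW16c"]:
    do_vectorize = layout[-1] not in "DHWdhw"
    print(f"{layout:8s} -> {'vectorize' if do_vectorize else 'skip'}")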