[TOPI] Fix nn.pool*d issue with 'vectorize' function and add unit tests (apache#8541)

* Fix issue in 'vectorize' function for 1D and 3D tensors

* Add pooling tests for channel last layouts

* Add support for more general layouts in "poolnd" implementation

* Reformat with 'black'

* Fix lint issues
MarioPeric-SiMa-ai authored and ylc committed Sep 29, 2021
1 parent 8066746 commit 97878b8
Showing 3 changed files with 254 additions and 325 deletions.
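
For context on the 'vectorize' fix below: sch.op.reduce_axis holds one reduction axis per pooled spatial dimension, so the old code's two-name unpacking only worked for 2-D pooling and raised a ValueError for pool1d and pool3d. A minimal plain-Python sketch of the failure mode (the lists and the fuse helper are hypothetical stand-ins for TVM axis objects and sch.fuse):

# Stand-ins for sch.op.reduce_axis: one reduction axis per pooled dimension.
reduce_axis_1d = ["kw"]
reduce_axis_2d = ["kw", "kh"]
reduce_axis_3d = ["kd", "kh", "kw"]

kw, kh = reduce_axis_2d  # the old pattern: only valid for pool2d
try:
    kw, kh = reduce_axis_1d  # pool1d: raises ValueError
except ValueError as err:
    print(err)  # not enough values to unpack

def fuse(*axes):
    # hypothetical stand-in for sch.fuse, which accepts any number of axes
    return "*".join(axes)

for k in (reduce_axis_1d, reduce_axis_2d, reduce_axis_3d):
    print(fuse(*k))  # the fixed pattern handles 1, 2, or 3 reduce axes
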
56 changes: 54 additions & 2 deletions python/tvm/topi/testing/poolnd_python.py
@@ -18,12 +18,60 @@
"""Ground truth max and average pooling operators in python."""
import itertools
import math
-from typing import List, Tuple
+from typing import List, Tuple, Optional

import numpy as np
import tvm


+def _get_supported_layout(dims: int):
+    """
+    Returns the layout supported by poolnd_python for the given number of
+    input tensor dimensions.
+    """
+    assert dims in [3, 4, 5], f"{dims}-dimensional tensor is not supported"
+    if dims == 3:
+        return "NCW"
+    if dims == 4:
+        return "NCHW"
+    # dims == 5
+    return "NCDHW"


+def _convert_to_layout(
+    input_tensor: np.ndarray,
+    layout: Optional[str],
+) -> np.ndarray:
+    """
+    Converts back to the original layout after the algorithm is finished.
+    """
+    supported_layout = _get_supported_layout(input_tensor.ndim)
+    if layout is not None and supported_layout != layout:
+        # Generate transpose list
+        transpose_list = []
+        for d in layout:
+            transpose_list.append(supported_layout.index(d))
+        return input_tensor.transpose(transpose_list)
+    return input_tensor


+def _convert_from_layout(
+    input_tensor: np.ndarray,
+    layout: Optional[str],
+) -> np.ndarray:
+    """
+    Converts the tensor to one of the supported layouts.
+    """
+    supported_layout = _get_supported_layout(input_tensor.ndim)
+    if layout is not None and supported_layout != layout:
+        # Generate transpose list
+        transpose_list = []
+        for d in supported_layout:
+            transpose_list.append(layout.index(d))
+        return input_tensor.transpose(transpose_list)
+    return input_tensor


def get_slice(
    spatial_dimensions: int,
    pad_np: np.array,
@@ -90,8 +138,12 @@ def poolnd_python(
    count_include_pad: bool = True,
    ceil_mode: bool = False,
    dtype: str = "float32",
+    layout: Optional[str] = None,
) -> np.array:
    """Ground truth pooling operator implemented in numpy."""
+
+    np_data = _convert_from_layout(np_data, layout)
+
    out_shape = [np_data.shape[0], np_data.shape[1]]
    for dim in range(2, len(np_data.shape)):
        i = dim - 2
@@ -158,4 +210,4 @@ def poolnd_python(
    else:
        raise ValueError("Pool type {} is not supported".format(pool_type))

-    return ret_np
+    return _convert_to_layout(ret_np, layout)
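
The new layout parameter lets the reference implementation accept channel-last and other layouts: the input is transposed to the canonical NCW/NCHW/NCDHW form, pooled there, and the result is transposed back. A standalone NumPy sketch of the same transpose logic (the shapes and layouts are illustrative, not from the test suite):

import numpy as np

supported_layout = "NCHW"  # canonical layout poolnd_python computes in
layout = "NHWC"            # hypothetical caller layout

x = np.random.rand(1, 8, 8, 3)  # N=1, H=8, W=8, C=3 in NHWC

# _convert_from_layout: NHWC -> NCHW
to_canonical = [layout.index(d) for d in supported_layout]  # [0, 3, 1, 2]
x_nchw = x.transpose(to_canonical)
assert x_nchw.shape == (1, 3, 8, 8)

# _convert_to_layout: NCHW -> NHWC (the inverse permutation, round trip)
to_original = [supported_layout.index(d) for d in layout]  # [0, 2, 3, 1]
assert np.array_equal(x_nchw.transpose(to_original), x)
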
6 changes: 3 additions & 3 deletions python/tvm/topi/x86/pooling.py
@@ -26,8 +26,8 @@ def vectorize(fused_axis, num_parallel_axis, vectorize_limit=64):
        reorder_axis = [fused_axis]
        for i in range(num_parallel_axis, len(sch.op.axis) - 1):
            reorder_axis.append(sch.op.axis[i])
-        kw, kh = sch.op.reduce_axis
-        fuse_k = sch.fuse(kw, kh)
+        k = sch.op.reduce_axis
+        fuse_k = sch.fuse(*k)
        c = sch.op.axis[len(sch.op.axis) - 1]
        reorder_axis += [fuse_k, c]
        sch.reorder(*reorder_axis)
@@ -83,7 +83,7 @@ def schedule_pool(outs, layout):
    def _schedule(PaddedInput, Pool):
        if isinstance(PaddedInput.op, te.tensor.ComputeOp):
            s[PaddedInput].compute_inline()
-        do_vectorize = layout[-1] not in "HWhw"
+        do_vectorize = layout[-1] not in "DHWdhw"
        _parallel_sch(s[Pool], outs[0].shape, do_vectorize)

    def traverse(OP):
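
The guard above vectorizes the innermost axis only when it is a channel axis; with this fix the spatial-axis set also covers depth (D/d), so 3-D pooling layouts are handled like 2-D ones. A small sketch of the predicate (the layout strings are illustrative):

# Vectorize only when the innermost (fastest-varying) axis is a channel
# axis; D/H/W (and their split forms d/h/w) are spatial and stay scalar.
for layout in ("NCW", "NWC", "NCHW", "NHWC", "NCHW16c", "NCDHW", "NDHWC"):
    do_vectorize = layout[-1] not in "DHWdhw"
    print(f"{layout:8s} vectorize={do_vectorize}")
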