diff --git a/python/tvm/topi/adreno/conv2d_nchw_winograd.py b/python/tvm/topi/adreno/conv2d_nchw_winograd.py
index 538fccf9c3e9..16f7cb8b19d9 100644
--- a/python/tvm/topi/adreno/conv2d_nchw_winograd.py
+++ b/python/tvm/topi/adreno/conv2d_nchw_winograd.py
@@ -18,21 +18,7 @@
 """Winograd NCHW template for Adreno backend"""
 import logging
 
-import tvm
-from tvm import te
 from tvm import autotvm
-
-from tvm.topi import nn
-from tvm.topi.utils import get_const_int, get_const_tuple, traverse_inline
-from ..nn.winograd_util import winograd_transform_matrices
-from .utils import (
-    split_to_chunks,
-    pack_input,
-    pack_filter,
-    bind_data_copy,
-    get_texture_storage,
-    infer_tile_size,
-)
 from .conv2d_winograd_common import conv2d_winograd_comp, schedule_conv2d_winograd_impl
 
 
@@ -137,4 +123,6 @@ def conv2d_nchw_winograd_comp(
     output: tvm.te.Tensor
         4-D or 5-D with shape NCHW or NCHW4c
     """
-    return conv2d_winograd_comp(cfg, data, kernel, strides, padding, dilation, out_dtype, args, pre_computed, "NCHW")
+    return conv2d_winograd_comp(
+        cfg, data, kernel, strides, padding, dilation, out_dtype, args, pre_computed, "NCHW"
+    )
diff --git a/python/tvm/topi/adreno/conv2d_nhwc_winograd.py b/python/tvm/topi/adreno/conv2d_nhwc_winograd.py
index f3850fbec171..bfe385f210a4 100644
--- a/python/tvm/topi/adreno/conv2d_nhwc_winograd.py
+++ b/python/tvm/topi/adreno/conv2d_nhwc_winograd.py
@@ -18,21 +18,7 @@
 """Winograd NHWC template for Adreno backend"""
 import logging
 
-import tvm
-from tvm import te
 from tvm import autotvm
-
-from tvm.topi import nn
-from tvm.topi.utils import get_const_int, get_const_tuple, traverse_inline
-from ..nn.winograd_util import winograd_transform_matrices
-from .utils import (
-    split_to_chunks,
-    pack_input,
-    pack_filter,
-    bind_data_copy,
-    get_texture_storage,
-    infer_tile_size,
-)
 from .conv2d_winograd_common import conv2d_winograd_comp, schedule_conv2d_winograd_impl
 
 
@@ -137,4 +123,6 @@ def conv2d_nhwc_winograd_comp(
     output: tvm.te.Tensor
         4-D or 5-D with shape NCHW or NCHW4c
     """
-    return conv2d_winograd_comp(cfg, data, kernel, strides, padding, dilation, out_dtype, args, pre_computed, "NHWC")
+    return conv2d_winograd_comp(
+        cfg, data, kernel, strides, padding, dilation, out_dtype, args, pre_computed, "NHWC"
+    )
diff --git a/python/tvm/topi/adreno/conv2d_winograd_common.py b/python/tvm/topi/adreno/conv2d_winograd_common.py
index 1b10a8cc57e2..494b691a7f07 100644
--- a/python/tvm/topi/adreno/conv2d_winograd_common.py
+++ b/python/tvm/topi/adreno/conv2d_winograd_common.py
@@ -17,7 +17,6 @@
 # pylint: disable=invalid-name,unused-variable,unused-argument
 """Common Winograd implementation for Adreno backend"""
 
-import logging
 import tvm
 from tvm import te
 from tvm import autotvm
@@ -80,7 +79,7 @@ def conv2d_winograd_comp(
     output: tvm.te.Tensor
         4-D or 5-D with shape NCHW or NCHW4c
     """
-    assert layout == "NCHW" or layout == "NHWC"
+    assert layout in ("NCHW", "NHWC")
     tile_size = infer_tile_size(data, layout)
 
     if isinstance(dilation, int):
@@ -301,9 +300,9 @@ def conv2d_winograd_comp(
     else:
         output = te.compute(
             (N, CO, H, W, COB),
-            lambda n, co, h, w, cob: inverse[co][n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)][
-                idxmod(h, m)
-            ][idxmod(w, m)][cob].astype(out_dtype),
+            lambda n, co, h, w, cob: inverse[co][
+                n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)
+            ][idxmod(h, m)][idxmod(w, m)][cob].astype(out_dtype),
             name="output",
             tag="cast_from_acc" + args["accumulator"][-2:],
         )
@@ -320,9 +319,9 @@ def conv2d_winograd_comp(
     else:
         output = te.compute(
             (N, H, W, CO, COB),
-            lambda n, h, w, co, cob: inverse[co][n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)][
-                idxmod(h, m)
-            ][idxmod(w, m)][cob].astype(out_dtype),
+            lambda n, h, w, co, cob: inverse[co][
+                n * nH * nW + idxdiv(h, m) * nW + idxdiv(w, m)
+            ][idxmod(h, m)][idxmod(w, m)][cob].astype(out_dtype),
             name="output",
             tag="cast_from_acc" + args["accumulator"][-2:],
         )