diff --git a/CMakeLists.txt b/CMakeLists.txt index f6e33b7d337f..640de0719c28 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -259,7 +259,7 @@ file(GLOB DATATYPE_SRCS src/target/datatype/*.cc) list(APPEND COMPILER_SRCS ${DATATYPE_SRCS}) file(GLOB TOPI_SRCS - topi/src/*.cc + src/topi/*.cc ) file(GLOB RUNTIME_SRCS @@ -356,7 +356,6 @@ else() set(CMAKE_CUDA_STANDARD 14) endif() -add_library(tvm SHARED ${COMPILER_SRCS} ${RUNTIME_SRCS}) +add_library(tvm SHARED ${COMPILER_SRCS} ${RUNTIME_SRCS} ${TOPI_SRCS}) -add_library(tvm_topi SHARED ${TOPI_SRCS}) add_library(tvm_runtime SHARED ${RUNTIME_SRCS}) add_library(tvm_runtime_static STATIC ${RUNTIME_SRCS}) @@ -419,15 +419,6 @@ if (HIDE_PRIVATE_SYMBOLS AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin") target_link_libraries(tvm_runtime ${HIDE_SYMBOLS_LINKER_FLAGS}) endif() -# Related headers -target_include_directories( - tvm - PUBLIC "topi/include") -target_include_directories( - tvm_topi - PUBLIC "topi/include") - - # Tests set(TEST_EXECS "") file(GLOB TEST_SRCS tests/cpp/*.cc) @@ -472,11 +463,6 @@ if (INSTALL_DEV) FILES_MATCHING PATTERN "*.h" ) - install( - DIRECTORY "topi/include/." DESTINATION "include" - FILES_MATCHING - PATTERN "*.h" - ) install( DIRECTORY "3rdparty/dlpack/include/." DESTINATION "include" FILES_MATCHING diff --git a/Makefile b/Makefile index 9063cd1b8fff..825e5891c27c 100644 --- a/Makefile +++ b/Makefile @@ -77,14 +77,12 @@ $(OUTPUTDIR)/libtvm_web_runtime.js: $(OUTPUTDIR)/libtvm_web_runtime.bc # Lint scripts cpplint: python3 3rdparty/dmlc-core/scripts/lint.py vta cpp vta/include vta/src - python3 3rdparty/dmlc-core/scripts/lint.py topi cpp topi/include; python3 3rdparty/dmlc-core/scripts/lint.py tvm cpp \ include src \ examples/extension/src examples/graph_executor/src pylint: python3 -m pylint python/tvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc - python3 -m pylint topi/python/topi --rcfile=$(ROOTDIR)/tests/lint/pylintrc python3 -m pylint vta/python/vta --rcfile=$(ROOTDIR)/tests/lint/pylintrc jnilint: diff --git a/apps/android_camera/app/src/main/jni/Android.mk b/apps/android_camera/app/src/main/jni/Android.mk index f135c97602ee..a5eacb0c0c2d 100644 --- a/apps/android_camera/app/src/main/jni/Android.mk +++ b/apps/android_camera/app/src/main/jni/Android.mk @@ -39,8 +39,6 @@ LOCAL_LDFLAGS := -L$(SYSROOT)/usr/lib/ -llog LOCAL_C_INCLUDES := $(ROOT_PATH)/include \ $(ROOT_PATH)/3rdparty/dlpack/include \ - $(ROOT_PATH)/3rdparty/dmlc-core/include \ - $(ROOT_PATH)/3rdparty/HalideIR/src \ - $(ROOT_PATH)/topi/include + $(ROOT_PATH)/3rdparty/dmlc-core/include LOCAL_MODULE = tvm4j_runtime_packed diff --git a/apps/android_deploy/app/src/main/jni/Android.mk b/apps/android_deploy/app/src/main/jni/Android.mk index 58f82f9c0abe..1b06a6bdb898 100644 --- a/apps/android_deploy/app/src/main/jni/Android.mk +++ b/apps/android_deploy/app/src/main/jni/Android.mk @@ -38,8 +38,7 @@ LOCAL_LDFLAGS := -L$(SYSROOT)/usr/lib/ -llog LOCAL_C_INCLUDES := $(ROOT_PATH)/include \ $(ROOT_PATH)/3rdparty/dlpack/include \ - $(ROOT_PATH)/3rdparty/dmlc-core/include \ - $(ROOT_PATH)/topi/include + $(ROOT_PATH)/3rdparty/dmlc-core/include LOCAL_MODULE = tvm4j_runtime_packed diff --git a/apps/android_rpc/app/src/main/jni/Android.mk b/apps/android_rpc/app/src/main/jni/Android.mk index 58f82f9c0abe..1b06a6bdb898 100644 --- a/apps/android_rpc/app/src/main/jni/Android.mk +++ b/apps/android_rpc/app/src/main/jni/Android.mk @@ -38,8 +38,7 @@ LOCAL_LDFLAGS := -L$(SYSROOT)/usr/lib/ -llog LOCAL_C_INCLUDES := $(ROOT_PATH)/include \ $(ROOT_PATH)/3rdparty/dlpack/include \ - $(ROOT_PATH)/3rdparty/dmlc-core/include \ -
$(ROOT_PATH)/topi/include + $(ROOT_PATH)/3rdparty/dmlc-core/include LOCAL_MODULE = tvm4j_runtime_packed diff --git a/apps/sgx/build.rs b/apps/sgx/build.rs index 702dd0486020..f54aeccd9c20 100644 --- a/apps/sgx/build.rs +++ b/apps/sgx/build.rs @@ -35,9 +35,7 @@ fn main() { concat!( mf_dir!("/../../python"), ":", - mf_dir!("/../../nnvm/python"), - ":", - mf_dir!("/../../topi/python") + mf_dir!("/../../nnvm/python") ), ) .output() diff --git a/topi/README.md b/apps/topi_recipe/README.md similarity index 86% rename from topi/README.md rename to apps/topi_recipe/README.md index 4da591041e17..06c52c6dd18d 100644 --- a/topi/README.md +++ b/apps/topi_recipe/README.md @@ -15,7 +15,7 @@ -# TOPI: TVM Operator Inventory +# TOPI Recipe: TVM Operator Optimization Recipes TOPI is the operator collection library for TVM intended for sharing the effort of crafting and optimizing TVM-generated kernels. The goal: @@ -24,11 +24,6 @@ and optimizing tvm generated kernels. The goal: - Give common primitives for fused op creation. - Provide commonly used schedules for each architecture -## Organization -- [include](include) C++ library, header only -- [python](python) python library -- [recipe](recipe) Recipe collections containing useful operator examples. - ## Guidelines - Use numpy-style naming convention for known ops - Separate operator declaration from schedule when possible. @@ -39,10 +34,6 @@ and optimizing tvm generated kernels. The goal: - Be data-layout aware; if not specified in an argument or function, assume NCHW by default. -## Testcase -- Add testcases to testout the schedule and dataflow in the TOPI workflow -- Only do correctness testing without attaching compiler flags and only run it once. - ## Performance Tuning Workflow Since TVM is a work in progress, some optimizations might not be perfect. One quick way I find useful is to do codegen plus manual modification.
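For reference: every recipe below switches from the standalone `import topi` package to the in-tree `from tvm import topi`. A minimal sketch of the new convention follows; the `te.placeholder`, `topi.nn.relu`, `te.create_schedule`, and `tvm.build` calls are standard TVM APIs, while the shapes, tensor names, and the `llvm` target are illustrative assumptions, not taken from this change.

```python
# Sketch of the new-style in-tree TOPI imports used by the relocated recipes.
# Shapes, names, and the llvm target below are illustrative assumptions.
import numpy as np
import tvm
from tvm import te, topi

# Declare the compute with a TOPI op instead of hand-writing te.compute.
A = te.placeholder((1, 16, 32, 32), name="A")
B = topi.nn.relu(A)

# A plain schedule is enough for a CPU correctness check.
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], target="llvm")

a = tvm.nd.array(np.random.uniform(-1, 1, size=(1, 16, 32, 32)).astype("float32"))
b = tvm.nd.array(np.zeros((1, 16, 32, 32), dtype="float32"))
f(a, b)
np.testing.assert_allclose(b.asnumpy(), np.maximum(a.asnumpy(), 0))
```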
diff --git a/topi/recipe/broadcast/test_broadcast_map.py b/apps/topi_recipe/broadcast/test_broadcast_map.py similarity index 99% rename from topi/recipe/broadcast/test_broadcast_map.py rename to apps/topi_recipe/broadcast/test_broadcast_map.py index 2f2bb9e900fe..73031913e77d 100644 --- a/topi/recipe/broadcast/test_broadcast_map.py +++ b/apps/topi_recipe/broadcast/test_broadcast_map.py @@ -20,7 +20,7 @@ from tvm.contrib import nvcc import numpy as np -import topi +from tvm import topi TASK = "reduce_map" diff --git a/topi/recipe/conv/depthwise_conv2d_test.py b/apps/topi_recipe/conv/depthwise_conv2d_test.py similarity index 95% rename from topi/recipe/conv/depthwise_conv2d_test.py rename to apps/topi_recipe/conv/depthwise_conv2d_test.py index 72e054e12b14..c5f8b0701203 100644 --- a/topi/recipe/conv/depthwise_conv2d_test.py +++ b/apps/topi_recipe/conv/depthwise_conv2d_test.py @@ -21,9 +21,9 @@ from scipy import signal from tvm.contrib import nvcc -import topi -from topi.util import get_const_tuple -from topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_nchw, schedule_depthwise_conv2d_nhwc +from tvm import topi +from tvm.topi.util import get_const_tuple +from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_nchw, schedule_depthwise_conv2d_nhwc TASK = "depthwise_conv2d" USE_MANUAL_CODE = False @@ -118,7 +118,7 @@ def check_device(device): print("average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us" % (tcost_2*1e6)) print("average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us" % (tcost_3*1e6)) # correctness - depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nchw(input_np, filter_np, stride=[stride_h, stride_w], padding=padding) + depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw(input_np, filter_np, stride=[stride_h, stride_w], padding=padding) scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape)) for c in range(in_channel * channel_multiplier): scale_shift_scipy[:,c,:,:] = depthwise_conv2d_scipy[:,c,:,:] * scale_np[c] + shift_np[c] @@ -207,7 +207,7 @@ def check_device(device): print("average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us" % (tcost_2*1e6)) print("average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us" % (tcost_3*1e6)) # correctness - depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nhwc(input_np, filter_np, stride=[stride_h, stride_w], padding=padding) + depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nhwc(input_np, filter_np, stride=[stride_h, stride_w], padding=padding) scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape)) for c in range(in_channel * channel_multiplier): scale_shift_scipy[:,:,:,c] = depthwise_conv2d_scipy[:,:,:,c] * scale_np[c] + shift_np[c] diff --git a/topi/recipe/conv/test_conv2d_hwcn_map.py b/apps/topi_recipe/conv/test_conv2d_hwcn_map.py similarity index 95% rename from topi/recipe/conv/test_conv2d_hwcn_map.py rename to apps/topi_recipe/conv/test_conv2d_hwcn_map.py index 35cd477e1f98..605044c437b6 100644 --- a/topi/recipe/conv/test_conv2d_hwcn_map.py +++ b/apps/topi_recipe/conv/test_conv2d_hwcn_map.py @@ -21,8 +21,8 @@ import tvm from tvm import te from tvm.contrib import nvcc -import topi -from topi.util import get_const_tuple +from tvm import topi +from tvm.topi.util import get_const_tuple TASK = "conv2d_hwcn_map" USE_MANUAL_CODE = False @@ -65,7 +65,7 @@ def test_conv2d_hwcn_map(): a_np = 
np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype) w_np = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype) - b_np = topi.testing.conv2d_hwcn_python(a_np, w_np, stride, padding) + b_np = tvm.topi.testing.conv2d_hwcn_python(a_np, w_np, stride, padding) c_np = np.maximum(b_np, 0) def check_device(device): diff --git a/topi/recipe/conv/test_conv_int8_arm.py b/apps/topi_recipe/conv/test_conv_int8_arm.py similarity index 99% rename from topi/recipe/conv/test_conv_int8_arm.py rename to apps/topi_recipe/conv/test_conv_int8_arm.py index f0b260e18a98..d4f98b0ac4d7 100644 --- a/topi/recipe/conv/test_conv_int8_arm.py +++ b/apps/topi_recipe/conv/test_conv_int8_arm.py @@ -21,7 +21,7 @@ import numpy as np import tvm from tvm import te -import topi +from tvm import topi logging.basicConfig(stream=sys.stdout, level=logging.INFO) LOGGER = logging.getLogger('test_conv_int8_intel') diff --git a/topi/recipe/conv/test_conv_int8_intel.py b/apps/topi_recipe/conv/test_conv_int8_intel.py similarity index 99% rename from topi/recipe/conv/test_conv_int8_intel.py rename to apps/topi_recipe/conv/test_conv_int8_intel.py index 767262d81d83..93b783340ee1 100644 --- a/topi/recipe/conv/test_conv_int8_intel.py +++ b/apps/topi_recipe/conv/test_conv_int8_intel.py @@ -21,7 +21,7 @@ import numpy as np import tvm from tvm import te -import topi +from tvm import topi logging.basicConfig(stream=sys.stdout, level=logging.INFO) LOGGER = logging.getLogger('test_conv_int8_intel') diff --git a/topi/recipe/gemm/android_gemm_square.py b/apps/topi_recipe/gemm/android_gemm_square.py similarity index 100% rename from topi/recipe/gemm/android_gemm_square.py rename to apps/topi_recipe/gemm/android_gemm_square.py diff --git a/topi/recipe/gemm/cuda_gemm_square.py b/apps/topi_recipe/gemm/cuda_gemm_square.py similarity index 100% rename from topi/recipe/gemm/cuda_gemm_square.py rename to apps/topi_recipe/gemm/cuda_gemm_square.py diff --git a/topi/recipe/gemm/gemm_int8.py b/apps/topi_recipe/gemm/gemm_int8.py similarity index 99% rename from topi/recipe/gemm/gemm_int8.py rename to apps/topi_recipe/gemm/gemm_int8.py index 9d668ebf6fa9..fd037117d77b 100644 --- a/topi/recipe/gemm/gemm_int8.py +++ b/apps/topi_recipe/gemm/gemm_int8.py @@ -21,7 +21,7 @@ import tvm from tvm import te from tvm import autotvm -from topi.cuda.tensor_intrin import dp4a +from tvm.topi.cuda.tensor_intrin import dp4a DO_TUNING = True PRETUNED_INDEX = 75333 diff --git a/topi/recipe/reduce/test_reduce_map.py b/apps/topi_recipe/reduce/test_reduce_map.py similarity index 99% rename from topi/recipe/reduce/test_reduce_map.py rename to apps/topi_recipe/reduce/test_reduce_map.py index 5e5caec73bc3..b6d0602a4e87 100644 --- a/topi/recipe/reduce/test_reduce_map.py +++ b/apps/topi_recipe/reduce/test_reduce_map.py @@ -20,7 +20,7 @@ from tvm.contrib import nvcc import numpy as np -import topi +from tvm import topi TASK = "reduce_map" diff --git a/topi/recipe/rnn/lstm.py b/apps/topi_recipe/rnn/lstm.py similarity index 100% rename from topi/recipe/rnn/lstm.py rename to apps/topi_recipe/rnn/lstm.py diff --git a/topi/recipe/rnn/matexp.py b/apps/topi_recipe/rnn/matexp.py similarity index 100% rename from topi/recipe/rnn/matexp.py rename to apps/topi_recipe/rnn/matexp.py diff --git a/conda/tvm/build.sh b/conda/tvm/build.sh index 358e0b91798a..9bdbe0a6f509 100644 --- a/conda/tvm/build.sh +++ b/conda/tvm/build.sh @@ -22,7 +22,3 @@ set -u cd python $PYTHON setup.py install --single-version-externally-managed --record=/tmp/record.txt cd .. 
- -cd topi/python -$PYTHON setup.py install --single-version-externally-managed --record=/tmp/record.txt -cd ../.. diff --git a/docker/Dockerfile.demo_android b/docker/Dockerfile.demo_android index 13d1a2175b88..185f7411c0cc 100644 --- a/docker/Dockerfile.demo_android +++ b/docker/Dockerfile.demo_android @@ -70,5 +70,5 @@ RUN cd /usr && \ make -j10 # Environment variables -ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/topi/python:/usr/tvm/vta/python:${PYTHONPATH} +ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/vta/python:${PYTHONPATH} ENV ANDROID_HOME=/opt/android-sdk-linux/ diff --git a/docker/Dockerfile.demo_cpu b/docker/Dockerfile.demo_cpu index 01ba9f6495f7..3f08e1df7fe3 100644 --- a/docker/Dockerfile.demo_cpu +++ b/docker/Dockerfile.demo_cpu @@ -30,4 +30,4 @@ COPY install/install_tvm_cpu.sh /install/install_tvm_cpu.sh RUN bash /install/install_tvm_cpu.sh # Environment variables -ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/topi/python:/usr/tvm/vta/python:${PYTHONPATH} +ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/vta/python:${PYTHONPATH} diff --git a/docker/Dockerfile.demo_gpu b/docker/Dockerfile.demo_gpu index b97150ffcbf1..489a67d1a59f 100644 --- a/docker/Dockerfile.demo_gpu +++ b/docker/Dockerfile.demo_gpu @@ -28,7 +28,7 @@ COPY install/install_tvm_gpu.sh /install/install_tvm_gpu.sh RUN bash /install/install_tvm_gpu.sh # Environment variables -ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/topi/python:/usr/tvm/vta/python:${PYTHONPATH} +ENV PYTHONPATH=/usr/tvm/python:/usr/tvm/vta/python:${PYTHONPATH} ENV PATH=/usr/local/nvidia/bin:${PATH} ENV PATH=/usr/local/cuda/bin:${PATH} ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/nvidia/lib64:${LD_LIBRARY_PATH} diff --git a/docker/Dockerfile.demo_opencl b/docker/Dockerfile.demo_opencl index fb2d8999dc54..e39ee4128c96 100644 --- a/docker/Dockerfile.demo_opencl +++ b/docker/Dockerfile.demo_opencl @@ -76,6 +76,5 @@ RUN mkdir -p ${TVM_BUILD_DIR} && \ make -j6 RUN echo "Building Python package" -ENV PYTHONPATH=${TVM_HOME}/python:${TVM_HOME}/topi/python:${PYTHONPATH} +ENV PYTHONPATH=${TVM_HOME}/python:${PYTHONPATH} RUN cd ${TVM_HOME}/python && python3 setup.py install --user -RUN cd ${TVM_HOME}/topi/python && python3 setup.py install --user diff --git a/docker/bash.sh b/docker/bash.sh index 532738a9711e..dc95c45569d0 100755 --- a/docker/bash.sh +++ b/docker/bash.sh @@ -70,7 +70,7 @@ else fi if [[ "${DOCKER_IMAGE_NAME}" == *"ci"* ]]; then - CI_PY_ENV="-e PYTHONPATH=/workspace/python:/workspace/topi/python" + CI_PY_ENV="-e PYTHONPATH=/workspace/python" else CI_PY_ENV="" fi diff --git a/docs/Doxyfile b/docs/Doxyfile index d665d200dca2..6eb3ee6472be 100644 --- a/docs/Doxyfile +++ b/docs/Doxyfile @@ -770,7 +770,7 @@ WARN_LOGFILE = # spaces. # Note: If this tag is empty the current directory is searched. -INPUT = include/tvm topi/include/topi +INPUT = include/tvm # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses diff --git a/docs/api/python/index.rst b/docs/api/python/index.rst index bee6e56a8cab..bc9ec5fd8304 100644 --- a/docs/api/python/index.rst +++ b/docs/api/python/index.rst @@ -44,5 +44,5 @@ Python API micro contrib graph_runtime - vta/index topi + vta/index diff --git a/docs/api/python/topi.rst b/docs/api/python/topi.rst index 09c3318be9ee..f62509f571e3 100644 --- a/docs/api/python/topi.rst +++ b/docs/api/python/topi.rst @@ -15,234 +15,34 @@ specific language governing permissions and limitations under the License. -topi ----- -.. 
automodule:: topi - -List of operators -~~~~~~~~~~~~~~~~~ - -.. autosummary:: - - topi.identity - topi.negative - topi.floor - topi.ceil - topi.sign - topi.trunc - topi.round - topi.abs - topi.isnan - topi.isfinite - topi.isinf - topi.exp - topi.tanh - topi.log - topi.sqrt - topi.rsqrt - topi.sigmoid - topi.clip - topi.cast - topi.reinterpret - topi.transpose - topi.flip - topi.reverse_sequence - topi.strided_slice - topi.expand_dims - topi.reshape - topi.unravel_index - topi.sparse_to_dense - topi.squeeze - topi.concatenate - topi.split - topi.take - topi.gather - topi.gather_nd - topi.full - topi.full_like - topi.nn.relu - topi.nn.leaky_relu - topi.nn.dilate - topi.nn.pool - topi.nn.global_pool - topi.nn.adaptive_pool - topi.nn.upsampling - topi.nn.softmax - topi.nn.dense - topi.nn.batch_matmul - topi.nn.log_softmax - topi.nn.conv2d_nchw - topi.nn.conv2d_hwcn - topi.nn.depthwise_conv2d_nchw - topi.nn.depthwise_conv2d_nhwc - topi.nn.fifo_buffer - topi.max - topi.sum - topi.min - topi.argmax - topi.argmin - topi.prod - topi.broadcast_to - topi.add - topi.subtract - topi.multiply - topi.divide - topi.mod - topi.maximum - topi.minimum - topi.power - topi.greater - topi.less - topi.equal - topi.not_equal - topi.greater_equal - topi.less_equal - topi.all - topi.any - topi.logical_and - topi.logical_or - topi.logical_not - topi.logical_xor - topi.arange - topi.meshgrid - topi.stack - topi.repeat - topi.tile - topi.shape - topi.ndarray_size - topi.layout_transform - topi.image.resize - topi.image.crop_and_resize - topi.image.dilation2d - topi.argsort - topi.topk - topi.sequence_mask - topi.one_hot - - -List of schedules -~~~~~~~~~~~~~~~~~ -.. autosummary:: - - topi.generic.schedule_conv2d_nchw - topi.generic.schedule_depthwise_conv2d_nchw - topi.generic.schedule_reduce - topi.generic.schedule_broadcast - topi.generic.schedule_injective +tvm.topi +-------- +.. automodule:: tvm.topi + :members: + :imported-members: + :autosummary: + +tvm.topi.nn +~~~~~~~~~~~ -topi -~~~~ -.. autofunction:: topi.negative -.. autofunction:: topi.identity -.. autofunction:: topi.floor -.. autofunction:: topi.ceil -.. autofunction:: topi.sign -.. autofunction:: topi.trunc -.. autofunction:: topi.round -.. autofunction:: topi.abs -.. autofunction:: topi.isnan -.. autofunction:: topi.isfinite -.. autofunction:: topi.isinf -.. autofunction:: topi.exp -.. autofunction:: topi.tanh -.. autofunction:: topi.log -.. autofunction:: topi.sqrt -.. autofunction:: topi.rsqrt -.. autofunction:: topi.sigmoid -.. autofunction:: topi.clip -.. autofunction:: topi.cast -.. autofunction:: topi.reinterpret -.. autofunction:: topi.transpose -.. autofunction:: topi.flip -.. autofunction:: topi.reverse_sequence -.. autofunction:: topi.strided_slice -.. autofunction:: topi.expand_dims -.. autofunction:: topi.reshape -.. autofunction:: topi.unravel_index -.. autofunction:: topi.sparse_to_dense -.. autofunction:: topi.squeeze -.. autofunction:: topi.concatenate -.. autofunction:: topi.split -.. autofunction:: topi.take -.. autofunction:: topi.gather -.. autofunction:: topi.gather_nd -.. autofunction:: topi.full -.. autofunction:: topi.full_like -.. autofunction:: topi.all -.. autofunction:: topi.any -.. autofunction:: topi.max -.. autofunction:: topi.sum -.. autofunction:: topi.min -.. autofunction:: topi.prod -.. autofunction:: topi.broadcast_to -.. autofunction:: topi.add -.. autofunction:: topi.subtract -.. autofunction:: topi.multiply -.. autofunction:: topi.divide -.. autofunction:: topi.floor_divide -.. autofunction:: topi.mod -.. 
autofunction:: topi.floor_mod -.. autofunction:: topi.maximum -.. autofunction:: topi.minimum -.. autofunction:: topi.power -.. autofunction:: topi.greater -.. autofunction:: topi.less -.. autofunction:: topi.arange -.. autofunction:: topi.meshgrid -.. autofunction:: topi.stack -.. autofunction:: topi.repeat -.. autofunction:: topi.tile -.. autofunction:: topi.shape -.. autofunction:: topi.ndarray_size -.. autofunction:: topi.layout_transform -.. autofunction:: topi.argsort -.. autofunction:: topi.topk -.. autofunction:: topi.sequence_mask -.. autofunction:: topi.one_hot -.. autofunction:: topi.logical_and -.. autofunction:: topi.logical_or -.. autofunction:: topi.logical_not -.. autofunction:: topi.logical_xor +.. automodule:: tvm.topi.nn + :members: + :imported-members: + :autosummary: -topi.nn -~~~~~~~ -.. autofunction:: topi.nn.relu -.. autofunction:: topi.nn.leaky_relu -.. autofunction:: topi.nn.dilate -.. autofunction:: topi.nn.pool -.. autofunction:: topi.nn.global_pool -.. autofunction:: topi.nn.upsampling -.. autofunction:: topi.nn.softmax -.. autofunction:: topi.nn.dense -.. autofunction:: topi.nn.batch_matmul -.. autofunction:: topi.nn.log_softmax -.. autofunction:: topi.nn.conv2d_nchw -.. autofunction:: topi.nn.conv2d_hwcn -.. autofunction:: topi.nn.depthwise_conv2d_nchw -.. autofunction:: topi.nn.depthwise_conv2d_nhwc -.. autofunction:: topi.nn.conv3d_ncdhw -.. autofunction:: topi.nn.conv3d_transpose_ncdhw -.. autofunction:: topi.nn.fifo_buffer +tvm.topi.image +~~~~~~~~~~~~~~ +.. automodule:: tvm.topi.image + :members: + :imported-members: + :autosummary: -topi.image -~~~~~~~~~~ -.. autofunction:: topi.image.resize -.. autofunction:: topi.image.crop_and_resize -topi.sparse -~~~~~~~~~~~ -.. autofunction:: topi.sparse.csrmv -.. autofunction:: topi.sparse.csrmm -.. autofunction:: topi.sparse.dense +tvm.topi.sparse +~~~~~~~~~~~~~~~ +.. automodule:: tvm.topi.sparse + :members: + :imported-members: + :autosummary: -topi.generic -~~~~~~~~~~~~ -.. automodule:: topi.generic -.. autofunction:: topi.generic.schedule_conv2d_nchw -.. autofunction:: topi.generic.schedule_depthwise_conv2d_nchw -.. autofunction:: topi.generic.schedule_conv3d_ncdhw -.. autofunction:: topi.generic.schedule_conv3d_transpose_ncdhw -.. autofunction:: topi.generic.schedule_reduce -.. autofunction:: topi.generic.schedule_broadcast -.. autofunction:: topi.generic.schedule_injective diff --git a/docs/conf.py b/docs/conf.py index 73836e92ca7f..c03f1b770079 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -40,7 +40,6 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../python/')) -sys.path.insert(0, os.path.join(curr_path, '../topi/python')) sys.path.insert(0, os.path.join(curr_path, '../vta/python')) # -- General configuration ------------------------------------------------ @@ -54,6 +53,7 @@ os.environ['TVM_BUILD_DOC'] = '1' # Version information. 
import tvm +from tvm import topi from tvm import te version = tvm.__version__ release = tvm.__version__ diff --git a/docs/contribute/pull_request.rst b/docs/contribute/pull_request.rst index 7e0ba372b183..128ae80b5474 100644 --- a/docs/contribute/pull_request.rst +++ b/docs/contribute/pull_request.rst @@ -121,7 +121,7 @@ If you want to run a single test: make # let python know where to find tvm related libraries - export PYTHONPATH=python:topi/python + export PYTHONPATH=python rm -rf python/tvm/*.pyc python/tvm/*/*.pyc python/tvm/*/*/*.pyc TVM_FFI=ctypes python -m pytest -v tests/python/unittest/test_pass_storage_rewrite.py diff --git a/docs/dev/codebase_walkthrough.rst b/docs/dev/codebase_walkthrough.rst index 7a00339aad5f..0a21bb8909e7 100644 --- a/docs/dev/codebase_walkthrough.rst +++ b/docs/dev/codebase_walkthrough.rst @@ -30,7 +30,7 @@ At the root of the TVM repository, we have following subdirectories that togethe - ``src`` - C++ code for operator compilation and deployment runtimes. - ``src/relay`` - Implementation of Relay, a new functional IR for deep learning frameworks. - ``python`` - Python frontend that wraps C++ functions and objects implemented in ``src``. -- ``topi`` - Compute definitions and backend schedules for standard neural network operators. +- ``src/topi`` - Compute definitions and backend schedules for standard neural network operators. Using standard Deep Learning terminology, ``src/relay`` is the component that manages a computational graph, and nodes in a graph are compiled and executed using infrastructure implemented in the rest of ``src``. ``python`` provides python bindings for the C++ API and driver code that users can use to execute compilation. Operators corresponding to each node are registered in ``src/relay/op``. Implementations of operators are in ``topi``, and they are coded in either C++ or Python. diff --git a/docs/dev/index.rst b/docs/dev/index.rst index 9fe8394cd337..c448cb00d07b 100644 --- a/docs/dev/index.rst +++ b/docs/dev/index.rst @@ -335,8 +335,8 @@ these scheduling components to the a `tir::PrimFunc` itself. inferbound hybrid_script -topi ---- +tvm/topi -------- While it is possible to construct operators directly via TIR or tensor expressions (TE), doing so for each use case is tedious. `topi` (Tensor operator inventory) provides a set of pre-defined operators (in TE or TIR) defined by numpy and found in common deep learning workloads. We also provide a collection of common schedule templates to obtain performant implementations across different target platforms. diff --git a/docs/install/from_source.rst b/docs/install/from_source.rst index 51ab887ed919..26aec77e09e2 100644 --- a/docs/install/from_source.rst +++ b/docs/install/from_source.rst @@ -51,9 +51,9 @@ Build the Shared Library Our goal is to build the shared libraries: -- On Linux the target library are `libtvm.so, libtvm_topi.so` -- On macOS the target library are `libtvm.dylib, libtvm_topi.dylib` -- On Windows the target library are `libtvm.dll, libtvm_topi.dll` +- On Linux the target library is `libtvm.so` +- On macOS the target library is `libtvm.dylib` +- On Windows the target library is `libtvm.dll` .. code:: bash @@ -174,7 +174,7 @@ Method 1 .. code:: bash export TVM_HOME=/path/to/tvm - export PYTHONPATH=$TVM_HOME/python:$TVM_HOME/topi/python:${PYTHONPATH} + export PYTHONPATH=$TVM_HOME/python:${PYTHONPATH} Method 2 @@ -188,8 +188,6 @@ Method 2 # providing --user flag may trigger an error during installation in such case.
export MACOSX_DEPLOYMENT_TARGET=10.9 # This is required for mac to avoid symbol conflicts with libstdc++ cd python; python setup.py install --user; cd .. - cd topi/python; python setup.py install --user; cd ../.. - Python dependencies ~~~~~~~~~~~~~~~~~~~ diff --git a/docs/langref/relay_expr.rst b/docs/langref/relay_expr.rst index 3b93360453eb..7585177cfb2d 100644 --- a/docs/langref/relay_expr.rst +++ b/docs/langref/relay_expr.rst @@ -267,7 +267,7 @@ Operators An operator is a primitive operation, such as :code:`add` or :code:`conv2d`, not defined in the Relay language. Operators are declared in the global operator registry in C++. Many common operators are backed by TVM's -Tensor Operator Inventory (`TOPI `__). +Tensor Operator Inventory. To register an operator a user must provide an implementation of the operator, its type, and any other desired metadata. diff --git a/topi/include/topi/broadcast.h b/include/tvm/topi/broadcast.h similarity index 98% rename from topi/include/topi/broadcast.h rename to include/tvm/topi/broadcast.h index 1b36ace4608f..8fabaaee14f9 100644 --- a/topi/include/topi/broadcast.h +++ b/include/tvm/topi/broadcast.h @@ -21,16 +21,17 @@ * \brief Broadcast op constructions * \file topi/broadcast.h */ -#ifndef TOPI_BROADCAST_H_ -#define TOPI_BROADCAST_H_ +#ifndef TVM_TOPI_BROADCAST_H_ +#define TVM_TOPI_BROADCAST_H_ -#include -#include -#include +#include +#include +#include #include #include +namespace tvm { namespace topi { /*! @@ -429,5 +430,6 @@ TOPI_DEFINE_BCAST_OP(greater_equal, { return (a >= b); }); TOPI_DEFINE_BCAST_OP(less_equal, { return (a <= b); }); } // namespace topi +} // namespace tvm -#endif // TOPI_BROADCAST_H_ +#endif // TVM_TOPI_BROADCAST_H_ diff --git a/topi/include/topi/contrib/cublas.h b/include/tvm/topi/contrib/cublas.h similarity index 93% rename from topi/include/topi/contrib/cublas.h rename to include/tvm/topi/contrib/cublas.h index 30ad52510e6f..3032643ed700 100644 --- a/topi/include/topi/contrib/cublas.h +++ b/include/tvm/topi/contrib/cublas.h @@ -21,15 +21,16 @@ * \brief External function interface to cuBLAS libraries * \file cublas.h */ -#ifndef TOPI_CONTRIB_CUBLAS_H_ -#define TOPI_CONTRIB_CUBLAS_H_ +#ifndef TVM_TOPI_CONTRIB_CUBLAS_H_ +#define TVM_TOPI_CONTRIB_CUBLAS_H_ -#include #include +#include +namespace tvm { namespace topi { namespace contrib { -using namespace tvm; + using namespace tvm::te; using namespace topi::detail; /*! @@ -82,5 +83,6 @@ inline Tensor cublas_batch_matmul(const Tensor& lhs, const Tensor& rhs, bool tra } // namespace contrib } // namespace topi +} // namespace tvm -#endif // TOPI_CONTRIB_CUBLAS_H_ +#endif // TVM_TOPI_CONTRIB_CUBLAS_H_ diff --git a/topi/include/topi/contrib/rocblas.h b/include/tvm/topi/contrib/rocblas.h similarity index 90% rename from topi/include/topi/contrib/rocblas.h rename to include/tvm/topi/contrib/rocblas.h index 988c37555b1c..a4fa26f34aa5 100644 --- a/topi/include/topi/contrib/rocblas.h +++ b/include/tvm/topi/contrib/rocblas.h @@ -21,16 +21,16 @@ * \brief External function interface to rocBLAS libraries * \file tags.h */ -#ifndef TOPI_CONTRIB_ROCBLAS_H_ -#define TOPI_CONTRIB_ROCBLAS_H_ +#ifndef TVM_TOPI_CONTRIB_ROCBLAS_H_ +#define TVM_TOPI_CONTRIB_ROCBLAS_H_ #include +#include -#include "topi/detail/extern.h" - +namespace tvm { namespace topi { namespace contrib { -using namespace tvm; + using namespace tvm::te; /*! 
* \brief Create an op that multiplies lhs and rhs with rocBLAS @@ -57,5 +57,6 @@ inline Tensor rocblas_matmul(const Tensor& lhs, const Tensor& rhs, bool transa, } // namespace contrib } // namespace topi +} // namespace tvm -#endif // TOPI_CONTRIB_ROCBLAS_H_ +#endif // TVM_TOPI_CONTRIB_ROCBLAS_H_ diff --git a/topi/include/topi/cuda/dense.h b/include/tvm/topi/cuda/dense.h similarity index 93% rename from topi/include/topi/cuda/dense.h rename to include/tvm/topi/cuda/dense.h index c8ceebf4b8ad..34af343d4b37 100644 --- a/topi/include/topi/cuda/dense.h +++ b/include/tvm/topi/cuda/dense.h @@ -21,20 +21,21 @@ * \file cuda/dense.h * \brief CUDA schedule for dense operation */ -#ifndef TOPI_CUDA_DENSE_H_ -#define TOPI_CUDA_DENSE_H_ - -#include -#include -#include -#include -#include +#ifndef TVM_TOPI_CUDA_DENSE_H_ +#define TVM_TOPI_CUDA_DENSE_H_ + #include #include #include +#include +#include +#include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace cuda { @@ -149,4 +150,5 @@ inline Schedule schedule_dense(const Target& target, const Array& outs) } // namespace cuda } // namespace topi -#endif // TOPI_CUDA_DENSE_H_ +} // namespace tvm +#endif // TVM_TOPI_CUDA_DENSE_H_ diff --git a/topi/include/topi/cuda/injective.h b/include/tvm/topi/cuda/injective.h similarity index 91% rename from topi/include/topi/cuda/injective.h rename to include/tvm/topi/cuda/injective.h index e7bce053ace1..010fa2ce8567 100644 --- a/topi/include/topi/cuda/injective.h +++ b/include/tvm/topi/cuda/injective.h @@ -21,17 +21,18 @@ * \file cuda/injective.h * \brief CUDA schedule for injective operations */ -#ifndef TOPI_CUDA_INJECTIVE_H_ -#define TOPI_CUDA_INJECTIVE_H_ +#ifndef TVM_TOPI_CUDA_INJECTIVE_H_ +#define TVM_TOPI_CUDA_INJECTIVE_H_ -#include -#include #include #include #include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace cuda { @@ -78,4 +79,5 @@ inline Schedule schedule_injective(const Target& target, const Array& ou } // namespace cuda } // namespace topi -#endif // TOPI_CUDA_INJECTIVE_H_ +} // namespace tvm +#endif // TVM_TOPI_CUDA_INJECTIVE_H_ diff --git a/topi/include/topi/cuda/normalization.h b/include/tvm/topi/cuda/normalization.h similarity index 93% rename from topi/include/topi/cuda/normalization.h rename to include/tvm/topi/cuda/normalization.h index f8f498eaffcf..270b6af2d5e5 100644 --- a/topi/include/topi/cuda/normalization.h +++ b/include/tvm/topi/cuda/normalization.h @@ -21,16 +21,17 @@ * \file cuda/normalization.h * \brief CUDA schedule for LRN and l2 normalization operations */ -#ifndef TOPI_CUDA_NORMALIZATION_H_ -#define TOPI_CUDA_NORMALIZATION_H_ +#ifndef TVM_TOPI_CUDA_NORMALIZATION_H_ +#define TVM_TOPI_CUDA_NORMALIZATION_H_ -#include #include #include #include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace cuda { /*! 
@@ -70,4 +71,5 @@ inline Schedule schedule_lrn(const Array& outs) { } // namespace cuda } // namespace topi -#endif // TOPI_CUDA_NORMALIZATION_H_ +} // namespace tvm +#endif // TVM_TOPI_CUDA_NORMALIZATION_H_ diff --git a/topi/include/topi/cuda/pooling.h b/include/tvm/topi/cuda/pooling.h similarity index 95% rename from topi/include/topi/cuda/pooling.h rename to include/tvm/topi/cuda/pooling.h index 7e8f55d5157f..0bb9df4a35d1 100644 --- a/topi/include/topi/cuda/pooling.h +++ b/include/tvm/topi/cuda/pooling.h @@ -21,18 +21,19 @@ * \file cuda/pooling.h * \brief CUDA schedule for pooling operations */ -#ifndef TOPI_CUDA_POOLING_H_ -#define TOPI_CUDA_POOLING_H_ +#ifndef TVM_TOPI_CUDA_POOLING_H_ +#define TVM_TOPI_CUDA_POOLING_H_ -#include -#include -#include #include #include #include +#include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace cuda { @@ -182,4 +183,5 @@ inline Schedule schedule_global_pool(const Target& target, const Array& } // namespace cuda } // namespace topi -#endif // TOPI_CUDA_POOLING_H_ +} // namespace tvm +#endif // TVM_TOPI_CUDA_POOLING_H_ diff --git a/topi/include/topi/cuda/reduction.h b/include/tvm/topi/cuda/reduction.h similarity index 96% rename from topi/include/topi/cuda/reduction.h rename to include/tvm/topi/cuda/reduction.h index 377b922afc82..18d448487e5f 100644 --- a/topi/include/topi/cuda/reduction.h +++ b/include/tvm/topi/cuda/reduction.h @@ -21,17 +21,18 @@ * \file cuda/reduction.h * \brief CUDA schedule for reduction operations */ -#ifndef TOPI_CUDA_REDUCTION_H_ -#define TOPI_CUDA_REDUCTION_H_ +#ifndef TVM_TOPI_CUDA_REDUCTION_H_ +#define TVM_TOPI_CUDA_REDUCTION_H_ -#include -#include #include #include #include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace cuda { @@ -194,4 +195,5 @@ Schedule schedule_reduce(const Target& target, Array outs) { } // namespace cuda } // namespace topi -#endif // TOPI_CUDA_REDUCTION_H_ +} // namespace tvm +#endif // TVM_TOPI_CUDA_REDUCTION_H_ diff --git a/topi/include/topi/cuda/softmax.h b/include/tvm/topi/cuda/softmax.h similarity index 93% rename from topi/include/topi/cuda/softmax.h rename to include/tvm/topi/cuda/softmax.h index a3aa857d8c0c..19613cbbdf19 100644 --- a/topi/include/topi/cuda/softmax.h +++ b/include/tvm/topi/cuda/softmax.h @@ -21,17 +21,18 @@ * \file cuda/injective.h * \brief CUDA schedule for injective operations */ -#ifndef TOPI_CUDA_SOFTMAX_H_ -#define TOPI_CUDA_SOFTMAX_H_ +#ifndef TVM_TOPI_CUDA_SOFTMAX_H_ +#define TVM_TOPI_CUDA_SOFTMAX_H_ -#include -#include #include #include #include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace cuda { @@ -98,4 +99,5 @@ inline Schedule schedule_softmax(const Target& target, const Array& outs } // namespace cuda } // namespace topi -#endif // TOPI_CUDA_SOFTMAX_H_ +} // namespace tvm +#endif // TVM_TOPI_CUDA_SOFTMAX_H_ diff --git a/topi/include/topi/detail/array_utils.h b/include/tvm/topi/detail/array_utils.h similarity index 89% rename from topi/include/topi/detail/array_utils.h rename to include/tvm/topi/detail/array_utils.h index d7204722c4f6..89c985695865 100644 --- a/topi/include/topi/detail/array_utils.h +++ b/include/tvm/topi/detail/array_utils.h @@ -21,14 +21,15 @@ * \file array_utils.h * \brief Utility functions for handling arrays */ -#ifndef TOPI_DETAIL_ARRAY_UTILS_H_ -#define TOPI_DETAIL_ARRAY_UTILS_H_ +#ifndef TVM_TOPI_DETAIL_ARRAY_UTILS_H_ +#define 
TVM_TOPI_DETAIL_ARRAY_UTILS_H_ #include +namespace tvm { namespace topi { namespace detail { -using namespace tvm; + using namespace tvm::te; /*! @@ -51,4 +52,5 @@ inline bool contains(Array array, T item) { } // namespace detail } // namespace topi -#endif // TOPI_DETAIL_ARRAY_UTILS_H_ +} // namespace tvm +#endif // TVM_TOPI_DETAIL_ARRAY_UTILS_H_ diff --git a/topi/include/topi/detail/broadcast.h b/include/tvm/topi/detail/broadcast.h similarity index 96% rename from topi/include/topi/detail/broadcast.h rename to include/tvm/topi/detail/broadcast.h index ca3029327875..6bdebbddff6b 100644 --- a/topi/include/topi/detail/broadcast.h +++ b/include/tvm/topi/detail/broadcast.h @@ -21,16 +21,17 @@ * \brief Detail broadcast. * \file topi/detail/broadcast.h */ -#ifndef TOPI_DETAIL_BROADCAST_H_ -#define TOPI_DETAIL_BROADCAST_H_ +#ifndef TVM_TOPI_DETAIL_BROADCAST_H_ +#define TVM_TOPI_DETAIL_BROADCAST_H_ -#include #include +#include #include #include #include +namespace tvm { namespace topi { namespace detail { @@ -136,5 +137,6 @@ inline tvm::te::Tensor WithBroadcast(FBinaryExpr op, const tvm::te::Tensor& A, } // namespace detail } // namespace topi +} // namespace tvm -#endif // TOPI_DETAIL_BROADCAST_H_ +#endif // TVM_TOPI_DETAIL_BROADCAST_H_ diff --git a/topi/include/topi/detail/constant_utils.h b/include/tvm/topi/detail/constant_utils.h similarity index 95% rename from topi/include/topi/detail/constant_utils.h rename to include/tvm/topi/detail/constant_utils.h index 9bd125119987..03317c2c1dbb 100644 --- a/topi/include/topi/detail/constant_utils.h +++ b/include/tvm/topi/detail/constant_utils.h @@ -21,8 +21,8 @@ * \file constant_utils.h * \brief Utility functions for handling constants in TVM expressions */ -#ifndef TOPI_DETAIL_CONSTANT_UTILS_H_ -#define TOPI_DETAIL_CONSTANT_UTILS_H_ +#ifndef TVM_TOPI_DETAIL_CONSTANT_UTILS_H_ +#define TVM_TOPI_DETAIL_CONSTANT_UTILS_H_ #include #include @@ -32,9 +32,10 @@ #include #include +namespace tvm { namespace topi { namespace detail { -using namespace tvm; + using namespace tvm::te; /*! @@ -122,4 +123,5 @@ inline bool EqualCheck(PrimExpr lhs, PrimExpr rhs) { } // namespace detail } // namespace topi -#endif // TOPI_DETAIL_CONSTANT_UTILS_H_ +} // namespace tvm +#endif // TVM_TOPI_DETAIL_CONSTANT_UTILS_H_ diff --git a/topi/include/topi/detail/extern.h b/include/tvm/topi/detail/extern.h similarity index 97% rename from topi/include/topi/detail/extern.h rename to include/tvm/topi/detail/extern.h index 5349818a2790..48c3e18aa58e 100644 --- a/topi/include/topi/detail/extern.h +++ b/include/tvm/topi/detail/extern.h @@ -21,8 +21,8 @@ * \file detail/extern.h * \brief Helpers for using external functions */ -#ifndef TOPI_DETAIL_EXTERN_H_ -#define TOPI_DETAIL_EXTERN_H_ +#ifndef TVM_TOPI_DETAIL_EXTERN_H_ +#define TVM_TOPI_DETAIL_EXTERN_H_ #include #include @@ -30,9 +30,10 @@ #include #include +namespace tvm { namespace topi { namespace detail { -using namespace tvm; + using namespace tvm::te; /*! 
@@ -145,4 +146,5 @@ inline PrimExpr call_packed(Array args) { } // namespace detail } // namespace topi -#endif // TOPI_DETAIL_EXTERN_H_ +} // namespace tvm +#endif // TVM_TOPI_DETAIL_EXTERN_H_ diff --git a/topi/include/topi/detail/fuse.h b/include/tvm/topi/detail/fuse.h similarity index 90% rename from topi/include/topi/detail/fuse.h rename to include/tvm/topi/detail/fuse.h index 90c1c2031e52..7305ccef9b1d 100644 --- a/topi/include/topi/detail/fuse.h +++ b/include/tvm/topi/detail/fuse.h @@ -21,14 +21,15 @@ * \file fuse.h * \brief Fuse operation */ -#ifndef TOPI_DETAIL_FUSE_H_ -#define TOPI_DETAIL_FUSE_H_ +#ifndef TVM_TOPI_DETAIL_FUSE_H_ +#define TVM_TOPI_DETAIL_FUSE_H_ #include +namespace tvm { namespace topi { namespace detail { -using namespace tvm; + using namespace tvm::te; /*! @@ -47,4 +48,5 @@ inline IterVar Fuse(Stage stage, const Array& args) { } // namespace detail } // namespace topi -#endif // TOPI_DETAIL_FUSE_H_ +} // namespace tvm +#endif // TVM_TOPI_DETAIL_FUSE_H_ diff --git a/topi/include/topi/detail/pad_utils.h b/include/tvm/topi/detail/pad_utils.h similarity index 91% rename from topi/include/topi/detail/pad_utils.h rename to include/tvm/topi/detail/pad_utils.h index 7c416ecefb3c..96eb49a505e4 100644 --- a/topi/include/topi/detail/pad_utils.h +++ b/include/tvm/topi/detail/pad_utils.h @@ -21,8 +21,8 @@ * \file pad_utils.h * \brief Padding helpers */ -#ifndef TOPI_DETAIL_PAD_UTILS_H_ -#define TOPI_DETAIL_PAD_UTILS_H_ +#ifndef TVM_TOPI_DETAIL_PAD_UTILS_H_ +#define TVM_TOPI_DETAIL_PAD_UTILS_H_ #include #include @@ -30,9 +30,10 @@ #include +namespace tvm { namespace topi { namespace detail { -using namespace tvm; + using namespace tvm::te; /*! @@ -56,4 +57,5 @@ inline Array GetPadTuple(PrimExpr pad_h, PrimExpr pad_w) { } // namespace detail } // namespace topi -#endif // TOPI_DETAIL_PAD_UTILS_H_ +} // namespace tvm +#endif // TVM_TOPI_DETAIL_PAD_UTILS_H_ diff --git a/topi/include/topi/detail/ravel_unravel.h b/include/tvm/topi/detail/ravel_unravel.h similarity index 92% rename from topi/include/topi/detail/ravel_unravel.h rename to include/tvm/topi/detail/ravel_unravel.h index c87f2c997ca6..fc775093e632 100644 --- a/topi/include/topi/detail/ravel_unravel.h +++ b/include/tvm/topi/detail/ravel_unravel.h @@ -21,16 +21,17 @@ * \file ravel_unravel.h * \brief Index ravel and unraval operations */ -#ifndef TOPI_DETAIL_RAVEL_UNRAVEL_H_ -#define TOPI_DETAIL_RAVEL_UNRAVEL_H_ +#ifndef TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_ +#define TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_ #include #include +namespace tvm { namespace topi { namespace detail { -using namespace tvm; + using namespace tvm::te; /*! 
@@ -76,4 +77,5 @@ inline Array UnravelIndex(PrimExpr idx, Array shape) { } // namespace detail } // namespace topi -#endif // TOPI_DETAIL_RAVEL_UNRAVEL_H_ +} // namespace tvm +#endif // TVM_TOPI_DETAIL_RAVEL_UNRAVEL_H_ diff --git a/topi/include/topi/detail/tensor_utils.h b/include/tvm/topi/detail/tensor_utils.h similarity index 94% rename from topi/include/topi/detail/tensor_utils.h rename to include/tvm/topi/detail/tensor_utils.h index d144c75695ed..7004c358ad4e 100644 --- a/topi/include/topi/detail/tensor_utils.h +++ b/include/tvm/topi/detail/tensor_utils.h @@ -21,14 +21,15 @@ * \file tensor_utils.h * \brief Utility functions for handling tensor */ -#ifndef TOPI_DETAIL_TENSOR_UTILS_H_ -#define TOPI_DETAIL_TENSOR_UTILS_H_ +#ifndef TVM_TOPI_DETAIL_TENSOR_UTILS_H_ +#define TVM_TOPI_DETAIL_TENSOR_UTILS_H_ #include +namespace tvm { namespace topi { namespace detail { -using namespace tvm; + using namespace tvm::te; /*! @@ -90,4 +91,5 @@ inline PrimExpr bilinear_sample_nchw(const Tensor& input, const Array& } // namespace detail } // namespace topi -#endif // TOPI_DETAIL_TENSOR_UTILS_H_ +} // namespace tvm +#endif // TVM_TOPI_DETAIL_TENSOR_UTILS_H_ diff --git a/topi/include/topi/elemwise.h b/include/tvm/topi/elemwise.h similarity index 99% rename from topi/include/topi/elemwise.h rename to include/tvm/topi/elemwise.h index 9b418d03900c..f537c9c865df 100644 --- a/topi/include/topi/elemwise.h +++ b/include/tvm/topi/elemwise.h @@ -21,20 +21,21 @@ * \file elemwise.h * \brief Elementwise op constructions */ -#ifndef TOPI_ELEMWISE_H_ -#define TOPI_ELEMWISE_H_ +#ifndef TVM_TOPI_ELEMWISE_H_ +#define TVM_TOPI_ELEMWISE_H_ -#include #include #include +#include #include #include #include "broadcast.h" +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; // Unary intrinsic operators @@ -525,4 +526,5 @@ inline Tensor fast_erf(const Tensor& x, std::string name = "T_fast_erf", } } // namespace topi -#endif // TOPI_ELEMWISE_H_ +} // namespace tvm +#endif // TVM_TOPI_ELEMWISE_H_ diff --git a/topi/include/topi/generic/default.h b/include/tvm/topi/generic/default.h similarity index 91% rename from topi/include/topi/generic/default.h rename to include/tvm/topi/generic/default.h index 403b943a16e6..752b6ad1537e 100644 --- a/topi/include/topi/generic/default.h +++ b/include/tvm/topi/generic/default.h @@ -21,17 +21,18 @@ * \file generic/default.h * \brief Generic default schedule */ -#ifndef TOPI_GENERIC_DEFAULT_H_ -#define TOPI_GENERIC_DEFAULT_H_ +#ifndef TVM_TOPI_GENERIC_DEFAULT_H_ +#define TVM_TOPI_GENERIC_DEFAULT_H_ -#include -#include #include #include #include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace generic { @@ -78,4 +79,5 @@ inline Schedule default_schedule_auto_inline(const Target& target, const Array -#include -#include #include #include #include +#include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace generic { @@ -64,4 +65,5 @@ inline Schedule schedule_extern(const Target& target, const Array& outs) } // namespace generic } // namespace topi -#endif // TOPI_GENERIC_EXTERN_H_ +} // namespace tvm +#endif // TVM_TOPI_GENERIC_EXTERN_H_ diff --git a/topi/include/topi/generic/injective.h b/include/tvm/topi/generic/injective.h similarity index 90% rename from topi/include/topi/generic/injective.h rename to include/tvm/topi/generic/injective.h index 69962dc645c0..c48c03eee065 100644 --- a/topi/include/topi/generic/injective.h +++ 
b/include/tvm/topi/generic/injective.h @@ -21,17 +21,18 @@ * \file generic/injective.h * \brief Generic schedule for injective operations */ -#ifndef TOPI_GENERIC_INJECTIVE_H_ -#define TOPI_GENERIC_INJECTIVE_H_ +#ifndef TVM_TOPI_GENERIC_INJECTIVE_H_ +#define TVM_TOPI_GENERIC_INJECTIVE_H_ -#include -#include #include #include #include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace generic { @@ -72,4 +73,5 @@ inline Schedule schedule_injective(const Target& target, const Array& ou } // namespace generic } // namespace topi -#endif // TOPI_GENERIC_INJECTIVE_H_ +} // namespace tvm +#endif // TVM_TOPI_GENERIC_INJECTIVE_H_ diff --git a/topi/include/topi/nn.h b/include/tvm/topi/nn.h similarity index 99% rename from topi/include/topi/nn.h rename to include/tvm/topi/nn.h index 2a195b34fc4f..17eb0d0fcf3f 100644 --- a/topi/include/topi/nn.h +++ b/include/tvm/topi/nn.h @@ -21,21 +21,22 @@ * \brief NN op constructions * \file topi/nn.h */ -#ifndef TOPI_NN_H_ -#define TOPI_NN_H_ +#ifndef TVM_TOPI_NN_H_ +#define TVM_TOPI_NN_H_ -#include -#include #include #include #include #include +#include +#include #include #include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; /*! @@ -447,4 +448,5 @@ inline tvm::te::Tensor group_conv2d_ngchw(const tvm::te::Tensor& I, const tvm::t } } // namespace topi -#endif // TOPI_NN_H_ +} // namespace tvm +#endif // TVM_TOPI_NN_H_ diff --git a/topi/include/topi/nn/batch_matmul.h b/include/tvm/topi/nn/batch_matmul.h similarity index 91% rename from topi/include/topi/nn/batch_matmul.h rename to include/tvm/topi/nn/batch_matmul.h index 80525c427976..bffddca8010f 100644 --- a/topi/include/topi/nn/batch_matmul.h +++ b/include/tvm/topi/nn/batch_matmul.h @@ -21,17 +21,18 @@ * \brief Batch matmul op constructions * \file nn/batch_matmul.h */ -#ifndef TOPI_NN_BATCH_MATMUL_H_ -#define TOPI_NN_BATCH_MATMUL_H_ +#ifndef TVM_TOPI_NN_BATCH_MATMUL_H_ +#define TVM_TOPI_NN_BATCH_MATMUL_H_ -#include #include +#include #include +namespace tvm { namespace topi { namespace nn { -using namespace tvm; + using namespace tvm::te; /*! 
@@ -61,5 +62,6 @@ inline tvm::te::Tensor batch_matmul(const tvm::te::Tensor& x, const tvm::te::Ten } // namespace nn } // namespace topi +} // namespace tvm -#endif // TOPI_NN_BATCH_MATMUL_H_ +#endif // TVM_TOPI_NN_BATCH_MATMUL_H_ diff --git a/topi/include/topi/nn/bias_add.h b/include/tvm/topi/nn/bias_add.h similarity index 87% rename from topi/include/topi/nn/bias_add.h rename to include/tvm/topi/nn/bias_add.h index 18e95deaccb1..03c026c7d75f 100644 --- a/topi/include/topi/nn/bias_add.h +++ b/include/tvm/topi/nn/bias_add.h @@ -21,16 +21,17 @@ * \brief bias_add op constructions * \file nn/bias_add.h */ -#ifndef TOPI_NN_BIAS_ADD_H_ -#define TOPI_NN_BIAS_ADD_H_ +#ifndef TVM_TOPI_NN_BIAS_ADD_H_ +#define TVM_TOPI_NN_BIAS_ADD_H_ -#include -#include -#include #include +#include +#include +#include #include +namespace tvm { namespace topi { namespace nn { @@ -53,4 +54,5 @@ inline tvm::te::Tensor bias_add(const tvm::te::Tensor& data, const tvm::te::Tens } } // namespace nn } // namespace topi -#endif // TOPI_NN_BIAS_ADD_H_ +} // namespace tvm +#endif // TVM_TOPI_NN_BIAS_ADD_H_ diff --git a/topi/include/topi/nn/bnn.h b/include/tvm/topi/nn/bnn.h similarity index 95% rename from topi/include/topi/nn/bnn.h rename to include/tvm/topi/nn/bnn.h index c0626cd43c7f..f72950861b8a 100644 --- a/topi/include/topi/nn/bnn.h +++ b/include/tvm/topi/nn/bnn.h @@ -21,19 +21,20 @@ * \brief Binary op constructions * \file nn/bnn.h */ -#ifndef TOPI_NN_BNN_H_ -#define TOPI_NN_BNN_H_ +#ifndef TVM_TOPI_NN_BNN_H_ +#define TVM_TOPI_NN_BNN_H_ -#include -#include #include #include +#include +#include #include +namespace tvm { namespace topi { namespace nn { -using namespace tvm; + using namespace tvm::te; /*! @@ -120,4 +121,5 @@ inline tvm::te::Tensor binary_dense(const tvm::te::Tensor& data, const tvm::te:: } // namespace nn } // namespace topi -#endif // TOPI_NN_BNN_H_ +} // namespace tvm +#endif // TVM_TOPI_NN_BNN_H_ diff --git a/topi/include/topi/nn/dense.h b/include/tvm/topi/nn/dense.h similarity index 93% rename from topi/include/topi/nn/dense.h rename to include/tvm/topi/nn/dense.h index 4ee36c275ef3..ad18cb063f10 100644 --- a/topi/include/topi/nn/dense.h +++ b/include/tvm/topi/nn/dense.h @@ -21,17 +21,18 @@ * \brief Dense op constructions * \file nn/dense.h */ -#ifndef TOPI_NN_DENSE_H_ -#define TOPI_NN_DENSE_H_ +#ifndef TVM_TOPI_NN_DENSE_H_ +#define TVM_TOPI_NN_DENSE_H_ -#include #include +#include #include +namespace tvm { namespace topi { namespace nn { -using namespace tvm; + using namespace tvm::te; /*! @@ -76,4 +77,5 @@ inline tvm::te::Tensor dense(const tvm::te::Tensor& data, const tvm::te::Tensor& } // namespace nn } // namespace topi -#endif // TOPI_NN_DENSE_H_ +} // namespace tvm +#endif // TVM_TOPI_NN_DENSE_H_ diff --git a/topi/include/topi/nn/dilate.h b/include/tvm/topi/nn/dilate.h similarity index 95% rename from topi/include/topi/nn/dilate.h rename to include/tvm/topi/nn/dilate.h index 0d3ab89bbae6..a021402e097c 100644 --- a/topi/include/topi/nn/dilate.h +++ b/include/tvm/topi/nn/dilate.h @@ -21,18 +21,19 @@ * \brief Dilate op constructions * \file nn/dilate.h */ -#ifndef TOPI_NN_DILATE_H_ -#define TOPI_NN_DILATE_H_ +#ifndef TVM_TOPI_NN_DILATE_H_ +#define TVM_TOPI_NN_DILATE_H_ -#include #include #include +#include #include +namespace tvm { namespace topi { namespace nn { -using namespace tvm; + using namespace tvm::te; /*! 
@@ -102,4 +103,5 @@ inline Tensor dilate(const Tensor& x, Array strides, std::string name } // namespace nn } // namespace topi -#endif // TOPI_NN_DILATE_H_ +} // namespace tvm +#endif // TVM_TOPI_NN_DILATE_H_ diff --git a/topi/include/topi/nn/flatten.h b/include/tvm/topi/nn/flatten.h similarity index 91% rename from topi/include/topi/nn/flatten.h rename to include/tvm/topi/nn/flatten.h index 1ac5de4a2ed1..cd96d303b920 100644 --- a/topi/include/topi/nn/flatten.h +++ b/include/tvm/topi/nn/flatten.h @@ -21,19 +21,20 @@ * \brief Softmax op constructions * \file nn/flatten.h */ -#ifndef TOPI_NN_FLATTEN_H_ -#define TOPI_NN_FLATTEN_H_ +#ifndef TVM_TOPI_NN_FLATTEN_H_ +#define TVM_TOPI_NN_FLATTEN_H_ -#include -#include #include +#include +#include #include #include +namespace tvm { namespace topi { namespace nn { -using namespace tvm; + using namespace tvm::te; /*! @@ -79,4 +80,5 @@ inline Tensor flatten(const Tensor& x, std::string name = "tensor", std::string } // namespace nn } // namespace topi -#endif // TOPI_NN_FLATTEN_H_ +} // namespace tvm +#endif // TVM_TOPI_NN_FLATTEN_H_ diff --git a/topi/include/topi/nn/local_response_norm.h b/include/tvm/topi/nn/local_response_norm.h similarity index 93% rename from topi/include/topi/nn/local_response_norm.h rename to include/tvm/topi/nn/local_response_norm.h index 4e8dfd99a517..0170c503d9ff 100644 --- a/topi/include/topi/nn/local_response_norm.h +++ b/include/tvm/topi/nn/local_response_norm.h @@ -21,17 +21,18 @@ * \brief local response normalization op constructions * \file nn/local_response_norm.h */ -#ifndef TOPI_NN_LOCAL_RESPONSE_NORM_H_ -#define TOPI_NN_LOCAL_RESPONSE_NORM_H_ +#ifndef TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_ +#define TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_ -#include #include +#include #include +namespace tvm { namespace topi { namespace nn { -using namespace tvm; + using namespace tvm::te; /*! @@ -78,4 +79,5 @@ inline Tensor lrn(const Tensor& data, int size, int axis = 1, float alpha = 0.00 } } // namespace nn } // namespace topi -#endif // TOPI_NN_LOCAL_RESPONSE_NORM_H_ +} // namespace tvm +#endif // TVM_TOPI_NN_LOCAL_RESPONSE_NORM_H_ diff --git a/topi/include/topi/nn/mapping.h b/include/tvm/topi/nn/mapping.h similarity index 93% rename from topi/include/topi/nn/mapping.h rename to include/tvm/topi/nn/mapping.h index 2bf3314e7377..d6a87169a9a2 100644 --- a/topi/include/topi/nn/mapping.h +++ b/include/tvm/topi/nn/mapping.h @@ -21,17 +21,18 @@ * \brief Mapping op constructions * \file nn/mapping.h */ -#ifndef TOPI_NN_MAPPING_H_ -#define TOPI_NN_MAPPING_H_ +#ifndef TVM_TOPI_NN_MAPPING_H_ +#define TVM_TOPI_NN_MAPPING_H_ -#include #include +#include #include +namespace tvm { namespace topi { namespace nn { -using namespace tvm; + using namespace tvm::te; /*! 
@@ -72,4 +73,5 @@ inline Tensor scale_shift_nhwc(const Tensor& x, const Tensor& scale, const Tenso } // namespace nn } // namespace topi -#endif // TOPI_NN_MAPPING_H_ +} // namespace tvm +#endif // TVM_TOPI_NN_MAPPING_H_ diff --git a/topi/include/topi/nn/pooling.h b/include/tvm/topi/nn/pooling.h similarity index 99% rename from topi/include/topi/nn/pooling.h rename to include/tvm/topi/nn/pooling.h index f6435cd2f42a..b6852ffd01c3 100644 --- a/topi/include/topi/nn/pooling.h +++ b/include/tvm/topi/nn/pooling.h @@ -21,22 +21,23 @@ * \brief Pooling op constructions * \file nn/pooling.h */ -#ifndef TOPI_NN_POOLING_H_ -#define TOPI_NN_POOLING_H_ +#ifndef TVM_TOPI_NN_POOLING_H_ +#define TVM_TOPI_NN_POOLING_H_ -#include -#include -#include -#include #include +#include +#include +#include +#include #include #include #include +namespace tvm { namespace topi { namespace nn { -using namespace tvm; + using namespace tvm::te; /*! \brief Pooling type */ @@ -843,4 +844,5 @@ inline Tensor pool3d(const Tensor& x, const Array& kernel_size, } // namespace nn } // namespace topi -#endif // TOPI_NN_POOLING_H_ +} // namespace tvm +#endif // TVM_TOPI_NN_POOLING_H_ diff --git a/topi/include/topi/nn/softmax.h b/include/tvm/topi/nn/softmax.h similarity index 96% rename from topi/include/topi/nn/softmax.h rename to include/tvm/topi/nn/softmax.h index 5ebeb6b8a4bf..2e94f9103c68 100644 --- a/topi/include/topi/nn/softmax.h +++ b/include/tvm/topi/nn/softmax.h @@ -21,19 +21,20 @@ * \brief Softmax op constructions * \file nn/softmax.h */ -#ifndef TOPI_NN_SOFTMAX_H_ -#define TOPI_NN_SOFTMAX_H_ +#ifndef TVM_TOPI_NN_SOFTMAX_H_ +#define TVM_TOPI_NN_SOFTMAX_H_ -#include -#include #include +#include +#include #include #include +namespace tvm { namespace topi { namespace nn { -using namespace tvm; + using namespace tvm::te; /*! @@ -143,4 +144,5 @@ inline Tensor log_softmax(const Tensor& x, std::string name = "tensor", } // namespace nn } // namespace topi -#endif // TOPI_NN_SOFTMAX_H_ +} // namespace tvm +#endif // TVM_TOPI_NN_SOFTMAX_H_ diff --git a/topi/include/topi/reduction.h b/include/tvm/topi/reduction.h similarity index 98% rename from topi/include/topi/reduction.h rename to include/tvm/topi/reduction.h index 85555000dc1c..8a8a947959f1 100644 --- a/topi/include/topi/reduction.h +++ b/include/tvm/topi/reduction.h @@ -21,24 +21,25 @@ * \file topi/reduction.h * \brief Reduction op constructors */ -#ifndef TOPI_REDUCTION_H_ -#define TOPI_REDUCTION_H_ - -#include -#include -#include -#include -#include -#include +#ifndef TVM_TOPI_REDUCTION_H_ +#define TVM_TOPI_REDUCTION_H_ + #include +#include +#include +#include +#include +#include +#include #include #include #include #include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; /*! 
\brief The operation to use for CommReduce */ @@ -510,4 +511,5 @@ inline Tensor prod(const Tensor& data, const Array& axis, bool keepdims } } // namespace topi -#endif // TOPI_REDUCTION_H_ +} // namespace tvm +#endif // TVM_TOPI_REDUCTION_H_ diff --git a/topi/include/topi/rocm/dense.h b/include/tvm/topi/rocm/dense.h similarity index 89% rename from topi/include/topi/rocm/dense.h rename to include/tvm/topi/rocm/dense.h index e2e04b44fc32..e279152e0faa 100644 --- a/topi/include/topi/rocm/dense.h +++ b/include/tvm/topi/rocm/dense.h @@ -21,21 +21,21 @@ * \file rocm/dense.h * \brief rocm schedule for dense operation */ -#ifndef TOPI_ROCM_DENSE_H_ -#define TOPI_ROCM_DENSE_H_ +#ifndef TVM_TOPI_ROCM_DENSE_H_ +#define TVM_TOPI_ROCM_DENSE_H_ -#include #include #include +#include +#include +#include +#include +#include +#include -#include "topi/contrib/rocblas.h" -#include "topi/cuda/dense.h" -#include "topi/detail/array_utils.h" -#include "topi/generic/extern.h" -#include "topi/nn/dense.h" - +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace rocm { @@ -95,4 +95,5 @@ inline Schedule schedule_dense(const Target& target, const Array& outs) } // namespace rocm } // namespace topi -#endif // TOPI_ROCM_DENSE_H_ +} // namespace tvm +#endif // TVM_TOPI_ROCM_DENSE_H_ diff --git a/topi/include/topi/rocm/injective.h b/include/tvm/topi/rocm/injective.h similarity index 88% rename from topi/include/topi/rocm/injective.h rename to include/tvm/topi/rocm/injective.h index e7415bfd0ff2..295d930e5cd8 100644 --- a/topi/include/topi/rocm/injective.h +++ b/include/tvm/topi/rocm/injective.h @@ -21,18 +21,18 @@ * \file rocm/injective.h * \brief rocm schedule for injective operations */ -#ifndef TOPI_ROCM_INJECTIVE_H_ -#define TOPI_ROCM_INJECTIVE_H_ +#ifndef TVM_TOPI_ROCM_INJECTIVE_H_ +#define TVM_TOPI_ROCM_INJECTIVE_H_ -#include -#include #include #include +#include +#include +#include -#include "topi/cuda/injective.h" - +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace rocm { @@ -63,4 +63,5 @@ inline Schedule schedule_injective(const Target& target, const Array& ou } // namespace rocm } // namespace topi -#endif // TOPI_ROCM_INJECTIVE_H_ +} // namespace tvm +#endif // TVM_TOPI_ROCM_INJECTIVE_H_ diff --git a/topi/include/topi/rocm/normalization.h b/include/tvm/topi/rocm/normalization.h similarity index 87% rename from topi/include/topi/rocm/normalization.h rename to include/tvm/topi/rocm/normalization.h index 832868348b67..2fbb88089286 100644 --- a/topi/include/topi/rocm/normalization.h +++ b/include/tvm/topi/rocm/normalization.h @@ -21,15 +21,16 @@ * \file rocm/normalization.h * \brief rocm schedule for LRN and l2 normalization operations */ -#ifndef TOPI_ROCM_NORMALIZATION_H_ -#define TOPI_ROCM_NORMALIZATION_H_ +#ifndef TVM_TOPI_ROCM_NORMALIZATION_H_ +#define TVM_TOPI_ROCM_NORMALIZATION_H_ -#include #include #include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace rocm { /*! 
@@ -41,4 +42,5 @@ inline Schedule schedule_lrn(const Array& outs) { return topi::cuda::sch } // namespace rocm } // namespace topi -#endif // TOPI_ROCM_NORMALIZATION_H_ +} // namespace tvm +#endif // TVM_TOPI_ROCM_NORMALIZATION_H_ diff --git a/topi/include/topi/rocm/pooling.h b/include/tvm/topi/rocm/pooling.h similarity index 86% rename from topi/include/topi/rocm/pooling.h rename to include/tvm/topi/rocm/pooling.h index 0b68a0ac5366..993c32bf36ad 100644 --- a/topi/include/topi/rocm/pooling.h +++ b/include/tvm/topi/rocm/pooling.h @@ -21,18 +21,19 @@ * \file rocm/pooling.h * \brief rocm schedule for pooling operations */ -#ifndef TOPI_ROCM_POOLING_H_ -#define TOPI_ROCM_POOLING_H_ +#ifndef TVM_TOPI_ROCM_POOLING_H_ +#define TVM_TOPI_ROCM_POOLING_H_ -#include -#include -#include -#include #include #include +#include +#include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace rocm { @@ -63,4 +64,5 @@ inline Schedule schedule_global_pool(const Target& target, const Array& } // namespace rocm } // namespace topi -#endif // TOPI_ROCM_POOLING_H_ +} // namespace tvm +#endif // TVM_TOPI_ROCM_POOLING_H_ diff --git a/topi/include/topi/rocm/reduction.h b/include/tvm/topi/rocm/reduction.h similarity index 85% rename from topi/include/topi/rocm/reduction.h rename to include/tvm/topi/rocm/reduction.h index 512bf20b4bc1..7beda177ace8 100644 --- a/topi/include/topi/rocm/reduction.h +++ b/include/tvm/topi/rocm/reduction.h @@ -21,18 +21,18 @@ * \file rocm/reduction.h * \brief rocm schedule for reduction operations */ -#ifndef TOPI_ROCM_REDUCTION_H_ -#define TOPI_ROCM_REDUCTION_H_ +#ifndef TVM_TOPI_ROCM_REDUCTION_H_ +#define TVM_TOPI_ROCM_REDUCTION_H_ -#include -#include #include #include +#include +#include +#include -#include "topi/cuda/reduction.h" - +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace rocm { @@ -50,4 +50,5 @@ Schedule schedule_reduce(const Target& target, Array outs) { } // namespace rocm } // namespace topi -#endif // TOPI_ROCM_REDUCTION_H_ +} // namespace tvm +#endif // TVM_TOPI_ROCM_REDUCTION_H_ diff --git a/topi/include/topi/rocm/softmax.h b/include/tvm/topi/rocm/softmax.h similarity index 85% rename from topi/include/topi/rocm/softmax.h rename to include/tvm/topi/rocm/softmax.h index de05c4cec9d3..a2ffd2c46e66 100644 --- a/topi/include/topi/rocm/softmax.h +++ b/include/tvm/topi/rocm/softmax.h @@ -21,18 +21,18 @@ * \file rocm/injective.h * \brief ROCM schedule for injective operations */ -#ifndef TOPI_ROCM_SOFTMAX_H_ -#define TOPI_ROCM_SOFTMAX_H_ +#ifndef TVM_TOPI_ROCM_SOFTMAX_H_ +#define TVM_TOPI_ROCM_SOFTMAX_H_ -#include -#include #include #include +#include +#include +#include -#include "topi/cuda/softmax.h" - +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace rocm { @@ -51,4 +51,5 @@ inline Schedule schedule_softmax(const Target& target, const Array& outs } // namespace rocm } // namespace topi -#endif // TOPI_ROCM_SOFTMAX_H_ +} // namespace tvm +#endif // TVM_TOPI_ROCM_SOFTMAX_H_ diff --git a/topi/include/topi/tags.h b/include/tvm/topi/tags.h similarity index 94% rename from topi/include/topi/tags.h rename to include/tvm/topi/tags.h index 1e9ec446dfa3..3b748ca60ce5 100644 --- a/topi/include/topi/tags.h +++ b/include/tvm/topi/tags.h @@ -21,11 +21,12 @@ * \brief Tag definitions * \file tags.h */ -#ifndef TOPI_TAGS_H_ -#define TOPI_TAGS_H_ +#ifndef TVM_TOPI_TAGS_H_ +#define TVM_TOPI_TAGS_H_ #include +namespace tvm { namespace topi { constexpr 
auto kElementWise = "elemwise"; @@ -52,5 +53,6 @@ inline bool is_injective(std::string tag) { } } // namespace topi +} // namespace tvm -#endif // TOPI_TAGS_H_ +#endif // TVM_TOPI_TAGS_H_ diff --git a/topi/include/topi/transform.h b/include/tvm/topi/transform.h similarity index 99% rename from topi/include/topi/transform.h rename to include/tvm/topi/transform.h index 0b339d2ecafe..cd19436d4103 100644 --- a/topi/include/topi/transform.h +++ b/include/tvm/topi/transform.h @@ -21,15 +21,15 @@ * \file topi/transform.h * \brief Transform op constructors */ -#ifndef TOPI_TRANSFORM_H_ -#define TOPI_TRANSFORM_H_ +#ifndef TVM_TOPI_TRANSFORM_H_ +#define TVM_TOPI_TRANSFORM_H_ -#include -#include -#include -#include #include #include +#include +#include +#include +#include #include #include @@ -38,8 +38,9 @@ #include #include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; using namespace topi::detail; @@ -1508,4 +1509,5 @@ inline Tensor sparse_to_dense(const Tensor& sparse_indices, const Array } } // namespace topi -#endif // TOPI_TRANSFORM_H_ +} // namespace tvm +#endif // TVM_TOPI_TRANSFORM_H_ diff --git a/topi/include/topi/util.h b/include/tvm/topi/util.h similarity index 92% rename from topi/include/topi/util.h rename to include/tvm/topi/util.h index 133bc8543644..4e0cdc6f2057 100644 --- a/topi/include/topi/util.h +++ b/include/tvm/topi/util.h @@ -21,15 +21,15 @@ * \brief Topi utility function * \file topi/util.h */ -#ifndef TOPI_UTIL_H_ -#define TOPI_UTIL_H_ +#ifndef TVM_TOPI_UTIL_H_ +#define TVM_TOPI_UTIL_H_ #include #include +namespace tvm { namespace topi { -using namespace tvm; using namespace tvm::runtime; /*! \brief Canonicalize an argument that may be Array or int to Array */ @@ -43,4 +43,5 @@ inline Array ArrayOrInt(TVMArgValue arg) { } } } // namespace topi -#endif // TOPI_UTIL_H_ +} // namespace tvm +#endif // TVM_TOPI_UTIL_H_ diff --git a/topi/include/topi/vision/reorg.h b/include/tvm/topi/vision/reorg.h similarity index 89% rename from topi/include/topi/vision/reorg.h rename to include/tvm/topi/vision/reorg.h index 5bd79f67f052..381272bb818c 100644 --- a/topi/include/topi/vision/reorg.h +++ b/include/tvm/topi/vision/reorg.h @@ -21,21 +21,22 @@ * \brief Reorg op constructions * \file vision/reorg.h */ -#ifndef TOPI_VISION_REORG_H_ -#define TOPI_VISION_REORG_H_ +#ifndef TVM_TOPI_VISION_REORG_H_ +#define TVM_TOPI_VISION_REORG_H_ -#include -#include -#include -#include #include +#include +#include +#include +#include #include #include +namespace tvm { namespace topi { namespace vision { -using namespace tvm; + using namespace tvm::te; /*! 
@@ -76,4 +77,5 @@ inline Tensor reorg(const Tensor& data, int stride = 1, std::string name = "tens } } // namespace vision } // namespace topi -#endif // TOPI_VISION_REORG_H_ +} // namespace tvm +#endif // TVM_TOPI_VISION_REORG_H_ diff --git a/topi/include/topi/x86/bnn.h b/include/tvm/topi/x86/bnn.h similarity index 94% rename from topi/include/topi/x86/bnn.h rename to include/tvm/topi/x86/bnn.h index a59d30da3dce..c8a7235536b5 100644 --- a/topi/include/topi/x86/bnn.h +++ b/include/tvm/topi/x86/bnn.h @@ -21,16 +21,17 @@ * \file x86/bnn.h * \brief x86 schedule for binary operations */ -#ifndef TOPI_X86_BNN_H_ -#define TOPI_X86_BNN_H_ +#ifndef TVM_TOPI_X86_BNN_H_ +#define TVM_TOPI_X86_BNN_H_ -#include -#include #include #include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace x86 { @@ -126,4 +127,5 @@ inline Schedule schedule_binary_dense(const Target& target, const Array& } // namespace x86 } // namespace topi -#endif // TOPI_X86_BNN_H_ +} // namespace tvm +#endif // TVM_TOPI_X86_BNN_H_ diff --git a/topi/include/topi/x86/default.h b/include/tvm/topi/x86/default.h similarity index 93% rename from topi/include/topi/x86/default.h rename to include/tvm/topi/x86/default.h index 07337810a694..9c9856040261 100644 --- a/topi/include/topi/x86/default.h +++ b/include/tvm/topi/x86/default.h @@ -21,17 +21,18 @@ * \file x86/default.h * \brief default x86 schedule */ -#ifndef TOPI_X86_DEFAULT_H_ -#define TOPI_X86_DEFAULT_H_ +#ifndef TVM_TOPI_X86_DEFAULT_H_ +#define TVM_TOPI_X86_DEFAULT_H_ -#include -#include #include #include #include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace x86 { @@ -100,4 +101,5 @@ inline Schedule default_schedule_auto_inline(const Target& target, const Array& outs) } // namespace x86 } // namespace topi -#endif // TOPI_X86_DEFAULT_H_ +} // namespace tvm +#endif // TVM_TOPI_X86_DEFAULT_H_ diff --git a/topi/include/topi/x86/injective.h b/include/tvm/topi/x86/injective.h rename from topi/include/topi/x86/injective.h rename to include/tvm/topi/x86/injective.h --- a/topi/include/topi/x86/injective.h +++ b/include/tvm/topi/x86/injective.h -#ifndef TOPI_X86_INJECTIVE_H_ -#define TOPI_X86_INJECTIVE_H_ +#ifndef TVM_TOPI_X86_INJECTIVE_H_ +#define TVM_TOPI_X86_INJECTIVE_H_ -#include -#include #include #include +#include +#include +namespace tvm { namespace topi { -using namespace tvm; + using namespace tvm::te; namespace x86 { @@ -80,4 +81,5 @@ inline Schedule schedule_injective(const Target& target, const Array& ou } // namespace x86 } // namespace topi -#endif // TOPI_X86_INJECTIVE_H_ +} // namespace tvm +#endif // TVM_TOPI_X86_INJECTIVE_H_ diff --git a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py index 76f92bea4ce9..fa0186025e02 100644 --- a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py +++ b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py @@ -20,7 +20,7 @@ from abc import abstractmethod import numpy as np -import topi +from tvm import topi import tvm from tvm import te diff --git a/python/tvm/autotvm/task/relay_integration.py b/python/tvm/autotvm/task/relay_integration.py index 67ebda4b5b7a..15d45347361e 100644 --- a/python/tvm/autotvm/task/relay_integration.py +++ b/python/tvm/autotvm/task/relay_integration.py @@ -118,7 +118,7 @@ def extract_from_multiple_program(mods, params, target, target_host=None, ops=No """ # pylint: disable=import-outside-toplevel from tvm import relay - import topi + from tvm import topi env = TaskExtractEnv.get() diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index c3da195d9c8e..c86d17616207 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -22,7 +22,7 @@ import tvm from tvm.ir import IRModule -from topi.util import get_const_tuple +from tvm.topi.util import get_const_tuple from .. import expr as _expr from ..
import function as _function diff --git a/python/tvm/relay/frontend/mxnet.py b/python/tvm/relay/frontend/mxnet.py index 327bcd483c67..9a353f3d1930 100644 --- a/python/tvm/relay/frontend/mxnet.py +++ b/python/tvm/relay/frontend/mxnet.py @@ -23,7 +23,7 @@ from tvm.ir import IRModule from tvm import relay -from topi.util import get_const_tuple +from tvm.topi.util import get_const_tuple from .. import analysis from .. import expr as _expr from .. import function as _function diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index a06b0ca425cf..24f1b8b11f97 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -27,7 +27,7 @@ from tvm.ir import IRModule from tvm.relay.prelude import Prelude, StaticTensorArrayOps, get_tensor_array_shape -from topi.util import get_const_tuple +from tvm.topi.util import get_const_tuple from .. import analysis from .. import expr as _expr diff --git a/python/tvm/relay/op/_reduce.py b/python/tvm/relay/op/_reduce.py index 0eeeb95fde5f..015f5ad49846 100644 --- a/python/tvm/relay/op/_reduce.py +++ b/python/tvm/relay/op/_reduce.py @@ -19,7 +19,7 @@ from tvm.runtime import convert from tvm.te.hybrid import script -from topi.util import get_const_int, get_const_tuple +from tvm.topi.util import get_const_int, get_const_tuple from . import op as _reg _reg.register_reduce_schedule("argmax") diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index 2ca2a0176318..28336cffdc20 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -18,7 +18,7 @@ """Backend compiler related feature registration""" from tvm.te.hybrid import script -import topi +from tvm import topi from .op import register_compute, register_shape_func from .op import register_broadcast_schedule, register_injective_schedule diff --git a/python/tvm/relay/op/_tensor_grad.py b/python/tvm/relay/op/_tensor_grad.py index 3e87f6078664..aee860392723 100644 --- a/python/tvm/relay/op/_tensor_grad.py +++ b/python/tvm/relay/op/_tensor_grad.py @@ -18,8 +18,8 @@ """Backend compiler related feature registration""" from __future__ import absolute_import -from topi.nn.util import get_pad_tuple -from topi.util import get_const_tuple +from tvm.topi.nn.util import get_pad_tuple +from tvm.topi.util import get_const_tuple from ..expr import Tuple, TupleGetItem, const from . import nn as _nn diff --git a/python/tvm/relay/op/_transform.py b/python/tvm/relay/op/_transform.py index 4e113f7aedab..a69eb8c491cb 100644 --- a/python/tvm/relay/op/_transform.py +++ b/python/tvm/relay/op/_transform.py @@ -21,8 +21,8 @@ from tvm import te from tvm.te.hybrid import script from tvm.runtime import convert -import topi -from topi.util import get_const_int, get_const_tuple +from tvm import topi +from tvm.topi.util import get_const_int, get_const_tuple from . import op as _reg from . 
import strategy from .op import OpPattern diff --git a/python/tvm/relay/op/dyn/_tensor.py b/python/tvm/relay/op/dyn/_tensor.py index dc2835977fb9..371e4ad2fc6c 100644 --- a/python/tvm/relay/op/dyn/_tensor.py +++ b/python/tvm/relay/op/dyn/_tensor.py @@ -17,7 +17,7 @@ #pylint: disable=invalid-name, unused-argument, len-as-condition """Backend compiler related feature registration for dynamic ops""" -import topi +from tvm import topi from ..op import register_shape_func, register_compute from ..op import register_broadcast_schedule diff --git a/python/tvm/relay/op/image/_image.py b/python/tvm/relay/op/image/_image.py index 795844fad408..2cc3588a0d12 100644 --- a/python/tvm/relay/op/image/_image.py +++ b/python/tvm/relay/op/image/_image.py @@ -21,8 +21,8 @@ from tvm.te.hybrid import script from tvm.runtime import convert -import topi -from topi.util import get_const_tuple +from tvm import topi +from tvm.topi.util import get_const_tuple from .. import op as reg from .. import strategy from ..op import OpPattern diff --git a/python/tvm/relay/op/image/image.py b/python/tvm/relay/op/image/image.py index 62889e0b674e..99a6a0fc313f 100644 --- a/python/tvm/relay/op/image/image.py +++ b/python/tvm/relay/op/image/image.py @@ -74,8 +74,8 @@ def resize3d(data, This operator takes data as input and does 3D scaling to the given scale factor. In the default case, where the data_layout is `NCDHW` - with data of shape (n, c, d, h, w) - out will have a shape (n, c, size[0], size[1], size[2]) + with data of shape `(n, c, d, h, w)` + out will have a shape `(n, c, size[0], size[1], size[2])` method indicates the algorithm to be used while calculating the out value and method can be one of ("trilinear", "nearest_neighbor") diff --git a/python/tvm/relay/op/nn/_nn.py b/python/tvm/relay/op/nn/_nn.py index cea592a51706..2f0966c62b65 100644 --- a/python/tvm/relay/op/nn/_nn.py +++ b/python/tvm/relay/op/nn/_nn.py @@ -18,8 +18,8 @@ """Backend compiler related feature registration""" from __future__ import absolute_import -import topi -from topi.util import get_const_tuple +from tvm import topi +from tvm.topi.util import get_const_tuple from tvm.runtime import convert from tvm.te.hybrid import script diff --git a/python/tvm/relay/op/nn/nn.py b/python/tvm/relay/op/nn/nn.py index 6ede0beec861..b2df8505e691 100644 --- a/python/tvm/relay/op/nn/nn.py +++ b/python/tvm/relay/op/nn/nn.py @@ -1387,7 +1387,7 @@ def prelu(data, alpha, axis=1): .. math:: - `y = x > 0 ? x : alpha * x` + y = x > 0 ? x : alpha * x Parameters ---------- @@ -2423,14 +2423,14 @@ def bitpack(data, bit_axis=2, pack_type="uint32", name="BitPack"): - r"""Tensor packing for bitserial operations. + """Tensor packing for bitserial operations. + The values along the input tensor's pack_axis are quantized - and packed together into the specified pack_type in a new - bit axis. + and packed together into the specified pack_type in a new bit axis. - For example, consider bitpacking with data to be a tensor with shape [1, 64, 128, 128], + For example, consider bitpacking with data to be a tensor with shape `[1, 64, 128, 128]`, pack_axis=1, bit_axis=4, pack_type=uint8, and bits=2. The output in this case will - be of shape [1, 8, 128, 128, 2]. The dimension of axis 1 has been reduced by a factor + be of shape `[1, 8, 128, 128, 2]`. The dimension of axis 1 has been reduced by a factor of 8 since each value is packed into an 8-bit uint8. Axis 4 is now two bitplanes representing the quantized value of the incoming data. 
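As a quick sanity check of the shape arithmetic described in this docstring, a standalone NumPy sketch; bitpack_out_shape is a hypothetical helper written for illustration, not part of TOPI:

    import numpy as np

    def bitpack_out_shape(ishape, pack_axis, bit_axis, bits, pack_dtype):
        lanes = np.dtype(pack_dtype).itemsize * 8   # values packed per word
        oshape = list(ishape)
        oshape[pack_axis] //= lanes                 # pack_axis shrinks by the word width
        oshape.insert(bit_axis, bits)               # new axis holds one plane per bit
        return tuple(oshape)

    # matches the example above: [1, 64, 128, 128] -> [1, 8, 128, 128, 2]
    assert bitpack_out_shape((1, 64, 128, 128), 1, 4, 2, "uint8") == (1, 8, 128, 128, 2)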
The output tensor is now ready to be used in a bitserial operation. diff --git a/python/tvm/relay/op/strategy/arm_cpu.py b/python/tvm/relay/op/strategy/arm_cpu.py index 84579651af0f..8143cc56495a 100644 --- a/python/tvm/relay/op/strategy/arm_cpu.py +++ b/python/tvm/relay/op/strategy/arm_cpu.py @@ -19,7 +19,7 @@ import re import logging -import topi +from tvm import topi from ....target import arm_isa from .generic import * from .. import op as _op diff --git a/python/tvm/relay/op/strategy/bifrost.py b/python/tvm/relay/op/strategy/bifrost.py index a96463fa6ffa..c975c36fcd63 100644 --- a/python/tvm/relay/op/strategy/bifrost.py +++ b/python/tvm/relay/op/strategy/bifrost.py @@ -17,7 +17,7 @@ """Definition of bifrost operator strategy.""" # pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import import re -import topi +from tvm import topi from .generic import * from .. import op as _op diff --git a/python/tvm/relay/op/strategy/cuda.py b/python/tvm/relay/op/strategy/cuda.py index d626a9d9da04..21c3c8394750 100644 --- a/python/tvm/relay/op/strategy/cuda.py +++ b/python/tvm/relay/op/strategy/cuda.py @@ -16,7 +16,7 @@ # under the License. """Definition of CUDA/GPU operator strategy.""" # pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import -import topi +from tvm import topi import tvm from tvm.te import SpecializedCondition from tvm.contrib import nvcc diff --git a/python/tvm/relay/op/strategy/generic.py b/python/tvm/relay/op/strategy/generic.py index 62c2948b51e2..bc54577d3fca 100644 --- a/python/tvm/relay/op/strategy/generic.py +++ b/python/tvm/relay/op/strategy/generic.py @@ -19,8 +19,8 @@ import logging import re -import topi -from topi.util import get_const_int, get_const_float, get_const_tuple, get_float_tuple +from tvm import topi +from tvm.topi.util import get_const_int, get_const_float, get_const_tuple, get_float_tuple from .. import op as _op from ....target import generic_func, override_native_generic_func diff --git a/python/tvm/relay/op/strategy/hls.py b/python/tvm/relay/op/strategy/hls.py index d41e85fc484c..90495fb62941 100644 --- a/python/tvm/relay/op/strategy/hls.py +++ b/python/tvm/relay/op/strategy/hls.py @@ -16,7 +16,7 @@ # under the License. """Definition of HLS operator strategy.""" # pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import -import topi +from tvm import topi from .generic import * from .. import op as _op diff --git a/python/tvm/relay/op/strategy/intel_graphics.py b/python/tvm/relay/op/strategy/intel_graphics.py index 0ea8d85e3530..568cbff005fa 100644 --- a/python/tvm/relay/op/strategy/intel_graphics.py +++ b/python/tvm/relay/op/strategy/intel_graphics.py @@ -16,7 +16,7 @@ # under the License. """Definition of x86 operator strategy.""" # pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import -import topi +from tvm import topi from .generic import * from .. import op as _op diff --git a/python/tvm/relay/op/strategy/mali.py b/python/tvm/relay/op/strategy/mali.py index 5e4a7e5669d2..84af2037ca6b 100644 --- a/python/tvm/relay/op/strategy/mali.py +++ b/python/tvm/relay/op/strategy/mali.py @@ -17,7 +17,7 @@ """Definition of mali operator strategy.""" # pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import import re -import topi +from tvm import topi from .generic import * from .. 
import op as _op diff --git a/python/tvm/relay/op/strategy/rocm.py b/python/tvm/relay/op/strategy/rocm.py index a80b6ca9234a..e70298a12a89 100644 --- a/python/tvm/relay/op/strategy/rocm.py +++ b/python/tvm/relay/op/strategy/rocm.py @@ -16,7 +16,7 @@ # under the License. """Definition of ROCm operator strategy.""" # pylint: disable=invalid-name,unused-argument,unused-wildcard-import,wildcard-import -import topi +from tvm import topi from .generic import * from .. import op as _op diff --git a/python/tvm/relay/op/strategy/x86.py b/python/tvm/relay/op/strategy/x86.py index b02db416bdc8..eb5b5a5111bd 100644 --- a/python/tvm/relay/op/strategy/x86.py +++ b/python/tvm/relay/op/strategy/x86.py @@ -19,7 +19,7 @@ import logging import re -import topi +from tvm import topi from tvm.te import SpecializedCondition from .generic import * from .. import op as _op diff --git a/python/tvm/relay/op/vision/_rcnn.py b/python/tvm/relay/op/vision/_rcnn.py index 16468e5eabc7..6f5097df49d2 100644 --- a/python/tvm/relay/op/vision/_rcnn.py +++ b/python/tvm/relay/op/vision/_rcnn.py @@ -16,8 +16,8 @@ # under the License. # pylint: disable=invalid-name, unused-argument """Faster R-CNN and Mask R-CNN operations.""" -import topi -from topi.util import get_const_tuple +from tvm import topi +from tvm.topi.util import get_const_tuple from .. import op as reg from .. import strategy from ..op import OpPattern diff --git a/python/tvm/relay/op/vision/_vision.py b/python/tvm/relay/op/vision/_vision.py index f6c4f811f13d..c94cb5a5adb8 100644 --- a/python/tvm/relay/op/vision/_vision.py +++ b/python/tvm/relay/op/vision/_vision.py @@ -18,7 +18,7 @@ """Definition of vision ops""" from __future__ import absolute_import -import topi +from tvm import topi from tvm.te.hybrid import script from .. import op as reg from .. import strategy diff --git a/python/tvm/relay/quantize/_annotate.py b/python/tvm/relay/quantize/_annotate.py index 952a86466300..0bccacd1eb5e 100644 --- a/python/tvm/relay/quantize/_annotate.py +++ b/python/tvm/relay/quantize/_annotate.py @@ -17,7 +17,7 @@ #pylint: disable=unused-argument,inconsistent-return-statements """Internal module for registering attribute for annotation.""" import warnings -import topi +from tvm import topi import tvm._ffi from tvm.relay.op import op as _reg from .. import expr as _expr diff --git a/python/tvm/te/hybrid/util.py b/python/tvm/te/hybrid/util.py index 891d7baf893e..213a48e6d834 100644 --- a/python/tvm/te/hybrid/util.py +++ b/python/tvm/te/hybrid/util.py @@ -14,7 +14,7 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -"""Internal utilities for parsing Python subset to HalideIR""" +"""Internal utilities for parsing Python subset to TIR""" import ast import inspect diff --git a/topi/python/topi/__init__.py b/python/tvm/topi/__init__.py similarity index 97% rename from topi/python/topi/__init__.py rename to python/tvm/topi/__init__.py index f308aa634ec3..c17b6fd00bf0 100644 --- a/topi/python/topi/__init__.py +++ b/python/tvm/topi/__init__.py @@ -24,8 +24,6 @@ Some of the schedule function may have been specially optimized for a specific workload. 
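The pattern running through all of the Python hunks above is that TOPI is no longer a standalone top-level package: "import topi" becomes "from tvm import topi", and the C++ headers receive the parallel treatment (include paths move from topi/... to tvm/topi/..., header guards gain a TVM_ prefix, and the contents are wrapped in namespace tvm). For downstream code the migration is mechanical; a minimal sketch, assuming a TVM build that contains this change:

    # Before the move (standalone package):
    #   import topi
    #   from topi.util import get_const_tuple
    # After the move into the tvm namespace:
    from tvm import topi
    from tvm.topi.util import get_const_tuple

    # Call sites need no further changes once the imports are updated, e.g.:
    # shape = get_const_tuple(tensor.shape)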
""" -from __future__ import absolute_import as _abs - from tvm._ffi.libinfo import __version__ # Ensure C++ schedules get registered first, so python schedules can diff --git a/topi/python/topi/argwhere.py b/python/tvm/topi/argwhere.py similarity index 100% rename from topi/python/topi/argwhere.py rename to python/tvm/topi/argwhere.py diff --git a/topi/python/topi/arm_cpu/__init__.py b/python/tvm/topi/arm_cpu/__init__.py similarity index 100% rename from topi/python/topi/arm_cpu/__init__.py rename to python/tvm/topi/arm_cpu/__init__.py diff --git a/topi/python/topi/arm_cpu/bitserial_conv2d.py b/python/tvm/topi/arm_cpu/bitserial_conv2d.py similarity index 100% rename from topi/python/topi/arm_cpu/bitserial_conv2d.py rename to python/tvm/topi/arm_cpu/bitserial_conv2d.py diff --git a/topi/python/topi/arm_cpu/bitserial_dense.py b/python/tvm/topi/arm_cpu/bitserial_dense.py similarity index 99% rename from topi/python/topi/arm_cpu/bitserial_dense.py rename to python/tvm/topi/arm_cpu/bitserial_dense.py index beed79da49d0..c7aa5674e431 100644 --- a/topi/python/topi/arm_cpu/bitserial_dense.py +++ b/python/tvm/topi/arm_cpu/bitserial_dense.py @@ -20,7 +20,7 @@ import tvm from tvm import te from tvm import autotvm -from topi.util import get_const_tuple +from tvm.topi.util import get_const_tuple from .. import tag from .bitserial_conv2d import _intrin_popcount from ..nn.pad import pad diff --git a/topi/python/topi/arm_cpu/conv2d.py b/python/tvm/topi/arm_cpu/conv2d.py similarity index 100% rename from topi/python/topi/arm_cpu/conv2d.py rename to python/tvm/topi/arm_cpu/conv2d.py diff --git a/topi/python/topi/arm_cpu/conv2d_alter_op.py b/python/tvm/topi/arm_cpu/conv2d_alter_op.py similarity index 100% rename from topi/python/topi/arm_cpu/conv2d_alter_op.py rename to python/tvm/topi/arm_cpu/conv2d_alter_op.py diff --git a/topi/python/topi/arm_cpu/conv2d_gemm.py b/python/tvm/topi/arm_cpu/conv2d_gemm.py similarity index 99% rename from topi/python/topi/arm_cpu/conv2d_gemm.py rename to python/tvm/topi/arm_cpu/conv2d_gemm.py index e97de56a0b65..c8e1a5a7b378 100644 --- a/topi/python/topi/arm_cpu/conv2d_gemm.py +++ b/python/tvm/topi/arm_cpu/conv2d_gemm.py @@ -19,7 +19,7 @@ """GEMM Convolution schedule on ARM""" import tvm from tvm import te -from topi import nn +from tvm.topi import nn from ..util import get_const_tuple from ..nn.util import get_pad_tuple from .tensor_intrin import gemv_quantized, gemv_quantized_impl diff --git a/topi/python/topi/arm_cpu/conv2d_int8.py b/python/tvm/topi/arm_cpu/conv2d_int8.py similarity index 100% rename from topi/python/topi/arm_cpu/conv2d_int8.py rename to python/tvm/topi/arm_cpu/conv2d_int8.py diff --git a/topi/python/topi/arm_cpu/conv2d_spatial_pack.py b/python/tvm/topi/arm_cpu/conv2d_spatial_pack.py similarity index 100% rename from topi/python/topi/arm_cpu/conv2d_spatial_pack.py rename to python/tvm/topi/arm_cpu/conv2d_spatial_pack.py diff --git a/topi/python/topi/arm_cpu/conv2d_transpose.py b/python/tvm/topi/arm_cpu/conv2d_transpose.py similarity index 100% rename from topi/python/topi/arm_cpu/conv2d_transpose.py rename to python/tvm/topi/arm_cpu/conv2d_transpose.py diff --git a/topi/python/topi/arm_cpu/cortex_m7/__init__.py b/python/tvm/topi/arm_cpu/cortex_m7/__init__.py similarity index 100% rename from topi/python/topi/arm_cpu/cortex_m7/__init__.py rename to python/tvm/topi/arm_cpu/cortex_m7/__init__.py diff --git a/topi/python/topi/arm_cpu/cortex_m7/conv2d/__init__.py b/python/tvm/topi/arm_cpu/cortex_m7/conv2d/__init__.py similarity index 100% rename from 
topi/python/topi/arm_cpu/cortex_m7/conv2d/__init__.py rename to python/tvm/topi/arm_cpu/cortex_m7/conv2d/__init__.py diff --git a/topi/python/topi/arm_cpu/cortex_m7/conv2d/direct.py b/python/tvm/topi/arm_cpu/cortex_m7/conv2d/direct.py similarity index 98% rename from topi/python/topi/arm_cpu/cortex_m7/conv2d/direct.py rename to python/tvm/topi/arm_cpu/cortex_m7/conv2d/direct.py index 7d3e945fef14..3f1a5ff98d88 100644 --- a/topi/python/topi/arm_cpu/cortex_m7/conv2d/direct.py +++ b/python/tvm/topi/arm_cpu/cortex_m7/conv2d/direct.py @@ -20,8 +20,8 @@ import tvm from tvm import autotvm from tvm.autotvm.task import deserialize_args -from topi.nn.conv2d import conv2d_nchw, conv2d_nhwc -from topi.util import get_const_tuple, get_const_int, traverse_inline +from tvm.topi.nn.conv2d import conv2d_nchw, conv2d_nhwc +from tvm.topi.util import get_const_tuple, get_const_int, traverse_inline def conv2d_direct(*args, **kwargs): """Schedule function for directly-scheduled conv2d.""" diff --git a/topi/python/topi/arm_cpu/cortex_m7/conv2d/direct_simd.py b/python/tvm/topi/arm_cpu/cortex_m7/conv2d/direct_simd.py similarity index 98% rename from topi/python/topi/arm_cpu/cortex_m7/conv2d/direct_simd.py rename to python/tvm/topi/arm_cpu/cortex_m7/conv2d/direct_simd.py index fd411251272e..5be9b4c0ca3b 100644 --- a/topi/python/topi/arm_cpu/cortex_m7/conv2d/direct_simd.py +++ b/python/tvm/topi/arm_cpu/cortex_m7/conv2d/direct_simd.py @@ -20,9 +20,9 @@ from tvm import autotvm from tvm.autotvm.task import deserialize_args from tvm import te -from topi.util import simplify, traverse_inline -from topi.nn.pad import pad -from topi.nn.util import get_pad_tuple +from tvm.topi.util import simplify, traverse_inline +from tvm.topi.nn.pad import pad +from tvm.topi.nn.util import get_pad_tuple from ..micro_kernel.gemm import ( intrin_gemm_MxKxN, gemm_MxKxN_impl, diff --git a/topi/python/topi/arm_cpu/cortex_m7/micro_kernel/__init__.py b/python/tvm/topi/arm_cpu/cortex_m7/micro_kernel/__init__.py similarity index 100% rename from topi/python/topi/arm_cpu/cortex_m7/micro_kernel/__init__.py rename to python/tvm/topi/arm_cpu/cortex_m7/micro_kernel/__init__.py diff --git a/topi/python/topi/arm_cpu/cortex_m7/micro_kernel/gemm.py b/python/tvm/topi/arm_cpu/cortex_m7/micro_kernel/gemm.py similarity index 100% rename from topi/python/topi/arm_cpu/cortex_m7/micro_kernel/gemm.py rename to python/tvm/topi/arm_cpu/cortex_m7/micro_kernel/gemm.py diff --git a/topi/python/topi/arm_cpu/depthwise_conv2d.py b/python/tvm/topi/arm_cpu/depthwise_conv2d.py similarity index 100% rename from topi/python/topi/arm_cpu/depthwise_conv2d.py rename to python/tvm/topi/arm_cpu/depthwise_conv2d.py diff --git a/topi/python/topi/arm_cpu/injective.py b/python/tvm/topi/arm_cpu/injective.py similarity index 100% rename from topi/python/topi/arm_cpu/injective.py rename to python/tvm/topi/arm_cpu/injective.py diff --git a/topi/python/topi/arm_cpu/tensor_intrin.py b/python/tvm/topi/arm_cpu/tensor_intrin.py similarity index 100% rename from topi/python/topi/arm_cpu/tensor_intrin.py rename to python/tvm/topi/arm_cpu/tensor_intrin.py diff --git a/topi/python/topi/bifrost/__init__.py b/python/tvm/topi/bifrost/__init__.py similarity index 100% rename from topi/python/topi/bifrost/__init__.py rename to python/tvm/topi/bifrost/__init__.py diff --git a/topi/python/topi/bifrost/conv2d.py b/python/tvm/topi/bifrost/conv2d.py similarity index 100% rename from topi/python/topi/bifrost/conv2d.py rename to python/tvm/topi/bifrost/conv2d.py diff --git a/topi/python/topi/bifrost/dense.py 
b/python/tvm/topi/bifrost/dense.py similarity index 100% rename from topi/python/topi/bifrost/dense.py rename to python/tvm/topi/bifrost/dense.py diff --git a/topi/python/topi/bifrost/depthwise_conv2d.py b/python/tvm/topi/bifrost/depthwise_conv2d.py similarity index 100% rename from topi/python/topi/bifrost/depthwise_conv2d.py rename to python/tvm/topi/bifrost/depthwise_conv2d.py diff --git a/topi/python/topi/bifrost/gemm.py b/python/tvm/topi/bifrost/gemm.py similarity index 100% rename from topi/python/topi/bifrost/gemm.py rename to python/tvm/topi/bifrost/gemm.py diff --git a/topi/python/topi/bifrost/transforms.py b/python/tvm/topi/bifrost/transforms.py similarity index 100% rename from topi/python/topi/bifrost/transforms.py rename to python/tvm/topi/bifrost/transforms.py diff --git a/topi/python/topi/broadcast.py b/python/tvm/topi/broadcast.py similarity index 100% rename from topi/python/topi/broadcast.py rename to python/tvm/topi/broadcast.py diff --git a/topi/python/topi/cpp/__init__.py b/python/tvm/topi/cpp/__init__.py similarity index 100% rename from topi/python/topi/cpp/__init__.py rename to python/tvm/topi/cpp/__init__.py diff --git a/topi/python/topi/cpp/cuda.py b/python/tvm/topi/cpp/cuda.py similarity index 94% rename from topi/python/topi/cpp/cuda.py rename to python/tvm/topi/cpp/cuda.py index efc31e82e519..ce2efa929824 100644 --- a/topi/python/topi/cpp/cuda.py +++ b/python/tvm/topi/cpp/cuda.py @@ -17,4 +17,4 @@ """FFI for CUDA TOPI ops and schedules""" import tvm._ffi -tvm._ffi._init_api("topi.cuda", "topi.cpp.cuda") +tvm._ffi._init_api("topi.cuda", "tvm.topi.cpp.cuda") diff --git a/topi/python/topi/cpp/generic.py b/python/tvm/topi/cpp/generic.py similarity index 93% rename from topi/python/topi/cpp/generic.py rename to python/tvm/topi/cpp/generic.py index e6bf250cb85c..d314eca8b22d 100644 --- a/topi/python/topi/cpp/generic.py +++ b/python/tvm/topi/cpp/generic.py @@ -17,4 +17,4 @@ """FFI for generic TOPI ops and schedules""" import tvm._ffi -tvm._ffi._init_api("topi.generic", "topi.cpp.generic") +tvm._ffi._init_api("topi.generic", "tvm.topi.cpp.generic") diff --git a/topi/python/topi/cpp/impl.py b/python/tvm/topi/cpp/impl.py similarity index 50% rename from topi/python/topi/cpp/impl.py rename to python/tvm/topi/cpp/impl.py index 1081baa716b7..2c877c300dc9 100644 --- a/topi/python/topi/cpp/impl.py +++ b/python/tvm/topi/cpp/impl.py @@ -15,30 +15,6 @@ # specific language governing permissions and limitations # under the License. 
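In the _init_api hunks above (cpp/cuda.py, cpp/generic.py) and in the impl.py hunk that follows, the first argument is the prefix under which the C++ PackedFuncs are registered globally, and the second names the Python module they get attached to, which now lives under tvm.topi.cpp. A sketch of how a C++-backed TOPI schedule is reached after this change (assumes a libtvm build that includes it; the schedule call is left commented as an illustration):

    import tvm
    from tvm.topi import cpp   # C++ TOPI ops surface here after _init_api runs

    # Functions registered from C++ under "topi.cuda.*" appear as attributes
    # of tvm.topi.cpp.cuda, e.g.:
    # sched = cpp.cuda.schedule_injective(target, outs)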
"""Load Lib for C++ TOPI ops and schedules""" -import sys -import os -import ctypes import tvm._ffi -from tvm._ffi import libinfo - -def _get_lib_names(): - if sys.platform.startswith('win32'): - return ['libtvm_topi.dll', 'tvm_topi.dll'] - if sys.platform.startswith('darwin'): - return ['libtvm_topi.dylib', 'tvm_topi.dylib'] - return ['libtvm_topi.so', 'tvm_topi.so'] - -def _load_lib(): - """Load libary by searching possible path.""" - curr_path = os.path.dirname(os.path.realpath(os.path.expanduser(__file__))) - lib_search = [curr_path, os.path.dirname(curr_path)] - lib_path = libinfo.find_lib_path(_get_lib_names(), lib_search, optional=True) - if lib_path is None: - return None, None - lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_GLOBAL) - return lib, os.path.basename(lib_path[0]) - -_LIB, _LIB_NAME = _load_lib() - -tvm._ffi._init_api("topi", "topi.cpp") +tvm._ffi._init_api("topi", "tvm.topi.cpp") diff --git a/topi/python/topi/cpp/nn.py b/python/tvm/topi/cpp/nn.py similarity index 94% rename from topi/python/topi/cpp/nn.py rename to python/tvm/topi/cpp/nn.py index d11aa27b2c84..0e3cee703de0 100644 --- a/topi/python/topi/cpp/nn.py +++ b/python/tvm/topi/cpp/nn.py @@ -17,4 +17,4 @@ """FFI for NN TOPI ops and schedules""" import tvm._ffi -tvm._ffi._init_api("topi.nn", "topi.cpp.nn") +tvm._ffi._init_api("topi.nn", "tvm.topi.cpp.nn") diff --git a/topi/python/topi/cpp/rocm.py b/python/tvm/topi/cpp/rocm.py similarity index 94% rename from topi/python/topi/cpp/rocm.py rename to python/tvm/topi/cpp/rocm.py index c001a61d1ea5..eab51107beb7 100644 --- a/topi/python/topi/cpp/rocm.py +++ b/python/tvm/topi/cpp/rocm.py @@ -17,4 +17,4 @@ """FFI for Rocm TOPI ops and schedules""" import tvm._ffi -tvm._ffi._init_api("topi.rocm", "topi.cpp.rocm") +tvm._ffi._init_api("topi.rocm", "tvm.topi.cpp.rocm") diff --git a/topi/python/topi/cpp/util.py b/python/tvm/topi/cpp/util.py similarity index 94% rename from topi/python/topi/cpp/util.py rename to python/tvm/topi/cpp/util.py index cc76dd9339c6..ca0b86e5a353 100644 --- a/topi/python/topi/cpp/util.py +++ b/python/tvm/topi/cpp/util.py @@ -17,4 +17,4 @@ """FFI for TOPI utility functions""" import tvm._ffi -tvm._ffi._init_api("topi.util", "topi.cpp.util") +tvm._ffi._init_api("topi.util", "tvm.topi.cpp.util") diff --git a/topi/python/topi/cpp/vision/__init__.py b/python/tvm/topi/cpp/vision/__init__.py similarity index 93% rename from topi/python/topi/cpp/vision/__init__.py rename to python/tvm/topi/cpp/vision/__init__.py index 6034e271bc0e..000602fb399d 100644 --- a/topi/python/topi/cpp/vision/__init__.py +++ b/python/tvm/topi/cpp/vision/__init__.py @@ -20,4 +20,4 @@ from . 
import yolo -tvm._ffi._init_api("topi.vision", "topi.cpp.vision") +tvm._ffi._init_api("topi.vision", "tvm.topi.cpp.vision") diff --git a/topi/python/topi/cpp/vision/yolo.py b/python/tvm/topi/cpp/vision/yolo.py similarity index 92% rename from topi/python/topi/cpp/vision/yolo.py rename to python/tvm/topi/cpp/vision/yolo.py index ff12498057d9..17e2327295d2 100644 --- a/topi/python/topi/cpp/vision/yolo.py +++ b/python/tvm/topi/cpp/vision/yolo.py @@ -17,4 +17,4 @@ """FFI for Yolo TOPI ops and schedules""" import tvm._ffi -tvm._ffi._init_api("topi.vision.yolo", "topi.cpp.vision.yolo") +tvm._ffi._init_api("topi.vision.yolo", "tvm.topi.cpp.vision.yolo") diff --git a/topi/python/topi/cpp/x86.py b/python/tvm/topi/cpp/x86.py similarity index 94% rename from topi/python/topi/cpp/x86.py rename to python/tvm/topi/cpp/x86.py index 0681ffed2ff5..0034af02c572 100644 --- a/topi/python/topi/cpp/x86.py +++ b/python/tvm/topi/cpp/x86.py @@ -17,4 +17,4 @@ """FFI for x86 TOPI ops and schedules""" import tvm._ffi -tvm._ffi._init_api("topi.x86", "topi.cpp.x86") +tvm._ffi._init_api("topi.x86", "tvm.topi.cpp.x86") diff --git a/topi/python/topi/cuda/__init__.py b/python/tvm/topi/cuda/__init__.py similarity index 100% rename from topi/python/topi/cuda/__init__.py rename to python/tvm/topi/cuda/__init__.py diff --git a/topi/python/topi/cuda/batch_matmul.py b/python/tvm/topi/cuda/batch_matmul.py similarity index 100% rename from topi/python/topi/cuda/batch_matmul.py rename to python/tvm/topi/cuda/batch_matmul.py diff --git a/topi/python/topi/cuda/conv1d.py b/python/tvm/topi/cuda/conv1d.py similarity index 100% rename from topi/python/topi/cuda/conv1d.py rename to python/tvm/topi/cuda/conv1d.py diff --git a/topi/python/topi/cuda/conv1d_transpose_ncw.py b/python/tvm/topi/cuda/conv1d_transpose_ncw.py similarity index 100% rename from topi/python/topi/cuda/conv1d_transpose_ncw.py rename to python/tvm/topi/cuda/conv1d_transpose_ncw.py diff --git a/topi/python/topi/cuda/conv2d.py b/python/tvm/topi/cuda/conv2d.py similarity index 100% rename from topi/python/topi/cuda/conv2d.py rename to python/tvm/topi/cuda/conv2d.py diff --git a/topi/python/topi/cuda/conv2d_alter_op.py b/python/tvm/topi/cuda/conv2d_alter_op.py similarity index 100% rename from topi/python/topi/cuda/conv2d_alter_op.py rename to python/tvm/topi/cuda/conv2d_alter_op.py diff --git a/topi/python/topi/cuda/conv2d_direct.py b/python/tvm/topi/cuda/conv2d_direct.py similarity index 100% rename from topi/python/topi/cuda/conv2d_direct.py rename to python/tvm/topi/cuda/conv2d_direct.py diff --git a/topi/python/topi/cuda/conv2d_hwcn.py b/python/tvm/topi/cuda/conv2d_hwcn.py similarity index 100% rename from topi/python/topi/cuda/conv2d_hwcn.py rename to python/tvm/topi/cuda/conv2d_hwcn.py diff --git a/topi/python/topi/cuda/conv2d_int8.py b/python/tvm/topi/cuda/conv2d_int8.py similarity index 100% rename from topi/python/topi/cuda/conv2d_int8.py rename to python/tvm/topi/cuda/conv2d_int8.py diff --git a/topi/python/topi/cuda/conv2d_nhwc.py b/python/tvm/topi/cuda/conv2d_nhwc.py similarity index 100% rename from topi/python/topi/cuda/conv2d_nhwc.py rename to python/tvm/topi/cuda/conv2d_nhwc.py diff --git a/topi/python/topi/cuda/conv2d_nhwc_tensorcore.py b/python/tvm/topi/cuda/conv2d_nhwc_tensorcore.py similarity index 100% rename from topi/python/topi/cuda/conv2d_nhwc_tensorcore.py rename to python/tvm/topi/cuda/conv2d_nhwc_tensorcore.py diff --git a/topi/python/topi/cuda/conv2d_nhwc_winograd.py b/python/tvm/topi/cuda/conv2d_nhwc_winograd.py similarity index 100% rename 
from topi/python/topi/cuda/conv2d_nhwc_winograd.py rename to python/tvm/topi/cuda/conv2d_nhwc_winograd.py diff --git a/topi/python/topi/cuda/conv2d_transpose_nchw.py b/python/tvm/topi/cuda/conv2d_transpose_nchw.py similarity index 100% rename from topi/python/topi/cuda/conv2d_transpose_nchw.py rename to python/tvm/topi/cuda/conv2d_transpose_nchw.py diff --git a/topi/python/topi/cuda/conv2d_winograd.py b/python/tvm/topi/cuda/conv2d_winograd.py similarity index 100% rename from topi/python/topi/cuda/conv2d_winograd.py rename to python/tvm/topi/cuda/conv2d_winograd.py diff --git a/topi/python/topi/cuda/conv3d.py b/python/tvm/topi/cuda/conv3d.py similarity index 100% rename from topi/python/topi/cuda/conv3d.py rename to python/tvm/topi/cuda/conv3d.py diff --git a/topi/python/topi/cuda/conv3d_alter_op.py b/python/tvm/topi/cuda/conv3d_alter_op.py similarity index 100% rename from topi/python/topi/cuda/conv3d_alter_op.py rename to python/tvm/topi/cuda/conv3d_alter_op.py diff --git a/topi/python/topi/cuda/conv3d_direct.py b/python/tvm/topi/cuda/conv3d_direct.py similarity index 100% rename from topi/python/topi/cuda/conv3d_direct.py rename to python/tvm/topi/cuda/conv3d_direct.py diff --git a/topi/python/topi/cuda/conv3d_ndhwc_tensorcore.py b/python/tvm/topi/cuda/conv3d_ndhwc_tensorcore.py similarity index 100% rename from topi/python/topi/cuda/conv3d_ndhwc_tensorcore.py rename to python/tvm/topi/cuda/conv3d_ndhwc_tensorcore.py diff --git a/topi/python/topi/cuda/conv3d_transpose_ncdhw.py b/python/tvm/topi/cuda/conv3d_transpose_ncdhw.py similarity index 100% rename from topi/python/topi/cuda/conv3d_transpose_ncdhw.py rename to python/tvm/topi/cuda/conv3d_transpose_ncdhw.py diff --git a/topi/python/topi/cuda/conv3d_winograd.py b/python/tvm/topi/cuda/conv3d_winograd.py similarity index 100% rename from topi/python/topi/cuda/conv3d_winograd.py rename to python/tvm/topi/cuda/conv3d_winograd.py diff --git a/topi/python/topi/cuda/correlation.py b/python/tvm/topi/cuda/correlation.py similarity index 100% rename from topi/python/topi/cuda/correlation.py rename to python/tvm/topi/cuda/correlation.py diff --git a/topi/python/topi/cuda/deformable_conv2d.py b/python/tvm/topi/cuda/deformable_conv2d.py similarity index 100% rename from topi/python/topi/cuda/deformable_conv2d.py rename to python/tvm/topi/cuda/deformable_conv2d.py diff --git a/topi/python/topi/cuda/dense.py b/python/tvm/topi/cuda/dense.py similarity index 100% rename from topi/python/topi/cuda/dense.py rename to python/tvm/topi/cuda/dense.py diff --git a/topi/python/topi/cuda/dense_tensorcore.py b/python/tvm/topi/cuda/dense_tensorcore.py similarity index 100% rename from topi/python/topi/cuda/dense_tensorcore.py rename to python/tvm/topi/cuda/dense_tensorcore.py diff --git a/topi/python/topi/cuda/depthwise_conv2d.py b/python/tvm/topi/cuda/depthwise_conv2d.py similarity index 100% rename from topi/python/topi/cuda/depthwise_conv2d.py rename to python/tvm/topi/cuda/depthwise_conv2d.py diff --git a/topi/python/topi/cuda/group_conv2d_nchw.py b/python/tvm/topi/cuda/group_conv2d_nchw.py similarity index 100% rename from topi/python/topi/cuda/group_conv2d_nchw.py rename to python/tvm/topi/cuda/group_conv2d_nchw.py diff --git a/topi/python/topi/cuda/injective.py b/python/tvm/topi/cuda/injective.py similarity index 100% rename from topi/python/topi/cuda/injective.py rename to python/tvm/topi/cuda/injective.py diff --git a/topi/python/topi/cuda/nms.py b/python/tvm/topi/cuda/nms.py similarity index 100% rename from topi/python/topi/cuda/nms.py rename to 
python/tvm/topi/cuda/nms.py diff --git a/topi/python/topi/cuda/nn.py b/python/tvm/topi/cuda/nn.py similarity index 100% rename from topi/python/topi/cuda/nn.py rename to python/tvm/topi/cuda/nn.py diff --git a/topi/python/topi/cuda/pooling.py b/python/tvm/topi/cuda/pooling.py similarity index 100% rename from topi/python/topi/cuda/pooling.py rename to python/tvm/topi/cuda/pooling.py diff --git a/topi/python/topi/cuda/rcnn/__init__.py b/python/tvm/topi/cuda/rcnn/__init__.py similarity index 100% rename from topi/python/topi/cuda/rcnn/__init__.py rename to python/tvm/topi/cuda/rcnn/__init__.py diff --git a/topi/python/topi/cuda/rcnn/proposal.py b/python/tvm/topi/cuda/rcnn/proposal.py similarity index 100% rename from topi/python/topi/cuda/rcnn/proposal.py rename to python/tvm/topi/cuda/rcnn/proposal.py diff --git a/topi/python/topi/cuda/reduction.py b/python/tvm/topi/cuda/reduction.py similarity index 100% rename from topi/python/topi/cuda/reduction.py rename to python/tvm/topi/cuda/reduction.py diff --git a/topi/python/topi/cuda/softmax.py b/python/tvm/topi/cuda/softmax.py similarity index 100% rename from topi/python/topi/cuda/softmax.py rename to python/tvm/topi/cuda/softmax.py diff --git a/topi/python/topi/cuda/sort.py b/python/tvm/topi/cuda/sort.py similarity index 100% rename from topi/python/topi/cuda/sort.py rename to python/tvm/topi/cuda/sort.py diff --git a/topi/python/topi/cuda/sparse.py b/python/tvm/topi/cuda/sparse.py similarity index 100% rename from topi/python/topi/cuda/sparse.py rename to python/tvm/topi/cuda/sparse.py diff --git a/topi/python/topi/cuda/ssd/__init__.py b/python/tvm/topi/cuda/ssd/__init__.py similarity index 100% rename from topi/python/topi/cuda/ssd/__init__.py rename to python/tvm/topi/cuda/ssd/__init__.py diff --git a/topi/python/topi/cuda/ssd/multibox.py b/python/tvm/topi/cuda/ssd/multibox.py similarity index 99% rename from topi/python/topi/cuda/ssd/multibox.py rename to python/tvm/topi/cuda/ssd/multibox.py index 22d74438188c..541af0653a15 100644 --- a/topi/python/topi/cuda/ssd/multibox.py +++ b/python/tvm/topi/cuda/ssd/multibox.py @@ -21,7 +21,7 @@ from tvm import te from tvm.tir import if_then_else, exp -import topi +from tvm import topi from ..nms import non_max_suppression diff --git a/topi/python/topi/cuda/tensor_intrin.py b/python/tvm/topi/cuda/tensor_intrin.py similarity index 100% rename from topi/python/topi/cuda/tensor_intrin.py rename to python/tvm/topi/cuda/tensor_intrin.py diff --git a/topi/python/topi/cuda/vision.py b/python/tvm/topi/cuda/vision.py similarity index 100% rename from topi/python/topi/cuda/vision.py rename to python/tvm/topi/cuda/vision.py diff --git a/topi/python/topi/generic/__init__.py b/python/tvm/topi/generic/__init__.py similarity index 100% rename from topi/python/topi/generic/__init__.py rename to python/tvm/topi/generic/__init__.py diff --git a/topi/python/topi/generic/conv2d.py b/python/tvm/topi/generic/conv2d.py similarity index 100% rename from topi/python/topi/generic/conv2d.py rename to python/tvm/topi/generic/conv2d.py diff --git a/topi/python/topi/generic/default.py b/python/tvm/topi/generic/default.py similarity index 100% rename from topi/python/topi/generic/default.py rename to python/tvm/topi/generic/default.py diff --git a/topi/python/topi/generic/extern.py b/python/tvm/topi/generic/extern.py similarity index 100% rename from topi/python/topi/generic/extern.py rename to python/tvm/topi/generic/extern.py diff --git a/topi/python/topi/generic/image.py b/python/tvm/topi/generic/image.py similarity index 100% 
rename from topi/python/topi/generic/image.py rename to python/tvm/topi/generic/image.py diff --git a/topi/python/topi/generic/injective.py b/python/tvm/topi/generic/injective.py similarity index 100% rename from topi/python/topi/generic/injective.py rename to python/tvm/topi/generic/injective.py diff --git a/topi/python/topi/generic/nn.py b/python/tvm/topi/generic/nn.py similarity index 100% rename from topi/python/topi/generic/nn.py rename to python/tvm/topi/generic/nn.py diff --git a/topi/python/topi/generic/search.py b/python/tvm/topi/generic/search.py similarity index 100% rename from topi/python/topi/generic/search.py rename to python/tvm/topi/generic/search.py diff --git a/topi/python/topi/generic/sort.py b/python/tvm/topi/generic/sort.py similarity index 100% rename from topi/python/topi/generic/sort.py rename to python/tvm/topi/generic/sort.py diff --git a/topi/python/topi/generic/vision.py b/python/tvm/topi/generic/vision.py similarity index 100% rename from topi/python/topi/generic/vision.py rename to python/tvm/topi/generic/vision.py diff --git a/topi/python/topi/generic_op_impl.py b/python/tvm/topi/generic_op_impl.py similarity index 100% rename from topi/python/topi/generic_op_impl.py rename to python/tvm/topi/generic_op_impl.py diff --git a/topi/python/topi/hls/__init__.py b/python/tvm/topi/hls/__init__.py similarity index 100% rename from topi/python/topi/hls/__init__.py rename to python/tvm/topi/hls/__init__.py diff --git a/topi/python/topi/hls/injective.py b/python/tvm/topi/hls/injective.py similarity index 100% rename from topi/python/topi/hls/injective.py rename to python/tvm/topi/hls/injective.py diff --git a/topi/python/topi/hls/nn.py b/python/tvm/topi/hls/nn.py similarity index 100% rename from topi/python/topi/hls/nn.py rename to python/tvm/topi/hls/nn.py diff --git a/topi/python/topi/image/__init__.py b/python/tvm/topi/image/__init__.py similarity index 100% rename from topi/python/topi/image/__init__.py rename to python/tvm/topi/image/__init__.py diff --git a/topi/python/topi/image/dilation2d.py b/python/tvm/topi/image/dilation2d.py similarity index 99% rename from topi/python/topi/image/dilation2d.py rename to python/tvm/topi/image/dilation2d.py index 074ca6c02d08..dd16a21a2977 100644 --- a/topi/python/topi/image/dilation2d.py +++ b/python/tvm/topi/image/dilation2d.py @@ -19,7 +19,7 @@ """Dilation2D operators""" from __future__ import absolute_import as _abs from tvm import te -from topi.util import simplify +from tvm.topi.util import simplify from ..nn.pad import pad from ..nn.util import get_pad_tuple diff --git a/topi/python/topi/image/grid_sample.py b/python/tvm/topi/image/grid_sample.py similarity index 100% rename from topi/python/topi/image/grid_sample.py rename to python/tvm/topi/image/grid_sample.py diff --git a/topi/python/topi/image/resize.py b/python/tvm/topi/image/resize.py similarity index 99% rename from topi/python/topi/image/resize.py rename to python/tvm/topi/image/resize.py index d901babc835b..d6c084503fba 100644 --- a/topi/python/topi/image/resize.py +++ b/python/tvm/topi/image/resize.py @@ -19,7 +19,7 @@ from __future__ import absolute_import import tvm from tvm import te -from topi.util import nchw_pack_layout, nchw_xc_layout +from tvm.topi.util import nchw_pack_layout, nchw_xc_layout from .. 
import tag def get_2d_indices(indices, layout='NCHW'): @@ -664,6 +664,7 @@ def _nearest_neighbor(*indices): def resize3d(data, size, layout="NCDHW", method="nearest_neighbor", coordinate_transformation_mode="align_corners", out_dtype=None): """Perform resize operation on the data. + Parameters ---------- inputs: tvm.te.Tensor @@ -683,6 +684,7 @@ def resize3d(data, size, layout="NCDHW", method="nearest_neighbor", Method to be used for resizing. out_dtype: string, optional Type to return. If left None will be same as input type. + Returns ------- output : tvm.te.Tensor diff --git a/topi/python/topi/intel_graphics/__init__.py b/python/tvm/topi/intel_graphics/__init__.py similarity index 100% rename from topi/python/topi/intel_graphics/__init__.py rename to python/tvm/topi/intel_graphics/__init__.py diff --git a/topi/python/topi/intel_graphics/conv2d.py b/python/tvm/topi/intel_graphics/conv2d.py similarity index 100% rename from topi/python/topi/intel_graphics/conv2d.py rename to python/tvm/topi/intel_graphics/conv2d.py diff --git a/topi/python/topi/intel_graphics/conv2d_alter_op.py b/python/tvm/topi/intel_graphics/conv2d_alter_op.py similarity index 100% rename from topi/python/topi/intel_graphics/conv2d_alter_op.py rename to python/tvm/topi/intel_graphics/conv2d_alter_op.py diff --git a/topi/python/topi/intel_graphics/depthwise_conv2d.py b/python/tvm/topi/intel_graphics/depthwise_conv2d.py similarity index 100% rename from topi/python/topi/intel_graphics/depthwise_conv2d.py rename to python/tvm/topi/intel_graphics/depthwise_conv2d.py diff --git a/topi/python/topi/mali/__init__.py b/python/tvm/topi/mali/__init__.py similarity index 100% rename from topi/python/topi/mali/__init__.py rename to python/tvm/topi/mali/__init__.py diff --git a/topi/python/topi/mali/conv2d.py b/python/tvm/topi/mali/conv2d.py similarity index 100% rename from topi/python/topi/mali/conv2d.py rename to python/tvm/topi/mali/conv2d.py diff --git a/topi/python/topi/mali/dense.py b/python/tvm/topi/mali/dense.py similarity index 100% rename from topi/python/topi/mali/dense.py rename to python/tvm/topi/mali/dense.py diff --git a/topi/python/topi/mali/depthwise_conv2d.py b/python/tvm/topi/mali/depthwise_conv2d.py similarity index 100% rename from topi/python/topi/mali/depthwise_conv2d.py rename to python/tvm/topi/mali/depthwise_conv2d.py diff --git a/topi/python/topi/math.py b/python/tvm/topi/math.py similarity index 100% rename from topi/python/topi/math.py rename to python/tvm/topi/math.py diff --git a/topi/python/topi/nn/__init__.py b/python/tvm/topi/nn/__init__.py similarity index 100% rename from topi/python/topi/nn/__init__.py rename to python/tvm/topi/nn/__init__.py diff --git a/topi/python/topi/nn/batch_matmul.py b/python/tvm/topi/nn/batch_matmul.py similarity index 100% rename from topi/python/topi/nn/batch_matmul.py rename to python/tvm/topi/nn/batch_matmul.py diff --git a/topi/python/topi/nn/bitserial_conv2d.py b/python/tvm/topi/nn/bitserial_conv2d.py similarity index 100% rename from topi/python/topi/nn/bitserial_conv2d.py rename to python/tvm/topi/nn/bitserial_conv2d.py diff --git a/topi/python/topi/nn/bitserial_dense.py b/python/tvm/topi/nn/bitserial_dense.py similarity index 98% rename from topi/python/topi/nn/bitserial_dense.py rename to python/tvm/topi/nn/bitserial_dense.py index 10635d8e9f2c..97d1fb24c32e 100644 --- a/topi/python/topi/nn/bitserial_dense.py +++ b/python/tvm/topi/nn/bitserial_dense.py @@ -19,7 +19,7 @@ from __future__ import absolute_import import tvm from tvm import te -from topi.util import 
get_const_tuple +from tvm.topi.util import get_const_tuple from .bitserial_util import bitpack def bitserial_dense(data, weight, data_bits, weight_bits, pack_dtype='uint32', diff --git a/topi/python/topi/nn/bitserial_util.py b/python/tvm/topi/nn/bitserial_util.py similarity index 95% rename from topi/python/topi/nn/bitserial_util.py rename to python/tvm/topi/nn/bitserial_util.py index a25aa91198d8..2b320b867c2b 100644 --- a/topi/python/topi/nn/bitserial_util.py +++ b/python/tvm/topi/nn/bitserial_util.py @@ -19,15 +19,19 @@ import numpy as np import tvm from tvm import te -from topi.transform import concatenate +from tvm.topi.transform import concatenate from ..util import get_const_int def bitpack(data, bits, pack_axis, bit_axis, pack_type, name="QuantizeInput"): """Packs data into format necessary for bitserial computation + + Parameters + ---------- pack_axis : int index of the axis to pack in data bit_axis : int - index of axis to place bit axis in resulting packed data""" + index of axis to place bit axis in resulting packed data + """ ishape = data.shape n = len(ishape) if pack_type == 'uint8': diff --git a/topi/python/topi/nn/bnn.py b/python/tvm/topi/nn/bnn.py similarity index 100% rename from topi/python/topi/nn/bnn.py rename to python/tvm/topi/nn/bnn.py diff --git a/topi/python/topi/nn/conv1d.py b/python/tvm/topi/nn/conv1d.py similarity index 100% rename from topi/python/topi/nn/conv1d.py rename to python/tvm/topi/nn/conv1d.py diff --git a/topi/python/topi/nn/conv1d_transpose.py b/python/tvm/topi/nn/conv1d_transpose.py similarity index 100% rename from topi/python/topi/nn/conv1d_transpose.py rename to python/tvm/topi/nn/conv1d_transpose.py diff --git a/topi/python/topi/nn/conv2d.py b/python/tvm/topi/nn/conv2d.py similarity index 99% rename from topi/python/topi/nn/conv2d.py rename to python/tvm/topi/nn/conv2d.py index 51de4546663a..d3be6bb43341 100644 --- a/topi/python/topi/nn/conv2d.py +++ b/python/tvm/topi/nn/conv2d.py @@ -673,13 +673,15 @@ def conv2d_winograd_weight_transform(kernel, tile_size): def conv2d_winograd_nnpack_weight_transform(kernel, convolution_algorithm, out_dtype): """Weight transformation for winograd - Parameters + + Parameters ---------- kernel: Tensor The raw kernel tensor with layout "NCHW". Only 3x3 kernel is supported for now. convolution_algorithm: int The convolution algorithm for Winograd NNPACK. - Returns + + Returns ------- output : tvm.te.Tensor 4-D with shape [alpha, alpha, CO, CI] @@ -771,8 +773,8 @@ def group_conv2d_nchw(Input, Filter, stride, padding, dilation, groups, out_dtyp def unpack_NCHWc_to_nchw(packed_out, out_dtype): """Unpack conv2d_NCHWc output from layout NCHWc to NCHW - Parameters - ----------- + Parameters + ---------- packed_out : tvm.te.Tensor The output tensor of conv2d_NCHWc. 
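The layout relationship behind unpack_NCHWc_to_nchw in the hunk above, demonstrated with plain NumPy shapes (illustrative only; the real operator also applies the out_dtype cast):

    import numpy as np

    # NCHW16c: the channel axis C = 64 is stored as C//16 = 4 chunks of 16 lanes
    packed = np.zeros((1, 4, 7, 7, 16))
    # unpacking moves the lane axis next to the chunk axis, then merges the two
    unpacked = packed.transpose(0, 1, 4, 2, 3).reshape(1, 64, 7, 7)
    assert unpacked.shape == (1, 64, 7, 7)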
diff --git a/topi/python/topi/nn/conv2d_transpose.py b/python/tvm/topi/nn/conv2d_transpose.py similarity index 100% rename from topi/python/topi/nn/conv2d_transpose.py rename to python/tvm/topi/nn/conv2d_transpose.py diff --git a/topi/python/topi/nn/conv3d.py b/python/tvm/topi/nn/conv3d.py similarity index 100% rename from topi/python/topi/nn/conv3d.py rename to python/tvm/topi/nn/conv3d.py diff --git a/topi/python/topi/nn/conv3d_transpose.py b/python/tvm/topi/nn/conv3d_transpose.py similarity index 100% rename from topi/python/topi/nn/conv3d_transpose.py rename to python/tvm/topi/nn/conv3d_transpose.py diff --git a/topi/python/topi/nn/correlation.py b/python/tvm/topi/nn/correlation.py similarity index 100% rename from topi/python/topi/nn/correlation.py rename to python/tvm/topi/nn/correlation.py diff --git a/topi/python/topi/nn/deformable_conv2d.py b/python/tvm/topi/nn/deformable_conv2d.py similarity index 100% rename from topi/python/topi/nn/deformable_conv2d.py rename to python/tvm/topi/nn/deformable_conv2d.py diff --git a/topi/python/topi/nn/dense.py b/python/tvm/topi/nn/dense.py similarity index 100% rename from topi/python/topi/nn/dense.py rename to python/tvm/topi/nn/dense.py diff --git a/topi/python/topi/nn/depth_to_space.py b/python/tvm/topi/nn/depth_to_space.py similarity index 100% rename from topi/python/topi/nn/depth_to_space.py rename to python/tvm/topi/nn/depth_to_space.py diff --git a/topi/python/topi/nn/depthwise_conv2d.py b/python/tvm/topi/nn/depthwise_conv2d.py similarity index 100% rename from topi/python/topi/nn/depthwise_conv2d.py rename to python/tvm/topi/nn/depthwise_conv2d.py diff --git a/topi/python/topi/nn/dilate.py b/python/tvm/topi/nn/dilate.py similarity index 100% rename from topi/python/topi/nn/dilate.py rename to python/tvm/topi/nn/dilate.py diff --git a/topi/python/topi/nn/elemwise.py b/python/tvm/topi/nn/elemwise.py similarity index 95% rename from topi/python/topi/nn/elemwise.py rename to python/tvm/topi/nn/elemwise.py index 1315a48cc0ef..e851c64a2783 100644 --- a/topi/python/topi/nn/elemwise.py +++ b/python/tvm/topi/nn/elemwise.py @@ -63,12 +63,14 @@ def _compute(*indices): @tvm.te.tag_scope(tag=tag.BROADCAST) def prelu(x, slope, axis=1): - """ PReLU. + """PReLU. It accepts two arguments: an input ``x`` and a weight array ``W`` and computes the output as :math:`PReLU(x) y = x > 0 ? x : W * x`, where :math:`*` is an elementwise multiplication for each sample in the batch. - Arguments: + + Parameters + ---------- x : tvm.te.Tensor Input argument. @@ -78,12 +80,14 @@ def prelu(x, slope, axis=1): axis : int The axis where the channel data needs to be applied - Returns: + Returns + ------- y : tvm.te.Tensor The result. 
- Links: - [http://arxiv.org/pdf/1502.01852v1.pdf] + Links + ----- + [http://arxiv.org/pdf/1502.01852v1.pdf] """ assert len(slope.shape) == 1 diff --git a/topi/python/topi/nn/fifo_buffer.py b/python/tvm/topi/nn/fifo_buffer.py similarity index 100% rename from topi/python/topi/nn/fifo_buffer.py rename to python/tvm/topi/nn/fifo_buffer.py diff --git a/topi/python/topi/nn/flatten.py b/python/tvm/topi/nn/flatten.py similarity index 100% rename from topi/python/topi/nn/flatten.py rename to python/tvm/topi/nn/flatten.py diff --git a/topi/python/topi/nn/local_response_norm.py b/python/tvm/topi/nn/local_response_norm.py similarity index 100% rename from topi/python/topi/nn/local_response_norm.py rename to python/tvm/topi/nn/local_response_norm.py diff --git a/topi/python/topi/nn/mapping.py b/python/tvm/topi/nn/mapping.py similarity index 100% rename from topi/python/topi/nn/mapping.py rename to python/tvm/topi/nn/mapping.py diff --git a/topi/python/topi/nn/pad.py b/python/tvm/topi/nn/pad.py similarity index 100% rename from topi/python/topi/nn/pad.py rename to python/tvm/topi/nn/pad.py diff --git a/topi/python/topi/nn/pooling.py b/python/tvm/topi/nn/pooling.py similarity index 100% rename from topi/python/topi/nn/pooling.py rename to python/tvm/topi/nn/pooling.py diff --git a/topi/python/topi/nn/softmax.py b/python/tvm/topi/nn/softmax.py similarity index 100% rename from topi/python/topi/nn/softmax.py rename to python/tvm/topi/nn/softmax.py diff --git a/topi/python/topi/nn/space_to_depth.py b/python/tvm/topi/nn/space_to_depth.py similarity index 100% rename from topi/python/topi/nn/space_to_depth.py rename to python/tvm/topi/nn/space_to_depth.py diff --git a/topi/python/topi/nn/sparse.py b/python/tvm/topi/nn/sparse.py similarity index 100% rename from topi/python/topi/nn/sparse.py rename to python/tvm/topi/nn/sparse.py diff --git a/topi/python/topi/nn/upsampling.py b/python/tvm/topi/nn/upsampling.py similarity index 99% rename from topi/python/topi/nn/upsampling.py rename to python/tvm/topi/nn/upsampling.py index 008e52e337ae..96a13efc541a 100644 --- a/topi/python/topi/nn/upsampling.py +++ b/python/tvm/topi/nn/upsampling.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. 
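Note: the `prelu` hunk above documents the semantics `PReLU(x) = x if x > 0 else W * x`, with one learned slope per channel on `axis`. A minimal numpy mirror of that math (a sketch only; `prelu_ref` is a hypothetical name, and the moved implementation builds a `te.compute` expression instead):

```python
import numpy as np

def prelu_ref(x, slope, axis=1):
    # Broadcast the per-channel slope along `axis`, then gate on the sign of x.
    shape = [1] * x.ndim
    shape[axis] = -1
    w = slope.reshape(shape)
    return np.where(x > 0, x, w * x)

x = np.random.randn(1, 3, 4, 4).astype("float32")
slope = np.array([0.1, 0.2, 0.3], dtype="float32")
print(prelu_ref(x, slope).shape)  # (1, 3, 4, 4)
```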
"""TVM operator upsampling compute.""" -import topi +from tvm import topi from tvm import te from ..util import simplify diff --git a/topi/python/topi/nn/util.py b/python/tvm/topi/nn/util.py similarity index 100% rename from topi/python/topi/nn/util.py rename to python/tvm/topi/nn/util.py diff --git a/topi/python/topi/nn/winograd_util.py b/python/tvm/topi/nn/winograd_util.py similarity index 100% rename from topi/python/topi/nn/winograd_util.py rename to python/tvm/topi/nn/winograd_util.py diff --git a/topi/python/topi/reduction.py b/python/tvm/topi/reduction.py similarity index 100% rename from topi/python/topi/reduction.py rename to python/tvm/topi/reduction.py diff --git a/topi/python/topi/rocm/__init__.py b/python/tvm/topi/rocm/__init__.py similarity index 100% rename from topi/python/topi/rocm/__init__.py rename to python/tvm/topi/rocm/__init__.py diff --git a/topi/python/topi/rocm/conv2d.py b/python/tvm/topi/rocm/conv2d.py similarity index 100% rename from topi/python/topi/rocm/conv2d.py rename to python/tvm/topi/rocm/conv2d.py diff --git a/topi/python/topi/rocm/dense.py b/python/tvm/topi/rocm/dense.py similarity index 100% rename from topi/python/topi/rocm/dense.py rename to python/tvm/topi/rocm/dense.py diff --git a/topi/python/topi/rocm/nn.py b/python/tvm/topi/rocm/nn.py similarity index 100% rename from topi/python/topi/rocm/nn.py rename to python/tvm/topi/rocm/nn.py diff --git a/topi/python/topi/scatter.py b/python/tvm/topi/scatter.py similarity index 100% rename from topi/python/topi/scatter.py rename to python/tvm/topi/scatter.py diff --git a/topi/python/topi/scatter_add.py b/python/tvm/topi/scatter_add.py similarity index 100% rename from topi/python/topi/scatter_add.py rename to python/tvm/topi/scatter_add.py diff --git a/topi/python/topi/sort.py b/python/tvm/topi/sort.py similarity index 100% rename from topi/python/topi/sort.py rename to python/tvm/topi/sort.py diff --git a/topi/python/topi/sparse/__init__.py b/python/tvm/topi/sparse/__init__.py similarity index 100% rename from topi/python/topi/sparse/__init__.py rename to python/tvm/topi/sparse/__init__.py diff --git a/topi/python/topi/sparse/csrmm.py b/python/tvm/topi/sparse/csrmm.py similarity index 100% rename from topi/python/topi/sparse/csrmm.py rename to python/tvm/topi/sparse/csrmm.py diff --git a/topi/python/topi/sparse/csrmv.py b/python/tvm/topi/sparse/csrmv.py similarity index 100% rename from topi/python/topi/sparse/csrmv.py rename to python/tvm/topi/sparse/csrmv.py diff --git a/topi/python/topi/sparse/dense.py b/python/tvm/topi/sparse/dense.py similarity index 100% rename from topi/python/topi/sparse/dense.py rename to python/tvm/topi/sparse/dense.py diff --git a/topi/python/topi/tag.py b/python/tvm/topi/tag.py similarity index 100% rename from topi/python/topi/tag.py rename to python/tvm/topi/tag.py diff --git a/topi/python/topi/tensor.py b/python/tvm/topi/tensor.py similarity index 100% rename from topi/python/topi/tensor.py rename to python/tvm/topi/tensor.py diff --git a/topi/python/topi/testing/__init__.py b/python/tvm/topi/testing/__init__.py similarity index 100% rename from topi/python/topi/testing/__init__.py rename to python/tvm/topi/testing/__init__.py diff --git a/topi/python/topi/testing/adaptive_pool_python.py b/python/tvm/topi/testing/adaptive_pool_python.py similarity index 100% rename from topi/python/topi/testing/adaptive_pool_python.py rename to python/tvm/topi/testing/adaptive_pool_python.py diff --git a/topi/python/topi/testing/batch_matmul.py b/python/tvm/topi/testing/batch_matmul.py 
similarity index 100% rename from topi/python/topi/testing/batch_matmul.py rename to python/tvm/topi/testing/batch_matmul.py diff --git a/topi/python/topi/testing/bilinear_resize_python.py b/python/tvm/topi/testing/bilinear_resize_python.py similarity index 98% rename from topi/python/topi/testing/bilinear_resize_python.py rename to python/tvm/topi/testing/bilinear_resize_python.py index 4d12d39b48f5..c43fd2c2e924 100644 --- a/topi/python/topi/testing/bilinear_resize_python.py +++ b/python/tvm/topi/testing/bilinear_resize_python.py @@ -18,7 +18,7 @@ """Bilinear Scale in python""" import math import numpy as np -from topi.util import nchw_pack_layout +from tvm.topi.util import nchw_pack_layout def bilinear_resize_python(image, out_size, layout, coordinate_transformation_mode="align_corners"): """ Bilinear scaling using python""" diff --git a/topi/python/topi/testing/common.py b/python/tvm/topi/testing/common.py similarity index 99% rename from topi/python/topi/testing/common.py rename to python/tvm/topi/testing/common.py index 7bc5c5d8f60a..721493e81b43 100644 --- a/topi/python/topi/testing/common.py +++ b/python/tvm/topi/testing/common.py @@ -18,7 +18,7 @@ """Common utility for topi test""" import tvm -import topi +from tvm import topi _injective_schedule = { "generic": topi.generic.schedule_injective, diff --git a/topi/python/topi/testing/conv1d_ncw_python.py b/python/tvm/topi/testing/conv1d_ncw_python.py similarity index 98% rename from topi/python/topi/testing/conv1d_ncw_python.py rename to python/tvm/topi/testing/conv1d_ncw_python.py index 90ee7de66808..84a463fcd404 100644 --- a/topi/python/topi/testing/conv1d_ncw_python.py +++ b/python/tvm/topi/testing/conv1d_ncw_python.py @@ -17,7 +17,7 @@ # pylint: disable=unused-variable, invalid-name """1D convolution in python""" import numpy as np -from topi.nn.util import get_pad_tuple1d +from tvm.topi.nn.util import get_pad_tuple1d def dilate_np(x, dilation): diff --git a/topi/python/topi/testing/conv1d_transpose_ncw_python.py b/python/tvm/topi/testing/conv1d_transpose_ncw_python.py similarity index 94% rename from topi/python/topi/testing/conv1d_transpose_ncw_python.py rename to python/tvm/topi/testing/conv1d_transpose_ncw_python.py index b472f33f1cda..0a5d22c770c5 100644 --- a/topi/python/topi/testing/conv1d_transpose_ncw_python.py +++ b/python/tvm/topi/testing/conv1d_transpose_ncw_python.py @@ -18,8 +18,8 @@ """Transposed 1D convolution in python""" import numpy as np import scipy -import topi -from topi.nn.util import get_pad_tuple1d +import tvm.topi.testing +from tvm.topi.nn.util import get_pad_tuple1d def conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding): """Transposed 1D convolution operator in NCW layout. 
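Note: `conv1d_transpose_ncw_python` (and its 2-D/3-D siblings in the hunks that follow) all compute transposed convolution the same way: dilate the input by the stride, pad by `kernel - 1`, then run an ordinary convolution with flipped weights. A compact single-channel sketch under simplifying assumptions (padding 0, output_padding 0; `conv1d_transpose_ref` is an illustrative name, not the moved helper):

```python
import numpy as np

def conv1d_transpose_ref(x, w, stride):
    k = w.size
    # Dilate: insert stride-1 zeros between input samples.
    dilated = np.zeros((x.size - 1) * stride + 1, dtype=x.dtype)
    dilated[::stride] = x
    # Pad by k-1 on both sides ("full" convolution support).
    padded = np.pad(dilated, k - 1)
    # Flip the kernel so a sliding dot product realizes true convolution.
    wf = w[::-1]
    return np.array([np.dot(padded[i:i + k], wf)
                     for i in range(padded.size - k + 1)])

x = np.array([1.0, 2.0, 3.0])
w = np.array([1.0, 1.0])
print(conv1d_transpose_ref(x, w, stride=2))  # [1. 1. 2. 2. 3. 3.]
```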
@@ -60,7 +60,7 @@ def conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding): assert opad < stride_w fpad_left, fpad_right = get_pad_tuple1d(padding, filter_w) # dilate stage - dilated_a_np = topi.testing.dilate_python(a_np, [1, 1, stride_w]) + dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_w]) # padding stage bpad_left = filter_w - 1 - fpad_left bpad_right = filter_w - 1 - fpad_right + opad diff --git a/topi/python/topi/testing/conv2d_hwcn_python.py b/python/tvm/topi/testing/conv2d_hwcn_python.py similarity index 98% rename from topi/python/topi/testing/conv2d_hwcn_python.py rename to python/tvm/topi/testing/conv2d_hwcn_python.py index 489e7eb683df..fd5d9a7c655b 100644 --- a/topi/python/topi/testing/conv2d_hwcn_python.py +++ b/python/tvm/topi/testing/conv2d_hwcn_python.py @@ -18,7 +18,7 @@ """Convolution in python""" import numpy as np import scipy.signal -from topi.nn.util import get_pad_tuple +from tvm.topi.nn.util import get_pad_tuple def conv2d_hwcn_python(a_np, w_np, stride, padding): diff --git a/topi/python/topi/testing/conv2d_nchw_python.py b/python/tvm/topi/testing/conv2d_nchw_python.py similarity index 98% rename from topi/python/topi/testing/conv2d_nchw_python.py rename to python/tvm/topi/testing/conv2d_nchw_python.py index 9f7ae7a62df1..cb855a4d5405 100644 --- a/topi/python/topi/testing/conv2d_nchw_python.py +++ b/python/tvm/topi/testing/conv2d_nchw_python.py @@ -18,7 +18,7 @@ """Convolution in python""" import numpy as np import scipy.signal -from topi.nn.util import get_pad_tuple +from tvm.topi.nn.util import get_pad_tuple def _conv2d_nchw_python(a_np, w_np, stride, padding): diff --git a/topi/python/topi/testing/conv2d_nhwc_python.py b/python/tvm/topi/testing/conv2d_nhwc_python.py similarity index 98% rename from topi/python/topi/testing/conv2d_nhwc_python.py rename to python/tvm/topi/testing/conv2d_nhwc_python.py index 7c021785544c..17d072a51b60 100644 --- a/topi/python/topi/testing/conv2d_nhwc_python.py +++ b/python/tvm/topi/testing/conv2d_nhwc_python.py @@ -18,7 +18,7 @@ """Convolution in python""" import numpy as np import scipy.signal -from topi.nn.util import get_pad_tuple +from tvm.topi.nn.util import get_pad_tuple def _conv2d_nhwc_python(a_np, w_np, stride, padding): diff --git a/topi/python/topi/testing/conv2d_transpose_python.py b/python/tvm/topi/testing/conv2d_transpose_python.py similarity index 96% rename from topi/python/topi/testing/conv2d_transpose_python.py rename to python/tvm/topi/testing/conv2d_transpose_python.py index 83e9287711ef..47f9cf1de2b2 100644 --- a/topi/python/topi/testing/conv2d_transpose_python.py +++ b/python/tvm/topi/testing/conv2d_transpose_python.py @@ -18,8 +18,8 @@ """Transposed convolution in python""" import numpy as np import scipy -import topi -from topi.nn.util import get_pad_tuple +import tvm.topi.testing +from tvm.topi.nn.util import get_pad_tuple def conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding): @@ -59,7 +59,7 @@ def conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding): opad_h, opad_w = output_padding assert opad_h < stride_h and opad_w < stride_w # dilate stage - dilated_a_np = topi.testing.dilate_python(a_np, [1, 1, stride_h, stride_w]) + dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_h, stride_w]) # padding stage fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w)) bpad_top = filter_h - 1 - fpad_top diff --git a/topi/python/topi/testing/conv3d_ncdhw_python.py 
b/python/tvm/topi/testing/conv3d_ncdhw_python.py similarity index 98% rename from topi/python/topi/testing/conv3d_ncdhw_python.py rename to python/tvm/topi/testing/conv3d_ncdhw_python.py index 0b2620fc290c..85b124a7ce4f 100644 --- a/topi/python/topi/testing/conv3d_ncdhw_python.py +++ b/python/tvm/topi/testing/conv3d_ncdhw_python.py @@ -18,7 +18,7 @@ """Convolution 3D in python""" import numpy as np import scipy.signal -from topi.nn.util import get_pad_tuple3d +from tvm.topi.nn.util import get_pad_tuple3d def _conv3d_ncdhw_python(a_np, w_np, stride, padding): diff --git a/topi/python/topi/testing/conv3d_ndhwc_python.py b/python/tvm/topi/testing/conv3d_ndhwc_python.py similarity index 98% rename from topi/python/topi/testing/conv3d_ndhwc_python.py rename to python/tvm/topi/testing/conv3d_ndhwc_python.py index 85b991f3ec5f..b9330ec8f61f 100644 --- a/topi/python/topi/testing/conv3d_ndhwc_python.py +++ b/python/tvm/topi/testing/conv3d_ndhwc_python.py @@ -18,7 +18,7 @@ """Convolution 3D in python""" import numpy as np import scipy.signal -from topi.nn.util import get_pad_tuple3d +from tvm.topi.nn.util import get_pad_tuple3d def conv3d_ndhwc_python(a_np, w_np, stride, padding): diff --git a/topi/python/topi/testing/conv3d_transpose_ncdhw_python.py b/python/tvm/topi/testing/conv3d_transpose_ncdhw_python.py similarity index 92% rename from topi/python/topi/testing/conv3d_transpose_ncdhw_python.py rename to python/tvm/topi/testing/conv3d_transpose_ncdhw_python.py index 8140eb76d2db..8d03397e3dbe 100644 --- a/topi/python/topi/testing/conv3d_transpose_ncdhw_python.py +++ b/python/tvm/topi/testing/conv3d_transpose_ncdhw_python.py @@ -17,8 +17,8 @@ # pylint: disable=invalid-name, line-too-long, unused-variable, too-many-locals, too-many-branches """Convolution 3D transpose in python""" import numpy as np -import topi -from topi.nn.util import get_pad_tuple3d +import tvm.topi.testing +from tvm.topi.nn.util import get_pad_tuple3d def conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding): @@ -51,7 +51,7 @@ def conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding): stride_d, stride_h, stride_w = stride # dilate stage - dilated_a_np = topi.testing.dilate_python(a_np, [1, 1, stride_d, stride_h, stride_w]) + dilated_a_np = tvm.topi.testing.dilate_python(a_np, [1, 1, stride_d, stride_h, stride_w]) # padding stage fpad_front, fpad_top, fpad_left, fpad_back, fpad_bottom, fpad_right = get_pad_tuple3d( @@ -81,6 +81,6 @@ def conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding): out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w w_np = np.flip(w_np, axis=[2, 3, 4]).transpose((1, 0, 2, 3, 4)) - b_np = topi.testing.conv3d_ncdhw_python(padded_a_np, w_np, stride=(1, 1, 1), padding=(0, 0, 0)) + b_np = tvm.topi.testing.conv3d_ncdhw_python(padded_a_np, w_np, stride=(1, 1, 1), padding=(0, 0, 0)) return b_np diff --git a/topi/python/topi/testing/correlation_nchw_python.py b/python/tvm/topi/testing/correlation_nchw_python.py similarity index 100% rename from topi/python/topi/testing/correlation_nchw_python.py rename to python/tvm/topi/testing/correlation_nchw_python.py diff --git a/topi/python/topi/testing/crop_and_resize_python.py b/python/tvm/topi/testing/crop_and_resize_python.py similarity index 100% rename from topi/python/topi/testing/crop_and_resize_python.py rename to python/tvm/topi/testing/crop_and_resize_python.py diff --git a/topi/python/topi/testing/deformable_conv2d_nchw_python.py b/python/tvm/topi/testing/deformable_conv2d_nchw_python.py similarity index 99% rename from 
topi/python/topi/testing/deformable_conv2d_nchw_python.py rename to python/tvm/topi/testing/deformable_conv2d_nchw_python.py index 80e2a18250ce..fe48ea522f0b 100644 --- a/topi/python/topi/testing/deformable_conv2d_nchw_python.py +++ b/python/tvm/topi/testing/deformable_conv2d_nchw_python.py @@ -18,7 +18,7 @@ """Deformable convolution in python""" import itertools import numpy as np -from topi.nn.util import get_pad_tuple +from tvm.topi.nn.util import get_pad_tuple def deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups): diff --git a/topi/python/topi/testing/depth_to_space.py b/python/tvm/topi/testing/depth_to_space.py similarity index 100% rename from topi/python/topi/testing/depth_to_space.py rename to python/tvm/topi/testing/depth_to_space.py diff --git a/topi/python/topi/testing/depthwise_conv2d_python.py b/python/tvm/topi/testing/depthwise_conv2d_python.py similarity index 100% rename from topi/python/topi/testing/depthwise_conv2d_python.py rename to python/tvm/topi/testing/depthwise_conv2d_python.py diff --git a/topi/python/topi/testing/dilate_python.py b/python/tvm/topi/testing/dilate_python.py similarity index 100% rename from topi/python/topi/testing/dilate_python.py rename to python/tvm/topi/testing/dilate_python.py diff --git a/topi/python/topi/testing/gather_nd_python.py b/python/tvm/topi/testing/gather_nd_python.py similarity index 100% rename from topi/python/topi/testing/gather_nd_python.py rename to python/tvm/topi/testing/gather_nd_python.py diff --git a/topi/python/topi/testing/gather_python.py b/python/tvm/topi/testing/gather_python.py similarity index 100% rename from topi/python/topi/testing/gather_python.py rename to python/tvm/topi/testing/gather_python.py diff --git a/topi/python/topi/testing/grid_sample_python.py b/python/tvm/topi/testing/grid_sample_python.py similarity index 100% rename from topi/python/topi/testing/grid_sample_python.py rename to python/tvm/topi/testing/grid_sample_python.py diff --git a/topi/python/topi/testing/l2_normalize_python.py b/python/tvm/topi/testing/l2_normalize_python.py similarity index 100% rename from topi/python/topi/testing/l2_normalize_python.py rename to python/tvm/topi/testing/l2_normalize_python.py diff --git a/topi/python/topi/testing/lrn_python.py b/python/tvm/topi/testing/lrn_python.py similarity index 100% rename from topi/python/topi/testing/lrn_python.py rename to python/tvm/topi/testing/lrn_python.py diff --git a/topi/python/topi/testing/one_hot.py b/python/tvm/topi/testing/one_hot.py similarity index 100% rename from topi/python/topi/testing/one_hot.py rename to python/tvm/topi/testing/one_hot.py diff --git a/topi/python/topi/testing/pool1d_python.py b/python/tvm/topi/testing/pool1d_python.py similarity index 100% rename from topi/python/topi/testing/pool1d_python.py rename to python/tvm/topi/testing/pool1d_python.py diff --git a/topi/python/topi/testing/pool3d_python.py b/python/tvm/topi/testing/pool3d_python.py similarity index 100% rename from topi/python/topi/testing/pool3d_python.py rename to python/tvm/topi/testing/pool3d_python.py diff --git a/topi/python/topi/testing/pool_grad_python.py b/python/tvm/topi/testing/pool_grad_python.py similarity index 100% rename from topi/python/topi/testing/pool_grad_python.py rename to python/tvm/topi/testing/pool_grad_python.py diff --git a/topi/python/topi/testing/reorg_python.py b/python/tvm/topi/testing/reorg_python.py similarity index 100% rename from topi/python/topi/testing/reorg_python.py rename to 
python/tvm/topi/testing/reorg_python.py diff --git a/topi/python/topi/testing/roi_align_python.py b/python/tvm/topi/testing/roi_align_python.py similarity index 100% rename from topi/python/topi/testing/roi_align_python.py rename to python/tvm/topi/testing/roi_align_python.py diff --git a/topi/python/topi/testing/roi_pool_python.py b/python/tvm/topi/testing/roi_pool_python.py similarity index 100% rename from topi/python/topi/testing/roi_pool_python.py rename to python/tvm/topi/testing/roi_pool_python.py diff --git a/topi/python/topi/testing/sequence_mask_python.py b/python/tvm/topi/testing/sequence_mask_python.py similarity index 100% rename from topi/python/topi/testing/sequence_mask_python.py rename to python/tvm/topi/testing/sequence_mask_python.py diff --git a/topi/python/topi/testing/slice_axis_python.py b/python/tvm/topi/testing/slice_axis_python.py similarity index 100% rename from topi/python/topi/testing/slice_axis_python.py rename to python/tvm/topi/testing/slice_axis_python.py diff --git a/topi/python/topi/testing/softmax_python.py b/python/tvm/topi/testing/softmax_python.py similarity index 100% rename from topi/python/topi/testing/softmax_python.py rename to python/tvm/topi/testing/softmax_python.py diff --git a/topi/python/topi/testing/space_to_depth.py b/python/tvm/topi/testing/space_to_depth.py similarity index 100% rename from topi/python/topi/testing/space_to_depth.py rename to python/tvm/topi/testing/space_to_depth.py diff --git a/topi/python/topi/testing/strided_slice_python.py b/python/tvm/topi/testing/strided_slice_python.py similarity index 100% rename from topi/python/topi/testing/strided_slice_python.py rename to python/tvm/topi/testing/strided_slice_python.py diff --git a/topi/python/topi/testing/trilinear_resize3d_python.py b/python/tvm/topi/testing/trilinear_resize3d_python.py similarity index 100% rename from topi/python/topi/testing/trilinear_resize3d_python.py rename to python/tvm/topi/testing/trilinear_resize3d_python.py diff --git a/topi/python/topi/testing/upsampling_python.py b/python/tvm/topi/testing/upsampling_python.py similarity index 99% rename from topi/python/topi/testing/upsampling_python.py rename to python/tvm/topi/testing/upsampling_python.py index f2fa80f5a2e6..8cc00ad4cf9c 100644 --- a/topi/python/topi/testing/upsampling_python.py +++ b/python/tvm/topi/testing/upsampling_python.py @@ -18,7 +18,7 @@ """Upsampling in python""" import math import numpy as np -from topi.util import nchw_pack_layout +from tvm.topi.util import nchw_pack_layout def upsample_nearest(arr, scale): diff --git a/topi/python/topi/transform.py b/python/tvm/topi/transform.py similarity index 97% rename from topi/python/topi/transform.py rename to python/tvm/topi/transform.py index 159412f77e4f..6bfa4732426e 100644 --- a/topi/python/topi/transform.py +++ b/python/tvm/topi/transform.py @@ -19,7 +19,7 @@ from __future__ import absolute_import as _abs import tvm from tvm import te -import topi +from tvm import topi from . import cpp from . import tag from .util import within_index, make_idx @@ -48,19 +48,22 @@ def expand_like(a, shape_like, axis): This operation can always be composed of unsqueezing and expanding dims on those unsqueezed axes. - Examples:: - input = [ 12. 19. 27.] - input.shape = (3,) + Examples + -------- + .. code-block:: - new_shape_array = [[[1,2],[2,3],[1,3]], - [[1,4],[4,3],[5,2]], - [[7,1],[7,2],[7,3]]] - new_shape_array.shape = (3, 3, 2) + input = [ 12. 19. 27.] 
+ input.shape = (3,) - expand_like(input, [1,2], new_shape_array) = - [[[12,12],[12,12],[12,12]], - [[19,19],[19,19],[19,19]], - [[27,27],[27,27],[27,27]]] + new_shape_array = [[[1,2],[2,3],[1,3]], + [[1,4],[4,3],[5,2]], + [[7,1],[7,2],[7,3]]] + new_shape_array.shape = (3, 3, 2) + + expand_like(input, [1,2], new_shape_array) = + [[[12,12],[12,12],[12,12]], + [[19,19],[19,19],[19,19]], + [[27,27],[27,27],[27,27]]] Parameters ---------- @@ -70,6 +73,7 @@ def expand_like(a, shape_like, axis): The tensor to with target shape. axis: list of int axis to be expanded on + Returns ------- ret : tvm.te.Tensor @@ -458,7 +462,7 @@ def gather_nd(a, indices): def matmul(a, b, transp_a=False, transp_b=False): """ Creates an operation that calculates a matrix multiplication (row-major notation): - A(i, k) * B(k, j) + A(i, k) * B(k, j) if trans_a == trans_b, the usual transposed combinations, otherwise Parameters diff --git a/topi/python/topi/util.py b/python/tvm/topi/util.py similarity index 99% rename from topi/python/topi/util.py rename to python/tvm/topi/util.py index cc437325e0d6..5bde1cb1d2f5 100644 --- a/topi/python/topi/util.py +++ b/python/tvm/topi/util.py @@ -365,8 +365,8 @@ def get_shape(src_shape, src_layout, dst_layout): def within_index(b, e, s, i): """Return a boolean value that indicates if i is within the given index. - Parameter - --------- + Parameters + ---------- b : Expr beginning of the index @@ -400,8 +400,8 @@ def make_idx(b, e, s, z, i): The returned value is only meaningful if within_index() returns True for the same set of parameters. - Parameter - --------- + Parameters + ---------- b : Expr beginning of the index diff --git a/topi/python/topi/vision/__init__.py b/python/tvm/topi/vision/__init__.py similarity index 100% rename from topi/python/topi/vision/__init__.py rename to python/tvm/topi/vision/__init__.py diff --git a/topi/python/topi/vision/nms.py b/python/tvm/topi/vision/nms.py similarity index 100% rename from topi/python/topi/vision/nms.py rename to python/tvm/topi/vision/nms.py diff --git a/topi/python/topi/vision/rcnn/__init__.py b/python/tvm/topi/vision/rcnn/__init__.py similarity index 100% rename from topi/python/topi/vision/rcnn/__init__.py rename to python/tvm/topi/vision/rcnn/__init__.py diff --git a/topi/python/topi/vision/rcnn/proposal.py b/python/tvm/topi/vision/rcnn/proposal.py similarity index 100% rename from topi/python/topi/vision/rcnn/proposal.py rename to python/tvm/topi/vision/rcnn/proposal.py diff --git a/topi/python/topi/vision/rcnn/roi_align.py b/python/tvm/topi/vision/rcnn/roi_align.py similarity index 100% rename from topi/python/topi/vision/rcnn/roi_align.py rename to python/tvm/topi/vision/rcnn/roi_align.py diff --git a/topi/python/topi/vision/rcnn/roi_pool.py b/python/tvm/topi/vision/rcnn/roi_pool.py similarity index 100% rename from topi/python/topi/vision/rcnn/roi_pool.py rename to python/tvm/topi/vision/rcnn/roi_pool.py diff --git a/topi/python/topi/vision/reorg.py b/python/tvm/topi/vision/reorg.py similarity index 100% rename from topi/python/topi/vision/reorg.py rename to python/tvm/topi/vision/reorg.py diff --git a/topi/python/topi/vision/ssd/__init__.py b/python/tvm/topi/vision/ssd/__init__.py similarity index 100% rename from topi/python/topi/vision/ssd/__init__.py rename to python/tvm/topi/vision/ssd/__init__.py diff --git a/topi/python/topi/vision/ssd/multibox.py b/python/tvm/topi/vision/ssd/multibox.py similarity index 99% rename from topi/python/topi/vision/ssd/multibox.py rename to python/tvm/topi/vision/ssd/multibox.py index 
e5b92156bdc3..6534503b6735 100644 --- a/topi/python/topi/vision/ssd/multibox.py +++ b/python/tvm/topi/vision/ssd/multibox.py @@ -21,7 +21,7 @@ from tvm.te import hybrid from tvm.tir import exp, sqrt -import topi +from tvm import topi from ..nms import non_max_suppression diff --git a/topi/python/topi/x86/__init__.py b/python/tvm/topi/x86/__init__.py similarity index 100% rename from topi/python/topi/x86/__init__.py rename to python/tvm/topi/x86/__init__.py diff --git a/topi/python/topi/x86/batch_matmul.py b/python/tvm/topi/x86/batch_matmul.py similarity index 100% rename from topi/python/topi/x86/batch_matmul.py rename to python/tvm/topi/x86/batch_matmul.py diff --git a/topi/python/topi/x86/binarize_pack.py b/python/tvm/topi/x86/binarize_pack.py similarity index 100% rename from topi/python/topi/x86/binarize_pack.py rename to python/tvm/topi/x86/binarize_pack.py diff --git a/topi/python/topi/x86/binary_dense.py b/python/tvm/topi/x86/binary_dense.py similarity index 100% rename from topi/python/topi/x86/binary_dense.py rename to python/tvm/topi/x86/binary_dense.py diff --git a/topi/python/topi/x86/bitserial_conv2d.py b/python/tvm/topi/x86/bitserial_conv2d.py similarity index 100% rename from topi/python/topi/x86/bitserial_conv2d.py rename to python/tvm/topi/x86/bitserial_conv2d.py diff --git a/topi/python/topi/x86/bitserial_dense.py b/python/tvm/topi/x86/bitserial_dense.py similarity index 99% rename from topi/python/topi/x86/bitserial_dense.py rename to python/tvm/topi/x86/bitserial_dense.py index cbc6ac83ffd9..8d5736b605e1 100644 --- a/topi/python/topi/x86/bitserial_dense.py +++ b/python/tvm/topi/x86/bitserial_dense.py @@ -20,7 +20,7 @@ import tvm from tvm import te from tvm import autotvm -from topi.util import get_const_int, get_const_tuple +from tvm.topi.util import get_const_int, get_const_tuple from .. 
import tag from ..nn.bitserial_util import bitpack, binary_op_multiplier diff --git a/topi/python/topi/x86/conv1d.py b/python/tvm/topi/x86/conv1d.py similarity index 100% rename from topi/python/topi/x86/conv1d.py rename to python/tvm/topi/x86/conv1d.py diff --git a/topi/python/topi/x86/conv2d.py b/python/tvm/topi/x86/conv2d.py similarity index 100% rename from topi/python/topi/x86/conv2d.py rename to python/tvm/topi/x86/conv2d.py diff --git a/topi/python/topi/x86/conv2d_alter_op.py b/python/tvm/topi/x86/conv2d_alter_op.py similarity index 100% rename from topi/python/topi/x86/conv2d_alter_op.py rename to python/tvm/topi/x86/conv2d_alter_op.py diff --git a/topi/python/topi/x86/conv2d_avx_1x1.py b/python/tvm/topi/x86/conv2d_avx_1x1.py similarity index 100% rename from topi/python/topi/x86/conv2d_avx_1x1.py rename to python/tvm/topi/x86/conv2d_avx_1x1.py diff --git a/topi/python/topi/x86/conv2d_avx_common.py b/python/tvm/topi/x86/conv2d_avx_common.py similarity index 100% rename from topi/python/topi/x86/conv2d_avx_common.py rename to python/tvm/topi/x86/conv2d_avx_common.py diff --git a/topi/python/topi/x86/conv2d_int8.py b/python/tvm/topi/x86/conv2d_int8.py similarity index 100% rename from topi/python/topi/x86/conv2d_int8.py rename to python/tvm/topi/x86/conv2d_int8.py diff --git a/topi/python/topi/x86/conv2d_transpose.py b/python/tvm/topi/x86/conv2d_transpose.py similarity index 100% rename from topi/python/topi/x86/conv2d_transpose.py rename to python/tvm/topi/x86/conv2d_transpose.py diff --git a/topi/python/topi/x86/conv3d.py b/python/tvm/topi/x86/conv3d.py similarity index 100% rename from topi/python/topi/x86/conv3d.py rename to python/tvm/topi/x86/conv3d.py diff --git a/topi/python/topi/x86/conv3d_transpose.py b/python/tvm/topi/x86/conv3d_transpose.py similarity index 100% rename from topi/python/topi/x86/conv3d_transpose.py rename to python/tvm/topi/x86/conv3d_transpose.py diff --git a/topi/python/topi/x86/dense.py b/python/tvm/topi/x86/dense.py similarity index 100% rename from topi/python/topi/x86/dense.py rename to python/tvm/topi/x86/dense.py diff --git a/topi/python/topi/x86/depthwise_conv2d.py b/python/tvm/topi/x86/depthwise_conv2d.py similarity index 100% rename from topi/python/topi/x86/depthwise_conv2d.py rename to python/tvm/topi/x86/depthwise_conv2d.py diff --git a/topi/python/topi/x86/injective.py b/python/tvm/topi/x86/injective.py similarity index 100% rename from topi/python/topi/x86/injective.py rename to python/tvm/topi/x86/injective.py diff --git a/topi/python/topi/x86/nn.py b/python/tvm/topi/x86/nn.py similarity index 100% rename from topi/python/topi/x86/nn.py rename to python/tvm/topi/x86/nn.py diff --git a/topi/python/topi/x86/pooling.py b/python/tvm/topi/x86/pooling.py similarity index 100% rename from topi/python/topi/x86/pooling.py rename to python/tvm/topi/x86/pooling.py diff --git a/topi/python/topi/x86/reduction.py b/python/tvm/topi/x86/reduction.py similarity index 100% rename from topi/python/topi/x86/reduction.py rename to python/tvm/topi/x86/reduction.py diff --git a/topi/python/topi/x86/roi_align.py b/python/tvm/topi/x86/roi_align.py similarity index 100% rename from topi/python/topi/x86/roi_align.py rename to python/tvm/topi/x86/roi_align.py diff --git a/topi/python/topi/x86/sparse.py b/python/tvm/topi/x86/sparse.py similarity index 100% rename from topi/python/topi/x86/sparse.py rename to python/tvm/topi/x86/sparse.py diff --git a/topi/python/topi/x86/tensor_intrin.py b/python/tvm/topi/x86/tensor_intrin.py similarity index 100% rename from 
topi/python/topi/x86/tensor_intrin.py rename to python/tvm/topi/x86/tensor_intrin.py diff --git a/topi/python/topi/x86/util.py b/python/tvm/topi/x86/util.py similarity index 100% rename from topi/python/topi/x86/util.py rename to python/tvm/topi/x86/util.py diff --git a/rust/tvm-graph-rt/tests/test_graph_serde.rs b/rust/tvm-graph-rt/tests/test_graph_serde.rs index 5209facedc50..7d8e867a151f 100644 --- a/rust/tvm-graph-rt/tests/test_graph_serde.rs +++ b/rust/tvm-graph-rt/tests/test_graph_serde.rs @@ -34,13 +34,7 @@ fn test_load_graph() { let output = std::process::Command::new(mf_dir!("/tests/build_model.py")) .env( "PYTHONPATH", - concat!( - mf_dir!("/../../python"), - ":", - mf_dir!("/../../nnvm/python"), - ":", - mf_dir!("/../../topi/python") - ), + concat!(mf_dir!("/../../python"), ":", mf_dir!("/../../nnvm/python")), ) .output() .expect("Failed to build test model"); diff --git a/src/relay/backend/compile_engine.cc b/src/relay/backend/compile_engine.cc index 3c4faf79147c..9ee57278e2f9 100644 --- a/src/relay/backend/compile_engine.cc +++ b/src/relay/backend/compile_engine.cc @@ -23,7 +23,6 @@ */ #include "compile_engine.h" -#include #include #include #include @@ -37,6 +36,7 @@ #include #include #include +#include #include #include diff --git a/src/relay/op/annotation/annotation.cc b/src/relay/op/annotation/annotation.cc index 6be9b0d4a3d5..d3eb4f96ed09 100644 --- a/src/relay/op/annotation/annotation.cc +++ b/src/relay/op/annotation/annotation.cc @@ -23,12 +23,12 @@ * \brief Registration of annotation operators. */ -#include #include #include #include #include #include +#include #include "../../transforms/infer_layout_util.h" #include "../type_relations.h" diff --git a/src/relay/op/debug.cc b/src/relay/op/debug.cc index 56b7d4405490..4b5e7d97f87d 100644 --- a/src/relay/op/debug.cc +++ b/src/relay/op/debug.cc @@ -22,10 +22,10 @@ * \brief Property def of nn operators. */ -#include #include #include #include +#include #include diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 007b3dd86028..2bb87acd7dce 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -23,12 +23,12 @@ */ #include "transform.h" -#include -#include #include #include #include #include +#include +#include #include #include @@ -89,7 +89,7 @@ TVM_REGISTER_GLOBAL("relay.op.dyn._make.reshape").set_body_typed(MakeReshape); RELAY_REGISTER_OP("dyn.reshape") .describe(R"code(Reshapes the input array based on the values in the newshape array. - + To give user more convenience in without doing manual shape inference, some dimensions of the shape can take special values from the set {0, -1, -3}. The significance of each is explained below: @@ -120,7 +120,7 @@ RELAY_REGISTER_OP("dyn.reshape") data.shape = (2,3,4,5), newshape = (-3,-3), result.shape = (6,20) data.shape = (2,3,4), newshape = (0,-3), result.shape = (2,12) - Special values -2 and -4 from the standard reshape op would introduce dynamic rank + Special values -2 and -4 from the standard reshape op would introduce dynamic rank in this op. Thus, they are not permitted. )code" TVM_ADD_FILELINE) diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc index de73b44aed4d..771024502b21 100644 --- a/src/relay/op/memory/memory.cc +++ b/src/relay/op/memory/memory.cc @@ -22,12 +22,12 @@ * \brief Operators for manifest shape-aware memory allocation in Relay. 
*/ -#include #include #include #include #include #include +#include #include "../../transforms/infer_layout_util.h" #include "../op_common.h" diff --git a/src/relay/op/nn/correlation.cc b/src/relay/op/nn/correlation.cc index 67f42b7d3e85..5970cc75b2a9 100644 --- a/src/relay/op/nn/correlation.cc +++ b/src/relay/op/nn/correlation.cc @@ -21,11 +21,11 @@ * \file correlation.cc * \brief Correlation operators */ -#include #include #include #include #include +#include #include diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc index 7013c02fde20..19348c018dbf 100644 --- a/src/relay/op/nn/nn.cc +++ b/src/relay/op/nn/nn.cc @@ -24,14 +24,14 @@ #include "nn.h" -#include -#include -#include -#include #include #include #include #include +#include +#include +#include +#include #include #include diff --git a/src/relay/op/nn/pad.cc b/src/relay/op/nn/pad.cc index 52259c535125..d7103602deca 100644 --- a/src/relay/op/nn/pad.cc +++ b/src/relay/op/nn/pad.cc @@ -21,11 +21,11 @@ * \file pad.cc * \brief Implementation of operator pad */ -#include #include #include #include #include +#include #include diff --git a/src/relay/op/nn/pooling.cc b/src/relay/op/nn/pooling.cc index 63f0ce539d82..1e5306035a6c 100644 --- a/src/relay/op/nn/pooling.cc +++ b/src/relay/op/nn/pooling.cc @@ -23,11 +23,11 @@ */ #include "pooling.h" -#include #include #include #include #include +#include #include diff --git a/src/relay/op/tensor/binary.cc b/src/relay/op/tensor/binary.cc index 93bce155d955..df128ff05338 100644 --- a/src/relay/op/tensor/binary.cc +++ b/src/relay/op/tensor/binary.cc @@ -21,9 +21,9 @@ * \file binary.cc * \brief binary broadcast operators. */ -#include #include #include +#include #include "../op_common.h" #include "../type_relations.h" diff --git a/src/relay/op/tensor/reduce.cc b/src/relay/op/tensor/reduce.cc index 6ffcc675560d..9fd140092954 100644 --- a/src/relay/op/tensor/reduce.cc +++ b/src/relay/op/tensor/reduce.cc @@ -21,11 +21,11 @@ * \file reduce.cc * \brief Reduction operators. */ -#include -#include #include #include #include +#include +#include #include #include diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 99a1f594d9e5..9427dedfe3fa 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -23,11 +23,6 @@ */ #include "transform.h" -#include -#include -#include -#include -#include #include #include #include @@ -35,6 +30,11 @@ #include #include #include +#include +#include +#include +#include +#include #include diff --git a/src/relay/op/tensor/unary.cc b/src/relay/op/tensor/unary.cc index 5809798c9983..938142fccc7d 100644 --- a/src/relay/op/tensor/unary.cc +++ b/src/relay/op/tensor/unary.cc @@ -21,11 +21,11 @@ * \file unary.cc * \brief Unary operators. */ -#include -#include #include #include #include +#include +#include #include "../make_op.h" #include "../op_common.h" diff --git a/src/relay/op/vision/yolo.cc b/src/relay/op/vision/yolo.cc index e54473f68ef7..cfd81131be73 100644 --- a/src/relay/op/vision/yolo.cc +++ b/src/relay/op/vision/yolo.cc @@ -21,9 +21,9 @@ * \file yolo.cc * \brief Yolo related operators */ -#include #include #include +#include #include diff --git a/src/relay/op/vm/vm.cc b/src/relay/op/vm/vm.cc index 6e611d623d35..59756ea6ffab 100644 --- a/src/relay/op/vm/vm.cc +++ b/src/relay/op/vm/vm.cc @@ -22,13 +22,13 @@ * \brief Dialect operators for Relay VM. 
*/ -#include #include #include #include #include #include #include +#include #include "../../transforms/infer_layout_util.h" #include "../op_common.h" diff --git a/src/te/autodiff/adjoint.cc b/src/te/autodiff/adjoint.cc index 772213da5cca..8b7c428ac8a4 100644 --- a/src/te/autodiff/adjoint.cc +++ b/src/te/autodiff/adjoint.cc @@ -30,11 +30,11 @@ * (3) and sum them together to get the adjoint of the input itself. * The three steps are computed recursively. */ -#include -#include #include #include #include +#include +#include #include #include diff --git a/topi/src/broadcast.cc b/src/topi/broadcast.cc similarity index 97% rename from topi/src/broadcast.cc rename to src/topi/broadcast.cc index e13c09ebb922..a06d91401580 100644 --- a/topi/src/broadcast.cc +++ b/src/topi/broadcast.cc @@ -21,11 +21,12 @@ * \brief Registration of broadcast operators * \file broadcast.cc */ -#include -#include #include #include +#include +#include +namespace tvm { namespace topi { using namespace tvm; @@ -76,3 +77,4 @@ TVM_REGISTER_GLOBAL("topi.broadcast_to").set_body([](TVMArgs args, TVMRetValue* }); } // namespace topi +} // namespace tvm diff --git a/topi/src/elemwise.cc b/src/topi/elemwise.cc similarity index 98% rename from topi/src/elemwise.cc rename to src/topi/elemwise.cc index 10ac8f8c4cee..7a39758f6cf6 100644 --- a/topi/src/elemwise.cc +++ b/src/topi/elemwise.cc @@ -21,10 +21,11 @@ * \brief Registration of elemwise operators * \file elemwise.cc */ -#include #include #include +#include +namespace tvm { namespace topi { using namespace tvm; @@ -155,3 +156,4 @@ TVM_REGISTER_GLOBAL("topi.bitwise_not").set_body([](TVMArgs args, TVMRetValue* r }); } // namespace topi +} // namespace tvm diff --git a/topi/src/nn.cc b/src/topi/nn.cc similarity index 92% rename from topi/src/nn.cc rename to src/topi/nn.cc index 3ec47787ec6e..4a209b2f2932 100644 --- a/topi/src/nn.cc +++ b/src/topi/nn.cc @@ -21,20 +21,21 @@ * \brief Registration of NN operators * \file nn.cc */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace tvm { namespace topi { using namespace tvm; @@ -151,3 +152,4 @@ TVM_REGISTER_GLOBAL("topi.nn.binary_dense").set_body([](TVMArgs args, TVMRetValu }); } // namespace topi +} // namespace tvm diff --git a/topi/src/reduction.cc b/src/topi/reduction.cc similarity index 95% rename from topi/src/reduction.cc rename to src/topi/reduction.cc index b981495411ba..b5c6690e1676 100644 --- a/topi/src/reduction.cc +++ b/src/topi/reduction.cc @@ -21,11 +21,12 @@ * \brief Registration of reduction operators * \file reduction.cc */ -#include -#include #include #include +#include +#include +namespace tvm { namespace topi { using namespace tvm; @@ -64,3 +65,4 @@ TVM_REGISTER_GLOBAL("topi.any").set_body([](TVMArgs args, TVMRetValue* rv) { }); } // namespace topi +} // namespace tvm diff --git a/topi/src/schedule.cc b/src/topi/schedule.cc similarity index 94% rename from topi/src/schedule.cc rename to src/topi/schedule.cc index b974acaf2dd5..333833a4ce5d 100644 --- a/topi/src/schedule.cc +++ b/src/topi/schedule.cc @@ -23,31 +23,32 @@ */ #define TOPI_REDUCE_ATLEAST1D 0 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include #include #include - +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace tvm { namespace topi { using namespace tvm; @@ -319,3 +320,4 @@ TVM_REGISTER_GENERIC_FUNC(dense) .register_func({"rocm"}, WrapDenseOp(topi::rocm::dense_rocm)); } // namespace topi +} // namespace tvm diff --git a/topi/src/transform.cc b/src/topi/transform.cc similarity index 98% rename from topi/src/transform.cc rename to src/topi/transform.cc index ab39a5eb4c6e..7a76c60ec2b6 100644 --- a/topi/src/transform.cc +++ b/src/topi/transform.cc @@ -21,11 +21,12 @@ * \brief Registration of transform operators * \file transform.cc */ -#include -#include #include #include +#include +#include +namespace tvm { namespace topi { using namespace tvm; @@ -176,3 +177,4 @@ TVM_REGISTER_GLOBAL("topi.one_hot").set_body([](TVMArgs args, TVMRetValue* rv) { }); } // namespace topi +} // namespace tvm diff --git a/topi/src/vision.cc b/src/topi/vision.cc similarity index 94% rename from topi/src/vision.cc rename to src/topi/vision.cc index 0485177cf9d5..e64eec452c0e 100644 --- a/topi/src/vision.cc +++ b/src/topi/vision.cc @@ -21,10 +21,11 @@ * \brief Registration of vision operators * \file vision.cc */ -#include #include #include +#include +namespace tvm { namespace topi { using namespace tvm; @@ -35,3 +36,4 @@ TVM_REGISTER_GLOBAL("topi.vision.reorg").set_body([](TVMArgs args, TVMRetValue* }); } // namespace topi +} // namespace tvm diff --git a/tests/cpp/auto_scheduler_test.cc b/tests/cpp/auto_scheduler_test.cc index 85266057548c..b2de0ef6bf4d 100644 --- a/tests/cpp/auto_scheduler_test.cc +++ b/tests/cpp/auto_scheduler_test.cc @@ -19,10 +19,10 @@ #include #include -#include #include #include #include +#include #include diff --git a/tests/cpp/build_module_test.cc b/tests/cpp/build_module_test.cc index 206470f2e136..2462fd1e733f 100644 --- a/tests/cpp/build_module_test.cc +++ b/tests/cpp/build_module_test.cc @@ -19,10 +19,10 @@ #include #include -#include #include #include #include +#include #include #include diff --git a/tests/cpp/relay_build_module_test.cc b/tests/cpp/relay_build_module_test.cc index 636593f9803e..5de4ada6cb7b 100644 --- a/tests/cpp/relay_build_module_test.cc +++ b/tests/cpp/relay_build_module_test.cc @@ -18,8 +18,6 @@ */ #include -#include -#include #include #include #include @@ -32,6 +30,8 @@ #include #include #include +#include +#include using namespace tvm; using namespace tvm::relay; diff --git a/tests/cpp/relay_transform_sequential_test.cc b/tests/cpp/relay_transform_sequential_test.cc index bb4bf928b018..1a12aec7054d 100644 --- a/tests/cpp/relay_transform_sequential_test.cc +++ b/tests/cpp/relay_transform_sequential_test.cc @@ -18,8 +18,6 @@ */ #include -#include -#include #include #include #include @@ -32,6 +30,8 @@ #include #include #include +#include +#include using namespace tvm; diff --git a/tests/cpp/topi_ewise_test.cc b/tests/cpp/topi_ewise_test.cc index 10c7b9d7464b..22ef8c7dffaa 100644 --- a/tests/cpp/topi_ewise_test.cc +++ b/tests/cpp/topi_ewise_test.cc @@ -18,9 +18,10 @@ */ #include -#include #include +#include +namespace tvm { namespace topi { TEST(Tensor, Basic) { using namespace tvm; @@ -29,6 +30,7 @@ TEST(Tensor, Basic) { auto C = topi::exp(A); } } // namespace topi +} // namespace tvm int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); diff --git a/tests/cpp/utvm_runtime_standalone_test.cc b/tests/cpp/utvm_runtime_standalone_test.cc index 70709b0f96a1..6f9577463310 100644 --- 
a/tests/cpp/utvm_runtime_standalone_test.cc +++ b/tests/cpp/utvm_runtime_standalone_test.cc @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -43,6 +42,7 @@ #include #include #include +#include TVM_REGISTER_GLOBAL("test.sch").set_body([](tvm::TVMArgs args, tvm::TVMRetValue* rv) { *rv = topi::generic::schedule_injective(args[0], args[1]); diff --git a/tests/python/contrib/test_cblas.py b/tests/python/contrib/test_cblas.py index 54f4ff6002d1..00ddcd3061ac 100644 --- a/tests/python/contrib/test_cblas.py +++ b/tests/python/contrib/test_cblas.py @@ -18,7 +18,7 @@ import tvm from tvm import te import numpy as np -import topi.testing +import tvm.topi.testing from tvm.contrib import cblas def verify_matmul_add(m, l, n, transa=False, transb=False, dtype="float32"): @@ -131,7 +131,7 @@ def get_numpy(a, b, transa, transb): a = a.transpose(0, 2, 1) if not transb: b = b.transpose(0, 2, 1) - return topi.testing.batch_matmul(a, b) + return tvm.topi.testing.batch_matmul(a, b) def verify(target="llvm"): if not tvm.runtime.enabled(target): diff --git a/tests/python/contrib/test_cudnn.py b/tests/python/contrib/test_cudnn.py index 17cb0d1f0f1c..61822c849a7e 100644 --- a/tests/python/contrib/test_cudnn.py +++ b/tests/python/contrib/test_cudnn.py @@ -19,7 +19,7 @@ from tvm.contrib import cudnn from tvm.contrib.nvcc import have_fp16 import numpy as np -import topi.testing +import tvm.topi.testing def verify_conv2d(data_dtype, conv_dtype, tensor_format=0, groups=1): in_channel = 4 @@ -79,10 +79,10 @@ def verify_conv2d(data_dtype, conv_dtype, tensor_format=0, groups=1): w = tvm.nd.array(w_np, ctx) y = tvm.nd.array(y_np, ctx) if tensor_format == 0: - c_np = topi.testing.conv2d_nchw_python(x_np, w_np, 1, 1, groups=groups) + c_np = tvm.topi.testing.conv2d_nchw_python(x_np, w_np, 1, 1, groups=groups) elif tensor_format == 1: wt = w_np.transpose((1, 2, 3, 0)) #OHWI => HWIO - c_np = topi.testing.conv2d_nhwc_python(x_np, wt, 1, 1, groups=groups) + c_np = tvm.topi.testing.conv2d_nhwc_python(x_np, wt, 1, 1, groups=groups) f(x, w, y) tvm.testing.assert_allclose(y.asnumpy(), c_np, atol=1e-2, rtol=1e-2) @@ -154,7 +154,7 @@ def verify_conv3d(data_dtype, conv_dtype, tensor_format=0, groups=1): w = tvm.nd.array(w_np, ctx) y = tvm.nd.array(y_np, ctx) if tensor_format == 0: - c_np = topi.testing.conv3d_ncdhw_python(x_np, w_np, 1, 1, groups) + c_np = tvm.topi.testing.conv3d_ncdhw_python(x_np, w_np, 1, 1, groups) else: raise AssertionError("For now, conv3d tensor format only support: 0(NCHW)") @@ -172,7 +172,7 @@ def verify_softmax(shape, axis, dtype="float32"): ctx = tvm.gpu(0) a_np = np.random.uniform(size=shape).astype(dtype) - b_np = topi.testing.softmax_python(a_np) + b_np = tvm.topi.testing.softmax_python(a_np) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(b_np, ctx) f = tvm.build(s, [A, B], "cuda", target_host="llvm", name="softmax") @@ -187,7 +187,7 @@ def verify_softmax_4d(shape, dtype="float32"): ctx = tvm.gpu(0) n, c, h, w = shape a_np = np.random.uniform(size=shape).astype(dtype) - b_np = topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c)) + b_np = tvm.topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c)) b_np = b_np.reshape(n, h, w, c).transpose(0, 3, 1, 2) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(b_np, ctx) diff --git a/tests/python/contrib/test_gemm_acc16.py b/tests/python/contrib/test_gemm_acc16.py index 1fd5974cd2dc..9ae2c9f0c832 100644 --- a/tests/python/contrib/test_gemm_acc16.py +++ b/tests/python/contrib/test_gemm_acc16.py @@ -18,7 +18,7 
@@ import tvm from tvm import te import numpy as np -from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int16 +from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int16 def benchmark_fc_int8_acc16(): diff --git a/tests/python/contrib/test_gemm_acc32_vnni.py b/tests/python/contrib/test_gemm_acc32_vnni.py index f723ccb1d235..37101a80ea77 100644 --- a/tests/python/contrib/test_gemm_acc32_vnni.py +++ b/tests/python/contrib/test_gemm_acc32_vnni.py @@ -19,8 +19,8 @@ import tvm from tvm import te import numpy as np -from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake -from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32 +from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake +from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32 import pytest diff --git a/tests/python/contrib/test_miopen.py b/tests/python/contrib/test_miopen.py index ed671e0c4810..deffbe9f4980 100644 --- a/tests/python/contrib/test_miopen.py +++ b/tests/python/contrib/test_miopen.py @@ -55,7 +55,7 @@ def test_conv2d(): data_type=1) yshape = [x.value for x in Y.shape] - import topi + from tvm import topi s = te.create_schedule(Y.op) def verify(): diff --git a/tests/python/contrib/test_mxnet_bridge.py b/tests/python/contrib/test_mxnet_bridge.py index 37c164483e18..230e8db4540d 100644 --- a/tests/python/contrib/test_mxnet_bridge.py +++ b/tests/python/contrib/test_mxnet_bridge.py @@ -22,7 +22,7 @@ def mxnet_check(): User can directly run this script to verify correctness. """ import mxnet as mx - import topi + from tvm import topi import tvm from tvm import te import numpy as np diff --git a/tests/python/contrib/test_nnpack.py b/tests/python/contrib/test_nnpack.py index 505199a55724..81fcb123ebc1 100644 --- a/tests/python/contrib/test_nnpack.py +++ b/tests/python/contrib/test_nnpack.py @@ -18,7 +18,7 @@ from tvm import te import numpy as np import scipy.signal -from topi.nn.util import get_pad_tuple +from tvm.topi.nn.util import get_pad_tuple from tvm.contrib import nnpack import pytest diff --git a/tests/python/contrib/test_tedd.py b/tests/python/contrib/test_tedd.py index 58ff06418201..c6c480e69b96 100644 --- a/tests/python/contrib/test_tedd.py +++ b/tests/python/contrib/test_tedd.py @@ -17,7 +17,7 @@ from tvm import te import numpy as np import re -import topi +from tvm import topi def findany(pattern, str): diff --git a/tests/python/frontend/coreml/test_forward.py b/tests/python/frontend/coreml/test_forward.py index 179f5b41c1d7..6f058f47a0ba 100644 --- a/tests/python/frontend/coreml/test_forward.py +++ b/tests/python/frontend/coreml/test_forward.py @@ -22,11 +22,11 @@ import tvm from tvm import te from tvm.contrib import graph_runtime -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing from tvm import relay from tvm.relay.testing.config import ctx_list -from topi.testing import conv2d_nchw_python +from tvm.topi.testing import conv2d_nchw_python import coremltools as cm import model_zoo @@ -186,11 +186,11 @@ def verify_UpsampleLayerParams(input_dim, scale, mode): a_np = np.full(input_dim, 1, dtype=dtype) if mode == 'NN': - b_np = topi.testing.upsampling_python(a_np, (scale, scale)) + b_np = tvm.topi.testing.upsampling_python(a_np, (scale, scale)) else: new_h = input_dim[2] * scale new_w = input_dim[3] * scale - b_np = topi.testing.bilinear_resize_python(a_np, (new_h, new_w), 'NCHW') + b_np = tvm.topi.testing.bilinear_resize_python(a_np, (new_h, new_w), 'NCHW') input = [('input', datatypes.Array(*input_dim))] 
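Note: the CoreML test above validates `Upsample` layers against `tvm.topi.testing.upsampling_python` (in 'NN' mode) or `bilinear_resize_python`. For the nearest-neighbour case the reference semantics reduce to pixel repetition, as in this hedged numpy sketch (`upsample_nearest_nchw` is an illustrative name, not the testing helper itself):

```python
import numpy as np

def upsample_nearest_nchw(x, scale):
    # Repeat every pixel `scale` times along both spatial axes of NCHW data.
    return x.repeat(scale, axis=2).repeat(scale, axis=3)

a = np.arange(4, dtype="float32").reshape(1, 1, 2, 2)
print(upsample_nearest_nchw(a, 2)[0, 0])
# [[0. 0. 1. 1.]
#  [0. 0. 1. 1.]
#  [2. 2. 3. 3.]
#  [2. 2. 3. 3.]]
```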
output = [('output', datatypes.Array(*b_np.shape))] @@ -215,7 +215,7 @@ def verify_l2_normalize(input_dim, eps): dtype = "float32" a_np = np.random.uniform(size=input_dim).astype(dtype) - b_np = topi.testing.l2_normalize_python(a_np, eps, 1) + b_np = tvm.topi.testing.l2_normalize_python(a_np, eps, 1) input = [('input', datatypes.Array(*input_dim))] output = [('output', datatypes.Array(*b_np.shape))] @@ -234,7 +234,7 @@ def verify_lrn(input_dim, size, bias, alpha, beta): dtype = "float32" axis=1 a_np = np.random.uniform(size=input_dim).astype(dtype) - b_np = topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta) + b_np = tvm.topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta) input = [('input', datatypes.Array(*input_dim))] output = [('output', datatypes.Array(*b_np.shape))] diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 8654bf0fdafd..56ea96d7b7e5 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -20,8 +20,8 @@ from onnx import helper, TensorProto, mapping import torch import torchvision -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing import tvm from tvm import te from tvm import relay @@ -615,7 +615,7 @@ def test_isnan(): def verify_gather_nd(in_shape, indices, dtype): x = np.random.uniform(size=in_shape).astype(dtype) indices = np.array(indices, dtype="int32") - out_np = topi.testing.gather_nd_python(x, indices) + out_np = tvm.topi.testing.gather_nd_python(x, indices) y = helper.make_node("GatherND", ['in', 'indices'], ['out']) @@ -823,7 +823,7 @@ def _test_upsample_nearest(): 'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0]) in_array = np.random.uniform(size=in_shape).astype(np.float32) - out_array = topi.testing.upsampling_python( + out_array = tvm.topi.testing.upsampling_python( in_array, (scale, scale), "NCHW") graph = helper.make_graph([y], @@ -848,7 +848,7 @@ def _test_upsample3d_nearest(): 'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0, 2.0]) in_array = np.random.uniform(size=in_shape).astype(np.float32) - out_array = topi.testing.upsampling3d_python( + out_array = tvm.topi.testing.upsampling3d_python( in_array, (scale, scale, scale), "NCDHW") graph = helper.make_graph([y], @@ -872,7 +872,7 @@ def _test_upsample_bilinear(): 'out'], mode='linear', scales=[1.0, 1.0, 2.0, 2.0]) in_array = np.random.uniform(size=in_shape).astype(np.float32) - out_array = topi.testing.bilinear_resize_python( + out_array = tvm.topi.testing.bilinear_resize_python( in_array, (3*scale, 3*scale), "NCHW") graph = helper.make_graph([y], @@ -896,7 +896,7 @@ def _test_upsample_bilinear_opset9(): y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear') scales = [1, 1, 2, 2] in_array = np.random.uniform(size=in_shape).astype(np.float32) - out_array = topi.testing.bilinear_resize_python( + out_array = tvm.topi.testing.bilinear_resize_python( in_array, (3*scale, 3*scale), "NCHW") ref_node = helper.make_node('Constant', @@ -931,7 +931,7 @@ def _test_upsample3d_trilinear(): y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear') scales = [1.0, 1.0, 2.0, 2.0, 2.0] in_array = np.random.uniform(size=in_shape).astype(np.float32) - out_array = topi.testing.trilinear_resize3d_python( + out_array = tvm.topi.testing.trilinear_resize3d_python( in_array, (3*scale, 3*scale, 3*scale), "NCDHW", coordinate_transformation_mode="half_pixel") ref_array = np.array(scales) @@ -968,7 +968,7 @@ def _test_softmax(inshape, axis): 
opname = 'Softmax' indata = np.random.uniform(size=inshape).astype(np.float32) outshape = inshape - outdata = topi.testing.softmax_python(indata) + outdata = tvm.topi.testing.softmax_python(indata) if isinstance(axis, int): y = helper.make_node(opname, ['in'], ['out'], axis=axis) elif axis is None: @@ -1705,7 +1705,7 @@ def Scale_x(x, scale): def test_LogSoftmax(): _test_onnx_op_elementwise((1, 4), - topi.testing.log_softmax_python, + tvm.topi.testing.log_softmax_python, {}, 'float32', 'LogSoftmax', diff --git a/tests/python/integration/test_winograd_nnpack.py b/tests/python/integration/test_winograd_nnpack.py index c974496d3ead..994a047df742 100644 --- a/tests/python/integration/test_winograd_nnpack.py +++ b/tests/python/integration/test_winograd_nnpack.py @@ -21,9 +21,9 @@ from tvm.autotvm.task.space import FallbackConfigEntity from tvm.contrib import nnpack from tvm.contrib.pickle_memoize import memoize -import topi -import topi.testing -from topi.util import get_const_tuple +from tvm import topi +import tvm.topi.testing +from tvm.topi.util import get_const_tuple from pytest import skip @@ -47,8 +47,8 @@ def get_ref_data(): a_np = np.random.uniform(size=a_shape).astype(dtype) w_np = np.random.uniform(size=w_shape).astype(dtype) b_np = np.random.uniform(size=bias_shape).astype(dtype) - dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation)) - c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding) + dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation)) + c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding) if add_bias: b_np = np.random.uniform(size=bias_shape).astype(dtype) c_np += b_np diff --git a/tests/python/relay/test_any.py b/tests/python/relay/test_any.py index f9d9c932faa5..a84020d5681a 100644 --- a/tests/python/relay/test_any.py +++ b/tests/python/relay/test_any.py @@ -22,7 +22,7 @@ from tvm import relay from tvm.relay.loops import while_loop from tvm.relay.testing import run_infer_type as infer_type -import topi.testing +import tvm.topi.testing def int32(val): return relay.const(val, 'int32') @@ -652,7 +652,7 @@ def verify_any_strided_slice(data_shape, begin_shape, end_shape, strides_shape, np_end = np.random.randint(5, 10, size=end_shape, dtype="int32") np_strides = np.random.randint(1, 2 if slice_mode == "size" else 3, size=strides_shape, dtype="int32") # target numpy result - ref_res = topi.testing.strided_slice_python(np_data, np_begin, np_end, np_strides, slice_mode) + ref_res = tvm.topi.testing.strided_slice_python(np_data, np_begin, np_end, np_strides, slice_mode) # Relay Module mod = tvm.IRModule() @@ -827,7 +827,7 @@ def verify_any_crop_and_resize(data_shape, boxes_shape, box_indices_shape, crop_ mod["main"] = relay.Function([data, boxes, box_indices], y) data_np = np.random.uniform(size=data_shape).astype(dtype) boxes_np = np.random.uniform(size=static_boxes).astype(dtype) - box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype) + box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype) for kind in ["debug", "vm"]: ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm") result = ex.evaluate()(data_np, boxes_np, box_indices_np) @@ -835,8 +835,8 @@ def verify_any_crop_and_resize(data_shape, boxes_shape, box_indices_shape, crop_ def test_any_crop_and_resize(): verify_any_crop_and_resize( - data_shape=(1, 234, 234, 256), - boxes_shape=(relay.Any(), 4), + data_shape=(1, 234, 234, 256), + boxes_shape=(relay.Any(), 4), 
diff --git a/tests/python/relay/test_any.py b/tests/python/relay/test_any.py
index f9d9c932faa5..a84020d5681a 100644
--- a/tests/python/relay/test_any.py
+++ b/tests/python/relay/test_any.py
@@ -22,7 +22,7 @@ from tvm import relay
 from tvm.relay.loops import while_loop
 from tvm.relay.testing import run_infer_type as infer_type
-import topi.testing
+import tvm.topi.testing
 def int32(val):
     return relay.const(val, 'int32')
@@ -652,7 +652,7 @@ def verify_any_strided_slice(data_shape, begin_shape, end_shape, strides_shape,
     np_end = np.random.randint(5, 10, size=end_shape, dtype="int32")
     np_strides = np.random.randint(1, 2 if slice_mode == "size" else 3, size=strides_shape, dtype="int32")
     # target numpy result
-    ref_res = topi.testing.strided_slice_python(np_data, np_begin, np_end, np_strides, slice_mode)
+    ref_res = tvm.topi.testing.strided_slice_python(np_data, np_begin, np_end, np_strides, slice_mode)
     # Relay Module
     mod = tvm.IRModule()
@@ -827,7 +827,7 @@ def verify_any_crop_and_resize(data_shape, boxes_shape, box_indices_shape, crop_
     mod["main"] = relay.Function([data, boxes, box_indices], y)
     data_np = np.random.uniform(size=data_shape).astype(dtype)
     boxes_np = np.random.uniform(size=static_boxes).astype(dtype)
-    box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype)   
+    box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype)
     for kind in ["debug", "vm"]:
         ex = relay.create_executor(kind, mod=mod, ctx=tvm.cpu(), target="llvm")
         result = ex.evaluate()(data_np, boxes_np, box_indices_np)
@@ -835,8 +835,8 @@ def test_any_crop_and_resize():
     verify_any_crop_and_resize(
-        data_shape=(1, 234, 234, 256), 
-        boxes_shape=(relay.Any(), 4), 
+        data_shape=(1, 234, 234, 256),
+        boxes_shape=(relay.Any(), 4),
         box_indices_shape=(relay.Any(),),
         crop_size=(14, 14),
         layout='NHWC',
@@ -844,8 +844,8 @@ def test_any_crop_and_resize():
         static_box_indices_shape=(128,),
         ref_out_shape=(128, 14, 14, 256))
     verify_any_crop_and_resize(
-        data_shape=(1, 256, 234, 234), 
-        boxes_shape=(relay.Any(), 4), 
+        data_shape=(1, 256, 234, 234),
+        boxes_shape=(relay.Any(), 4),
         box_indices_shape=(relay.Any(),),
         crop_size=(14, 14),
         layout='NCHW',
diff --git a/tests/python/relay/test_backend_compile_engine.py b/tests/python/relay/test_backend_compile_engine.py
index 1b4e08f7eb7b..6bc170d2b5af 100644
--- a/tests/python/relay/test_backend_compile_engine.py
+++ b/tests/python/relay/test_backend_compile_engine.py
@@ -20,7 +20,7 @@ import tvm.testing
 from tvm import relay
 from tvm import autotvm
-import topi
+from tvm import topi
 from tvm.relay.testing import run_infer_type
 from tvm.relay.testing.temp_op_attr import TempOpAttr
diff --git a/tests/python/relay/test_op_fast_math.py b/tests/python/relay/test_op_fast_math.py
index a771d29a431d..cb959555fc8c 100644
--- a/tests/python/relay/test_op_fast_math.py
+++ b/tests/python/relay/test_op_fast_math.py
@@ -19,7 +19,7 @@ from scipy import special
 import tvm
 import tvm.relay as relay
-import topi
+from tvm import topi
 from tvm import te
 from tvm.contrib import graph_runtime
diff --git a/tests/python/relay/test_op_grad_level2.py b/tests/python/relay/test_op_grad_level2.py
index 7985836ee002..8b434d62a3b9 100644
--- a/tests/python/relay/test_op_grad_level2.py
+++ b/tests/python/relay/test_op_grad_level2.py
@@ -16,8 +16,8 @@
 # under the License.
 import numpy as np
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import tvm
 from tvm import te
 from tvm import relay
@@ -38,9 +38,10 @@ def verify_max_pool2d_grad(x_shape, pool_size, strides, padding, ceil_mode):
     ph, pw = padding
     y_shape = topi.util.get_const_tuple(fwd_func.ret_type.shape)
     out_grad = np.ones(shape=y_shape)
-    ref_grad = topi.testing.pool_grad_nchw(data, out_grad, pool_size=pool_size, strides=strides,
-                                           padding=[ph, pw, ph, pw],
-                                           pool_type='max', ceil_mode=ceil_mode)
+    ref_grad = tvm.topi.testing.pool_grad_nchw(
+        data, out_grad, pool_size=pool_size, strides=strides,
+        padding=[ph, pw, ph, pw],
+        pool_type='max', ceil_mode=ceil_mode)
     for target, ctx in ctx_list():
         intrp = relay.create_executor(ctx=ctx, target=target)
@@ -66,9 +67,10 @@ def verify_avg_pool2d_grad(x_shape, pool_size, strides, padding, ceil_mode, coun
     ph, pw = padding
     y_shape = topi.util.get_const_tuple(fwd_func.ret_type.shape)
     out_grad = np.ones(shape=y_shape)
-    ref_grad = topi.testing.pool_grad_nchw(data, out_grad, pool_size=pool_size, strides=strides,
-                                           padding=[ph, pw, ph, pw],
-                                           pool_type='avg', ceil_mode=ceil_mode)
+    ref_grad = tvm.topi.testing.pool_grad_nchw(
+        data, out_grad, pool_size=pool_size, strides=strides,
+        padding=[ph, pw, ph, pw],
+        pool_type='avg', ceil_mode=ceil_mode)
     for target, ctx in ctx_list():
         intrp = relay.create_executor(ctx=ctx, target=target)
@@ -93,9 +95,10 @@ def verify_global_avg_pool2d_grad(x_shape):
     data = np.random.rand(*x_shape).astype("float32")
     y_shape = topi.util.get_const_tuple(fwd_func.ret_type.shape)
     out_grad = np.ones(shape=y_shape)
-    ref_grad = topi.testing.pool_grad_nchw(data, out_grad, pool_size=(x_shape[2], x_shape[3]),
-                                           strides=(1, 1), padding=[0, 0, 0, 0], pool_type='avg',
-                                           ceil_mode=False)
+    ref_grad = tvm.topi.testing.pool_grad_nchw(
+        data, out_grad, pool_size=(x_shape[2], x_shape[3]),
+        strides=(1, 1), padding=[0, 0, 0, 0], pool_type='avg',
+        ceil_mode=False)
     for target, ctx in ctx_list():
         intrp = relay.create_executor(ctx=ctx, target=target)
diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py
index c58ff0f1aaba..4616a14dcdde 100644
--- a/tests/python/relay/test_op_level1.py
+++ b/tests/python/relay/test_op_level1.py
@@ -22,7 +22,7 @@ from tvm import relay
 from tvm.relay import transform
 from tvm.relay.testing import ctx_list, run_infer_type
-import topi.testing
+import tvm.topi.testing
 from tvm.contrib.nvcc import have_fp16
@@ -196,7 +196,7 @@ def test_softmax():
     assert yy.checked_type == relay.TensorType(shape, dtype)
     func = relay.Function([x], y)
     x_data = np.random.uniform(size=shape).astype(dtype)
-    ref_res = topi.testing.softmax_python(x_data)
+    ref_res = tvm.topi.testing.softmax_python(x_data)
     for target, ctx in ctx_list():
         intrp = relay.create_executor("graph", ctx=ctx, target=target)
         op_res = intrp.evaluate(func)(x_data)
@@ -216,7 +216,7 @@ def test_log_softmax():
     assert yy.checked_type == relay.TensorType(shape, dtype)
     func = relay.Function([x], y)
     x_data = np.random.uniform(size=shape).astype(dtype)
-    ref_res = topi.testing.log_softmax_python(x_data)
+    ref_res = tvm.topi.testing.log_softmax_python(x_data)
     for target, ctx in ctx_list():
         intrp = relay.create_executor("graph", ctx=ctx, target=target)
         op_res = intrp.evaluate(func)(x_data)
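Note: the `*_python` helpers under `tvm.topi.testing` are plain NumPy oracles that the Relay results above are compared against. A hypothetical standalone use of the softmax oracle (the shape and tolerance are illustrative, not from this patch):

```python
import numpy as np
import tvm.topi.testing

x = np.random.uniform(size=(10, 4)).astype("float32")
ref = tvm.topi.testing.softmax_python(x)  # row-wise softmax reference
# Each softmax row sums to one, so the oracle can be sanity-checked directly:
np.testing.assert_allclose(ref.sum(axis=-1), np.ones(10), rtol=1e-5)
```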
diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py
index a79f1a514fa7..f65407acbcc9 100644
--- a/tests/python/relay/test_op_level10.py
+++ b/tests/python/relay/test_op_level10.py
@@ -19,12 +19,12 @@ import numpy as np
 import tvm
 from tvm import te
-import topi.testing
+import tvm.topi.testing
 from tvm import relay
 from tvm.relay import transform
 from tvm.relay.testing import ctx_list, run_infer_type
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 def test_checkpoint():
@@ -213,7 +213,7 @@ def test_broadcast_to_like():
     x = relay.Var("x", relay.ty.TensorType(shape , dtype))
     y = relay.Var("y", relay.ty.TensorType(shape_like, dtype))
     z = relay.broadcast_to_like(x, y)
-    
+
     zz = run_infer_type(z)
     assert zz.checked_type == relay.ty.TensorType(shape_like, dtype)
@@ -221,7 +221,7 @@ def test_broadcast_to_like():
     x = np.random.uniform(size=shape).astype(dtype)
     y = np.random.uniform(size=shape_like).astype(dtype)
     ref_res = np.broadcast_to(x, shape_like)
-    
+
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
             intrp = relay.create_executor(kind, ctx=ctx, target=target)
@@ -318,7 +318,7 @@ def verify_batch_matmul(x_shape, y_shape, out_shape, dtype="float32"):
     func = relay.Function([x, y], z)
     x_np = np.random.uniform(size=x_shape).astype(dtype)
     y_np = np.random.uniform(size=y_shape).astype(dtype)
-    z_np = topi.testing.batch_matmul(x_np, y_np)
+    z_np = tvm.topi.testing.batch_matmul(x_np, y_np)
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
@@ -378,7 +378,7 @@ def verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc):
     func = relay.Function([x], y)
     np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
-    np_out = topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
+    np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
     for target, ctx in ctx_list():
         intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -421,7 +421,7 @@ def _verify(data_shape, mask_value, axis, dtype, itype):
     func = relay.Function([data, valid_length], out)
     data_np = np.random.uniform(size=data_shape).astype(dtype)
     valid_length_np = np.random.randint(0, max_length, size=nbatch).astype(itype)
-    gt_out_np = topi.testing.sequence_mask(data_np, valid_length_np, mask_value, axis)
+    gt_out_np = tvm.topi.testing.sequence_mask(data_np, valid_length_np, mask_value, axis)
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
@@ -456,7 +456,7 @@ def _verify(indices_shape, depth, on_value, off_value, axis, dtype):
     assert checked.checked_type == relay.ty.TensorType(_get_oshape(indices_shape, depth, axis), dtype)
     func = relay.Function([indices], out)
     indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
-    out_np = topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
+    out_np = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
     for target, ctx in ctx_list():
         for kind in ["graph", "debug"]:
diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py
index cd54d9f505ed..b26d6e4046a6 100644
--- a/tests/python/relay/test_op_level2.py
+++ b/tests/python/relay/test_op_level2.py
@@ -24,8 +24,8 @@ from tvm.relay import transform
 from tvm.relay.testing import ctx_list, run_infer_type
 from tvm.contrib import util
-import topi.testing
-from topi.cuda.conv3d_winograd import _infer_tile_size
+import tvm.topi.testing
+from tvm.topi.cuda.conv3d_winograd import _infer_tile_size
 def test_conv1d_infer_type():
@@ -97,7 +97,7 @@ def run_test_conv1d(dtype, out_dtype, scale, dshape, kshape,
         func = relay.Function([x, w], y)
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        ref_res = topi.testing.conv1d_ncw_python(
+        ref_res = tvm.topi.testing.conv1d_ncw_python(
             data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation)
         for target, ctx in ctx_list():
@@ -210,9 +210,9 @@ def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape,
         func = relay.Function([x, w], y)
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
+        dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
         if fref is None:
-            ref_res = topi.testing.conv2d_nchw_python(
+            ref_res = tvm.topi.testing.conv2d_nchw_python(
                 data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
                 groups=groups)
         else:
@@ -271,7 +271,7 @@ def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape,
     kshape = (32, 1, 3, 3)
     run_test_conv2d("float32", "float32", 1, dshape, kshape,
                     padding=(1, 1), channels=32, groups=32, kernel_size=(3 ,3),
-                    fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw(
+                    fref=lambda x, w: tvm.topi.testing.depthwise_conv2d_python_nchw(
                         x, w, (1, 1), "SAME"))
     # depthwise conv2d for arm_cpu
@@ -352,7 +352,7 @@ def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape,
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        ref_res = topi.testing.conv2d_nchw_python(
+        ref_res = tvm.topi.testing.conv2d_nchw_python(
             data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,
             groups=groups)
@@ -456,9 +456,9 @@ def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape,
         func = relay.Function([x, w], y)
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
+        dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
         if fref is None:
-            ref_res = topi.testing.conv3d_ncdhw_python(
+            ref_res = tvm.topi.testing.conv3d_ncdhw_python(
                 data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
                 groups=groups)
         else:
@@ -501,9 +501,9 @@ def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape,
         func = relay.Function([x, w], y)
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
+        dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
         if fref is None:
-            ref_res = topi.testing.conv3d_ndhwc_python(
+            ref_res = tvm.topi.testing.conv3d_ndhwc_python(
                 data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding)
         else:
             ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
@@ -574,7 +574,7 @@ def run_test_conv3d_cuda(dtype, out_dtype, scale, dshape, kshape,
         data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
         kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
-        ref_res = topi.testing.conv3d_ncdhw_python(
+        ref_res = tvm.topi.testing.conv3d_ncdhw_python(
             data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,
             groups=groups)
@@ -664,7 +664,7 @@ def test_conv3d_transpose_ncdhw_run():
     data = np.random.uniform(size=dshape).astype(dtype)
     kernel = np.random.uniform(size=kshape).astype(dtype)
-    ref_res = topi.testing.conv3d_transpose_ncdhw_python(data, kernel, 1, 1)
+    ref_res = tvm.topi.testing.conv3d_transpose_ncdhw_python(data, kernel, 1, 1)
     for target, ctx in ctx_list():
         intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -714,7 +714,7 @@ def test_conv2d_transpose_nchw_run():
     dtype = "float32"
     data = np.random.uniform(size=dshape).astype(dtype)
     kernel = np.random.uniform(size=kshape).astype(dtype)
-    ref_res = topi.testing.conv2d_transpose_nchw_python(
+    ref_res = tvm.topi.testing.conv2d_transpose_nchw_python(
         data, kernel, 2, 1, (1, 1))
     for target, ctx in ctx_list():
@@ -741,7 +741,7 @@ def test_conv2d_transpose_nhwc_run():
     kernel = np.random.uniform(size=kshape_hwoi).astype(dtype)
     # use true kshape layout here - HWOI
-    ref_res = topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI',
+    ref_res = tvm.topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI',
                                                         2, 1, output_padding=(1, 1))
     for target, ctx in ctx_list():
@@ -763,7 +763,7 @@ def test_conv1d_transpose_ncw_run():
     dtype = "float32"
     data = np.random.uniform(size=dshape).astype(dtype)
     kernel = np.random.uniform(size=kshape).astype(dtype)
-    ref_res = topi.testing.conv1d_transpose_ncw_python(
+    ref_res = tvm.topi.testing.conv1d_transpose_ncw_python(
         data, kernel, 2, 1, output_padding=(1,))
     for target, ctx in ctx_list():
@@ -900,7 +900,7 @@ def _test_pool1d(opfunc, pool_size=(2,), strides=(2,), padding=(0, 0)):
     y = opfunc(x, pool_size=pool_size, strides=strides, padding=padding)
     func = relay.Function([x], y)
     data = np.random.uniform(size=dshape).astype(dtype)
-    ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,),
+    ref_res = tvm.topi.testing.pool1d_ncw_python(data, (2,), (2,),
                                              (0, 0), (1, 3, 16), pool_type, False)
     for target, ctx in ctx_list():
         intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -938,7 +938,7 @@ def _test_pool3d(opfunc,
     assert out_shape == f_out_shape, \
         "Output shape mismatch. expected {}, actual {}".format(out_shape, f_out_shape)
     data = np.random.uniform(size=dshape).astype(dtype)
-    ref_res = topi.testing.pool3d_ncdhw_python(data, pool_size, strides,
+    ref_res = tvm.topi.testing.pool3d_ncdhw_python(data, pool_size, strides,
                                                padding, out_shape, pool_type, False)
     for target, ctx in ctx_list():
         intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -1080,7 +1080,7 @@ def test_lrn():
     assert yy.checked_type == relay.TensorType(shape, dtype)
     func = relay.Function([x], z)
     x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
-    ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
+    ref_res = tvm.topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
     for target, ctx in ctx_list():
         intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -1108,7 +1108,7 @@ def test_l2_normalize():
     assert yy.checked_type == relay.TensorType(shape, dtype)
     func = relay.Function([x], z)
     x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
-    ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)
+    ref_res = tvm.topi.testing.l2_normalize_python(x_data, eps, axis)
     for target, ctx in ctx_list():
         intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
@@ -1163,9 +1163,9 @@ def get_shape():
     func = relay.Function([x], y)
     data = np.random.uniform(size=dshape).astype(dtype)
     if method == "nearest_neighbor":
-        ref = topi.testing.upsampling_python(data, (scale_h, scale_w), layout)
+        ref = tvm.topi.testing.upsampling_python(data, (scale_h, scale_w), layout)
     else:
-        ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)),
+        ref = tvm.topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)),
                                                   int(round(w*scale_w))), layout)
     for target, ctx in ctx_list():
         executor = relay.create_executor("graph", ctx=ctx, target=target)
@@ -1208,9 +1208,9 @@ def get_shape():
     func = relay.Function([x], y)
     data = np.random.uniform(size=dshape).astype(dtype)
     if method == "nearest_neighbor":
-        ref = topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout)
+        ref = tvm.topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout)
     else:
-        ref = topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\
+        ref = tvm.topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\
                                                      int(round(h*scale_h)),\
                                                      int(round(w*scale_w))), layout)
     for target, ctx in ctx_list():
@@ -1421,7 +1421,7 @@ def _test_correlation(data_shape, kernel_size, max_displacement, stride1, stride
     func = relay.Function([data1, data2], y)
     data1_np = np.random.uniform(size=data_shape).astype(dtype)
     data2_np = np.random.uniform(size=data_shape).astype(dtype)
-    ref_res = topi.testing.correlation_nchw_python(data1_np, data2_np, kernel_size, max_displacement, stride1, stride2, padding, is_multiply)
+    ref_res = tvm.topi.testing.correlation_nchw_python(data1_np, data2_np, kernel_size, max_displacement, stride1, stride2, padding, is_multiply)
     for target, ctx in ctx_list():
         intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
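Note: the conv tests above all build their expected outputs the same way, dilate the kernel with `dilate_python`, then run a plain NumPy convolution. A minimal sketch of that reference-data pattern (shapes are made up for illustration):

```python
import numpy as np
import tvm.topi.testing

a_np = np.random.uniform(size=(1, 3, 8, 8)).astype("float32")  # NCHW input
w_np = np.random.uniform(size=(4, 3, 3, 3)).astype("float32")  # OIHW kernel
dilation = 2
# Dilation is folded into the kernel first, then an ordinary conv2d runs:
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, 1, "SAME")
```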
slice_mode="end", # target numpy result x_data = np.random.uniform(size=dshape).astype("float32") - ref_res = topi.testing.strided_slice_python( + ref_res = tvm.topi.testing.strided_slice_python( x_data, begin, end, strides, slice_mode) if attr_const: @@ -380,7 +380,7 @@ def verify(dshape, begin, end, strides, vshape, test_ref=True): return x_data = np.random.uniform(size=dshape).astype("float32") v_data = np.random.uniform(size=vshape).astype("float32") - ref_res = topi.testing.strided_set_python( + ref_res = tvm.topi.testing.strided_set_python( x_data, v_data, begin, end, strides) for target, ctx in ctx_list(): intrp = relay.create_executor("graph", ctx=ctx, target=target) diff --git a/tests/python/relay/test_op_level5.py b/tests/python/relay/test_op_level5.py index 3a94fc69e001..70678031156d 100644 --- a/tests/python/relay/test_op_level5.py +++ b/tests/python/relay/test_op_level5.py @@ -23,7 +23,7 @@ from tvm import relay from tvm.relay import transform from tvm.relay.testing import ctx_list, run_infer_type -import topi.testing +import tvm.topi.testing def test_resize_infer_type(): @@ -49,9 +49,9 @@ def verify_resize(dshape, scale, method, layout): x_data = np.random.uniform(size=dshape).astype("float32") if method == "bilinear": - ref_res = topi.testing.bilinear_resize_python(x_data, size, layout) + ref_res = tvm.topi.testing.bilinear_resize_python(x_data, size, layout) else: - ref_res = topi.testing.upsampling_python(x_data, (scale, scale), layout) + ref_res = tvm.topi.testing.upsampling_python(x_data, (scale, scale), layout) x = relay.var("x", relay.TensorType(dshape, "float32")) z = relay.image.resize(x, size, layout, method, "align_corners") assert "size=" in z.astext() @@ -91,9 +91,9 @@ def verify_resize(dshape, scale, method, layout): x_data = np.random.uniform(size=dshape).astype("float32") if method == "trilinear": - ref_res = topi.testing.trilinear_resize3d_python(x_data, size, layout) + ref_res = tvm.topi.testing.trilinear_resize3d_python(x_data, size, layout) else: - ref_res = topi.testing.upsampling3d_python(x_data, (scale, scale, scale), layout) + ref_res = tvm.topi.testing.upsampling3d_python(x_data, (scale, scale, scale), layout) x = relay.var("x", relay.TensorType(dshape, "float32")) z = relay.image.resize3d(x, size, layout, method, "align_corners") assert "size=" in z.astext() @@ -116,7 +116,7 @@ def verify_crop_and_resize(img_shape, boxes, box_indices, crop_size, image_data = np.random.uniform(size=img_shape).astype("float32") - ref_res = topi.testing.crop_and_resize_python(image_data, + ref_res = tvm.topi.testing.crop_and_resize_python(image_data, boxes, box_indices, crop_size, @@ -463,7 +463,7 @@ def verify_roi_align(data_shape, rois_shape, pooled_size, spatial_scale, sample_ np_data = np.random.uniform(size=data_shape).astype("float32") np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi) - ref_res = topi.testing.roi_align_nchw_python(np_data, np_rois, pooled_size=pooled_size, + ref_res = tvm.topi.testing.roi_align_nchw_python(np_data, np_rois, pooled_size=pooled_size, spatial_scale=spatial_scale, sample_ratio=sample_ratio) for target, ctx in ctx_list(): @@ -495,7 +495,7 @@ def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale): np_data = np.random.uniform(size=data_shape).astype("float32") np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi).astype('float32') - 
ref_res = topi.testing.roi_pool_nchw_python(np_data, np_rois, pooled_size=pooled_size, + ref_res = tvm.topi.testing.roi_pool_nchw_python(np_data, np_rois, pooled_size=pooled_size, spatial_scale=spatial_scale) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) @@ -590,7 +590,7 @@ def verify_yolo_reorg(shape, stride, out_shape): def test_yolo_reorg(): def verify_yolo_reorg(shape, stride): x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32") - ref_res = topi.testing.reorg_python(x_data, stride) + ref_res = tvm.topi.testing.reorg_python(x_data, stride) x = relay.var("x", relay.TensorType(shape, "float32")) z = relay.vision.yolo_reorg(x, stride=stride) @@ -658,7 +658,7 @@ def test_run(batch, in_channel, size, out_channel, deformable_groups, groups): data = np.random.uniform(size=data_shape).astype(dtype) offset = np.random.uniform(size=offset_shape).astype(dtype) kernel = np.random.uniform(size=kernel_shape).astype(dtype) - ref_res = topi.testing.deformable_conv2d_nchw_python(data, offset, kernel, stride=(1, 1), padding=(1, 1), dilation=(1, 1), deformable_groups=deformable_groups, groups=groups) + ref_res = tvm.topi.testing.deformable_conv2d_nchw_python(data, offset, kernel, stride=(1, 1), padding=(1, 1), dilation=(1, 1), deformable_groups=deformable_groups, groups=groups) for target, ctx in ctx_list(): for kind in ["graph", "debug"]: @@ -679,7 +679,7 @@ def verify_depth_to_space(dshape, block_size, layout, mode): x_data = np.random.uniform(size=dshape).astype("float32") if layout == "NHWC": x_data = np.transpose(x_data, axes=[0, 3, 1, 2]) - ref_res = topi.testing.depth_to_space_python(x_data, block_size, mode=mode) + ref_res = tvm.topi.testing.depth_to_space_python(x_data, block_size, mode=mode) if layout == "NHWC": x_data = np.transpose(x_data, axes=[0, 2, 3, 1]) ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1]) @@ -711,7 +711,7 @@ def verify_space_to_depth(dshape, block_size, layout): x_data = np.random.uniform(size=dshape).astype("float32") if layout == "NHWC": x_data = np.transpose(x_data, axes=[0, 3, 1, 2]) - ref_res = topi.testing.space_to_depth_python(x_data, block_size) + ref_res = tvm.topi.testing.space_to_depth_python(x_data, block_size) if layout == "NHWC": x_data = np.transpose(x_data, axes=[0, 2, 3, 1]) ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1]) @@ -850,7 +850,7 @@ def verify_affine_grid(num_batch, target_shape): func = relay.Function([data], y) data_np = np.random.uniform(size=data_shape).astype(dtype) - ref_res = topi.testing.affine_grid_python(data_np, target_shape) + ref_res = tvm.topi.testing.affine_grid_python(data_np, target_shape) for target, ctx in ctx_list(): for kind in ["graph", "debug"]: @@ -876,7 +876,7 @@ def verify_grid_sample(data_shape, grid_shape): data_np = np.random.uniform(size=data_shape).astype(dtype) grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype) - ref_res = topi.testing.grid_sample_nchw_python(data_np, grid_np, method='bilinear') + ref_res = tvm.topi.testing.grid_sample_nchw_python(data_np, grid_np, method='bilinear') for target, ctx in ctx_list(): for kind in ["graph", "debug"]: diff --git a/tests/python/relay/test_op_qnn_concatenate.py b/tests/python/relay/test_op_qnn_concatenate.py index fb60e9805206..19025c74733f 100644 --- a/tests/python/relay/test_op_qnn_concatenate.py +++ b/tests/python/relay/test_op_qnn_concatenate.py @@ -20,7 +20,7 @@ import numpy as np from tvm import relay from tvm.contrib import graph_runtime -import topi.testing +import 
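Note: `strided_slice_python` mirrors NumPy basic slicing, which makes it easy to check in isolation. A hypothetical example (arguments invented for illustration; `"end"` is the slice mode the hunks above pass through):

```python
import numpy as np
import tvm.topi.testing

x = np.arange(24, dtype="float32").reshape(2, 3, 4)
ref = tvm.topi.testing.strided_slice_python(x, [0, 1, 0], [2, 3, 4], [1, 1, 1], "end")
np.testing.assert_allclose(ref, x[0:2, 1:3, 0:4])  # same result as plain slicing
```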
diff --git a/tests/python/relay/test_op_qnn_concatenate.py b/tests/python/relay/test_op_qnn_concatenate.py
index fb60e9805206..19025c74733f 100644
--- a/tests/python/relay/test_op_qnn_concatenate.py
+++ b/tests/python/relay/test_op_qnn_concatenate.py
@@ -20,7 +20,7 @@ import numpy as np
 from tvm import relay
 from tvm.contrib import graph_runtime
-import topi.testing
+import tvm.topi.testing
 def test_same_io_qnn_params():
     data_dtype = 'int32'
diff --git a/tests/python/relay/test_op_qnn_mul.py b/tests/python/relay/test_op_qnn_mul.py
index 6516871d3fb5..4fbb4e94912b 100644
--- a/tests/python/relay/test_op_qnn_mul.py
+++ b/tests/python/relay/test_op_qnn_mul.py
@@ -20,7 +20,7 @@ import numpy as np
 from tvm import relay
 from tvm.contrib import graph_runtime
-import topi.testing
+import tvm.topi.testing
 # "unquantize" a quantized tensor
 def recover(data, scale, zp):
diff --git a/tests/python/relay/test_pass_alter_op_layout.py b/tests/python/relay/test_pass_alter_op_layout.py
index 77105f0f4cab..85dd2edb9185 100644
--- a/tests/python/relay/test_pass_alter_op_layout.py
+++ b/tests/python/relay/test_pass_alter_op_layout.py
@@ -681,7 +681,7 @@ def before():
         y = relay.Function(analysis.free_vars(y), y)
         return y
-    import topi
+    from tvm import topi
     def alter_conv2d(attrs, inputs, tinfos, out_type):
         with tvm.target.create("llvm"):
             return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
@@ -1016,7 +1016,7 @@ def expected_nhwc():
 def test_alter_layout_nhwc_arm():
     """ Check that AlterOplayout does not alter NHWC data layout. """
     def alter_conv2d(attrs, inputs, tinfos, out_type):
-        import topi
+        from tvm import topi
        with tvm.target.create("llvm -device=arm_cpu"):
             return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
@@ -1077,7 +1077,7 @@ def update(self, target, workload, cfg):
         self.memory[key] = cfg
     def alter_conv2d(attrs, inputs, tinfos, out_type):
-        import topi
+        from tvm import topi
         with tvm.target.create("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu"):
             with Int8Fallback():
                 tmp = topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
diff --git a/topi/tests/python/common.py b/tests/python/topi/python/common.py
similarity index 98%
rename from topi/tests/python/common.py
rename to tests/python/topi/python/common.py
index eeaf6325cec2..735072c1ca4d 100644
--- a/topi/tests/python/common.py
+++ b/tests/python/topi/python/common.py
@@ -20,7 +20,7 @@ from tvm import te
 from tvm import autotvm
 from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
+from tvm import topi
 def get_all_backend():
     """return all supported target
diff --git a/topi/tests/python/test_fifo_buffer.py b/tests/python/topi/python/test_fifo_buffer.py
similarity index 95%
rename from topi/tests/python/test_fifo_buffer.py
rename to tests/python/topi/python/test_fifo_buffer.py
index 676c1f975c93..9af30f9dc779 100644
--- a/topi/tests/python/test_fifo_buffer.py
+++ b/tests/python/topi/python/test_fifo_buffer.py
@@ -18,8 +18,8 @@
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import numpy as np
 from tvm.contrib.pickle_memoize import memoize
@@ -55,7 +55,7 @@ def check_device(device):
         with tvm.target.create(device):
             out = topi.nn.fifo_buffer(data, buffer, axis=axis)
-            s = topi.testing.get_injective_schedule(device)([out])
+            s = tvm.topi.testing.get_injective_schedule(device)([out])
         buffer_tvm = tvm.nd.array(buffer_np, ctx=ctx)
         data_tvm = tvm.nd.array(data_np, ctx=ctx)
@@ -129,11 +129,11 @@ def check_device(device):
             return
         print(' Running on target: {}'.format(device))
-        conv2d_nchw, schedule_conv2d_nchw = topi.testing.get_conv2d_nchw_implement(device)
+        conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(device)
         with tvm.target.create(device):
             out = topi.nn.fifo_buffer(inc_input, context, axis=buffer_axis)
-            s = topi.testing.get_injective_schedule(device)([out])
+            s = tvm.topi.testing.get_injective_schedule(device)([out])
             update_context = tvm.build(s, [inc_input, context, out], device, name='update_context')
             out = conv2d_nchw(context, kernel, stride, padding, dilate, dtype)
@@ -141,12 +141,12 @@ def check_device(device):
             conv2d_inc = tvm.build(s, [context, kernel, out], device, name='conv2d_inc')
             out = topi.nn.fifo_buffer(inc_output, output_window, axis=buffer_axis)
-            s = topi.testing.get_injective_schedule(device)([out])
+            s = tvm.topi.testing.get_injective_schedule(device)([out])
             update_output_window = tvm.build(s, [inc_output, output_window, out], device,
                                              name='update_output_window')
             out = topi.nn.fifo_buffer(inc_input, input_window, axis=buffer_axis)
-            s = topi.testing.get_injective_schedule(device)([out])
+            s = tvm.topi.testing.get_injective_schedule(device)([out])
             update_input_window = tvm.build(s, [inc_input, input_window, out], device,
                                             name='update_input_window')
diff --git a/topi/tests/python/test_topi_basic.py b/tests/python/topi/python/test_topi_basic.py
similarity index 97%
rename from topi/tests/python/test_topi_basic.py
rename to tests/python/topi/python/test_topi_basic.py
index a83ff50bd5b1..e1e5cf8674fe 100644
--- a/topi/tests/python/test_topi_basic.py
+++ b/tests/python/topi/python/test_topi_basic.py
@@ -16,8 +16,8 @@
 # under the License.
 import tvm
 from tvm import te
-import topi
-from topi import util
+from tvm import topi
+from tvm.topi import util
 def test_util():
diff --git a/topi/tests/python/test_topi_batch_matmul.py b/tests/python/topi/python/test_topi_batch_matmul.py
similarity index 91%
rename from topi/tests/python/test_topi_batch_matmul.py
rename to tests/python/topi/python/test_topi_batch_matmul.py
index 716f40700339..c8cddb661dca 100644
--- a/topi/tests/python/test_topi_batch_matmul.py
+++ b/tests/python/topi/python/test_topi_batch_matmul.py
@@ -18,9 +18,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.pickle_memoize import memoize
 from common import get_all_backend
@@ -41,7 +41,7 @@ def verify_batch_matmul(batch, M, N, K):
     def get_ref_data():
         a_np = np.random.uniform(size=(batch, M, K)).astype(dtype)
         b_np = np.random.uniform(size=(batch, N, K)).astype(dtype)
-        c_np = topi.testing.batch_matmul(a_np, b_np)
+        c_np = tvm.topi.testing.batch_matmul(a_np, b_np)
         return (a_np, b_np, c_np)
     # get the test data
     a_np, b_np, c_np = get_ref_data()
@@ -53,7 +53,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _batch_matmul_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _batch_matmul_implement)
             out = fcompute(x, y)
             s = fschedule([out])
         a = tvm.nd.array(a_np, ctx)
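Note: `tvm.topi.testing.dispatch` looks a (compute, schedule) pair up in a per-target table, falling back to the `"generic"` entry; the `_*_implement` dicts in these tests all follow that shape. A sketch with a hypothetical table name (`_matmul_implement` is invented here; the entries mirror the batch_matmul test above):

```python
import tvm
import tvm.topi.testing
from tvm import te, topi

_matmul_implement = {
    "generic": (topi.nn.batch_matmul, topi.generic.schedule_batch_matmul),
    "cpu": (topi.x86.batch_matmul, topi.x86.schedule_batch_matmul),
}
x = te.placeholder((1, 16, 32), name="x")
y = te.placeholder((1, 24, 32), name="y")
with tvm.target.create("llvm"):
    # "llvm" carries the "cpu" target key, so the x86 implementation is chosen.
    fcompute, fschedule = tvm.topi.testing.dispatch("llvm", _matmul_implement)
    out = fcompute(x, y)
    s = fschedule([out])
```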
diff --git a/topi/tests/python/test_topi_bitserial_conv2d.py b/tests/python/topi/python/test_topi_bitserial_conv2d.py
similarity index 91%
rename from topi/tests/python/test_topi_bitserial_conv2d.py
rename to tests/python/topi/python/test_topi_bitserial_conv2d.py
index 44811d189189..74a3a03d2b4b 100644
--- a/topi/tests/python/test_topi_bitserial_conv2d.py
+++ b/tests/python/topi/python/test_topi_bitserial_conv2d.py
@@ -17,9 +17,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.pickle_memoize import memoize
 def generate_quantized_np(shape, bits, out_dtype):
@@ -51,9 +51,9 @@ def get_ref_data():
             w_ = np.copy(w_np).astype(out_dtype)
             for x in np.nditer(w_, op_flags=['readwrite']):
                 x[...] = 1 if x == 1 else -1
-            b_np = topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
+            b_np = tvm.topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
         else:
-            b_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
+            b_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
         return a_np, w_np, b_np
     a_np, w_np, b_np = get_ref_data()
@@ -89,9 +89,9 @@ def get_ref_data():
             w_ = np.copy(w_np).astype(out_dtype)
             for x in np.nditer(w_, op_flags=['readwrite']):
                 x[...] = 1 if x == 1 else -1
-            b_np = topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
+            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
         else:
-            b_np = topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
+            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
         return a_np, w_np, b_np
     a_np, w_np, b_np = get_ref_data()
diff --git a/topi/tests/python/test_topi_bitserial_conv2d_rasp.py b/tests/python/topi/python/test_topi_bitserial_conv2d_rasp.py
similarity index 92%
rename from topi/tests/python/test_topi_bitserial_conv2d_rasp.py
rename to tests/python/topi/python/test_topi_bitserial_conv2d_rasp.py
index 643a7d5aa814..7b32c79e1761 100644
--- a/topi/tests/python/test_topi_bitserial_conv2d_rasp.py
+++ b/tests/python/topi/python/test_topi_bitserial_conv2d_rasp.py
@@ -19,9 +19,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 def generate_quantized_np(shape, bits, out_dtype):
     np.random.seed(0)
@@ -68,9 +68,9 @@ def get_ref_data():
             w_ = np.copy(w_np).astype(out_dtype)
             for x in np.nditer(w_, op_flags=['readwrite']):
                 x[...] = 1 if x == 1 else -1
-            b_np = topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
+            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
         else:
-            b_np = topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
+            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(out_dtype)
         return a_np, w_np, b_np
     a_np, w_np, b_np = get_ref_data()
     a = tvm.nd.array(a_np, ctx)
diff --git a/topi/tests/python/test_topi_bitserial_dense.py b/tests/python/topi/python/test_topi_bitserial_dense.py
similarity index 94%
rename from topi/tests/python/test_topi_bitserial_dense.py
rename to tests/python/topi/python/test_topi_bitserial_dense.py
index fbb20a663f3b..19a4d94a5c49 100644
--- a/topi/tests/python/test_topi_bitserial_dense.py
+++ b/tests/python/topi/python/test_topi_bitserial_dense.py
@@ -19,9 +19,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.pickle_memoize import memoize
 _bitserial_dense_implement = {
@@ -57,7 +57,7 @@ def get_ref_data(a_shape, b_shape, input_dtype):
     input_dtype = 'uint8' if "arm_cpu" in target else "uint32"
     A = te.placeholder((batch, in_dim), dtype=input_dtype, name='A')
     B = te.placeholder((out_dim, in_dim), dtype=input_dtype, name='B')
-    fcompute, fschedule = topi.testing.dispatch(target, _bitserial_dense_implement)
+    fcompute, fschedule = tvm.topi.testing.dispatch(target, _bitserial_dense_implement)
     C = fcompute(A, B, activation_bits, weight_bits, input_dtype, out_dtype, unipolar)
     s = fschedule([C])
diff --git a/topi/tests/python/test_topi_bnn.py b/tests/python/topi/python/test_topi_bnn.py
similarity index 97%
rename from topi/tests/python/test_topi_bnn.py
rename to tests/python/topi/python/test_topi_bnn.py
index 275f34fd916e..ac1646005a1d 100644
--- a/topi/tests/python/test_topi_bnn.py
+++ b/tests/python/topi/python/test_topi_bnn.py
@@ -18,8 +18,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.pickle_memoize import memoize
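Note: `get_const_tuple`, now imported from `tvm.topi.util`, converts a TE shape of `IntImm` nodes into a plain tuple of Python ints, which is what these tests feed to `np.zeros`. A minimal sketch:

```python
import tvm
from tvm import te
from tvm.topi.util import get_const_tuple

A = te.placeholder((2, 3, 4), name="A")
assert get_const_tuple(A.shape) == (2, 3, 4)  # IntImm -> int
```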
diff --git a/topi/tests/python/test_topi_broadcast.py b/tests/python/topi/python/test_topi_broadcast.py
similarity index 97%
rename from topi/tests/python/test_topi_broadcast.py
rename to tests/python/topi/python/test_topi_broadcast.py
index f3e0300a2d81..4ac985e057b9 100644
--- a/topi/tests/python/test_topi_broadcast.py
+++ b/tests/python/topi/python/test_topi_broadcast.py
@@ -18,8 +18,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from common import get_all_backend
@@ -35,7 +35,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(B)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="broadcast_to")
         data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
         out_npy = np.broadcast_to(data_npy, out_shape)
@@ -83,7 +83,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(C)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(C)
         foo = tvm.build(s, [A, B, C], device, name="broadcast_binary" + "_" + ftopi.__name__)
         lhs_npy, lhs_nd = gen_operand(lhs_shape, lhs_min, lhs_max, ctx)
@@ -245,7 +245,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(B)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name=name)
         data_npy = indata.astype(A.dtype)
@@ -286,7 +286,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(B)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name=name)
         data_npy = np.random.uniform(size=shape).astype(A.dtype)
@@ -328,7 +328,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(C)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(C)
         foo = tvm.build(s, [A, B, C], device, name=name)
         lhs_nd = tvm.nd.array(lhs, ctx)
diff --git a/topi/tests/python/test_topi_clip.py b/tests/python/topi/python/test_topi_clip.py
similarity index 93%
rename from topi/tests/python/test_topi_clip.py
rename to tests/python/topi/python/test_topi_clip.py
index 38617ee11443..b3d95dd2e07a 100644
--- a/topi/tests/python/test_topi_clip.py
+++ b/tests/python/topi/python/test_topi_clip.py
@@ -18,9 +18,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.pickle_memoize import memoize
 from common import get_all_backend
@@ -45,7 +45,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)
diff --git a/topi/tests/python/test_topi_conv1d.py b/tests/python/topi/python/test_topi_conv1d.py
similarity index 92%
rename from topi/tests/python/test_topi_conv1d.py
rename to tests/python/topi/python/test_topi_conv1d.py
index 972a3f195a4f..49f2cd1125a3 100644
--- a/topi/tests/python/test_topi_conv1d.py
+++ b/tests/python/topi/python/test_topi_conv1d.py
@@ -19,10 +19,10 @@ import itertools
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 from common import get_all_backend
@@ -67,7 +67,7 @@ def get_ref_data(layout):
         else:
             np_in = a_np
             np_w = w_np
-        b_np = topi.testing.conv1d_ncw_python(np_in, np_w, stride, padding, dilation)
+        b_np = tvm.topi.testing.conv1d_ncw_python(np_in, np_w, stride, padding, dilation)
         if layout == 'NWC':
             b_np = np.transpose(b_np, [0, 2, 1])
         return a_np, w_np, b_np
@@ -80,9 +80,9 @@ def check_device(device):
             print("Skip because %s is not enabled" % device)
             return
         if layout == "NCW":
-            fcompute, fschedule = topi.testing.dispatch(device, _conv1d_ncw_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv1d_ncw_implement)
         else:
-            fcompute, fschedule = topi.testing.dispatch(device, _conv1d_nwc_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv1d_nwc_implement)
         with tvm.target.create(device):
             B = fcompute(A, W, stride, padding, dilation, 'float32')
             s = fschedule([B])
diff --git a/topi/tests/python/test_topi_conv1d_transpose_ncw.py b/tests/python/topi/python/test_topi_conv1d_transpose_ncw.py
similarity index 92%
rename from topi/tests/python/test_topi_conv1d_transpose_ncw.py
rename to tests/python/topi/python/test_topi_conv1d_transpose_ncw.py
index 0cecbef8399b..7efa96d807b6 100644
--- a/topi/tests/python/test_topi_conv1d_transpose_ncw.py
+++ b/tests/python/topi/python/test_topi_conv1d_transpose_ncw.py
@@ -19,10 +19,10 @@ import itertools
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 from common import get_all_backend
 _conv1d_transpose_ncw_implement = {
@@ -43,7 +43,7 @@ def verify_conv1d_transpose_ncw(batch, in_channel, in_size, num_filter, kernel,
     def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
-        b_np = topi.testing.conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding)
+        b_np = tvm.topi.testing.conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding)
         c_np = np.maximum(b_np, 0)
         return a_np, w_np, b_np, c_np
@@ -55,7 +55,7 @@ def check_device(device):
             print("Skip because %s is not enabled" % device)
             return
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv1d_transpose_ncw_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv1d_transpose_ncw_implement)
             B = fcompute(A, W, stride, padding, A.dtype, output_padding)
             C = topi.nn.relu(B)
             s1 = fschedule([B])
diff --git a/topi/tests/python/test_topi_conv2d_NCHWc.py b/tests/python/topi/python/test_topi_conv2d_NCHWc.py
similarity index 97%
rename from topi/tests/python/test_topi_conv2d_NCHWc.py
rename to tests/python/topi/python/test_topi_conv2d_NCHWc.py
index a072d2abdafc..95d5633bc1f8 100644
--- a/topi/tests/python/test_topi_conv2d_NCHWc.py
+++ b/tests/python/topi/python/test_topi_conv2d_NCHWc.py
@@ -20,11 +20,11 @@ import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
 from common import get_all_backend
@@ -81,8 +81,8 @@ def get_ref_data():
         a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
         w_np = np.random.uniform(size=(num_filter, in_channel, kernel, kernel)).astype(dtype)
         b_np = np.random.uniform(size=(num_filter, 1, 1)).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
         if add_bias:
             c_np += b_np
         if add_relu:
diff --git a/topi/tests/python/test_topi_conv2d_hwcn.py b/tests/python/topi/python/test_topi_conv2d_hwcn.py
similarity index 92%
rename from topi/tests/python/test_topi_conv2d_hwcn.py
rename to tests/python/topi/python/test_topi_conv2d_hwcn.py
index 41192bd45deb..20b1b4dfa8e5 100644
--- a/topi/tests/python/test_topi_conv2d_hwcn.py
+++ b/tests/python/topi/python/test_topi_conv2d_hwcn.py
@@ -19,10 +19,10 @@ import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 _conv2d_hwcn_implement = {
@@ -48,8 +48,8 @@ def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=b_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
-        c1_np = topi.testing.conv2d_hwcn_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+        c1_np = tvm.topi.testing.conv2d_hwcn_python(a_np, dw_np, stride, padding)
         c2_np = c1_np + b_np
         c3_np = np.maximum(c2_np, 0)
         return a_np, w_np, b_np, c1_np, c2_np, c3_np
@@ -63,7 +63,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv2d_hwcn_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_hwcn_implement)
             t_conv = fcompute(A, W, stride, padding, dilation)
             t_bias = topi.add(t_conv, B)
             t_relu = topi.nn.relu(t_bias)
diff --git a/topi/tests/python/test_topi_conv2d_int8.py b/tests/python/topi/python/test_topi_conv2d_int8.py
similarity index 97%
rename from topi/tests/python/test_topi_conv2d_int8.py
rename to tests/python/topi/python/test_topi_conv2d_int8.py
index 5659147f8c41..615dc515b1f4 100644
--- a/topi/tests/python/test_topi_conv2d_int8.py
+++ b/tests/python/topi/python/test_topi_conv2d_int8.py
@@ -21,12 +21,12 @@ from tvm import te
 from tvm import autotvm
 from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
-from topi.arm_cpu.conv2d_gemm import is_aarch64_arm
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
+from tvm.topi.arm_cpu.conv2d_gemm import is_aarch64_arm
 from common import get_all_backend, Int8Fallback
@@ -113,8 +113,8 @@ def get_ref_data():
         a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
         w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
-        c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding).astype(dtype)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding).astype(dtype)
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
@@ -203,8 +203,8 @@ def get_ref_data():
         a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
         w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
         # convert to NCHWc
         _, _, out_height, out_width = c_np.shape
@@ -278,8 +278,8 @@ def get_ref_data():
         a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
         w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
diff --git a/topi/tests/python/test_topi_conv2d_nchw.py b/tests/python/topi/python/test_topi_conv2d_nchw.py
similarity index 96%
rename from topi/tests/python/test_topi_conv2d_nchw.py
rename to tests/python/topi/python/test_topi_conv2d_nchw.py
index 11b799c712c0..dcdf0a776099 100644
--- a/topi/tests/python/test_topi_conv2d_nchw.py
+++ b/tests/python/topi/python/test_topi_conv2d_nchw.py
@@ -20,11 +20,11 @@ import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
 from common import get_all_backend
@@ -51,8 +51,8 @@ def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
         if add_bias:
             c_np += b_np
         if add_relu:
@@ -71,7 +71,7 @@ def check_device(device):
         if "cudnn" in device:
             fcompute, fschedule = topi.cuda.conv2d_cudnn, topi.cuda.schedule_conv2d_cudnn
         else:
-            fcompute, fschedule = topi.testing.get_conv2d_nchw_implement(device)
+            fcompute, fschedule = tvm.topi.testing.get_conv2d_nchw_implement(device)
         with tvm.target.create(device):
             if "cudnn" in device:
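Note: the `check_device` bodies above share one compile-and-compare skeleton. A minimal end-to-end sketch of it under the new namespace (assumes an "llvm"-enabled build; relu stands in for whichever operator a given test exercises):

```python
import numpy as np
import tvm
import tvm.topi.testing
from tvm import te, topi

A = te.placeholder((1, 4, 8, 8), name="A")
B = topi.nn.relu(A)                      # any injective op works here
with tvm.target.create("llvm"):
    s = tvm.topi.testing.get_injective_schedule("llvm")(B)
func = tvm.build(s, [A, B], "llvm")
ctx = tvm.context("llvm", 0)
a_np = np.random.uniform(-1, 1, size=(1, 4, 8, 8)).astype("float32")
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(a_np.shape, dtype="float32"), ctx)
func(a, b)
np.testing.assert_allclose(b.asnumpy(), np.maximum(a_np, 0))
```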
diff --git a/topi/tests/python/test_topi_conv2d_nhwc.py b/tests/python/topi/python/test_topi_conv2d_nhwc.py
similarity index 91%
rename from topi/tests/python/test_topi_conv2d_nhwc.py
rename to tests/python/topi/python/test_topi_conv2d_nhwc.py
index e027d5a7ccd9..7750f235c6c5 100644
--- a/topi/tests/python/test_topi_conv2d_nhwc.py
+++ b/tests/python/topi/python/test_topi_conv2d_nhwc.py
@@ -19,10 +19,10 @@ import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
@@ -50,8 +50,8 @@ def verify_conv2d_nhwc(batch, in_channel, in_size, num_filter, kernel, stride, p
     def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
-        b_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+        b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
         return a_np, w_np, b_np
     a_np, w_np, b_np = get_ref_data()
@@ -61,7 +61,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv2d_nhwc_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nhwc_implement)
             B = fcompute(A, W, stride, padding, dilation, dtype)
             s = fschedule([B])
         ctx = tvm.context(device, 0)
diff --git a/topi/tests/python/test_topi_conv2d_nhwc_pack_int8.py b/tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py
similarity index 91%
rename from topi/tests/python/test_topi_conv2d_nhwc_pack_int8.py
rename to tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py
index a5d532c4e016..4439d6ae13eb 100644
--- a/topi/tests/python/test_topi_conv2d_nhwc_pack_int8.py
+++ b/tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py
@@ -22,10 +22,10 @@ from tvm import te
 from tvm import autotvm
 from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 def verify_conv2d_1x1_nhwc_pack_int8(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1):
@@ -43,8 +43,8 @@ def verify_conv2d_1x1_nhwc_pack_int8(batch, in_channel, in_size, num_filter, ker
     def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(adtype)
         w_np = np.random.uniform(size=w_shape).astype(wdtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
-        b_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+        b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
         return a_np, w_np, b_np
     a_np, w_np, b_np = get_ref_data()
diff --git a/topi/tests/python/test_topi_conv2d_nhwc_tensorcore.py b/tests/python/topi/python/test_topi_conv2d_nhwc_tensorcore.py
similarity index 92%
rename from topi/tests/python/test_topi_conv2d_nhwc_tensorcore.py
rename to tests/python/topi/python/test_topi_conv2d_nhwc_tensorcore.py
index cc327849caea..8375df34323c 100644
--- a/topi/tests/python/test_topi_conv2d_nhwc_tensorcore.py
+++ b/tests/python/topi/python/test_topi_conv2d_nhwc_tensorcore.py
@@ -19,13 +19,13 @@
 import numpy as np
 import tvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm import te
 from tvm.contrib.pickle_memoize import memoize
 from tvm.contrib import nvcc
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
 _conv2d_nhwc_tensorcore_implement = {
@@ -57,8 +57,8 @@ def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
             c_np += b_np
@@ -78,7 +78,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv2d_nhwc_tensorcore_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nhwc_tensorcore_implement)
             C = fcompute(A, W, stride, padding, dilation, 'float32')
             if add_bias:
                 C = topi.add(C, bias)
diff --git a/topi/tests/python/test_topi_conv2d_nhwc_winograd.py b/tests/python/topi/python/test_topi_conv2d_nhwc_winograd.py
similarity index 93%
rename from topi/tests/python/test_topi_conv2d_nhwc_winograd.py
rename to tests/python/topi/python/test_topi_conv2d_nhwc_winograd.py
index 7cb40417d2cc..00b40bfbe826 100644
--- a/topi/tests/python/test_topi_conv2d_nhwc_winograd.py
+++ b/tests/python/topi/python/test_topi_conv2d_nhwc_winograd.py
@@ -20,13 +20,13 @@
 import numpy as np
 import tvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm import te
 from tvm.contrib.pickle_memoize import memoize
 from tvm.contrib import nvcc
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
 _conv2d_nhwc_winograd_tensorcore = {
@@ -65,8 +65,8 @@ def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
-        c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
+        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
             c_np += b_np
@@ -84,10 +84,10 @@ def check_device(device):
         print("Running on target: %s" % device)
         with tvm.target.create(device):
             if bgemm == "direct":
-                fcompute, fschedule = topi.testing.dispatch(device,
+                fcompute, fschedule = tvm.topi.testing.dispatch(device,
                                                             _conv2d_nhwc_winograd_direct)
             elif bgemm == "tensorcore":
-                fcompute, fschedule = topi.testing.dispatch(device,
+                fcompute, fschedule = tvm.topi.testing.dispatch(device,
                                                             _conv2d_nhwc_winograd_tensorcore)
             C = fcompute(A, W, stride, padding, dilation, 'float32')
             if add_bias:
diff --git a/topi/tests/python/test_topi_conv2d_transpose_nchw.py b/tests/python/topi/python/test_topi_conv2d_transpose_nchw.py
similarity index 94%
rename from topi/tests/python/test_topi_conv2d_transpose_nchw.py
rename to tests/python/topi/python/test_topi_conv2d_transpose_nchw.py
index 11f1903d0649..6c43b2d980cf 100644
--- a/topi/tests/python/test_topi_conv2d_transpose_nchw.py
+++ b/tests/python/topi/python/test_topi_conv2d_transpose_nchw.py
@@ -18,10 +18,10 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 from common import get_all_backend
@@ -51,7 +51,7 @@ def verify_conv2d_transpose_nchw(batch, in_channel, in_size, num_filter, kernel,
     def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
-        b_np = topi.testing.conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding)
+        b_np = tvm.topi.testing.conv2d_transpose_nchw_python(a_np, w_np, stride, padding, output_padding)
         c_np = np.maximum(b_np, 0)
         return a_np, w_np, b_np, c_np
@@ -64,7 +64,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv2d_transpose_nchw_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_transpose_nchw_implement)
             B = fcompute(A, W,
                          [stride_height, stride_width],
                          [pad_top, pad_left, pad_bottom, pad_right],
diff --git a/topi/tests/python/test_topi_conv2d_winograd.py b/tests/python/topi/python/test_topi_conv2d_winograd.py
similarity index 93%
rename from topi/tests/python/test_topi_conv2d_winograd.py
rename to tests/python/topi/python/test_topi_conv2d_winograd.py
index cfbc30063d09..800aaea5363a 100644
--- a/topi/tests/python/test_topi_conv2d_winograd.py
+++ b/tests/python/topi/python/test_topi_conv2d_winograd.py
@@ -21,11 +21,11 @@ from tvm import te
 from tvm import autotvm
 from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.util import get_const_tuple
 _conv2d_nchw_winograd_implement = {
@@ -57,8 +57,8 @@ def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
             c_np += b_np
@@ -75,7 +75,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _conv2d_nchw_winograd_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nchw_winograd_implement)
             C = fcompute(A, W, stride, padding, dilation, dtype)
             if add_bias:
                 C = topi.add(C, bias)
diff --git a/topi/tests/python/test_topi_conv3d_ncdhw.py b/tests/python/topi/python/test_topi_conv3d_ncdhw.py
similarity index 92%
rename from topi/tests/python/test_topi_conv3d_ncdhw.py
rename to tests/python/topi/python/test_topi_conv3d_ncdhw.py
index c3e01aeb7a64..ad2b93ce00ce 100644
--- a/topi/tests/python/test_topi_conv3d_ncdhw.py
+++ b/tests/python/topi/python/test_topi_conv3d_ncdhw.py
@@ -20,11 +20,11 @@ import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.nn.util import get_pad_tuple3d
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple3d
+from tvm.topi.util import get_const_tuple
 from common import get_all_backend
@@ -56,8 +56,8 @@ def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
-        c_np = topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
+        c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
         if add_bias:
             c_np += b_np
         if add_relu:
@@ -72,7 +72,7 @@ def check_device(device):
             print("Skip because %s is not enabled" % device)
             return
         print("Running on target: %s" % device)
-        fcompute, fschedule = topi.testing.dispatch(device, _conv3d_ncdhw_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ncdhw_implement)
         with tvm.target.create(device):
             C = fcompute(A, W, (stride, stride, stride), padding,
                          (dilation, dilation, dilation), dtype)
diff --git a/topi/tests/python/test_topi_conv3d_ndhwc.py b/tests/python/topi/python/test_topi_conv3d_ndhwc.py
similarity index 91%
rename from topi/tests/python/test_topi_conv3d_ndhwc.py
rename to tests/python/topi/python/test_topi_conv3d_ndhwc.py
index 8526bb1fc90a..b80f96bfb26d 100644
--- a/topi/tests/python/test_topi_conv3d_ndhwc.py
+++ b/tests/python/topi/python/test_topi_conv3d_ndhwc.py
@@ -19,10 +19,10 @@ import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 from common import get_all_backend
@@ -53,8 +53,8 @@ def verify_conv3d_ndhwc(batch, in_channel, in_size, num_filter, kernel, stride,
     def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (dilation, dilation, dilation, 1, 1))
-        b_np = topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, dilation, 1, 1))
+        b_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
         return a_np, w_np, b_np
     a_np, w_np, b_np = get_ref_data()
@@ -64,7 +64,7 @@ def check_device(device):
             print("Skip because %s is not enabled" % device)
             return
         print("Running on target: %s" % device)
-        fcompute, fschedule = topi.testing.dispatch(device, _conv3d_ndhwc_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ndhwc_implement)
         with tvm.target.create(device):
             B = fcompute(A, W, stride, padding, dilation, dtype)
             s = fschedule([B])
diff --git a/topi/tests/python/test_topi_conv3d_ndhwc_tensorcore.py b/tests/python/topi/python/test_topi_conv3d_ndhwc_tensorcore.py
similarity index 92%
rename from topi/tests/python/test_topi_conv3d_ndhwc_tensorcore.py
rename to tests/python/topi/python/test_topi_conv3d_ndhwc_tensorcore.py
index f98550f7b4c4..2adc34864c13 100644
--- a/topi/tests/python/test_topi_conv3d_ndhwc_tensorcore.py
+++ b/tests/python/topi/python/test_topi_conv3d_ndhwc_tensorcore.py
@@ -19,13 +19,13 @@
 import numpy as np
 import tvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm import te
 from tvm.contrib.pickle_memoize import memoize
 from tvm.contrib import nvcc
-from topi.nn.util import get_pad_tuple3d
-from topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple3d
+from tvm.topi.util import get_const_tuple
 _conv3d_ndhwc_tensorcore_implement = {
@@ -58,8 +58,8 @@ def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, 1, dilation, dilation))
-        c_np = topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
             c_np += b_np
@@ -79,7 +79,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device,
_conv3d_ndhwc_tensorcore_implement) + fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ndhwc_tensorcore_implement) C = fcompute(A, W, stride, padding, dilation, 'float32') if add_bias: C = topi.add(C, bias) diff --git a/topi/tests/python/test_topi_conv3d_transpose_ncdhw.py b/tests/python/topi/python/test_topi_conv3d_transpose_ncdhw.py similarity index 94% rename from topi/tests/python/test_topi_conv3d_transpose_ncdhw.py rename to tests/python/topi/python/test_topi_conv3d_transpose_ncdhw.py index 8b081987fd12..6ee386e5a069 100644 --- a/topi/tests/python/test_topi_conv3d_transpose_ncdhw.py +++ b/tests/python/topi/python/test_topi_conv3d_transpose_ncdhw.py @@ -18,10 +18,10 @@ import numpy as np import tvm from tvm import te -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing from tvm.contrib.pickle_memoize import memoize -from topi.util import get_const_tuple +from tvm.topi.util import get_const_tuple from common import get_all_backend @@ -49,7 +49,7 @@ def verify_conv3d_transpose_ncdhw(batch, in_channel, in_size, num_filter, kernel def get_ref_data(): a_np = np.random.uniform(size=a_shape).astype(dtype) w_np = np.random.uniform(size=w_shape).astype(dtype) - b_np = topi.testing.conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding) + b_np = tvm.topi.testing.conv3d_transpose_ncdhw_python(a_np, w_np, stride, padding) c_np = np.maximum(b_np, 0) return a_np, w_np, b_np, c_np @@ -62,7 +62,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - fcompute, fschedule = topi.testing.dispatch(device, _conv3d_transpose_ncdhw_implement) + fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_transpose_ncdhw_implement) B = fcompute(A, W, [stride_depth, stride_height, stride_width], [pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right], diff --git a/topi/tests/python/test_topi_conv3d_winograd.py b/tests/python/topi/python/test_topi_conv3d_winograd.py similarity index 93% rename from topi/tests/python/test_topi_conv3d_winograd.py rename to tests/python/topi/python/test_topi_conv3d_winograd.py index 6d0d99d00b10..6e261305b9a4 100644 --- a/topi/tests/python/test_topi_conv3d_winograd.py +++ b/tests/python/topi/python/test_topi_conv3d_winograd.py @@ -20,11 +20,11 @@ import tvm from tvm import te from tvm import autotvm -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing from tvm.contrib.pickle_memoize import memoize -from topi.nn.util import get_pad_tuple3d -from topi.util import get_const_tuple +from tvm.topi.nn.util import get_pad_tuple3d +from tvm.topi.util import get_const_tuple from common import get_all_backend @@ -66,8 +66,8 @@ def get_ref_data(): a_np = np.random.uniform(size=a_shape).astype(dtype) w_np = np.random.uniform(size=w_shape).astype(dtype) b_np = np.random.uniform(size=bias_shape).astype(dtype) - dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation)) - c_np = topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding) + dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation)) + c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding) if add_bias: c_np += b_np if add_relu: @@ -82,7 +82,7 @@ def check_device(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) - fcompute, fschedule = topi.testing.dispatch(device, _conv3d_ncdhw_implement) + fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ncdhw_implement) with 
tvm.target.create(device): C = fcompute(A, W, (stride, stride, stride), padding, (dilation, dilation, dilation), dtype) diff --git a/topi/tests/python/test_topi_correlation.py b/tests/python/topi/python/test_topi_correlation.py similarity index 92% rename from topi/tests/python/test_topi_correlation.py rename to tests/python/topi/python/test_topi_correlation.py index 663564fab469..f5eb51c8a6af 100644 --- a/topi/tests/python/test_topi_correlation.py +++ b/tests/python/topi/python/test_topi_correlation.py @@ -19,10 +19,10 @@ import tvm from tvm import te from tvm import autotvm -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing from tvm.contrib.pickle_memoize import memoize -from topi.util import get_const_tuple +from tvm.topi.util import get_const_tuple from common import get_all_backend @@ -47,7 +47,7 @@ def verify_correlation_nchw(data_shape, kernel_size, max_displacement, stride1, def get_ref_data(): a_np = np.random.uniform(size=data_shape).astype(dtype) b_np = np.random.uniform(size=data_shape).astype(dtype) - c_np = topi.testing.correlation_nchw_python(a_np, b_np, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply) + c_np = tvm.topi.testing.correlation_nchw_python(a_np, b_np, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply) return a_np, b_np, c_np a_np, b_np, c_np = get_ref_data() @@ -58,7 +58,7 @@ def check_device(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) - fcompute, fschedule = topi.testing.dispatch( + fcompute, fschedule = tvm.topi.testing.dispatch( device, _correlation_implement) with tvm.target.create(device): C = fcompute(A, B, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply) diff --git a/topi/tests/python/test_topi_deformable_conv2d.py b/tests/python/topi/python/test_topi_deformable_conv2d.py similarity index 93% rename from topi/tests/python/test_topi_deformable_conv2d.py rename to tests/python/topi/python/test_topi_deformable_conv2d.py index a88525407e27..a2a01fc7ea1f 100644 --- a/topi/tests/python/test_topi_deformable_conv2d.py +++ b/tests/python/topi/python/test_topi_deformable_conv2d.py @@ -18,10 +18,10 @@ import tvm from tvm import te from tvm import autotvm -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing from tvm.contrib.pickle_memoize import memoize -from topi.util import get_const_tuple +from tvm.topi.util import get_const_tuple from common import get_all_backend @@ -53,7 +53,7 @@ def get_ref_data(): offset_np = np.random.randn(*offset_shape).astype(dtype) w_np = np.random.uniform(size=w_shape).astype(dtype) b_np = np.random.uniform(size=bias_shape).astype(dtype) - c_np = topi.testing.deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding, + c_np = tvm.topi.testing.deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups) return a_np, offset_np, w_np, c_np @@ -66,7 +66,7 @@ def check_device(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) - fcompute, fschedule = topi.testing.dispatch(device, _deformable_conv2d_implement) + fcompute, fschedule = tvm.topi.testing.dispatch(device, _deformable_conv2d_implement) with tvm.target.create(device): C = fcompute(A, Offset, W, stride, padding, dilation, deformable_groups, groups, dtype) diff --git a/topi/tests/python/test_topi_dense.py b/tests/python/topi/python/test_topi_dense.py similarity index 97% rename from 
topi/tests/python/test_topi_dense.py rename to tests/python/topi/python/test_topi_dense.py index 6294c7d6818f..517cb4d3ecc6 100644 --- a/topi/tests/python/test_topi_dense.py +++ b/tests/python/topi/python/test_topi_dense.py @@ -18,9 +18,9 @@ import numpy as np import tvm from tvm import te -import topi -import topi.testing -from topi.util import get_const_tuple +from tvm import topi +import tvm.topi.testing +from tvm.topi.util import get_const_tuple from tvm.contrib.pickle_memoize import memoize from common import get_all_backend, Int8Fallback @@ -63,7 +63,7 @@ def check_device(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) - for fcompute, fschedule in topi.testing.dispatch(device, _dense_implement): + for fcompute, fschedule in tvm.topi.testing.dispatch(device, _dense_implement): with tvm.target.create(device): D = fcompute(A, B, C if use_bias else None) D = topi.nn.relu(D) diff --git a/topi/tests/python/test_topi_dense_tensorcore.py b/tests/python/topi/python/test_topi_dense_tensorcore.py similarity index 95% rename from topi/tests/python/test_topi_dense_tensorcore.py rename to tests/python/topi/python/test_topi_dense_tensorcore.py index f74f31e740bc..8a645e6b45ca 100644 --- a/topi/tests/python/test_topi_dense_tensorcore.py +++ b/tests/python/topi/python/test_topi_dense_tensorcore.py @@ -18,9 +18,9 @@ """Test code for dense tensorcore operator""" import numpy as np import tvm -import topi -import topi.testing -from topi.util import get_const_tuple +from tvm import topi +import tvm.topi.testing +from tvm.topi.util import get_const_tuple from tvm import te from tvm.contrib.pickle_memoize import memoize from tvm.contrib import nvcc @@ -60,7 +60,7 @@ def check_device(device): print("skip because gpu does not support Tensor Cores") return print("Running on target: %s" % device) - for fcompute, fschedule in topi.testing.dispatch(device, _dense_implement): + for fcompute, fschedule in tvm.topi.testing.dispatch(device, _dense_implement): with tvm.target.create(device): D = fcompute(A, B, C if use_bias else None) D = topi.nn.relu(D) diff --git a/topi/tests/python/test_topi_depth_to_space.py b/tests/python/topi/python/test_topi_depth_to_space.py similarity index 94% rename from topi/tests/python/test_topi_depth_to_space.py rename to tests/python/topi/python/test_topi_depth_to_space.py index b21eb9773c32..380f656bf599 100644 --- a/topi/tests/python/test_topi_depth_to_space.py +++ b/tests/python/topi/python/test_topi_depth_to_space.py @@ -18,8 +18,8 @@ import numpy as np import tvm from tvm import te -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing from common import get_all_backend @@ -45,7 +45,7 @@ def verify_depth_to_space(block_size, batch, in_channel, in_height, in_width, la B = topi.nn.depth_to_space(A, block_size=block_size, layout=layout, mode=mode) if layout == 'NHWC': a_np = np.transpose(a_np, axes=[0, 3, 1, 2]) - b_np = topi.testing.depth_to_space_python(a_np, block_size, mode=mode) + b_np = tvm.topi.testing.depth_to_space_python(a_np, block_size, mode=mode) if layout == 'NHWC': a_np = np.transpose(a_np, axes=[0, 2, 3, 1]) b_np = np.transpose(b_np, axes=[0, 2, 3, 1]) @@ -57,7 +57,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.testing.get_injective_schedule(device)(B) + s = tvm.topi.testing.get_injective_schedule(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx) f = tvm.build(s, [A, 
B], device) diff --git a/topi/tests/python/test_topi_depthwise_conv2d.py b/tests/python/topi/python/test_topi_depthwise_conv2d.py similarity index 96% rename from topi/tests/python/test_topi_depthwise_conv2d.py rename to tests/python/topi/python/test_topi_depthwise_conv2d.py index 693348918d3e..397861713f73 100644 --- a/topi/tests/python/test_topi_depthwise_conv2d.py +++ b/tests/python/topi/python/test_topi_depthwise_conv2d.py @@ -17,11 +17,11 @@ import tvm from tvm import te from tvm import autotvm -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing import numpy as np -from topi.util import get_const_tuple -from topi.nn.util import get_pad_tuple +from tvm.topi.util import get_const_tuple +from tvm.topi.nn.util import get_pad_tuple from tvm.contrib.pickle_memoize import memoize from common import get_all_backend @@ -73,7 +73,7 @@ def check_device(device): return print("Running on target: %s" % device) - impl_list = topi.testing.dispatch(device, _depthwise_conv2d_nchw_implement)[:] + impl_list = tvm.topi.testing.dispatch(device, _depthwise_conv2d_nchw_implement)[:] if device == "llvm" and channel_multiplier == 1 and dilation == 1: impl_list.append((topi.x86.depthwise_conv2d_nchw, topi.x86.schedule_depthwise_conv2d_nchw)) @@ -105,11 +105,11 @@ def check_device(device): def get_ref_data(): input_np = np.random.uniform(size=input_shape).astype(dtype) filter_np = np.random.uniform(size=filter_shape).astype(dtype) - dilated_filter_np = topi.testing.dilate_python(filter_np, (1, 1, dilation, dilation)) + dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, (1, 1, dilation, dilation)) scale_np = np.random.uniform(size=scale_shape).astype(dtype) shift_np = np.random.uniform(size=shift_shape).astype(dtype) # correctness with scipy - depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nchw( + depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw( input_np, dilated_filter_np, stride, padding) scale_shift_scipy = np.zeros(shape=scale_shift_shape) for c in range(in_channel * channel_multiplier): @@ -176,7 +176,7 @@ def check_device(device): return print("Running on target: %s" % device) - fcompute, fschedule = topi.testing.dispatch(device, _depthwise_conv2d_nhwc_implement) + fcompute, fschedule = tvm.topi.testing.dispatch(device, _depthwise_conv2d_nhwc_implement) with tvm.target.create(device): # declare DepthwiseConv2d = fcompute(Input, Filter, @@ -204,11 +204,11 @@ def check_device(device): def get_ref_data(): input_np = np.random.uniform(size=input_shape).astype(dtype) filter_np = np.random.uniform(size=filter_shape).astype(dtype) - dilated_filter_np = topi.testing.dilate_python(filter_np, (dilation, dilation, 1, 1)) + dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, (dilation, dilation, 1, 1)) scale_np = np.random.uniform(size=scale_shape).astype(dtype) shift_np = np.random.uniform(size=shift_shape).astype(dtype) # correctness with scipy - depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nhwc( + depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nhwc( input_np, dilated_filter_np, stride=[stride_h, stride_w], padding=padding) scale_shift_scipy = np.zeros(shape=scale_shift_shape) for c in range(in_channel * channel_multiplier): @@ -329,7 +329,7 @@ def get_ref_data(): input_np = np.random.uniform(size=input_shape).astype(dtype) filter_np = np.random.uniform(size=filter_shape).astype(dtype) # correctness with scipy - depthwise_conv2d_scipy = topi.testing.depthwise_conv2d_python_nchw( + 
depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw( input_np, filter_np, stride, padding) relu_scipy = np.maximum(depthwise_conv2d_scipy, 0) return (_transform_data(input_np, ic_block), diff --git a/topi/tests/python/test_topi_depthwise_conv2d_back_input.py b/tests/python/topi/python/test_topi_depthwise_conv2d_back_input.py similarity index 94% rename from topi/tests/python/test_topi_depthwise_conv2d_back_input.py rename to tests/python/topi/python/test_topi_depthwise_conv2d_back_input.py index aac0cd523b0b..ba8bfcc72a4e 100644 --- a/topi/tests/python/test_topi_depthwise_conv2d_back_input.py +++ b/tests/python/topi/python/test_topi_depthwise_conv2d_back_input.py @@ -16,14 +16,14 @@ # under the License. import tvm from tvm import te -import topi +from tvm import topi import numpy as np from tvm.contrib.pickle_memoize import memoize from scipy import signal -from topi.util import get_const_tuple -from topi.nn.util import get_pad_tuple -import topi.testing -from topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_input_nhwc +from tvm.topi.util import get_const_tuple +from tvm.topi.nn.util import get_pad_tuple +import tvm.topi.testing +from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_input_nhwc def verify_depthwise_conv2d_back_input(batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h): @@ -67,7 +67,7 @@ def check_device(device): def get_ref_data(): out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype) filter_np = np.random.uniform(size=filter_shape).astype(dtype) - dilated_out_grad_np = topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1]) + dilated_out_grad_np = tvm.topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1]) # padding params in forward propagation fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple([padding_h, padding_w], (filter_h, filter_w)) # padding params in backward propagation diff --git a/topi/tests/python/test_topi_depthwise_conv2d_back_weight.py b/tests/python/topi/python/test_topi_depthwise_conv2d_back_weight.py similarity index 94% rename from topi/tests/python/test_topi_depthwise_conv2d_back_weight.py rename to tests/python/topi/python/test_topi_depthwise_conv2d_back_weight.py index 4602d098bf91..599225d0a667 100644 --- a/topi/tests/python/test_topi_depthwise_conv2d_back_weight.py +++ b/tests/python/topi/python/test_topi_depthwise_conv2d_back_weight.py @@ -16,14 +16,14 @@ # under the License. 
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import numpy as np
 from tvm.contrib.pickle_memoize import memoize
 from scipy import signal
-from topi.util import get_const_tuple
-from topi.nn.util import get_pad_tuple
-from topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_weight_nhwc
+from tvm.topi.util import get_const_tuple
+from tvm.topi.nn.util import get_pad_tuple
+from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_weight_nhwc
 
 
 def verify_depthwise_conv2d_back_weight(batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h):
@@ -67,7 +67,7 @@ def check_device(device):
     def get_ref_data():
         out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
         input_np = np.random.uniform(size=in_shape).astype(dtype)
-        dilated_out_grad_np = topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1])
+        dilated_out_grad_np = tvm.topi.testing.dilate_python(out_grad_np, [1, stride_h, stride_w, 1])
         pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple([padding_h, padding_w], (filter_h, filter_w))
         padded_input_np = np.zeros((batch, in_h+pad_top+pad_bottom, in_w+pad_left+pad_right, in_channel))
diff --git a/topi/tests/python/test_topi_dilate.py b/tests/python/topi/python/test_topi_dilate.py
similarity index 94%
rename from topi/tests/python/test_topi_dilate.py
rename to tests/python/topi/python/test_topi_dilate.py
index 1e69383238c7..60f2083ea86c 100644
--- a/topi/tests/python/test_topi_dilate.py
+++ b/tests/python/topi/python/test_topi_dilate.py
@@ -16,8 +16,8 @@
 # under the License.
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import numpy as np
 
 
@@ -30,7 +30,7 @@ def _test_dilate(input_size, strides):
     Output = topi.nn.dilate(Input, strides)
     schedule = te.create_schedule(Output.op)
     input_np = np.random.uniform(size=input_size).astype(Input.dtype)
-    output_np = topi.testing.dilate_python(input_np, strides)
+    output_np = tvm.topi.testing.dilate_python(input_np, strides)
     input_tvm = tvm.nd.array(input_np, ctx=ctx)
     output_size = topi.util.get_const_tuple(Output.shape)
     output_tvm = tvm.nd.array(np.zeros(shape=output_size).astype(Output.dtype), ctx=ctx)
diff --git a/topi/tests/python/test_topi_group_conv2d.py b/tests/python/topi/python/test_topi_group_conv2d.py
similarity index 94%
rename from topi/tests/python/test_topi_group_conv2d.py
rename to tests/python/topi/python/test_topi_group_conv2d.py
index 6909bbee8bb0..6050d452140c 100644
--- a/topi/tests/python/test_topi_group_conv2d.py
+++ b/tests/python/topi/python/test_topi_group_conv2d.py
@@ -21,10 +21,10 @@
 from tvm import te
 from tvm import autotvm
 from tvm.autotvm.task.space import FallbackConfigEntity
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend, Int8Fallback
 
@@ -56,8 +56,8 @@ def get_ref_data():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
         w_np = np.random.uniform(size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
 
         if add_bias:
             b_np = np.random.uniform(size=bias_shape).astype(dtype)
@@ -77,7 +77,7 @@ def check_device(device):
         print("Running on target: %s" % device)
 
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _group_conv2d_nchw_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _group_conv2d_nchw_implement)
             C = fcompute(A, W, stride, padding, dilation, groups, dtype)
             if add_bias:
                 C = topi.add(C, bias)
@@ -128,8 +128,8 @@ def get_ref_data():
         a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
         w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
         b_np = np.random.uniform(size=bias_shape).astype(dtype)
-        dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
-        c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
+        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(dtype)
 
         # convert to NCHWc
         _, _, out_height, out_width = c_np.shape
diff --git a/topi/tests/python/test_topi_group_conv2d_NCHWc_int8.py b/tests/python/topi/python/test_topi_group_conv2d_NCHWc_int8.py
similarity index 96%
rename from topi/tests/python/test_topi_group_conv2d_NCHWc_int8.py
rename to tests/python/topi/python/test_topi_group_conv2d_NCHWc_int8.py
index ef3faae4b841..6afe44e51466 100644
--- a/topi/tests/python/test_topi_group_conv2d_NCHWc_int8.py
+++ b/tests/python/topi/python/test_topi_group_conv2d_NCHWc_int8.py
@@ -21,10 +21,10 @@
 import tvm
 from tvm import te
 from tvm import autotvm
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 import pytest
 
 from common import get_all_backend
@@ -69,7 +69,7 @@ def verify_group_conv2d_NCHWc_int8(batch, in_channel, groups, in_size, num_filte
     def get_ref_data():
         a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype("uint8")
         w_np = np.random.uniform(size=(num_filter, in_channel//groups, kernel, kernel)).astype("int8")
-        c_np = topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding, groups)
+        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding, groups)
         return _transform_data(a_np, ic_block), _transform_kernel(w_np, ic_block, oc_block), \
             _transform_data(c_np, oc_block)
 
diff --git a/topi/tests/python/test_topi_image.py b/tests/python/topi/python/test_topi_image.py
similarity index 91%
rename from topi/tests/python/test_topi_image.py
rename to tests/python/topi/python/test_topi_image.py
index 012ed4207a1b..8d0092901c5c 100644
--- a/topi/tests/python/test_topi_image.py
+++ b/tests/python/topi/python/test_topi_image.py
@@ -18,8 +18,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
 
 from common import get_all_backend
@@ -41,11 +41,11 @@ def verify_resize(batch, in_channel, in_height, in_width, out_height, out_width,
                         'Layout not supported {} '.format(layout))
     B = topi.image.resize(A, (out_height, out_width), layout=layout,
                           coordinate_transformation_mode=coord_trans, method=method)
     if method == "bilinear":
-        b_np = topi.testing.bilinear_resize_python(a_np, (out_height, out_width), layout, coord_trans)
+        b_np = tvm.topi.testing.bilinear_resize_python(a_np, (out_height, out_width), layout, coord_trans)
     else:
         scale_h = out_height / in_height
         scale_w = out_width / in_width
-        b_np = topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout)
+        b_np = tvm.topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout)
 
     def check_device(device):
         ctx = tvm.context(device, 0)
@@ -54,7 +54,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
         f = tvm.build(s, [A, B], device)
@@ -103,13 +103,13 @@ def verify_resize3d(batch, in_channel, in_depth, in_height, in_width, out_depth,
                             coordinate_transformation_mode=coordinate_transformation_mode,
                             method=method)
     if method == "trilinear":
-        b_np = topi.testing.trilinear_resize3d_python(a_np, (out_depth, out_height, out_width), layout,
+        b_np = tvm.topi.testing.trilinear_resize3d_python(a_np, (out_depth, out_height, out_width), layout,
                                                       coordinate_transformation_mode)
     else:
         scale_d = out_depth / in_depth
         scale_h = out_height / in_height
         scale_w = out_width / in_width
-        b_np = topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout)
+        b_np = tvm.topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout)
 
     def check_device(device):
         ctx = tvm.context(device, 0)
@@ -118,7 +118,7 @@ def check_device(device):
             return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
         f = tvm.build(s, [A, B], device)
@@ -168,7 +168,7 @@ def verify_crop_and_resize(image_shape, np_boxes, np_box_indices, np_crop_size,
     out = topi.image.crop_and_resize(images, boxes, box_ind, np_crop_size, layout=layout,
                                      method=method, extrapolation_value=extrapolation_value)
 
-    baseline_np = topi.testing.crop_and_resize_python(np_images, np_boxes, np_box_indices,
+    baseline_np = tvm.topi.testing.crop_and_resize_python(np_images, np_boxes, np_box_indices,
                                                       np_crop_size, layout, method, extrapolation_value)
 
     def check_device(device):
@@ -178,7 +178,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(out)
+            s = tvm.topi.testing.get_injective_schedule(device)(out)
         tvm_images = tvm.nd.array(np_images, ctx)
         tvm_boxes = tvm.nd.array(np_boxes, ctx)
         tvm_indices = tvm.nd.array(np_box_indices, ctx)
@@ -216,7 +216,7 @@ def verify_affine_grid(num_batch, target_shape):
     @memoize("topi.tests.test_affine_grid.verify_affine_grid")
     def get_ref_data():
         data_np = np.random.uniform(size=data_shape).astype(dtype)
-        out_np = topi.testing.affine_grid_python(data_np, target_shape)
+        out_np = tvm.topi.testing.affine_grid_python(data_np, target_shape)
         return data_np, out_np
 
     data_np, out_np = get_ref_data()
@@ -228,7 +228,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(out)
+            s = tvm.topi.testing.get_injective_schedule(device)(out)
         tvm_data = tvm.nd.array(data_np, ctx)
         tvm_out = tvm.nd.empty(out_np.shape, dtype, ctx)
         f = tvm.build(s, [data, out], device)
@@ -256,7 +256,7 @@ def get_ref_data():
         data_np = np.random.uniform(size=data_shape).astype(dtype)
         # allow grid values to be out-of-bound
         grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
-        out_np = topi.testing.grid_sample_nchw_python(data_np, grid_np, 'bilinear')
+        out_np = tvm.topi.testing.grid_sample_nchw_python(data_np, grid_np, 'bilinear')
         return data_np, grid_np, out_np
 
     data_np, grid_np, out_np = get_ref_data()
@@ -268,7 +268,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(out)
+            s = tvm.topi.testing.get_injective_schedule(device)(out)
         tvm_data = tvm.nd.array(data_np, ctx)
         tvm_grid = tvm.nd.array(grid_np, ctx)
         tvm_out = tvm.nd.empty(out_np.shape, dtype, ctx)
diff --git a/topi/tests/python/test_topi_lrn.py b/tests/python/topi/python/test_topi_lrn.py
similarity index 90%
rename from topi/tests/python/test_topi_lrn.py
rename to tests/python/topi/python/test_topi_lrn.py
index 7e003a7a52b2..2d57d078407c 100644
--- a/topi/tests/python/test_topi_lrn.py
+++ b/tests/python/topi/python/test_topi_lrn.py
@@ -18,9 +18,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-from topi.util import get_const_tuple
-import topi.testing
+from tvm import topi
+from tvm.topi.util import get_const_tuple
+import tvm.topi.testing
 
 _lrn_schedule = {
     "generic": topi.generic.schedule_lrn,
@@ -38,7 +38,7 @@ def verify_lrn(shape, size, axis, bias, alpha, beta):
     dtype = A.dtype
 
     a_np = np.random.uniform(size=shape).astype(dtype)
-    b_np = topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
+    b_np = tvm.topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
 
     def check_device(device):
         if not tvm.runtime.enabled(device):
@@ -46,7 +46,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _lrn_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _lrn_schedule)
            s = s_func([B])
         ctx = tvm.context(device, 0)
         a = tvm.nd.array(a_np, ctx)
diff --git a/topi/tests/python/test_topi_math.py b/tests/python/topi/python/test_topi_math.py
similarity index 96%
rename from topi/tests/python/test_topi_math.py
rename to tests/python/topi/python/test_topi_math.py
index 6f1e8588fd7c..8a9754ed6f96 100644
--- a/topi/tests/python/test_topi_math.py
+++ b/tests/python/topi/python/test_topi_math.py
@@ -19,9 +19,9 @@
 from scipy import special
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi import util
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi import util
 
 from common import get_all_backend
 
@@ -64,7 +64,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name=name)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros_like(b_np), ctx)
@@ -104,7 +104,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="isnan")
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros_like(b_np), ctx)
@@ -134,7 +134,7 @@ def check_device(device):
             print("Skip because %s is not enabled" % device)
             return
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name=name)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros_like(b_np), ctx)
@@ -188,7 +188,7 @@ def verify(from_dtype, to_dtype, low=-100, high=100):
                 continue
             print("Running on target: %s" % device)
             with tvm.target.create(device):
-                s = topi.testing.get_injective_schedule(device)(B)
+                s = tvm.topi.testing.get_injective_schedule(device)(B)
             foo = tvm.build(s, [A, B], device)
             a = tvm.nd.array(a_np, ctx)
             b = tvm.nd.empty(shape=shape, dtype=to_dtype, ctx=ctx)
diff --git a/topi/tests/python/test_topi_matmul.py b/tests/python/topi/python/test_topi_matmul.py
similarity index 97%
rename from topi/tests/python/test_topi_matmul.py
rename to tests/python/topi/python/test_topi_matmul.py
index 0c0a365688b3..4ffa29e77d60 100644
--- a/topi/tests/python/test_topi_matmul.py
+++ b/tests/python/topi/python/test_topi_matmul.py
@@ -17,8 +17,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
 
 def with_tvm(lam, *args):
     """ Take numpy arrays as args, convert them to TVM tensors and call `lam`.
diff --git a/topi/tests/python/test_topi_pooling.py b/tests/python/topi/python/test_topi_pooling.py
similarity index 95%
rename from topi/tests/python/test_topi_pooling.py
rename to tests/python/topi/python/test_topi_pooling.py
index 048de8168aa8..b24dd85927b1 100644
--- a/topi/tests/python/test_topi_pooling.py
+++ b/tests/python/topi/python/test_topi_pooling.py
@@ -20,9 +20,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from common import get_all_backend
 
 _pool_schedule = {
@@ -98,7 +98,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _pool_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _pool_schedule)
            s = s_func(B, layout)
 
         a = tvm.nd.array(a_np, ctx)
@@ -140,7 +140,7 @@ def verify_pool_grad(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_inc
     a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
     out_grad_np = np.random.uniform(low=0.001, size=bshape).astype(dtype)
-    pool_grad_np = topi.testing.pool_grad_nchw(a_np, out_grad_np, pool_size=(kh, kw),
+    pool_grad_np = tvm.topi.testing.pool_grad_nchw(a_np, out_grad_np, pool_size=(kh, kw),
                                                strides=(sh, sw), padding=padding,
                                                pool_type=pool_type, ceil_mode=ceil_mode,
                                                count_include_pad=count_include_pad)
 
@@ -154,7 +154,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _pool_grad_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _pool_grad_schedule)
            s = s_func(PoolGrad)
 
         a = tvm.nd.array(a_np, ctx)
@@ -229,7 +229,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _adaptive_pool_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _adaptive_pool_schedule)
             if device == "cuda":
                 s = s_func(B, layout)
             else:
@@ -258,7 +258,7 @@ def test_global_pool():
 def verify_adaptive_pool(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
     """verify function of adaptive_pool"""
     np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
-    np_out = topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
+    np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
     oshape = np_out.shape
 
     data = te.placeholder(dshape, name="data", dtype=dtype)
@@ -275,7 +275,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _adaptive_pool_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _adaptive_pool_schedule)
             if device == "cuda":
                 s = s_func(out, layout)
             else:
@@ -326,7 +326,7 @@ def verify_pool3d(n, ic, ih, kh, sh, padding, pool_type,
     output_shape = [int(i) for i in B.shape]
 
     input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype)
-    ref_np = topi.testing.pool3d_ncdhw_python(input_np, kernel, stride, padding,
+    ref_np = tvm.topi.testing.pool3d_ncdhw_python(input_np, kernel, stride, padding,
                                               output_shape, pool_type, count_include_pad, ceil_mode)
 
     def check_device(device):
@@ -336,7 +336,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _pool_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _pool_schedule)
            s = s_func(B, layout)
 
         a = tvm.nd.array(input_np, ctx)
@@ -381,7 +381,7 @@ def verify_pool1d(n, ic, iw, kw, sw, padding, pool_type,
     output_shape = [int(i) for i in B.shape]
 
     input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype)
-    ref_np = topi.testing.pool1d_ncw_python(input_np, kernel, stride, padding,
+    ref_np = tvm.topi.testing.pool1d_ncw_python(input_np, kernel, stride, padding,
                                             output_shape, pool_type, count_include_pad, ceil_mode)
 
     def check_device(device):
@@ -391,7 +391,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _pool_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _pool_schedule)
            s = s_func(B, layout)
 
         a = tvm.nd.array(input_np, ctx)
diff --git a/topi/tests/python/test_topi_reduce.py b/tests/python/topi/python/test_topi_reduce.py
similarity index 98%
rename from topi/tests/python/test_topi_reduce.py
rename to tests/python/topi/python/test_topi_reduce.py
index cc84fe006f64..d84182f21ffd 100644
--- a/topi/tests/python/test_topi_reduce.py
+++ b/tests/python/topi/python/test_topi_reduce.py
@@ -19,8 +19,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 
 from common import get_all_backend
 
@@ -76,7 +76,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_reduce_schedule(device)(B)
+            s = tvm.topi.testing.get_reduce_schedule(device)(B)
 
         foo = tvm.build(s, [A, B], device, name=type)
         # Test
diff --git a/topi/tests/python/test_topi_relu.py b/tests/python/topi/python/test_topi_relu.py
similarity index 96%
rename from topi/tests/python/test_topi_relu.py
rename to tests/python/topi/python/test_topi_relu.py
index 4d4166ff6487..1114b3fa3c8c 100644
--- a/topi/tests/python/test_topi_relu.py
+++ b/tests/python/topi/python/test_topi_relu.py
@@ -19,9 +19,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 from tvm.contrib.nvcc import have_fp16
 
 from common import get_all_backend
@@ -43,7 +43,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_elemwise_schedule(device)(B)
+            s = tvm.topi.testing.get_elemwise_schedule(device)(B)
 
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
diff --git a/topi/tests/python/test_topi_reorg.py b/tests/python/topi/python/test_topi_reorg.py
similarity index 91%
rename from topi/tests/python/test_topi_reorg.py
rename to tests/python/topi/python/test_topi_reorg.py
index 09c2f2f966de..e5a19474029a 100644
--- a/topi/tests/python/test_topi_reorg.py
+++ b/tests/python/topi/python/test_topi_reorg.py
@@ -16,11 +16,11 @@
 # under the License.
 """Example code to do reorg."""
 import numpy as np
-import topi
-from topi.util import get_const_tuple
+from tvm import topi
+from tvm.topi.util import get_const_tuple
 import tvm
 from tvm import te
-import topi.testing
+import tvm.topi.testing
 
 _reorg_schedule = {
     "generic": topi.generic.schedule_reorg,
@@ -39,7 +39,7 @@ def verify_reorg(batch, in_size, in_channel, stride):
 
     def get_ref_data_reorg():
         a_np = np.random.uniform(size=a_shape).astype(dtype)
-        b_np = topi.testing.reorg_python(a_np, stride)
+        b_np = tvm.topi.testing.reorg_python(a_np, stride)
         return a_np, b_np
 
     a_np, b_np = get_ref_data_reorg()
@@ -52,7 +52,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s_func = topi.testing.dispatch(device, _reorg_schedule)
+            s_func = tvm.topi.testing.dispatch(device, _reorg_schedule)
            s = s_func([B])
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
diff --git a/topi/tests/python/test_topi_softmax.py b/tests/python/topi/python/test_topi_softmax.py
similarity index 90%
rename from topi/tests/python/test_topi_softmax.py
rename to tests/python/topi/python/test_topi_softmax.py
index e21307405db7..1ff69be7bc87 100644
--- a/topi/tests/python/test_topi_softmax.py
+++ b/tests/python/topi/python/test_topi_softmax.py
@@ -19,10 +19,10 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 import logging
-from topi.util import get_const_tuple
+from tvm.topi.util import get_const_tuple
 
 from common import get_all_backend
 
@@ -40,7 +40,7 @@ def check_device(A, B, a_np, b_np, device, name):
        return
     print("Running on target: %s" % device)
     with tvm.target.create(device):
-        s_func = topi.testing.dispatch(device, _softmax_schedule)
+        s_func = tvm.topi.testing.dispatch(device, _softmax_schedule)
        s = s_func(B)
 
     a = tvm.nd.array(a_np, ctx)
@@ -57,7 +57,7 @@ def verify_softmax(m, n, dtype="float32"):
     tvm.lower(s, [A, B], simple_mode=True)
 
     a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
-    b_np = topi.testing.softmax_python(a_np)
+    b_np = tvm.topi.testing.softmax_python(a_np)
 
     for device in get_all_backend():
         check_device(A, B, a_np, b_np, device, "softmax")
@@ -68,7 +68,7 @@ def verify_softmax_4d(shape, dtype="float32"):
 
     _, c, h, w = shape
     a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
-    b_np = topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c))
+    b_np = tvm.topi.testing.softmax_python(a_np.transpose(0, 2, 3, 1).reshape(h*w, c))
     b_np = b_np.reshape(1, h, w, c).transpose(0, 3, 1, 2)
 
     for device in get_all_backend():
@@ -87,7 +87,7 @@ def verify_log_softmax(m, n, dtype="float32"):
     s = te.create_schedule([B.op])
     tvm.lower(s, [A, B], simple_mode=True)
     a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
-    b_np = topi.testing.log_softmax_python(a_np)
+    b_np = tvm.topi.testing.log_softmax_python(a_np)
 
     for device in get_all_backend():
         check_device(A, B, a_np, b_np, device, "log_softmax")
diff --git a/topi/tests/python/test_topi_sort.py b/tests/python/topi/python/test_topi_sort.py
similarity index 95%
rename from topi/tests/python/test_topi_sort.py
rename to tests/python/topi/python/test_topi_sort.py
index 2728733e2394..7abfe586a4e0 100644
--- a/topi/tests/python/test_topi_sort.py
+++ b/tests/python/topi/python/test_topi_sort.py
@@ -19,8 +19,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 
 _argsort_implement = {
     "generic": (topi.argsort, topi.generic.schedule_argsort),
@@ -58,7 +58,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _argsort_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _argsort_implement)
             out = fcompute(data, axis=axis, is_ascend=is_ascend)
             s = fschedule(out)
 
@@ -102,7 +102,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            fcompute, fschedule = topi.testing.dispatch(device, _topk_implement)
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _topk_implement)
             outs = fcompute(data, k, axis, ret_type, is_ascend, dtype)
             outs = outs if isinstance(outs, list) else [outs]
             s = fschedule(outs)
diff --git a/topi/tests/python/test_topi_space_to_depth.py b/tests/python/topi/python/test_topi_space_to_depth.py
similarity index 94%
rename from topi/tests/python/test_topi_space_to_depth.py
rename to tests/python/topi/python/test_topi_space_to_depth.py
index 11a009d3fde9..f659c33d3739 100644
--- a/topi/tests/python/test_topi_space_to_depth.py
+++ b/tests/python/topi/python/test_topi_space_to_depth.py
@@ -18,8 +18,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 
 from common import get_all_backend
 
@@ -45,7 +45,7 @@ def verify_space_to_depth(block_size, batch, in_channel, in_height, in_width, la
     B = topi.nn.space_to_depth(A, block_size=block_size, layout=layout)
     if layout == 'NHWC':
         a_np = np.transpose(a_np, axes=[0, 3, 1, 2])
-    b_np = topi.testing.space_to_depth_python(a_np, block_size)
+    b_np = tvm.topi.testing.space_to_depth_python(a_np, block_size)
     if layout == 'NHWC':
         a_np = np.transpose(a_np, axes=[0, 2, 3, 1])
         b_np = np.transpose(b_np, axes=[0, 2, 3, 1])
@@ -57,7 +57,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         a = tvm.nd.array(a_np, ctx)
         b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
         f = tvm.build(s, [A, B], device)
diff --git a/topi/tests/python/test_topi_sparse.py b/tests/python/topi/python/test_topi_sparse.py
similarity index 98%
rename from topi/tests/python/test_topi_sparse.py
rename to tests/python/topi/python/test_topi_sparse.py
index 748181dc650b..e5fd0e9e6684 100644
--- a/topi/tests/python/test_topi_sparse.py
+++ b/tests/python/topi/python/test_topi_sparse.py
@@ -18,9 +18,9 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
-from topi.util import get_const_tuple
+from tvm import topi
+import tvm.topi.testing
+from tvm.topi.util import get_const_tuple
 import tvm.contrib.sparse as tvmsp
 from collections import namedtuple
 import time
@@ -307,7 +307,7 @@ def check_device(device):
             print("Skip because %s is not enabled" % device)
             return
         print("Running on target: %s" % device)
-        fcompute, fschedule = topi.testing.dispatch(device, _sparse_dense_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _sparse_dense_implement)
         with tvm.target.create(device):
             Y = fcompute(X, W_data, W_indices, W_indptr)
             if use_relu:
@@ -355,7 +355,7 @@ def check_device(device):
             print("Skip because %s is not enabled" % device)
             return
         print("Running on target: %s" % device)
-        fcompute, fschedule = topi.testing.dispatch(device, _sparse_dense_implement)
+        fcompute, fschedule = tvm.topi.testing.dispatch(device, _sparse_dense_implement)
         with tvm.target.create(device):
             Y = fcompute(X, W_data, W_indices, W_indptr)
             s = fschedule([Y])
diff --git a/topi/tests/python/test_topi_tensor.py b/tests/python/topi/python/test_topi_tensor.py
similarity index 97%
rename from topi/tests/python/test_topi_tensor.py
rename to tests/python/topi/python/test_topi_tensor.py
index 68ea7ab6d7d9..34442845a869 100644
--- a/topi/tests/python/test_topi_tensor.py
+++ b/tests/python/topi/python/test_topi_tensor.py
@@ -18,8 +18,8 @@
 import numpy as np
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.pickle_memoize import memoize
 from tvm.contrib.nvcc import have_fp16
 
@@ -100,7 +100,7 @@ def check_device(device):
         A = te.placeholder((n, m), name='A', dtype=dtype)
         B = te.compute((n, m),
                        lambda i, j: A[i, j] + tvm.tir.const(1, A.dtype), name='B')
-        S = topi.testing.get_elemwise_schedule(device)(B)
+        S = tvm.topi.testing.get_elemwise_schedule(device)(B)
 
         fun = tvm.build(S, [A, B], device)
         np_A = tvm.nd.empty((n, m), A.dtype, ctx).copyfrom(
diff --git a/topi/tests/python/test_topi_transform.py b/tests/python/topi/python/test_topi_transform.py
similarity index 94%
rename from topi/tests/python/test_topi_transform.py
rename to tests/python/topi/python/test_topi_transform.py
index ee7f114cb3de..13d24d59aab0 100644
--- a/topi/tests/python/test_topi_transform.py
+++ b/tests/python/topi/python/test_topi_transform.py
@@ -19,8 +19,8 @@
 import pytest
 import tvm
 from tvm import te
-import topi
-import topi.testing
+from tvm import topi
+import tvm.topi.testing
 from tvm.contrib.nvcc import have_fp16
 
 from common import get_all_backend
@@ -35,7 +35,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(B)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="expand_dims")
         data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
         out_npy = data_npy.reshape(out_shape)
@@ -61,7 +61,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_elemwise_schedule(device)(B)
+            s = tvm.topi.testing.get_elemwise_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="reinterpret")
         data_npy = generator(in_shape).astype(in_dtype)
         out_npy = data_npy.view(B.dtype)
@@ -84,7 +84,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="transpose")
         data_npy = np.arange(np.prod(in_shape)).reshape(in_shape).astype(A.dtype)
         out_npy = data_npy.transpose(axes)
@@ -107,7 +107,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="reshape")
         data_npy = np.random.normal(size=src_shape).astype(A.dtype)
         out_npy = np.reshape(data_npy, newshape=dst_shape)
@@ -130,7 +130,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
 
         foo = tvm.build(s, [A, B], device, name="squeeze")
         data_npy = np.random.normal(size=src_shape).astype(A.dtype)
@@ -156,7 +156,7 @@ def get_concat_schedule(target):
         for key in target.keys:
             if key in schedule_map:
                 return schedule_map[key]
-        return topi.testing.get_injective_schedule(target)
+        return tvm.topi.testing.get_injective_schedule(target)
 
     tensor_l = []
     for i, shape in enumerate(shapes):
@@ -194,7 +194,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(out_tensor)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(out_tensor)
 
         foo = tvm.build(s, tensor_l + [out_tensor], device, name="stack")
         data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
@@ -218,7 +218,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(tensor_l)
+            s = tvm.topi.testing.get_injective_schedule(device)(tensor_l)
 
         foo = tvm.build(s, [A] + list(tensor_l), device, name="split")
         data_npy = np.random.normal(size=src_shape).astype(A.dtype)
@@ -277,7 +277,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
 
         foo = tvm.build(s, [A, B], device, name="reverse")
         x_np = np.random.uniform(size=in_shape).astype(A.dtype)
@@ -305,7 +305,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(C)
+            s = tvm.topi.testing.get_injective_schedule(device)(C)
 
         foo = tvm.build(s, [A, B, C], device, name="reverse_sequence")
 
@@ -387,7 +387,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(out_tensor)
+            s = tvm.topi.testing.get_injective_schedule(device)(out_tensor)
 
         foo = tvm.build(s, [A] + [indices] + [out_tensor] , device, name="take")
         shape_size = 1
@@ -422,11 +422,11 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
 
         foo = tvm.build(s, [A, B], device, name="stride_slice")
         x_np = np.random.uniform(size=in_shape).astype(A.dtype)
-        out_npy = topi.testing.strided_slice_python(
+        out_npy = tvm.topi.testing.strided_slice_python(
             x_np, begin, end, strides) + 1
         data_nd = tvm.nd.array(x_np, ctx)
         out_nd = tvm.nd.empty(out_npy.shape, ctx=ctx, dtype=A.dtype)
@@ -454,7 +454,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
 
         if strides is not None:
             foo = tvm.build(s, [A, V, b, e, st, B], device, name="stride_set")
@@ -466,7 +466,7 @@ def check_device(device):
         v_np = np.random.uniform(size=v_shape).astype(V.dtype)
         b_np = np.asarray(begin).astype('int32')
         e_np = np.asarray(end).astype('int32')
-        out_npy = topi.testing.strided_set_python(
+        out_npy = tvm.topi.testing.strided_set_python(
             x_np, v_np, begin, end, strides) + 1
         data_nd = tvm.nd.array(x_np, ctx)
         v_nd = tvm.nd.array(v_np, ctx)
@@ -497,10 +497,10 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(out_tensor)
+            s = tvm.topi.testing.get_injective_schedule(device)(out_tensor)
 
         func = tvm.build(s, [var_data, var_indices, out_tensor] , device, name="gather")
-        out_npys = topi.testing.gather_python(data, axis, indices)
+        out_npys = tvm.topi.testing.gather_python(data, axis, indices)
 
         data_nd = tvm.nd.array(data, ctx)
         indices_nd = tvm.nd.array(indices, ctx)
@@ -525,14 +525,14 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(out_tensor)
+            s = tvm.topi.testing.get_injective_schedule(device)(out_tensor)
 
         func = tvm.build(s, [A, indices, out_tensor] , device, name="take")
         shape_size = 1
         for i in range(len(src_shape)):
             shape_size = shape_size * src_shape[i]
         data_npy = np.arange(shape_size, dtype=src_dtype).reshape((src_shape))
-        out_npys = topi.testing.gather_nd_python(data_npy, indices_src)
+        out_npys = tvm.topi.testing.gather_nd_python(data_npy, indices_src)
 
         data_nd = tvm.nd.array(data_npy, ctx)
         indices_nd = tvm.nd.array(indices_src, ctx)
@@ -564,7 +564,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(A)
+            s = tvm.topi.testing.get_injective_schedule(device)(A)
         f = tvm.build(s, [A], device, name="arange")
         a_nd = tvm.nd.empty(a_np.shape, dtype='float32', ctx=ctx)
         f(a_nd)
@@ -583,7 +583,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(B)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="repeat")
         data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
         out_npy = np.repeat(data_npy, repeats, axis)
@@ -605,7 +605,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(B)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(B)
         foo = tvm.build(s, [A, B], device, name="tile")
         data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
         out_npy = np.tile(data_npy, reps)
@@ -630,7 +630,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_broadcast_schedule(device)(C)
+            s = tvm.topi.testing.get_broadcast_schedule(device)(C)
         f = tvm.build(s, [Cond, A, B, C], device, name="where")
         cond_npy = np.random.uniform(low=-1, high=1, size=in_shape).astype(dtype)
         x_npy = np.random.uniform(size=in_shape).astype(dtype)
@@ -658,10 +658,10 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(one_hot_result)
+            s = tvm.topi.testing.get_injective_schedule(device)(one_hot_result)
         fn = tvm.build(s, [indices, one_hot_result], device, name="one_hot")
         indices_npy = np.random.randint(0, depth, size=indices_shape).astype(indices.dtype)
-        out_npy = topi.testing.one_hot(indices_npy, on_value, off_value, depth, axis, dtype)
+        out_npy = tvm.topi.testing.one_hot(indices_npy, on_value, off_value, depth, axis, dtype)
         indices_nd = tvm.nd.array(indices_npy, ctx)
         out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(one_hot_result.dtype), ctx)
         fn(indices_nd, out_nd)
@@ -691,7 +691,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(Z)
+            s = tvm.topi.testing.get_injective_schedule(device)(Z)
         foo = tvm.build(s, [X, Y, Z], device, name="unravel_index")
 
         out_npy = np.unravel_index(x_data, y_data)
@@ -727,7 +727,7 @@ def check_device(device):
            return
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(D)
+            s = tvm.topi.testing.get_injective_schedule(device)(D)
 
         foo = tvm.build(s, args + [D], device, name="sparse_to_dense")
 
@@ -816,7 +816,7 @@ def test_squeeze():
         ctx = tvm.context(device, 0)
         if ctx.exist:
             with tvm.target.create(device):
-                s = topi.testing.get_injective_schedule(device)(C)
+                s = tvm.topi.testing.get_injective_schedule(device)(C)
                 func = tvm.build(s, [A, C])
             a = tvm.nd.array(np.array((1, 2)).astype('float32'), ctx=ctx)
             c = tvm.nd.empty((1,), dtype='float32', ctx=ctx)
@@ -948,7 +948,7 @@ def check_device(device):
         tvm_output = tvm.nd.empty(output.shape, ctx=ctx, dtype=B.dtype)
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         f = tvm.build(s, [A, B], device, name="layout_transform")
         f(tvm_input, tvm_output)
         tvm.testing.assert_allclose(tvm_output.asnumpy(), output)
@@ -975,7 +975,7 @@ def check_device(device):
         tvm_output = tvm.nd.empty(output.shape, ctx=ctx, dtype=dtype)
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         f = tvm.build(s, [A, B], device, name="shape")
         f(tvm_input, tvm_output)
         tvm.testing.assert_allclose(tvm_output.asnumpy(), output)
@@ -995,7 +995,7 @@ def test_sequence_mask():
             C = topi.sequence_mask(A, B, axis=axis, mask_value=mask_value)
             A_data = np.random.normal(0, 1, in_shape).astype(np.float32)
             B_data = np.random.randint(1, max_length, (batch_size,)).astype(np.int32)
-            C_gt_data = topi.testing.sequence_mask(A_data, B_data, mask_value, axis)
+            C_gt_data = tvm.topi.testing.sequence_mask(A_data, B_data, mask_value, axis)
 
             def check_device(device):
                 ctx = tvm.context(device, 0)
@@ -1007,7 +1007,7 @@ def check_device(device):
                 tvm_C = tvm.nd.empty(in_shape, ctx=ctx, dtype="float32")
                 print("Running on target: %s" % device)
                 with tvm.target.create(device):
-                    s = topi.testing.get_injective_schedule(device)(C)
+                    s = tvm.topi.testing.get_injective_schedule(device)(C)
                 f = tvm.build(s, [A, B, C], device, name="SequenceMask")
                 f(tvm_A, tvm_B, tvm_C)
                 tvm.testing.assert_allclose(tvm_C.asnumpy(), C_gt_data)
@@ -1032,7 +1032,7 @@ def check_device(device):
         tvm_output = tvm.nd.empty((), ctx=ctx, dtype=B.dtype)
         print("Running on target: %s" % device)
         with tvm.target.create(device):
-            s = topi.testing.get_injective_schedule(device)(B)
+            s = tvm.topi.testing.get_injective_schedule(device)(B)
         f = tvm.build(s, [A, B], device, name="ndarray_size")
         f(tvm_input, tvm_output)
         tvm.testing.assert_allclose(tvm_output.asnumpy(), output)
@@ -1050,7 +1050,7 @@ def
check_device(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) - conv2d_compute, conv2d_schedule = topi.testing.get_conv2d_nchw_implement(device) + conv2d_compute, conv2d_schedule = tvm.topi.testing.get_conv2d_nchw_implement(device) data = te.placeholder((2, 1, 2, 4), 'int8', 'data') w = te.placeholder((3, 1, 2, 2), 'int8', 'w') conv1 = conv2d_compute(data, w, 1, 0, 1, 'int32') diff --git a/topi/tests/python/test_topi_upsampling.py b/tests/python/topi/python/test_topi_upsampling.py similarity index 93% rename from topi/tests/python/test_topi_upsampling.py rename to tests/python/topi/python/test_topi_upsampling.py index 874471b830fd..04cc31092402 100644 --- a/topi/tests/python/test_topi_upsampling.py +++ b/tests/python/topi/python/test_topi_upsampling.py @@ -18,10 +18,10 @@ import numpy as np import tvm from tvm import te -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing import math -from topi.util import nchw_pack_layout +from tvm.topi.util import nchw_pack_layout from common import get_all_backend @@ -54,9 +54,9 @@ def verify_upsampling(batch, in_channel, in_height, in_width, scale_h, scale_w, if method == "bilinear": out_size = (int(round(in_height*scale_h)), int(round(in_width*scale_w))) - b_np = topi.testing.bilinear_resize_python(a_np, out_size, layout, "asymmetric") + b_np = tvm.topi.testing.bilinear_resize_python(a_np, out_size, layout, "asymmetric") else: - b_np = topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout) + b_np = tvm.topi.testing.upsampling_python(a_np, (scale_h, scale_w), layout) def check_device(device): ctx = tvm.context(device, 0) @@ -65,7 +65,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.testing.get_injective_schedule(device)(B) + s = tvm.topi.testing.get_injective_schedule(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx) f = tvm.build(s, [A, B], device) @@ -136,10 +136,10 @@ def verify_upsampling3d(batch, in_channel, in_depth, in_height, in_width, scale_ if method == "trilinear": out_size = (int(round(in_depth*scale_d)), int(round(in_height*scale_h)), int(round(in_width*scale_w))) - b_np = topi.testing.trilinear_resize3d_python(a_np, out_size, layout, + b_np = tvm.topi.testing.trilinear_resize3d_python(a_np, out_size, layout, coordinate_transformation_mode="half_pixel") else: - b_np = topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout) + b_np = tvm.topi.testing.upsampling3d_python(a_np, (scale_d, scale_h, scale_w), layout) def check_device(device): ctx = tvm.context(device, 0) @@ -148,7 +148,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.testing.get_injective_schedule(device)(B) + s = tvm.topi.testing.get_injective_schedule(device)(B) a = tvm.nd.array(a_np, ctx) b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx) f = tvm.build(s, [A, B], device) diff --git a/topi/tests/python/test_topi_util.py b/tests/python/topi/python/test_topi_util.py similarity index 98% rename from topi/tests/python/test_topi_util.py rename to tests/python/topi/python/test_topi_util.py index 534b6993d411..345e7f9baf1a 100644 --- a/topi/tests/python/test_topi_util.py +++ b/tests/python/topi/python/test_topi_util.py @@ -16,7 +16,7 @@ # under the License. 
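For reference, this is the import pattern the renamed topi tests above now follow; a minimal self-contained sketch (the 4x4 shape, the llvm target, and the transpose op are illustrative choices, not taken from the diff):

import numpy as np
import tvm
from tvm import te, topi   # was: import topi
import tvm.topi.testing    # was: import topi.testing

# Declare a small injective op and schedule it via the relocated test helpers.
A = te.placeholder((4, 4), name="A")
B = topi.transpose(A, (1, 0))
device = "llvm"
ctx = tvm.context(device, 0)
with tvm.target.create(device):
    s = tvm.topi.testing.get_injective_schedule(device)(B)
f = tvm.build(s, [A, B], device, name="transpose")
a = tvm.nd.array(np.arange(16, dtype="float32").reshape(4, 4), ctx)
b = tvm.nd.empty((4, 4), "float32", ctx)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy().T)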
"""Test code for util""" -import topi +from tvm import topi def verify_get_shape(src_shape, src_layout, dst_layout, expect_shape): diff --git a/topi/tests/python/test_topi_vision.py b/tests/python/topi/python/test_topi_vision.py similarity index 95% rename from topi/tests/python/test_topi_vision.py rename to tests/python/topi/python/test_topi_vision.py index b74e19346f30..e0e2205ba0bf 100644 --- a/topi/tests/python/test_topi_vision.py +++ b/tests/python/topi/python/test_topi_vision.py @@ -20,12 +20,12 @@ import numpy as np import tvm from tvm import te -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing from tvm.contrib.pickle_memoize import memoize -from topi.util import get_const_tuple -from topi.vision import ssd, non_max_suppression, get_valid_counts +from tvm.topi.util import get_const_tuple +from tvm.topi.vision import ssd, non_max_suppression, get_valid_counts _get_valid_counts_implement = { "generic": (topi.vision.get_valid_counts, topi.generic.schedule_get_valid_counts), @@ -93,7 +93,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - fcompute, fschedule = topi.testing.dispatch(device, _get_valid_counts_implement) + fcompute, fschedule = tvm.topi.testing.dispatch(device, _get_valid_counts_implement) data = te.placeholder(dshape, name="data", dtype=dtype) outs = fcompute(data, score_threshold, id_index, score_index) s = fschedule(outs) @@ -148,7 +148,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - fcompute, fschedule = topi.testing.dispatch(device, _nms_implement) + fcompute, fschedule = tvm.topi.testing.dispatch(device, _nms_implement) out = fcompute(data, valid_count, indices, max_output_size, iou_threshold, force_suppress, top_k, coord_start=coord_start, score_index=score_index, id_index=id_index, return_indices=False) @@ -252,7 +252,7 @@ def check_device(device): return print("Running on target: %s" % device) - fcompute, fschedule = topi.testing.dispatch(device, _multibox_prior_implement) + fcompute, fschedule = tvm.topi.testing.dispatch(device, _multibox_prior_implement) with tvm.target.create(device): out = fcompute(data, sizes, ratios, steps, offsets, clip) s = fschedule(out) @@ -297,7 +297,7 @@ def check_device(device): return print("Running on target: %s" % device) - fcompute, fschedule = topi.testing.dispatch(device, _multibox_detection_implement) + fcompute, fschedule = tvm.topi.testing.dispatch(device, _multibox_detection_implement) with tvm.target.create(device): out = fcompute(cls_prob, loc_preds, anchors) s = fschedule(out) @@ -326,7 +326,7 @@ def get_ref_data(): a_np = np.random.uniform(size=a_shape).astype('float32') rois_np = np.random.uniform(size=rois_shape).astype('float32') * in_size rois_np[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi) - b_np = topi.testing.roi_align_nchw_python(a_np, rois_np, pooled_size=pooled_size, + b_np = tvm.topi.testing.roi_align_nchw_python(a_np, rois_np, pooled_size=pooled_size, spatial_scale=spatial_scale, sample_ratio=sample_ratio) @@ -342,7 +342,7 @@ def check_device(device): print("Running on target: %s" % device) with tvm.target.create(device): - fcompute, fschedule = topi.testing.dispatch(device, _roi_align_implement) + fcompute, fschedule = tvm.topi.testing.dispatch(device, _roi_align_implement) b = fcompute(a, rois, pooled_size=pooled_size, spatial_scale=spatial_scale, sample_ratio=sample_ratio) @@ -379,7 +379,7 @@ def get_ref_data(): rois_np = 
np.random.uniform(size=rois_shape).astype('float32') * in_size rois_np[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi).astype('float32') - b_np = topi.testing.roi_pool_nchw_python(a_np, rois_np, pooled_size=pooled_size, + b_np = tvm.topi.testing.roi_pool_nchw_python(a_np, rois_np, pooled_size=pooled_size, spatial_scale=spatial_scale) return a_np, rois_np, b_np @@ -395,7 +395,7 @@ def check_device(device): with tvm.target.create(device): b = topi.vision.rcnn.roi_pool_nchw(a, rois, pooled_size=pooled_size, spatial_scale=spatial_scale) - s_func = topi.testing.dispatch(device, _roi_pool_schedule) + s_func = tvm.topi.testing.dispatch(device, _roi_pool_schedule) s = s_func(b) tvm_a = tvm.nd.array(a_np, ctx) @@ -426,7 +426,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - fcompute, fschedule = topi.testing.dispatch(device, _proposal_implement) + fcompute, fschedule = tvm.topi.testing.dispatch(device, _proposal_implement) out = fcompute(cls_prob, bbox_pred, im_info, **attrs) s = fschedule(out) f = tvm.build(s, [cls_prob, bbox_pred, im_info, out], device) diff --git a/tests/python/unittest/test_auto_scheduler_common.py b/tests/python/unittest/test_auto_scheduler_common.py index fa22fdc5597c..b67178ec4370 100644 --- a/tests/python/unittest/test_auto_scheduler_common.py +++ b/tests/python/unittest/test_auto_scheduler_common.py @@ -20,7 +20,7 @@ import threading from tvm import te, auto_scheduler -import topi +from tvm import topi @auto_scheduler.register_workload diff --git a/tests/python/unittest/test_auto_scheduler_compute_dag.py b/tests/python/unittest/test_auto_scheduler_compute_dag.py index d9c24b97171e..2530d554e8ee 100644 --- a/tests/python/unittest/test_auto_scheduler_compute_dag.py +++ b/tests/python/unittest/test_auto_scheduler_compute_dag.py @@ -17,7 +17,8 @@ """Test ComputeDAG (replay, infer bound)""" -import tvm, topi +import tvm +from tvm import topi from tvm import auto_scheduler, te from test_auto_scheduler_common import get_tiled_matmul, matmul_auto_scheduler_test diff --git a/tests/python/unittest/test_auto_scheduler_loop_state.py b/tests/python/unittest/test_auto_scheduler_loop_state.py index 5c501ac2e2b7..a051e8189423 100644 --- a/tests/python/unittest/test_auto_scheduler_loop_state.py +++ b/tests/python/unittest/test_auto_scheduler_loop_state.py @@ -21,7 +21,7 @@ import tvm from tvm import auto_scheduler, te -import topi +from tvm import topi from test_auto_scheduler_common import matmul_auto_scheduler_test, conv2d_nchw_bn_relu diff --git a/tests/python/unittest/test_auto_scheduler_measure.py b/tests/python/unittest/test_auto_scheduler_measure.py index e65f19146f53..9282667c025a 100644 --- a/tests/python/unittest/test_auto_scheduler_measure.py +++ b/tests/python/unittest/test_auto_scheduler_measure.py @@ -18,7 +18,7 @@ """ Test measurement and log serialization. 
""" import tvm -import topi +from tvm import topi from tvm import te, auto_scheduler import tempfile diff --git a/tests/python/unittest/test_runtime_heterogeneous.py b/tests/python/unittest/test_runtime_heterogeneous.py index 343b86717028..58174dd442a9 100644 --- a/tests/python/unittest/test_runtime_heterogeneous.py +++ b/tests/python/unittest/test_runtime_heterogeneous.py @@ -22,7 +22,7 @@ import tvm from tvm import te from tvm.contrib import graph_runtime, util -import topi +from tvm import topi def get_simplex_graph(host_dev_type, device_dev_type): r""" Return the hand-crafted json object where only one copy node is diff --git a/tests/python/unittest/test_target_codegen_cuda.py b/tests/python/unittest/test_target_codegen_cuda.py index 4cd08d0f75c4..7fdd2592ee5f 100644 --- a/tests/python/unittest/test_target_codegen_cuda.py +++ b/tests/python/unittest/test_target_codegen_cuda.py @@ -18,7 +18,7 @@ import tvm from tvm import te import numpy as np -import topi +from tvm import topi import unittest from tvm.contrib.nvcc import have_fp16, have_int8 from tvm.contrib import nvcc @@ -881,14 +881,14 @@ def test_unrolled_vectorization(): dtype = 'float32' target = 'cuda' - + ## Compute declaration N = 128 A = te.placeholder((N, N), name='A') B = te.placeholder((N, N), name='B') k = te.reduce_axis((0, N), name='k') C = te.compute((N, N), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name='C') - + ## Schedule s = te.create_schedule([C.op]) CC = s.cache_write(C, "local") @@ -903,7 +903,7 @@ def test_unrolled_vectorization(): ko, ki = s[CC].split(k, 2) s[CC].unroll(ki) s[CC].vectorize(j) - + ## Check correctness ctx = tvm.context(target) a_tvm = tvm.nd.array(np.ones((N, N)).astype(dtype), ctx=ctx) diff --git a/tests/python/unittest/test_target_codegen_llvm.py b/tests/python/unittest/test_target_codegen_llvm.py index cf8250401246..519d18786e28 100644 --- a/tests/python/unittest/test_target_codegen_llvm.py +++ b/tests/python/unittest/test_target_codegen_llvm.py @@ -16,7 +16,7 @@ # under the License. import tvm from tvm import te -import topi +from tvm import topi from tvm.contrib import util, clang import numpy as np import ctypes diff --git a/tests/python/unittest/test_target_custom_datatypes.py b/tests/python/unittest/test_target_custom_datatypes.py index f6723e2b1ee1..eb48d83af077 100644 --- a/tests/python/unittest/test_target_custom_datatypes.py +++ b/tests/python/unittest/test_target_custom_datatypes.py @@ -18,7 +18,7 @@ import tvm from tvm import te from ctypes import * -import topi +from tvm import topi import numpy as np tgt = "llvm" diff --git a/tests/python/unittest/test_te_autodiff.py b/tests/python/unittest/test_te_autodiff.py index c756de050b08..54158745e1bd 100644 --- a/tests/python/unittest/test_te_autodiff.py +++ b/tests/python/unittest/test_te_autodiff.py @@ -18,8 +18,8 @@ import tvm from tvm import te from tvm.testing import check_numerical_grads, assert_allclose -import topi -from topi.util import get_const_tuple +from tvm import topi +from tvm.topi.util import get_const_tuple import numpy as np diff --git a/tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py b/tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py index 977dfc3d6b26..1f1791447ab1 100644 --- a/tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py +++ b/tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py @@ -16,7 +16,7 @@ # under the License. 
import tvm from tvm import te -import topi +from tvm import topi import numpy as np from tvm.contrib import nvcc diff --git a/tests/python/unittest/test_te_schedule_tensor_core.py b/tests/python/unittest/test_te_schedule_tensor_core.py index c6196d8765ca..aa87665455df 100644 --- a/tests/python/unittest/test_te_schedule_tensor_core.py +++ b/tests/python/unittest/test_te_schedule_tensor_core.py @@ -17,7 +17,7 @@ import tvm from tvm import te import numpy as np -from topi.testing import conv2d_nhwc_python +from tvm.topi.testing import conv2d_nhwc_python from tvm.contrib import nvcc VERIFY = True diff --git a/tests/python/unittest/test_te_tensor.py b/tests/python/unittest/test_te_tensor.py index 662eff09260c..3d22c0f207f7 100644 --- a/tests/python/unittest/test_te_tensor.py +++ b/tests/python/unittest/test_te_tensor.py @@ -17,7 +17,7 @@ import tvm import numpy as np from tvm import te -from topi.nn.pooling import pool +from tvm.topi.nn.pooling import pool def test_tensor(): m = te.size_var('m') diff --git a/tests/python/unittest/test_te_tensor_overload.py b/tests/python/unittest/test_te_tensor_overload.py index 053bd15f2990..9d091a2cefc5 100644 --- a/tests/python/unittest/test_te_tensor_overload.py +++ b/tests/python/unittest/test_te_tensor_overload.py @@ -17,9 +17,9 @@ import numpy as np import tvm from tvm import te -import topi -import topi.testing -from topi.util import get_const_tuple +from tvm import topi +import tvm.topi.testing +from tvm.topi.util import get_const_tuple def test_operator_type_and_tags(): @@ -109,7 +109,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.testing.get_elemwise_schedule(device)(B) + s = tvm.topi.testing.get_elemwise_schedule(device)(B) k_ = 2 foo = tvm.build(s, [A, B, k] + sh, device, name="tensor_scalar_" + typ) @@ -155,7 +155,7 @@ def check_device(device): return print("Running on target: %s" % device) with tvm.target.create(device): - s = topi.testing.get_broadcast_schedule(device)(C) + s = tvm.topi.testing.get_broadcast_schedule(device)(C) foo = tvm.build(s, [A, B, C], device, name="broadcast_binary" + "_" + typ) lhs_npy = np.random.uniform(size=lhs_shape).astype(A.dtype) @@ -191,7 +191,7 @@ def check_device(device): return print("Running on target: %s" % device) - conv2d_nchw, schedule_conv2d_nchw = topi.testing.get_conv2d_nchw_implement(device) + conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(device) k = 10.0 dilation = (1, 1) @@ -215,7 +215,7 @@ def check_device(device): a_npy = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype) w_npy = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype) - b_npy = topi.testing.conv2d_nchw_python(a_npy, w_npy, stride, padding) + b_npy = tvm.topi.testing.conv2d_nchw_python(a_npy, w_npy, stride, padding) c_npy = np.random.uniform(size=get_const_tuple(B.shape)).astype(B.dtype) if typ == "add": c_npy = b_npy + k diff --git a/tests/python/unittest/test_tir_data_layout.py b/tests/python/unittest/test_tir_data_layout.py index 86a71da6dbeb..c3a666100682 100644 --- a/tests/python/unittest/test_tir_data_layout.py +++ b/tests/python/unittest/test_tir_data_layout.py @@ -18,7 +18,7 @@ import tvm from tvm import te -from topi.util import get_const_tuple +from tvm.topi.util import get_const_tuple def test_layout(): layout = tvm.tir.layout("NCHW16c") diff --git a/tests/python/unittest/test_tir_intrin.py b/tests/python/unittest/test_tir_intrin.py index 26bf80f5e1a5..0920603af218 100644 --- 
a/tests/python/unittest/test_tir_intrin.py +++ b/tests/python/unittest/test_tir_intrin.py @@ -16,7 +16,7 @@ # under the License. import tvm from tvm import te -import topi +from tvm import topi from tvm.contrib import util, clang import numpy as np import ctypes @@ -84,7 +84,7 @@ def run_test(tvm_intrin, np_func): f(a, b) tvm.testing.assert_allclose( b.asnumpy(), np_func(a.asnumpy()), atol=1e-5, rtol=1e-5) - + for func in test_funcs: run_test(*func) diff --git a/tests/python/unittest/test_tir_transform_bf16_legalize.py b/tests/python/unittest/test_tir_transform_bf16_legalize.py index 599ddba41015..1d57db63a394 100644 --- a/tests/python/unittest/test_tir_transform_bf16_legalize.py +++ b/tests/python/unittest/test_tir_transform_bf16_legalize.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. import tvm -import topi +from tvm import topi from tvm import te diff --git a/tests/python/unittest/test_tir_transform_loop_partition.py b/tests/python/unittest/test_tir_transform_loop_partition.py index ce8c16e87413..73642e0f4438 100644 --- a/tests/python/unittest/test_tir_transform_loop_partition.py +++ b/tests/python/unittest/test_tir_transform_loop_partition.py @@ -430,7 +430,7 @@ def test_conv_tiling(): def test_multilevel_splitting_with_indivisble_factors(): - import topi + from tvm import topi A = te.placeholder((130,), dtype="float32") B = topi.nn.relu(A) s = te.create_schedule(B.op) diff --git a/tests/scripts/setup-pytest-env.sh b/tests/scripts/setup-pytest-env.sh index 414186c97850..61c079aa4744 100755 --- a/tests/scripts/setup-pytest-env.sh +++ b/tests/scripts/setup-pytest-env.sh @@ -27,4 +27,4 @@ fi set -u export TVM_PATH=`pwd` -export PYTHONPATH=${TVM_PATH}/python:${TVM_PATH}/topi/python +export PYTHONPATH=${TVM_PATH}/python diff --git a/tests/scripts/task_golang.sh b/tests/scripts/task_golang.sh index 0ff6c39d602c..7a93b47c2913 100755 --- a/tests/scripts/task_golang.sh +++ b/tests/scripts/task_golang.sh @@ -22,7 +22,7 @@ set -u export LD_LIBRARY_PATH="lib:${LD_LIBRARY_PATH:-}" tvm_root="$(git rev-parse --show-toplevel)" -export PYTHONPATH="$tvm_root/python":"$tvm_root/topi/python" +export PYTHONPATH="$tvm_root/python" # to avoid CI CPU thread throttling. export TVM_BIND_THREADS=0 diff --git a/tests/scripts/task_python_topi.sh b/tests/scripts/task_python_topi.sh index e483d5f7f4a6..3bc3caf825cf 100755 --- a/tests/scripts/task_python_topi.sh +++ b/tests/scripts/task_python_topi.sh @@ -31,4 +31,4 @@ make cython3 # cleanup pycache find . -type f -path "*.pyc" | xargs rm -f -python3 -m pytest topi/tests/python +python3 -m pytest tests/python/topi/ diff --git a/tests/scripts/task_rust.sh b/tests/scripts/task_rust.sh index 6d159f671cd3..d7b9a5b74406 100755 --- a/tests/scripts/task_rust.sh +++ b/tests/scripts/task_rust.sh @@ -22,7 +22,7 @@ set -u export TVM_HOME="$(git rev-parse --show-toplevel)" export LD_LIBRARY_PATH="$TVM_HOME/lib:$TVM_HOME/build:${LD_LIBRARY_PATH:-}" -export PYTHONPATH="$TVM_HOME/python":"$TVM_HOME/topi/python" +export PYTHONPATH="$TVM_HOME/python" export RUST_DIR="$TVM_HOME/rust" diff --git a/topi/python/setup.py b/topi/python/setup.py deleted file mode 100644 index 683717931378..000000000000 --- a/topi/python/setup.py +++ /dev/null @@ -1,127 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
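The unittest and script changes above are the same mechanical rename throughout: the top-level `topi` package becomes `tvm.topi`, and `from tvm import topi` binds the identical local name, so call sites need no edits; the shell scripts likewise only drop topi/python from PYTHONPATH. A minimal sketch of the swap, reusing the relu example from test_tir_transform_loop_partition.py above:

from tvm import te, topi  # replaces the old top-level "import topi"

# Call sites are unchanged: the name `topi` still resolves the same way.
A = te.placeholder((130,), dtype="float32")
B = topi.nn.relu(A)
s = te.create_schedule(B.op)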
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# pylint: disable=invalid-name, exec-used -"""Setup TOPI package.""" -from __future__ import absolute_import -import os -import shutil -import sys - -from setuptools import find_packages -from setuptools.dist import Distribution - -if "--inplace" in sys.argv: - from distutils.core import setup - from distutils.extension import Extension -else: - from setuptools import setup - from setuptools.extension import Extension - -CURRENT_DIR = os.path.dirname(__file__) - - -def get_lib_names(): - if sys.platform.startswith('win32'): - return ['libtvm_topi.dll', 'tvm_topi.dll'] - if sys.platform.startswith('darwin'): - return ['libtvm_topi.dylib', 'tvm_topi.dylib'] - return ['libtvm_topi.so', 'tvm_topi.so'] - - -def get_lib_path(): - """Get library path, name and version""" - # We can not import `libinfo.py` in setup.py directly since __init__.py - # Will be invoked which introduces dependences - libinfo_py = os.path.join(CURRENT_DIR, '../../python/tvm/_ffi/libinfo.py') - libinfo = {'__file__': libinfo_py} - exec(compile(open(libinfo_py, "rb").read(), - libinfo_py, 'exec'), libinfo, libinfo) - version = libinfo['__version__'] - if not os.getenv('CONDA_BUILD'): - lib_path = libinfo['find_lib_path'](get_lib_names()) - libs = [lib_path[0]] - if libs[0].find("runtime") == -1: - for name in lib_path[1:]: - if name.find("runtime") != -1: - libs.append(name) - break - else: - libs = None - return libs, version - - -LIB_LIST, __version__ = get_lib_path() - -if not os.getenv('CONDA_BUILD'): - curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) - for i, path in enumerate(LIB_LIST): - LIB_LIST[i] = os.path.relpath(path, curr_path) - setup_kwargs = { - "include_package_data": True, - "data_files": [('topi', LIB_LIST)] - } -else: - setup_kwargs = {} - - -include_libs = False -wheel_include_libs = False -if not os.getenv('CONDA_BUILD'): - if "bdist_wheel" in sys.argv: - wheel_include_libs = True - else: - include_libs = True - -# For bdist_wheel only -if wheel_include_libs: - with open("MANIFEST.in", "w") as fo: - for path in LIB_LIST: - shutil.copy(path, os.path.join(CURRENT_DIR, 'topi')) - _, libname = os.path.split(path) - fo.write("include topi/%s\n" % libname) - setup_kwargs = { - "include_package_data": True - } - -if include_libs: - curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) - for i, path in enumerate(LIB_LIST): - LIB_LIST[i] = os.path.relpath(path, curr_path) - setup_kwargs = { - "include_package_data": True, - "data_files": [('topi', LIB_LIST)] - } - -setup(name='topi', - version=__version__, - description="TOPI: TVM operator index", - install_requires=[ - "numpy", - "decorator", - ], - packages=find_packages(), - url='https://github.com/apache/incubator-tvm', - **setup_kwargs) - - -if wheel_include_libs: - # Wheel cleanup - os.remove("MANIFEST.in") - for path in LIB_LIST: - _, libname = os.path.split(path) - os.remove("topi/%s" % libname) diff --git 
a/tutorials/autotvm/tune_conv2d_cuda.py b/tutorials/autotvm/tune_conv2d_cuda.py index 3cdbb84ab722..904315109cba 100644 --- a/tutorials/autotvm/tune_conv2d_cuda.py +++ b/tutorials/autotvm/tune_conv2d_cuda.py @@ -50,8 +50,8 @@ import tvm from tvm import te -import topi -from topi.testing import conv2d_nchw_python +from tvm import topi +from tvm.topi.testing import conv2d_nchw_python from tvm import autotvm diff --git a/tutorials/frontend/deploy_model_on_android.py b/tutorials/frontend/deploy_model_on_android.py index 55eb3aad76ce..2e3e389bd4e2 100644 --- a/tutorials/frontend/deploy_model_on_android.py +++ b/tutorials/frontend/deploy_model_on_android.py @@ -79,7 +79,7 @@ # # .. code-block:: bash # -# echo 'export PYTHONPATH=/workspace/python:/workspace/topi/python:/workspace/vta/python:${PYTHONPATH}' >> ~/.bashrc +# echo 'export PYTHONPATH=/workspace/python:/workspace/vta/python:${PYTHONPATH}' >> ~/.bashrc # source ~/.bashrc ################################################################# diff --git a/tutorials/language/tedd.py b/tutorials/language/tedd.py index a6cd8019e31e..7edcde99575a 100644 --- a/tutorials/language/tedd.py +++ b/tutorials/language/tedd.py @@ -39,7 +39,7 @@ """ import tvm from tvm import te -import topi +from tvm import topi from tvm.contrib import tedd ###################################################################### diff --git a/tutorials/language/tensorize.py b/tutorials/language/tensorize.py index 8a77c7764648..ac5b50f10ee2 100644 --- a/tutorials/language/tensorize.py +++ b/tutorials/language/tensorize.py @@ -180,7 +180,7 @@ def gemv_impl(): # func = tvm.build(s, [A, B, C], target="llvm", name="gemv") -from topi.util import get_const_tuple +from tvm.topi.util import get_const_tuple dtype = A.dtype ctx = tvm.context("cpu", 0) a = np.random.uniform(size=get_const_tuple(A.shape)).astype(dtype) diff --git a/tutorials/topi/intro_topi.py b/tutorials/topi/intro_topi.py index 5bb5f0a66e30..5938b692119c 100644 --- a/tutorials/topi/intro_topi.py +++ b/tutorials/topi/intro_topi.py @@ -27,7 +27,7 @@ import tvm from tvm import te -import topi +from tvm import topi import numpy as np ###################################################################### diff --git a/vta/python/vta/__init__.py b/vta/python/vta/__init__.py index 70c003c92b57..d39f982823f9 100644 --- a/vta/python/vta/__init__.py +++ b/vta/python/vta/__init__.py @@ -30,7 +30,7 @@ __version__ = "0.1.0" -# do not import topi when running vta.exec.rpc_server +# do not import tvm.topi when running vta.exec.rpc_server # to maintain minimum dependency on the board if sys.argv[0] not in ("-c", "-m"): from .
import top diff --git a/vta/python/vta/top/bitpack.py b/vta/python/vta/top/bitpack.py index 7a0710053b87..48a5c1cc4a10 100644 --- a/vta/python/vta/top/bitpack.py +++ b/vta/python/vta/top/bitpack.py @@ -21,7 +21,7 @@ import tvm from tvm import te -from topi import util +from tvm.topi import util from tvm.relay.op.op import register_compute, register_injective_schedule from tvm.relay.op.op import register_pattern, OpPattern diff --git a/vta/python/vta/top/op.py b/vta/python/vta/top/op.py index 2198ed4c191f..8280798642b3 100644 --- a/vta/python/vta/top/op.py +++ b/vta/python/vta/top/op.py @@ -20,7 +20,7 @@ import tvm from tvm import te -import topi +from tvm import topi from tvm.relay.op import op as reg from tvm.relay.op import strategy as _strategy diff --git a/vta/python/vta/top/vta_conv2d.py b/vta/python/vta/top/vta_conv2d.py index 5b23ddeba1c1..799b10535991 100644 --- a/vta/python/vta/top/vta_conv2d.py +++ b/vta/python/vta/top/vta_conv2d.py @@ -21,7 +21,7 @@ import tvm from tvm import te from tvm import autotvm -import topi +from tvm import topi from .util import is_packed_layout from ..environment import get_env diff --git a/vta/python/vta/top/vta_conv2d_transpose.py b/vta/python/vta/top/vta_conv2d_transpose.py index ddfebc2cc8c1..ea0dfce74a8e 100644 --- a/vta/python/vta/top/vta_conv2d_transpose.py +++ b/vta/python/vta/top/vta_conv2d_transpose.py @@ -21,9 +21,9 @@ import tvm from tvm import te from tvm import autotvm -import topi -from topi.util import get_const_tuple -from topi.nn.util import get_pad_tuple +from tvm import topi +from tvm.topi.util import get_const_tuple +from tvm.topi.nn.util import get_pad_tuple from ..environment import get_env diff --git a/vta/python/vta/top/vta_dense.py b/vta/python/vta/top/vta_dense.py index 912f41f30dfb..0b9826175625 100644 --- a/vta/python/vta/top/vta_dense.py +++ b/vta/python/vta/top/vta_dense.py @@ -21,7 +21,7 @@ import tvm from tvm import te from tvm import autotvm -import topi +from tvm import topi from ..environment import get_env diff --git a/vta/python/vta/top/vta_group_conv2d.py b/vta/python/vta/top/vta_group_conv2d.py index d470fb77038b..36768c3ca637 100644 --- a/vta/python/vta/top/vta_group_conv2d.py +++ b/vta/python/vta/top/vta_group_conv2d.py @@ -21,7 +21,7 @@ import tvm from tvm import te from tvm import autotvm -import topi +from tvm import topi from ..environment import get_env diff --git a/vta/python/vta/transform.py b/vta/python/vta/transform.py index d9f47f1f71ec..eb051f5da3f9 100644 --- a/vta/python/vta/transform.py +++ b/vta/python/vta/transform.py @@ -18,7 +18,7 @@ # pylint: disable=len-as-condition, no-else-return, unused-argument, invalid-name import tvm from tvm import te -from topi import util +from tvm.topi import util from .environment import get_env diff --git a/vta/scripts/tune_conv2d.py b/vta/scripts/tune_conv2d.py index 6d0b5d435b3b..6095d9684f6f 100644 --- a/vta/scripts/tune_conv2d.py +++ b/vta/scripts/tune_conv2d.py @@ -24,7 +24,7 @@ import tvm from tvm import te from tvm import autotvm -import topi +from tvm import topi import vta import vta.testing diff --git a/vta/scripts/tune_conv2d_transpose.py b/vta/scripts/tune_conv2d_transpose.py index b7c380ed7239..551e6f9508fb 100644 --- a/vta/scripts/tune_conv2d_transpose.py +++ b/vta/scripts/tune_conv2d_transpose.py @@ -24,7 +24,7 @@ import tvm from tvm import te from tvm import autotvm -import topi +from tvm import topi import vta import vta.testing diff --git a/vta/scripts/tune_dense.py b/vta/scripts/tune_dense.py index e54de1d4ea70..b1711fa14377 100644 --- 
a/vta/scripts/tune_dense.py +++ b/vta/scripts/tune_dense.py @@ -24,7 +24,7 @@ import tvm from tvm import te from tvm import autotvm -import topi +from tvm import topi import vta import vta.testing diff --git a/vta/scripts/tune_group_conv2d.py b/vta/scripts/tune_group_conv2d.py index 72f9525320ef..d8dcc0260097 100644 --- a/vta/scripts/tune_group_conv2d.py +++ b/vta/scripts/tune_group_conv2d.py @@ -24,7 +24,7 @@ import tvm from tvm import te from tvm import autotvm -import topi +from tvm import topi import vta import vta.testing diff --git a/vta/scripts/tune_resnet.py b/vta/scripts/tune_resnet.py index 2d358d335389..2d15335f071a 100644 --- a/vta/scripts/tune_resnet.py +++ b/vta/scripts/tune_resnet.py @@ -22,7 +22,7 @@ import numpy as np from PIL import Image -import topi +from tvm import topi import tvm from tvm import te from tvm import rpc, autotvm, relay diff --git a/vta/tests/python/integration/test_benchmark_topi_conv2d.py b/vta/tests/python/integration/test_benchmark_topi_conv2d.py index b3c36e85d56b..3affbacc2114 100644 --- a/vta/tests/python/integration/test_benchmark_topi_conv2d.py +++ b/vta/tests/python/integration/test_benchmark_topi_conv2d.py @@ -30,8 +30,8 @@ from tvm import autotvm from tvm.contrib import util from tvm.contrib.pickle_memoize import memoize -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing import vta from vta import program_fpga, reconfig_runtime import vta.testing @@ -143,7 +143,7 @@ def get_ref_data(): a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype) w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype) b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype) - r_np = topi.testing.conv2d_nchw_python( + r_np = tvm.topi.testing.conv2d_nchw_python( a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype), (wl.hstride, wl.wstride), wl.hpad).astype(env.acc_dtype) return a_np, w_np, b_np, r_np diff --git a/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py b/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py index 558c3aba51fb..80a68486e059 100644 --- a/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py +++ b/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py @@ -30,8 +30,8 @@ from tvm import autotvm from tvm.contrib import util from tvm.contrib.pickle_memoize import memoize -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing import vta from vta import program_fpga, reconfig_runtime import vta.testing @@ -134,7 +134,7 @@ def get_ref_data(): w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1)) a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype) w_np = np.random.randint(w_min, w_max, size=(wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)).astype(kernel.dtype) - r_np = topi.testing.conv2d_transpose_nchw_python( + r_np = tvm.topi.testing.conv2d_transpose_nchw_python( a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype), (wl.hstride, wl.wstride), wl.hpad, (wl.o_hpad, wl.o_wpad)).astype(env.acc_dtype) return a_np, w_np, r_np diff --git a/vta/tests/python/integration/test_benchmark_topi_dense.py b/vta/tests/python/integration/test_benchmark_topi_dense.py index 95c491a6d723..3affb36a30d3 100644 --- a/vta/tests/python/integration/test_benchmark_topi_dense.py +++ b/vta/tests/python/integration/test_benchmark_topi_dense.py @@ -28,8 +28,8 @@ from tvm import autotvm from tvm.contrib import util from tvm.contrib.pickle_memoize import memoize 
-import topi -import topi.testing +from tvm import topi +import tvm.topi.testing import vta from vta import program_fpga, reconfig_runtime import vta.testing diff --git a/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py b/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py index 1d5838ce8cda..1fed5a077fdf 100644 --- a/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py +++ b/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py @@ -29,8 +29,8 @@ from tvm import relay from tvm import autotvm from tvm.contrib import util -import topi -import topi.testing +from tvm import topi +import tvm.topi.testing import vta from vta import program_fpga, reconfig_runtime import vta.testing @@ -135,7 +135,7 @@ def get_ref_data(): a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype) w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype) b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype) - r_np = topi.testing.conv2d_nchw_python( + r_np = tvm.topi.testing.conv2d_nchw_python( a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype), (wl.hstride, wl.wstride), wl.hpad, wl.groups).astype(env.acc_dtype) return a_np, w_np, b_np, r_np diff --git a/vta/tests/python/unittest/test_vta_insn.py b/vta/tests/python/unittest/test_vta_insn.py index c76636a4d242..be347a03782b 100644 --- a/vta/tests/python/unittest/test_vta_insn.py +++ b/vta/tests/python/unittest/test_vta_insn.py @@ -18,7 +18,7 @@ import tvm from tvm import te import numpy as np -import topi +from tvm import topi from tvm.contrib import util import vta diff --git a/vta/tutorials/autotvm/tune_relay_vta.py b/vta/tutorials/autotvm/tune_relay_vta.py index a92b1ee5d90b..118400617aee 100644 --- a/vta/tutorials/autotvm/tune_relay_vta.py +++ b/vta/tutorials/autotvm/tune_relay_vta.py @@ -58,7 +58,7 @@ import numpy as np from PIL import Image -import topi +from tvm import topi import tvm from tvm import te from tvm import rpc, autotvm, relay diff --git a/vta/tutorials/optimize/convolution_opt.py b/vta/tutorials/optimize/convolution_opt.py index 0564a6ace179..d364feff93b7 100644 --- a/vta/tutorials/optimize/convolution_opt.py +++ b/vta/tutorials/optimize/convolution_opt.py @@ -123,7 +123,7 @@ # :align: center # :width: 480px -import topi +from tvm import topi # 2D convolution layer dimensions taken from ResNet-18 architecture # (9th convolutional layer) @@ -371,7 +371,7 @@ # ensure correctness. # This library facilitates 2D convolution testing -from topi.testing import conv2d_nchw_python +from tvm.topi.testing import conv2d_nchw_python # Compile the TVM module my_conv = vta.build(s, [data, kernel, res], "ext_dev", env.target_host, name="my_conv")
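With topi/python removed from every PYTHONPATH in the scripts above and the standalone setup.py deleted, a quick sanity check that an environment picked up the move is to confirm the new module path resolves while the old top-level package does not; a small sketch, assuming no stale standalone topi install remains on the path:

import importlib.util

# topi now ships inside the tvm package.
assert importlib.util.find_spec("tvm.topi") is not None
# The standalone package should no longer resolve once topi/python
# is off PYTHONPATH and any old setup.py install is removed.
assert importlib.util.find_spec("topi") is None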