
[REFACTOR] tvm.hybrid -> te.hybrid #5223

Merged 1 commit on Apr 2, 2020
23 changes: 0 additions & 23 deletions docs/api/python/hybrid.rst

This file was deleted.

1 change: 0 additions & 1 deletion docs/api/python/index.rst
@@ -33,7 +33,6 @@ Python API
rpc
contrib
graph_runtime
hybrid
relay/index
vta/index
topi
8 changes: 8 additions & 0 deletions docs/api/python/te.rst
@@ -23,3 +23,11 @@ tvm.te
:members:
:imported-members:
:autosummary:


tvm.te.hybrid
-------------
.. automodule:: tvm.te.hybrid
:members:
:imported-members:
:autosummary:
10 changes: 5 additions & 5 deletions docs/langref/hybrid_script.rst
@@ -33,11 +33,11 @@ Software Emulation
~~~~~~~~~~~~~~~~~~

Both software emulation and compilation are supported. To define a function,
you need to use ``tvm.hybrid.script`` decorator to indicate this is a hybrid function:
you need to use ``tvm.te.hybrid.script`` decorator to indicate this is a hybrid function:

.. code-block:: python

@tvm.hybrid.script
@tvm.te.hybrid.script
def outer_product(a, b, c):
c = output_tensor((100, 99), 'float32')
for i in range(a.shape[0]):
@@ -85,7 +85,7 @@ to LLVM module.
Tuning
~~~~~~

Following up on the example above, you can use some TVM-like interfaces to tune the code:

.. code-block:: python

@@ -101,7 +101,7 @@ loop manipulation (``split`` and ``fuse``), and ``reorder``.

This is a preliminary feature, so users are responsible for the correctness
of the functionality after tuning. Specifically, users should be careful when
fusing and reordering imperfect loops.
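
A minimal tuning sketch under stated assumptions: it presumes a two-input variant of ``outer_product`` that returns its output tensor when called with TE placeholders (the snippet in this section is truncated by the diff view), and it exercises only the documented primitives ``split``, ``fuse``, and ``reorder``:

.. code-block:: python

    from tvm import te

    a = te.placeholder((100,), name='a', dtype='float32')
    b = te.placeholder((99,), name='b', dtype='float32')
    c = outer_product(a, b)              # calling with TE tensors builds an op
    sch = te.create_schedule(c.op)
    i, j = c.op.axis
    jo, ji = sch[c].split(j, factor=4)   # split the inner loop by 4
    fused = sch[c].fuse(i, jo)           # fuse the outer loop with jo
    sch[c].reorder(fused, ji)            # reorder the remaining axes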

Loops
~~~~~
@@ -119,7 +119,7 @@ Users can access containers by either constants or constants loops annotated.

.. code-block:: python

@tvm.hybrid.script
@tvm.te.hybrid.script
def foo(a, b): # b is a tvm.container.Array
c = output_tensor(a.shape, a.dtype)
for i in const_range(len(a)): # because you have b access, i should be explicitly annotated as const_range
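
To tie the renamed entry points together, a hedged end-to-end sketch (names, shapes, and values here are illustrative; ``convert`` is the same helper the relay files below import from ``tvm.runtime``):

.. code-block:: python

    import numpy
    import tvm
    from tvm import te
    from tvm.runtime import convert

    @te.hybrid.script
    def scale(a, coeffs):                  # coeffs arrives as a tvm.container.Array
        c = output_tensor(a.shape, a.dtype)
        for i in const_range(len(coeffs)): # container access forces const_range
            c[i] = a[i] * coeffs[i]
        return c

    # Software emulation: numpy inputs run the body in pure Python.
    out = scale(numpy.ones(4, 'float32'), [1.0, 2.0, 3.0, 4.0])

    # Compilation: TE inputs build an op that can be scheduled and built.
    a = te.placeholder((4,), name='a', dtype='float32')
    c = scale(a, convert([1.0, 2.0, 3.0, 4.0]))
    sch = te.create_schedule(c.op)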
3 changes: 0 additions & 3 deletions python/tvm/__init__.py
@@ -57,9 +57,6 @@
# tvm.driver
from .driver import build, lower

# tvm.hybrid
from . import hybrid

# others
from . import arith

2 changes: 1 addition & 1 deletion python/tvm/relay/op/_reduce.py
@@ -18,9 +18,9 @@
from __future__ import absolute_import

from tvm.runtime import convert
from tvm.te.hybrid import script
from topi.util import get_const_int, get_const_tuple
from . import op as _reg
from ...hybrid import script

_reg.register_reduce_schedule("argmax")
_reg.register_reduce_schedule("argmin")
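
For context on why these relay files import ``script``: they define hybrid-script shape functions that compute output shapes at runtime for dynamically shaped ops. A hedged sketch of the pattern, with a hypothetical helper name loosely modeled on the shape functions in these files:

from tvm.te.hybrid import script

@script
def _example_shape_func(data_shape):    # hypothetical name, for illustration only
    out = output_tensor((len(data_shape),), "int64")
    for i in const_range(len(data_shape)):
        out[i] = int64(data_shape[i])   # int64() is a hybrid-script cast intrinsic
    return out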
2 changes: 1 addition & 1 deletion python/tvm/relay/op/_tensor.py
@@ -19,11 +19,11 @@
import topi

from tvm.runtime import convert
from tvm.te.hybrid import script
from topi.util import get_const_tuple
from .op import register_compute, register_shape_func
from .op import register_broadcast_schedule, register_injective_schedule
from .op import register_pattern, OpPattern
from ...hybrid import script


register_broadcast_schedule("log")
2 changes: 1 addition & 1 deletion python/tvm/relay/op/_transform.py
@@ -19,13 +19,13 @@
from __future__ import absolute_import
import tvm
from tvm import te
from tvm.te.hybrid import script
from tvm.runtime import convert
import topi
from topi.util import get_const_int, get_const_tuple
from . import op as _reg
from . import strategy
from .op import OpPattern
from ...hybrid import script

_reg.register_broadcast_schedule("broadcast_to")
_reg.register_broadcast_schedule("broadcast_to_like")
2 changes: 1 addition & 1 deletion python/tvm/relay/op/nn/_nn.py
@@ -22,11 +22,11 @@
from topi.util import get_const_tuple

from tvm.runtime import convert
from tvm.te.hybrid import script
from .. import op as reg
from .. import strategy
from ..op import OpPattern
from .._tensor import elemwise_shape_func
from ....hybrid import script

# relu
reg.register_broadcast_schedule("nn.relu")
1 change: 1 addition & 0 deletions python/tvm/te/__init__.py
@@ -34,3 +34,4 @@

from .tensor import PlaceholderOp, ComputeOp, TensorComputeOp, ScanOp, ExternOp, HybridOp
from .autodiff import gradient
from . import hybrid
@@ -31,8 +31,7 @@
import inspect
import tvm._ffi
from tvm.driver.build_module import form_body

from .._ffi.base import decorate
from tvm._ffi.base import decorate

from .module import HybridModule
from .parser import source_to_op
@@ -95,4 +94,4 @@ def build(sch, inputs, outputs, name="hybrid_func"):
return HybridModule(src, name)


tvm._ffi._init_api("tvm.hybrid")
tvm._ffi._init_api("tvm.hybrid", __name__)
File renamed without changes.
@@ -23,7 +23,7 @@

import ast

from ..contrib import util
from tvm.contrib import util
from .util import _internal_assert
from .util import _is_tvm_arg_types
from .parser import source_to_op
@@ -52,7 +52,7 @@ def __init__(self, src=None, name=None):
temp = util.tempdir()
dst = temp.relpath("script.py")
with open(dst, 'w') as f:
f.write("import tvm\[email protected]\n%s" % src)
f.write("import tvm\n@tvm.te.hybrid.script\n%s" % src)

if name is not None:
self.name = name
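
The header written above is what makes a saved module self-describing. A hedged round-trip sketch, mirroring ``test_outer_product`` further down (``func`` stands for an already-built hybrid module):

from tvm import te
from tvm.contrib import util

temp = util.tempdir()
path = temp.relpath('script.py')
func.save(path)                      # writes source with the decorator header
reloaded = te.hybrid.HybridModule()
reloaded.load(path)                  # re-executes the saved source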
File renamed without changes.
File renamed without changes.
@@ -17,7 +17,7 @@
"""Intrinsics of TVM-Python Hybrid Script for Python emulation runtime"""

import numpy
from .. import target
from tvm import target


class bind(object): #pylint: disable=invalid-name
File renamed without changes.
@@ -18,8 +18,8 @@

from tvm import te
from tvm.contrib import util
from tvm.hybrid import script
from tvm.hybrid.runtime import HYBRID_GLOBALS
from tvm.te.hybrid import script
from tvm.te.hybrid.runtime import HYBRID_GLOBALS

@pytest.mark.skip
def run_and_check(func, args, var_dict={}, target='llvm', sch=None, outs=None):
@@ -80,7 +80,7 @@ def tvm_val_2_py_val(val):

module_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))]
module_outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
h_module = tvm.hybrid.build(sch, module_args, module_outs)
h_module = te.hybrid.build(sch, module_args, module_outs)

return h_module, module_args, module_outs

@@ -146,7 +146,7 @@ def test_outer_product():
temp = util.tempdir()
path = temp.relpath('%s.py' % func.name)
func.save(path)
func_ = tvm.hybrid.HybridModule()
func_ = te.hybrid.HybridModule()
func_.load(path)
run_and_check(func_, ins, {n: 99, m: 101}, outs=outs)

@@ -348,7 +348,7 @@ def raw(a, b):
run_and_check(func, ins, outs=outs, target='cuda')


@tvm.hybrid.script
@te.hybrid.script
def foo(a):
c = output_tensor((a.shape[0],), a.dtype)
total = allocate((1,), a.dtype, 'local')
@@ -370,7 +370,7 @@ def foo(a):
func, ins, outs = run_and_check(foo, [a], target='cuda')
run_and_check(func, ins, outs=outs, target='cuda')

@tvm.hybrid.script
@te.hybrid.script
def max_threads(a):
b = output_tensor(a.shape, a.dtype)
n = a.shape[0]
@@ -433,7 +433,7 @@ def intrin_int(a):

# test non canonical loops
def test_non_zero():
@tvm.hybrid.script
@te.hybrid.script
def blur(a):
b = output_tensor((30, 30), 'float32')
for i in range(2, 32):
@@ -449,7 +449,7 @@ def blur(a):
func, ins, outs = run_and_check(blur, [a])
run_and_check(func, ins, outs=outs)

@tvm.hybrid.script
@te.hybrid.script
def triangle(a, b):
c = output_tensor((10, 10), dtype='float32')
for i in range(10):
@@ -464,7 +464,7 @@ def triangle(a, b):
run_and_check(func, ins, outs=outs)

def test_allocate():
@tvm.hybrid.script
@te.hybrid.script
def blur2d(a):
b = output_tensor((30, 30), 'float32')
for i in range(30):
@@ -483,7 +483,7 @@ def blur2d(a):
run_and_check(func, ins, outs=outs)

if tvm.gpu().exist:
@tvm.hybrid.script
@te.hybrid.script
def share_vec_add(a, b):
c = output_tensor((256, ), 'float32')
shared = allocate((256, ), 'float32', 'shared')
@@ -505,7 +505,7 @@ def share_vec_add(a, b):
print('[Warning] No GPU found! Skip shared mem test!')

def test_upstream():
@tvm.hybrid.script
@te.hybrid.script
def upstream(a):
b = output_tensor((20, ), 'float32')
for i in range(20):
@@ -535,7 +535,7 @@ def upstream(a):
tvm.testing.assert_allclose(tvm_d.asnumpy(), ref, 1e-5, 1e-5)

def test_downstream():
@tvm.hybrid.script
@te.hybrid.script
def downstream(a):
b = output_tensor((20, ), 'float32')
for i in range(20):
@@ -562,7 +562,7 @@ def downstream(a):
tvm.testing.assert_allclose(tvm_c.asnumpy(), ref, 1e-5, 1e-5)

def test_const_param():
@tvm.hybrid.script
@te.hybrid.script
def add_something(a, b):
c = output_tensor((11, ), 'int32')
for i in range(11):
@@ -588,7 +588,7 @@ def add_something(a, b):
tvm.testing.assert_allclose(nd_c.asnumpy(), ref, 1e-5, 1e-5)

def test_value_index():
@tvm.hybrid.script
@te.hybrid.script
def kernel_a(a):
b = output_tensor((16, ), 'int32')
c = output_tensor((4, 4), 'int32')
@@ -597,7 +597,7 @@ def kernel_a(a):
c[i // 4, i % 4] = a[i] + 1
return b, c

@tvm.hybrid.script
@te.hybrid.script
def kernel_b(b, a):
c = output_tensor((4, 4), 'int32')
for i in range(4):
@@ -621,7 +621,7 @@ def kernel_b(b, a):
tvm.testing.assert_allclose(res.asnumpy(), ref)

def test_func_call():
@tvm.hybrid.script
@te.hybrid.script
def foo(a, b):
for i in range(len(a)):
a[i] = i + 1.0
@@ -640,7 +640,7 @@ def foo(a, b):
run_and_check(func, ins, outs=outs)

def test_bool():
@tvm.hybrid.script
@te.hybrid.script
def foo(a):
b = output_tensor(a.shape, a.dtype)
b[0] = 1.2
@@ -655,7 +655,7 @@ def foo(a):
run_and_check(func, ins, outs=outs)

def test_const_range():
@tvm.hybrid.script
@te.hybrid.script
def foo(a, b):
c = output_tensor(a.shape, a.dtype)
d = output_tensor(a.shape, 'int32')
@@ -675,7 +675,7 @@ def foo(a, b):
func, ins, outs = run_and_check(foo, [a, b])
run_and_check(func, ins, outs=outs)

@tvm.hybrid.script
@te.hybrid.script
def goo(a, b):
c = output_tensor(a.shape, a.dtype)
len_b = len(b)
@@ -692,7 +692,7 @@ def goo(a, b):
func, ins, outs = run_and_check(goo, [a, b])
run_and_check(func, ins, outs=outs)

@tvm.hybrid.script
@te.hybrid.script
def hoo(a, b):
c = output_tensor(a.shape, a.dtype)
len_b = len(b)
@@ -779,7 +779,7 @@ def test_capture():
constant_list = [[1, 2], [3, n]]
const_value = 1

@tvm.hybrid.script
@te.hybrid.script
def add_something(a):
c = output_tensor((constant_tuple[1],), 'int32')
for i in range(constant_tuple[1]):
2 changes: 1 addition & 1 deletion tests/python/unittest/test_tir_pass_storage_rewrite.py
@@ -495,7 +495,7 @@ def test_replace_dataflow():


def test_large_input():
@tvm.hybrid.script
@te.hybrid.script
def compute(a, b):
n = 16384
c = output_tensor((n, n), 'int32')
2 changes: 1 addition & 1 deletion topi/python/topi/argwhere.py
@@ -16,7 +16,7 @@
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks
"""Argwhere operator"""
from tvm import hybrid
from tvm.te import hybrid

@hybrid.script
def hybrid_argwhere_1d(output_shape, condition):