diff --git a/apps/bundle_deploy/bundle.cc b/apps/bundle_deploy/bundle.cc index 14f0b7edc301..22f8ba300dec 100644 --- a/apps/bundle_deploy/bundle.cc +++ b/apps/bundle_deploy/bundle.cc @@ -34,7 +34,7 @@ TVM_BUNDLE_FUNCTION void *tvm_runtime_create() { const std::string json_data(&build_graph_json[0], &build_graph_json[0] + build_graph_json_len); tvm::runtime::Module mod_syslib = - (*tvm::runtime::Registry::Get("module._GetSystemLib"))(); + (*tvm::runtime::Registry::Get("runtime.SystemLib"))(); int device_type = kDLCPU; int device_id = 0; tvm::runtime::Module mod = diff --git a/apps/dso_plugin_module/README.md b/apps/dso_plugin_module/README.md index c8803b106521..8dc9622d8ed4 100644 --- a/apps/dso_plugin_module/README.md +++ b/apps/dso_plugin_module/README.md @@ -19,7 +19,7 @@ Example Plugin Module ===================== This folder contains an example that implements a C++ module -that can be directly loaded as TVM's DSOModule (via tvm.module.load) +that can be directly loaded as TVM's DSOModule (via tvm.runtime.load_module) ## Guideline diff --git a/apps/dso_plugin_module/test_plugin_module.py b/apps/dso_plugin_module/test_plugin_module.py index d02ea07b1023..6304ef9573fb 100644 --- a/apps/dso_plugin_module/test_plugin_module.py +++ b/apps/dso_plugin_module/test_plugin_module.py @@ -19,7 +19,7 @@ def test_plugin_module(): curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) - mod = tvm.module.load(os.path.join(curr_path, "lib", "plugin_module.so")) + mod = tvm.runtime.load_module(os.path.join(curr_path, "lib", "plugin_module.so")) # NOTE: we need to make sure all managed resources returned # from mod get destructed before mod get unloaded. # diff --git a/apps/extension/tests/test_ext.py b/apps/extension/tests/test_ext.py index a5e7e0f69456..257ecd684175 100644 --- a/apps/extension/tests/test_ext.py +++ b/apps/extension/tests/test_ext.py @@ -30,7 +30,7 @@ def test_ext_dev(): B = tvm.compute((n,), lambda *i: A(*i) + 1.0, name='B') s = tvm.create_schedule(B.op) def check_llvm(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return f = tvm.build(s, [A, B], "ext_dev", "llvm") ctx = tvm.ext_dev(0) @@ -74,7 +74,7 @@ def test_extern_call(): s = tvm.create_schedule(B.op) def check_llvm(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return f = tvm.build(s, [A, B], "llvm") ctx = tvm.cpu(0) diff --git a/apps/howto_deploy/cpp_deploy.cc b/apps/howto_deploy/cpp_deploy.cc index 5f5c88a6b527..a386dffa0b30 100644 --- a/apps/howto_deploy/cpp_deploy.cc +++ b/apps/howto_deploy/cpp_deploy.cc @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -79,7 +79,7 @@ int main(void) { // For libraries that are directly packed as system lib and linked together with the app // We can directly use GetSystemLib to get the system wide library. 
LOG(INFO) << "Verify load function from system lib"; - tvm::runtime::Module mod_syslib = (*tvm::runtime::Registry::Get("module._GetSystemLib"))(); + tvm::runtime::Module mod_syslib = (*tvm::runtime::Registry::Get("runtime.SystemLib"))(); Verify(mod_syslib, "addonesys"); return 0; } diff --git a/apps/howto_deploy/python_deploy.py b/apps/howto_deploy/python_deploy.py index f744aa68bf6d..07a27fe9426c 100644 --- a/apps/howto_deploy/python_deploy.py +++ b/apps/howto_deploy/python_deploy.py @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -26,21 +26,21 @@ def verify(mod, fname): f = mod.get_function(fname) # Use tvm.nd.array to convert numpy ndarray to tvm # NDArray type, so that function can be invoked normally - N = 10 + N = 10 x = tvm.nd.array(np.arange(N, dtype=np.float32)) y = tvm.nd.array(np.zeros(N, dtype=np.float32)) # Invoke the function f(x, y) - np_x = x.asnumpy() - np_y = y.asnumpy() + np_x = x.asnumpy() + np_y = y.asnumpy() # Verify correctness of function assert(np.all([xi+1 == yi for xi, yi in zip(np_x, np_y)])) print("Finish verification...") - + if __name__ == "__main__": # The normal dynamic loading method for deployment - mod_dylib = tvm.module.load("lib/test_addone_dll.so") + mod_dylib = tvm.runtime.load_module("lib/test_addone_dll.so") print("Verify dynamic loading from test_addone_dll.so") verify(mod_dylib, "addone") # There might be methods to use the system lib way in diff --git a/apps/sgx/run_model.py b/apps/sgx/run_model.py index 9f21c2685d45..fb39e34dc601 100644 --- a/apps/sgx/run_model.py +++ b/apps/sgx/run_model.py @@ -23,7 +23,7 @@ def main(): ctx = tvm.context('cpu', 0) - model = tvm.module.load(osp.join(CWD, 'build', 'enclave.signed.so')) + model = tvm.runtime.load_module(osp.join(CWD, 'build', 'enclave.signed.so')) inp = tvm.nd.array(np.ones((1, 3, 224, 224), dtype='float32'), ctx) out = tvm.nd.array(np.empty((1, 1000), dtype='float32'), ctx) model(inp, out) diff --git a/docs/api/python/bridge.rst b/docs/api/python/bridge.rst deleted file mode 100644 index 72ffaad53efb..000000000000 --- a/docs/api/python/bridge.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - -.. http://www.apache.org/licenses/LICENSE-2.0 - -.. Unless required by applicable law or agreed to in writing, - software distributed under the License is distributed on an - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - KIND, either express or implied. See the License for the - specific language governing permissions and limitations - under the License. - -Framework Bridge APIs ---------------------- - -tvm.contrib.mxnet -~~~~~~~~~~~~~~~~~ -.. 
automodule:: tvm.contrib.mxnet - :members: diff --git a/docs/api/python/contrib.rst b/docs/api/python/contrib.rst index be0d81aca9fd..95465116cea6 100644 --- a/docs/api/python/contrib.rst +++ b/docs/api/python/contrib.rst @@ -15,8 +15,8 @@ specific language governing permissions and limitations under the License. -Additional Contrib APIs ------------------------ +tvm.contrib +----------- .. automodule:: tvm.contrib tvm.contrib.cblas @@ -43,6 +43,11 @@ tvm.contrib.cublas :members: +tvm.contrib.dlpack +~~~~~~~~~~~~~~~~~~ +.. automodule:: tvm.contrib.dlpack + :members: + tvm.contrib.emscripten ~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: tvm.contrib.emscripten @@ -53,6 +58,11 @@ tvm.contrib.miopen .. automodule:: tvm.contrib.miopen :members: +tvm.contrib.mxnet +~~~~~~~~~~~~~~~~~ +.. automodule:: tvm.contrib.mxnet + :members: + tvm.contrib.ndk ~~~~~~~~~~~~~~~ .. automodule:: tvm.contrib.ndk @@ -118,7 +128,6 @@ tvm.contrib.util :members: - tvm.contrib.xcode ~~~~~~~~~~~~~~~~~ .. automodule:: tvm.contrib.xcode diff --git a/docs/api/python/dev.rst b/docs/api/python/dev.rst index 8a0a70588bc3..f9d9410946c9 100644 --- a/docs/api/python/dev.rst +++ b/docs/api/python/dev.rst @@ -20,14 +20,7 @@ Developer API This page contains modules that are used by developers of TVM. Many of these APIs are PackedFunc registered in C++ backend. -tvm.object -~~~~~~~~~~ -.. automodule:: tvm.object -.. autoclass:: tvm.object.Object - :members: - -.. autofunction:: tvm.register_object tvm.expr ~~~~~~~~ diff --git a/docs/api/python/index.rst b/docs/api/python/index.rst index 7a8566eec7ba..c17f5c6865b0 100644 --- a/docs/api/python/index.rst +++ b/docs/api/python/index.rst @@ -22,6 +22,8 @@ Python API :maxdepth: 2 tvm + runtime + ndarray intrin tensor schedule @@ -29,7 +31,6 @@ Python API build module error - ndarray container function autotvm @@ -37,6 +38,7 @@ Python API rpc bridge contrib + ffi dev topi vta/index diff --git a/docs/api/python/module.rst b/docs/api/python/module.rst deleted file mode 100644 index 2185d1698d18..000000000000 --- a/docs/api/python/module.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - -.. http://www.apache.org/licenses/LICENSE-2.0 - -.. Unless required by applicable law or agreed to in writing, - software distributed under the License is distributed on an - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - KIND, either express or implied. See the License for the - specific language governing permissions and limitations - under the License. - -tvm.module ----------- -.. automodule:: tvm.module - :members: diff --git a/docs/api/python/ndarray.rst b/docs/api/python/ndarray.rst index d4d0351ea035..6f00eedac3e9 100644 --- a/docs/api/python/ndarray.rst +++ b/docs/api/python/ndarray.rst @@ -15,22 +15,22 @@ specific language governing permissions and limitations under the License. -tvm.ndarray ------------ -.. automodule:: tvm.ndarray +tvm.runtime.ndarray +------------------- +.. automodule:: tvm.runtime.ndarray -.. autoclass:: tvm.ndarray.TVMContext +.. autoclass:: tvm.nd.NDArray :members: + :inherited-members: + -.. autoclass:: tvm.ndarray.NDArray +.. 
autoclass:: tvm.runtime.TVMContext :members: - :inherited-members: +.. autofunction:: tvm.context .. autofunction:: tvm.cpu .. autofunction:: tvm.gpu .. autofunction:: tvm.opencl .. autofunction:: tvm.metal -.. autofunction:: tvm.ndarray.array -.. autofunction:: tvm.ndarray.empty - -.. autofunction:: tvm.register_extension +.. autofunction:: tvm.nd.array +.. autofunction:: tvm.nd.empty diff --git a/docs/api/python/function.rst b/docs/api/python/runtime.rst similarity index 70% rename from docs/api/python/function.rst rename to docs/api/python/runtime.rst index 195e7b54d100..37b8606c81c8 100644 --- a/docs/api/python/function.rst +++ b/docs/api/python/runtime.rst @@ -15,10 +15,31 @@ specific language governing permissions and limitations under the License. -tvm.Function ------------- -.. autoclass:: tvm.Function +tvm.runtime +----------- + +.. automodule:: tvm.runtime + + +.. autoclass:: tvm.runtime.PackedFunc + :members: .. autofunction:: tvm.register_func .. autofunction:: tvm.get_global_func + + +.. autoclass:: tvm.runtime.Module + :members: + +.. autofunction:: tvm.runtime.load_module + +.. autofunction:: tvm.runtime.system_lib + +.. autofunction:: tvm.runtime.enabled + + +.. autoclass:: tvm.runtime.Object + :members: + +.. autofunction:: tvm.register_object diff --git a/docs/deploy/aocl_fpga.md b/docs/deploy/aocl_fpga.md index 71d9d0cb7485..1f13025d3668 100644 --- a/docs/deploy/aocl_fpga.md +++ b/docs/deploy/aocl_fpga.md @@ -57,8 +57,8 @@ import os tgt="aocl_sw_emu" -fadd = tvm.module.load("myadd.so") -fadd_dev = tvm.module.load("myadd.aocx") +fadd = tvm.runtime.load("myadd.so") +fadd_dev = tvm.runtime.load("myadd.aocx") fadd.import_module(fadd_dev) ctx = tvm.context(tgt, 0) diff --git a/docs/deploy/aws_fpga.md b/docs/deploy/aws_fpga.md index 97101efb7241..82492afbd351 100644 --- a/docs/deploy/aws_fpga.md +++ b/docs/deploy/aws_fpga.md @@ -57,11 +57,11 @@ import os tgt="sdaccel" -fadd = tvm.module.load("myadd.so") +fadd = tvm.runtime.load("myadd.so") if os.environ.get("XCL_EMULATION_MODE"): - fadd_dev = tvm.module.load("myadd.xclbin") + fadd_dev = tvm.runtime.load("myadd.xclbin") else: - fadd_dev = tvm.module.load("myadd.awsxclbin") + fadd_dev = tvm.runtime.load("myadd.awsxclbin") fadd.import_module(fadd_dev) ctx = tvm.context(tgt, 0) diff --git a/docs/dev/introduction_to_module_serialization.rst b/docs/dev/introduction_to_module_serialization.rst index 5b436e963bdf..78f6d7117dd8 100644 --- a/docs/dev/introduction_to_module_serialization.rst +++ b/docs/dev/introduction_to_module_serialization.rst @@ -53,7 +53,7 @@ Let us build one ResNet-18 workload for GPU as an example first. resnet18_lib.export_library(path_lib) # load it back - loaded_lib = tvm.module.load(path_lib) + loaded_lib = tvm.runtime.load(path_lib) assert loaded_lib.type_key == "library" assert loaded_lib.imported_modules[0].type_key == "cuda" @@ -177,7 +177,7 @@ support arbitrary modules to import ideally. Deserialization **************** -The entrance API is ``tvm.module.load``. This function +The entrance API is ``tvm.runtime.load``. This function is to call ``_LoadFromFile`` in fact. If we dig it a little deeper, this is ``Module::LoadFromFile``. 
In our example, the file is ``deploy.so``, according to the function logic, we will call ``module.loadfile_so`` in diff --git a/docs/dev/relay_bring_your_own_codegen.rst b/docs/dev/relay_bring_your_own_codegen.rst index 91cf60839f58..b735bb809e08 100644 --- a/docs/dev/relay_bring_your_own_codegen.rst +++ b/docs/dev/relay_bring_your_own_codegen.rst @@ -174,7 +174,7 @@ In ``src/relay/backend/contrib/codegen_c/codegen.cc``, we first create a codegen class CodegenC : public ExprVisitor, public CodegenCBase { public: - explicit CodegenC(const std::string& id) { this->ext_func_id_ = id; } + explicit CodegenC(const std::string& id) { this->ext_func_id_ = id; } void VisitExpr_(const VarNode* node) { ; } void VisitExpr_(const CallNode* call) final { ; } @@ -196,7 +196,7 @@ In ``src/relay/backend/contrib/codegen_c/codegen.cc``, we first create a codegen /*! \brief The declaration statements of buffers. */ std::vector buf_decl_; /*! \brief The name and index pairs for output. */ - std::vector> out_; + std::vector> out_; } The ``CodegenC`` class inherits two classes: ``ExprVisitor`` provides abilities to traverse subgraphs and collects the required information and generate subgraph functions such as ``gcc_0_``; ``CodegenCBase`` provides abilities and utilities to generate wrapper functions such as ``gcc_0`` in the above example. As can be seen, we only need to implement three functions in this codegen class to make it work. @@ -278,7 +278,7 @@ Again, we want to highlight the notes in the above code: curr_node <- Process curr_node curr_node <- Put "buf_0" as an input buffer (a) out_ = {} (b) out_ = {} (c) out_ = {("buf_0", 20)} - + We can see in the above figure, class variable ``out_`` is empty before visiting the argument node, and it was filled with the output buffer name and size of ``arg_node``. As a result, when we finished visiting the argument node, we know the proper input buffer we should put by looking at ``out_``. You will find out how we update ``out_`` at the end of this section as well as the next section. @@ -539,7 +539,7 @@ Then the ExampleJON of this subgraph looks like: input 0 10 10 input 1 10 10 input 2 10 10 - input 3 10 10 + input 3 10 10 add 4 inputs: 0 1 shape: 10 10 sub 5 inputs: 4 2 shape: 10 10 add 6 inputs: 5 3 shape: 10 10 @@ -903,7 +903,7 @@ We also need to register this function to enable the corresponding Python API: TVM_REGISTER_GLOBAL("module.loadbinary_examplejson") .set_body_typed(ExampleJsonModule::LoadFromBinary); -The above registration means when users call ``tvm.module.load(lib_path)`` API and the exported library has an ExampleJSON stream, our ``LoadFromBinary`` will be invoked to create the same customized runtime module. +The above registration means when users call ``tvm.runtime.load(lib_path)`` API and the exported library has an ExampleJSON stream, our ``LoadFromBinary`` will be invoked to create the same customized runtime module. In addition, if you want to support module creation directly from an ExampleJSON file, you can also implement a simple function and register a Python API as follows: @@ -928,7 +928,7 @@ In addition, if you want to support module creation directly from an ExampleJSON *rv = ExampleJsonModule::Create(args[0]); }); -It means users can manually write/modify an ExampleJSON file, and use Python API ``tvm.module.load("mysubgraph.examplejson", "examplejson")`` to construct a customized module. 
+It means users can manually write/modify an ExampleJSON file, and use Python API ``tvm.runtime.load("mysubgraph.examplejson", "examplejson")`` to construct a customized module. ******* Summary @@ -947,12 +947,12 @@ In summary, here is a checklist for you to refer: * A runtime module class derived from ``ModuleNode`` with following functions (for your graph representation). - * Constructor. + * Constructor. * ``GetFunction`` to generate a TVM runtime compatible ``PackedFunc``. * ``Run`` to execute a subgraph. * Register a runtime creation API. * ``SaveToBinary`` and ``LoadFromBinary`` to serialize/deserialize customized runtime module. - * Register ``LoadFromBinary`` API to support ``tvm.module.load(your_module_lib_path)``. + * Register ``LoadFromBinary`` API to support ``tvm.runtime.load(your_module_lib_path)``. * (optional) ``Create`` to support customized runtime module construction from subgraph file in your representation. * An annotator to annotate a user Relay program to make use of your compiler and runtime (TBA). diff --git a/include/tvm/runtime/module.h b/include/tvm/runtime/module.h index 27d8e83ac5ff..3c43ae090efc 100644 --- a/include/tvm/runtime/module.h +++ b/include/tvm/runtime/module.h @@ -211,6 +211,13 @@ class TVM_DLL ModuleNode : public Object { std::shared_ptr > import_cache_; }; +/*! + * \brief Check if runtime module is enabled for target. + * \param target The target module name. + * \return Whether runtime is enabled. + */ +TVM_DLL bool RuntimeEnabled(const std::string& target); + /*! \brief namespace for constant symbols */ namespace symbol { /*! \brief Global variable to store module context. */ diff --git a/jvm/core/src/test/scripts/test_add_gpu.py b/jvm/core/src/test/scripts/test_add_gpu.py index f41992b8b15e..e3f4fbfedaf3 100644 --- a/jvm/core/src/test/scripts/test_add_gpu.py +++ b/jvm/core/src/test/scripts/test_add_gpu.py @@ -20,7 +20,7 @@ from tvm.contrib import cc, util def test_add(target_dir): - if not tvm.module.enabled("cuda"): + if not tvm.runtime.enabled("cuda"): print("skip %s because cuda is not enabled..." % __file__) return n = tvm.var("n") diff --git a/python/tvm/__init__.py b/python/tvm/__init__.py index e7657204e469..580a0714558c 100644 --- a/python/tvm/__init__.py +++ b/python/tvm/__init__.py @@ -29,12 +29,8 @@ # top-level alias # tvm.runtime from .runtime.object import Object -from .runtime.packed_func import PackedFunc as Function from .runtime.ndarray import context, cpu, gpu, opencl, cl, vulkan, metal, mtl from .runtime.ndarray import vpi, rocm, opengl, ext_dev, micro_dev -from .runtime import module -from .runtime import ndarray -# pylint: disable=reimported from .runtime import ndarray as nd # others diff --git a/python/tvm/_ffi/_ctypes/object.py b/python/tvm/_ffi/_ctypes/object.py index 907b7ddef616..934e33ff5891 100644 --- a/python/tvm/_ffi/_ctypes/object.py +++ b/python/tvm/_ffi/_ctypes/object.py @@ -92,3 +92,22 @@ def __init_handle_by_constructor__(self, fconstructor, *args): if not isinstance(handle, ObjectHandle): handle = ObjectHandle(handle) self.handle = handle + + def same_as(self, other): + """Check object identity. + + Parameters + ---------- + other : object + The other object to compare against. + + Returns + ------- + result : bool + The comparison result. 
+ """ + if not isinstance(other, ObjectBase): + return False + if self.handle is None: + return other.handle is None + return self.handle.value == other.handle.value diff --git a/python/tvm/_ffi/_cython/object.pxi b/python/tvm/_ffi/_cython/object.pxi index 25a9c3fb70cf..2a345cad684e 100644 --- a/python/tvm/_ffi/_cython/object.pxi +++ b/python/tvm/_ffi/_cython/object.pxi @@ -99,3 +99,20 @@ cdef class ObjectBase: (fconstructor).chandle, kTVMObjectHandle, args, &chandle) self.chandle = chandle + + def same_as(self, other): + """Check object identity. + + Parameters + ---------- + other : object + The other object to compare against. + + Returns + ------- + result : bool + The comparison result. + """ + if not isinstance(other, ObjectBase): + return False + return self.chandle == (other).chandle diff --git a/python/tvm/api.py b/python/tvm/api.py index c5369193f881..d27cd20574a5 100644 --- a/python/tvm/api.py +++ b/python/tvm/api.py @@ -19,6 +19,7 @@ from numbers import Integral as _Integral import tvm._ffi +import tvm.runtime._ffi_node_api from tvm.runtime import convert, const, DataType from ._ffi.base import string_types, TVMError @@ -108,10 +109,10 @@ def load_json(json_str): """ try: - return _api_internal._load_json(json_str) + return tvm.runtime._ffi_node_api.LoadJSON(json_str) except TVMError: json_str = json_compact.upgrade_json(json_str) - return _api_internal._load_json(json_str) + return tvm.runtime._ffi_node_api.LoadJSON(json_str) def save_json(node): @@ -127,7 +128,7 @@ def save_json(node): json_str : str Saved json string. """ - return _api_internal._save_json(node) + return tvm.runtime._ffi_node_api.SaveJSON(node) def var(name="tindex", dtype=int32): diff --git a/python/tvm/build_module.py b/python/tvm/build_module.py index a5bb3d071c48..898d394a0b37 100644 --- a/python/tvm/build_module.py +++ b/python/tvm/build_module.py @@ -21,6 +21,7 @@ """ import warnings import tvm._ffi +import tvm.runtime from tvm.runtime import Object, ndarray from . import api @@ -31,7 +32,6 @@ from . import ir_pass from . import stmt as _stmt from . import container -from . import module from . import codegen from . import target as _target from . import make @@ -628,7 +628,7 @@ def build(inputs, target_host = tar break if not target_host: - target_host = "llvm" if module.enabled("llvm") else "stackvm" + target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm" fhost_all = [] device_modules = [] diff --git a/python/tvm/container.py b/python/tvm/container.py index 094d7ca76136..3019ad2368fd 100644 --- a/python/tvm/container.py +++ b/python/tvm/container.py @@ -19,6 +19,7 @@ from tvm.runtime import Object, ObjectTypes from tvm.runtime.container import getitem_helper +from tvm.runtime import _ffi_node_api from . 
import _api_internal @@ -33,10 +34,10 @@ class Array(Object): """ def __getitem__(self, idx): return getitem_helper( - self, _api_internal._ArrayGetItem, len(self), idx) + self, _ffi_node_api.ArrayGetItem, len(self), idx) def __len__(self): - return _api_internal._ArraySize(self) + return _ffi_node_api.ArraySize(self) @tvm._ffi.register_object @@ -62,18 +63,18 @@ class Map(Object): You can use convert to create a dict[Object-> Object] into a Map """ def __getitem__(self, k): - return _api_internal._MapGetItem(self, k) + return _ffi_node_api.MapGetItem(self, k) def __contains__(self, k): - return _api_internal._MapCount(self, k) != 0 + return _ffi_node_api.MapCount(self, k) != 0 def items(self): """Get the items from the map""" - akvs = _api_internal._MapItems(self) + akvs = _ffi_node_api.MapItems(self) return [(akvs[i], akvs[i+1]) for i in range(0, len(akvs), 2)] def __len__(self): - return _api_internal._MapSize(self) + return _ffi_node_api.MapSize(self) @tvm._ffi.register_object @@ -84,7 +85,7 @@ class StrMap(Map): """ def items(self): """Get the items from the map""" - akvs = _api_internal._MapItems(self) + akvs = _ffi_node_api.MapItems(self) return [(akvs[i].value, akvs[i+1]) for i in range(0, len(akvs), 2)] diff --git a/python/tvm/contrib/debugger/debug_result.py b/python/tvm/contrib/debugger/debug_result.py index 3fc0d1574b8c..26c16e3135e8 100644 --- a/python/tvm/contrib/debugger/debug_result.py +++ b/python/tvm/contrib/debugger/debug_result.py @@ -269,7 +269,7 @@ def save_tensors(params): param_bytes: bytearray Serialized parameters. """ - _save_tensors = tvm.get_global_func("_save_param_dict") + _save_tensors = tvm.get_global_func("tvm.relay._save_param_dict") args = [] for k, v in params.items(): diff --git a/python/tvm/contrib/dlpack.py b/python/tvm/contrib/dlpack.py index a42536745a7d..dfffc3fa7e56 100644 --- a/python/tvm/contrib/dlpack.py +++ b/python/tvm/contrib/dlpack.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. """Wrapping functions to bridge frameworks with DLPack support to TVM""" -from .. import ndarray +from tvm.runtime import ndarray def convert_func(tvm_func, tensor_type, to_dlpack_func): """Convert a tvm function into one that accepts a tensor from another diff --git a/python/tvm/contrib/mxnet.py b/python/tvm/contrib/mxnet.py index 19c50f01819c..e3b234c9aa07 100644 --- a/python/tvm/contrib/mxnet.py +++ b/python/tvm/contrib/mxnet.py @@ -17,8 +17,9 @@ """MXNet bridge wrap Function MXNet's async function.""" from __future__ import absolute_import as _abs -from .. 
import api, _api_internal, ndarray -from ..module import Module +import tvm._ffi.registry +import tvm.runtime._ffi_api +from tvm.runtime import Module # pylint: disable=invalid-name _wrap_async = None @@ -60,7 +61,7 @@ def _get_bridge_func(): "MXTVMBridge not exist in mxnet package," " please update to latest version") - fdict = api.extract_ext_funcs(mxnet.base._LIB.MXTVMBridge) + fdict = tvm._ffi.registry.extract_ext_funcs(mxnet.base._LIB.MXTVMBridge) ret = fdict["WrapAsyncCall"] ret.is_global = True return ret @@ -69,7 +70,8 @@ def _get_bridge_func(): if _wrap_async is None: # Register extension type in first time _wrap_async = _get_bridge_func() - ndarray.register_extension(mxnet.nd.NDArray) + tvm._ffi.registry.register_extension(mxnet.nd.NDArray) const_loc = const_loc if const_loc else [] - return _wrap_async(func, _api_internal._TVMSetStream, len(const_loc), *const_loc) + return _wrap_async(func, tvm.runtime._ffi_api.TVMSetStream, + len(const_loc), *const_loc) diff --git a/python/tvm/contrib/sparse.py b/python/tvm/contrib/sparse.py index 28c703da3262..2a51637fe6ce 100644 --- a/python/tvm/contrib/sparse.py +++ b/python/tvm/contrib/sparse.py @@ -16,12 +16,13 @@ # under the License. """Tensor and Operation class for computation declaration.""" # pylint: disable=invalid-name -from __future__ import absolute_import as _abs import numpy as _np +from tvm.runtime import ndarray as _nd + from .. import expr as _expr from .. import api as _api from .. import tensor as _tensor -from .. import ndarray as _nd + float32 = "float32" itype = 'int32' diff --git a/python/tvm/expr.py b/python/tvm/expr.py index 910061a767ac..00e85200b3c6 100644 --- a/python/tvm/expr.py +++ b/python/tvm/expr.py @@ -32,7 +32,7 @@ """ # pylint: disable=missing-docstring import tvm._ffi -from tvm.runtime import Object, ObjectGeneric, DataType, TypeCode +from tvm.runtime import Object, ObjectGeneric, DataType, TypeCode, const from . import make as _make from . import generic as _generic @@ -101,7 +101,7 @@ def __mod__(self, other): return _make._OpFloorMod(self, other) def __neg__(self): - neg_one = _api_internal._const(-1, self.dtype) + neg_one = const(-1, self.dtype) return self.__mul__(neg_one) def __lshift__(self, other): diff --git a/python/tvm/micro/base.py b/python/tvm/micro/base.py index a46d1bb99619..9f50f9855303 100644 --- a/python/tvm/micro/base.py +++ b/python/tvm/micro/base.py @@ -138,7 +138,7 @@ def create_micro_mod(c_mod, dev_config): Parameters ---------- - c_mod : tvm.module.Module + c_mod : tvm.runtime.Module module with "c" as its target backend dev_config : Dict[str, Any] @@ -146,7 +146,7 @@ def create_micro_mod(c_mod, dev_config): Return ------ - micro_mod : tvm.module.Module + micro_mod : tvm.runtim.Module micro module for the target device """ temp_dir = _util.tempdir() @@ -154,14 +154,14 @@ def create_micro_mod(c_mod, dev_config): c_mod.export_library( lib_obj_path, fcompile=cross_compiler(dev_config, LibType.OPERATOR)) - micro_mod = tvm.module.load(lib_obj_path) + micro_mod = tvm.runtime.load_module(lib_obj_path) return micro_mod def cross_compiler(dev_config, lib_type): """Create a cross-compile function that wraps `create_lib` for a `Binutil` instance. - For use in `tvm.module.Module.export_library`. + For use in `tvm.runtime.Module.export_library`. 
Parameters ---------- diff --git a/python/tvm/relay/backend/compile_engine.py b/python/tvm/relay/backend/compile_engine.py index 956ad55404bf..172480da7286 100644 --- a/python/tvm/relay/backend/compile_engine.py +++ b/python/tvm/relay/backend/compile_engine.py @@ -104,7 +104,7 @@ def lower_shape_func(self, source_func, target=None): return _backend._CompileEngineLowerShapeFunc(self, key) def jit(self, source_func, target=None): - """JIT a source_func to a tvm.Function. + """JIT a source_func to a tvm.runtime.PackedFunc. Parameters ---------- @@ -116,7 +116,7 @@ def jit(self, source_func, target=None): Returns ------- - jited_func: tvm.Function + jited_func: tvm.runtime.PackedFunc The result of jited function. """ key = _get_cache_key(source_func, target) diff --git a/python/tvm/relay/backend/interpreter.py b/python/tvm/relay/backend/interpreter.py index f85f92fbdd59..3759bc9950af 100644 --- a/python/tvm/relay/backend/interpreter.py +++ b/python/tvm/relay/backend/interpreter.py @@ -84,14 +84,14 @@ def _convert_args(self, expr, args, kwargs): expr: relay.Expr The expression to evaluate - args: List[tvm.NDArray] + args: List[tvm.nd.NDArray] The arguments to pass to the evaluator. kwargs: Dict[str, tvm.NDArrray] The keyword arguments to pass to the evaluator. Returns: - args: List[tvm.NDArray] + args: List[tvm.nd.NDArray] The new arguments with all keyword arguments placed in the correct slot. """ assert expr is not None diff --git a/python/tvm/relay/backend/vm.py b/python/tvm/relay/backend/vm.py index 53b56d5d6055..67afe0945ccc 100644 --- a/python/tvm/relay/backend/vm.py +++ b/python/tvm/relay/backend/vm.py @@ -85,7 +85,7 @@ def save(self): can then be saved to disk and later deserialized into a new Executable. - lib : :py:class:`~tvm.module.Module` + lib : :py:class:`~tvm.runtime.Module` The runtime module that contains the generated code. It is basically a library that is composed of hardware dependent code. @@ -125,7 +125,7 @@ def save(self): lib.export_library(path_lib) with open(tmp.relpath("code.ro"), "wb") as fo: fo.write(code) - loaded_lib = tvm.module.load(path_lib) + loaded_lib = tvm.runtime.load_module(path_lib) loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read()) # deserialize. des_exec = relay.vm.Executable.load_exec(loaded_code, loaded_code) @@ -147,7 +147,7 @@ def load_exec(bytecode, lib): bytecode : bytearray The binary blob representing a the Relay VM bytecode. - lib : :py:class:`~tvm.module.Module` + lib : :py:class:`~tvm.runtime.Module` The runtime module that contains the generated code. 
Returns @@ -161,8 +161,8 @@ def load_exec(bytecode, lib): raise TypeError("bytecode is expected to be the type of bytearray " + "or TVMByteArray, but received {}".format(type(code))) - if lib is not None and not isinstance(lib, tvm.module.Module): - raise TypeError("lib is expected to be the type of tvm.module.Module" + + if lib is not None and not isinstance(lib, tvm.runtime.Module): + raise TypeError("lib is expected to be the type of tvm.runtime.Module" + ", but received {}".format(type(lib))) return Executable(_vm.Load_Executable(bytecode, lib)) @@ -270,7 +270,7 @@ def get_function_params(self, func_name): class VirtualMachine(object): """Relay VM runtime.""" def __init__(self, mod): - if not isinstance(mod, (Executable, tvm.module.Module)): + if not isinstance(mod, (Executable, tvm.runtime.Module)): raise TypeError("mod is expected to be the type of Executable or " + "tvm.Module, but received {}".format(type(mod))) m = mod.module if isinstance(mod, Executable) else mod @@ -534,7 +534,7 @@ def _update_target_host(self, target, target_host): target_host = tgt break if not target_host: - target_host = "llvm" if tvm.module.enabled("llvm") else "stackvm" + target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm" if isinstance(target_host, str): target_host = tvm.target.create(target_host) return target_host diff --git a/python/tvm/relay/frontend/caffe2.py b/python/tvm/relay/frontend/caffe2.py index 456d782e521f..a010099c2bee 100644 --- a/python/tvm/relay/frontend/caffe2.py +++ b/python/tvm/relay/frontend/caffe2.py @@ -567,8 +567,8 @@ def from_caffe2(init_net, predict_net, shape=None, dtype="float32"): mod : tvm.relay.Module The module that optimizations will be performed on. - params : dict of str to tvm.ndarray - Dict of converted parameters stored in tvm.ndarray format + params : dict of str to tvm.nd.NDArray + Dict of converted parameters stored in tvm.nd.NDArray format """ caffe2 = Caffe2NetDef(shape, dtype) diff --git a/python/tvm/relay/frontend/coreml.py b/python/tvm/relay/frontend/coreml.py index a24043df135d..d07b3f45fe1a 100644 --- a/python/tvm/relay/frontend/coreml.py +++ b/python/tvm/relay/frontend/coreml.py @@ -455,7 +455,7 @@ def from_coreml(model, shape=None): mod : tvm.relay.Module The relay module for compilation. - params : dict of str to tvm.NDArray + params : dict of str to tvm.nd.NDArray The parameter dict to be used by Relay. """ try: diff --git a/python/tvm/relay/frontend/darknet.py b/python/tvm/relay/frontend/darknet.py index a2a72eaf57ca..0ed7b2112383 100644 --- a/python/tvm/relay/frontend/darknet.py +++ b/python/tvm/relay/frontend/darknet.py @@ -843,7 +843,7 @@ def from_darknet(net, mod : tvm.relay.Module The relay module for compilation. - params : dict of str to tvm.NDArray + params : dict of str to tvm.nd.NDArray The parameter dict to be used by relay """ diff --git a/python/tvm/relay/frontend/keras.py b/python/tvm/relay/frontend/keras.py index 27d75fe81b58..5458b517aafd 100644 --- a/python/tvm/relay/frontend/keras.py +++ b/python/tvm/relay/frontend/keras.py @@ -756,7 +756,7 @@ def from_keras(model, shape=None): mod : tvm.relay.Module The relay module for compilation. - params : dict of str to tvm.NDArray + params : dict of str to tvm.nd.NDArray The parameter dict to be used by Relay. 
""" def _check_model_is_tf_keras(): diff --git a/python/tvm/relay/frontend/mxnet.py b/python/tvm/relay/frontend/mxnet.py index 508439354fb1..97e28a933c89 100644 --- a/python/tvm/relay/frontend/mxnet.py +++ b/python/tvm/relay/frontend/mxnet.py @@ -2012,7 +2012,7 @@ def from_mxnet(symbol, mod : tvm.relay.Module The relay module for compilation - params : dict of str to tvm.NDArray + params : dict of str to tvm.nd.NDArray The parameter dict to be used by nnvm """ try: diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index fdf6bd77ee46..ce7e01e96668 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -1791,7 +1791,7 @@ def from_onnx(model, mod : tvm.relay.Module The relay module for compilation - params : dict of str to tvm.NDArray + params : dict of str to tvm.nd.NDArray The parameter dict to be used by relay """ try: diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 2232300af8b8..8f7f4e63ec81 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -2655,8 +2655,8 @@ def from_tensorflow(graph, layout="NHWC", shape=None, outputs=None): mod : tvm.relay.Module The module that optimizations will be performed on. - params : dict of str to tvm.ndarray - Dict of converted parameters stored in tvm.ndarray format + params : dict of str to tvm.nd.NDArray + Dict of converted parameters stored in tvm.nd.NDArray format """ g = GraphProto() mod, params = g.from_tensorflow(graph, layout, shape, outputs) diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py index 7eeb1f52acaa..cefd4085b67c 100644 --- a/python/tvm/relay/frontend/tflite.py +++ b/python/tvm/relay/frontend/tflite.py @@ -1896,7 +1896,7 @@ def from_tflite(model, shape_dict, dtype_dict): mod : tvm.relay.Module The relay module for compilation. - params : dict of str to tvm.NDArray + params : dict of str to tvm.nd.NDArray The parameter dict to be used by relay """ try: diff --git a/python/tvm/rpc/server.py b/python/tvm/rpc/server.py index ab077877c05c..ea9c2da31e99 100644 --- a/python/tvm/rpc/server.py +++ b/python/tvm/rpc/server.py @@ -41,7 +41,7 @@ from tvm._ffi.base import py_str from tvm._ffi.libinfo import find_lib_path -from tvm.runtime.module import load as _load_module +from tvm.runtime.module import load_module as _load_module from tvm.contrib import util from . import base from . base import TrackerCode diff --git a/python/tvm/runtime/__init__.py b/python/tvm/runtime/__init__.py index a6ca614b55cb..24db0e86f22b 100644 --- a/python/tvm/runtime/__init__.py +++ b/python/tvm/runtime/__init__.py @@ -14,7 +14,7 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
-"""TVM runtime.""" +"""TVM runtime namespace.""" # class exposures from .packed_func import PackedFunc @@ -27,6 +27,4 @@ from .object_generic import convert_to_object, convert, const from .ndarray import context, cpu, gpu, opencl, cl, vulkan, metal, mtl from .ndarray import vpi, rocm, opengl, ext_dev, micro_dev -from .module import load as load_module - -DataType = DataType +from .module import load_module, enabled, system_lib diff --git a/python/tvm/runtime/_ffi_api.py b/python/tvm/runtime/_ffi_api.py new file mode 100644 index 000000000000..a07193ea9852 --- /dev/null +++ b/python/tvm/runtime/_ffi_api.py @@ -0,0 +1,22 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""FFI APIs for tvm.runtime""" +import tvm._ffi + +# Exports functions registered via TVM_REGISTER_GLOBAL with the "runtime" prefix. +# e.g. TVM_REGISTER_GLOBAL("runtime.ModuleLoadFromFile") +tvm._ffi._init_api("runtime", __name__) diff --git a/python/tvm/runtime/_ffi_node_api.py b/python/tvm/runtime/_ffi_node_api.py new file mode 100644 index 000000000000..adaa376b8a8d --- /dev/null +++ b/python/tvm/runtime/_ffi_node_api.py @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# pylint: disable=invalid-name, unused-argument +"""FFI for tvm.runtime.extra""" +import tvm._ffi + +# The implementations below are default ones when the corresponding +# functions are not available in the runtime only mode. +# They will be overriden via _init_api to the ones registered +# via TVM_REGISTER_GLOBAL in the compiler mode. +def AsRepr(obj): + return obj.type_key() + "(" + obj.handle.value + ")" + + +def NodeListAttrNames(obj): + return lambda x: 0 + + +def NodeGetAttr(obj, name): + raise AttributeError() + + +def SaveJSON(obj): + raise RuntimeError( + "Do not support object serialization in runtime only mode") + + +def LoadJSON(json_str): + raise RuntimeError( + "Do not support object serialization in runtime only mode") + + +# Exports functions registered via TVM_REGISTER_GLOBAL with the "node" prefix. +# e.g. 
TVM_REGISTER_GLOBAL("node.AsRepr") +tvm._ffi._init_api("node", __name__) diff --git a/python/tvm/runtime/module.py b/python/tvm/runtime/module.py index 0d21c384f88d..4ed542549f17 100644 --- a/python/tvm/runtime/module.py +++ b/python/tvm/runtime/module.py @@ -26,6 +26,8 @@ from tvm._ffi.libinfo import find_include_path from .packed_func import PackedFunc, PackedFuncHandle, _set_class_module +from . import _ffi_api + # profile result of time evaluator ProfileResult = namedtuple("ProfileResult", ["mean", "results"]) @@ -52,7 +54,7 @@ def entry_func(self): Returns ------- - f : Function + f : tvm.runtime.PackedFunc The entry function if exist """ if self._entry: @@ -73,7 +75,7 @@ def get_function(self, name, query_imports=False): Returns ------- - f : Function + f : tvm.runtime.PackedFunc The result function. """ ret_handle = PackedFuncHandle() @@ -91,7 +93,7 @@ def import_module(self, module): Parameters ---------- - module : Module + module : tvm.runtime.Module The other module. """ check_call(_LIB.TVMModImport(self.handle, module.handle)) @@ -114,7 +116,7 @@ def __repr__(self): @property def type_key(self): """Get type key of the module.""" - return _GetTypeKey(self) + return _ffi_api.ModuleGetTypeKey(self) def get_source(self, fmt=""): """Get source code from module, if available. @@ -129,7 +131,7 @@ def get_source(self, fmt=""): source : str The result source code. """ - return _GetSource(self, fmt) + return _ffi_api.ModuleGetSource(self, fmt) @property def imported_modules(self): @@ -140,8 +142,8 @@ def imported_modules(self): modules : list of Module The module """ - nmod = _ImportsSize(self) - return [_GetImport(self, i) for i in range(nmod)] + nmod = _ffi_api.ModuleImportsSize(self) + return [_ffi_api.ModuleGetImport(self, i) for i in range(nmod)] def save(self, file_name, fmt=""): """Save the module to file. @@ -158,9 +160,9 @@ def save(self, file_name, fmt=""): See Also -------- - Module.export_library : export the module to shared library. + runtime.Module.export_library : export the module to shared library. """ - _SaveToFile(self, file_name, fmt) + _ffi_api.ModuleSaveToFile(self, file_name, fmt) def time_evaluator(self, func_name, ctx, number=10, repeat=1, min_repeat_ms=0): """Get an evaluator that measures time cost of running function. @@ -199,13 +201,14 @@ def time_evaluator(self, func_name, ctx, number=10, repeat=1, min_repeat_ms=0): Returns ------- - ftimer : Function + ftimer : function The function that takes same argument as func and returns a ProfileResult. The ProfileResult reports `repeat` time costs in seconds. 
""" try: - feval = _RPCTimeEvaluator( - self, func_name, ctx.device_type, ctx.device_id, number, repeat, min_repeat_ms) + feval = _ffi_api.RPCTimeEvaluator( + self, func_name, ctx.device_type, ctx.device_id, + number, repeat, min_repeat_ms) def evaluator(*args): """Internal wrapped evaluator.""" @@ -314,13 +317,13 @@ def export_library(self, if self.imported_modules: if enabled("llvm") and llvm_target_triple: path_obj = temp.relpath("devc.o") - m = _PackImportsToLLVM(self, is_system_lib, llvm_target_triple) + m = _ffi_api.ModulePackImportsToLLVM(self, is_system_lib, llvm_target_triple) m.save(path_obj) files.append(path_obj) else: path_cc = temp.relpath("devc.cc") with open(path_cc, "w") as f: - f.write(_PackImportsToC(self, is_system_lib)) + f.write(_ffi_api.ModulePackImportsToC(self, is_system_lib)) files.append(path_cc) if has_c_module: @@ -349,13 +352,13 @@ def system_lib(): Returns ------- - module : Module + module : runtime.Module The system-wide library module. """ - return _GetSystemLib() + return _ffi_api.SystemLib() -def load(path, fmt=""): +def load_module(path, fmt=""): """Load module from file. Parameters @@ -369,7 +372,7 @@ def load(path, fmt=""): Returns ------- - module : Module + module : runtime.Module The loaded module Note @@ -396,7 +399,7 @@ def load(path, fmt=""): elif path.endswith(".obj"): fmt = "micro_dev" # Redirect to the load API - return _LoadFromFile(path, fmt) + return _ffi_api.ModuleLoadFromFile(path, fmt) def enabled(target): @@ -416,11 +419,9 @@ def enabled(target): -------- The following code checks if gpu is enabled. - >>> tvm.module.enabled("gpu") + >>> tvm.runtime.enabled("gpu") """ - return _Enabled(target) + return _ffi_api.RuntimeEnabled(target) _set_class_module(Module) - -tvm._ffi._init_api("tvm.module", "tvm.runtime.module") diff --git a/python/tvm/runtime/ndarray.py b/python/tvm/runtime/ndarray.py index 7fbad7c943a5..b34f365d9cba 100644 --- a/python/tvm/runtime/ndarray.py +++ b/python/tvm/runtime/ndarray.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-import -"""Runtime NDArray api""" +"""Runtime NDArray API""" import ctypes import numpy as np import tvm._ffi @@ -146,7 +146,7 @@ def copyfrom(self, source_array): return self def __repr__(self): - res = "\n".format(self.shape, self.context) + res = "\n".format(self.shape, self.context) res += self.asnumpy().__repr__() return res @@ -203,7 +203,7 @@ def context(dev_type, dev_id=0): Returns ------- - ctx: TVMContext + ctx: tvm.runtime.TVMContext The corresponding context. Examples diff --git a/python/tvm/runtime/object.py b/python/tvm/runtime/object.py index 6b4b77b14c39..f725c1908ba5 100644 --- a/python/tvm/runtime/object.py +++ b/python/tvm/runtime/object.py @@ -19,7 +19,7 @@ import ctypes from tvm._ffi.base import _FFI_MODE, _RUNTIME_ONLY, check_call, _LIB, c_str -from .. import _api_internal +from . 
import _ffi_api, _ffi_node_api try: # pylint: disable=wrong-import-position,unused-import @@ -41,22 +41,22 @@ def _new_object(cls): class Object(ObjectBase): """Base class for all tvm's runtime objects.""" def __repr__(self): - return _api_internal._format_str(self) + return _ffi_node_api.AsRepr(self) def __dir__(self): - fnames = _api_internal._NodeListAttrNames(self) + fnames = _ffi_node_api.NodeListAttrNames(self) size = fnames(-1) return [fnames(i) for i in range(size)] def __getattr__(self, name): try: - return _api_internal._NodeGetAttr(self, name) + return _ffi_node_api.NodeGetAttr(self, name) except AttributeError: raise AttributeError( "%s has no attribute %s" % (str(type(self)), name)) def __hash__(self): - return _api_internal._raw_ptr(self) + return _ffi_api.ObjectHash(self) def __eq__(self, other): return self.same_as(other) @@ -71,25 +71,19 @@ def __reduce__(self): def __getstate__(self): handle = self.handle if handle is not None: - return {'handle': _api_internal._save_json(self)} + return {'handle': _ffi_node_api.SaveJSON(self)} return {'handle': None} def __setstate__(self, state): - # pylint: disable=assigning-non-slot + # pylint: disable=assigning-non-slot, assignment-from-no-return handle = state['handle'] if handle is not None: json_str = handle - other = _api_internal._load_json(json_str) + other = _ffi_node_api.LoadJSON(json_str) self.handle = other.handle other.handle = None else: self.handle = None - def same_as(self, other): - """check object identity equality""" - if not isinstance(other, Object): - return False - return self.__hash__() == other.__hash__() - _set_class_object(Object) diff --git a/python/tvm/runtime/object_generic.py b/python/tvm/runtime/object_generic.py index 499f1cb287fa..22354db7737c 100644 --- a/python/tvm/runtime/object_generic.py +++ b/python/tvm/runtime/object_generic.py @@ -19,7 +19,7 @@ from numbers import Number, Integral from tvm._ffi.base import string_types -from .. import _api_internal +from . 
import _ffi_node_api from .object import ObjectBase, _set_class_object_generic from .ndarray import NDArrayBase from .packed_func import PackedFuncBase, convert_to_tvm_func @@ -56,10 +56,10 @@ def convert_to_object(value): if isinstance(value, Number): return const(value) if isinstance(value, string_types): - return _api_internal._str(value) + return _ffi_node_api.String(value) if isinstance(value, (list, tuple)): value = [convert_to_object(x) for x in value] - return _api_internal._Array(*value) + return _ffi_node_api.Array(*value) if isinstance(value, dict): vlist = [] for item in value.items(): @@ -68,7 +68,7 @@ def convert_to_object(value): raise ValueError("key of map must already been a container type") vlist.append(item[0]) vlist.append(convert_to_object(item[1])) - return _api_internal._Map(*vlist) + return _ffi_node_api.Map(*vlist) if isinstance(value, ObjectGeneric): return value.asobject() if value is None: @@ -133,9 +133,9 @@ def const(value, dtype=None): if dtype is None: dtype = _scalar_type_inference(value) if dtype == "uint64" and value >= (1 << 63): - return _api_internal._LargeUIntImm( + return _ffi_node_api.LargeUIntImm( dtype, value & ((1 << 32) - 1), value >> 32) - return _api_internal._const(value, dtype) + return _ffi_node_api._const(value, dtype) _set_class_object_generic(ObjectGeneric, convert_to_object) diff --git a/python/tvm/runtime/packed_func.py b/python/tvm/runtime/packed_func.py index 05cdef4b490c..a04e32be0ea2 100644 --- a/python/tvm/runtime/packed_func.py +++ b/python/tvm/runtime/packed_func.py @@ -47,7 +47,7 @@ class PackedFunc(PackedFuncBase): For example, the developer function exposed in tvm.ir_pass are actually C++ functions that are registered as PackedFunc - The following are list of common usage scenario of tvm.Function. + The following are list of common usage scenario of tvm.runtime.PackedFunc. - Automatic exposure of C++ API into python - To call PackedFunc from python side diff --git a/python/tvm/target.py b/python/tvm/target.py index a5052714666c..e149c890e0ad 100644 --- a/python/tvm/target.py +++ b/python/tvm/target.py @@ -47,7 +47,7 @@ Build TVM system library module. System lib is a global module that contains self registered functions in program startup. User can get the module using - :any:`tvm.module.system_lib`. + :any:`tvm.runtime.system_lib`. It is useful in environments where dynamic loading api like dlopen is banned. The system lib will be available as long as the result code is linked by the program. 
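Editor's note: since `system_lib` is one of the entry points this change relocates to `tvm.runtime`, a minimal sketch of the intended workflow may help here. It is illustrative only, assuming the `llvm --system-lib` target flag and hypothetical names (`fadd`, `fadd_sys.o`); the runtime half only works once the saved object file has been linked into the running program, so that its functions self-register at startup.

```python
import numpy as np
import tvm

# Compile side: the --system-lib flag marks the generated code for static
# linking, so its functions self-register with the runtime at program startup.
n = tvm.var("n")
A = tvm.placeholder((n,), name="A")
B = tvm.compute((n,), lambda i: A[i] + 1.0, name="B")
s = tvm.create_schedule(B.op)
fadd = tvm.build(s, [A, B], "llvm --system-lib", name="fadd")
fadd.save("fadd_sys.o")  # link this object into the deploying application

# Runtime side (inside the application that linked fadd_sys.o): no dlopen
# is needed, the function is looked up from the system-wide module.
mod = tvm.runtime.system_lib()
f = mod["fadd"]
a = tvm.nd.array(np.arange(10, dtype="float32"))
b = tvm.nd.array(np.zeros(10, dtype="float32"))
f(a, b)
```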
diff --git a/rust/frontend/README.md b/rust/frontend/README.md index c61ba847c1f2..9dce7ea1d920 100644 --- a/rust/frontend/README.md +++ b/rust/frontend/README.md @@ -125,7 +125,7 @@ import tvm from tvm.contrib import cc def test_add(target_dir): - if not tvm.module.enabled("cuda"): + if not tvm.runtime.enabled("cuda"): print("skip {__file__} because cuda is not enabled...".format(__file__=__file__)) return n = tvm.var("n") diff --git a/rust/frontend/examples/resnet/src/build_resnet.py b/rust/frontend/examples/resnet/src/build_resnet.py index 2497a41c6ef7..e71381888c1f 100644 --- a/rust/frontend/examples/resnet/src/build_resnet.py +++ b/rust/frontend/examples/resnet/src/build_resnet.py @@ -60,7 +60,7 @@ def build(target_dir): if args.pretrained: # needs mxnet installed from mxnet.gluon.model_zoo.vision import get_model - + # if `--pretrained` is enabled, it downloads a pretrained # resnet18 trained on imagenet1k dataset for image classification task block = get_model('resnet18_v1', pretrained=True) @@ -111,7 +111,7 @@ def download_img_labels(): def test_build(build_dir): """ Sanity check with random input""" graph = open(osp.join(build_dir, "deploy_graph.json")).read() - lib = tvm.module.load(osp.join(build_dir, "deploy_lib.so")) + lib = tvm.runtime.load(osp.join(build_dir, "deploy_lib.so")) params = bytearray(open(osp.join(build_dir,"deploy_param.params"), "rb").read()) input_data = tvm.nd.array(np.random.uniform(size=data_shape).astype("float32")) ctx = tvm.cpu() diff --git a/src/api/api_base.cc b/src/api/api_base.cc deleted file mode 100644 index 48245fa16e74..000000000000 --- a/src/api/api_base.cc +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - - /*! 
- * Implementation of basic API functions - * \file api_base.cc - */ -#include -#include -#include -#include -#include - -namespace tvm { -TVM_REGISTER_GLOBAL("_format_str") -.set_body([](TVMArgs args, TVMRetValue *ret) { - CHECK(args[0].type_code() == kTVMObjectHandle); - std::ostringstream os; - os << args[0].operator ObjectRef(); - *ret = os.str(); - }); - -TVM_REGISTER_GLOBAL("_raw_ptr") -.set_body([](TVMArgs args, TVMRetValue *ret) { - CHECK(args[0].type_code() == kTVMObjectHandle); - *ret = reinterpret_cast(args[0].value().v_handle); - }); - -TVM_REGISTER_GLOBAL("_save_json") -.set_body_typed(SaveJSON); - -TVM_REGISTER_GLOBAL("_load_json") -.set_body_typed(LoadJSON); - -TVM_REGISTER_GLOBAL("_TVMSetStream") -.set_body_typed(TVMSetStream); - -TVM_REGISTER_GLOBAL("_save_param_dict") -.set_body([](TVMArgs args, TVMRetValue *rv) { - CHECK_EQ(args.size() % 2, 0u); - constexpr uint64_t TVMNDArrayListMagic = 0xF7E58D4F05049CB7; - size_t num_params = args.size() / 2; - std::vector names; - names.reserve(num_params); - std::vector arrays; - arrays.reserve(num_params); - for (size_t i = 0; i < num_params * 2; i += 2) { - names.emplace_back(args[i].operator std::string()); - arrays.emplace_back(args[i + 1].operator DLTensor*()); - } - std::string bytes; - dmlc::MemoryStringStream strm(&bytes); - dmlc::Stream* fo = &strm; - uint64_t header = TVMNDArrayListMagic, reserved = 0; - fo->Write(header); - fo->Write(reserved); - fo->Write(names); - { - uint64_t sz = static_cast(arrays.size()); - fo->Write(sz); - for (size_t i = 0; i < sz; ++i) { - tvm::runtime::SaveDLTensor(fo, arrays[i]); - } - } - TVMByteArray arr; - arr.data = bytes.c_str(); - arr.size = bytes.length(); - *rv = arr; - }); - -} // namespace tvm diff --git a/src/api/api_codegen.cc b/src/api/api_codegen.cc deleted file mode 100644 index ad0b6d99a970..000000000000 --- a/src/api/api_codegen.cc +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! 
- * Implementation of API functions related to Codegen - * \file c_api_codegen.cc - */ -#include -#include -#include -#include -#include - -namespace tvm { -namespace codegen { - -TVM_REGISTER_GLOBAL("codegen._Build") -.set_body([](TVMArgs args, TVMRetValue *ret) { - if (args[0].IsObjectRef()) { - *ret = Build({args[0]}, args[1]); - } else { - *ret = Build(args[0], args[1]); - } - }); - -TVM_REGISTER_GLOBAL("module._PackImportsToC") -.set_body_typed(PackImportsToC); - -TVM_REGISTER_GLOBAL("module._PackImportsToLLVM") -.set_body_typed(PackImportsToLLVM); -} // namespace codegen -} // namespace tvm diff --git a/src/api/api_lang.cc b/src/api/api_lang.cc index b6ac7d52a3d1..591869e49883 100644 --- a/src/api/api_lang.cc +++ b/src/api/api_lang.cc @@ -21,7 +21,7 @@ * Implementation of API functions related to Higher DSL build. * \file api_lang.cc */ -#include +#include #include #include #include @@ -32,7 +32,6 @@ #include #include - namespace tvm { TVM_REGISTER_GLOBAL("_min_value") @@ -41,172 +40,6 @@ TVM_REGISTER_GLOBAL("_min_value") TVM_REGISTER_GLOBAL("_max_value") .set_body_typed(max_value); -TVM_REGISTER_GLOBAL("_const") -.set_body([](TVMArgs args, TVMRetValue* ret) { - if (args[0].type_code() == kDLInt) { - *ret = tir::make_const(args[1], args[0].operator int64_t()); - } else if (args[0].type_code() == kDLFloat) { - *ret = tir::make_const(args[1], args[0].operator double()); - } else { - LOG(FATAL) << "only accept int or float"; - } - }); - -TVM_REGISTER_GLOBAL("_LargeUIntImm") -.set_body_typed(LargeUIntImm); - -TVM_REGISTER_GLOBAL("_str") -.set_body_typed(tir::StringImmNode::make); - - -TVM_REGISTER_GLOBAL("_Array") -.set_body([](TVMArgs args, TVMRetValue* ret) { - std::vector data; - for (int i = 0; i < args.size(); ++i) { - if (args[i].type_code() != kTVMNullptr) { - data.push_back(args[i].operator ObjectRef()); - } else { - data.push_back(ObjectRef(nullptr)); - } - } - auto node = make_object(); - node->data = std::move(data); - *ret = Array(node); - }); - -TVM_REGISTER_GLOBAL("_ArrayGetItem") -.set_body([](TVMArgs args, TVMRetValue* ret) { - int64_t i = args[1]; - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); - Object* ptr = static_cast(args[0].value().v_handle); - CHECK(ptr->IsInstance()); - auto* n = static_cast(ptr); - CHECK_LT(static_cast(i), n->data.size()) - << "out of bound of array"; - *ret = n->data[static_cast(i)]; - }); - -TVM_REGISTER_GLOBAL("_ArraySize") -.set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); - Object* ptr = static_cast(args[0].value().v_handle); - CHECK(ptr->IsInstance()); - *ret = static_cast( - static_cast(ptr)->data.size()); - }); - -TVM_REGISTER_GLOBAL("_Map") -.set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args.size() % 2, 0); - if (args.size() != 0 && args[0].type_code() == kTVMStr) { - // StrMap - StrMapNode::ContainerType data; - for (int i = 0; i < args.num_args; i += 2) { - CHECK(args[i].type_code() == kTVMStr) - << "key of str map need to be str"; - CHECK(args[i + 1].IsObjectRef()) - << "value of the map to be NodeRef"; - data.emplace(std::make_pair(args[i].operator std::string(), - args[i + 1].operator ObjectRef())); - } - auto node = make_object(); - node->data = std::move(data); - *ret = Map(node); - } else { - // Container node. 
- MapNode::ContainerType data; - for (int i = 0; i < args.num_args; i += 2) { - CHECK(args[i].IsObjectRef()) - << "key of str map need to be object"; - CHECK(args[i + 1].IsObjectRef()) - << "value of map to be NodeRef"; - data.emplace(std::make_pair(args[i].operator ObjectRef(), - args[i + 1].operator ObjectRef())); - } - auto node = make_object(); - node->data = std::move(data); - *ret = Map(node); - } - }); - -TVM_REGISTER_GLOBAL("_MapSize") -.set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); - Object* ptr = static_cast(args[0].value().v_handle); - if (ptr->IsInstance()) { - auto* n = static_cast(ptr); - *ret = static_cast(n->data.size()); - } else { - CHECK(ptr->IsInstance()); - auto* n = static_cast(ptr); - *ret = static_cast(n->data.size()); - } - }); - -TVM_REGISTER_GLOBAL("_MapGetItem") -.set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); - Object* ptr = static_cast(args[0].value().v_handle); - - if (ptr->IsInstance()) { - CHECK(args[1].type_code() == kTVMObjectHandle); - auto* n = static_cast(ptr); - auto it = n->data.find(args[1].operator ObjectRef()); - CHECK(it != n->data.end()) - << "cannot find the corresponding key in the Map"; - *ret = (*it).second; - } else { - CHECK(ptr->IsInstance()); - auto* n = static_cast(ptr); - auto it = n->data.find(args[1].operator std::string()); - CHECK(it != n->data.end()) - << "cannot find the corresponding key in the Map"; - *ret = (*it).second; - } - }); - -TVM_REGISTER_GLOBAL("_MapCount") -.set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); - Object* ptr = static_cast(args[0].value().v_handle); - - if (ptr->IsInstance()) { - auto* n = static_cast(ptr); - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); - *ret = static_cast( - n->data.count(args[1].operator ObjectRef())); - } else { - CHECK(ptr->IsInstance()); - auto* n = static_cast(ptr); - *ret = static_cast( - n->data.count(args[1].operator std::string())); - } - }); - -TVM_REGISTER_GLOBAL("_MapItems") -.set_body([](TVMArgs args, TVMRetValue* ret) { - CHECK_EQ(args[0].type_code(), kTVMObjectHandle); - Object* ptr = static_cast(args[0].value().v_handle); - - if (ptr->IsInstance()) { - auto* n = static_cast(ptr); - auto rkvs = make_object(); - for (const auto& kv : n->data) { - rkvs->data.push_back(kv.first); - rkvs->data.push_back(kv.second); - } - *ret = Array(rkvs); - } else { - auto* n = static_cast(ptr); - auto rkvs = make_object(); - for (const auto& kv : n->data) { - rkvs->data.push_back(tir::StringImmNode::make(kv.first)); - rkvs->data.push_back(kv.second); - } - *ret = Array(rkvs); - } - }); - TVM_REGISTER_GLOBAL("Range") .set_body([](TVMArgs args, TVMRetValue* ret) { if (args.size() == 1) { diff --git a/src/node/container.cc b/src/node/container.cc new file mode 100644 index 000000000000..25bfe9dbba56 --- /dev/null +++ b/src/node/container.cc @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
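The container globals removed from api_lang.cc above are re-registered in the new src/node/container.cc that follows, under the node.* namespace (node.Array, node.ArrayGetItem, node.ArraySize, node.Map, ...). A hedged sketch of how a frontend reaches them after the rename; arguments must already be ObjectRefs:

    import tvm

    make_array = tvm.get_global_func("node.Array")        # previously "_Array"
    get_item = tvm.get_global_func("node.ArrayGetItem")   # previously "_ArrayGetItem"
    arr_size = tvm.get_global_func("node.ArraySize")      # previously "_ArraySize"

    arr = make_array(tvm.convert(1), tvm.convert(2))      # elements must be ObjectRefs
    assert int(arr_size(arr)) == 2
    print(get_item(arr, 0))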
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/*! + * Expose container API to frontend. + * \file src/node/container.cc + */ +#include +#include +#include + +namespace tvm { + +TVM_REGISTER_GLOBAL("node.Array") +.set_body([](TVMArgs args, TVMRetValue* ret) { + std::vector data; + for (int i = 0; i < args.size(); ++i) { + if (args[i].type_code() != kTVMNullptr) { + data.push_back(args[i].operator ObjectRef()); + } else { + data.push_back(ObjectRef(nullptr)); + } + } + auto node = make_object(); + node->data = std::move(data); + *ret = Array(node); + }); + +TVM_REGISTER_GLOBAL("node.ArrayGetItem") +.set_body([](TVMArgs args, TVMRetValue* ret) { + int64_t i = args[1]; + CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + Object* ptr = static_cast(args[0].value().v_handle); + CHECK(ptr->IsInstance()); + auto* n = static_cast(ptr); + CHECK_LT(static_cast(i), n->data.size()) + << "out of bound of array"; + *ret = n->data[static_cast(i)]; + }); + +TVM_REGISTER_GLOBAL("node.ArraySize") +.set_body([](TVMArgs args, TVMRetValue* ret) { + CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + Object* ptr = static_cast(args[0].value().v_handle); + CHECK(ptr->IsInstance()); + *ret = static_cast( + static_cast(ptr)->data.size()); + }); + +TVM_REGISTER_GLOBAL("node.Map") +.set_body([](TVMArgs args, TVMRetValue* ret) { + CHECK_EQ(args.size() % 2, 0); + if (args.size() != 0 && args[0].type_code() == kTVMStr) { + // StrMap + StrMapNode::ContainerType data; + for (int i = 0; i < args.num_args; i += 2) { + CHECK(args[i].type_code() == kTVMStr) + << "key of str map need to be str"; + CHECK(args[i + 1].IsObjectRef()) + << "value of the map to be NodeRef"; + data.emplace(std::make_pair(args[i].operator std::string(), + args[i + 1].operator ObjectRef())); + } + auto node = make_object(); + node->data = std::move(data); + *ret = Map(node); + } else { + // Container node. 
+ MapNode::ContainerType data; + for (int i = 0; i < args.num_args; i += 2) { + CHECK(args[i].IsObjectRef()) + << "key of str map need to be object"; + CHECK(args[i + 1].IsObjectRef()) + << "value of map to be NodeRef"; + data.emplace(std::make_pair(args[i].operator ObjectRef(), + args[i + 1].operator ObjectRef())); + } + auto node = make_object(); + node->data = std::move(data); + *ret = Map(node); + } + }); + + +TVM_REGISTER_GLOBAL("node.MapSize") +.set_body([](TVMArgs args, TVMRetValue* ret) { + CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + Object* ptr = static_cast(args[0].value().v_handle); + if (ptr->IsInstance()) { + auto* n = static_cast(ptr); + *ret = static_cast(n->data.size()); + } else { + CHECK(ptr->IsInstance()); + auto* n = static_cast(ptr); + *ret = static_cast(n->data.size()); + } + }); + +TVM_REGISTER_GLOBAL("node.MapGetItem") +.set_body([](TVMArgs args, TVMRetValue* ret) { + CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + Object* ptr = static_cast(args[0].value().v_handle); + + if (ptr->IsInstance()) { + CHECK(args[1].type_code() == kTVMObjectHandle); + auto* n = static_cast(ptr); + auto it = n->data.find(args[1].operator ObjectRef()); + CHECK(it != n->data.end()) + << "cannot find the corresponding key in the Map"; + *ret = (*it).second; + } else { + CHECK(ptr->IsInstance()); + auto* n = static_cast(ptr); + auto it = n->data.find(args[1].operator std::string()); + CHECK(it != n->data.end()) + << "cannot find the corresponding key in the Map"; + *ret = (*it).second; + } + }); + +TVM_REGISTER_GLOBAL("node.MapCount") +.set_body([](TVMArgs args, TVMRetValue* ret) { + CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + Object* ptr = static_cast(args[0].value().v_handle); + + if (ptr->IsInstance()) { + auto* n = static_cast(ptr); + CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + *ret = static_cast( + n->data.count(args[1].operator ObjectRef())); + } else { + CHECK(ptr->IsInstance()); + auto* n = static_cast(ptr); + *ret = static_cast( + n->data.count(args[1].operator std::string())); + } + }); + +TVM_REGISTER_GLOBAL("node.MapItems") +.set_body([](TVMArgs args, TVMRetValue* ret) { + CHECK_EQ(args[0].type_code(), kTVMObjectHandle); + Object* ptr = static_cast(args[0].value().v_handle); + + if (ptr->IsInstance()) { + auto* n = static_cast(ptr); + auto rkvs = make_object(); + for (const auto& kv : n->data) { + rkvs->data.push_back(kv.first); + rkvs->data.push_back(kv.second); + } + *ret = Array(rkvs); + } else { + auto* n = static_cast(ptr); + auto rkvs = make_object(); + for (const auto& kv : n->data) { + rkvs->data.push_back(tir::StringImmNode::make(kv.first)); + rkvs->data.push_back(kv.second); + } + *ret = Array(rkvs); + } + }); +} // namespace tvm diff --git a/src/node/reflection.cc b/src/node/reflection.cc index 7c0fb8faf5eb..d61d72b82b2f 100644 --- a/src/node/reflection.cc +++ b/src/node/reflection.cc @@ -298,13 +298,12 @@ void MakeNode(const TVMArgs& args, TVMRetValue* rv) { } -TVM_REGISTER_GLOBAL("_NodeGetAttr") +TVM_REGISTER_GLOBAL("node.NodeGetAttr") .set_body(NodeGetAttr); -TVM_REGISTER_GLOBAL("_NodeListAttrNames") +TVM_REGISTER_GLOBAL("node.NodeListAttrNames") .set_body(NodeListAttrNames); TVM_REGISTER_GLOBAL("make._Node") .set_body(MakeNode); - } // namespace tvm diff --git a/src/node/repr_printer.cc b/src/node/repr_printer.cc index ef91d2f5965e..e81a8241cb51 100644 --- a/src/node/repr_printer.cc +++ b/src/node/repr_printer.cc @@ -21,6 +21,7 @@ * Printer utilities * \file node/repr_printer.cc */ +#include #include namespace tvm { @@ -53,4 +54,11 @@ 
ReprPrinter::FType& ReprPrinter::vtable() { void Dump(const ObjectRef& n) { std::cerr << n << "\n"; } + +TVM_REGISTER_GLOBAL("node.AsRepr") +.set_body_typed([](runtime::ObjectRef obj) { + std::ostringstream os; + os << obj; + return os.str(); +}); } // namespace tvm diff --git a/src/node/serialization.cc b/src/node/serialization.cc index 91b5e1a6cf21..11c9e8fc8cb6 100644 --- a/src/node/serialization.cc +++ b/src/node/serialization.cc @@ -23,7 +23,7 @@ */ #include #include - +#include #include #include #include @@ -455,4 +455,10 @@ ObjectRef LoadJSON(std::string json_str) { } return ObjectRef(nodes.at(jgraph.root)); } + +TVM_REGISTER_GLOBAL("node.SaveJSON") +.set_body_typed(SaveJSON); + +TVM_REGISTER_GLOBAL("node.LoadJSON") +.set_body_typed(LoadJSON); } // namespace tvm diff --git a/src/relay/backend/contrib/codegen_c/codegen.cc b/src/relay/backend/contrib/codegen_c/codegen.cc index 0c9827f675ed..55844479d605 100644 --- a/src/relay/backend/contrib/codegen_c/codegen.cc +++ b/src/relay/backend/contrib/codegen_c/codegen.cc @@ -194,7 +194,7 @@ class CSourceCodegen : public CSourceModuleCodegenBase { } // Create a CSourceModule - const auto* pf = runtime::Registry::Get("module.csource_module_create"); + const auto* pf = runtime::Registry::Get("runtime.CSourceModuleCreate"); CHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module"; return (*pf)(code_stream_.str(), "cc"); } diff --git a/src/relay/backend/contrib/dnnl/codegen.cc b/src/relay/backend/contrib/dnnl/codegen.cc index 759a4421bc1d..6206173dd998 100644 --- a/src/relay/backend/contrib/dnnl/codegen.cc +++ b/src/relay/backend/contrib/dnnl/codegen.cc @@ -282,7 +282,7 @@ class DNNLModuleCodegen : public CSourceModuleCodegenBase { } // Create a CSourceModule - const auto* pf = runtime::Registry::Get("module.csource_module_create"); + const auto* pf = runtime::Registry::Get("runtime.CSourceModuleCreate"); CHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module"; return (*pf)(code_stream_.str(), "cc"); } diff --git a/src/runtime/c_runtime_api.cc b/src/runtime/c_runtime_api.cc index ba762c012fac..c60b4a8b95b1 100644 --- a/src/runtime/c_runtime_api.cc +++ b/src/runtime/c_runtime_api.cc @@ -630,3 +630,7 @@ TVM_REGISTER_GLOBAL("_GetDeviceAttr") DeviceAPIManager::Get(ctx)->GetAttr(ctx, kind, ret); } }); + + +TVM_REGISTER_GLOBAL("runtime.TVMSetStream") +.set_body_typed(TVMSetStream); diff --git a/src/runtime/contrib/example_ext_runtime/example_ext_runtime.cc b/src/runtime/contrib/example_ext_runtime/example_ext_runtime.cc index 28c6841207d8..98078b68c23a 100644 --- a/src/runtime/contrib/example_ext_runtime/example_ext_runtime.cc +++ b/src/runtime/contrib/example_ext_runtime/example_ext_runtime.cc @@ -332,12 +332,12 @@ class ExampleJsonModule : public ModuleNode { std::vector op_id_; }; -TVM_REGISTER_GLOBAL("module.loadfile_examplejson") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_examplejson") .set_body([](TVMArgs args, TVMRetValue* rv) { *rv = ExampleJsonModule::Create(args[0]); }); -TVM_REGISTER_GLOBAL("module.loadbinary_examplejson") +TVM_REGISTER_GLOBAL("runtime.module.loadbinary_examplejson") .set_body_typed(ExampleJsonModule::LoadFromBinary); } // namespace runtime diff --git a/src/runtime/cuda/cuda_module.cc b/src/runtime/cuda/cuda_module.cc index 2d9ba57cbdaa..0550712de9ab 100644 --- a/src/runtime/cuda/cuda_module.cc +++ b/src/runtime/cuda/cuda_module.cc @@ -305,13 +305,13 @@ Module CUDAModuleLoadBinary(void* strm) { return CUDAModuleCreate(data, fmt, fmap, std::string()); } 
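The new "node.AsRepr" global registered above simply streams an ObjectRef through the ReprPrinter and returns the resulting string, giving frontends one entry point for printing objects. A small sketch of calling it directly:

    import tvm

    as_repr = tvm.get_global_func("node.AsRepr")
    arr = tvm.convert([1, 2, 3])
    # Same text the C++ ReprPrinter would emit for this object.
    print(as_repr(arr))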
-TVM_REGISTER_GLOBAL("module.loadfile_cubin") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_cubin") .set_body_typed(CUDAModuleLoadFile); -TVM_REGISTER_GLOBAL("module.loadfile_ptx") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_ptx") .set_body_typed(CUDAModuleLoadFile); -TVM_REGISTER_GLOBAL("module.loadbinary_cuda") +TVM_REGISTER_GLOBAL("runtime.module.loadbinary_cuda") .set_body_typed(CUDAModuleLoadBinary); } // namespace runtime } // namespace tvm diff --git a/src/runtime/dso_library.cc b/src/runtime/dso_library.cc index 4df5c0552a3d..378f976dead1 100644 --- a/src/runtime/dso_library.cc +++ b/src/runtime/dso_library.cc @@ -97,7 +97,7 @@ class DSOLibrary final : public Library { #endif }; -TVM_REGISTER_GLOBAL("module.loadfile_so") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_so") .set_body([](TVMArgs args, TVMRetValue* rv) { auto n = make_object(); n->Init(args[0]); diff --git a/src/runtime/library_module.cc b/src/runtime/library_module.cc index 5b364fc9e006..4343c8df3a03 100644 --- a/src/runtime/library_module.cc +++ b/src/runtime/library_module.cc @@ -148,7 +148,7 @@ runtime::Module ProcessModuleBlob(const char* mblob, ObjectPtr lib) { CHECK(stream->Read(&import_tree_row_ptr)); CHECK(stream->Read(&import_tree_child_indices)); } else { - std::string fkey = "module.loadbinary_" + tkey; + std::string fkey = "runtime.module.loadbinary_" + tkey; const PackedFunc* f = Registry::Get(fkey); CHECK(f != nullptr) << "Loader of " << tkey << "(" diff --git a/src/runtime/metal/metal_module.mm b/src/runtime/metal/metal_module.mm index d9b23fc55086..41269b9f1a5d 100644 --- a/src/runtime/metal/metal_module.mm +++ b/src/runtime/metal/metal_module.mm @@ -307,10 +307,10 @@ Module MetalModuleLoadBinary(void* strm) { return MetalModuleCreate(data, fmt, fmap, ""); } -TVM_REGISTER_GLOBAL("module.loadfile_metal") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_metal") .set_body_typed(MetalModuleLoadFile); -TVM_REGISTER_GLOBAL("module.loadbinary_metal") +TVM_REGISTER_GLOBAL("runtime.module.loadbinary_metal") .set_body_typed(MetalModuleLoadBinary); } // namespace runtime } // namespace tvm diff --git a/src/runtime/micro/micro_module.cc b/src/runtime/micro/micro_module.cc index 4a41d4bca28f..50cee34be4a6 100644 --- a/src/runtime/micro/micro_module.cc +++ b/src/runtime/micro/micro_module.cc @@ -101,7 +101,7 @@ PackedFunc MicroModuleNode::GetFunction( } // register loadfile function to load module from Python frontend -TVM_REGISTER_GLOBAL("module.loadfile_micro_dev") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_micro_dev") .set_body([](TVMArgs args, TVMRetValue* rv) { auto n = make_object(); n->InitMicroModule(args[0]); diff --git a/src/runtime/module.cc b/src/runtime/module.cc index 2895b1f7ed64..0eb81df87496 100644 --- a/src/runtime/module.cc +++ b/src/runtime/module.cc @@ -84,7 +84,7 @@ Module Module::LoadFromFile(const std::string& file_name, if (fmt == "dll" || fmt == "dylib" || fmt == "dso") { fmt = "so"; } - std::string load_f_name = "module.loadfile_" + fmt; + std::string load_f_name = "runtime.module.loadfile_" + fmt; const PackedFunc* f = Registry::Get(load_f_name); CHECK(f != nullptr) << "Loader of " << format << "(" @@ -164,42 +164,35 @@ bool RuntimeEnabled(const std::string& target) { return runtime::Registry::Get(f_name) != nullptr; } -TVM_REGISTER_GLOBAL("module._Enabled") -.set_body([](TVMArgs args, TVMRetValue *ret) { - *ret = RuntimeEnabled(args[0]); - }); - -TVM_REGISTER_GLOBAL("module._GetSource") -.set_body([](TVMArgs args, TVMRetValue *ret) { - *ret = args[0].operator 
Module()->GetSource(args[1]); - }); - -TVM_REGISTER_GLOBAL("module._ImportsSize") -.set_body([](TVMArgs args, TVMRetValue *ret) { - *ret = static_cast( - args[0].operator Module()->imports().size()); - }); - -TVM_REGISTER_GLOBAL("module._GetImport") -.set_body([](TVMArgs args, TVMRetValue *ret) { - *ret = args[0].operator Module()-> - imports().at(args[1].operator int()); - }); - -TVM_REGISTER_GLOBAL("module._GetTypeKey") -.set_body([](TVMArgs args, TVMRetValue *ret) { - *ret = std::string(args[0].operator Module()->type_key()); - }); - -TVM_REGISTER_GLOBAL("module._LoadFromFile") -.set_body([](TVMArgs args, TVMRetValue *ret) { - *ret = Module::LoadFromFile(args[0], args[1]); - }); - -TVM_REGISTER_GLOBAL("module._SaveToFile") -.set_body([](TVMArgs args, TVMRetValue *ret) { - args[0].operator Module()-> - SaveToFile(args[1], args[2]); - }); +TVM_REGISTER_GLOBAL("runtime.RuntimeEnabled") +.set_body_typed(RuntimeEnabled); + +TVM_REGISTER_GLOBAL("runtime.ModuleGetSource") +.set_body_typed([](Module mod, std::string fmt) { + return mod->GetSource(fmt); +}); + +TVM_REGISTER_GLOBAL("runtime.ModuleImportsSize") +.set_body_typed([](Module mod) { + return static_cast(mod->imports().size()); +}); + +TVM_REGISTER_GLOBAL("runtime.ModuleGetImport") +.set_body_typed([](Module mod, int index) { + return mod->imports().at(index); +}); + +TVM_REGISTER_GLOBAL("runtime.ModuleGetTypeKey") +.set_body_typed([](Module mod) { + return std::string(mod->type_key()); +}); + +TVM_REGISTER_GLOBAL("runtime.ModuleLoadFromFile") +.set_body_typed(Module::LoadFromFile); + +TVM_REGISTER_GLOBAL("runtime.ModuleSaveToFile") +.set_body_typed([](Module mod, std::string name, std::string fmt) { + mod->SaveToFile(name, fmt); +}); } // namespace runtime } // namespace tvm diff --git a/src/runtime/object.cc b/src/runtime/object.cc index 602446aa60eb..0d85b9dab42c 100644 --- a/src/runtime/object.cc +++ b/src/runtime/object.cc @@ -21,6 +21,7 @@ * \brief Object type management system. */ #include +#include #include #include #include @@ -202,6 +203,11 @@ uint32_t Object::TypeKey2Index(const std::string& key) { return TypeContext::Global()->TypeKey2Index(key); } + +TVM_REGISTER_GLOBAL("runtime.ObjectHash") +.set_body_typed([](ObjectRef obj) { + return static_cast(ObjectHash()(obj)); +}); } // namespace runtime } // namespace tvm diff --git a/src/runtime/opencl/aocl/aocl_module.cc b/src/runtime/opencl/aocl/aocl_module.cc index 5ee36fd31326..abda5b179a6a 100644 --- a/src/runtime/opencl/aocl/aocl_module.cc +++ b/src/runtime/opencl/aocl/aocl_module.cc @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
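With the hunks above, every module loader now lives under the "runtime.module." prefix (Module::LoadFromFile builds the lookup key as "runtime.module.loadfile_" + fmt), and the ad-hoc module._* helpers become typed runtime.* globals. Roughly how that surfaces on the Python side; "lib.so" is an illustrative path for a previously exported library:

    import tvm

    # load_module("lib.so") resolves the loader registered as "runtime.module.loadfile_so".
    mod = tvm.runtime.load_module("lib.so")

    # Module introspection now goes through typed globals in the runtime namespace.
    get_type_key = tvm.get_global_func("runtime.ModuleGetTypeKey")
    imports_size = tvm.get_global_func("runtime.ModuleImportsSize")
    print(get_type_key(mod), int(imports_size(mod)))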
You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -66,7 +66,7 @@ Module AOCLModuleLoadFile(const std::string& file_name, return AOCLModuleCreate(data, fmt, fmap, std::string()); } -TVM_REGISTER_GLOBAL("module.loadfile_aocx") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_aocx") .set_body_typed(AOCLModuleLoadFile); } // namespace runtime diff --git a/src/runtime/opencl/opencl_module.cc b/src/runtime/opencl/opencl_module.cc index b530d0e2e01a..fefde72b9508 100644 --- a/src/runtime/opencl/opencl_module.cc +++ b/src/runtime/opencl/opencl_module.cc @@ -278,13 +278,13 @@ Module OpenCLModuleLoadBinary(void* strm) { return OpenCLModuleCreate(data, fmt, fmap, std::string()); } -TVM_REGISTER_GLOBAL("module.loadfile_cl") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_cl") .set_body_typed(OpenCLModuleLoadFile); -TVM_REGISTER_GLOBAL("module.loadfile_clbin") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_clbin") .set_body_typed(OpenCLModuleLoadFile); -TVM_REGISTER_GLOBAL("module.loadbinary_opencl") +TVM_REGISTER_GLOBAL("runtime.module.loadbinary_opencl") .set_body_typed(OpenCLModuleLoadBinary); } // namespace runtime } // namespace tvm diff --git a/src/runtime/opencl/sdaccel/sdaccel_module.cc b/src/runtime/opencl/sdaccel/sdaccel_module.cc index 31db46af8749..4569ec3946df 100644 --- a/src/runtime/opencl/sdaccel/sdaccel_module.cc +++ b/src/runtime/opencl/sdaccel/sdaccel_module.cc @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -77,10 +77,10 @@ Module SDAccelModuleLoadBinary(void* strm) { return SDAccelModuleCreate(data, fmt, fmap, std::string()); } -TVM_REGISTER_GLOBAL("module.loadfile_xclbin") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_xclbin") .set_body_typed(SDAccelModuleLoadFile); -TVM_REGISTER_GLOBAL("module.loadfile_awsxclbin") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_awsxclbin") .set_body_typed(SDAccelModuleLoadFile); } // namespace runtime } // namespace tvm diff --git a/src/runtime/opengl/opengl_module.cc b/src/runtime/opengl/opengl_module.cc index 63d9d21891b5..6435aca1bfdd 100644 --- a/src/runtime/opengl/opengl_module.cc +++ b/src/runtime/opengl/opengl_module.cc @@ -278,17 +278,17 @@ Module OpenGLModuleLoadBinary(void* strm) { return OpenGLModuleCreate(FromJSON(data), fmt, fmap); } -TVM_REGISTER_GLOBAL("module.loadfile_gl") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_gl") .set_body([](TVMArgs args, TVMRetValue* rv) { *rv = OpenGLModuleLoadFile(args[0], args[1]); }); -TVM_REGISTER_GLOBAL("module.loadfile_glbin") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_glbin") .set_body([](TVMArgs args, TVMRetValue* rv) { *rv = OpenGLModuleLoadFile(args[0], args[1]); }); -TVM_REGISTER_GLOBAL("module.loadbinary_opengl") +TVM_REGISTER_GLOBAL("runtime.module.loadbinary_opengl") .set_body([](TVMArgs args, TVMRetValue* rv) { *rv = OpenGLModuleLoadBinary(args[0]); }); diff --git a/src/runtime/rocm/rocm_module.cc b/src/runtime/rocm/rocm_module.cc index 5bc92f5b244a..1f4b830ce434 100644 --- 
a/src/runtime/rocm/rocm_module.cc +++ b/src/runtime/rocm/rocm_module.cc @@ -254,18 +254,18 @@ Module ROCMModuleLoadBinary(void* strm) { } -TVM_REGISTER_GLOBAL("module.loadbinary_hsaco") +TVM_REGISTER_GLOBAL("runtime.module.loadbinary_hsaco") .set_body_typed(ROCMModuleLoadBinary); -TVM_REGISTER_GLOBAL("module.loadbinary_hip") +TVM_REGISTER_GLOBAL("runtime.module.loadbinary_hip") .set_body_typed(ROCMModuleLoadBinary); -TVM_REGISTER_GLOBAL("module.loadfile_hsaco") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_hsaco") .set_body_typed(ROCMModuleLoadFile); -TVM_REGISTER_GLOBAL("module.loadfile_hip") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_hip") .set_body_typed(ROCMModuleLoadFile); } // namespace runtime } // namespace tvm diff --git a/src/runtime/rpc/rpc_module.cc b/src/runtime/rpc/rpc_module.cc index a28e4b1b5efb..0e48e6fb2708 100644 --- a/src/runtime/rpc/rpc_module.cc +++ b/src/runtime/rpc/rpc_module.cc @@ -234,7 +234,7 @@ Module CreateRPCModule(std::shared_ptr sess) { return Module(n); } -TVM_REGISTER_GLOBAL("module._RPCTimeEvaluator") +TVM_REGISTER_GLOBAL("runtime.RPCTimeEvaluator") .set_body([](TVMArgs args, TVMRetValue* rv) { Module m = args[0]; std::string tkey = m->type_key(); diff --git a/src/runtime/sgx/trusted/runtime.cc b/src/runtime/sgx/trusted/runtime.cc index 05f949c39001..4e28b110180b 100644 --- a/src/runtime/sgx/trusted/runtime.cc +++ b/src/runtime/sgx/trusted/runtime.cc @@ -84,7 +84,7 @@ void tvm_ecall_packed_func(int func_id, TVM_REGISTER_ENCLAVE_FUNC("__tvm_main__") .set_body([](TVMArgs args, TVMRetValue* rv) { - Module mod = (*Registry::Get("module._GetSystemLib"))(); + Module mod = (*Registry::Get("runtime.SystemLib"))(); mod.GetFunction("default_function").CallPacked(args, rv); }); diff --git a/src/runtime/sgx/untrusted/sgx_module.cc b/src/runtime/sgx/untrusted/sgx_module.cc index 541ab420412c..a4109d5c7e12 100644 --- a/src/runtime/sgx/untrusted/sgx_module.cc +++ b/src/runtime/sgx/untrusted/sgx_module.cc @@ -243,7 +243,7 @@ TVM_REGISTER_GLOBAL("__sgx_reserve_space__") } // extern "C" } // namespace sgx -TVM_REGISTER_GLOBAL("module.loadfile_sgx") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_sgx") .set_body([](TVMArgs args, TVMRetValue* rv) { std::shared_ptr node = std::make_shared(); node->Init(args[0]); diff --git a/src/runtime/stackvm/stackvm_module.cc b/src/runtime/stackvm/stackvm_module.cc index b73c7ceaa85a..8b30b750e714 100644 --- a/src/runtime/stackvm/stackvm_module.cc +++ b/src/runtime/stackvm/stackvm_module.cc @@ -106,7 +106,7 @@ class StackVMModuleNode : public runtime::ModuleNode { for (uint64_t i = 0; i < num_imports; ++i) { std::string tkey; CHECK(strm->Read(&tkey)); - std::string fkey = "module.loadbinary_" + tkey; + std::string fkey = "runtime.module.loadbinary_" + tkey; const PackedFunc* f = Registry::Get(fkey); CHECK(f != nullptr) << "Loader of " << tkey << "(" @@ -137,7 +137,7 @@ Module StackVMModuleCreate(std::unordered_map fmap, return StackVMModuleNode::Create(fmap, entry_func); } -TVM_REGISTER_GLOBAL("module.loadfile_stackvm") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_stackvm") .set_body_typed(StackVMModuleNode::LoadFromFile); } // namespace runtime diff --git a/src/runtime/system_library.cc b/src/runtime/system_library.cc index b9d751d09dc4..3eb7b1c46b45 100644 --- a/src/runtime/system_library.cc +++ b/src/runtime/system_library.cc @@ -68,12 +68,12 @@ class SystemLibrary : public Library { std::unordered_map tbl_; }; -TVM_REGISTER_GLOBAL("module._GetSystemLib") -.set_body([](TVMArgs args, TVMRetValue* rv) { 
+TVM_REGISTER_GLOBAL("runtime.SystemLib") +.set_body_typed([]() { static auto mod = CreateModuleFromLibrary( SystemLibrary::Global()); - *rv = mod; - }); + return mod; +}); } // namespace runtime } // namespace tvm diff --git a/src/runtime/vulkan/vulkan.cc b/src/runtime/vulkan/vulkan.cc index 6f91b8684660..80486406187b 100644 --- a/src/runtime/vulkan/vulkan.cc +++ b/src/runtime/vulkan/vulkan.cc @@ -1143,9 +1143,9 @@ Module VulkanModuleLoadBinary(void* strm) { return VulkanModuleCreate(smap, fmap, ""); } -TVM_REGISTER_GLOBAL("module.loadfile_vulkan").set_body_typed(VulkanModuleLoadFile); +TVM_REGISTER_GLOBAL("runtime.module.loadfile_vulkan").set_body_typed(VulkanModuleLoadFile); -TVM_REGISTER_GLOBAL("module.loadbinary_vulkan").set_body_typed(VulkanModuleLoadBinary); +TVM_REGISTER_GLOBAL("runtime.module.loadbinary_vulkan").set_body_typed(VulkanModuleLoadBinary); TVM_REGISTER_GLOBAL("device_api.vulkan").set_body([](TVMArgs args, TVMRetValue* rv) { DeviceAPI* ptr = VulkanDeviceAPI::Global().get(); diff --git a/src/target/codegen.cc b/src/target/codegen.cc index e3c30dfc4f85..a9c820160cde 100644 --- a/src/target/codegen.cc +++ b/src/target/codegen.cc @@ -244,5 +244,21 @@ runtime::Module PackImportsToLLVM(const runtime::Module& mod, return (*codegen_f)(blob_byte_array, system_lib, target_triple); } +TVM_REGISTER_GLOBAL("codegen._Build") +.set_body([](TVMArgs args, TVMRetValue *ret) { + if (args[0].IsObjectRef()) { + *ret = Build({args[0]}, args[1]); + } else { + *ret = Build(args[0], args[1]); + } + }); + +// Export two auxiliary function to the runtime namespace. +TVM_REGISTER_GLOBAL("runtime.ModulePackImportsToC") +.set_body_typed(PackImportsToC); + +TVM_REGISTER_GLOBAL("runtime.ModulePackImportsToLLVM") +.set_body_typed(PackImportsToLLVM); + } // namespace codegen } // namespace tvm diff --git a/src/target/llvm/llvm_module.cc b/src/target/llvm/llvm_module.cc index 9aaeedd2d219..0832278a6349 100644 --- a/src/target/llvm/llvm_module.cc +++ b/src/target/llvm/llvm_module.cc @@ -368,7 +368,7 @@ TVM_REGISTER_GLOBAL("codegen.llvm_version_major") *rv = major; }); -TVM_REGISTER_GLOBAL("module.loadfile_ll") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_ll") .set_body([](TVMArgs args, TVMRetValue* rv) { auto n = make_object(); n->LoadIR(args[0]); diff --git a/src/target/source/source_module.cc b/src/target/source/source_module.cc index 528b83ce389e..5f133212140c 100644 --- a/src/target/source/source_module.cc +++ b/src/target/source/source_module.cc @@ -184,10 +184,10 @@ runtime::Module DeviceSourceModuleCreate( return runtime::Module(n); } -TVM_REGISTER_GLOBAL("module.source_module_create") +TVM_REGISTER_GLOBAL("runtime.SourceModuleCreate") .set_body_typed(SourceModuleCreate); -TVM_REGISTER_GLOBAL("module.csource_module_create") +TVM_REGISTER_GLOBAL("runtime.CSourceModuleCreate") .set_body_typed(CSourceModuleCreate); } // namespace codegen } // namespace tvm diff --git a/src/tir/ir/op.cc b/src/tir/ir/op.cc index a264915e5fb5..d046f5d9df4e 100644 --- a/src/tir/ir/op.cc +++ b/src/tir/ir/op.cc @@ -21,6 +21,7 @@ * \file expr_operator.cc */ +#include #include #include #include @@ -632,4 +633,23 @@ PrimExpr trunc(PrimExpr x) { return tir::CallNode::make(x.dtype(), "trunc", {x}, tir::CallNode::PureIntrinsic); } + +// expose basic functions to node namespace +TVM_REGISTER_GLOBAL("node._const") +.set_body([](TVMArgs args, TVMRetValue* ret) { + if (args[0].type_code() == kDLInt) { + *ret = tir::make_const(args[1], args[0].operator int64_t()); + } else if (args[0].type_code() == kDLFloat) { + *ret = 
tir::make_const(args[1], args[0].operator double()); + } else { + LOG(FATAL) << "only accept int or float"; + } + }); + +TVM_REGISTER_GLOBAL("node.LargeUIntImm") +.set_body_typed(LargeUIntImm); + +TVM_REGISTER_GLOBAL("node.String") +.set_body_typed(tir::StringImmNode::make); + } // namespace tvm diff --git a/tests/cpp/build_module_test.cc b/tests/cpp/build_module_test.cc index c799da719eb4..02626a468aa2 100644 --- a/tests/cpp/build_module_test.cc +++ b/tests/cpp/build_module_test.cc @@ -76,8 +76,7 @@ TEST(BuildModule, Heterogeneous) { using namespace tvm; using namespace tvm::te; - const runtime::PackedFunc* pf = runtime::Registry::Get("module._Enabled"); - bool enabled = (*pf)("cuda"); + bool enabled = tvm::runtime::RuntimeEnabled("cuda"); if (!enabled) { LOG(INFO) << "Skip heterogeneous test because cuda is not enabled." << "\n"; diff --git a/tests/cpp/packed_func_test.cc b/tests/cpp/packed_func_test.cc index 349e493881af..8357a70b720b 100644 --- a/tests/cpp/packed_func_test.cc +++ b/tests/cpp/packed_func_test.cc @@ -235,7 +235,7 @@ TEST(PackedFunc, ObjectConversion) { pf1(ObjectRef(x), NDArray()); // testcases for modules - auto* pf = tvm::runtime::Registry::Get("module.source_module_create"); + auto* pf = tvm::runtime::Registry::Get("runtime.SourceModuleCreate"); CHECK(pf != nullptr); Module m = (*pf)("", "xyz"); rv = m; diff --git a/tests/python/contrib/test_cblas.py b/tests/python/contrib/test_cblas.py index 808c07a2e602..99614a8d93ad 100644 --- a/tests/python/contrib/test_cblas.py +++ b/tests/python/contrib/test_cblas.py @@ -37,7 +37,7 @@ def get_numpy(a, b, bb, transa, transb): return np.dot(a, b) + bb def verify(target="llvm"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." % target) return if not tvm.get_global_func("tvm.contrib.cblas.matmul", True): @@ -81,7 +81,7 @@ def get_numpy(a, b, transa, transb): return topi.testing.batch_matmul(a, b) def verify(target="llvm"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." % target) return if not tvm.get_global_func("tvm.contrib.cblas.matmul", True): diff --git a/tests/python/contrib/test_cublas.py b/tests/python/contrib/test_cublas.py index 4d4789663a9f..a3baa8c829e3 100644 --- a/tests/python/contrib/test_cublas.py +++ b/tests/python/contrib/test_cublas.py @@ -29,7 +29,7 @@ def verify_matmul_add(in_dtype, out_dtype, rtol=1e-5): s = tvm.create_schedule(C.op) def verify(target="cuda"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." % target) return if not tvm.get_global_func("tvm.contrib.cublas.matmul", True): @@ -63,7 +63,7 @@ def verify_matmul_add_igemm(in_dtype, out_dtype, rtol=1e-5): s = tvm.create_schedule(C.op) def verify(target="cuda"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." % target) return if not tvm.get_global_func("tvm.contrib.cublaslt.matmul", True): @@ -114,7 +114,7 @@ def verify_batch_matmul(in_dtype, out_dtype, rtol=1e-5): s = tvm.create_schedule(C.op) def verify(target="cuda"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." 
% target) return if not tvm.get_global_func("tvm.contrib.cublas.matmul", True): diff --git a/tests/python/contrib/test_cudnn.py b/tests/python/contrib/test_cudnn.py index 9fd6ca1fa8d0..a28f4338851c 100644 --- a/tests/python/contrib/test_cudnn.py +++ b/tests/python/contrib/test_cudnn.py @@ -35,7 +35,7 @@ def verify_conv2d(data_dtype, conv_dtype, tensor_format=0): height = 32 weight = 32 - if not tvm.module.enabled("cuda"): + if not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled...") return if not tvm.get_global_func("tvm.contrib.cudnn.conv.output_shape", True): @@ -110,7 +110,7 @@ def verify_conv3d(data_dtype, conv_dtype, tensor_format=0): height = 32 weight = 32 - if not tvm.module.enabled("cuda"): + if not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled...") return if not tvm.get_global_func("tvm.contrib.cudnn.conv.output_shape", True): diff --git a/tests/python/contrib/test_gemm_acc16.py b/tests/python/contrib/test_gemm_acc16.py index 17f920efeb8a..d83ecdc35b6e 100644 --- a/tests/python/contrib/test_gemm_acc16.py +++ b/tests/python/contrib/test_gemm_acc16.py @@ -33,7 +33,7 @@ def benchmark_fc_int8_acc16(): print("Peak {} Gops/s \n".format(peak)) def verify(target="llvm -mcpu=skylake-avx512"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." % target) return diff --git a/tests/python/contrib/test_gemm_acc32_vnni.py b/tests/python/contrib/test_gemm_acc32_vnni.py index 4f535918ba15..e810da7d3b07 100644 --- a/tests/python/contrib/test_gemm_acc32_vnni.py +++ b/tests/python/contrib/test_gemm_acc32_vnni.py @@ -41,7 +41,7 @@ def test_fc_int8_acc32(): # (ignoring processor)" error with the following setting. After LLVM 8.0 is enabled in the # test, we should use cascadelake setting. def verify(target="llvm -mcpu=cascadelake"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." % target) return diff --git a/tests/python/contrib/test_miopen.py b/tests/python/contrib/test_miopen.py index 5d82f6a14ce5..d7a46e5d94ad 100644 --- a/tests/python/contrib/test_miopen.py +++ b/tests/python/contrib/test_miopen.py @@ -32,7 +32,7 @@ def test_conv2d(): dilation_w = 1 xshape = [1, in_channel, 128, 128] - if not tvm.module.enabled("rocm"): + if not tvm.runtime.enabled("rocm"): print("skip because rocm is not enabled...") return if not tvm.get_global_func("tvm.contrib.miopen.conv2d.setup", True): diff --git a/tests/python/contrib/test_mps.py b/tests/python/contrib/test_mps.py index dac49df78d06..fc85290c56e7 100644 --- a/tests/python/contrib/test_mps.py +++ b/tests/python/contrib/test_mps.py @@ -19,7 +19,7 @@ from tvm.contrib import mps def test_matmul(): - if not tvm.module.enabled("metal"): + if not tvm.runtime.enabled("metal"): print("skip because %s is not enabled..." % "metal") return n = 1024 @@ -62,7 +62,7 @@ def verify(A, B, D, s, target="metal"): verify(A, B, D, s) def test_conv2d(): - if not tvm.module.enabled("metal"): + if not tvm.runtime.enabled("metal"): print("skip because %s is not enabled..." 
% "metal") return n = 1 diff --git a/tests/python/contrib/test_nnpack.py b/tests/python/contrib/test_nnpack.py index 2ded24d646c4..af8ae133923d 100644 --- a/tests/python/contrib/test_nnpack.py +++ b/tests/python/contrib/test_nnpack.py @@ -34,7 +34,7 @@ def test_fully_connected_inference(): s = tvm.create_schedule(D.op) def verify(target="llvm"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): pytest.skip("%s is not enabled..." % target) if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True): pytest.skip("extern function is not available") @@ -104,7 +104,7 @@ def test_convolution_inference(): def verify(target="llvm", algorithm=nnpack.ConvolutionAlgorithm.AUTO, with_bias=True): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): pytest.skip("%s is not enabled..." % target) if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True): pytest.skip("extern function is not available") @@ -166,7 +166,7 @@ def test_convolution_inference_without_weight_transform(): def verify(target="llvm", algorithm=nnpack.ConvolutionAlgorithm.AUTO, with_bias=True): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): pytest.skip("%s is not enabled..." % target) if not tvm.get_global_func("tvm.contrib.nnpack.fully_connected_inference", True): pytest.skip("extern function is not available") diff --git a/tests/python/contrib/test_random.py b/tests/python/contrib/test_random.py index 8257c4d7f9e1..f86a42447d81 100644 --- a/tests/python/contrib/test_random.py +++ b/tests/python/contrib/test_random.py @@ -25,7 +25,7 @@ def test_randint(): s = tvm.create_schedule(A.op) def verify(target="llvm"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." % target) return if not tvm.get_global_func("tvm.contrib.random.randint", True): @@ -49,7 +49,7 @@ def test_uniform(): s = tvm.create_schedule(A.op) def verify(target="llvm"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." % target) return if not tvm.get_global_func("tvm.contrib.random.uniform", True): @@ -73,7 +73,7 @@ def test_normal(): s = tvm.create_schedule(A.op) def verify(target="llvm"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." % target) return if not tvm.get_global_func("tvm.contrib.random.normal", True): diff --git a/tests/python/contrib/test_rocblas.py b/tests/python/contrib/test_rocblas.py index 0794524e7fa6..2b6d001d243d 100644 --- a/tests/python/contrib/test_rocblas.py +++ b/tests/python/contrib/test_rocblas.py @@ -28,7 +28,7 @@ def test_matmul_add(): s = tvm.create_schedule(C.op) def verify(target="rocm"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because %s is not enabled..." 
% target) return if not tvm.get_global_func("tvm.contrib.rocblas.matmul", True): diff --git a/tests/python/contrib/test_rpc_proxy.py b/tests/python/contrib/test_rpc_proxy.py index e80a47197b0d..df0ee2bb8478 100644 --- a/tests/python/contrib/test_rpc_proxy.py +++ b/tests/python/contrib/test_rpc_proxy.py @@ -37,7 +37,7 @@ def rpc_proxy_check(): web_port = 8888 prox = proxy.Proxy("localhost", web_port=web_port) def check(): - if not tvm.module.enabled("rpc"): + if not tvm.runtime.enabled("rpc"): return @tvm.register_func("rpc.test2.addone") def addone(x): diff --git a/tests/python/integration/test_dot.py b/tests/python/integration/test_dot.py index a08e7d3b1627..db5214b91d1f 100644 --- a/tests/python/integration/test_dot.py +++ b/tests/python/integration/test_dot.py @@ -52,7 +52,7 @@ def test_dot(): fapi = lower(s, [A, B, C]) def verify(target): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("Target %s is not enabled" % target) return f = tvm.codegen.build_module(fapi, target) diff --git a/tests/python/integration/test_ewise.py b/tests/python/integration/test_ewise.py index e3a1087acea3..ecfc83ca010d 100644 --- a/tests/python/integration/test_ewise.py +++ b/tests/python/integration/test_ewise.py @@ -33,7 +33,7 @@ def test_exp(): # one line to build the function. def check_device(device, host="stackvm"): - if not tvm.module.enabled(host): + if not tvm.runtime.enabled(host): return ctx = tvm.context(device, 0) if not ctx.exist: @@ -115,7 +115,7 @@ def test_multiple_cache_write(): s[C].bind(tx, tvm.thread_axis("threadIdx.x")) # one line to build the function. def check_device(device, host="stackvm"): - if not tvm.module.enabled(host): + if not tvm.runtime.enabled(host): return ctx = tvm.context(device, 0) if not ctx.exist: @@ -147,7 +147,7 @@ def test_log_pow_llvm(): # create iter var and assign them tags. bx, tx = s[B].split(B.op.axis[0], factor=32) # one line to build the function. - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return flog = tvm.build(s, [A, B], diff --git a/tests/python/integration/test_ewise_fpga.py b/tests/python/integration/test_ewise_fpga.py index c80b430b92ae..b2c783487074 100644 --- a/tests/python/integration/test_ewise_fpga.py +++ b/tests/python/integration/test_ewise_fpga.py @@ -39,7 +39,7 @@ def test_exp(): # one line to build the function. def check_device(device, host="llvm"): - if not tvm.module.enabled(host): + if not tvm.runtime.enabled(host): return ctx = tvm.context(device, 0) if not ctx.exist: @@ -78,7 +78,7 @@ def test_multi_kernel(): # one line to build the function. def check_device(device, host="llvm"): - if not tvm.module.enabled(host): + if not tvm.runtime.enabled(host): return ctx = tvm.context(device, 0) if not ctx.exist: diff --git a/tests/python/integration/test_reduce.py b/tests/python/integration/test_reduce.py index 1f094c274d01..454354e6e68b 100644 --- a/tests/python/integration/test_reduce.py +++ b/tests/python/integration/test_reduce.py @@ -39,7 +39,7 @@ def test_prim(reducer, np_reducer): # one line to build the function. def check_device(device, host="llvm"): ctx = tvm.context(device, 0) - if not tvm.module.enabled(host): + if not tvm.runtime.enabled(host): return if not ctx.exist: print("skip because %s is not enabled.." % device) @@ -81,7 +81,7 @@ def test_rfactor(): s[BF].parallel(BF.op.axis[0]) # one line to build the function. 
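The test updates in this stretch are mechanical: the target guard moves from tvm.module.enabled to tvm.runtime.enabled, which maps onto the "runtime.RuntimeEnabled" global introduced earlier in this diff. A compact sketch of the guard pattern these tests now share; run_if_enabled is only an illustrative helper:

    import tvm

    def run_if_enabled(target, fn):
        # Equivalent low-level check:
        # bool(tvm.get_global_func("runtime.RuntimeEnabled")(target))
        if not tvm.runtime.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        fn()

    run_if_enabled("llvm", lambda: print("llvm runtime available"))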
def check_target(target="llvm"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): return ctx = tvm.cpu(0) fapi = tvm.lower(s, args=[A, B]) @@ -111,7 +111,7 @@ def test_rfactor_factor_axis(): s[BF].parallel(BF.op.axis[0]) # one line to build the function. def check_target(target="llvm"): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): return ctx = tvm.cpu(0) fapi = tvm.lower(s, args=[A, B]) @@ -252,7 +252,7 @@ def fidentity(t0, t1): def check_target(): device = 'cpu' - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("skip because %s is not enabled.." % device) return ctx = tvm.context(device, 0) diff --git a/tests/python/relay/test_cpp_build_module.py b/tests/python/relay/test_cpp_build_module.py index e2db81dab1c9..165e00d9c702 100644 --- a/tests/python/relay/test_cpp_build_module.py +++ b/tests/python/relay/test_cpp_build_module.py @@ -61,7 +61,7 @@ def test_basic_build(): def test_fp16_build(): dtype = "float16" - if not tvm.module.enabled("cuda") or not tvm.gpu(0).exist: + if not tvm.runtime.enabled("cuda") or not tvm.gpu(0).exist: print("skip because cuda is not enabled.") return @@ -96,7 +96,7 @@ def test_fp16_build(): def test_fp16_conversion(): def check_conversion(tgt, ctx): - if not tvm.module.enabled(tgt): + if not tvm.runtime.enabled(tgt): print("skip because {} is not enabled.".format(tgt)) return elif tgt == "cuda" and ctx.exist and not have_fp16(ctx.compute_version): diff --git a/tests/python/relay/test_external_codegen.py b/tests/python/relay/test_external_codegen.py index 2cf32e7786ee..13193fc87e07 100644 --- a/tests/python/relay/test_external_codegen.py +++ b/tests/python/relay/test_external_codegen.py @@ -43,7 +43,7 @@ def update_lib(lib): lib_name = 'lib.so' lib_path = tmp_path.relpath(lib_name) lib.export_library(lib_path, fcompile=False, **kwargs) - lib = tvm.module.load(lib_path) + lib = tvm.runtime.load_module(lib_path) return lib diff --git a/tests/python/relay/test_external_runtime.py b/tests/python/relay/test_external_runtime.py index f25322c75b29..5fc03df8a9a0 100644 --- a/tests/python/relay/test_external_runtime.py +++ b/tests/python/relay/test_external_runtime.py @@ -21,8 +21,8 @@ import numpy as np import tvm +import tvm.runtime._ffi_api from tvm import relay -from tvm import module as _tvm_module from tvm.contrib import util tmp_path = util.tempdir() @@ -108,7 +108,7 @@ def generate_csource_module(): TVM_DLL_EXPORT_TYPED_FUNC(json_rt_0, ccompiler_wrapper_0_); ''' - csource_module = _tvm_module.csource_module_create(code, "cc") + csource_module = tvm.runtime._ffi_api.CSourceModuleCreate(code, "cc") return csource_module @@ -126,7 +126,7 @@ def generate_engine_module(): extern "C" void json_1_(float* json_input4, float* json_input5, float* json_input6, float* json_input7, float* out) { - + std::string graph = "add_2d,10,10\n" "sub_2d,10,10\n" @@ -149,7 +149,7 @@ def generate_engine_module(): extern "C" void json_0_(float* json_input0, float* json_input1, float* json_input2, float* json_input3, float* out) { - + std::string graph = "add_2d,10,10\n" "sub_2d,10,10\n" @@ -174,7 +174,7 @@ def generate_engine_module(): ''' gen_json_engine() - csource_module = _tvm_module.csource_module_create(code, "cc") + csource_module = tvm.runtime._ffi_api.CSourceModuleCreate(code, "cc") return csource_module @@ -444,7 +444,7 @@ def run_extern(label, get_extern_src, **kwargs): lib_path = tmp_path.relpath(lib_name) csource_module.export_library(lib_path, fcompile=False, **kwargs) # load module for 
execution. - lib = tvm.module.load(lib_path) + lib = tvm.runtime.load_module(lib_path) mod = tvm.contrib.graph_runtime.create(graph_json, lib, tvm.cpu(0)) x_data = np.random.rand(10, 10).astype('float32') @@ -507,14 +507,14 @@ def test_json_extern(): lib = get_synthetic_lib() - ext_lib = tvm.module.load(subgraph_path, "examplejson") + ext_lib = tvm.runtime.load_module(subgraph_path, "examplejson") lib.import_module(ext_lib) lib_name = 'external.so' lib_path = tmp_path.relpath(lib_name) lib.export_library(lib_path) # load module for execution. - lib = tvm.module.load(lib_path) + lib = tvm.runtime.load_module(lib_path) mod = tvm.contrib.graph_runtime.create(graph_json, lib, tvm.cpu(0)) x_data = np.random.rand(10, 10).astype('float32') diff --git a/tests/python/relay/test_op_level5.py b/tests/python/relay/test_op_level5.py index eb21f338ef07..03e700b3df83 100644 --- a/tests/python/relay/test_op_level5.py +++ b/tests/python/relay/test_op_level5.py @@ -461,7 +461,7 @@ def verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs): func = relay.Function([cls_prob, bbox_pred, im_info], z) func = run_infer_type(func) for target in ['llvm', 'cuda']: - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("Skip test because %s is not enabled." % target) continue ctx = tvm.context(target, 0) diff --git a/tests/python/relay/test_pass_annotation.py b/tests/python/relay/test_pass_annotation.py index 14d53a0e2c2c..69ce4c5211be 100644 --- a/tests/python/relay/test_pass_annotation.py +++ b/tests/python/relay/test_pass_annotation.py @@ -557,7 +557,7 @@ def expected(): def test_check_run(): for dev, tgt in [("opencl", "opencl"), ("cuda", "cuda"), ("opencl", str(tvm.target.intel_graphics()))]: - if not tvm.module.enabled(dev): + if not tvm.runtime.enabled(dev): print("Skip test because %s is not enabled." % dev) continue run_fusible_network(dev, tgt) @@ -566,7 +566,7 @@ def test_check_run(): def test_tuple_get_item(): dev = "cuda" - if not tvm.module.enabled(dev): + if not tvm.runtime.enabled(dev): print("Skip test because %s is not enabled." % dev) return diff --git a/tests/python/relay/test_pass_partition_graph.py b/tests/python/relay/test_pass_partition_graph.py index 4ffb37311696..75d3c932f05a 100644 --- a/tests/python/relay/test_pass_partition_graph.py +++ b/tests/python/relay/test_pass_partition_graph.py @@ -182,7 +182,7 @@ def update_lib(lib): lib_name = 'lib.so' lib_path = tmp_path.relpath(lib_name) lib.export_library(lib_path, fcompile=False, **kwargs) - lib = tvm.module.load(lib_path) + lib = tvm.runtime.load_module(lib_path) return lib diff --git a/tests/python/relay/test_vm_serialization.py b/tests/python/relay/test_vm_serialization.py index b31fce763d7c..6f4e09a393ff 100644 --- a/tests/python/relay/test_vm_serialization.py +++ b/tests/python/relay/test_vm_serialization.py @@ -113,7 +113,7 @@ def test_serializer(): code, lib = exe.save() assert isinstance(code, bytearray) - assert isinstance(lib, tvm.module.Module) + assert isinstance(lib, tvm.runtime.Module) def test_save_load(): @@ -133,7 +133,7 @@ def test_save_load(): with open(tmp.relpath("code.ro"), "wb") as fo: fo.write(code) - loaded_lib = tvm.module.load(path_lib) + loaded_lib = tvm.runtime.load_module(path_lib) loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read()) # deserialize. 
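test_external_runtime.py now imports tvm.runtime._ffi_api and builds its C-source modules through the renamed "runtime.CSourceModuleCreate" global instead of tvm.module.csource_module_create. A minimal sketch of that constructor, with a trivial stand-in for the generated code:

    import tvm
    import tvm.runtime._ffi_api

    code = "// stand-in for generated C source\n"      # hypothetical content
    csrc_mod = tvm.runtime._ffi_api.CSourceModuleCreate(code, "cc")
    assert isinstance(csrc_mod, tvm.runtime.Module)
    print(csrc_mod.type_key)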
diff --git a/tests/python/unittest/test_codegen_blob.py b/tests/python/unittest/test_codegen_blob.py index 1d715ba68264..2e0cee24097e 100644 --- a/tests/python/unittest/test_codegen_blob.py +++ b/tests/python/unittest/test_codegen_blob.py @@ -24,7 +24,7 @@ def test_resnet18(): for device in ["llvm", "cuda"]: - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("skip because %s is not enabled..." % device) return @@ -53,7 +53,7 @@ def verify(data): with open(temp.relpath("deploy_param.params"), "wb") as fo: fo.write(relay.save_param_dict(graph_params)) - loaded_lib = tvm.module.load(path_lib) + loaded_lib = tvm.runtime.load_module(path_lib) loaded_json = open(temp.relpath("deploy_graph.json")).read() loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read()) data = np.random.uniform(-1, 1, size=(1, 3, 224, 224)).astype("float32") @@ -70,7 +70,7 @@ def verify(data): def test_system_lib(): ctx = tvm.gpu(0) for device in ["llvm", "cuda"]: - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("skip because %s is not enabled..." % device) return nn = 12 @@ -92,7 +92,7 @@ def test_system_lib(): # Load dll, will trigger system library registration dll = ctypes.CDLL(path_lib) # Load the system wide library - m = tvm.module.system_lib() + m = tvm.runtime.system_lib() a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), ctx) b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), ctx) m['add'](a, b) diff --git a/tests/python/unittest/test_codegen_bool.py b/tests/python/unittest/test_codegen_bool.py index 934812b36a6a..33711cbcdb63 100644 --- a/tests/python/unittest/test_codegen_bool.py +++ b/tests/python/unittest/test_codegen_bool.py @@ -29,7 +29,7 @@ def test_cmp_load_store(): def check_llvm(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return s = tvm.create_schedule(D.op) xo, xi = s[C].split(C.op.axis[0], factor=4) diff --git a/tests/python/unittest/test_codegen_c_host.py b/tests/python/unittest/test_codegen_c_host.py index c08fcd6afbc1..271237b51503 100644 --- a/tests/python/unittest/test_codegen_c_host.py +++ b/tests/python/unittest/test_codegen_c_host.py @@ -31,7 +31,7 @@ def check_c(): temp = util.tempdir() path_dso = temp.relpath("temp.so") mhost.export_library(path_dso) - m = tvm.module.load(path_dso) + m = tvm.runtime.load_module(path_dso) fadd = m['fadd'] ctx = tvm.cpu(0) # launch the kernel. @@ -79,7 +79,7 @@ def check_c(): temp = util.tempdir() path_dso = temp.relpath("temp.so") mhost.export_library(path_dso) - m = tvm.module.load(path_dso) + m = tvm.runtime.load_module(path_dso) fadd = m["fadd_pipeline"] ctx = tvm.cpu(0) # launch the kernel. 
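The system-lib path in test_codegen_blob.py also switches namespaces: loading the shared object registers the bundled functions, and tvm.runtime.system_lib() (backed by the "runtime.SystemLib" global renamed elsewhere in this diff) hands back the module. A hedged sketch with an illustrative library name:

    import ctypes
    import tvm

    # Loading the DSO triggers system-library registration, as in the test above.
    ctypes.CDLL("deploy_syslib.so")            # "deploy_syslib.so" is illustrative
    syslib = tvm.runtime.system_lib()          # was tvm.module.system_lib()
    fadd = syslib["add"]                       # "add" mirrors the kernel name used above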
@@ -107,7 +107,7 @@ def check_c(): temp = util.tempdir() path_dso = temp.relpath("temp.so") mhost.export_library(path_dso) - m = tvm.module.load(path_dso) + m = tvm.runtime.load_module(path_dso) fadd = m['reinterpret'] ctx = tvm.cpu(0) n = nn diff --git a/tests/python/unittest/test_codegen_cross_llvm.py b/tests/python/unittest/test_codegen_cross_llvm.py index 5018bf7e3b0a..6604038ab2ef 100644 --- a/tests/python/unittest/test_codegen_cross_llvm.py +++ b/tests/python/unittest/test_codegen_cross_llvm.py @@ -42,7 +42,7 @@ def verify_elf(path, e_machine): assert struct.unpack(endian + 'h', arr[0x12:0x14])[0] == e_machine def build_i386(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): print("Skip because llvm is not enabled..") return temp = util.tempdir() @@ -54,7 +54,7 @@ def build_i386(): def build_arm(): target = "llvm -target=armv7-none-linux-gnueabihf" - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("Skip because %s is not enabled.." % target) return temp = util.tempdir() diff --git a/tests/python/unittest/test_codegen_cuda.py b/tests/python/unittest/test_codegen_cuda.py index e4bad2f6e7c1..bfeb6520e3ee 100644 --- a/tests/python/unittest/test_codegen_cuda.py +++ b/tests/python/unittest/test_codegen_cuda.py @@ -27,7 +27,7 @@ def test_cuda_vectorize_add(): num_thread = 8 def check_cuda(dtype, n, lanes): - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return if dtype == "float16": @@ -65,7 +65,7 @@ def check_cuda(dtype, n, lanes): def test_cuda_multiply_add(): num_thread = 8 def check_cuda(dtype, n, lanes): - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return if dtype == "int8" and not have_int8(tvm.gpu(0).compute_version): @@ -97,7 +97,7 @@ def check_cuda(dtype, n, lanes): def test_cuda_vectorize_load(): num_thread = 8 def check_cuda(dtype, n, lanes): - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return ctx = tvm.gpu(0) @@ -118,7 +118,7 @@ def check_cuda(dtype, n, lanes): def test_cuda_make_int8x4(): def check_cuda(n, value): - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return lanes = 4 @@ -153,7 +153,7 @@ def check_inf_nan(ctx, n, value, dtype): # Only need to test compiling here fun(a, c) - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return @@ -168,7 +168,7 @@ def check_inf_nan(ctx, n, value, dtype): def test_cuda_shuffle(): - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return @@ -213,7 +213,7 @@ def vectorizer(op): def test_cuda_reducition_binding(): - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return @@ -232,7 +232,7 @@ def test_cuda_reducition_binding(): fcuda = tvm.build(s, [A, B], "cuda") def test_rfactor_predicates(): - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not 
tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return @@ -266,7 +266,7 @@ def test_rfactor_predicates(): fcuda = tvm.build(s, [A, B], "cuda") -@unittest.skipIf(not tvm.gpu(0).exist or not tvm.module.enabled("cuda"), "skip because cuda is not enabled..") +@unittest.skipIf(not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"), "skip because cuda is not enabled..") def test_cuda_const_float_to_half(): # This import is required to use nvcc to perform code gen; # otherwise it is found that the code gen is done by nvrtc. diff --git a/tests/python/unittest/test_codegen_device.py b/tests/python/unittest/test_codegen_device.py index 1c3ece2c42a0..fe416e6312d9 100644 --- a/tests/python/unittest/test_codegen_device.py +++ b/tests/python/unittest/test_codegen_device.py @@ -82,7 +82,7 @@ def check_target(device, host="stackvm"): ctx = tvm.context(device, 0) if not ctx.exist: return - if not tvm.module.enabled(host): + if not tvm.runtime.enabled(host): return mhost = tvm.codegen.build_module(fsplits[0], host) mdev = tvm.codegen.build_module(fsplits[1:], device) @@ -102,7 +102,7 @@ def check_module_save(device, host="stackvm"): ctx = tvm.context(device, 0) if not ctx.exist: return - if not tvm.module.enabled(host): + if not tvm.runtime.enabled(host): return if device == "cuda": fmt = "ptx" @@ -115,7 +115,7 @@ def check_module_save(device, host="stackvm"): temp = util.tempdir() mpath = temp.relpath("test.%s" % fmt) mdev.save(mpath) - mdev2 = tvm.module.load(mpath) + mdev2 = tvm.runtime.load_module(mpath) mhost.import_module(mdev2) f = mhost.entry_func # launch the kernel. diff --git a/tests/python/unittest/test_codegen_extern.py b/tests/python/unittest/test_codegen_extern.py index 2062221d2160..03efee58cc57 100644 --- a/tests/python/unittest/test_codegen_extern.py +++ b/tests/python/unittest/test_codegen_extern.py @@ -50,7 +50,7 @@ def extern_generator_gpu(ins, outs): print(tvm.lower(s_gpu, [A, C_gpu], simple_mode=True)) def check_target(target): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): return s = s_gpu if target in ['opencl', 'cuda'] else s_cpu C = C_gpu if target in ['opencl', 'cuda'] else C_cpu @@ -85,7 +85,7 @@ def my_extern_array_func1(aa, bb): def check_target(target): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): return # build and invoke the kernel. f = tvm.build(s, [A, C], target) @@ -115,7 +115,7 @@ def extern_generator(ins, outs): s = tvm.create_schedule(C.op) def check_target(target): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): return # build and invoke the kernel. 
f = tvm.build(s, [A, C], target) diff --git a/tests/python/unittest/test_codegen_llvm.py b/tests/python/unittest/test_codegen_llvm.py index 26a6f82512a8..a37bc2a736e3 100644 --- a/tests/python/unittest/test_codegen_llvm.py +++ b/tests/python/unittest/test_codegen_llvm.py @@ -50,7 +50,7 @@ def test_llvm_import(): tvm.call_pure_extern("float32", "my_add", A(*i), 1.0), name='B') def check_llvm(use_file): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return if not clang.find_clang(required=False): print("skip because clang is not available") @@ -95,7 +95,7 @@ def test_llvm_large_uintimm(): s = tvm.create_schedule(A.op) def check_llvm(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return f = tvm.build(s, [A], "llvm") ctx = tvm.cpu(0) @@ -126,7 +126,7 @@ def test_llvm_add_pipeline(): s[C].vectorize(xi) def check_llvm(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return # Specifically allow offset to test codepath when offset is available Ab = tvm.decl_buffer( @@ -167,7 +167,7 @@ def test_llvm_persist_parallel(): s[C].pragma(xi, "parallel_stride_pattern") def check_llvm(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return # BUILD and invoke the kernel. f = tvm.build(s, [A, C], "llvm") @@ -185,7 +185,7 @@ def check_llvm(): def test_llvm_flip_pipeline(): def check_llvm(nn, base): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return n = tvm.convert(nn) A = tvm.placeholder((n + base), name='A') @@ -212,7 +212,7 @@ def check_llvm(nn, base): def test_llvm_vadd_pipeline(): def check_llvm(n, lanes): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return A = tvm.placeholder((n,), name='A', dtype="float32x%d" % lanes) B = tvm.compute((n,), lambda i: A[i], name='B') @@ -241,7 +241,7 @@ def check_llvm(n, lanes): def test_llvm_madd_pipeline(): def check_llvm(nn, base, stride): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return n = tvm.convert(nn) A = tvm.placeholder((n + base, stride), name='A') @@ -275,7 +275,7 @@ def test_llvm_temp_space(): s = tvm.create_schedule(C.op) def check_llvm(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return # build and invoke the kernel. 
f = tvm.build(s, [A, C], "llvm") @@ -300,7 +300,7 @@ def test_multiple_func(): s[C].parallel(xo) s[C].vectorize(xi) def check_llvm(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return # build two functions f2 = tvm.lower(s, [A, B, C], name="fadd1") @@ -326,7 +326,7 @@ def check_llvm(): def test_llvm_condition(): def check_llvm(n, offset): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return A = tvm.placeholder((n, ), name='A') C = tvm.compute((n,), lambda i: tvm.if_then_else(i >= offset, A[i], 0.0), name='C') @@ -346,7 +346,7 @@ def check_llvm(n, offset): def test_llvm_bool(): def check_llvm(n): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return A = tvm.placeholder((n, ), name='A', dtype="int32") C = tvm.compute((n,), lambda i: A[i].equal(1).astype("float"), name='C') @@ -365,7 +365,7 @@ def check_llvm(n): def test_rank_zero(): def check_llvm(n): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return A = tvm.placeholder((n, ), name='A') scale = tvm.placeholder((), name='scale') @@ -388,7 +388,7 @@ def check_llvm(n): def test_rank_zero_bound_checkers(): def check_llvm(n): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return with tvm.build_config(instrument_bound_checkers=True): A = tvm.placeholder((n, ), name='A') @@ -568,7 +568,7 @@ def test_dwarf_debug_information(): s[C].parallel(xo) s[C].vectorize(xi) def check_llvm_object(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return if tvm.codegen.llvm_version_major() < 5: return @@ -605,7 +605,7 @@ def check_llvm_object(): assert re.search(r"""DW_AT_name.*fadd2""", str(output)) def check_llvm_ir(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return if tvm.codegen.llvm_version_major() < 5: return diff --git a/tests/python/unittest/test_codegen_opencl.py b/tests/python/unittest/test_codegen_opencl.py index 71fc4f9a7f35..cf89608c1302 100644 --- a/tests/python/unittest/test_codegen_opencl.py +++ b/tests/python/unittest/test_codegen_opencl.py @@ -51,7 +51,7 @@ def check_select(ctx, n, dtype): # Only need to test compiling here fun(a, c) - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because opencl is not enabled..") return @@ -79,7 +79,7 @@ def check_inf_nan(ctx, n, value, dtype): # Only need to test compiling here fun(a, c) - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("skip because opencl is not enabled..") return diff --git a/tests/python/unittest/test_codegen_rocm.py b/tests/python/unittest/test_codegen_rocm.py index 9f8ab772c5f7..73f76465649a 100644 --- a/tests/python/unittest/test_codegen_rocm.py +++ b/tests/python/unittest/test_codegen_rocm.py @@ -23,7 +23,7 @@ bx = tvm.thread_axis("blockIdx.x") by = tvm.thread_axis("blockIdx.y") -@unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..") +@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..") def test_rocm_cross_thread_reduction(): # based on the reduction tutorial n = tvm.size_var("n") @@ -35,7 +35,7 @@ def test_rocm_cross_thread_reduction(): ko, ki = s[B].split(B.op.reduce_axis[0], factor=16) BF = s.rfactor(B, ki) xo, xi = s[B].split(s[B].op.axis[0], factor=32) - s[B].bind(xo, bx) + s[B].bind(xo, bx) s[B].bind(xi, ty) s[B].bind(s[B].op.reduce_axis[0], tx) s[BF].compute_at(s[B], 
s[B].op.reduce_axis[0]) @@ -50,8 +50,8 @@ def test_rocm_cross_thread_reduction(): tvm.testing.assert_allclose( b.asnumpy(), np.sum(a.asnumpy(), axis=1), rtol=1e-4) - -@unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..") + +@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..") def test_rocm_inf_nan(): def check_inf_nan(ctx, n, value, dtype): A = tvm.placeholder((n,), name='A', dtype=dtype) @@ -74,7 +74,7 @@ def check_inf_nan(ctx, n, value, dtype): check_inf_nan(ctx, 1, float('nan'), 'float32') check_inf_nan(ctx, 1, float('nan'), 'float64') -@unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..") +@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..") def test_rocm_reducition_binding(): k = tvm.reduce_axis((0, 32), 'k') A = tvm.placeholder((96, 32), name='A') @@ -88,7 +88,7 @@ def test_rocm_reducition_binding(): mo, _ = s[B].split(B.op.axis[0], 32) s[B].bind(mo, bx) -@unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..") +@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..") def test_rocm_copy(): def check_rocm(dtype, n): @@ -106,7 +106,7 @@ def check_rocm(dtype, n): peturb = np.random.uniform(low=0.5, high=1.5) check_rocm(dtype, int(peturb * (2 ** logN))) -@unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..") +@unittest.skipIf(not tvm.rocm(0).exist or not tvm.runtime.enabled("rocm"), "skip because rocm is not enabled..") def test_rocm_vectorize_add(): num_thread = 8 diff --git a/tests/python/unittest/test_codegen_vm_basic.py b/tests/python/unittest/test_codegen_vm_basic.py index eebcb2e71653..60a948db68bb 100644 --- a/tests/python/unittest/test_codegen_vm_basic.py +++ b/tests/python/unittest/test_codegen_vm_basic.py @@ -19,7 +19,7 @@ def run_jit(fapi, check): for target in ["llvm", "stackvm"]: - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): continue f = tvm.codegen.build_module(fapi, target) s = f.get_source() diff --git a/tests/python/unittest/test_codegen_vulkan.py b/tests/python/unittest/test_codegen_vulkan.py index 7c6de2e326d1..d9e3c4399675 100644 --- a/tests/python/unittest/test_codegen_vulkan.py +++ b/tests/python/unittest/test_codegen_vulkan.py @@ -20,7 +20,7 @@ def test_vector_comparison(): - if not tvm.module.enabled("vulkan"): + if not tvm.runtime.enabled("vulkan"): print("Skipping due to no Vulkan module") return @@ -62,7 +62,7 @@ def check_correct_assembly(dtype): def test_vulkan_copy(): def check_vulkan(dtype, n): - if not tvm.vulkan(0).exist or not tvm.module.enabled("vulkan"): + if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"): print("skip because vulkan is not enabled..") return A = tvm.placeholder((n,), name='A', dtype=dtype) @@ -84,7 +84,7 @@ def test_vulkan_vectorize_add(): num_thread = 8 def check_vulkan(dtype, n, lanes): - if not tvm.vulkan(0).exist or not tvm.module.enabled("vulkan"): + if not tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"): print("skip because vulkan is not enabled..") return A = tvm.placeholder((n,), name='A', dtype="%sx%d" % (dtype, lanes)) @@ -117,7 +117,7 @@ def test_vulkan_stress(): def run_stress(): def worker(): - if not tvm.vulkan(0).exist or not tvm.module.enabled("vulkan"): + if not 
tvm.vulkan(0).exist or not tvm.runtime.enabled("vulkan"): print("skip because vulkan is not enabled..") return A = tvm.placeholder((n,), name='A', dtype="float32") diff --git a/tests/python/unittest/test_ir_builder.py b/tests/python/unittest/test_ir_builder.py index 527f68669281..748662918edb 100644 --- a/tests/python/unittest/test_ir_builder.py +++ b/tests/python/unittest/test_ir_builder.py @@ -91,7 +91,7 @@ def test_device_ir(A, B, C): name="vector_add", dtype=dtype) s = tvm.create_schedule(C.op) def check_target(target): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): return # build and invoke the kernel. fadd = tvm.build(s, [A, B, C], target) @@ -134,7 +134,7 @@ def test_device_ir(A, B, C): stmt = tvm.schedule.ScheduleOps(s, bounds) def check_target(target): n = 1024 - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): return # build and invoke the kernel. fadd = tvm.build(s, [A, B, C], target) diff --git a/tests/python/unittest/test_lang_buffer.py b/tests/python/unittest/test_lang_buffer.py index e681bd9a5230..9700bbce22b9 100644 --- a/tests/python/unittest/test_lang_buffer.py +++ b/tests/python/unittest/test_lang_buffer.py @@ -137,7 +137,7 @@ def test_buffer_broadcast(): s = tvm.create_schedule(C.op) def check(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return fadd = tvm.build(s, [A, B, C], target='llvm', name='bcast_add', binds={A:Ab, B:Bb}) ctx = tvm.cpu(0) @@ -165,7 +165,7 @@ def test_buffer_broadcast_expr(): s = tvm.create_schedule(C.op) def check_stride(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return fadd = tvm.build(s, [A, B, C, o1, x], target='llvm', name='bcast_add', binds={A:Ab, B:Bb, C:Cc}) @@ -177,7 +177,7 @@ def check_stride(): tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) def check_no_stride(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return fadd = tvm.build(s, [A, B, C, o1, x], target='llvm', name='bcast_add', binds={A: Ab, B: Bb, C: Cc}) @@ -189,7 +189,7 @@ def check_no_stride(): tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) def check_auto_bind(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return # Let build bind buffers fadd = tvm.build(s, [A, B, C, o1, x], target='llvm', name='bcast_add') diff --git a/tests/python/unittest/test_pass_bound_checkers.py b/tests/python/unittest/test_pass_bound_checkers.py index 2cefe22432b0..e62e539178eb 100644 --- a/tests/python/unittest/test_pass_bound_checkers.py +++ b/tests/python/unittest/test_pass_bound_checkers.py @@ -449,7 +449,7 @@ def test_out_of_bounds_tensors_with_diff_shapes3D_llvm(a_shape, b_shape, c_shape @pytest.mark.xfail def test_out_of_bounds_tensors_with_zero_shape_op_with_not_zero_shape_llvm(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return n = 64 A = tvm.placeholder((n, ), name='A') diff --git a/tests/python/unittest/test_pass_lower_intrin.py b/tests/python/unittest/test_pass_lower_intrin.py index 02f8118d56fc..e8bc6b1fd588 100644 --- a/tests/python/unittest/test_pass_lower_intrin.py +++ b/tests/python/unittest/test_pass_lower_intrin.py @@ -40,7 +40,7 @@ def make_binds(i): C = tvm.compute((n,), make_binds) s = tvm.create_schedule([C.op]) - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return f = tvm.build(s, [A, B, C], "llvm") diff --git a/tests/python/unittest/test_pass_rewrite_for_tensor_core.py 
b/tests/python/unittest/test_pass_rewrite_for_tensor_core.py index 272d2436dd6a..cc99a25d81e9 100644 --- a/tests/python/unittest/test_pass_rewrite_for_tensor_core.py +++ b/tests/python/unittest/test_pass_rewrite_for_tensor_core.py @@ -204,7 +204,7 @@ def tensor_core_batch_matmul(warp_tile_m=16, m=64, n=32, l=96, batch=2): np.testing.assert_allclose(c_np, c.asnumpy(), rtol=1e-3) def test_tensor_core_matmul(): - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return if not nvcc.have_tensorcore(tvm.gpu(0).compute_version): @@ -216,7 +216,7 @@ def test_tensor_core_matmul(): tensor_core_matmul(32) #test with warp_tile 32x8x16 def test_tensor_core_batch_matmul(): - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return if not nvcc.have_tensorcore(tvm.gpu(0).compute_version): diff --git a/tests/python/unittest/test_runtime_graph.py b/tests/python/unittest/test_runtime_graph.py index f331f5b33ee9..da5bea1f19ff 100644 --- a/tests/python/unittest/test_runtime_graph.py +++ b/tests/python/unittest/test_runtime_graph.py @@ -51,7 +51,7 @@ def test_graph_simple(): graph = json.dumps(graph) def check_verify(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): print("Skip because llvm is not enabled") return mlib = tvm.build(s, [A, B], "llvm", name="myadd") @@ -62,7 +62,7 @@ def check_verify(): np.testing.assert_equal(out.asnumpy(), a + 1) def check_remote(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): print("Skip because llvm is not enabled") return mlib = tvm.build(s, [A, B], "llvm", name="myadd") @@ -92,7 +92,7 @@ def check_sharing(): params = {'x': x_in} graph, lib, params = relay.build(func, target="llvm", params=params) - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): print("Skip because llvm is not enabled") return mod_shared = graph_runtime.create(graph, lib, tvm.cpu(0)) diff --git a/tests/python/unittest/test_runtime_graph_debug.py b/tests/python/unittest/test_runtime_graph_debug.py index 717b23c22689..aeb4809e6c86 100644 --- a/tests/python/unittest/test_runtime_graph_debug.py +++ b/tests/python/unittest/test_runtime_graph_debug.py @@ -53,7 +53,7 @@ def test_graph_simple(): graph = json.dumps(graph) def check_verify(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): print("Skip because llvm is not enabled") return mlib = tvm.build(s, [A, B], "llvm", name="myadd") @@ -105,7 +105,7 @@ def check_verify(): assert(not os.path.exists(directory)) def check_remote(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): print("Skip because llvm is not enabled") return mlib = tvm.build(s, [A, B], "llvm", name="myadd") diff --git a/tests/python/unittest/test_runtime_heterogeneous.py b/tests/python/unittest/test_runtime_heterogeneous.py index eb51faef9090..a718ed8342e6 100644 --- a/tests/python/unittest/test_runtime_heterogeneous.py +++ b/tests/python/unittest/test_runtime_heterogeneous.py @@ -118,12 +118,12 @@ def test_simplex_data_transferring(): host = "cpu" target_host = "llvm" host_ctx = tvm.context(host) - if not tvm.module.enabled(target_host): + if not tvm.runtime.enabled(target_host): print("Skip test because llvm is not enabled.") return def check_device(device, target_device): - if not tvm.module.enabled(target_device): + if not 
tvm.runtime.enabled(target_device): print("Skip test because {} is not enabled.".format(target_device)) return @@ -306,12 +306,12 @@ def test_duplex_data_transferring(): host = "cpu" target_host = "llvm" host_ctx = tvm.context(host) - if not tvm.module.enabled(target_host): + if not tvm.runtime.enabled(target_host): print("Skip test because llvm is not enabled.") return def check_device(device, target_device): - if not tvm.module.enabled(target_device): + if not tvm.runtime.enabled(target_device): print("Skip test because {} is not enabled.".format(target_device)) return @@ -381,7 +381,7 @@ def check_load_module(): mhost.export_library(path_lib) with open(temp.relpath("deploy.json"), "w") as out_file: out_file.write(graph) - loaded_lib = tvm.module.load(path_lib) + loaded_lib = tvm.runtime.load_module(path_lib) loaded_graph = open(temp.relpath("deploy.json")).read() mod = graph_runtime.create(loaded_graph, loaded_lib, ctx) mod.set_input(**params) diff --git a/tests/python/unittest/test_runtime_micro.py b/tests/python/unittest/test_runtime_micro.py index e94c09991b35..f6114dae6f1b 100644 --- a/tests/python/unittest/test_runtime_micro.py +++ b/tests/python/unittest/test_runtime_micro.py @@ -43,7 +43,7 @@ def relay_micro_build(func, dev_config, params=None): Return ------ - mod : tvm.module.Module + mod : tvm.runtime.Module graph runtime module for the target device """ with tvm.build_config(disable_vectorize=True): @@ -57,7 +57,7 @@ def relay_micro_build(func, dev_config, params=None): def test_alloc(): """Test tensor allocation on the device.""" - if not tvm.module.enabled("micro_dev"): + if not tvm.runtime.enabled("micro_dev"): return shape = (1024,) dtype = "float32" @@ -70,7 +70,7 @@ def test_alloc(): def test_add(): """Test a module which performs addition.""" - if not tvm.module.enabled("micro_dev"): + if not tvm.runtime.enabled("micro_dev"): return shape = (1024,) dtype = "float32" @@ -99,7 +99,7 @@ def test_add(): def test_workspace_add(): """Test a module which uses a workspace to compute an intermediate value.""" - if not tvm.module.enabled("micro_dev"): + if not tvm.runtime.enabled("micro_dev"): return shape = (1024,) dtype = "float32" @@ -129,7 +129,7 @@ def test_workspace_add(): def test_graph_runtime(): """Test a program which uses the graph runtime.""" - if not tvm.module.enabled("micro_dev"): + if not tvm.runtime.enabled("micro_dev"): return shape = (1024,) dtype = "float32" @@ -153,7 +153,7 @@ def test_graph_runtime(): def test_multiple_modules(): """Test loading multiple modules on the device simultaneously.""" - if not tvm.module.enabled("micro_dev"): + if not tvm.runtime.enabled("micro_dev"): return shape = (1024,) dtype = "float32" @@ -185,7 +185,7 @@ def test_multiple_modules(): def test_interleave_sessions(): """Test closing and reopening sessions.""" - if not tvm.module.enabled("micro_dev"): + if not tvm.runtime.enabled("micro_dev"): return shape = (1024,) dtype = "float32" @@ -219,7 +219,7 @@ def test_interleave_sessions(): def test_nested_sessions(): """Test entering and exiting nested session contexts.""" - if not tvm.module.enabled("micro_dev"): + if not tvm.runtime.enabled("micro_dev"): return shape = (1024,) dtype = "float32" @@ -246,7 +246,7 @@ def test_nested_sessions(): def test_inactive_session_use(): """Test the use of objects allocated in a session that is no longer active.""" - if not tvm.module.enabled("micro_dev"): + if not tvm.runtime.enabled("micro_dev"): return shape = (1024,) dtype = "float32" diff --git 
a/tests/python/unittest/test_runtime_module_export.py b/tests/python/unittest/test_runtime_module_export.py index 951ea97bf252..ee82da65c8f4 100644 --- a/tests/python/unittest/test_runtime_module_export.py +++ b/tests/python/unittest/test_runtime_module_export.py @@ -51,15 +51,16 @@ def generate_engine_module(): Engine engine; } ''' + import tvm.runtime._ffi_api gen_engine_header() - csource_module = tvm.module.csource_module_create(code, "cc") + csource_module = tvm.runtime._ffi_api.CSourceModuleCreate(code, "cc") return csource_module def test_mod_export(): def verify_gpu_mod_export(obj_format): for device in ["llvm", "cuda"]: - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("skip because %s is not enabled..." % device) return @@ -79,14 +80,14 @@ def verify_gpu_mod_export(obj_format): path_lib = temp.relpath(file_name) resnet18_gpu_lib.imported_modules[0].import_module(resnet50_cpu_lib) resnet18_gpu_lib.export_library(path_lib) - loaded_lib = tvm.module.load(path_lib) + loaded_lib = tvm.runtime.load_module(path_lib) assert loaded_lib.type_key == "library" assert loaded_lib.imported_modules[0].type_key == "cuda" assert loaded_lib.imported_modules[0].imported_modules[0].type_key == "library" def verify_multi_dso_mod_export(obj_format): for device in ["llvm"]: - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("skip because %s is not enabled..." % device) return @@ -108,13 +109,13 @@ def verify_multi_dso_mod_export(obj_format): path_lib = temp.relpath(file_name) resnet18_cpu_lib.import_module(f) resnet18_cpu_lib.export_library(path_lib) - loaded_lib = tvm.module.load(path_lib) + loaded_lib = tvm.runtime.load_module(path_lib) assert loaded_lib.type_key == "library" assert loaded_lib.imported_modules[0].type_key == "library" def verify_json_import_dso(obj_format): for device in ["llvm"]: - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("skip because %s is not enabled..." % device) return @@ -148,7 +149,7 @@ def verify_json_import_dso(obj_format): s = tvm.create_schedule(B.op) f = tvm.build(s, [A, B], "llvm", name="myadd") try: - ext_lib = tvm.module.load(subgraph_path, "examplejson") + ext_lib = tvm.runtime.load_module(subgraph_path, "examplejson") except: print("skip because Loader of examplejson is not presented") return @@ -160,7 +161,7 @@ def verify_json_import_dso(obj_format): file_name = "deploy_lib.tar" path_lib = temp.relpath(file_name) ext_lib.export_library(path_lib) - lib = tvm.module.load(path_lib) + lib = tvm.runtime.load_module(path_lib) assert lib.type_key == "examplejson" assert lib.imported_modules[0].type_key == "library" @@ -170,7 +171,7 @@ def verify_multi_c_mod_export(): print("Skip test because gcc is not available.") for device in ["llvm"]: - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("skip because %s is not enabled..." 
% device) return @@ -191,7 +192,7 @@ def verify_multi_c_mod_export(): resnet18_cpu_lib.import_module(engine_module) kwargs = {"options": ["-O2", "-std=c++11", "-I" + header_file_dir_path.relpath("")]} resnet18_cpu_lib.export_library(path_lib, fcompile=False, **kwargs) - loaded_lib = tvm.module.load(path_lib) + loaded_lib = tvm.runtime.load_module(path_lib) assert loaded_lib.type_key == "library" assert loaded_lib.imported_modules[0].type_key == "library" assert loaded_lib.imported_modules[1].type_key == "library" diff --git a/tests/python/unittest/test_module_load.py b/tests/python/unittest/test_runtime_module_load.py similarity index 93% rename from tests/python/unittest/test_module_load.py rename to tests/python/unittest/test_runtime_module_load.py index b1ef1c6fbb17..e47db94c4353 100644 --- a/tests/python/unittest/test_module_load.py +++ b/tests/python/unittest/test_runtime_module_load.py @@ -32,7 +32,7 @@ import numpy as np path_dso = sys.argv[1] dtype = sys.argv[2] -ff = tvm.module.load(path_dso) +ff = tvm.runtime.load_module(path_dso) a = tvm.nd.array(np.zeros(10, dtype=dtype)) ff(a) np.testing.assert_equal(a.asnumpy(), np.arange(a.shape[0])) @@ -40,7 +40,7 @@ """ def test_dso_module_load(): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return dtype = 'int64' temp = util.tempdir() @@ -68,8 +68,8 @@ def save_object(names): save_object([path_obj, path_ll, path_bc]) cc.create_shared(path_dso, [path_obj]) - f1 = tvm.module.load(path_dso) - f2 = tvm.module.load(path_ll) + f1 = tvm.runtime.load_module(path_dso) + f2 = tvm.runtime.load_module(path_ll) a = tvm.nd.array(np.zeros(10, dtype=dtype)) f1(a) np.testing.assert_equal(a.asnumpy(), np.arange(a.shape[0])) @@ -116,13 +116,13 @@ def check_device(device): # test cross compiler function f.export_library(path_dso, cc.cross_compiler("g++")) - f1 = tvm.module.load(path_dso) + f1 = tvm.runtime.load_module(path_dso) a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx) f1(a, b) np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) if sys.platform != "win32": - f2 = tvm.module.system_lib() + f2 = tvm.runtime.system_lib() f2[name](a, b) np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) @@ -136,7 +136,7 @@ def check_stackvm(device): f = tvm.build(s, [A, B], device, "stackvm", name=name) path_dso = temp.relpath("dev_lib.stackvm") f.export_library(path_dso) - f1 = tvm.module.load(path_dso) + f1 = tvm.runtime.load_module(path_dso) a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx) f(a, b) @@ -157,7 +157,7 @@ def test_combine_module_llvm(): def check_llvm(): ctx = tvm.cpu(0) - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): print("Skip because llvm is not enabled" ) return temp = util.tempdir() @@ -170,7 +170,7 @@ def check_llvm(): fadd2.save(path2) # create shared library with multiple functions cc.create_shared(path_dso, [path1, path2]) - m = tvm.module.load(path_dso) + m = tvm.runtime.load_module(path_dso) fadd1 = m['myadd1'] fadd2 = m['myadd2'] a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), ctx) @@ -182,7 +182,7 @@ def check_llvm(): def check_system_lib(): ctx = tvm.cpu(0) - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): print("Skip because llvm is not enabled" ) return temp = util.tempdir() @@ -197,7 +197,7 @@ def check_system_lib(): # Load dll, will trigger system library registration dll = ctypes.CDLL(path_dso) # 
Load the system wide library - mm = tvm.module.system_lib() + mm = tvm.runtime.system_lib() a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), ctx) b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), ctx) mm['myadd1'](a, b) diff --git a/tests/python/unittest/test_runtime_packed_func.py b/tests/python/unittest/test_runtime_packed_func.py index c7120c5bb62f..2c229bc98f4e 100644 --- a/tests/python/unittest/test_runtime_packed_func.py +++ b/tests/python/unittest/test_runtime_packed_func.py @@ -26,7 +26,7 @@ def my_packed_func(*args): return 10 # get it out from global function table f = tvm.get_global_func("my_packed_func") - assert isinstance(f, tvm.Function) + assert isinstance(f, tvm.runtime.PackedFunc) y = f(*targs) assert y == 10 @@ -45,7 +45,7 @@ def my_callback_with_node(y, f): # get it out from global function table f = tvm.get_global_func("my_callback_with_node") - assert isinstance(f, tvm.Function) + assert isinstance(f, tvm.runtime.PackedFunc) y = f(x, f2) assert(y.value == 10) @@ -67,7 +67,7 @@ def myfunc(*args): assert(tuple(args) == targs) f = tvm.convert(myfunc) - assert isinstance(f, tvm.Function) + assert isinstance(f, tvm.runtime.PackedFunc) def test_byte_array(): s = "hello" diff --git a/tests/python/unittest/test_runtime_rpc.py b/tests/python/unittest/test_runtime_rpc.py index 8b16ae1e7ba5..43bb79cf0363 100644 --- a/tests/python/unittest/test_runtime_rpc.py +++ b/tests/python/unittest/test_runtime_rpc.py @@ -57,7 +57,7 @@ def verify_rpc(remote, target, shape, dtype): def test_rpc_simple(): - if not tvm.module.enabled("rpc"): + if not tvm.runtime.enabled("rpc"): return @tvm.register_func("rpc.test.addone") def addone(x): @@ -85,7 +85,7 @@ def remotethrow(name): assert f2("abc", 11) == "abc:11" def test_rpc_array(): - if not tvm.module.enabled("rpc"): + if not tvm.runtime.enabled("rpc"): return x = np.random.randint(0, 10, size=(3, 4)) @tvm.register_func("rpc.test.remote_array_func") @@ -100,7 +100,7 @@ def remote_array_func(y): fremote(r_cpu) def test_rpc_file_exchange(): - if not tvm.module.enabled("rpc"): + if not tvm.runtime.enabled("rpc"): return server = rpc.Server("localhost") remote = rpc.connect(server.host, server.port) @@ -110,7 +110,7 @@ def test_rpc_file_exchange(): assert(rev == blob) def test_rpc_remote_module(): - if not tvm.module.enabled("rpc"): + if not tvm.runtime.enabled("rpc"): return server = rpc.Server("localhost") client = rpc.connect(server.host, server.port) @@ -121,7 +121,7 @@ def test_rpc_remote_module(): s = tvm.create_schedule(B.op) def check_remote(remote): - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): print("Skip because llvm is not enabled") return temp = util.tempdir() @@ -146,10 +146,10 @@ def check_remote_link_cl(remote): runtime initializes. We leave it as an example on how to do rpc when we want to do linking on remote. 
""" - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): print("Skip because llvm is not enabled") return - if not tvm.module.enabled("opencl"): + if not tvm.runtime.enabled("opencl"): print("Skip because opencl is not enabled") return temp = util.tempdir() diff --git a/tests/python/unittest/test_schedule_tensor_core.py b/tests/python/unittest/test_schedule_tensor_core.py index 9fe72cd4e5d2..cd9e062dc07b 100644 --- a/tests/python/unittest/test_schedule_tensor_core.py +++ b/tests/python/unittest/test_schedule_tensor_core.py @@ -103,7 +103,7 @@ def intrin_func(ins, outs): def test_tensor_core_batch_matmal(): - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return if not nvcc.have_tensorcore(tvm.gpu(0).compute_version): @@ -216,7 +216,7 @@ def test_tensor_core_batch_matmal(): def test_tensor_core_batch_conv(): - if not tvm.gpu(0).exist or not tvm.module.enabled("cuda"): + if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"): print("skip because cuda is not enabled..") return if not nvcc.have_tensorcore(tvm.gpu(0).compute_version): diff --git a/tests/web/prepare_test_libs.py b/tests/web/prepare_test_libs.py index adca96bbe43f..ada40e66f757 100644 --- a/tests/web/prepare_test_libs.py +++ b/tests/web/prepare_test_libs.py @@ -21,7 +21,7 @@ def prepare_test_libs(base_path): target = "llvm -target=asmjs-unknown-emscripten -system-lib" - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): raise RuntimeError("Target %s is not enbaled" % target) n = tvm.var("n") A = tvm.placeholder((n,), name='A') diff --git a/tests/web/websock_rpc_test.py b/tests/web/websock_rpc_test.py index b1b4e4c70f91..92b0ad350bc0 100644 --- a/tests/web/websock_rpc_test.py +++ b/tests/web/websock_rpc_test.py @@ -30,7 +30,7 @@ proxy_port = 9090 def test_rpc_array(): - if not tvm.module.enabled("rpc"): + if not tvm.runtime.enabled("rpc"): return # graph n = tvm.convert(1024) @@ -40,7 +40,7 @@ def test_rpc_array(): remote = rpc.connect(proxy_host, proxy_port, key="js") target = "llvm -target=asmjs-unknown-emscripten -system-lib" def check_remote(): - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): print("Skip because %s is not enabled" % target) return temp = util.tempdir() diff --git a/tests/webgl/test_local_gemm.py b/tests/webgl/test_local_gemm.py index 7d32aee1f0e0..ff3c1a77bb8d 100644 --- a/tests/webgl/test_local_gemm.py +++ b/tests/webgl/test_local_gemm.py @@ -18,9 +18,9 @@ import numpy as np def test_local_gemm(): - if not tvm.module.enabled("opengl"): + if not tvm.runtime.enabled("opengl"): return - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return nn = 1024 diff --git a/tests/webgl/test_local_multi_stage.py b/tests/webgl/test_local_multi_stage.py index 36ae1827da58..578639962bb7 100644 --- a/tests/webgl/test_local_multi_stage.py +++ b/tests/webgl/test_local_multi_stage.py @@ -18,9 +18,9 @@ import numpy as np def test_local_multi_stage(): - if not tvm.module.enabled("opengl"): + if not tvm.runtime.enabled("opengl"): return - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return n = tvm.var("n") diff --git a/tests/webgl/test_local_save_load.py b/tests/webgl/test_local_save_load.py index b2f30a6e632f..0a63a77cf52e 100644 --- a/tests/webgl/test_local_save_load.py +++ b/tests/webgl/test_local_save_load.py @@ -20,9 +20,9 @@ from tvm.contrib import util, emscripten def 
test_local_save_load(): - if not tvm.module.enabled("opengl"): + if not tvm.runtime.enabled("opengl"): return - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return n = tvm.var("n") @@ -44,7 +44,7 @@ def test_local_save_load(): temp = util.tempdir() path_so = temp.relpath("myadd.so") f.export_library(path_so) - f1 = tvm.module.load(path_so) + f1 = tvm.runtime.load_module(path_so) f1(a, b, c) tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) diff --git a/tests/webgl/test_local_topi_dense.py b/tests/webgl/test_local_topi_dense.py index aaac53c67948..d57bfd20f186 100644 --- a/tests/webgl/test_local_topi_dense.py +++ b/tests/webgl/test_local_topi_dense.py @@ -48,7 +48,7 @@ def get_ref_data(): a_np, b_np, c_np, d_np = get_ref_data() def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) diff --git a/tests/webgl/test_local_topi_pooling.py b/tests/webgl/test_local_topi_pooling.py index c3163a204904..c1b66604c6a7 100644 --- a/tests/webgl/test_local_topi_pooling.py +++ b/tests/webgl/test_local_topi_pooling.py @@ -63,7 +63,7 @@ def verify_pool(n, ic, ih, kh, sh, padding, pool_type, ceil_mode): b_np = np.maximum(b_np, 0.0) def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) @@ -103,7 +103,7 @@ def verify_global_pool(n, c, h, w, pool_type): b_np = np.maximum(b_np, 0.0) def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) diff --git a/tests/webgl/test_local_topi_softmax.py b/tests/webgl/test_local_topi_softmax.py index ade09b73442c..5d9ed9345e76 100644 --- a/tests/webgl/test_local_topi_softmax.py +++ b/tests/webgl/test_local_topi_softmax.py @@ -37,7 +37,7 @@ def verify_softmax(m, n): b_np = topi.testing.softmax_python(a_np) def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) @@ -68,7 +68,7 @@ def verify_log_softmax(m, n): b_np = topi.testing.log_softmax_python(a_np) def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) diff --git a/tests/webgl/test_remote_save_load.py b/tests/webgl/test_remote_save_load.py index 13b2288e6713..1e2ca0f8dd5c 100644 --- a/tests/webgl/test_remote_save_load.py +++ b/tests/webgl/test_remote_save_load.py @@ -37,11 +37,11 @@ proxy_port = 9090 def try_remote_save_load(): - if not tvm.module.enabled("rpc"): + if not tvm.runtime.enabled("rpc"): return - if not tvm.module.enabled("opengl"): + if not tvm.runtime.enabled("opengl"): return - if not tvm.module.enabled("llvm"): + if not tvm.runtime.enabled("llvm"): return # Build the module. diff --git a/topi/python/topi/vision/ssd/multibox.py b/topi/python/topi/vision/ssd/multibox.py index 135315b3f086..8c31f823cbe4 100644 --- a/topi/python/topi/vision/ssd/multibox.py +++ b/topi/python/topi/vision/ssd/multibox.py @@ -182,7 +182,7 @@ def hybrid_multibox_transform_loc(cls_prob, loc_pred, anchor, threshold : tvm.const Threshold to be a positive prediction. 
- variances : tvm.ndarray + variances : tvm.nd.NDArray Variances to be decoded from box regression output. Returns diff --git a/topi/recipe/conv/depthwise_conv2d_test.py b/topi/recipe/conv/depthwise_conv2d_test.py index 0cfa42d931ef..90c61037f9b3 100644 --- a/topi/recipe/conv/depthwise_conv2d_test.py +++ b/topi/recipe/conv/depthwise_conv2d_test.py @@ -82,7 +82,7 @@ def test_depthwise_conv2d_nchw(): shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype) def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return ctx = tvm.context(device, 0) @@ -172,7 +172,7 @@ def test_depthwise_conv2d_nhwc(): shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype) def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return ctx = tvm.context(device, 0) diff --git a/topi/recipe/conv/test_conv2d_hwcn_map.py b/topi/recipe/conv/test_conv2d_hwcn_map.py index 80dc11620842..3f7decabfd0e 100644 --- a/topi/recipe/conv/test_conv2d_hwcn_map.py +++ b/topi/recipe/conv/test_conv2d_hwcn_map.py @@ -68,7 +68,7 @@ def test_conv2d_hwcn_map(): c_np = np.maximum(b_np, 0) def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return ctx = tvm.context(device, 0) diff --git a/topi/tests/python/test_topi_conv2d_nhwc.py b/topi/tests/python/test_topi_conv2d_nhwc.py index 2a44d60487f8..342f3190b702 100644 --- a/topi/tests/python/test_topi_conv2d_nhwc.py +++ b/topi/tests/python/test_topi_conv2d_nhwc.py @@ -44,7 +44,7 @@ def get_ref_data(): a_np, w_np, b_np = get_ref_data() def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) diff --git a/topi/tests/python/test_topi_conv3d_ndhwc.py b/topi/tests/python/test_topi_conv3d_ndhwc.py index 242e054b8c72..b95b13d403b9 100644 --- a/topi/tests/python/test_topi_conv3d_ndhwc.py +++ b/topi/tests/python/test_topi_conv3d_ndhwc.py @@ -52,7 +52,7 @@ def get_ref_data(): a_np, w_np, b_np = get_ref_data() def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) diff --git a/topi/tests/python/test_topi_lrn.py b/topi/tests/python/test_topi_lrn.py index 3407c6b26f56..53139cdf10c6 100644 --- a/topi/tests/python/test_topi_lrn.py +++ b/topi/tests/python/test_topi_lrn.py @@ -30,7 +30,7 @@ def verify_lrn(shape, size, axis, bias, alpha, beta): b_np = topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta) def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) diff --git a/topi/tests/python/test_topi_tensor.py b/topi/tests/python/test_topi_tensor.py index ccf886e552ee..465d98e5f082 100644 --- a/topi/tests/python/test_topi_tensor.py +++ b/topi/tests/python/test_topi_tensor.py @@ -38,7 +38,7 @@ def get_ref_data(): np_nd = get_ref_data() def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return @@ -67,7 +67,7 @@ def get_ref_data(): np_nd = 
get_ref_data() def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return diff --git a/topi/tests/python/test_topi_transform.py b/topi/tests/python/test_topi_transform.py index e87c6db8856a..fd04fc4b0965 100644 --- a/topi/tests/python/test_topi_transform.py +++ b/topi/tests/python/test_topi_transform.py @@ -225,7 +225,7 @@ def verify_expand_like(in_shape, out_shape, axis): s = tvm.create_schedule([C.op]) def check_device(device): - if not tvm.module.enabled(device): + if not tvm.runtime.enabled(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) diff --git a/tutorials/relay_quick_start.py b/tutorials/relay_quick_start.py index 26157f07d83e..5461b08a4b35 100644 --- a/tutorials/relay_quick_start.py +++ b/tutorials/relay_quick_start.py @@ -145,7 +145,7 @@ # load the module back. loaded_json = open(temp.relpath("deploy_graph.json")).read() -loaded_lib = tvm.module.load(path_lib) +loaded_lib = tvm.runtime.load_module(path_lib) loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read()) input_data = tvm.nd.array(np.random.uniform(size=data_shape).astype("float32")) diff --git a/tutorials/tensor_expr_get_started.py b/tutorials/tensor_expr_get_started.py index efdd499f1847..ca92b3b3ab95 100644 --- a/tutorials/tensor_expr_get_started.py +++ b/tutorials/tensor_expr_get_started.py @@ -235,17 +235,17 @@ # The following code loads the host and device module separately and # re-links them together. We can verify that the newly loaded function works. # -fadd1 = tvm.module.load(temp.relpath("myadd.so")) +fadd1 = tvm.runtime.load_module(temp.relpath("myadd.so")) if tgt == "cuda": - fadd1_dev = tvm.module.load(temp.relpath("myadd.ptx")) + fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.ptx")) fadd1.import_module(fadd1_dev) if tgt == "rocm": - fadd1_dev = tvm.module.load(temp.relpath("myadd.hsaco")) + fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.hsaco")) fadd1.import_module(fadd1_dev) if tgt.startswith('opencl'): - fadd1_dev = tvm.module.load(temp.relpath("myadd.cl")) + fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.cl")) fadd1.import_module(fadd1_dev) fadd1(a, b, c) @@ -261,7 +261,7 @@ # Currently we support packing of Metal, OpenCL and CUDA modules. 
# fadd.export_library(temp.relpath("myadd_pack.so")) -fadd2 = tvm.module.load(temp.relpath("myadd_pack.so")) +fadd2 = tvm.runtime.load_module(temp.relpath("myadd_pack.so")) fadd2(a, b, c) tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy()) diff --git a/vta/apps/gemm/python/tsim.py b/vta/apps/gemm/python/tsim.py index f5e56489aafd..c0f7b136e11b 100644 --- a/vta/apps/gemm/python/tsim.py +++ b/vta/apps/gemm/python/tsim.py @@ -62,7 +62,7 @@ def init(hw_backend): if hw_backend in ("verilog", "chisel"): hw_lib = osp.join(cur_path, "..", "hardware", hw_backend, "build", hw_libname) load_sw() - m = tvm.module.load(hw_lib, "vta-tsim") + m = tvm.runtime.load_module(hw_lib, "vta-tsim") f = tvm.get_global_func("tvm.vta.tsim.init") f(m) diff --git a/vta/apps/tsim_example/python/tsim.py b/vta/apps/tsim_example/python/tsim.py index f5e56489aafd..c0f7b136e11b 100644 --- a/vta/apps/tsim_example/python/tsim.py +++ b/vta/apps/tsim_example/python/tsim.py @@ -62,7 +62,7 @@ def init(hw_backend): if hw_backend in ("verilog", "chisel"): hw_lib = osp.join(cur_path, "..", "hardware", hw_backend, "build", hw_libname) load_sw() - m = tvm.module.load(hw_lib, "vta-tsim") + m = tvm.runtime.load_module(hw_lib, "vta-tsim") f = tvm.get_global_func("tvm.vta.tsim.init") f(m) diff --git a/vta/python/vta/testing/simulator.py b/vta/python/vta/testing/simulator.py index eb2c1c24852c..38c9467ec815 100644 --- a/vta/python/vta/testing/simulator.py +++ b/vta/python/vta/testing/simulator.py @@ -40,7 +40,7 @@ def _load_sw(): assert lib_hw # make sure to build vta/hardware/chisel try: f = tvm.get_global_func("vta.tsim.init") - m = tvm.module.load(lib_hw[0], "vta-tsim") + m = tvm.runtime.load_module(lib_hw[0], "vta-tsim") f(m) return lib_hw except OSError: diff --git a/vta/scripts/tune_resnet.py b/vta/scripts/tune_resnet.py index 18aee0917d07..9d8ed8980bb0 100644 --- a/vta/scripts/tune_resnet.py +++ b/vta/scripts/tune_resnet.py @@ -196,7 +196,7 @@ def tune_tasks(tasks, opt = parse_arguments() # Make sure that TVM was compiled with RPC=1 - assert tvm.module.enabled("rpc") + assert tvm.runtime.enabled("rpc") # Read in VTA environment env = vta.get_env() @@ -234,7 +234,7 @@ def tune_tasks(tasks, # VTA target and execution context target = env.target if opt.device == "vta" else env.target_vta_cpu ctx = remote.ext_dev(0) if opt.device == "vta" else remote.cpu(0) - + # Compile Relay program print("Initial compile...") relay_prog, params = compile_network(opt, env, target) @@ -266,7 +266,7 @@ def tune_tasks(tasks, tune_tasks(tasks, **tuning_opt) # Compile kernels with history best records - with autotvm.tophub.context(target, extra_files=[opt.log_filename]): + with autotvm.tophub.context(target, extra_files=[opt.log_filename]): # Compile network print("Compiling network with best tuning parameters...") diff --git a/vta/src/dpi/module.cc b/vta/src/dpi/module.cc index 27161c4b1bf8..bb8284cf8faf 100644 --- a/vta/src/dpi/module.cc +++ b/vta/src/dpi/module.cc @@ -418,7 +418,7 @@ Module DPIModuleNode::Load(std::string dll_name) { return Module(n); } -TVM_REGISTER_GLOBAL("module.loadfile_vta-tsim") +TVM_REGISTER_GLOBAL("runtime.module.loadfile_vta-tsim") .set_body([](TVMArgs args, TVMRetValue* rv) { *rv = DPIModuleNode::Load(args[0]); }); diff --git a/vta/tests/python/integration/test_benchmark_topi_conv2d.py b/vta/tests/python/integration/test_benchmark_topi_conv2d.py index 942776f62390..af71561972a1 100644 --- a/vta/tests/python/integration/test_benchmark_topi_conv2d.py +++ 
b/vta/tests/python/integration/test_benchmark_topi_conv2d.py @@ -227,7 +227,7 @@ def _run(env, remote): if device == "vta": target = env.target if env.TARGET not in ["sim", "tsim"]: - assert tvm.module.enabled("rpc") + assert tvm.runtime.enabled("rpc") program_fpga(remote, bitstream=None) reconfig_runtime(remote) elif device == "arm_cpu": diff --git a/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py b/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py index e2601d1a424f..d729fa517692 100644 --- a/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py +++ b/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py @@ -215,7 +215,7 @@ def _run(env, remote): if device == "vta": target = env.target if env.TARGET not in ["sim", "tsim"]: - assert tvm.module.enabled("rpc") + assert tvm.runtime.enabled("rpc") program_fpga(remote, bitstream=None) reconfig_runtime(remote) elif device == "arm_cpu": diff --git a/vta/tests/python/integration/test_benchmark_topi_dense.py b/vta/tests/python/integration/test_benchmark_topi_dense.py index 174e9663422b..b0ee2f5f7792 100644 --- a/vta/tests/python/integration/test_benchmark_topi_dense.py +++ b/vta/tests/python/integration/test_benchmark_topi_dense.py @@ -178,7 +178,7 @@ def _run(env, remote): if device == "vta": target = env.target if env.TARGET not in ["sim", "tsim"]: - assert tvm.module.enabled("rpc") + assert tvm.runtime.enabled("rpc") program_fpga(remote, bitstream=None) reconfig_runtime(remote) elif device == "arm_cpu": diff --git a/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py b/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py index 975d5b9aaaf9..7bba2449cea5 100644 --- a/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py +++ b/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py @@ -127,7 +127,7 @@ def get_ref_data(): w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype) b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype) r_np = topi.testing.conv2d_nchw_python( - a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype), + a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype), (wl.hstride, wl.wstride), wl.hpad, wl.groups).astype(env.acc_dtype) return a_np, w_np, b_np, r_np @@ -224,7 +224,7 @@ def _run(env, remote): if device == "vta": target = env.target if env.TARGET not in ["sim", "tsim"]: - assert tvm.module.enabled("rpc") + assert tvm.runtime.enabled("rpc") program_fpga(remote, bitstream=None) reconfig_runtime(remote) elif device == "arm_cpu": diff --git a/vta/tests/python/pynq/test_program_rpc.py b/vta/tests/python/pynq/test_program_rpc.py index a7ef6f28cc50..2d8da5acc16a 100644 --- a/vta/tests/python/pynq/test_program_rpc.py +++ b/vta/tests/python/pynq/test_program_rpc.py @@ -29,14 +29,14 @@ def program_rpc_bitstream(path=None): ---------- path : path to bitstream (optional) """ - assert tvm.module.enabled("rpc") + assert tvm.runtime.enabled("rpc") remote = rpc.connect(host, port) program_fpga(remote, path) def reconfig_rpc_runtime(): """Reconfig the RPC server runtime """ - assert tvm.module.enabled("rpc") + assert tvm.runtime.enabled("rpc") remote = rpc.connect(host, port) reconfig_runtime(remote) diff --git a/vta/tutorials/frontend/deploy_vision_on_vta.py b/vta/tutorials/frontend/deploy_vision_on_vta.py index 154eb85ed9a5..c410d24d07ae 100644 --- a/vta/tutorials/frontend/deploy_vision_on_vta.py +++ b/vta/tutorials/frontend/deploy_vision_on_vta.py @@ -60,7 +60,7 @@ from vta.top 
import graph_pack # Make sure that TVM was compiled with RPC=1 -assert tvm.module.enabled("rpc") +assert tvm.runtime.enabled("rpc") ###################################################################### # Define the platform and model targets @@ -243,7 +243,7 @@ m.set_input('data', image) # Perform inference and gather execution statistics -# More on: https://docs.tvm.ai/api/python/module.html#tvm.module.Module.time_evaluator +# More on: https://docs.tvm.ai/api/python/module.html#tvm.runtime.Module.time_evaluator num = 4 # number of times we run module for a single measurement rep = 3 # number of measurements (we derive std dev from this) timer = m.module.time_evaluator("run", ctx, number=num, repeat=rep) diff --git a/vta/tutorials/matrix_multiply.py b/vta/tutorials/matrix_multiply.py index 70a899bb534f..3e46b427baf6 100644 --- a/vta/tutorials/matrix_multiply.py +++ b/vta/tutorials/matrix_multiply.py @@ -54,7 +54,7 @@ if env.TARGET == "pynq": # Make sure that TVM was compiled with RPC=1 - assert tvm.module.enabled("rpc") + assert tvm.runtime.enabled("rpc") remote = rpc.connect(host, port) # Reconfigure the JIT runtime diff --git a/vta/tutorials/optimize/convolution_opt.py b/vta/tutorials/optimize/convolution_opt.py index f1e0ba397671..e5cf8e595b15 100644 --- a/vta/tutorials/optimize/convolution_opt.py +++ b/vta/tutorials/optimize/convolution_opt.py @@ -58,7 +58,7 @@ if env.TARGET == "pynq": # Make sure that TVM was compiled with RPC=1 - assert tvm.module.enabled("rpc") + assert tvm.runtime.enabled("rpc") remote = rpc.connect(host, port) # Reconfigure the JIT runtime diff --git a/vta/tutorials/optimize/matrix_multiply_opt.py b/vta/tutorials/optimize/matrix_multiply_opt.py index b20094a2cd8b..2d54b97957b2 100644 --- a/vta/tutorials/optimize/matrix_multiply_opt.py +++ b/vta/tutorials/optimize/matrix_multiply_opt.py @@ -57,7 +57,7 @@ if env.TARGET == "pynq": # Make sure that TVM was compiled with RPC=1 - assert tvm.module.enabled("rpc") + assert tvm.runtime.enabled("rpc") remote = rpc.connect(host, port) # Reconfigure the JIT runtime diff --git a/vta/tutorials/vta_get_started.py b/vta/tutorials/vta_get_started.py index 93a0add50a87..dd305154a733 100644 --- a/vta/tutorials/vta_get_started.py +++ b/vta/tutorials/vta_get_started.py @@ -78,7 +78,7 @@ if env.TARGET == "pynq": # Make sure that TVM was compiled with RPC=1 - assert tvm.module.enabled("rpc") + assert tvm.runtime.enabled("rpc") remote = rpc.connect(host, port) # Reconfigure the JIT runtime diff --git a/web/README.md b/web/README.md index d8127f2fa69c..e47c6038b312 100644 --- a/web/README.md +++ b/web/README.md @@ -91,7 +91,7 @@ from tvm.contrib import emscripten import os def prepare_test_libs(base_path): target = "llvm -target=asmjs-unknown-emscripten -system-lib" - if not tvm.module.enabled(target): + if not tvm.runtime.enabled(target): raise RuntimeError("Target %s is not enbaled" % target) n = tvm.var("n") A = tvm.placeholder((n,), name='A') diff --git a/web/tvm_runtime.js b/web/tvm_runtime.js index 6dfb684bb18b..cf5d55ef2261 100644 --- a/web/tvm_runtime.js +++ b/web/tvm_runtime.js @@ -858,7 +858,7 @@ var tvm_runtime = tvm_runtime || {}; */ this.systemLib = function() { if (typeof systemFunc.fGetSystemLib === "undefined") { - systemFunc.fGetSystemLib = getGlobalFunc("module._GetSystemLib"); + systemFunc.fGetSystemLib = getGlobalFunc("runtime.SystemLib"); } return systemFunc.fGetSystemLib(); };
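For reference only (not part of the patch): a minimal sketch of how the renamed runtime entry points used throughout the hunks above fit together, assuming a TVM build contemporary with this change where `tvm.placeholder`, `tvm.create_schedule`, and `tvm.build` are still top-level as in the patched files; the `addone` kernel name and temporary paths are illustrative, not taken from the patch.

    import numpy as np
    import tvm
    from tvm.contrib import util

    # Feature checks moved from tvm.module.enabled to tvm.runtime.enabled.
    if not tvm.runtime.enabled("llvm"):
        raise RuntimeError("llvm backend is not enabled")

    # Build a trivial add-one kernel, mirroring the test cases in the patch.
    n = 1024
    A = tvm.placeholder((n,), name="A")
    B = tvm.compute((n,), lambda i: A[i] + 1.0, name="B")
    s = tvm.create_schedule(B.op)
    fadd = tvm.build(s, [A, B], "llvm", name="addone")

    # Export and reload the shared library; loading moved from
    # tvm.module.load to tvm.runtime.load_module.
    temp = util.tempdir()
    path_dso = temp.relpath("addone.so")
    fadd.export_library(path_dso)
    mod = tvm.runtime.load_module(path_dso)

    # Invoke the loaded function and verify the result.
    ctx = tvm.cpu(0)
    a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)
    b = tvm.nd.array(np.zeros(n, dtype=A.dtype), ctx)
    mod["addone"](a, b)
    np.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1.0)

The same pattern applies to the other renames in the patch: statically linked libraries are obtained with tvm.runtime.system_lib() instead of tvm.module.system_lib(), and on the C++/registry side the system library global is registered as "runtime.SystemLib" rather than "module._GetSystemLib".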