Skip to content

Commit

Permalink
Rename tvm.module -> tvm.runtime
Browse files — browse the repository at this point in the history
  • Loading branch information
alexwong committed Feb 29, 2020
1 parent 95f3c65 commit 3599939
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 6 deletions.
8 changes: 4 additions & 4 deletions nnvm/tests/python/unittest/test_graph_annotation.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ def test_conv_network():
conv2d
"""
def compile_run_graph(device, target):
if not tvm.module.enabled(device):
if not tvm.runtime.enabled(device):
print("Skip test because %s is not enabled." % device)
return

Expand Down Expand Up @@ -129,7 +129,7 @@ def test_fusible_network():
tanh
"""
def compile_run_graph(device, target):
if not tvm.module.enabled(device):
if not tvm.runtime.enabled(device):
print("Skip test because %s is not enabled." % device)
return

Expand Down Expand Up @@ -265,7 +265,7 @@ def check_load_module():
fo.write(nnvm.compiler.save_param_dict(params))

# Load lib, json, and params back.
loaded_lib = tvm.module.load(path_lib)
loaded_lib = tvm.runtime.load(path_lib)
loaded_json = open(temp.relpath("deploy.json")).read()
loaded_json = graph.load_json(loaded_json)
loaded_params = bytearray(open(temp.relpath("deploy.params"),
Expand Down Expand Up @@ -315,7 +315,7 @@ def check_inmemory_module():
# conv2d (acc)
# """
# def compile_run_graph(device, target):
# if not tvm.module.enabled(device):
# if not tvm.runtime.enabled(device):
# print("Skip test because %s is not enabled." % device)
# return
#
Expand Down
4 changes: 2 additions & 2 deletions tests/python/relay/test_tensorrt.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
from tvm.contrib import graph_runtime

def should_skip():
if not tvm.module.enabled("cuda") or not tvm.gpu(0).exist:
if not tvm.runtime.enabled("cuda") or not tvm.gpu(0).exist:
print("skip because cuda is not enabled.")
return True
if not relay.tensorrt.IsTrtRuntimeAvailable():
Expand Down Expand Up @@ -523,7 +523,7 @@ def test_tensorrt_serialize():
graph = f_graph_json.read()
with open('compiled.params', 'rb') as f_params:
params = bytearray(f_params.read())
lib = tvm.module.load("compiled.tensorrt")
lib = tvm.runtime.load("compiled.tensorrt")
# Run
mod = graph_runtime.create(graph, lib, ctx=tvm.gpu(0))
mod.load_params(params)
Expand Down

0 comments on commit 3599939

Please sign in to comment.