diff --git a/apps/android_camera/models/prepare_model.py b/apps/android_camera/models/prepare_model.py index 36674d273bd1..703a4656c479 100644 --- a/apps/android_camera/models/prepare_model.py +++ b/apps/android_camera/models/prepare_model.py @@ -87,7 +87,7 @@ def main(model_str, output_path): except FileExistsError: pass print("building...") - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(net, target, target_host=target_host, params=params) print("dumping lib...") lib.export_library(output_path_str + '/' + 'deploy_lib_cpu.so', ndk.create_shared) diff --git a/apps/benchmark/arm_cpu_imagenet_bench.py b/apps/benchmark/arm_cpu_imagenet_bench.py index 53b616868bdd..f319d5a53042 100644 --- a/apps/benchmark/arm_cpu_imagenet_bench.py +++ b/apps/benchmark/arm_cpu_imagenet_bench.py @@ -39,7 +39,7 @@ def evaluate_network(network, target, target_host, repeat): net, params, input_shape, output_shape = get_network(network, batch_size=1) print_progress("%-20s building..." 
% network) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build( net, target=target, target_host=target_host, params=params) diff --git a/apps/benchmark/gpu_imagenet_bench.py b/apps/benchmark/gpu_imagenet_bench.py index 00237006a2d3..a3df2c46a24b 100644 --- a/apps/benchmark/gpu_imagenet_bench.py +++ b/apps/benchmark/gpu_imagenet_bench.py @@ -33,7 +33,7 @@ def benchmark(network, target): net, params, input_shape, output_shape = get_network(network, batch_size=1) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(net, target=target, params=params) # create runtime diff --git a/apps/benchmark/mobile_gpu_imagenet_bench.py b/apps/benchmark/mobile_gpu_imagenet_bench.py index 4f93a0d5e383..83127ff5af72 100644 --- a/apps/benchmark/mobile_gpu_imagenet_bench.py +++ b/apps/benchmark/mobile_gpu_imagenet_bench.py @@ -38,7 +38,7 @@ def evaluate_network(network, target, target_host, dtype, repeat): net, params, input_shape, output_shape = get_network(network, batch_size=1, dtype=dtype) print_progress("%-20s building..." 
% network) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build( net, target=target, target_host=target_host, params=params) diff --git a/apps/bundle_deploy/build_model.py b/apps/bundle_deploy/build_model.py index 63d658e6d428..1d415cd40ef4 100644 --- a/apps/bundle_deploy/build_model.py +++ b/apps/bundle_deploy/build_model.py @@ -33,7 +33,7 @@ def build_module(opts): func = mod["main"] func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build( func, 'llvm --system-lib', params=params) diff --git a/apps/sgx/src/build_model.py b/apps/sgx/src/build_model.py index 6e0933efd381..b988574fc558 100755 --- a/apps/sgx/src/build_model.py +++ b/apps/sgx/src/build_model.py @@ -37,7 +37,7 @@ def main(): net, params = relay.testing.resnet.get_workload( layers=18, batch_size=dshape[0], image_shape=dshape[1:]) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build( net, 'llvm --system-lib', params=params) diff --git a/golang/sample/gen_mobilenet_lib.py b/golang/sample/gen_mobilenet_lib.py index 8becd078fd5e..d4dcf2136f81 100644 --- a/golang/sample/gen_mobilenet_lib.py +++ b/golang/sample/gen_mobilenet_lib.py @@ -16,7 +16,7 @@ # under the License. 
import os -from tvm import relay +from tvm import relay, transform from tvm.contrib.download import download_testdata @@ -77,7 +77,7 @@ def extract(path): target = 'llvm' # Build with Relay -with relay.build_config(opt_level=3): +with transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build( mod, target, params=params) diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index e86890f3639a..05222c65ecd1 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -505,7 +505,7 @@ def infer_value(input_val, params, mod=None): assert all(var.name_hint in params.keys() for var in analysis.free_vars( input_val)), "All inputs to infer must be available in params." func = _function.Function(analysis.free_vars(input_val), input_val) - with tvm.relay.build_config(opt_level=0): + with tvm.transform.PassContext(opt_level=0): graph, lib, params = tvm.relay.build(func, target="llvm", params=params) ctx = tvm.cpu(0) m = graph_runtime.create(graph, lib, ctx) diff --git a/python/tvm/relay/quantize/_calibrate.py b/python/tvm/relay/quantize/_calibrate.py index 9794698a0447..59ee51b7ef59 100644 --- a/python/tvm/relay/quantize/_calibrate.py +++ b/python/tvm/relay/quantize/_calibrate.py @@ -28,7 +28,6 @@ from .. import op as _op from .. import expr as _expr from .. import analysis as _analysis -from .. import transform as _transform from .. 
import build_module as _build_module from ...contrib import graph_runtime from .kl_divergence import _find_scale_by_kl @@ -45,7 +44,7 @@ def _get_profile_runtime(mod): target = 'llvm' ctx = tvm.context(target) - with _transform.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = _build_module.build(func, target=target) runtime = graph_runtime.create(graph, lib, ctx) runtime.set_input(**params) diff --git a/python/tvm/relay/transform/transform.py b/python/tvm/relay/transform/transform.py index 19ddb32fc52f..8f4ec1046500 100644 --- a/python/tvm/relay/transform/transform.py +++ b/python/tvm/relay/transform/transform.py @@ -21,6 +21,7 @@ import types import inspect import functools +import warnings import tvm.ir from tvm import te @@ -34,7 +35,9 @@ def build_config(opt_level=2, required_pass=None, disabled_pass=None, trace=None): - """Configure the build behavior by setting config variables. + """Configure the build behavior by setting config variables. This function + will be deprecated in TVM v0.7. Instead, we should directly use + tvm.transform.PassContext. Parameters ---------- @@ -72,8 +75,9 @@ def build_config(opt_level=2, pass_context: PassContext The pass context for optimizations. """ - return tvm.ir.transform.PassContext(opt_level, required_pass, - disabled_pass, trace) + warnings.warn("relay.build_config will be deprecated. 
Please use \ + tvm.transform.PassContext directly", DeprecationWarning) + return tvm.transform.PassContext(opt_level, required_pass, disabled_pass, trace) @tvm._ffi.register_object("relay.FunctionPass") diff --git a/rust/frontend/examples/resnet/src/build_resnet.py b/rust/frontend/examples/resnet/src/build_resnet.py index 49c67bf1c4f3..a09a0c3a56eb 100644 --- a/rust/frontend/examples/resnet/src/build_resnet.py +++ b/rust/frontend/examples/resnet/src/build_resnet.py @@ -75,8 +75,8 @@ def build(target_dir): num_layers=18, batch_size=batch_size, image_shape=image_shape) # compile the model - with relay.build_config(opt_level=opt_level): - graph, lib, params = relay.build_module.build(net, target, params=params) + with tvm.transform.PassContext(opt_level=opt_level): + graph, lib, params = relay.build_module.build(net, target, params=params) # save the model artifacts lib.save(deploy_lib) diff --git a/src/relay/backend/build_module.cc b/src/relay/backend/build_module.cc index abce0683b8db..7aad766c5210 100644 --- a/src/relay/backend/build_module.cc +++ b/src/relay/backend/build_module.cc @@ -304,9 +304,8 @@ class RelayBuildModule : public runtime::ModuleNode { // Handle heterogeneous compilation. 
transform::PassContext pass_ctx = PassContext::Current(); if (targets_.size() > 1) { - Optional<Integer> opt_fallback_dev = - pass_ctx->GetConfig<Integer>("relay.fallback_device_type", - IntImm(runtime::DataType::Int(32), static_cast<int>(kDLCPU))); + Optional<Integer> opt_fallback_dev = + pass_ctx->GetConfig<Integer>("relay.fallback_device_type", Integer(static_cast<int>(kDLCPU))); auto fallback_dev = opt_fallback_dev.value(); CHECK_GT(fallback_dev->value, 0U); relay_module = RunDeviceAnnotationPass(relay_module, fallback_dev->value); diff --git a/tests/cpp/relay_transform_sequential.cc b/tests/cpp/relay_transform_sequential.cc index 60d3a5e23da8..f08d5574d51c 100644 --- a/tests/cpp/relay_transform_sequential.cc +++ b/tests/cpp/relay_transform_sequential.cc @@ -70,7 +70,7 @@ TEST(Relay, Sequential) { auto mod = IRModule::FromExpr(func); auto pass_ctx = relay::transform::PassContext::Create(); pass_ctx->opt_level = 3; - pass_ctx->config.Set("relay.fallback_device_type", IntImm(DataType::Int(32), 1)); + pass_ctx->config.Set("relay.fallback_device_type", Integer(1)); { tvm::With<relay::transform::PassContext> ctx_scope(pass_ctx); tvm::With<tvm::Target> tctx(tvm::Target::Create("llvm")); diff --git a/tests/python/frontend/caffe2/test_forward.py b/tests/python/frontend/caffe2/test_forward.py index f05287216ec9..50a878180ac9 100644 --- a/tests/python/frontend/caffe2/test_forward.py +++ b/tests/python/frontend/caffe2/test_forward.py @@ -43,7 +43,7 @@ def get_tvm_output(model, dtype_dict = {input_names: input_data.dtype} mod, params = relay.frontend.from_caffe2( model.init_net, model.predict_net, shape_dict, dtype_dict) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) m = graph_runtime.create(graph, lib, ctx) diff --git a/tests/python/frontend/coreml/test_forward.py b/tests/python/frontend/coreml/test_forward.py index 3a156385d510..179f5b41c1d7 100644 --- a/tests/python/frontend/coreml/test_forward.py +++ b/tests/python/frontend/coreml/test_forward.py @@ -33,7 
+33,7 @@ def get_tvm_output(func, x, params, target, ctx, out_shape=(1, 1000), input_name='image', dtype='float32'): - with relay.transform.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(func, target, params=params) m = graph_runtime.create(graph, lib, ctx) # set inputs @@ -76,7 +76,7 @@ def run_tvm_graph(coreml_model, target, ctx, input_data, input_name, output_shap dtype_dict = {input_name: input_data.dtype} mod, params = relay.frontend.from_coreml(coreml_model, shape_dict) - with relay.transform.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) from tvm.contrib import graph_runtime diff --git a/tests/python/frontend/keras/test_forward.py b/tests/python/frontend/keras/test_forward.py index ed0181f184d9..9b963c396319 100644 --- a/tests/python/frontend/keras/test_forward.py +++ b/tests/python/frontend/keras/test_forward.py @@ -84,7 +84,7 @@ def get_keras_output(xs, dtype='float32'): def get_tvm_output(xs, target, ctx, dtype='float32'): shape_dict = {name: x.shape for (name, x) in zip(keras_model.input_names, xs)} mod, params = relay.frontend.from_keras(keras_model, shape_dict, layout=layout) - with relay.transform.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): graph, lib, params = relay.build(mod, target, params=params) diff --git a/tests/python/frontend/mxnet/test_forward.py b/tests/python/frontend/mxnet/test_forward.py index 6d36ea30e1dd..5ed2fb890b71 100644 --- a/tests/python/frontend/mxnet/test_forward.py +++ b/tests/python/frontend/mxnet/test_forward.py @@ -66,7 +66,7 @@ def get_tvm_output(symbol, x, args, auxs, target, ctx, dtype='float32'): shape_dict, arg_params=args, aux_params=auxs) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) m = graph_runtime.create(graph, lib, ctx) # set inputs diff 
--git a/tests/python/frontend/mxnet/test_qnn_ops_utils.py b/tests/python/frontend/mxnet/test_qnn_ops_utils.py index d130eef3b962..541162d79afe 100644 --- a/tests/python/frontend/mxnet/test_qnn_ops_utils.py +++ b/tests/python/frontend/mxnet/test_qnn_ops_utils.py @@ -15,8 +15,8 @@ # specific language governing permissions and limitations # under the License. -import tvm import numpy as np +import tvm from tvm import relay from tvm.contrib import graph_runtime from tvm.relay.frontend.mxnet_qnn_op_utils import dequantize_mxnet_min_max, \ @@ -39,7 +39,7 @@ def dequantize_test_driver(in_dtype, quant_args, in_data, verify_output_data): in_dtype=in_dtype) mod = relay.Function(relay.analysis.free_vars(dequantized_output), dequantized_output) mod = tvm.IRModule.from_expr(mod) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) rt_mod.set_input(input_data=in_data) @@ -93,7 +93,7 @@ def quantize_test_driver(out_dtype, quant_args, in_data, verify_output_data): out_dtype=out_dtype) mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output) mod = tvm.IRModule.from_expr(mod) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) rt_mod.set_input(input_data=in_data) diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 2e61b4c62c73..5effe412af37 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -65,7 +65,7 @@ def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset) - with relay.build_config(opt_level=1): + with tvm.transform.PassContext(opt_level=1): graph, 
lib, params = relay.build(mod, target, params=params) diff --git a/tests/python/frontend/pytorch/qnn_test.py b/tests/python/frontend/pytorch/qnn_test.py index bf5fa981e6f4..551cdc4cd418 100644 --- a/tests/python/frontend/pytorch/qnn_test.py +++ b/tests/python/frontend/pytorch/qnn_test.py @@ -41,7 +41,7 @@ def get_tvm_runtime(script_module, input_name, ishape): input_shapes = [(input_name, ishape)] mod, params = relay.frontend.from_pytorch(script_module, input_shapes) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): # test on only cpu for now, torch cannot run quant models on cuda # also not to make CI too slow json, lib, params = relay.build(mod, target="llvm", params=params) diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py index 6159bb816ccf..f6edbf119684 100644 --- a/tests/python/frontend/pytorch/test_forward.py +++ b/tests/python/frontend/pytorch/test_forward.py @@ -176,7 +176,7 @@ def verify_model(model_name, input_data=[], compiled_input = dict(zip(input_names, [inp.cpu().numpy() for inp in baseline_input])) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): for target, ctx in ctx_list: relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params) relay_model = graph_runtime.create(relay_graph, relay_lib, ctx) @@ -2294,7 +2294,7 @@ def test_forward_pretrained_bert_base_uncased(): # ---------------------------- target = 'llvm' - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params) ###################################################################### diff --git a/tests/python/frontend/tensorflow/test_bn_dynamic.py b/tests/python/frontend/tensorflow/test_bn_dynamic.py index a2d69034a94a..e80d774408a3 100644 --- a/tests/python/frontend/tensorflow/test_bn_dynamic.py +++ 
b/tests/python/frontend/tensorflow/test_bn_dynamic.py @@ -50,7 +50,7 @@ def verify_fused_batch_norm(shape): continue mod, params = relay.frontend.from_tensorflow(constant_graph, outputs=['output']) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target=device, params=params) diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py index c6a285c93d6a..89a033557db2 100644 --- a/tests/python/frontend/tensorflow/test_forward.py +++ b/tests/python/frontend/tensorflow/test_forward.py @@ -123,7 +123,7 @@ def run_tvm_graph(graph_def, input_data, input_node, num_output=1, result = ex.evaluate()(*inputs) return vmobj_to_list(result) else: - with relay.build_config(opt_level=opt_level): + with tvm.transform.PassContext(opt_level=opt_level): graph, lib, params = relay.build(mod, target, target_host, params) ctx = tvm.context(target, 0) @@ -2307,7 +2307,7 @@ def _get_tvm_graph_module(graph_def): 'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_c': 'float32', 'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_h': 'float32'} target = 'llvm' - with relay.build_config(opt_level=0): + with tvm.transform.PassContext(opt_level=0): graph, lib, params = relay.build(mod, target, params=params) diff --git a/tests/python/frontend/tflite/test_forward.py b/tests/python/frontend/tflite/test_forward.py index a68fd90120f8..24b82c65d4e5 100644 --- a/tests/python/frontend/tflite/test_forward.py +++ b/tests/python/frontend/tflite/test_forward.py @@ -109,7 +109,7 @@ def run_tvm_graph(tflite_model_buf, input_data, input_node, num_output=1, target shape_dict=shape_dict, dtype_dict=dtype_dict) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) ctx = tvm.context(target, 0) diff --git a/tests/python/nightly/quantization/test_quantization_accuracy.py 
b/tests/python/nightly/quantization/test_quantization_accuracy.py index 4818cc651b94..d4b55f14100b 100644 --- a/tests/python/nightly/quantization/test_quantization_accuracy.py +++ b/tests/python/nightly/quantization/test_quantization_accuracy.py @@ -66,7 +66,7 @@ def get_model(model_name, batch_size, qconfig, target=None, original=False, simu mod, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape}) net = mod['main'] - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): qfunc = relay.quantize.prerequisite_optimize(net, params=params) logging.debug('original') logging.debug(qfunc.astext(show_meta_data=False)) @@ -83,7 +83,7 @@ def get_model(model_name, batch_size, qconfig, target=None, original=False, simu def eval_acc(model, dataset, batch_fn, target=tvm.target.cuda(), ctx=tvm.gpu(), log_interval=100): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(model, target) # create runtime module m = tvm.contrib.graph_runtime.create(graph, lib, ctx) diff --git a/tests/python/relay/benchmarking/benchmark_vm.py b/tests/python/relay/benchmarking/benchmark_vm.py index 1e9030c5d8e6..a6e05bee5ca2 100644 --- a/tests/python/relay/benchmarking/benchmark_vm.py +++ b/tests/python/relay/benchmarking/benchmark_vm.py @@ -36,7 +36,7 @@ def benchmark_execution(mod, model="unknown"): def get_graph_runtime_output(mod, data, params, target, ctx, dtype='float32', number=2, repeat=20): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) m = graph_runtime.create(graph, lib, ctx) @@ -59,7 +59,7 @@ def get_graph_runtime_output(mod, data, params, target, ctx, def get_vm_output(mod, data, params, target, ctx, dtype='float32', number=2, repeat=20): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): exe = vm.compile(mod, target, params=params) rly_vm = 
vm_rt.VirtualMachine(exe) rly_vm.init(ctx) diff --git a/tests/python/relay/test_backend_compile_engine.py b/tests/python/relay/test_backend_compile_engine.py index eb018fed96e7..1b4e08f7eb7b 100644 --- a/tests/python/relay/test_backend_compile_engine.py +++ b/tests/python/relay/test_backend_compile_engine.py @@ -184,7 +184,7 @@ def test_compile_placeholder_bypass(): z = relay.var("z", shape=(2, 3)) result = relay.Tuple([x, relay.op.concatenate([y, z], axis=0)]) func = relay.Function(relay.analysis.free_vars(result), result) - with relay.build_config(opt_level=0): + with tvm.transform.PassContext(opt_level=0): graph, lib, params = relay.build(tvm.IRModule.from_expr(func), 'llvm') diff --git a/tests/python/relay/test_backend_graph_runtime.py b/tests/python/relay/test_backend_graph_runtime.py index 226d5ba218e8..f0785bcf1c09 100644 --- a/tests/python/relay/test_backend_graph_runtime.py +++ b/tests/python/relay/test_backend_graph_runtime.py @@ -166,7 +166,7 @@ def unit_numpy(X, W): z = unit(rnn_dim) for target, ctx in ctx_list(): - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): graph, lib, params = relay.build(tvm.IRModule.from_expr(z), target) m = graph_runtime.create(graph, lib, ctx) m.set_input("X", tvm.nd.array(x.astype(dtype))) diff --git a/tests/python/relay/test_cpp_build_module.py b/tests/python/relay/test_cpp_build_module.py index 171b6b0b77b0..8d5438424e32 100644 --- a/tests/python/relay/test_cpp_build_module.py +++ b/tests/python/relay/test_cpp_build_module.py @@ -115,7 +115,7 @@ def check_conversion(tgt, ctx): X = tvm.nd.array(n * np.random.randn(n).astype(src) - n / 2) # build - with relay.build_config(opt_level=1): + with tvm.transform.PassContext(opt_level=1): g_json, mmod, params = relay.build(tvm.IRModule.from_expr(func), tgt) # test diff --git a/tests/python/relay/test_external_codegen.py b/tests/python/relay/test_external_codegen.py index 3797910080a1..c449ce39ff01 100644 --- 
a/tests/python/relay/test_external_codegen.py +++ b/tests/python/relay/test_external_codegen.py @@ -49,7 +49,8 @@ def update_lib(lib): return lib def check_vm_result(): - with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]): + with tvm.transform.PassContext(opt_level=3, + disabled_pass=["AlterOpLayout"]): exe = relay.vm.compile(mod, target=target) code, lib = exe.save() lib = update_lib(lib) @@ -60,7 +61,8 @@ def check_vm_result(): tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol) def check_graph_runtime_result(): - with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]): + with tvm.transform.PassContext(opt_level=3, + disabled_pass=["AlterOpLayout"]): json, lib, _ = relay.build(mod, target=target) lib = update_lib(lib) rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx) diff --git a/tests/python/relay/test_memory_passes.py b/tests/python/relay/test_memory_passes.py index 70e7086cef4d..dc16865aa620 100644 --- a/tests/python/relay/test_memory_passes.py +++ b/tests/python/relay/test_memory_passes.py @@ -37,7 +37,7 @@ def check_memory_plan(func, check_fn): no_plan_result = ex.evaluate(mod['main'])(*args) # Compute with memory planning. - with relay.build_config(opt_level=1, disabled_pass=["MemoryPlan"]): + with tvm.transform.PassContext(opt_level=1, disabled_pass=["MemoryPlan"]): plan_result = ex.evaluate(mod['main'])(*args) # Compute Python result. 
diff --git a/tests/python/relay/test_op_fast_math.py b/tests/python/relay/test_op_fast_math.py index 215b83e8e80d..a771d29a431d 100644 --- a/tests/python/relay/test_op_fast_math.py +++ b/tests/python/relay/test_op_fast_math.py @@ -34,7 +34,7 @@ def test_apply(relay_op, name, f_numpy, low, high, step, dtype="float32"): func = relay.Function([x], y) mod = tvm.IRModule.from_expr(func) - with relay.build_config(opt_level=3, required_pass=['FastMath']): + with tvm.transform.PassContext(opt_level=3, required_pass=['FastMath']): graph, lib, params = relay.build(mod, target="llvm", params=None) # Check that the op related to fast math have been convered to function in lib diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py index 68eced328fa8..3e8720d5ba50 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -262,7 +262,7 @@ def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape, with open(temp.relpath("temp.log"), "w") as log_file: log_file.write(test_schedule) with autotvm.apply_history_best(temp.relpath("temp.log")): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): print('Compiling...') graph_json, mod, params = tvm.relay.build(mod, target="llvm -device=arm_cpu") @@ -356,7 +356,7 @@ def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape, data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups) - with WinogradFallback(), relay.build_config(opt_level=3): + with WinogradFallback(), tvm.transform.PassContext(opt_level=3): for target, ctx in ctx_list(): if target != 'cuda': continue @@ -578,7 +578,7 @@ def run_test_conv3d_cuda(dtype, out_dtype, scale, dshape, kshape, data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups) - with WinogradFallback(), relay.build_config(opt_level=3): + with WinogradFallback(), tvm.transform.PassContext(opt_level=3): for target, ctx in ctx_list(): if target != 
'cuda': continue @@ -1199,7 +1199,7 @@ def _compile(ic, oc, target, data_layout, kernel_layout, dtypes): wdata = np.random.rand(*kernel_shape) * 10 parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))} - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(func, target, params=parameters) assembly = lib.get_source("asm") @@ -1314,7 +1314,7 @@ def test_depthwise_conv2d_int8(): llvm_version = tvm.target.codegen.llvm_version_major() for target in targets: if llvm_version >= 8: - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(func, target, params=parameters) diff --git a/tests/python/relay/test_op_qnn_conv2d.py b/tests/python/relay/test_op_qnn_conv2d.py index 6911c52958f0..fcb335fd0d63 100644 --- a/tests/python/relay/test_op_qnn_conv2d.py +++ b/tests/python/relay/test_op_qnn_conv2d.py @@ -182,7 +182,7 @@ def get_inputs(data_shape, data_dtype, kernel_shape, kernel_dtype): def get_output(func, golden_inputs): - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): golden_data, golden_weight = golden_inputs params = {'kernel': golden_weight} graph, lib, params = relay.build(func, "llvm", params=params) @@ -655,7 +655,7 @@ def test_tflite_large_irregular(): golden_data = np.full(data_shape, 127).astype('uint8') golden_weight = np.full(kernel_shape, 127).astype('uint8') - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): params = {'kernel': golden_weight} graph, lib, params = relay.build(qnn_func, "llvm", params=params) mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) @@ -698,7 +698,7 @@ def test_tflite_output_multiplier_greater_than_one(): -1, -1, 1, 1)).reshape(kernel_shape) golden_weight = golden_weight.astype('uint8') - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): params = {'kernel': golden_weight} graph, lib, 
params = relay.build(qnn_func, "llvm", params=params) mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) @@ -744,7 +744,7 @@ def test_tflite_anistropic_strides(): golden_weight = np.array((129, 131, 133, 135)).reshape(kernel_shape) golden_weight = golden_weight.astype('uint8') - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): params = {'kernel': golden_weight} graph, lib, params = relay.build(qnn_func, "llvm", params=params) mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) @@ -789,7 +789,7 @@ def test_broadcast_layout(): func = relay.add(func, bias) func = relay.Function(relay.analysis.free_vars(func), func) mod = tvm.IRModule.from_expr(func) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm -mcpu=skylake-avx512") def test_depthwise_depth_multiplier(): diff --git a/tests/python/relay/test_op_qnn_dense.py b/tests/python/relay/test_op_qnn_dense.py index 3cfcfd165b46..0ba3210e8d8b 100644 --- a/tests/python/relay/test_op_qnn_dense.py +++ b/tests/python/relay/test_op_qnn_dense.py @@ -167,7 +167,7 @@ def qnn_dense_driver(test_configuration): mod = relay.Function(relay.analysis.free_vars(mod), mod) mod = tvm.IRModule.from_expr(mod) mod = relay.qnn.transform.CanonicalizeOps()(mod) - with relay.build_config(opt_level=2): + with tvm.transform.PassContext(opt_level=2): graph, lib, params = relay.build(mod, "llvm", params=None) mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) mod.set_input(quantized_data_name, test_configuration[quantized_data_name]) diff --git a/tests/python/relay/test_op_qnn_dequantize.py b/tests/python/relay/test_op_qnn_dequantize.py index febf5c5e6ecc..3c82b7fa0afa 100644 --- a/tests/python/relay/test_op_qnn_dequantize.py +++ b/tests/python/relay/test_op_qnn_dequantize.py @@ -30,7 +30,7 @@ def quantize_test_driver(in_dtype, quant_args, in_data, verify_output_data): input_zero_point=input_zero_point) mod = 
relay.Function(relay.analysis.free_vars(quantized_output), quantized_output) mod = tvm.IRModule.from_expr(mod) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) rt_mod.set_input(input_data=in_data) diff --git a/tests/python/relay/test_op_qnn_quantize.py b/tests/python/relay/test_op_qnn_quantize.py index 09b04d8925c6..a284e8bdbc82 100644 --- a/tests/python/relay/test_op_qnn_quantize.py +++ b/tests/python/relay/test_op_qnn_quantize.py @@ -32,7 +32,7 @@ def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data, verify_ out_dtype=out_dtype) mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output) mod = tvm.IRModule.from_expr(mod) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) rt_mod.set_input(input_data=in_data) diff --git a/tests/python/relay/test_op_qnn_requantize.py b/tests/python/relay/test_op_qnn_requantize.py index 81233972cb28..fb52b3030582 100644 --- a/tests/python/relay/test_op_qnn_requantize.py +++ b/tests/python/relay/test_op_qnn_requantize.py @@ -24,7 +24,7 @@ roundings = ["UPWARD", "TONEAREST"] def verify(mod, goldens): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, "llvm", params=None) golden_data, golden_output = goldens rt_mod = graph_runtime.create(graph, lib, ctx=tvm.cpu(0)) diff --git a/tests/python/relay/test_pass_annotate_target.py b/tests/python/relay/test_pass_annotate_target.py index 01ba9b619205..05839466b05a 100644 --- a/tests/python/relay/test_pass_annotate_target.py +++ b/tests/python/relay/test_pass_annotate_target.py @@ -52,7 +52,7 @@ def update_lib(lib): return lib def check_vm_result(): - with 
relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]): + with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]): exe = relay.vm.compile(mod, target=target, params=params) code, lib = exe.save() lib = update_lib(lib) @@ -63,7 +63,7 @@ def check_vm_result(): tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol) def check_graph_runtime_result(): - with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]): + with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]): json, lib, param = relay.build(mod, target=target, params=params) lib = update_lib(lib) rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx) diff --git a/tests/python/relay/test_pass_fast_math.py b/tests/python/relay/test_pass_fast_math.py index e75316f1e04b..93ad034be2ef 100644 --- a/tests/python/relay/test_pass_fast_math.py +++ b/tests/python/relay/test_pass_fast_math.py @@ -29,7 +29,7 @@ def test_exp(): assert "fast_exp" in fast_mod.astext() # Check that FastMath option works for relay.build. - with relay.build_config(opt_level=3, required_pass=['FastMath']): + with tvm.transform.PassContext(opt_level=3, required_pass=['FastMath']): fast_mod = relay.optimize(mod, target='llvm', params=None) assert "fast_exp" in fast_mod[0].astext() @@ -43,7 +43,7 @@ def test_tanh(): assert "fast_tanh" in fast_mod.astext() # Check that FastMath option works for relay.build. 
- with relay.build_config(opt_level=3, required_pass=['FastMath']): + with tvm.transform.PassContext(opt_level=3, required_pass=['FastMath']): fast_mod = relay.optimize(mod, target='llvm', params=None) assert "fast_tanh" in fast_mod[0].astext() diff --git a/tests/python/relay/test_pass_fold_constant.py b/tests/python/relay/test_pass_fold_constant.py index 1e8c6da932c7..fcccab5c6b97 100644 --- a/tests/python/relay/test_pass_fold_constant.py +++ b/tests/python/relay/test_pass_fold_constant.py @@ -213,7 +213,7 @@ def initializer(_, param): mod, params = create_workload(bn_output[0], initializer) mod["main"] = bind_params_by_name(mod["main"], params) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): mod = remove_bn_pass(mod) expect = run_infer_type(expected()) diff --git a/tests/python/relay/test_pass_manager.py b/tests/python/relay/test_pass_manager.py index d6037b5ec1f5..25299caae30b 100644 --- a/tests/python/relay/test_pass_manager.py +++ b/tests/python/relay/test_pass_manager.py @@ -382,7 +382,7 @@ def test_no_pass(): def test_only_module_pass(): passes = [module_pass] sequential = tvm.transform.Sequential(opt_level=1, passes=passes) - with relay.build_config(required_pass=["mod_transform"]): + with tvm.transform.PassContext(required_pass=["mod_transform"]): ret_mod = sequential(mod) # Check the subtract function. sub_var, new_sub = extract_var_func(ret_mod, v_sub.name_hint) @@ -397,7 +397,7 @@ def test_only_function_pass(): # Check the subtract function. 
passes = [function_pass] sequential = tvm.transform.Sequential(opt_level=1, passes=passes) - with relay.build_config(required_pass=["func_transform"]): + with tvm.transform.PassContext(required_pass=["func_transform"]): ret_mod = sequential(mod) _, new_sub = extract_var_func(ret_mod, v_sub.name_hint) check_func(new_sub, get_ref_sub()) @@ -413,7 +413,7 @@ def test_multiple_passes(): passes = [module_pass, function_pass] sequential = tvm.transform.Sequential(opt_level=1, passes=passes) required = ["mod_transform", "func_transform"] - with relay.build_config(required_pass=required): + with tvm.transform.PassContext(required_pass=required): ret_mod = sequential(mod) # Check the abs function is added. @@ -490,7 +490,7 @@ def expected(): ]) mod = tvm.IRModule({"main": before()}) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): with tvm.target.create("llvm"): mod = seq(mod) @@ -515,7 +515,7 @@ def test_print_ir(capfd): ]) mod = tvm.IRModule({"main": func}) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): mod = seq(mod) out = capfd.readouterr().err @@ -549,7 +549,7 @@ def test_print_debug_callback(): assert __TRACE_COUNTER__ == 0 mod = tvm.IRModule({"main": func}) - with relay.build_config(opt_level=3, trace=_tracer): + with tvm.transform.PassContext(opt_level=3, trace=_tracer): mod = seq(mod) assert __TRACE_COUNTER__ == 3 diff --git a/tests/python/relay/test_pass_partition_graph.py b/tests/python/relay/test_pass_partition_graph.py index fd76285a5144..8c1ed791ecff 100644 --- a/tests/python/relay/test_pass_partition_graph.py +++ b/tests/python/relay/test_pass_partition_graph.py @@ -194,7 +194,7 @@ def update_lib(lib): def check_vm_result(): compile_engine.get().clear() - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): exe = relay.vm.compile(mod, target=target, params=params) code, lib = exe.save() lib = update_lib(lib) @@ -206,7 +206,7 @@ def check_vm_result(): 
def check_graph_runtime_result(): compile_engine.get().clear() - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): json, lib, param = relay.build(mod, target=target, params=params) lib = update_lib(lib) rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx) @@ -504,7 +504,7 @@ def partition(): transform.AlterOpLayout(), ]) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): mod = opt_pass(mod) return mod @@ -587,7 +587,7 @@ def partition(): transform.Inline(), ]) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): mod = opt_pass(mod) return mod @@ -877,7 +877,8 @@ def get_partitoned_mod(mod, params, pattern_table): transform.PartitionGraph() ]) - with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]): + with tvm.transform.PassContext(opt_level=3, + disabled_pass=["AlterOpLayout"]): return composite_partition(mod) def test_detect_pattern(pattern_table, include_bn, include_sigmoid, diff --git a/tests/python/relay/test_simplify_fc_transpose.py b/tests/python/relay/test_simplify_fc_transpose.py index 537a5a29348c..e29038c49b8e 100644 --- a/tests/python/relay/test_simplify_fc_transpose.py +++ b/tests/python/relay/test_simplify_fc_transpose.py @@ -27,7 +27,7 @@ from tvm.relay.data_dep_optimization import simplify_fc_transpose def run_func(func, params, x): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, new_params = relay.build(func, "llvm", params=params) from tvm.contrib import graph_runtime diff --git a/tests/python/relay/test_sparse_dense_convert.py b/tests/python/relay/test_sparse_dense_convert.py index c4f0572c0482..e0204aeaf9d0 100644 --- a/tests/python/relay/test_sparse_dense_convert.py +++ b/tests/python/relay/test_sparse_dense_convert.py @@ -46,7 +46,7 @@ def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype="float32"): return s def run_func(func, params, x): - with 
relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, new_params = relay.build(func, "llvm", params=params) from tvm.contrib import graph_runtime diff --git a/tests/python/unittest/test_runtime_module_export.py b/tests/python/unittest/test_runtime_module_export.py index fce7d2f350dc..8473a67e6e41 100644 --- a/tests/python/unittest/test_runtime_module_export.py +++ b/tests/python/unittest/test_runtime_module_export.py @@ -67,7 +67,7 @@ def verify_gpu_mod_export(obj_format): resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18) resnet50_mod, resnet50_params = relay.testing.resnet.get_workload(num_layers=50) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): _, resnet18_gpu_lib, _ = relay.build_module.build(resnet18_mod, "cuda", params=resnet18_params) _, resnet50_cpu_lib, _ = relay.build_module.build(resnet50_mod, "llvm", params=resnet50_params) @@ -93,7 +93,7 @@ def verify_multi_dso_mod_export(obj_format): return resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): _, resnet18_cpu_lib, _ = relay.build_module.build(resnet18_mod, "llvm", params=resnet18_params) A = te.placeholder((1024,), name='A') @@ -177,7 +177,7 @@ def verify_multi_c_mod_export(): return resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): _, resnet18_cpu_lib, _ = relay.build_module.build(resnet18_mod, "llvm", params=resnet18_params) A = te.placeholder((1024,), name='A') diff --git a/tests/python/unittest/test_target_codegen_blob.py b/tests/python/unittest/test_target_codegen_blob.py index 719ddfe2a820..7cd579397ec8 100644 --- a/tests/python/unittest/test_target_codegen_blob.py +++ b/tests/python/unittest/test_target_codegen_blob.py @@ -31,7 +31,7 @@ def test_resnet18(): 
def verify(data): mod, params = relay.testing.resnet.get_workload(num_layers=18) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, graph_params = relay.build_module.build(mod, "llvm", params=params) ctx = tvm.cpu() module = graph_runtime.create(graph, lib, ctx) @@ -42,7 +42,7 @@ def verify(data): return out resnet18_mod, resnet18_params = relay.testing.resnet.get_workload(num_layers=18) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, resnet18_gpu_lib, graph_params = relay.build_module.build(resnet18_mod, "cuda", params=resnet18_params) from tvm.contrib import util diff --git a/tutorials/autotvm/tune_relay_arm.py b/tutorials/autotvm/tune_relay_arm.py index ffd3e8b9b5cb..3b07097ce696 100644 --- a/tutorials/autotvm/tune_relay_arm.py +++ b/tutorials/autotvm/tune_relay_arm.py @@ -311,7 +311,7 @@ def tune_and_evaluate(tuning_opt): # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build( mod, target=target, params=params) diff --git a/tutorials/autotvm/tune_relay_cuda.py b/tutorials/autotvm/tune_relay_cuda.py index 4195075ca66d..a6fe45b96263 100644 --- a/tutorials/autotvm/tune_relay_cuda.py +++ b/tutorials/autotvm/tune_relay_cuda.py @@ -222,7 +222,7 @@ def tune_and_evaluate(tuning_opt): # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build( mod, target=target, params=params) diff --git a/tutorials/autotvm/tune_relay_mobile_gpu.py b/tutorials/autotvm/tune_relay_mobile_gpu.py index ad7460829329..4748f41e96c3 100644 --- a/tutorials/autotvm/tune_relay_mobile_gpu.py +++ b/tutorials/autotvm/tune_relay_mobile_gpu.py @@ 
-308,7 +308,7 @@ def tune_and_evaluate(tuning_opt): # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build( mod, target=target, params=params, target_host=target_host) # export library diff --git a/tutorials/autotvm/tune_relay_x86.py b/tutorials/autotvm/tune_relay_x86.py index 15ce2de4b82f..dcc5b25c8288 100644 --- a/tutorials/autotvm/tune_relay_x86.py +++ b/tutorials/autotvm/tune_relay_x86.py @@ -189,7 +189,7 @@ def tune_and_evaluate(tuning_opt): # compile kernels with graph-level best records with autotvm.apply_graph_best(graph_opt_sch_file): print("Compile...") - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build( mod, target=target, params=params) diff --git a/tutorials/dev/relay_pass_infra.py b/tutorials/dev/relay_pass_infra.py index 980d96ccc119..df40733164e8 100644 --- a/tutorials/dev/relay_pass_infra.py +++ b/tutorials/dev/relay_pass_infra.py @@ -160,7 +160,7 @@ def alter_conv2d(attrs, inputs, tinfos, out_type): # however, provides a configuration interface # for users to customize the optimization level that they want to execute. -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): mod2 = seq(mod) print(mod2) @@ -173,7 +173,7 @@ def alter_conv2d(attrs, inputs, tinfos, out_type): # EliminateCommonSubexpr as following. The printed module will again show two # identical addition operations. -with relay.build_config(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]): +with tvm.transform.PassContext(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]): mod3 = seq(mod) print(mod3) @@ -182,12 +182,12 @@ def alter_conv2d(attrs, inputs, tinfos, out_type): # provides a means to make pass target-aware. For example, the layout # alteration pass falls in such category. 
-with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): mod4 = seq(mod) print(mod4) seq1 = tvm.transform.Sequential([relay.transform.AlterOpLayout()]) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): with tvm.target.create("llvm"): mod5 = seq1(mod) print(mod5) @@ -242,7 +242,7 @@ def visit_constant(self, c): relay.transform.EliminateCommonSubexpr(), relay.transform.FuseOps(), tvm.transform.PrintIR()]) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): mod = seq(mod) print("done") diff --git a/tutorials/frontend/build_gcn.py b/tutorials/frontend/build_gcn.py index 6ac518e42032..19719a5378eb 100644 --- a/tutorials/frontend/build_gcn.py +++ b/tutorials/frontend/build_gcn.py @@ -336,7 +336,7 @@ def prepare_params(g, data): mod = tvm.IRModule() mod["main"] = func # Build with Relay -with relay.build_config(opt_level=0): # Currently only support opt_level=0 +with tvm.transform.PassContext(opt_level=0): # Currently only support opt_level=0 graph, lib, params = relay.build(mod, target, params=params) # Generate graph runtime diff --git a/tutorials/frontend/deploy_model_on_android.py b/tutorials/frontend/deploy_model_on_android.py index 17ec9cb6baa1..bc5b5239a889 100644 --- a/tutorials/frontend/deploy_model_on_android.py +++ b/tutorials/frontend/deploy_model_on_android.py @@ -263,7 +263,7 @@ def transform_image(image): shape_dict = {input_name: x.shape} mod, params = relay.frontend.from_keras(keras_mobilenet_v2, shape_dict) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target=target, target_host=target_host, params=params) diff --git a/tutorials/frontend/deploy_model_on_rasp.py b/tutorials/frontend/deploy_model_on_rasp.py index ef707feedd2f..25df34128415 100644 --- a/tutorials/frontend/deploy_model_on_rasp.py +++ b/tutorials/frontend/deploy_model_on_rasp.py @@ -179,7 +179,7 @@ def 
transform_image(image): # The above line is a simple form of # target = tvm.target.create('llvm -device=arm_cpu -model=bcm2837 -target=armv7l-linux-gnueabihf -mattr=+neon') -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(func, target, params=params) # After `relay.build`, you will get three return values: graph, diff --git a/tutorials/frontend/deploy_prequantized.py b/tutorials/frontend/deploy_prequantized.py index 40279778c045..d6183d68ad4a 100644 --- a/tutorials/frontend/deploy_prequantized.py +++ b/tutorials/frontend/deploy_prequantized.py @@ -81,7 +81,7 @@ def get_synset(): def run_tvm_model(mod, params, input_name, inp, target="llvm"): - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): json, lib, params = relay.build(mod, target=target, params=params) runtime = tvm.contrib.graph_runtime.create(json, lib, tvm.context(target, 0)) diff --git a/tutorials/frontend/deploy_prequantized_tflite.py b/tutorials/frontend/deploy_prequantized_tflite.py index 5fd683723b16..ecd283ac46c8 100644 --- a/tutorials/frontend/deploy_prequantized_tflite.py +++ b/tutorials/frontend/deploy_prequantized_tflite.py @@ -198,7 +198,7 @@ def run_tvm(graph, lib, params): # Lets now the compile the Relay module. We use the "llvm" target here. Please replace it with the # target platform that you are interested in. 
target = 'llvm' -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build(mod, target=target, params=params) diff --git a/tutorials/frontend/deploy_ssd_gluoncv.py b/tutorials/frontend/deploy_ssd_gluoncv.py index 6126df0e73ab..e2fc3c59cb33 100644 --- a/tutorials/frontend/deploy_ssd_gluoncv.py +++ b/tutorials/frontend/deploy_ssd_gluoncv.py @@ -87,7 +87,7 @@ def build(target): mod, params = relay.frontend.from_mxnet(block, {"data": dshape}) - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) return graph, lib, params diff --git a/tutorials/frontend/from_caffe2.py b/tutorials/frontend/from_caffe2.py index 8fad80df1d1e..76741d0324e2 100644 --- a/tutorials/frontend/from_caffe2.py +++ b/tutorials/frontend/from_caffe2.py @@ -88,7 +88,7 @@ def transform_image(image): # compile the model # target x86 CPU target = 'llvm' -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) ###################################################################### diff --git a/tutorials/frontend/from_coreml.py b/tutorials/frontend/from_coreml.py index 2a0c8dbc93f2..beac48325237 100644 --- a/tutorials/frontend/from_coreml.py +++ b/tutorials/frontend/from_coreml.py @@ -74,7 +74,7 @@ # Parse CoreML model and convert into Relay computation graph mod, params = relay.frontend.from_coreml(mlmodel, shape_dict) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) diff --git a/tutorials/frontend/from_darknet.py b/tutorials/frontend/from_darknet.py index e2c1ea5aacbf..6d84463ca7f0 100644 --- a/tutorials/frontend/from_darknet.py +++ b/tutorials/frontend/from_darknet.py @@ -100,7 +100,7 @@ data = np.empty([batch_size, net.c, net.h, net.w], dtype) shape = 
{'data': data.shape} print("Compiling the model...") -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target=target, target_host=target_host, diff --git a/tutorials/frontend/from_keras.py b/tutorials/frontend/from_keras.py index 928a8acbefa7..7ece790eb177 100644 --- a/tutorials/frontend/from_keras.py +++ b/tutorials/frontend/from_keras.py @@ -79,7 +79,7 @@ # compile the model target = 'cuda' ctx = tvm.gpu(0) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): executor = relay.build_module.create_executor('graph', mod, ctx, target) ###################################################################### diff --git a/tutorials/frontend/from_mxnet.py b/tutorials/frontend/from_mxnet.py index d0e4c4ab0d18..6e6b2d79b209 100644 --- a/tutorials/frontend/from_mxnet.py +++ b/tutorials/frontend/from_mxnet.py @@ -90,7 +90,7 @@ def transform_image(image): ###################################################################### # now compile the graph target = 'cuda' -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(func, target, params=params) ###################################################################### diff --git a/tutorials/frontend/from_onnx.py b/tutorials/frontend/from_onnx.py index 766451c2f8b1..9973a08153dd 100644 --- a/tutorials/frontend/from_onnx.py +++ b/tutorials/frontend/from_onnx.py @@ -74,7 +74,7 @@ shape_dict = {input_name: x.shape} mod, params = relay.frontend.from_onnx(onnx_model, shape_dict) -with relay.build_config(opt_level=1): +with tvm.transform.PassContext(opt_level=1): intrp = relay.build_module.create_executor('graph', mod, tvm.cpu(0), target) ###################################################################### diff --git a/tutorials/frontend/from_pytorch.py b/tutorials/frontend/from_pytorch.py index 8354b0eca193..53d29a9447be 100644 --- a/tutorials/frontend/from_pytorch.py 
+++ b/tutorials/frontend/from_pytorch.py @@ -101,7 +101,7 @@ target = 'llvm' target_host = 'llvm' ctx = tvm.cpu(0) -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target=target, target_host=target_host, diff --git a/tutorials/frontend/from_tensorflow.py b/tutorials/frontend/from_tensorflow.py index 0ebd733ef9aa..b7b3d69c780b 100644 --- a/tutorials/frontend/from_tensorflow.py +++ b/tutorials/frontend/from_tensorflow.py @@ -144,7 +144,7 @@ # params: final params after compilation. # lib: target library which can be deployed on target with TVM runtime. -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target=target, target_host=target_host, diff --git a/tutorials/frontend/from_tflite.py b/tutorials/frontend/from_tflite.py index e01a4ecdf4c3..67f6f9f854fd 100644 --- a/tutorials/frontend/from_tflite.py +++ b/tutorials/frontend/from_tflite.py @@ -135,7 +135,7 @@ def extract(path): # Build the module against to x86 CPU target = "llvm" -with relay.build_config(opt_level=3): +with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(mod, target, params=params) ###################################################################### diff --git a/tutorials/relay_quick_start.py b/tutorials/relay_quick_start.py index b2174a048035..e52a99aeccd4 100644 --- a/tutorials/relay_quick_start.py +++ b/tutorials/relay_quick_start.py @@ -96,7 +96,7 @@ opt_level = 3 target = tvm.target.cuda() -with relay.build_config(opt_level=opt_level): +with tvm.transform.PassContext(opt_level=opt_level): graph, lib, params = relay.build(mod, target, params=params) ##################################################################### diff --git a/vta/scripts/tune_resnet.py b/vta/scripts/tune_resnet.py index 26c240ed7fad..2d358d335389 100644 --- a/vta/scripts/tune_resnet.py +++ b/vta/scripts/tune_resnet.py @@ -127,7 +127,7 @@ def 
compile_network(opt, env, target): # Perform quantization in Relay # Note: We set opt_level to 3 in order to fold batch norm - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]): relay_prog = relay.quantize.quantize(mod["main"], params=params) @@ -272,7 +272,7 @@ def tune_tasks(tasks, # Compile network print("Compiling network with best tuning parameters...") if target.device_name != "vta": - with relay.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}): + with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}): graph, lib, params = relay.build( relay_prog, target=target, params=params, target_host=env.target_host) diff --git a/vta/tutorials/autotvm/tune_relay_vta.py b/vta/tutorials/autotvm/tune_relay_vta.py index 63106a5dc588..a92b1ee5d90b 100644 --- a/vta/tutorials/autotvm/tune_relay_vta.py +++ b/vta/tutorials/autotvm/tune_relay_vta.py @@ -92,7 +92,7 @@ def compile_network(env, target, model, start_pack, stop_pack): # Perform quantization in Relay # Note: We set opt_level to 3 in order to fold batch norm - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]): mod = relay.quantize.quantize(mod, params=params) diff --git a/vta/tutorials/frontend/deploy_classification.py b/vta/tutorials/frontend/deploy_classification.py index 7ca4b989eb82..3a367851ed25 100644 --- a/vta/tutorials/frontend/deploy_classification.py +++ b/vta/tutorials/frontend/deploy_classification.py @@ -171,7 +171,7 @@ if target.device_name == "vta": # Perform quantization in Relay # Note: We set opt_level to 3 in order to fold batch norm - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]): mod = relay.quantize.quantize(mod, params=params) diff --git 
a/vta/tutorials/frontend/deploy_detection.py b/vta/tutorials/frontend/deploy_detection.py index efcd2c43591d..5039488149d5 100644 --- a/vta/tutorials/frontend/deploy_detection.py +++ b/vta/tutorials/frontend/deploy_detection.py @@ -207,7 +207,7 @@ if target.device_name == "vta": # Perform quantization in Relay # Note: We set opt_level to 3 in order to fold batch norm - with relay.build_config(opt_level=3): + with tvm.transform.PassContext(opt_level=3): with relay.quantize.qconfig(global_scale=33.0, skip_conv_layers=[0], store_lowbit_output=True,