diff --git a/python/tvm/auto_scheduler/dispatcher.py b/python/tvm/auto_scheduler/dispatcher.py
index 19bae8622355..b0b98d8d0f56 100644
--- a/python/tvm/auto_scheduler/dispatcher.py
+++ b/python/tvm/auto_scheduler/dispatcher.py
@@ -258,10 +258,11 @@ def query(self, target, workload_key, has_complex_op, dag):
         if self.verbose == 2 or (has_complex_op and self.verbose == 1):
             msg = (
-                "Cannot find tuned schedules for target=%s, workload_key=%s, compute:\n%s"
+                "-----------------------------------\n"
+                "Cannot find tuned schedules for target=%s, workload_key=%s. "
                 "A fallback TOPI schedule is used, "
-                "which may bring great performance regression or even compilation failure."
-                % (target, workload_key, dag)
+                "which may bring great performance regression or even compilation failure. "
+                "Compute DAG info:\n%s" % (target, workload_key, dag)
             )
             if msg not in self.messages:
                 self.messages.add(msg)
                 logger.warning(msg)
diff --git a/python/tvm/relay/op/strategy/cuda.py b/python/tvm/relay/op/strategy/cuda.py
index 105f50116c3e..5c7091dc64b2 100644
--- a/python/tvm/relay/op/strategy/cuda.py
+++ b/python/tvm/relay/op/strategy/cuda.py
@@ -101,6 +101,17 @@ def schedule_lrn_cuda(attrs, outs, target):
         return topi.cuda.schedule_lrn(outs)
 
 
+def naive_schedule(_, outs, target):
+    """Return the naive default schedule"""
+    if "gpu" in target.keys:
+        # For GPU, we at least need thread binding to make a valid schedule.
+        # So the naive schedule cannot be compiled.
+        raise RuntimeError(
+            "Cannot compile for GPU targets if no tuned schedule is found. Please see the warning messages above for more information about the failed workloads."
+        )
+    return tvm.te.create_schedule(outs[-1].op)
+
+
 @conv2d_strategy.register(["cuda", "gpu"])
 def conv2d_strategy_cuda(attrs, inputs, out_type, target):
     """conv2d cuda strategy"""
@@ -224,7 +235,7 @@ def conv2d_strategy_cuda(attrs, inputs, out_type, target):
             if use_auto_scheduler and judge_winograd_auto_scheduler:
                 strategy.add_implementation(
                     wrap_compute_conv2d(topi.nn.conv2d_winograd_nhwc),
-                    wrap_topi_schedule(tvm.te.create_schedule),
+                    naive_schedule,  # this implementation should never be picked by autotvm
                     name="conv2d_nhwc.winograd",
                     plevel=15,
                 )
@@ -451,7 +462,7 @@ def conv2d_winograd_without_weight_transfrom_strategy_cuda(attrs, inputs, out_ty
     if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
         strategy.add_implementation(
             wrap_compute_conv2d(topi.nn.conv2d_winograd_nhwc_without_weight_transform),
-            wrap_topi_schedule(tvm.te.create_schedule),
+            naive_schedule,  # this implementation should never be picked by autotvm
             name="conv2d_nhwc_winograd_without_weight_transform",
             plevel=15,
         )
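
A note on the first hunk: the single trailing `%` formats the entire warning, because Python concatenates adjacent string literals at parse time, before the `%` operator applies. A standalone sketch with hypothetical placeholder values (the real `target`, `workload_key`, and `dag` are TVM objects, not strings):

    # Adjacent string literals merge into one string before "%" applies,
    # so one argument tuple fills placeholders spread across several lines.
    target, workload_key, dag = "cuda", "wkl_0", "<ComputeDAG>"  # hypothetical values
    msg = (
        "-----------------------------------\n"
        "Cannot find tuned schedules for target=%s, workload_key=%s. "
        "A fallback TOPI schedule is used, "
        "which may bring great performance regression or even compilation failure. "
        "Compute DAG info:\n%s" % (target, workload_key, dag)
    )
    print(msg)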
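
On the `"gpu" in target.keys` check in `naive_schedule`: TVM targets carry a list of generic keys in addition to the backend name, and CUDA targets include the `gpu` key, which is how one membership test covers all GPU backends. A minimal sketch (the printed key lists are illustrative):

    import tvm

    # CUDA targets carry the generic "gpu" key alongside "cuda".
    print(tvm.target.Target("cuda").keys)  # e.g. ['cuda', 'gpu']

    # CPU targets carry "cpu" instead, so naive_schedule falls through
    # to tvm.te.create_schedule for them.
    print(tvm.target.Target("llvm").keys)  # e.g. ['cpu']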
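
Why the naive schedule "cannot be compiled" for GPU: a hedged sketch, assuming a TVM build with the CUDA backend enabled, that reproduces the failure the new `RuntimeError` now surfaces early with a clearer message. An unscheduled tensor expression builds for `llvm`, but for `cuda` no loop axis is bound to `blockIdx`/`threadIdx`, so `tvm.build` rejects it:

    import tvm
    from tvm import te

    n = 1024
    A = te.placeholder((n,), name="A")
    B = te.compute((n,), lambda i: A[i] + 1.0, name="B")
    s = te.create_schedule(B.op)  # the "naive" schedule: no thread binding

    # Valid on CPU: plain serial loops need no thread binding.
    tvm.build(s, [A, B], target="llvm")

    # Invalid on GPU: the build fails verification because no loop axis
    # is bound to GPU threads (the exact error type varies by TVM version).
    try:
        tvm.build(s, [A, B], target="cuda")
    except Exception as err:
        print("naive schedule rejected for CUDA:", err)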