[AutoScheduler] Improve warning messages (apache#6935)
* [AutoScheduler] Improve warning messages

* fix lint
merrymercy authored and trevor-m committed Dec 4, 2020
1 parent d0d541a commit 68ad30a
Showing 2 changed files with 18 additions and 5 deletions.
python/tvm/auto_scheduler/dispatcher.py (7 changes: 4 additions & 3 deletions)
@@ -258,10 +258,11 @@ def query(self, target, workload_key, has_complex_op, dag):

         if self.verbose == 2 or (has_complex_op and self.verbose == 1):
             msg = (
-                "Cannot find tuned schedules for target=%s, workload_key=%s, compute:\n%s"
+                "-----------------------------------\n"
+                "Cannot find tuned schedules for target=%s, workload_key=%s. "
                 "A fallback TOPI schedule is used, "
-                "which may bring great performance regression or even compilation failure."
-                % (target, workload_key, dag)
+                "which may bring great performance regression or even compilation failure. "
+                "Compute DAG info:\n%s" % (target, workload_key, dag)
             )
             if msg not in self.messages:
                 self.messages.add(msg)
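For context (not part of this diff): the warning above is emitted when relay.build compiles a workload for which no auto_scheduler tuning record is found. The sketch below shows the usual way tuned records are supplied so the fallback path is not taken; the log file name tune.json and the tiny dense network are illustrative assumptions, not part of this commit.

import numpy as np
import tvm
from tvm import auto_scheduler, relay

# Tiny example network (a single dense layer), purely for illustration.
data = relay.var("data", shape=(1, 16), dtype="float32")
weight = relay.var("weight", shape=(8, 16), dtype="float32")
mod = tvm.IRModule.from_expr(relay.nn.dense(data, weight))
params = {"weight": tvm.nd.array(np.random.rand(8, 16).astype("float32"))}

# Hypothetical log file produced by a prior auto_scheduler tuning run.
log_file = "tune.json"

# ApplyHistoryBest lets DispatchContext.query() return tuned schedules,
# so the fallback-TOPI warning shown above is not emitted.
with auto_scheduler.ApplyHistoryBest(log_file):
    with tvm.transform.PassContext(
        opt_level=3, config={"relay.backend.use_auto_scheduler": True}
    ):
        lib = relay.build(mod, target="cuda", params=params)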
python/tvm/relay/op/strategy/cuda.py (16 changes: 14 additions & 2 deletions)
@@ -101,6 +101,18 @@ def schedule_lrn_cuda(attrs, outs, target):
     return topi.cuda.schedule_lrn(outs)


+def naive_schedule(_, outs, target):
+    """Return the naive default schedule"""
+    if "gpu" in target.keys:
+        # For GPU, we at least need thread binding to make a valid schedule.
+        # So the naive schedule cannot be compiled.
+        raise RuntimeError(
+            "Cannot compile for GPU targets if no tuned schedule is found. "
+            "Please see the warning messages above for more information about the failed workloads."
+        )
+    return tvm.te.create_schedule(outs[-1].op)
+
+
 @conv2d_strategy.register(["cuda", "gpu"])
 def conv2d_strategy_cuda(attrs, inputs, out_type, target):
     """conv2d cuda strategy"""
Expand Down Expand Up @@ -224,7 +236,7 @@ def conv2d_strategy_cuda(attrs, inputs, out_type, target):
if use_auto_scheduler and judge_winograd_auto_scheduler:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_winograd_nhwc),
wrap_topi_schedule(tvm.te.create_schedule),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc.winograd",
plevel=15,
)
Expand Down Expand Up @@ -451,7 +463,7 @@ def conv2d_winograd_without_weight_transfrom_strategy_cuda(attrs, inputs, out_ty
if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_winograd_nhwc_without_weight_transform),
wrap_topi_schedule(tvm.te.create_schedule),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc_winograd_without_weight_transform",
plevel=15,
)
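An aside on the comment inside naive_schedule ("we at least need thread binding to make a valid schedule"): the minimal TE sketch below, not part of this commit, illustrates why a plain schedule cannot be built for CUDA until its axes are bound to GPU threads.

import tvm
from tvm import te

# An elementwise add over a fixed-size 1-D tensor.
n = 1024
A = te.placeholder((n,), name="A")
B = te.compute((n,), lambda i: A[i] + 1.0, name="B")

s = te.create_schedule(B.op)
# tvm.build(s, [A, B], target="cuda")  # would fail here: no thread binding yet

# Bind the output axis to CUDA blocks/threads; after this, building for
# target="cuda" produces a valid kernel.
bx, tx = s[B].split(B.op.axis[0], factor=64)
s[B].bind(bx, te.thread_axis("blockIdx.x"))
s[B].bind(tx, te.thread_axis("threadIdx.x"))
lib = tvm.build(s, [A, B], target="cuda")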
