[TOPI] Fix CUDA Library Tuning (apache#6132)
comaniac authored and Trevor Morris committed Aug 26, 2020
1 parent a9e8288 commit 46916ce
Showing 2 changed files with 11 additions and 3 deletions.
7 changes: 5 additions & 2 deletions python/tvm/autotvm/task/space.py
@@ -33,6 +33,7 @@
 import numpy as np
 
 from tvm.te import schedule, thread_axis
+from tvm.tir import expr
 from tvm.autotvm.util import get_const_int
 
 Axis = namedtuple('Axis', ['space', 'index'])
@@ -733,10 +734,12 @@ def add_flop(self, flop):
         Parameters
         ---------
-        flop: int or float
+        flop: int or float or IntImm or FloatImm
             number of float operations
         """
-        self.flop += flop
+        if isinstance(flop, (expr.IntImm, expr.FloatImm)):
+            flop = flop.value
+        self.flop += float(flop)
 
     def raise_error(self, msg):
         """register error in config
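Why this change: add_flop can now receive a tvm.tir.IntImm or FloatImm (e.g., a FLOP count derived from expression shapes), and accumulating such an object directly leaves self.flop as a TIR object rather than a plain Python float. A minimal sketch of the unwrapping logic, assuming a local TVM install; the FLOP number below is illustrative only:

# Minimal sketch: unwrap TIR immediates before accumulating FLOPs.
from tvm import tir

flop = tir.IntImm("int64", 2 * 224 * 224 * 64)  # illustrative FLOP count
if isinstance(flop, (tir.IntImm, tir.FloatImm)):
    flop = flop.value                # unwrap to a plain Python number
total = 0.0
total += float(flop)                 # accumulate as a native float
print(type(total), total)            # <class 'float'> 6422528.0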
7 changes: 6 additions & 1 deletion topi/python/topi/cuda/conv2d.py
@@ -18,6 +18,7 @@
 """Compute definition for conv2d with cuda backend"""
 from tvm import te
 from tvm import autotvm
+from tvm.autotvm.task.space import OtherOptionEntity
 from tvm.contrib import cudnn
 
 from .. import nn, generic
@@ -99,14 +100,18 @@ def conv2d_cudnn(cfg, data, kernel, strides, padding, dilation, groups=1,
     else:
         dtype = data.dtype
 
+    cfg.define_knob('algo', range(8))
+    if cfg.is_fallback:  # Let CUDNN choose the best algo
+        cfg['algo'] = OtherOptionEntity(-1)
+
     return cudnn.conv_forward(data,
                               kernel,
                               [pt, pl],  # cudnn padding pt, pl on both sides of input
                               [stride_h, stride_w],
                               [dilation_h, dilation_w],
                               conv_mode=1,
                               tensor_format=tensor_format,
-                              algo=-1,  # let CUDNN choose the best algo
+                              algo=cfg['algo'].val,
                               conv_dtype=dtype,
                               groups=groups)

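What the conv2d change does: instead of hard-coding algo=-1, the schedule now exposes the cuDNN forward algorithm as an AutoTVM knob with eight candidates (cuDNN defines eight forward convolution algorithms), so the tuner can measure each one; only when no tuning record exists does the fallback path restore the old "let cuDNN choose" behavior. A minimal standalone sketch of that dispatch logic, using hypothetical stand-ins (pick_algo and the stub class below are not TVM's own code):

# Stand-in mimicking tvm.autotvm.task.space.OtherOptionEntity.
class OtherOptionEntity:
    def __init__(self, val):
        self.val = val

def pick_algo(cfg, is_fallback):
    """Return the cuDNN algo index a config resolves to."""
    if is_fallback:                          # no tuning record found
        cfg['algo'] = OtherOptionEntity(-1)  # -1 = let cuDNN choose
    return cfg['algo'].val

tuned = {'algo': OtherOptionEntity(3)}       # e.g., best of knob values 0-7
print(pick_algo(tuned, is_fallback=False))   # 3
print(pick_algo({}, is_fallback=True))       # -1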
