Merge with pull request apache#44 (apache#46)
* resnet example merged to imagenet

* merge with master
Laurawly authored and tqchen committed May 26, 2018
1 parent 7eaf482 commit ba208f3
Showing 2 changed files with 16 additions and 9 deletions.
16 changes: 10 additions & 6 deletions nnvm/python/nnvm/testing/resnet.py
@@ -40,7 +40,8 @@ def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True):
     stride : tuple
         Stride used in convolution
     dim_match : Boolean
-        True means channel number between input and output is the same, otherwise means differ
+        True means channel number between input and output is the same,
+        otherwise means differ
     name : str
         Base name of the operators
     """
@@ -146,7 +147,7 @@ def resnet(units, num_stages, filter_list, num_classes, image_shape,
         fc1 = sym.cast(data=fc1, dtype=np.float32)
     return sym.softmax(data=fc1, name='softmax')
 
-def get_symbol(num_classes, num_layers=50, image_shape=(3, 224, 224), dtype='float32'):
+def get_symbol(num_classes, num_layers=50, image_shape=(3, 224, 224), dtype='float32', **kwargs):
     """
     Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
     Original author Wei Wu
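
The only signature change here is the trailing **kwargs, which lets callers forward extra keyword arguments without a TypeError. A small hypothetical illustration:

# Hypothetical call: an unrecognized keyword is now silently absorbed by
# **kwargs rather than raising a TypeError.
net = get_symbol(num_classes=1000, num_layers=50,
                 image_shape=(3, 224, 224), dtype='float32',
                 extra_option=True)  # hypothetical keyword, ignored by get_symbol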
@@ -198,8 +199,8 @@ def get_symbol(num_classes, num_layers=50, image_shape=(3, 224, 224), dtype='float32'):
                   bottle_neck=bottle_neck,
                   dtype=dtype)
 
-def get_workload(batch_size, num_classes=1000, image_shape=(3, 224, 224),
-                 dtype="float32", **kwargs):
+def get_workload(batch_size=1, num_classes=1000, num_layers=18,
+                 image_shape=(3, 224, 224), dtype="float32", **kwargs):
     """Get benchmark workload for resnet
 
     Parameters
@@ -210,6 +211,9 @@ def get_workload(batch_size, num_classes=1000, image_shape=(3, 224, 224),
     num_classes : int, optional
         Number of classes
 
+    num_layers : int, optional
+        Number of layers
+
     image_shape : tuple, optional
         The input image shape
@@ -227,6 +231,6 @@ def get_workload(batch_size, num_classes=1000, image_shape=(3, 224, 224),
     params : dict of str to NDArray
         The parameters.
     """
-    net = get_symbol(num_classes=num_classes, image_shape=image_shape,
-                     dtype=dtype, **kwargs)
+    net = get_symbol(num_classes=num_classes, num_layers=num_layers,
+                     image_shape=image_shape, dtype=dtype, **kwargs)
     return create_workload(net, batch_size, image_shape, dtype)
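
Taken together, the updated entry point can be exercised as below (a hypothetical usage sketch; it assumes the nnvm.testing package layout shown in the file path above and the defaults introduced in this commit):

from nnvm import testing

# Hypothetical call: every argument now has a default, so an 18-layer
# ResNet workload can be requested with no arguments at all.
net, params = testing.resnet.get_workload()

# Equivalent explicit form:
net, params = testing.resnet.get_workload(
    batch_size=1, num_classes=1000, num_layers=18,
    image_shape=(3, 224, 224), dtype="float32")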
@@ -1,9 +1,9 @@
 """
-Compile MobileNet Inference on GPU
+Compile ImageNet Inference on GPU
 ==================================
 **Author**: `Yuwei Hu <https://huyuwei.github.io/>`_
 
-This is an example of using NNVM to compile MobileNet model and deploy its inference on GPU.
+This is an example of using NNVM to compile a MobileNet/ResNet model and deploy its inference on GPU.
 
 To begin with, we import nnvm (for compilation) and TVM (for deployment).
 """
@@ -39,14 +39,17 @@ def tvm_callback_cuda_compile(code):
 # .. note::
 #
 #   In a typical workflow, we can get this pair from :any:`nnvm.frontend`
 #
 #   Example: /nnvm-top/tests/python/frontend/mxnet/test_forward.py
 target = "cuda"
 ctx = tvm.gpu(0)
 batch_size = 1
 num_classes = 1000
 image_shape = (3, 224, 224)
 data_shape = (batch_size,) + image_shape
 out_shape = (batch_size, num_classes)
+# To use ResNet to do inference, run the following instead
+# net, params = nnvm.testing.resnet.get_workload(
+#     batch_size=1, image_shape=image_shape)
 net, params = nnvm.testing.mobilenet.get_workload(
     batch_size=1, image_shape=image_shape)
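
The rest of the tutorial (elided from this diff) compiles and runs the chosen network. A minimal sketch of those steps under the NNVM/TVM API of this era, reusing net, params, target, ctx, data_shape, and out_shape from above; the tutorial's exact code is not shown here:

import numpy as np
from tvm.contrib import graph_runtime

# Compile the symbolic graph for the CUDA target chosen above.
with nnvm.compiler.build_config(opt_level=3):
    graph, lib, params = nnvm.compiler.build(
        net, target, shape={"data": data_shape}, params=params)

# Deploy on the GPU and run one random input batch.
module = graph_runtime.create(graph, lib, ctx)
data = np.random.uniform(size=data_shape).astype("float32")
module.set_input("data", tvm.nd.array(data))
module.set_input(**params)
module.run()
out = module.get_output(0, tvm.nd.empty(out_shape)).asnumpy()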

