Skip to content

Commit

Permalink
[tutorial][benchmark] nnvm -> relay (apache#4368)
Browse files Browse the repository at this point in the history
* [tutorial] nnvm -> relay

* use relay workload

* delete mobilenet_v2 option
  • Loading branch information
yzhliu authored and yongwww committed Nov 26, 2019
1 parent a063118 commit 8ecbb1b
Show file tree
Hide file tree
Showing 5 changed files with 30 additions and 44 deletions.
8 changes: 4 additions & 4 deletions apps/benchmark/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,9 @@ In general, the performance should also be good.

It is recommended that you run tuning by yourself if you have your customized network or devices.
Please follow the tutorial for
[NVIDIA GPU](https://docs.tvm.ai/tutorials/autotvm/tune_nnvm_cuda.html),
[ARM CPU](https://docs.tvm.ai/tutorials/autotvm/tune_nnvm_arm.html),
[Mobile GPU](https://docs.tvm.ai/tutorials/autotvm/tune_nnvm_mobile_gpu.html).
[NVIDIA GPU](https://docs.tvm.ai/tutorials/autotvm/tune_conv2d_cuda.html),
[ARM CPU](https://docs.tvm.ai/tutorials/autotvm/tune_relay_arm.html),
[Mobile GPU](https://docs.tvm.ai/tutorials/autotvm/tune_relay_mobile_gpu.html).

### NVIDIA GPU

Expand Down Expand Up @@ -67,7 +67,7 @@ python3 -m tvm.exec.rpc_tracker

2. Register devices to the tracker
* For Linux device
* Build tvm runtime on your device [Help](https://docs.tvm.ai/tutorials/nnvm/deploy_model_on_rasp.html#build-tvm-runtime-on-device)
* Build tvm runtime on your device [Help](https://docs.tvm.ai/tutorials/frontend/deploy_model_on_rasp.html#build-tvm-runtime-on-device)
* Register your device to tracker by
```bash
python3 -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=[DEVICE_KEY]
Expand Down
12 changes: 5 additions & 7 deletions apps/benchmark/arm_cpu_imagenet_bench.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,7 @@
import tvm
from tvm.contrib.util import tempdir
import tvm.contrib.graph_runtime as runtime
import nnvm.compiler
import nnvm.testing
from tvm import relay

from util import get_network, print_progress

Expand All @@ -39,10 +38,9 @@ def evaluate_network(network, target, target_host, repeat):
net, params, input_shape, output_shape = get_network(network, batch_size=1)

print_progress("%-20s building..." % network)
with nnvm.compiler.build_config(opt_level=3):
graph, lib, params = nnvm.compiler.build(
net, target=target, target_host=target_host,
shape={'data': input_shape}, params=params, dtype=dtype)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(
net, target=target, target_host=target_host, params=params)

tmp = tempdir()
if 'android' in str(target):
Expand Down Expand Up @@ -76,7 +74,7 @@ def evaluate_network(network, target, target_host, repeat):
parser.add_argument("--network", type=str, choices=
['resnet-18', 'resnet-34', 'resnet-50',
'vgg-16', 'vgg-19', 'densenet-121', 'inception_v3',
'mobilenet', 'mobilenet_v2', 'squeezenet_v1.0', 'squeezenet_v1.1'],
'mobilenet', 'squeezenet_v1.0', 'squeezenet_v1.1'],
help='The name of neural network')
parser.add_argument("--model", type=str, choices=
['rk3399', 'mate10', 'mate10pro', 'p20', 'p20pro',
Expand Down
11 changes: 4 additions & 7 deletions apps/benchmark/gpu_imagenet_bench.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,20 +23,17 @@
import numpy as np

import tvm
from tvm.contrib.util import tempdir
import tvm.contrib.graph_runtime as runtime
import nnvm.compiler
import nnvm.testing
from tvm import relay

from util import get_network


def benchmark(network, target):
net, params, input_shape, output_shape = get_network(network, batch_size=1)

with nnvm.compiler.build_config(opt_level=3):
graph, lib, params = nnvm.compiler.build(
net, target=target, shape={'data': input_shape}, params=params, dtype=dtype)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(net, target=target, params=params)

# create runtime
ctx = tvm.context(str(target), 0)
Expand All @@ -56,7 +53,7 @@ def benchmark(network, target):
parser.add_argument("--network", type=str, choices=
['resnet-18', 'resnet-34', 'resnet-50',
'vgg-16', 'vgg-19', 'densenet-121', 'inception_v3',
'mobilenet', 'mobilenet_v2', 'squeezenet_v1.0', 'squeezenet_v1.1'],
'mobilenet', 'squeezenet_v1.0', 'squeezenet_v1.1'],
help='The name of neural network')
parser.add_argument("--model", type=str,
choices=['1080ti', 'titanx', 'tx2', 'gfx900'], default='1080ti',
Expand Down
12 changes: 5 additions & 7 deletions apps/benchmark/mobile_gpu_imagenet_bench.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,7 @@
import tvm
from tvm.contrib.util import tempdir
import tvm.contrib.graph_runtime as runtime
import nnvm.compiler
import nnvm.testing
from tvm import relay

from util import get_network, print_progress

Expand All @@ -38,10 +37,9 @@ def evaluate_network(network, target, target_host, dtype, repeat):
net, params, input_shape, output_shape = get_network(network, batch_size=1, dtype=dtype)

print_progress("%-20s building..." % network)
with nnvm.compiler.build_config(opt_level=3):
graph, lib, params = nnvm.compiler.build(
net, target=target, target_host=target_host,
shape={'data': input_shape}, params=params, dtype=dtype)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(
net, target=target, target_host=target_host, params=params)

tmp = tempdir()
if 'android' in str(target) or 'android' in str(target_host):
Expand Down Expand Up @@ -75,7 +73,7 @@ def evaluate_network(network, target, target_host, dtype, repeat):
parser.add_argument("--network", type=str, choices=
['resnet-18', 'resnet-34', 'resnet-50',
'vgg-16', 'vgg-19', 'densenet-121', 'inception_v3',
'mobilenet', 'mobilenet_v2', 'squeezenet_v1.0', 'squeezenet_v1.1'],
'mobilenet', 'squeezenet_v1.0', 'squeezenet_v1.1'],
help='The name of neural network')
parser.add_argument("--model", type=str, choices=
['rk3399'], default='rk3399',
Expand Down
31 changes: 12 additions & 19 deletions apps/benchmark/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,8 @@
"""Utility for benchmark"""

import sys
import nnvm
from tvm import relay
from tvm.relay import testing

def get_network(name, batch_size, dtype='float32'):
"""Get the symbol definition and random weight of a network
Expand Down Expand Up @@ -46,38 +47,30 @@ def get_network(name, batch_size, dtype='float32'):
output_shape = (batch_size, 1000)

if name == 'mobilenet':
net, params = nnvm.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
elif name == 'mobilenet_v2':
net, params = nnvm.testing.mobilenet_v2.get_workload(batch_size=batch_size, dtype=dtype)
net, params = testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)
elif name == 'inception_v3':
input_shape = (batch_size, 3, 299, 299)
net, params = nnvm.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
net, params = testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif "resnet" in name:
n_layer = int(name.split('-')[1])
net, params = nnvm.testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
net, params = testing.resnet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
elif "vgg" in name:
n_layer = int(name.split('-')[1])
net, params = nnvm.testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
net, params = testing.vgg.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
elif "densenet" in name:
n_layer = int(name.split('-')[1])
net, params = nnvm.testing.densenet.get_workload(num_layers=n_layer, batch_size=batch_size, dtype=dtype)
net, params = testing.densenet.get_workload(densenet_size=n_layer, batch_size=batch_size, dtype=dtype)
elif "squeezenet" in name:
version = name.split("_v")[1]
net, params = nnvm.testing.squeezenet.get_workload(batch_size=batch_size, version=version, dtype=dtype)
elif name == 'custom':
# an example for custom network
from nnvm.testing import utils
net = nnvm.sym.Variable('data')
net = nnvm.sym.conv2d(net, channels=4, kernel_size=(3,3), padding=(1,1))
net = nnvm.sym.flatten(net)
net = nnvm.sym.dense(net, units=1000)
net, params = utils.create_workload(net, batch_size, (3, 224, 224), dtype=dtype)
net, params = testing.squeezenet.get_workload(batch_size=batch_size, version=version, dtype=dtype)
elif name == 'mxnet':
# an example for mxnet model
from mxnet.gluon.model_zoo.vision import get_model
block = get_model('resnet18_v1', pretrained=True)
net, params = nnvm.frontend.from_mxnet(block)
net = nnvm.sym.softmax(net)
net, params = relay.frontend.from_mxnet(block, shape={'data': input_shape}, dtype=dtype)
net = net["main"]
net = relay.Function(net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs)
net = relay.Module.from_expr(net)
else:
raise ValueError("Unsupported network: " + name)

Expand Down

0 comments on commit 8ecbb1b

Please sign in to comment.