From 95cb444328b8aaccf640632c57cecd02e7001b38 Mon Sep 17 00:00:00 2001 From: Alex Wong <11878166+alexwong@users.noreply.github.com> Date: Thu, 27 Feb 2020 19:54:40 -0800 Subject: [PATCH] Remove internal pytorch_neo (#88) --- docs/api/python/relay/frontend.rst | 2 - python/tvm/relay/frontend/__init__.py | 1 - python/tvm/relay/frontend/pytorch_neo.py | 968 ------------------ .../python/frontend/pytorch_neo/single_op.py | 312 ------ .../frontend/pytorch_neo/test_forward.py | 525 ---------- tests/scripts/task_python_frontend.sh | 3 - 6 files changed, 1811 deletions(-) delete mode 100644 python/tvm/relay/frontend/pytorch_neo.py delete mode 100644 tests/python/frontend/pytorch_neo/single_op.py delete mode 100644 tests/python/frontend/pytorch_neo/test_forward.py diff --git a/docs/api/python/relay/frontend.rst b/docs/api/python/relay/frontend.rst index ef3f2fd0a407..90da0a4d2808 100644 --- a/docs/api/python/relay/frontend.rst +++ b/docs/api/python/relay/frontend.rst @@ -34,5 +34,3 @@ tvm.relay.frontend .. autofunction:: tvm.relay.frontend.from_caffe2 .. autofunction:: tvm.relay.frontend.from_tensorflow - -.. autofunction:: tvm.relay.frontend.from_pytorch_neo diff --git a/python/tvm/relay/frontend/__init__.py b/python/tvm/relay/frontend/__init__.py index 0e772ef6b447..e62334132ecc 100644 --- a/python/tvm/relay/frontend/__init__.py +++ b/python/tvm/relay/frontend/__init__.py @@ -32,4 +32,3 @@ from .caffe2 import from_caffe2 from .tensorflow import from_tensorflow from .darknet import from_darknet -from .pytorch_neo import from_pytorch_neo diff --git a/python/tvm/relay/frontend/pytorch_neo.py b/python/tvm/relay/frontend/pytorch_neo.py deleted file mode 100644 index 15a7281a6116..000000000000 --- a/python/tvm/relay/frontend/pytorch_neo.py +++ /dev/null @@ -1,968 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# pylint: disable=import-self, too-many-lines, len-as-condition, no-else-return, unused-variable, too-many-nested-blocks -# pylint: disable=consider-iterating-dictionary, invalid-name, unused-argument, unused-variable -"""PT: PyTorch frontend.""" -import numpy as np - -import tvm - -from .. import analysis as _analysis -from .. import expr as _expr -from .. import module as _module -from .. 
import op as _op -from .common import get_relay_op -from .common import infer_shape as _infer_shape - -__all__ = ['from_pytorch_neo'] - -# operator implementation -def _elemwise(name): - def _impl(inputs): - data0 = convert_input(inputs[0]) - data1 = convert_input(inputs[1]) - - return get_relay_op(name)(data0, data1) - return _impl - -def _unsqueeze(): - def _impl(inputs): - data = inputs[0] - axis = inputs[1] - - return _op.transform.expand_dims(data, int(axis), 1) - return _impl - -def _concatenate(): - def _impl(inputs): - data = inputs[0] - axis = inputs[1] - - if isinstance(data, (_expr.Call, _expr.TupleGetItem, _expr.Var)): - data = [data] - - return _op.tensor.concatenate(data, int(axis)) - return _impl - -def _slice(): - def _impl(inputs): - data = inputs[0] - strides = [] - - inferred_shape = _infer_shape(data) - end = [] - for infer in inferred_shape: - end.append(int(infer)) - if isinstance(data, _expr.Var): - end = _infer_shape(data) - end = list(end) - - begin = [0]*len(end) - dim = int(inputs[1]) - begin[dim] = int(inputs[2]) - - if inputs[3].isdigit(): - end[dim] = min(end[dim], int(inputs[3])) - - strides.append(int(inputs[4])) - return _op.transform.strided_slice(data, begin, end, strides) - return _impl - -def _select(): - def _impl(inputs): - data = inputs[0] - dim = int(inputs[1]) - index = int(inputs[2]) - - return _op.transform.take(data, _expr.const(index, dtype='int32'), axis=dim) - return _impl - -def _ones(): - def _impl(inputs): - fill_value = _expr.const(1, dtype='float32') - - if isinstance(inputs[0], _expr.Var): - shape = _infer_shape(inputs[0]) - elif isinstance(inputs[0], (_expr.Call, _expr.TupleGetItem)): - shape = _infer_shape(inputs[0]) - else: - shape = inputs[0].shape - - return get_relay_op('full')(fill_value, shape, 'float32') - return _impl - - -def _zeros(): - def _impl(inputs): - fill_value = _expr.const(0, dtype='float32') - - if isinstance(inputs[0], _expr.Var): - shape = _infer_shape(inputs[0]) - elif isinstance(inputs[0], (_expr.Call, _expr.TupleGetItem)): - shape = _infer_shape(inputs[0]) - else: - shape = inputs[0].shape - - return _op.full(fill_value, shape, 'float32') - return _impl - -def _relu(): - def _impl(inputs): - data = inputs[0] - return _op.nn.relu(data) - return _impl - -def _adaptive_avg_2d(): - def _impl(inputs): - data = inputs[0] - output_size = _infer_shape(inputs[1]) - - return _op.contrib.contrib.adaptive_avg_pool2d( - data, - output_size=output_size) - return _impl - -def _adaptive_max_2d(): - def _impl(inputs): - data = inputs[0] - output_size = _infer_shape(inputs[1]) - - return _op.contrib.contrib.adaptive_max_pool2d( - data, - output_size=output_size) - return _impl - -def _maxpool_2d(): - def _impl(inputs): - data = inputs[0] - - pool_size = _infer_shape(inputs[1]) - strides = _infer_shape(inputs[2]) - padding = _infer_shape(inputs[3]) - - ceil_mode = int(inputs[5]) - - return _op.nn.max_pool2d(data, pool_size, strides, padding, "NCHW", ceil_mode) - return _impl - -def _hardtanh(): - def _impl(inputs): - a = inputs[0] - tanh_min = float(inputs[1]) - tanh_max = float(inputs[2]) - return _op.tensor.clip(a, tanh_min, tanh_max) - return _impl - -def _convolution(): - def _impl(inputs): - # Use transpose or normal - use_transpose = False - if inputs[6] == '1': - use_transpose = True - - use_bias = False - if isinstance(inputs[2], _expr.Var): - use_bias = True - - data = inputs[0] - weight = inputs[1] - bias = inputs[2] - - if isinstance(weight, (_expr.Call, _expr.Var, _expr.TupleGetItem)): - inferred_shape = 
_infer_shape(weight) - weight_shape = [] - for infer in inferred_shape: - weight_shape.append(infer) - else: - weight_shape = weight.shape - channels = weight_shape[0] - - strides = inputs[3] - padding = inputs[4] - dilation = inputs[5] - - kernel_size = weight_shape[2:] - - else: - data = inputs[0] - weight = inputs[1] - bias = inputs[2] - - if isinstance(weight, (_expr.Call, _expr.Var, _expr.TupleGetItem)): - inferred_shape = _infer_shape(weight) - weight_shape = [] - for infer in inferred_shape: - weight_shape.append(infer) - else: - weight_shape = weight.shape - channels = weight_shape[0] - - strides = inputs[3] - padding = inputs[4] - dilation = inputs[5] - - kernel_size = weight_shape[2:] - - if isinstance(strides, _expr.Var): - strides = _infer_shape(strides) - - if isinstance(padding, _expr.Var): - padding = _infer_shape(padding) - - if isinstance(dilation, _expr.Var): - dilation = _infer_shape(dilation) - - groups = int(inputs[8]) - - if use_transpose: - conv_out = _op.nn.conv2d_transpose(data, - weight, - strides=strides, - padding=padding, - dilation=dilation, - groups=groups, - channels=channels, - kernel_size=kernel_size, - data_layout="NCHW", - kernel_layout="OIHW", - out_layout="", - out_dtype="") - else: - conv_out = _op.nn.conv2d(data, - weight, - strides=strides, - padding=padding, - dilation=dilation, - groups=groups, - channels=channels, - kernel_size=kernel_size, - data_layout="NCHW", - kernel_layout="OIHW", - out_layout="", - out_dtype="") - - if use_bias: - return _op.nn.bias_add(conv_out, bias) - else: - return conv_out - return _impl - -def _softmax(): - def _impl(inputs): - data = inputs[0] - axis = inputs[1] - if isinstance(axis, str): - axis = int(axis) - - return _op.nn.softmax(data, axis=axis) - return _impl - -def _contiguous(): - def _impl(inputs): - data = inputs[0] - return _op.tensor.copy(data) - return _impl - -def _batch_norm(): - def _impl(inputs): - data = inputs[0] - - channels = _infer_shape(data) - - if isinstance(inputs[1], _expr.Var) and isinstance(inputs[2], _expr.Var): - scale = center = True - weight = inputs[1] - beta = inputs[2] - else: - scale = center = False - - if scale: - gamma = weight - else: - gamma = _expr.const(np.ones([int(channels[1])]).astype('float32'), dtype='float32') - - if center: - beta = beta - else: - beta = _expr.const(np.zeros([int(channels[1])]).astype('float32'), dtype='float32') - - moving_mean = inputs[3] - moving_var = inputs[4] - epsilon = float(inputs[7]) - - center = center - scale = scale - - return _op.nn.batch_norm(data, - gamma, - beta, - moving_mean, - moving_var, - axis=1, - epsilon=epsilon, - center=center, - scale=scale)[0] - return _impl - -def _transpose(): - def _impl(inputs): - data = inputs[0] - - if isinstance(data, _expr.Var): - ndims = len(_infer_shape(data)) - elif isinstance(data, (_expr.Call, _expr.TupleGetItem)): - ndims = _infer_shape(data) - else: - ndims = data.shape - - if isinstance(data, tvm.ndarray.NDArray): - ndims = len(data.shape) - axes = list(range(ndims)) - - num_inputs = len(inputs) - - if num_inputs == 1: - if ndims >= 2: - axes[-1] = ndims - 2 - axes[-2] = ndims - 1 - if not isinstance(data, _expr.Var): - data = _expr.const(data, dtype='float32') - - elif num_inputs == 3: - parse = lambda i: ndims * (i < 0) + i - src, dst = [parse(int(inputs[i])) for i in [1, 2]] - axes[src] = dst - axes[dst] = src - else: - axes = inputs[1] - return _op.transform.transpose(data, axes) - return _impl - -def _flatten(): - def _impl(inputs): - data = inputs[0] - return 
_op.nn.batch_flatten(data) - return _impl - -def _dense(): - def _impl(inputs): - use_bias = False - - if isinstance(inputs[0], _expr.Var): - use_bias = True - - data = inputs[1] - weight = inputs[2] - beta = int(inputs[3]) - alpha = int(inputs[4]) - - if isinstance(alpha, int) and isinstance(data, (_expr.Call, _expr.TupleGetItem)): - alpha = _expr.const(alpha, dtype='float32') - data *= alpha - - if isinstance(beta, int) and isinstance(weight, (_expr.Call, _expr.TupleGetItem)): - beta = _expr.const(beta, dtype='float32') - weight *= beta - - weight_out = _op.transform.transpose(weight, axes=[1, 0]) - - units = _infer_shape(weight_out)[0] - dense_out = _op.nn.dense(data, weight_out, units=units) - - if use_bias: - bias = inputs[0] - return _op.nn.bias_add(dense_out, bias) - else: - return dense_out - return _impl - -def _size(): - def _impl(inputs): - axis = int(inputs[1]) - if isinstance(inputs[0], _expr.Var): - shape = _infer_shape(inputs[0]) - else: - shape = _infer_shape(inputs[0]) - return shape[axis] - return _impl - -def _numtotensor(): - def _impl(inputs): - val = inputs[0] - - if isinstance(val, tvm.expr.IntImm): - val = val.__int__() - - arr = val * np.ones([]).astype(np.float32) - return _expr.const(arr, dtype='float32') - return _impl - -def _view(): - def _impl(inputs): - data = inputs[0] - - if len(inputs) == 3: - new_shape = [inputs[1], _infer_shape(inputs[2])[0]] - else: - if isinstance(inputs[1], list): - new_shape = inputs[1] - else: - new_shape = _infer_shape(inputs[1]) - return _op.transform.reshape(data, new_shape) - return _impl - -def _log_softmax(): - def _impl(inputs): - data = inputs[0] - axis = int(inputs[1]) - return _op.nn.log_softmax(data, axis) - return _impl - -def _sigmoid(): - def _impl(inputs): - data = inputs[0] - return _op.tensor.sigmoid(data) - return _impl - -def _avg_pool2d(): - def _impl(inputs): - data = inputs[0] - - pool_size = _infer_shape(inputs[1]) - strides = _infer_shape(inputs[2]) - padding = _infer_shape(inputs[3]) - - ceil_mode = int(inputs[4]) - count_include_pad = int(inputs[5]) - - return _op.nn.avg_pool2d(data, - pool_size=pool_size, - strides=strides, - padding=padding, - ceil_mode=ceil_mode, - count_include_pad=count_include_pad) - return _impl - -def _dropout(): - def _impl(inputs): - data = inputs[0] - rate = float(inputs[1]) - - return _op.nn.dropout(data, rate) - return _impl - -def _reduce(name): - def _impl(inputs, attrs, params): - data = inputs[0] - return get_relay_op(name)(data) - return _impl - -def _mean(): - def _impl(inputs): - data = inputs[0] - axis = _infer_shape(inputs[1]) - - keepdims = int(inputs[2]) - exclude = int(inputs[3]) - - return _op.mean(data, axis, keepdims, exclude) - return _impl - -def _chunk(): - def _impl(inputs): - data = inputs[0] - - num_chunks = int(inputs[1]) - axis = int(inputs[2]) - - if isinstance(data, _expr.Var): - inferred_shape = _infer_shape(data) - elif isinstance(data, (_expr.Call, _expr.TupleGetItem)): - inferred_shape = _infer_shape(data) - - shape = [] - for infer in inferred_shape: - shape.append(infer) - - dim = int(shape[axis]) - - if dim % num_chunks: - unif_size = int(dim / (num_chunks - 1)) - else: - unif_size = int(dim / num_chunks) - - chunks = [] - for i in range(0, dim, unif_size): - begin = [0] * len(shape) - end = shape[:] - begin[axis] = i - end[axis] = i + unif_size - stride = [1] * len(shape) - - chunk_out = _op.transform.strided_slice(data, begin, end, stride) - chunks.append(chunk_out) - - - if dim % num_chunks: - begin = [0] * len(shape) - end = shape[:] - 
begin[axis] = unif_size * (num_chunks - 1) - end[axis] = dim - stride = [1] * len(shape) - - chunk_out = _op.transform.strided_slice(data, begin, end, stride) - chunks.append(chunk_out) - - return chunks - return _impl - -def _matmul(): - def _impl(inputs): - data0 = inputs[0] - data1 = inputs[1] - data1_t = _op.transpose(data1, axes=(1, 0)) - - return _op.nn.dense(data0, data1_t) - return _impl - -def _expand(): - def _impl(inputs): - data_in = inputs[0] - if isinstance(data_in, _expr.Var): - shape = _infer_shape(data_in) - elif isinstance(data_in, (_expr.Call, _expr.TupleGetItem)): - shape = _infer_shape(data_in) - - ndims = len(shape) - sizes = _infer_shape(inputs[1]) - out = inputs[0] - - for i in range(ndims): - if sizes[i] in {-1, shape[i]}: - continue - data = list() - for temp in range(sizes[i]): - data.append(out) - call = _op.tensor.concatenate(data, i) - - return call - return _impl - -def _int(): - def _impl(inputs): - if isinstance(inputs[0], _expr.Call): - return inputs[0].astype(dtype='float32') - elif isinstance(inputs[0], _expr.Constant): - return int(inputs[0].data.asnumpy()) - return int(inputs[0]) - return _impl - -def _listunpack(): - def _impl(inputs): - return inputs[0] - return _impl - -def _to(): - def _impl(inputs): - return inputs[0] - return _impl - -def _device(): - def _impl(inputs): - return None - return _impl - -def _pad(): - def _impl(inputs): - data = inputs[0] - padding = inputs[1] - pad_width = list(zip(padding, padding)) - pad_value = inputs[2] - return _op.nn.pad(data, pad_width, pad_value) - return _impl - -def _sqrt(): - def _impl(inputs): - data = inputs[0] - return _op.tensor.sqrt(data) - return _impl - -# Helper functions for operator implementation - -def convert_input(data): - """ Handle input conversion for elemwise op """ - if isinstance(data, (_expr.Call, _expr.TupleGetItem, _expr.Var, _expr.Constant)): - return data - elif isinstance(data, str): - if len(data) == 1: - return _expr.const(int(data), dtype='float32') - else: - if '.' 
in data: - return _expr.const(float(data[1:-1]), dtype='float32') - else: - return _expr.const(int(data[1:-1]), dtype='float32') - else: - return _expr.const(int(data), dtype='float32') - -# Operator mappings - -_convert_map = { - 'aten::device' : _device(), - 'aten::add' : _elemwise('add'), - 'aten::add_' : _elemwise('add'), - 'aten::sub' : _elemwise('subtract'), - 'aten::sub_' : _elemwise('subtract'), - 'aten::max' : _elemwise('maximum'), - 'aten::min' : _elemwise('minimum'), - 'aten::mul' : _elemwise('multiply'), - 'aten::mul_' : _elemwise('multiply'), - 'aten::pow' : _elemwise('power'), - 'aten::div' : _elemwise('divide'), - 'aten::div_' : _elemwise('divide'), - 'aten::ones' : _ones(), - 'aten::zeros' : _zeros(), - 'aten::to' : _to(), - 'aten::unsqueeze' : _unsqueeze(), - 'aten::cat' : _concatenate(), - 'aten::slice' : _slice(), - 'aten::select' : _select(), - 'aten::relu' : _relu(), - 'aten::relu_' : _relu(), - 'aten::adaptive_avg_pool2d' : _adaptive_avg_2d(), - 'aten::adaptive_max_pool2d' : _adaptive_max_2d(), - 'aten::max_pool2d' : _maxpool_2d(), - 'aten::max_pool2d_with_indices' : _maxpool_2d(), - 'aten::hardtanh' : _hardtanh(), - 'aten::hardtanh_' : _hardtanh(), - 'aten::_convolution' : _convolution(), - 'aten::softmax' : _softmax(), - 'aten::threshold' : _relu(), - 'aten::threshold_' : _relu(), - 'aten::contiguous' : _contiguous(), - 'aten::batch_norm' : _batch_norm(), - 'aten::transpose' : _transpose(), - 'aten::transpose_' : _transpose(), - 'aten::t' : _transpose(), - 'aten::flatten' : _flatten(), - 'aten::addmm' : _dense(), - 'aten::size' : _size(), - 'aten::view' : _view(), - 'aten::clone' : _contiguous(), - 'aten::log_softmax' : _log_softmax(), - 'aten::sigmoid' : _sigmoid(), - 'aten::avg_pool2d' : _avg_pool2d(), - 'aten::dropout' : _dropout(), - 'aten::dropout_' : _dropout(), - 'aten::mean' : _mean(), - 'aten::chunk' : _chunk(), - 'aten::matmul' : _matmul(), - 'aten::expand' : _expand(), - 'aten::Int' : _int(), - 'prim::NumToTensor' : _numtotensor(), - 'prim::ListUnpack' : _listunpack(), - 'aten::constant_pad_nd' : _pad(), - 'aten::permute' : _transpose(), - 'aten::sum' : _reduce('sum'), - 'aten::prod' : _reduce('prod'), - 'aten::sqrt' : _sqrt() -} - -# Internal graph for parsing - -class Graph(object): - """ A helper class for handling relay graph copying from PyTorch trace. """ - - def __init__(self, trace, input_shapes): - - self._trace = trace - self._inputs_r = {} - self._params = {} - self._param_tensors = {} - self._consts = {} - self._ops = {} - self._op_inputs_r = {} - self._input_shapes = input_shapes if input_shapes else {} - self._fn_param = [] - self._relay_map = {} - self._nid_to_node_name = {} - - def from_pytorch(self): - """ Construct relay nodes from trace of PyTorch graph - - Currently only supports traced PyTorch format which means no control flow. - User must perform torch.jit.trace on a model and pass this in. - Future support should include support scripted models (torch.jit.script) which - preserves control flow. - - Returns - ------- - mod : tvm.relay.Module - The module that optimizations will be performed on. 
- - params : dict of str to tvm.ndarray - Dict of converted parameters stored in tvm.ndarray format - """ - # Check for missing ops - missing_operators = self._parse_import_prerequisites() - - if missing_operators: - raise NotImplementedError( \ - "The following operators are not implemented: {}".format(missing_operators)) - - # Translate PyTorch graph to by decorating Graph with state dict and inputs into each op - self._parse_inputs() - self._parse_params() - self._parse_ops() - - nid = 0 - for (op_name, operator), op_node in self._ops.items(): - if operator == 'prim::Constant': - pass - elif operator == 'prim::ListConstruct': - if any(inp.debugName() in self._nid_to_node_name.keys() \ - for inp in op_node.inputs()): - listconstr = [] - for i in op_node.inputs(): - if i.debugName() in self._nid_to_node_name.keys(): - listconstr.append( \ - self._relay_map[self._nid_to_node_name[i.debugName()]]) - elif i.node().kind() == 'prim::Constant': - listconstr.append(int(self._consts[i.debugName()])) - elif i.debugName() in self._inputs_r.keys(): - listconstr.append(int(self._inputs_r[i.debugName()])) - - # Unwrap for tensors - if len(listconstr) == 1: - listconstr = listconstr[0] - - self._relay_map[nid] = listconstr - self._nid_to_node_name[op_name] = nid - nid = nid + 1 - else: - for i in op_node.inputs(): - if i.debugName() in self._nid_to_node_name.keys(): - for cnt in range(0, len(self._op_inputs_r[(op_name, operator)])): - if isinstance(self._op_inputs_r[(op_name, operator)][cnt], str): - if "call/var" in self._op_inputs_r[(op_name, operator)][cnt]: - self._op_inputs_r[(op_name, operator)][cnt] = \ - self._relay_map[self._nid_to_node_name[i.debugName()]] - break - - call = _convert_map[operator](self._op_inputs_r[(op_name, operator)]) - - self._relay_map[nid] = call - self._nid_to_node_name[op_name] = nid - nid = nid + 1 - - outputs = [] - - for i in range(nid): - output = self._relay_map[i] - outputs.append(output) - - if len(outputs) == 1: - body = outputs[0] - else: - body = outputs[-1] - - func = tvm.relay.Function(_analysis.free_vars(body), body) - - param = {k: tvm.nd.array(v) for k, v in self._param_tensors.items()} - - return _module.Module.from_expr(func), param - - def _parse_inputs(self): - """ Map inputs to parser and inputs to graph. """ - # Get names and objects of inputs for IR - ir_names = [i.debugName() for i in self._trace.graph.inputs()] - ir_inputs = [i for i in self._trace.graph.inputs()] - - # Create corresponding shape and add to input - for input_name, ir_input in zip(self._input_shapes, ir_inputs[1:]): - input_shape = self._input_shapes[input_name] - tensor = tvm.nd.array(np.zeros(input_shape).astype(np.float32)) - ir_input.setDebugName(input_name) - self._inputs_r[input_name] = _expr.var(input_name, - shape=self._input_shapes[input_name], - dtype='float32') - self._fn_param.append(_expr.var(input_name, - shape=self._input_shapes[input_name], - dtype='float32')) - - # Add self (first input of a PyTorch graph) to inputs - input_shape = [3] - tensor = tvm.nd.array(np.zeros(input_shape).astype(np.float32)) - input_name = ir_names[0] - self._inputs_r[input_name] = tensor - - def _parse_params(self): - """ Map state dictionary values to corresponding prim::GetAttr op node. """ - # Grab weights, biases, etc. 
from graph - state_dict = self._trace.state_dict() - - # Get names of all inputs - input_names = [i for i in self._inputs_r.keys()] - - # Iterate through graph for getAttr nodes and match full state_dict name to nodes - node_weight_map = {} - for node in self._trace.graph.nodes(): - if node.kind() == "prim::GetAttr": - node_str = str(node) - node_assign = (node_str.split(' = ')[0]).split(' : ') - node_name = (node_assign[0])[1:] - node_getattr_name = ((node_str.split(' = ')[1]).split('"')[1::2])[0] - node_arg = (((node_str.split(' = '))[1]).split('(')[1])[1:-2] - - if node_arg in input_names: - node_weight_map[node_name] = node_getattr_name - else: - previous_map = node_weight_map[node_arg[:]] - node_weight_map[node_name] = previous_map+"."+node_getattr_name - - if node_getattr_name == "weight" or node_getattr_name == "bias" \ - or node_getattr_name == "running_mean" \ - or node_getattr_name == "running_var": - - value = state_dict[node_weight_map[node_name]] - tensor = tvm.nd.array(value.cpu().numpy()) - shape = tensor.shape - self._param_tensors[node_name] = tensor - - self._params[node_name] = _expr.var(node_name, - shape=shape, - dtype='float32') - - self._fn_param.append(_expr.var(node_name, - shape=shape, - dtype='float32')) - - - def _parse_ops(self): - """ Iterate through nodes and decorate graph with constants, operators, - and the inputs to each operator. """ - # Traverse nodes and add to graph - for node in self._trace.graph.nodes(): - - node_str = str(node) - node_assign = (node_str.split(' = ')[0]).split(' : ') - node_name = (node_assign[0])[1:] - node_expr = (node_str.split(' = ')[1]).split(',')[0] - - if node.kind() == "prim::Constant": - node_value = '0' - if "None" not in node_str and node_expr != "prim::Constant()" and \ - "?" not in node_str: - node_value = ((node_str.split(' = ')[1]).split('value=')[1]).split(']')[0] - self._consts[node_name] = node_value - elif node.kind() == "prim::ListConstruct": - list_shape = [] - for input_node in node.inputs(): - if input_node.debugName() in self._inputs_r.keys(): - list_shape.append(int(self._inputs_r[input_node.debugName()])) - elif input_node.debugName() in self._consts.keys(): - list_shape.append(int(self._consts[input_node.debugName()])) - else: - pass - self._inputs_r[node_name] = _expr.var(node_name, shape=list_shape, dtype='float32') - elif node.kind() == "prim::GetAttr": - continue - - self._add_op(node_name, node.kind(), node) - - # Graph Helper Functions - - def _add_op(self, op_name, operator, op_node): - """ Add an operator and its operators inputs to the graph and insert placeholders - where an input is a call node. 
- - Parameters - ---------- - op_name : string - The ID of the op node - - operator : string - The kind of operator - - op_node : PyTorch Node object - The full Node object for the op node - - """ - self._ops[(op_name, operator)] = op_node - input_list_r = [] - for input_node in op_node.inputs(): - if input_node.debugName() in self._inputs_r.keys(): - input_list_r.append(self._inputs_r[input_node.debugName()]) - elif input_node.debugName() in self._params.keys(): - input_list_r.append(self._params[input_node.debugName()]) - elif input_node.node().kind() == "prim::Constant": - input_list_r.append(self._consts[input_node.debugName()]) - else: - input_list_r.append("call/var."+input_node.debugName()) - - # If the inputs of a ListConstruct op is a call or var, remove it from inputs - if op_node.kind() == 'prim::ListConstruct': - if op_name in self._inputs_r.keys(): - self._inputs_r.pop(op_name) - - self._op_inputs_r[(op_name, operator)] = input_list_r - - def _parse_import_prerequisites(self): - """ Calculate the named preconditions from PyTorch graph. - - Returns - ------- - missing_operators : set object - Set of operator names which don't have their mapping in TVM, - i.e. which are not supported - - """ - missing_operators = set() - for node in self._trace.graph.nodes(): - if node.kind() == "prim::Constant" or node.kind() == 'prim::ListConstruct' or \ - node.kind() == 'prim::GetAttr': - pass - else: - if any([node.kind() in _convert_map]): - pass - else: - missing_operators.add(node.kind()) - - return missing_operators - -def from_pytorch_neo(trace, input_shapes): - """ Load PyTorch model in the form of a trace object into relay. - The companion parameters will be handled automatically. - - Parameters - ---------- - trace : torch.jit.TopLevelTracedModule object - Trace of the PyTorch graph - - input_shapes : Dictionary of input dimensions - Graph level input shape dictionary - - Returns - ------- - mod : tvm.relay.Module - The module that optimizations will be performed on. - - params : dict of str to tvm.ndarray - Dict of converted parameters stored in tvm.ndarray format - """ - g = Graph(trace, input_shapes) - mod, params = g.from_pytorch() - return mod, params diff --git a/tests/python/frontend/pytorch_neo/single_op.py b/tests/python/frontend/pytorch_neo/single_op.py deleted file mode 100644 index eb6a50b92651..000000000000 --- a/tests/python/frontend/pytorch_neo/single_op.py +++ /dev/null @@ -1,312 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# pylint: disable=import-self, invalid-name, unused-argument -"""Models consisting of single operators""" -import torch -from torch.nn import Module - - -class Add1(Module): - def forward(self, *args): - return args[0] + args[0] - -class Add2(Module): - def forward(self, *args): - return args[0] + 1 - -class Add3(Module): - def forward(self, *args): - ones = torch.ones([1, 3, 224, 224]) - if torch.cuda.is_available(): - ones = ones.cuda() - return args[0] + ones - -class Add4(Module): - def forward(self, *args): - ones = torch.ones([1, 1, 224, 224]) - if torch.cuda.is_available(): - ones = ones.cuda() - return args[0] + ones - -class Add5(Module): - def forward(self, *args): - ones = torch.ones([]) - if torch.cuda.is_available(): - ones = ones.cuda() - return args[0] + ones - -class Subtract1(Module): - def forward(self, *args): - return args[0] - args[0] - -class Subtract2(Module): - def forward(self, *args): - return args[0] - 1 - -class Subtract3(Module): - def forward(self, *args): - ones = torch.ones([1, 3, 224, 224]) - if torch.cuda.is_available(): - ones = ones.cuda() - return args[0] - ones - -class Subtract4(Module): - def forward(self, *args): - ones = torch.ones([1, 1, 224, 224]) - if torch.cuda.is_available(): - ones = ones.cuda() - return args[0] - ones - -class Subtract5(Module): - def forward(self, *args): - ones = torch.ones([]) - if torch.cuda.is_available(): - ones = ones.cuda() - return args[0] - ones - -class Multiply1(Module): - def forward(self, *args): - return args[0] * args[0] - -class Multiply2(Module): - def forward(self, *args): - return args[0] * 1 - -class Multiply3(Module): - def forward(self, *args): - ones = torch.ones([1, 3, 224, 224]) - if torch.cuda.is_available(): - ones = ones.cuda() - return args[0] * ones - -class Multiply4(Module): - def forward(self, *args): - ones = torch.ones([1, 1, 224, 224]) - if torch.cuda.is_available(): - ones = ones.cuda() - return args[0] * ones - -class Multiply5(Module): - def forward(self, *args): - ones = torch.ones([]) - if torch.cuda.is_available(): - ones = ones.cuda() - return args[0] * ones - -class Unsqueeze1(Module): - def forward(self, *args): - return args[0].unsqueeze(2) - -class Concatenate1(Module): - def forward(self, *args): - return torch.cat([args[0][:, 0].unsqueeze(1), args[0][:, 1].unsqueeze(1)], 1) - -class Concatenate2(Module): - def forward(self, *args): - a = (args[0][:, :, 0] + 2) * 7 - b = (args[0][:, :, 1] + 3) * 11 - c = (args[0][:, :, 2] + 5) * 13 - return torch.cat([t.unsqueeze(2) for t in [a, b, c]], 2) - -class ReLU1(Module): - def forward(self, *args): - return torch.nn.ReLU()(args[0]) - -class AdaptiveAvgPool2D1(Module): - def forward(self, *args): - return torch.nn.AdaptiveAvgPool2d([1, 1])(args[0]) - -class AdaptiveAvgPool2D2(Module): - def forward(self, *args): - return torch.nn.AdaptiveAvgPool2d([100, 100])(args[0]) - -class AdaptiveAvgPool2D3(Module): - def forward(self, *args): - return torch.nn.AdaptiveAvgPool2d([224, 224])(args[0]) - -class MaxPool2D1(Module): - def forward(self, *args): - return torch.nn.MaxPool2d(kernel_size=[1, 1])(args[0]) - -class MaxPool2D2(Module): - def forward(self, *args): - return torch.nn.MaxPool2d(kernel_size=[100, 100])(args[0]) - -class MaxPool2D3(Module): - def forward(self, *args): - return torch.nn.MaxPool2d(kernel_size=[224, 224])(args[0]) - -class HardTanh1(Module): - def forward(self, *args): - return torch.nn.Hardtanh()(args[0]) - -class Conv2D1(Module): - - def __init__(self): - super(Conv2D1, self).__init__() - self.conv = 
torch.nn.Conv2d(3, 64, 7, bias=True) - self.softmax = torch.nn.Softmax() - - def forward(self, *args): - return self.softmax(self.conv(args[0])) - -class Conv2D2(Module): - - def __init__(self): - super(Conv2D2, self).__init__() - self.conv = torch.nn.Conv2d(3, 64, 7, bias=False) - self.softmax = torch.nn.Softmax() - - def forward(self, *args): - return self.softmax(self.conv(args[0])) - -class Conv2D3(Module): - - def __init__(self): - super(Conv2D3, self).__init__() - self.conv1 = torch.nn.Conv2d(3, 64, 7, bias=True) - self.conv2 = torch.nn.Conv2d(64, 64, 1, bias=True) - - def forward(self, *args): - x = args[0] - x = self.conv1(x) - for i in range(200): - x = self.conv2(x) - return x - -class Threshold1(Module): - def forward(self, *args): - return torch.nn.Threshold(0, 0)(args[0]) - -class Pad1(Module): - def forward(self, *args): - return torch.ConstantPad2d(3)(args[0]) - -class Contiguous1(Module): - def forward(self, *args): - return args[0].contiguous() - -class BatchNorm1(Module): - def __init__(self): - super(BatchNorm1, self).__init__() - self.batch_norm = torch.nn.BatchNorm2d(3, affine=True) - def forward(self, *args): - return self.batch_norm(args[0]) - -class BatchNorm2(Module): - def __init__(self): - super(BatchNorm2, self).__init__() - self.batch_norm = torch.nn.BatchNorm2d(3, affine=False) - def forward(self, *args): - return self.batch_norm(args[0]) - -class BatchNorm3(Module): - def __init__(self): - super(BatchNorm3, self).__init__() - self.batch_norm = torch.nn.BatchNorm2d(3, affine=False) - def forward(self, *args): - x = args[0] - for i in range(200): - x = self.batch_norm(x) - return x - -class Transpose1(Module): - def forward(self, *args): - return args[0].transpose(2, 3) - -class Transpose2(Module): - def forward(self, *args): - return args[0].transpose(-2, -1) - -class Transpose3(Module): - def forward(self, *args): - return args[0].t() - -class Size1(Module): - def forward(self, *args): - return args[0].size(0) * args[0] - -class View1(Module): - def forward(self, *args): - return args[0].view((1, 3 * 224 * 224)) - -class View2(Module): - def forward(self, *args): - return args[0].view(args[0].shape[0], -1) - -class Select1(Module): - def forward(self, *args): - return args[0].select(1, 1) - -class Clone1(Module): - def forward(self, *args): - return args[0].clone() - -class LogSoftmax1(Module): - def forward(self, *args): - return torch.nn.LogSoftmax(dim=1)(args[0][0, 0]) - -class Sigmoid1(Module): - def forward(self, *args): - return torch.nn.Sigmoid()(args[0]) - -class Dense1(Module): - def __init__(self): - super(Dense1, self).__init__() - self.linear = torch.nn.Linear(224, 7, bias=True) - def forward(self, *args): - return self.linear(args[0][0, 0]) - -class Dense2(Module): - def __init__(self): - super(Dense2, self).__init__() - self.linear = torch.nn.Linear(224, 7, bias=False) - def forward(self, *args): - return self.linear(args[0][0, 0]) - -class AvgPool2D1(Module): - def forward(self, *args): - return torch.nn.AvgPool2d(kernel_size=[100, 100])(args[0]) - -class Dropout1(Module): - def forward(self, *args): - return torch.nn.functional.dropout(args[0][0, 0], 0.5, False) - -class Slice1(Module): - def forward(self, *args): - return args[0][:, :, :, :3] - -class Slice2(Module): - def forward(self, *args): - return args[0][0, :, :, :] - -class Mean1(Module): - def forward(self, *args): - return args[0].mean(2) - -class Expand1(Module): - def forward(self, *args): - return args[0].expand((3, -1, -1, -1)) - -class Pow1(Module): - def forward(self, *args): 
- return args[0] ** 2 - -class Chunk1(Module): - def forward(self, *args): - chunks = args[0].chunk(7, 2) - return torch.cat(chunks, 2) diff --git a/tests/python/frontend/pytorch_neo/test_forward.py b/tests/python/frontend/pytorch_neo/test_forward.py deleted file mode 100644 index f41e85aa1827..000000000000 --- a/tests/python/frontend/pytorch_neo/test_forward.py +++ /dev/null @@ -1,525 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# pylint: disable=import-self, invalid-name, unused-argument -"""Unit tests for various models and operators""" -from time import time -import os -import sys -from tempfile import TemporaryDirectory -from scipy.stats import t as tdistr -import numpy as np -import torch -import tvm -import torchvision -import single_op - -from tvm import relay -from tvm.contrib import graph_runtime -from tvm.relay.testing.config import ctx_list - -sys.setrecursionlimit(10000) - -def _vectorize(ten): - return ten.reshape(-1) - -def atol(tru, est): - def _atol_elt(tru, est): - return abs(tru - est) - tru = _vectorize(tru) - est = _vectorize(est) - return max([_atol_elt(x, y) for x, y in zip(tru, est)]) - -def rtol(tru, est): - def _rtol_elt(tru, est): - return abs(tru - est) / min(abs(tru), abs(est)) - tru = _vectorize(tru) - est = _vectorize(est) - return max([_rtol_elt(x, y) for x, y in zip(tru, est)]) - -def assert_shapes_match(tru, est): - if tru.shape != est.shape: - msg = "Output shapes {} and {} don't match" - raise AssertionError(msg.format(tru.shape, est.shape)) - -def load_single_op(model_name): - """Given a model name, returns a single-operator model in eval - mode as well as an example input.""" - model = getattr(single_op, model_name)().float().eval() - input_shape = [1, 3, 224, 224] - input_data = torch.rand(input_shape).float() - return model, input_data - -def load_torchvision(model_name): - """Given a model name, returns a Torchvision model in eval mode as well - as an example input.""" - if model_name.startswith('inception'): - height = width = 299 - mean = [0.5, 0.5, 0.5] - std = [0.5, 0.5, 0.5] - else: - height = width = 224 - mean = [0.485, 0.456, 0.406] - std = [0.229, 0.224, 0.225] - input_shape = [1, 3, height, width] - input_data = torch.randn(input_shape).float() - for channel in range(3): - input_data[:, channel] -= mean[channel] - input_data[:, channel] /= std[channel] - model = getattr(torchvision.models, model_name)(pretrained=True) - model = model.float().eval() - return model, input_data - -def load_pretrainedmodels(model_name): - """Given a model name, returns a pretrainedmodels.pytorch model in eval - mode as well as an example input.""" - import pretrainedmodels # https://github.com/Cadene/pretrained-models.pytorch - model = getattr(pretrainedmodels, model_name)().float().eval() - input_shape = [1, 
*model.input_size] - input_data = torch.rand(input_shape).float() * 256 - for channel in range(3): - input_data[:, channel] -= model.mean[channel] - input_data[:, channel] /= model.std[channel] - return model, input_data - -def load_model(model_name): - """Given a model name, returns a model as well as an example input.""" - if hasattr(single_op, model_name): - return load_single_op(model_name) - if hasattr(torchvision.models, model_name): - return load_torchvision(model_name) - try: - if hasattr(pretrainedmodels, model_name): - return load_pretrainedmodels(model_name) - except ModuleNotFoundError: - raise ModuleNotFoundError('Please install pretrainedmodels.pytorch') - raise RuntimeError('Model not supported') - - -def confidence_interval(mean, stdev, count, alpha=.01): - """Returns the lower and upper bounds of the confidence interval of a random - variable. Confidence is 1 - alpha (default confidence is 99%).""" - stdval = tdistr.ppf(1 - alpha / 2, count - 1) - lower, upper = mean + np.array([-1, 1]) * stdval * stdev / np.sqrt(count) - return lower, upper - -def measure_latency(model, input_shapes, output_shapes, thresh, dryruns=40): - """Compute the latency of the given model""" - latencies = [] - count = 0 - while True: - if isinstance(model, torch.nn.Module): - input_data = [torch.rand(shape).float() for shape in input_shapes] - if torch.cuda.is_available(): - input_data = list(map(lambda x: x.cuda(), input_data)) - model = model.cuda() - t_start = time() - model(*input_data) - t_end = time() - latencies.append(t_end - t_start) - else: - input_data = {} - for i, shape in enumerate(input_shapes): - name = 'input' + str(i) - arr = np.random.random(shape).astype('float32') - input_data[name] = tvm.nd.array(arr) - t_start = time() - model.set_input(**input_data) - model.run() - for i, shape in enumerate(output_shapes): - arr = np.zeros(shape).astype('float32') - model.get_output(i, tvm.nd.array(arr)) - t_end = time() - count += 1 - if count < dryruns: - continue - latencies.append(t_end - t_start) - mean = np.mean(latencies) - stdev = np.std(latencies) - sample_size = len(latencies) - if sample_size > dryruns: - lower, upper = confidence_interval(mean, stdev, sample_size) - est = (upper + lower) / 2 - err = (upper - lower) / 2 - if err < thresh: - return est - -def verify_model(model_name): - """Assert that the output of a compiled model matches with that of its - baseline.""" - baseline_model, baseline_input = load_model(model_name) - if torch.cuda.is_available(): - baseline_model = baseline_model.cuda() - baseline_input = baseline_input.cuda() - baseline_outputs = baseline_model(baseline_input) - if isinstance(baseline_outputs, tuple): - baseline_outputs = tuple(out.detach().cpu().numpy() for out in baseline_outputs) - else: - baseline_outputs = (baseline_outputs.detach().float().cpu().numpy(),) - output_shapes = [out.shape for out in baseline_outputs] - dtype = 'float32' - input_name = 'input0' - input_shapes = {input_name: list(baseline_input.shape)} - baseline_model(baseline_input) - trace = torch.jit.trace(baseline_model, baseline_input).float().eval() - if torch.cuda.is_available(): - trace = trace.cuda() - else: - trace = trace.cpu() - - mod, params = relay.frontend.from_pytorch_neo(trace, input_shapes) - - compiled_input = {input_name: tvm.nd.array(baseline_input.cpu().numpy())} - - with relay.build_config(opt_level=3): - for target, ctx in ctx_list(): - relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params) - relay_model = 
graph_runtime.create(relay_graph, relay_lib, ctx) - relay_model.set_input(**relay_params) - relay_model.set_input(**compiled_input) - relay_model.run() - - for i, baseline_output in enumerate(baseline_outputs): - output_shape = baseline_output.shape - compiled_output = relay_model.get_output( - i, tvm.nd.array(np.zeros(output_shape).astype(dtype), ctx)).asnumpy() - - assert_shapes_match(baseline_output, compiled_output) - tvm.testing.assert_allclose(baseline_output, compiled_output, - rtol=1e-3, atol=1e-3) - - from subprocess import call - call('rm -rf ~/.torch/models/*', shell=True) - -# Test Functions -def test_add1(): - verify_model('Add1') - -def test_add2(): - verify_model('Add2') - -def test_add3(): - verify_model('Add3') - -def test_add4(): - verify_model('Add4') - -def test_add5(): - verify_model('Add5') - -def test_subtract1(): - verify_model('Subtract1') - -def test_subtract2(): - verify_model('Subtract2') - -def test_subtract3(): - verify_model('Subtract3') - -def test_subtract4(): - verify_model('Subtract4') - -def test_subtract5(): - verify_model('Subtract5') - -def test_multiply1(): - verify_model('Multiply1') - -def test_multiply2(): - verify_model('Multiply2') - -def test_multiply3(): - verify_model('Multiply3') - -def test_multiply4(): - verify_model('Multiply4') - -def test_multiply5(): - verify_model('Multiply5') - -def test_unsqueeze1(): - verify_model('Unsqueeze1') - -def test_concatenate1(): - verify_model('Concatenate1') - -def test_concatenate2(): - verify_model('Concatenate2') - -def test_relu1(): - verify_model('ReLU1') - -def test_adaptiveavgpool2d1(): - verify_model('AdaptiveAvgPool2D1') - -def test_adaptiveavgpool2d2(): - verify_model('AdaptiveAvgPool2D2') - -def test_adaptiveavgpool2d3(): - verify_model('AdaptiveAvgPool2D3') - -def test_maxpool2d1(): - verify_model('MaxPool2D1') - -def test_maxpool2d2(): - verify_model('MaxPool2D2') - -def test_maxpool2d3(): - verify_model('MaxPool2D3') - -def test_hardtanh1(): - verify_model('HardTanh1') - -def test_conv2d1(): - verify_model('Conv2D1') - -def test_conv2d2(): - verify_model('Conv2D2') - -def test_threshold1(): - verify_model('Threshold1') - -def test_contiguous1(): - verify_model('Contiguous1') - -def test_batchnorm1(): - verify_model('BatchNorm1') - -def test_batchnorm2(): - verify_model('BatchNorm2') - -def test_transpose1(): - verify_model('Transpose1') - -def test_transpose2(): - verify_model('Transpose2') - -def test_size1(): - verify_model('Size1') - -def test_view1(): - verify_model('View1') - -def test_view2(): - verify_model('View2') - -def test_select1(): - verify_model('Select1') - -def test_clone1(): - verify_model('Clone1') - -def test_logsoftmax1(): - verify_model('LogSoftmax1') - -def test_sigmoid1(): - verify_model('Sigmoid1') - -def test_dense1(): - verify_model('Dense1') - -def test_dense2(): - verify_model('Dense2') - -def test_avgpool2d1(): - verify_model('AvgPool2D1') - -def test_dropout1(): - verify_model('Dropout1') - -def test_slice1(): - verify_model('Slice1') - -def test_slice2(): - verify_model('Slice2') - -def test_mean1(): - verify_model('Mean1') - -def test_expand1(): - verify_model('Expand1') - -def test_pow1(): - verify_model('Pow1') - -def test_chunk1(): - verify_model('Chunk1') - -# Model tests -def test_resnet18(): - verify_model('resnet18') - -def test_resnet34(): - verify_model('resnet34') - -def test_resnet50(): - verify_model('resnet50') - -def test_resnet101(): - verify_model('resnet101') - -def test_resnet152(): - verify_model('resnet152') - -def test_squeezenet1_0(): - 
verify_model('squeezenet1_0') - -def test_squeezenet1_1(): - verify_model('squeezenet1_1') - -def test_mobilenet_v2(): - verify_model('mobilenet_v2') - -def test_densenet121(): - verify_model('densenet121') - -def test_densenet161(): - verify_model('densenet161') - -def test_densenet169(): - verify_model('densenet169') - -def test_densenet201(): - verify_model('densenet201') - -def test_inception_v3(): - verify_model('inception_v3') - -def test_googlenet(): - verify_model('googlenet') - -def test_mnasnet0_5(): - verify_model('mnasnet0_5') - -def test_mnasnet1_0(): - verify_model('mnasnet1_0') - -""" -def test_alexnet(): - verify_model('alexnet') - -def test_vgg11(): - verify_model('vgg11') - -def test_vgg13(): - verify_model('vgg13') - -def test_vgg16(): - verify_model('vgg16') - -def test_vgg19(): - verify_model('vgg19') - -def test_vgg11_bn(): - verify_model('vgg11_bn') - -def test_vgg13_bn(): - verify_model('vgg13_bn') - -def test_vgg19_bn(): - verify_model('vgg19_bn') - -def test_shufflenet_v2_x0_5(): - verify_model('shufflenet_v2_x0_5') - -def test_shufflenet_v2_x1_0(): - verify_model('shufflenet_v2_x1_0') -""" - -if __name__ == '__main__': - - # Single operator tests - test_add1() - test_add2() - test_add3() - test_add4() - test_add5() - test_subtract1() - test_subtract2() - test_subtract3() - test_subtract4() - test_subtract5() - test_multiply1() - test_multiply2() - test_multiply3() - test_multiply4() - test_multiply5() - test_unsqueeze1() - test_concatenate1() - test_concatenate2() - test_relu1() - test_adaptiveavgpool2d1() - test_adaptiveavgpool2d2() - test_adaptiveavgpool2d3() - test_maxpool2d1() - test_maxpool2d2() - test_maxpool2d3() - test_hardtanh1() - test_conv2d1() - test_conv2d2() - test_threshold1() - test_contiguous1() - test_batchnorm1() - test_batchnorm2() - test_transpose1() - test_transpose2() - test_size1() - test_view1() - test_view2() - test_select1() - test_clone1() - test_logsoftmax1() - test_sigmoid1() - test_dense1() - test_dense2() - test_avgpool2d1() - test_dropout1() - test_slice1() - test_slice2() - test_mean1() - test_expand1() - test_pow1() - test_chunk1() - - # Model tests - test_resnet18() - test_resnet34() - test_resnet50() - test_resnet101() - test_resnet152() - test_squeezenet1_0() - test_squeezenet1_1() - test_mobilenet_v2() - test_densenet121() - test_densenet161() - test_densenet169() - test_densenet201() - test_inception_v3() - test_googlenet() - test_mnasnet0_5() - test_mnasnet1_0() - - # TODO: Fix flaky VGG when running in CI-GPU container - #test_alexnet() - #test_vgg11() - #test_vgg13() - #test_vgg16() - #test_vgg19() - #test_vgg11_bn() - #test_vgg13_bn() - #test_vgg19_bn() - #test_shufflenet_v2_x0_5() - #test_shufflenet_v2_x1_0() \ No newline at end of file diff --git a/tests/scripts/task_python_frontend.sh b/tests/scripts/task_python_frontend.sh index d94582d9c6b2..68d861baa5e0 100755 --- a/tests/scripts/task_python_frontend.sh +++ b/tests/scripts/task_python_frontend.sh @@ -50,6 +50,3 @@ python3 -m pytest -v tests/python/frontend/caffe2 echo "Running relay DarkNet frontend test..." python3 -m pytest -v tests/python/frontend/darknet - -echo "Running relay PyTorch frontend test..." -python3 -m pytest -v tests/python/frontend/pytorch_neo
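
Reviewer note, for reference only (not part of the patch): the entry point being deleted was exercised as sketched below. This is a minimal usage sketch reconstructed from the docstrings and tests removed above; the model choice (resnet18), target ('llvm'), and context (tvm.cpu(0)) are illustrative assumptions, and it only runs against a checkout from before this change, where relay.frontend.from_pytorch_neo still exists.

    import torch
    import torchvision
    import tvm
    from tvm import relay
    from tvm.contrib import graph_runtime

    # Build a traced model; the removed frontend accepted only torch.jit.trace
    # output (no control flow / scripted models).
    model = torchvision.models.resnet18(pretrained=True).float().eval()
    input_shapes = {'input0': [1, 3, 224, 224]}   # graph-level input shape dict
    input_data = torch.rand(input_shapes['input0']).float()
    trace = torch.jit.trace(model, input_data).float().eval()

    # Convert the trace to a Relay module plus parameter dict.
    mod, params = relay.frontend.from_pytorch_neo(trace, input_shapes)

    # Compile and run through the graph runtime, mirroring the deleted tests.
    with relay.build_config(opt_level=3):
        graph, lib, params = relay.build(mod, target='llvm', params=params)

    rt = graph_runtime.create(graph, lib, tvm.cpu(0))
    rt.set_input(**params)
    rt.set_input(input0=tvm.nd.array(input_data.numpy()))
    rt.run()
    out = rt.get_output(0).asnumpy()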