diff --git a/nnvm/include/nnvm/op_attr_types.h b/nnvm/include/nnvm/op_attr_types.h
index ff24dd1c49df..db0eee75313d 100644
--- a/nnvm/include/nnvm/op_attr_types.h
+++ b/nnvm/include/nnvm/op_attr_types.h
@@ -224,7 +224,7 @@ using FCorrectLayout = std::function<bool(
-  NodeAttrs& attrs,
+  const NodeAttrs& attrs,
   std::vector<TShape>* ishapes,
   std::vector<Layout>* ilayouts,
   const std::vector<Layout>* last_ilayouts,
diff --git a/nnvm/python/nnvm/frontend/mxnet.py b/nnvm/python/nnvm/frontend/mxnet.py
index d76ed01e91f6..2cf701ea9040 100644
--- a/nnvm/python/nnvm/frontend/mxnet.py
+++ b/nnvm/python/nnvm/frontend/mxnet.py
@@ -274,8 +274,8 @@ def _lrn(inputs, attrs):
     return _get_nnvm_op(op_name)(*inputs, **new_attrs)
 
 def _symbol_ring_buffer(inputs, attrs):
-    output = _get_nnvm_op('ring_buffer')(*inputs, **attrs)
-    return _sym._assign(inputs[1], output)
+    output = _get_nnvm_op('ring_buffer')(*inputs, **attrs)
+    return _sym._assign(inputs[1], output)
 
 
 def _copy(inputs, _):
@@ -365,8 +365,7 @@ def _argmin(inputs, attrs):
     'expand_dims' : _expand_dims,
     'LRN' : _lrn,
     'ring_buffer' : _symbol_ring_buffer,
-    'LinearRegressionOutput' : _copy,
-    'argmax' : _argmax
+    'LinearRegressionOutput' : _copy
 }
 
 def _convert_symbol(op_name, inputs, attrs,
diff --git a/nnvm/python/nnvm/top/nn.py b/nnvm/python/nnvm/top/nn.py
index a1e36646051e..d25438313693 100644
--- a/nnvm/python/nnvm/top/nn.py
+++ b/nnvm/python/nnvm/top/nn.py
@@ -121,12 +121,12 @@ def compute_conv2d(attrs, inputs, _):
     else:
         raise ValueError("not support arbitrary group number for now")
 
-    if attrs.get_bool("use_bias"):
-        bias = inputs[2]
-        expand_axis = 1 if layout == "NCHW" else 0
-        bias = topi.expand_dims(bias, axis=expand_axis, num_newaxis=2)
-        out = topi.add(out, bias)
-    return out
+    if attrs.get_bool("use_bias"):
+        bias = inputs[2]
+        expand_axis = 1 if layout == "NCHW" else 0
+        bias = topi.expand_dims(bias, axis=expand_axis, num_newaxis=2)
+        out = topi.add(out, bias)
+    return out
 
 @reg.register_schedule("conv2d")
 def schedule_conv2d(attrs, outs, target):
@@ -260,11 +260,11 @@ def compute_contrib_conv2d_winograd_without_weight_transform(attrs, inputs, _):
         inputs[0], inputs[1], strides, padding, dilation, layout, out_dtype,
         tile_size)
 
-    if attrs.get_bool("use_bias"):
-        bias = inputs[2]
-        bias = topi.expand_dims(bias, axis=1, num_newaxis=2)
-        out = topi.add(out, bias)
-    return out
+    if attrs.get_bool("use_bias"):
+        bias = inputs[2]
+        bias = topi.expand_dims(bias, axis=1, num_newaxis=2)
+        out = topi.add(out, bias)
+    return out
 
 @reg.register_schedule("_contrib_conv2d_winograd_without_weight_transform")
 def schedule_contrib_conv2d_winograd_without_weight_transform(attrs, outs, target):
@@ -312,22 +312,22 @@ def schedule_conv2d_transpose(attrs, outs, target):
 reg.register_pattern("conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
 
 @reg.register_alter_op_layout("max_pool2d")
-def alter_pooling_layout(attrs, inputs, tinfos):
+def alter_pooling_layout_max_pool2d(attrs, inputs, tinfos):
     with tvm.target.create(attrs.get_string("target")):
         return topi.nn.max_pool2d_alter_layout(attrs, inputs, tinfos)
 
 @reg.register_alter_op_layout("avg_pool2d")
-def alter_pooling_layout(attrs, inputs, tinfos):
+def alter_pooling_layout_avg_pool2d(attrs, inputs, tinfos):
     with tvm.target.create(attrs.get_string("target")):
         return topi.nn.avg_pool2d_alter_layout(attrs, inputs, tinfos)
 
 @reg.register_alter_op_layout("global_max_pool2d")
-def alter_pooling_layout(attrs, inputs, tinfos):
+def alter_pooling_layout_global_max_pool2d(attrs, inputs, tinfos):
     with tvm.target.create(attrs.get_string("target")):
         return topi.nn.global_max_pool2d_alter_layout(attrs, inputs, tinfos)
 
 @reg.register_alter_op_layout("global_avg_pool2d")
-def alter_pooling_layout(attrs, inputs, tinfos):
+def alter_pooling_layout_global_avg_pool2d(attrs, inputs, tinfos):
     with tvm.target.create(attrs.get_string("target")):
         return topi.nn.global_avg_pool2d_alter_layout(attrs, inputs, tinfos)
diff --git a/nnvm/src/pass/graph_annotate.h b/nnvm/src/pass/graph_annotate.h
index 56b112542bbd..eba958eff915 100644
--- a/nnvm/src/pass/graph_annotate.h
+++ b/nnvm/src/pass/graph_annotate.h
@@ -3,38 +3,38 @@
  * \file graph_annotate.h
  * \brief Define rules to annotate a graph.
  */
- #ifndef NNVM_PASS_GRAPH_ANNOTATE_H_
- #define NNVM_PASS_GRAPH_ANNOTATE_H_
+#ifndef NNVM_PASS_GRAPH_ANNOTATE_H_
+#define NNVM_PASS_GRAPH_ANNOTATE_H_
 
- #include
+#include
 
- #include
- #include
+#include
+#include
 
- namespace nnvm {
+namespace nnvm {
 
- class ManualAnnotator;
+class ManualAnnotator;
 /*
  * This class is an abstract class that can be derived by other classes to
  * implement how a node should be selected.
  */
- class GraphAnnotator {
-  public:
-   explicit GraphAnnotator(int fallback_device)
+class GraphAnnotator {
+ public:
+  explicit GraphAnnotator(int fallback_device)
       : fallback_device_(fallback_device) {}
-   virtual ~GraphAnnotator() = default;
-   // A virtual function that is implemented by different annotation methods.
-   virtual int AnnotateNode(const nnvm::Node* n) const = 0;
+  virtual ~GraphAnnotator() = default;
+  // A virtual function that is implemented by different annotation methods.
+  virtual int AnnotateNode(const nnvm::Node* n) const = 0;
 
-   int GetFallbackDevice() const {
+  int GetFallbackDevice() const {
     return fallback_device_;
-   }
+  }
 
-  private:
-   friend ManualAnnotator;
-   /* The fallback device. */
-   int fallback_device_;
- };
+ private:
+  friend ManualAnnotator;
+  /* The fallback device. */
+  int fallback_device_;
+};
 
 /*
  * This class defines a manual way to annotate a graph node. In this method,
@@ -43,28 +43,28 @@
  * is registered with a fallback property or the operator name has not been
  * saved, this node will be annotated with the fallback device.
  */
- class ManualAnnotator : public GraphAnnotator {
-  using OpNameDeviceMap = std::unordered_map<std::string, int>;
-  public:
-   explicit ManualAnnotator(const OpNameDeviceMap& op_name_dev_map,
+class ManualAnnotator : public GraphAnnotator {
+ using OpNameDeviceMap = std::unordered_map<std::string, int>;
+ public:
+  explicit ManualAnnotator(const OpNameDeviceMap& op_name_dev_map,
                            int fallback_device)
       : GraphAnnotator(fallback_device),
         op_name_dev_map_(new OpNameDeviceMap(op_name_dev_map)) {}
-   int AnnotateNode(const nnvm::Node* n) const final {
+  int AnnotateNode(const nnvm::Node* n) const final {
     if (n->is_variable()) return 0;
     if (n->op()->fallback) return fallback_device_;
    return op_name_dev_map_->count(n->op()->name) ?
               op_name_dev_map_->at(n->op()->name) : fallback_device_;
-   }
+  }
 
-  private:
-   std::unique_ptr<OpNameDeviceMap> op_name_dev_map_;
- };
+ private:
+  std::unique_ptr<OpNameDeviceMap> op_name_dev_map_;
+};
 
- using ManualAnnotatorPtr = std::shared_ptr<ManualAnnotator>;
+using ManualAnnotatorPtr = std::shared_ptr<ManualAnnotator>;
 
- }  // namespace nnvm
- #endif  // NNVM_PASS_GRAPH_ANNOTATE_H_
+}  // namespace nnvm
+#endif  // NNVM_PASS_GRAPH_ANNOTATE_H_
diff --git a/nnvm/src/top/nn/nn.cc b/nnvm/src/top/nn/nn.cc
index a86b76f27bfe..222053a78b12 100644
--- a/nnvm/src/top/nn/nn.cc
+++ b/nnvm/src/top/nn/nn.cc
@@ -194,7 +194,7 @@ inline bool BatchNormInferShape(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
-inline bool BatchNormCorrectLayout(NodeAttrs& attrs,
+inline bool BatchNormCorrectLayout(const NodeAttrs& attrs,
                                    std::vector<TShape>* ishapes,
                                    std::vector<Layout> *in_layouts,
                                    const std::vector<Layout> *last_in_layouts,
@@ -593,7 +593,7 @@ inline bool PadInferShape(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
-inline bool PadCorrectLayout(NodeAttrs& attrs,
+inline bool PadCorrectLayout(const NodeAttrs& attrs,
                              std::vector<TShape>* ishapes,
                              std::vector<Layout>* ilayouts,
                              const std::vector<Layout>* last_ilayouts,
diff --git a/nnvm/src/top/tensor/broadcast.cc b/nnvm/src/top/tensor/broadcast.cc
index d6db0a4f4b7b..6141ff1ae621 100644
--- a/nnvm/src/top/tensor/broadcast.cc
+++ b/nnvm/src/top/tensor/broadcast.cc
@@ -129,7 +129,7 @@ inline bool BinaryBroadcastShape(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
-inline bool BinaryBroadcastCorrectLayout(NodeAttrs& attrs,
+inline bool BinaryBroadcastCorrectLayout(const NodeAttrs& attrs,
                                          std::vector<TShape>* ishapes,
                                          std::vector<Layout>* ilayouts,
                                          const std::vector<Layout>* last_ilayouts,
diff --git a/python/tvm/ndarray.py b/python/tvm/ndarray.py
index 7a9c5e62f903..b35c3de63918 100644
--- a/python/tvm/ndarray.py
+++ b/python/tvm/ndarray.py
@@ -7,6 +7,7 @@
 from __future__ import absolute_import as _abs
 
 import numpy as _np
+from ._ffi.function import register_func
 from ._ffi.ndarray import TVMContext, TVMType, NDArrayBase
 from ._ffi.ndarray import context, empty, from_dlpack
 from ._ffi.ndarray import _set_class_ndarray
@@ -199,7 +200,6 @@ def array(arr, ctx=cpu(0)):
     return empty(arr.shape, arr.dtype, ctx).copyfrom(arr)
 
 
-from ._ffi.function import register_func
 @register_func("tvm.nd.random_uniform")
 def random_uniform(size, dtype, target):
     size = [int(x) for x in size.split()]
diff --git a/src/contrib/subgraph/tensorrt_executor.cc b/src/contrib/subgraph/tensorrt_executor.cc
index 2591d4012357..ee490cf9914d 100644
--- a/src/contrib/subgraph/tensorrt_executor.cc
+++ b/src/contrib/subgraph/tensorrt_executor.cc
@@ -9,13 +9,13 @@
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 
 #include "./subgraph.h"
 #include "./tensorrt_executor.h"
 #include "../../runtime/cuda/cuda_common.h"
-#include
diff --git a/topi/python/topi/cuda/pooling.py b/topi/python/topi/cuda/pooling.py
index 83465402b84f..bb924b7e04a6 100644
--- a/topi/python/topi/cuda/pooling.py
+++ b/topi/python/topi/cuda/pooling.py
@@ -3,7 +3,6 @@
 import tvm
 from .. import tag
 from .. import generic
-from ..nn.pooling import *
 
 @generic.schedule_global_pool.register(["cuda", "gpu"])
 def schedule_global_pool(outs):
diff --git a/topi/python/topi/nn/pooling.py b/topi/python/topi/nn/pooling.py
index 256fd6531bff..18077cbcca7f 100644
--- a/topi/python/topi/nn/pooling.py
+++ b/topi/python/topi/nn/pooling.py
@@ -1,8 +1,8 @@
 """TVM operator pooling compute."""
 from __future__ import absolute_import
+import tvm
 from .. import cpp
-import tvm
 
 
 POOL_TYPE_CODE = {
     "avg": 0,
@@ -102,6 +102,7 @@ def pool(data,
 
 @tvm.target.generic_func
 def max_pool2d_alter_layout(attrs, inputs, tinfos):
+    #pylint: disable=unused-argument
     """Change max pool2d layout.
 
     Parameters
@@ -119,6 +120,7 @@ def max_pool2d_alter_layout(attrs, inputs, tinfos):
 
 @tvm.target.generic_func
 def avg_pool2d_alter_layout(attrs, inputs, tinfos):
+    #pylint: disable=unused-argument
     """Change average pool2d layout.
 
     Parameters
@@ -136,6 +138,7 @@ def avg_pool2d_alter_layout(attrs, inputs, tinfos):
 
 @tvm.target.generic_func
 def global_max_pool2d_alter_layout(attrs, inputs, tinfos):
+    #pylint: disable=unused-argument
     """Change global max pool2d layout.
 
     Parameters
@@ -153,6 +156,7 @@ def global_max_pool2d_alter_layout(attrs, inputs, tinfos):
 
 @tvm.target.generic_func
 def global_avg_pool2d_alter_layout(attrs, inputs, tinfos):
+    #pylint: disable=unused-argument
     """Change global average pool2d layout.
 
     Parameters