Fix lint warning (#3)
wweic authored Mar 1, 2019
1 parent b500978 commit 303e228
Showing 10 changed files with 61 additions and 59 deletions.
nnvm/include/nnvm/op_attr_types.h (2 changes: 1 addition & 1 deletion)
@@ -224,7 +224,7 @@ using FCorrectLayout = std::function<bool(
 * \return success flag.
 */
using FCorrectLayoutEx = std::function<bool(
-  NodeAttrs& attrs,
+  const NodeAttrs& attrs,
  std::vector<TShape>* ishapes,
  std::vector<Layout>* ilayouts,
  const std::vector<Layout>* last_ilayouts,
nnvm/python/nnvm/frontend/mxnet.py (7 changes: 3 additions & 4 deletions)
@@ -274,8 +274,8 @@ def _lrn(inputs, attrs):
    return _get_nnvm_op(op_name)(*inputs, **new_attrs)

def _symbol_ring_buffer(inputs, attrs):
    output = _get_nnvm_op('ring_buffer')(*inputs, **attrs)
    return _sym._assign(inputs[1], output)


def _copy(inputs, _):
@@ -365,8 +365,7 @@ def _argmin(inputs, attrs):
    'expand_dims' : _expand_dims,
    'LRN' : _lrn,
    'ring_buffer' : _symbol_ring_buffer,
-   'LinearRegressionOutput' : _copy,
-   'argmax' : _argmax
+   'LinearRegressionOutput' : _copy
}

def _convert_symbol(op_name, inputs, attrs,
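The first hunk above appears to be a whitespace-only re-indentation (the removed and added lines differ only in leading spaces), and the second drops the 'argmax' mapping, presumably because no _argmax converter is defined in this module, which pylint reports as undefined-variable (E0602). A minimal sketch of the indentation warning, assuming pylint's default 4-space indent-string and using a made-up function:

# pylint emits W0311 (bad-indentation) when a body is not indented
# by the configured unit (4 spaces by default).
def scale(values, factor):
  return [v * factor for v in values]  # 2-space body: W0311

# the lint-clean spelling
def scale_fixed(values, factor):
    return [v * factor for v in values]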
nnvm/python/nnvm/top/nn.py (30 changes: 15 additions & 15 deletions)
@@ -121,12 +121,12 @@ def compute_conv2d(attrs, inputs, _):
    else:
        raise ValueError("not support arbitrary group number for now")

    if attrs.get_bool("use_bias"):
        bias = inputs[2]
        expand_axis = 1 if layout == "NCHW" else 0
        bias = topi.expand_dims(bias, axis=expand_axis, num_newaxis=2)
        out = topi.add(out, bias)
    return out

@reg.register_schedule("conv2d")
def schedule_conv2d(attrs, outs, target):
@@ -260,11 +260,11 @@ def compute_contrib_conv2d_winograd_without_weight_transform(attrs, inputs, _):
        inputs[0], inputs[1], strides, padding, dilation, layout, out_dtype,
        tile_size)

    if attrs.get_bool("use_bias"):
        bias = inputs[2]
        bias = topi.expand_dims(bias, axis=1, num_newaxis=2)
        out = topi.add(out, bias)
    return out

@reg.register_schedule("_contrib_conv2d_winograd_without_weight_transform")
def schedule_contrib_conv2d_winograd_without_weight_transform(attrs, outs, target):
@@ -312,22 +312,22 @@ def schedule_conv2d_transpose(attrs, outs, target):
reg.register_pattern("conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)

@reg.register_alter_op_layout("max_pool2d")
-def alter_pooling_layout(attrs, inputs, tinfos):
+def alter_pooling_layout_max_pool2d(attrs, inputs, tinfos):
    with tvm.target.create(attrs.get_string("target")):
        return topi.nn.max_pool2d_alter_layout(attrs, inputs, tinfos)

@reg.register_alter_op_layout("avg_pool2d")
-def alter_pooling_layout(attrs, inputs, tinfos):
+def alter_pooling_layout_avg_pool2d(attrs, inputs, tinfos):
    with tvm.target.create(attrs.get_string("target")):
        return topi.nn.avg_pool2d_alter_layout(attrs, inputs, tinfos)

@reg.register_alter_op_layout("global_max_pool2d")
-def alter_pooling_layout(attrs, inputs, tinfos):
+def alter_pooling_layout_global_max_pool2d(attrs, inputs, tinfos):
    with tvm.target.create(attrs.get_string("target")):
        return topi.nn.global_max_pool2d_alter_layout(attrs, inputs, tinfos)

@reg.register_alter_op_layout("global_avg_pool2d")
-def alter_pooling_layout(attrs, inputs, tinfos):
+def alter_pooling_layout_global_avg_pool2d(attrs, inputs, tinfos):
    with tvm.target.create(attrs.get_string("target")):
        return topi.nn.global_avg_pool2d_alter_layout(attrs, inputs, tinfos)

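All four handlers above previously shared the name alter_pooling_layout; each successive def rebinds the same module-level name after the decorator has already registered the previous function, and pylint flags that as function-redefined (E0102). A minimal sketch of the pattern and the rename fix, using a hypothetical stand-in for reg.register_alter_op_layout:

_ALTER_LAYOUT_REGISTRY = {}

def register_alter_op_layout(op_name):
    """Hypothetical registry decorator: record one handler per op."""
    def _register(func):
        _ALTER_LAYOUT_REGISTRY[op_name] = func
        return func
    return _register

@register_alter_op_layout("max_pool2d")
def alter_pooling_layout(attrs, inputs, tinfos):
    return None

@register_alter_op_layout("avg_pool2d")
def alter_pooling_layout(attrs, inputs, tinfos):  # E0102: redefines the def above
    return None

# Both handlers survive in the registry, so behavior is unchanged, but the
# module name now refers only to the last def. Giving each handler a unique
# name (alter_pooling_layout_max_pool2d, ...) removes the ambiguity and the
# warning.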
nnvm/src/pass/graph_annotate.h (64 changes: 32 additions & 32 deletions)
@@ -3,38 +3,38 @@
 * \file graph_annotate.h
 * \brief Define rules to annotate a graph.
 */
#ifndef NNVM_PASS_GRAPH_ANNOTATE_H_
#define NNVM_PASS_GRAPH_ANNOTATE_H_

#include <nnvm/graph.h>

#include <string>
#include <unordered_map>

namespace nnvm {

class ManualAnnotator;
/*
 * This class is an abstract class that can be derived by other classes to
 * implement how a node should be selected.
 */
class GraphAnnotator {
 public:
  explicit GraphAnnotator(int fallback_device)
      : fallback_device_(fallback_device) {}
  virtual ~GraphAnnotator() = default;
  // A virtual function that is implemented by different annotation methods.
  virtual int AnnotateNode(const nnvm::Node* n) const = 0;

  int GetFallbackDevice() const {
    return fallback_device_;
  }

 private:
  friend ManualAnnotator;
  /* The fallback device. */
  int fallback_device_;
};

/*
 * This class defines a manual way to annotate a graph node. In this method,
@@ -43,28 +43,28 @@
 * is registered with a fallback property or the operator name has not been
 * saved, this node will be annotated with the fallback device.
 */
class ManualAnnotator : public GraphAnnotator {
  using OpNameDeviceMap = std::unordered_map<std::string, int>;
 public:
  explicit ManualAnnotator(const OpNameDeviceMap& op_name_dev_map,
                           int fallback_device)
      : GraphAnnotator(fallback_device),
        op_name_dev_map_(new OpNameDeviceMap(op_name_dev_map)) {}

  int AnnotateNode(const nnvm::Node* n) const final {
    if (n->is_variable()) return 0;
    if (n->op()->fallback) return fallback_device_;

    return op_name_dev_map_->count(n->op()->name)
               ? op_name_dev_map_->at(n->op()->name)
               : fallback_device_;
  }

 private:
  std::unique_ptr<const OpNameDeviceMap> op_name_dev_map_;
};

using ManualAnnotatorPtr = std::shared_ptr<ManualAnnotator>;

}  // namespace nnvm
#endif  // NNVM_PASS_GRAPH_ANNOTATE_H_
nnvm/src/top/nn/nn.cc (4 changes: 2 additions & 2 deletions)
@@ -194,7 +194,7 @@ inline bool BatchNormInferShape(const nnvm::NodeAttrs& attrs,
  return true;
}

-inline bool BatchNormCorrectLayout(NodeAttrs& attrs,
+inline bool BatchNormCorrectLayout(const NodeAttrs& attrs,
                                   std::vector<TShape>* ishapes,
                                   std::vector<Layout> *in_layouts,
                                   const std::vector<Layout> *last_in_layouts,
@@ -593,7 +593,7 @@ inline bool PadInferShape(const nnvm::NodeAttrs& attrs,
  return true;
}

-inline bool PadCorrectLayout(NodeAttrs& attrs,
+inline bool PadCorrectLayout(const NodeAttrs& attrs,
                             std::vector<TShape>* ishapes,
                             std::vector<Layout>* ilayouts,
                             const std::vector<Layout>* last_ilayouts,
nnvm/src/top/tensor/broadcast.cc (2 changes: 1 addition & 1 deletion)
@@ -129,7 +129,7 @@ inline bool BinaryBroadcastShape(const nnvm::NodeAttrs& attrs,
  return true;
}

-inline bool BinaryBroadcastCorrectLayout(NodeAttrs& attrs,
+inline bool BinaryBroadcastCorrectLayout(const NodeAttrs& attrs,
                                         std::vector<TShape>* ishapes,
                                         std::vector<Layout>* ilayouts,
                                         const std::vector<Layout>* last_ilayouts,
python/tvm/ndarray.py (2 changes: 1 addition & 1 deletion)
@@ -7,6 +7,7 @@
from __future__ import absolute_import as _abs
import numpy as _np

+from ._ffi.function import register_func
from ._ffi.ndarray import TVMContext, TVMType, NDArrayBase
from ._ffi.ndarray import context, empty, from_dlpack
from ._ffi.ndarray import _set_class_ndarray
@@ -199,7 +200,6 @@ def array(arr, ctx=cpu(0)):
    return empty(arr.shape, arr.dtype, ctx).copyfrom(arr)


-from ._ffi.function import register_func
@register_func("tvm.nd.random_uniform")
def random_uniform(size, dtype, target):
    size = [int(x) for x in size.split()]
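Hoisting register_func into the top-level import block addresses pylint's wrong-import-position check (C0413), which fires whenever an import follows other module-level statements. A minimal sketch with a stdlib module:

# before (flagged):
#
#     VERSION = "1.0"
#     import json            # C0413: wrong-import-position
#
# after: imports first, then the rest of the module
import json

VERSION = "1.0"

def version_blob():
    """Use the hoisted import."""
    return json.dumps({"version": VERSION})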
src/contrib/subgraph/tensorrt_executor.cc (2 changes: 1 addition & 1 deletion)
@@ -9,13 +9,13 @@
#include <dmlc/parameter.h>
#include <dmlc/timer.h>
#include <unordered_set>
+#include <cmath>
#include <functional>
#include <iostream>
#include <sstream>
#include "./subgraph.h"
#include "./tensorrt_executor.h"
#include "../../runtime/cuda/cuda_common.h"
-#include <cmath>

namespace tvm {
namespace contrib {
topi/python/topi/cuda/pooling.py (1 change: 0 additions & 1 deletion)
@@ -3,7 +3,6 @@
import tvm
from .. import tag
from .. import generic
-from ..nn.pooling import *

@generic.schedule_global_pool.register(["cuda", "gpu"])
def schedule_global_pool(outs):
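Dropping the unused star import silences pylint's wildcard-import (W0401) and unused-wildcard-import (W0614) checks: a star import pulls an unknown set of names into the namespace, so the linter cannot tell which are used. A small sketch with a stdlib module:

# before (flagged):
#
#     from math import *      # W0401: wildcard-import
#     print(floor(2.5))       # this name arrives invisibly
#
# after: name the dependency explicitly
from math import floor

print(floor(2.5))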
topi/python/topi/nn/pooling.py (6 changes: 5 additions & 1 deletion)
@@ -1,8 +1,8 @@
"""TVM operator pooling compute."""
from __future__ import absolute_import
import tvm
from .. import cpp

import tvm

POOL_TYPE_CODE = {
"avg": 0,
@@ -102,6 +102,7 @@ def pool(data,

@tvm.target.generic_func
def max_pool2d_alter_layout(attrs, inputs, tinfos):
+    #pylint: disable=unused-argument
    """Change max pool2d layout.

    Parameters
@@ -119,6 +120,7 @@ def max_pool2d_alter_layout(attrs, inputs, tinfos):

@tvm.target.generic_func
def avg_pool2d_alter_layout(attrs, inputs, tinfos):
+    #pylint: disable=unused-argument
    """Change average pool2d layout.

    Parameters
@@ -136,6 +138,7 @@ def avg_pool2d_alter_layout(attrs, inputs, tinfos):

@tvm.target.generic_func
def global_max_pool2d_alter_layout(attrs, inputs, tinfos):
+    #pylint: disable=unused-argument
    """Change global max pool2d layout.

    Parameters
@@ -153,6 +156,7 @@ def global_max_pool2d_alter_layout(attrs, inputs, tinfos):

@tvm.target.generic_func
def global_avg_pool2d_alter_layout(attrs, inputs, tinfos):
+    #pylint: disable=unused-argument
    """Change global average pool2d layout.

    Parameters
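Each @tvm.target.generic_func default above must keep the full (attrs, inputs, tinfos) signature so that target-specific overrides can replace it, even though the fallback body never touches the arguments; the added #pylint: disable=unused-argument comments suppress W0613 for exactly that case. A minimal sketch with a hypothetical stand-in decorator:

def generic_func(fdefault):
    """Hypothetical stand-in for tvm.target.generic_func: returns the
    default implementation unchanged."""
    return fdefault

@generic_func
def max_pool2d_alter_layout(attrs, inputs, tinfos):
    # pylint: disable=unused-argument
    """Default behavior: leave the layout alone."""
    return None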
