[RELAY][OPS]LRN and L2_Normalize #1860

Merged (3 commits) on Oct 11, 2018
Changes from 2 commits
5 changes: 5 additions & 0 deletions docs/langref/relay_op.rst
@@ -38,6 +38,8 @@ This level enables fully connected multi-layer perceptron.
tvm.relay.tanh
tvm.relay.sigmoid
tvm.relay.nn.relu
tvm.relay.nn.l2_normalize


**Level 2: Convolutions**

@@ -53,6 +55,7 @@ This level enables typical convnet models.
tvm.relay.nn.global_avg_pool2d
tvm.relay.nn.upsampling
tvm.relay.nn.batch_flatten
tvm.relay.nn.lrn


**Level 3: Additional Math And Transform Operators**
@@ -120,6 +123,7 @@ Level 1 Definitions
.. autofunction:: tvm.relay.nn.softmax
.. autofunction:: tvm.relay.nn.log_softmax
.. autofunction:: tvm.relay.nn.relu
.. autofunction:: tvm.relay.nn.l2_normalize


Level 2 Definitions
@@ -131,6 +135,7 @@ Level 2 Definitions
.. autofunction:: tvm.relay.nn.global_avg_pool2d
.. autofunction:: tvm.relay.nn.upsampling
.. autofunction:: tvm.relay.nn.batch_flatten
.. autofunction:: tvm.relay.nn.lrn


Level 3 Definitions
38 changes: 38 additions & 0 deletions include/tvm/relay/attrs/nn.h
@@ -173,6 +173,44 @@ struct UpSamplingAttrs : public tvm::AttrsNode<UpSamplingAttrs> {
};




/*! \brief Attributes for LRN operator */
struct LRNAttrs : public tvm::AttrsNode<LRNAttrs> {
  IndexExpr size;
  IndexExpr axis;
  double bias;
  double alpha;
  double beta;

  TVM_DECLARE_ATTRS(LRNAttrs, "relay.attrs.LRNAttrs") {
    TVM_ATTR_FIELD(size).set_default(5)
      .describe("The size of the local region to be considered for normalization.");
    TVM_ATTR_FIELD(axis).set_default(1)
      .describe("Input data layout channel axis.");
    TVM_ATTR_FIELD(bias).set_default(2)
      .describe("The offset parameter to avoid division by 0.");
    TVM_ATTR_FIELD(alpha).set_default(0.0001)
      .describe("The scaling parameter.");
    TVM_ATTR_FIELD(beta).set_default(0.75)
      .describe("The exponent parameter.");
  }
};


/*! \brief Attributes for L2Normalize operator */
struct L2NormalizeAttrs : public tvm::AttrsNode<L2NormalizeAttrs> {
  double eps;
  Array<IndexExpr> axis;

  TVM_DECLARE_ATTRS(L2NormalizeAttrs, "relay.attrs.L2NormalizeAttrs") {
    TVM_ATTR_FIELD(eps)
      .describe("A lower bound value for the norm, to avoid division by 0.");
    TVM_ATTR_FIELD(axis)
      .describe("The axes over which the normalization is applied.");
  }
};

} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_ATTRS_NN_H_
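As a reference for what the `LRNAttrs` fields mean, here is a minimal NumPy sketch of cross-channel LRN using the defaults declared above (size=5, axis=1, bias=2, alpha=0.0001, beta=0.75). The function name `lrn_reference`, the NCHW example shape, and the symmetric zero-padded window are illustrative assumptions, not the topi implementation this op lowers to.

```python
import numpy as np

def lrn_reference(data, size=5, axis=1, bias=2.0, alpha=1e-4, beta=0.75):
    """Sketch of out = data / (bias + alpha * sum(data^2) / size)^beta,
    with the squared sum taken over a window of `size` entries centered at
    each position along `axis` (zero padding at the edges)."""
    sqr = data ** 2
    half = size // 2
    # Zero-pad along the normalization axis so every window has `size` entries.
    pad = [(0, 0)] * data.ndim
    pad[axis] = (half, half)
    padded = np.pad(sqr, pad, mode="constant")
    # Accumulate the sum of squares of the local window for every position.
    sum_sqr = np.zeros_like(data, dtype=np.float64)
    for i in range(size):
        sum_sqr += np.take(padded, range(i, i + data.shape[axis]), axis=axis)
    return data / (bias + alpha * sum_sqr / size) ** beta

x = np.random.uniform(size=(1, 8, 4, 4)).astype("float32")
y = lrn_reference(x)
assert y.shape == x.shape  # LRN is shape-preserving
```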
63 changes: 63 additions & 0 deletions python/tvm/relay/op/nn/nn.py
@@ -383,3 +383,66 @@ def relu(data):
        The computed result.
    """
    return _make.relu(data)


def lrn(data, size=5, axis=1, bias=2, alpha=.00001, beta=0.75):
Review comment (Member): I don't know whether these are the best defaults, but it seems only AlexNet-like architectures still apply LRN anywhere, so it is fine.

"""This operator takes data as input and does local response normalization.

Normalize the input in a local region across or within feature maps.
Each input value is divided by (data / (bias + (alpha * sum_data ^2 /size))^beta)
where n is the size of each local region, and the sum is taken over the region
centered at that value (zero padding is added where necessary).

.. math::
(data / (bias + (alpha * sum_data ^2 /size))^beta)
Parameters
----------
data : relay.Expr
The input data to the operator.

size : int, optional
The size of the local region to be considered for normalization.

axis : int, optional
Input data layout channel axis. Default value is 1 for NCHW format

bias : float, optional
The offset parameter to avoid dividing by 0.

alpha : float, optional
The scaling parameter.

beta : float, optional
The exponent parameter.

Returns
-------
result : relay.Expr
The computed result.
"""

return _make.lrn(data, size, axis, alpha, beta, bias)


def l2_normalize(data, eps, axis=None):
    """Perform L2 normalization on the input data.

    .. math::

        y(i, j) = x(i, j) / sqrt(max(sum(x^2), eps))

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    eps : float
        A lower bound value for the norm, to avoid division by 0.

    axis : list of int, optional
        The axes over which the normalization is applied.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.l2_normalize(data, eps, axis)
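For comparison with the docstring formula, a small NumPy sketch of the same computation; `l2_normalize_reference` and the sample shape are illustrative only and not part of this PR.

```python
import numpy as np

def l2_normalize_reference(x, eps, axis=None):
    # y(i, j) = x(i, j) / sqrt(max(sum(x^2), eps)), with the sum of squares
    # taken over the given axes (all axes when axis is None).
    sum_sqr = np.sum(x ** 2, axis=None if axis is None else tuple(axis), keepdims=True)
    return x / np.sqrt(np.maximum(sum_sqr, eps))

x = np.random.uniform(size=(1, 4, 8, 8)).astype("float32")
y = l2_normalize_reference(x, eps=1e-5, axis=[1])
# The squared norm over the chosen axis is ~1 wherever it was above eps.
print(np.sum(y ** 2, axis=1).max())
```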
74 changes: 74 additions & 0 deletions src/relay/op/nn/nn.cc
@@ -143,5 +143,79 @@ RELAY_REGISTER_UNARY_OP("relay.op.nn._make.", "relu")
.set_support_level(1)
.add_type_rel("Identity", IdentityRel);


// Positional relay function to create LRN operator used by frontend FFI.
Expr MakeLRN(Expr data,
             IndexExpr size,
             IndexExpr axis,
             double alpha,
             double beta,
             double bias) {
  auto attrs = make_node<LRNAttrs>();
  attrs->size = size;
  attrs->axis = axis;
  attrs->alpha = alpha;
  attrs->beta = beta;
  attrs->bias = bias;
  static const Op& op = Op::Get("nn.lrn");
  return CallNode::make(op, {data}, Attrs(attrs), {});
}

TVM_REGISTER_API("relay.op.nn._make.lrn")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
    runtime::detail::unpack_call<Expr, 6>(MakeLRN, args, rv);
  });

RELAY_REGISTER_OP("nn.lrn")
.describe(R"code(LRN layer.

Normalize the input in a local region across or within feature maps.
Each input value is divided by (1 + (\alpha/n) \sum_i x_i^2)^\beta,
where n is the size of each local region, and the sum is taken over the region
centered at that value (zero padding is added where necessary).

.. math::

data / (bias + (alpha * sum_data ^2 /size))^beta

- **data**: The input tensor.
)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.add_argument("data", "Tensor", "The input tensor.")
.set_support_level(2)
.add_type_rel("Identity", IdentityRel);


// Positional relay function to create L2Normalize operator used by frontend FFI.
Expr MakeL2Normalize(Expr data,
                     double eps,
                     Array<IndexExpr> axis) {
  auto attrs = make_node<L2NormalizeAttrs>();
  attrs->eps = eps;
  attrs->axis = std::move(axis);
  static const Op& op = Op::Get("nn.l2_normalize");
  return CallNode::make(op, {data}, Attrs(attrs), {});
}

TVM_REGISTER_API("relay.op.nn._make.l2_normalize")
.set_body([](const TVMArgs& args, TVMRetValue* rv) {
    runtime::detail::unpack_call<Expr, 3>(MakeL2Normalize, args, rv);
  });

RELAY_REGISTER_OP("nn.l2_normalize")
.describe(R"code(L2 Normalization layer.

Normalizes along dimension axis using an L2 norm

.. math::
output = x / sqrt(max(sum(x^2), epsilon))

- **data**: The input tensor.
)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.add_argument("data", "Tensor", "The input tensor.")
.set_support_level(1)
.add_type_rel("Identity", IdentityRel);

} // namespace relay
} // namespace tvm
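To see what these registrations give the frontend, here is a rough usage sketch built only from the Python wrappers added above and the ir_builder API this PR's tests use; the example tensor shape and the attribute/field accesses are assumptions about the surrounding Relay API, not something this diff changes.

```python
import tvm
from tvm import relay

# TVM_REGISTER_API exposes MakeLRN / MakeL2Normalize as packed functions in the
# global registry; the wrappers in nn.py reach them through relay.op.nn._make.
assert tvm.get_global_func("relay.op.nn._make.lrn") is not None
assert tvm.get_global_func("relay.op.nn._make.l2_normalize") is not None

# Illustrative only: construct the two new ops and peek at the resulting Call nodes.
ib = relay.ir_builder.IRBuilder()
x = ib.param("x", relay.ty.TensorType((1, 16, 32, 32), "float32"))
lrn_call = relay.nn.lrn(x, size=5, axis=1, bias=2.0, alpha=1e-4, beta=0.75)
l2_call = relay.nn.l2_normalize(x, eps=1e-5, axis=[1])
assert lrn_call.op.name == "nn.lrn"
assert l2_call.op.name == "nn.l2_normalize"
```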
26 changes: 26 additions & 0 deletions tests/python/relay/test_op_level1.py
@@ -168,6 +168,30 @@ def test_concatenate_infer_type():
    assert ftype.ret_type == relay.ty.TensorType(
        (n, t + t, 100), "float32")

def test_lrn():
    ib = relay.ir_builder.IRBuilder()
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
    with ib.function(x) as func:
        ib.ret(relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75))
    ib.ret(func)

    func = relay.ir_pass.infer_type(ib.env, func.to_func())
    ftype = func.checked_type
    assert ftype.ret_type == relay.ty.TensorType((n, c, h, w), "float32")


def test_l2_normalize():
    ib = relay.ir_builder.IRBuilder()
    n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w")
    x = ib.param("x", relay.ty.TensorType((n, c, h, w), "float32"))
    with ib.function(x) as func:
        ib.ret(relay.nn.l2_normalize(x, eps=0.001, axis=[1]))
    ib.ret(func)

    func = relay.ir_pass.infer_type(ib.env, func.to_func())
    ftype = func.checked_type
    assert ftype.ret_type == relay.ty.TensorType((n, c, h, w), "float32")

if __name__ == "__main__":
    test_unary_op()
@@ -178,3 +202,5 @@ def test_concatenate_infer_type():
    test_log_softmax()
    test_binary_op()
    test_binary_broadcast_op()
    test_lrn()
    test_l2_normalize()