diff --git a/docs/langref/relay_op.rst b/docs/langref/relay_op.rst
index deafaa99d645..f4c39261ba1f 100644
--- a/docs/langref/relay_op.rst
+++ b/docs/langref/relay_op.rst
@@ -30,6 +30,7 @@ This level enables fully connected multi-layer perceptron.
    tvm.relay.expand_dims
    tvm.relay.concatenate
    tvm.relay.nn.softmax
+   tvm.relay.nn.log_softmax
    tvm.relay.subtract
    tvm.relay.multiply
    tvm.relay.divide
@@ -114,6 +115,7 @@ Level 1 Definitions
 .. autofunction:: tvm.relay.sigmoid
 .. autofunction:: tvm.relay.concatenate
 .. autofunction:: tvm.relay.nn.softmax
+.. autofunction:: tvm.relay.nn.log_softmax
 
 
 Level 2 Definitions
diff --git a/python/tvm/relay/op/nn/nn.py b/python/tvm/relay/op/nn/nn.py
index 681afd5075c9..5a1bc1068f4b 100644
--- a/python/tvm/relay/op/nn/nn.py
+++ b/python/tvm/relay/op/nn/nn.py
@@ -108,6 +108,28 @@ def softmax(data, axis):
     return _make.softmax(data, axis)
 
 
+def log_softmax(data, axis):
+    r"""Computes log softmax.
+
+    .. math::
+
+        \text{log_softmax}(x)_i = \log \frac{\exp(x_i)}{\sum_j \exp(x_j)}
+
+    .. note::
+        This operator can be optimized away for inference.
+
+    Parameters
+    ----------
+    data: relay.Expr
+        The input data to the operator.
+
+    axis: int
+        The axis to sum over when computing log softmax.
+    """
+
+    return _make.log_softmax(data, axis)
+
+
 def max_pool2d(data,
                pool_size=(1, 1),
                strides=(1, 1),
diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc
index 1937d610d003..dfbeceb45cc0 100644
--- a/src/relay/op/nn/nn.cc
+++ b/src/relay/op/nn/nn.cc
@@ -41,6 +41,35 @@ RELAY_REGISTER_OP("nn.softmax")
 .set_support_level(1)
 .add_type_rel("Identity", IdentityRel);
 
+
+TVM_REGISTER_API("relay.op.nn._make.log_softmax")
+.set_body([](const TVMArgs& args, TVMRetValue* rv) {
+  auto make_func = [](Expr data, int axis) {
+    auto attrs = make_node<SoftmaxAttrs>();
+    attrs->axis = axis;
+    static const Op& op = Op::Get("nn.log_softmax");
+    return CallNode::make(op, {data}, Attrs(attrs), {});
+  };
+
+  runtime::detail::unpack_call<Expr, 2>(make_func, args, rv);
+});
+
+RELAY_REGISTER_OP("nn.log_softmax")
+.describe(R"code(Computes log softmax.
+
+.. math:: \text{log_softmax}(x)_i = \log \frac{\exp(x_i)}{\sum_j \exp(x_j)}
+
+.. note::
+    This operator can be optimized away for inference.
+
+- **data**: The input data
+)code" TVM_ADD_FILELINE)
+.set_num_inputs(1)
+.add_argument("data", "Tensor", "The input tensor.")
+.set_support_level(1)
+.add_type_rel("Identity", IdentityRel);
+
+
 // BatchFlatten
 bool BatchFlattenRel(const Array<Type>& types,
                      int num_inputs,
diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py
index 78cdc048d438..e8c5b5fc87f2 100644
--- a/tests/python/relay/test_op_level1.py
+++ b/tests/python/relay/test_op_level1.py
@@ -54,6 +54,18 @@ def test_softmax():
     assert ftype.ret_type == relay.ty.TensorType((n, d), "float32")
 
 
+def test_log_softmax():
+    ib = relay.ir_builder.IRBuilder()
+    n, d = tvm.var("n"), tvm.var("d")
+    x = ib.param("x", relay.ty.TensorType((n, d), "float32"))
+    with ib.function(x) as func:
+        ib.ret(relay.nn.log_softmax(x, axis=1))
+    ib.ret(func)
+
+    func = relay.ir_pass.infer_type(ib.env, func.to_func())
+    ftype = func.checked_type
+    assert ftype.ret_type == relay.ty.TensorType((n, d), "float32")
+
 def test_unary_op():
     for op in [relay.exp,
                relay.log,
@@ -162,5 +174,6 @@ def test_concatenate_infer_type():
     test_expand_dims_infer_type()
    test_concatenate_infer_type()
     test_softmax()
+    test_log_softmax()
     test_binary_op()
     test_binary_broadcast_op()
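
For reference, here is a minimal NumPy sketch of the computation ``nn.log_softmax`` describes; the helper name ``log_softmax_ref`` and the max-subtraction step are illustrative assumptions, not part of the patch, which only registers the op and reuses the ``Identity`` type relation::

    import numpy as np

    def log_softmax_ref(x, axis=-1):
        # log_softmax(x)_i = x_i - log(sum_j exp(x_j)); subtracting the row
        # max before exponentiating avoids overflow for large inputs.
        x_max = np.max(x, axis=axis, keepdims=True)
        shifted = x - x_max
        return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))

    # exp(log_softmax(x)) recovers softmax, so each row sums to 1.
    x = np.array([[1.0, 2.0, 3.0], [10.0, 10.0, 10.0]])
    assert np.allclose(np.exp(log_softmax_ref(x, axis=1)).sum(axis=1), 1.0)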