Skip to content

Commit

Permalink
Add the parameter checking for softplus and fix the doc string (#26530)
Browse files Browse the repository at this point in the history
  • Loading branch information
hong19860320 authored Aug 22, 2020
1 parent 0ca10d3 commit c11c83f
Show file tree
Hide file tree
Showing 3 changed files with 111 additions and 124 deletions.
3 changes: 3 additions & 0 deletions python/paddle/fluid/tests/unittests/test_activation_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -717,6 +717,9 @@ def test_errors(self):
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
self.assertRaises(TypeError, F.softshrink, x_int32)
# The threshold must be no less than zero
x_fp32 = paddle.data(name='x_fp32', shape=[12, 10], dtype='float32')
self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
# support the input dtype is float16
x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
F.softshrink(x_fp16)
Expand Down
126 changes: 58 additions & 68 deletions python/paddle/nn/functional/activation.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ def hardtanh(x, min=-1.0, max=1.0, name=None):
x, \\text{otherwise}
\\end{cases}
Args:
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
min (float, optional): The minimum value of the linear region range. Default is -1.
max (float, optional): The maximum value of the linear region range. Default is 1.
Expand Down Expand Up @@ -598,9 +598,9 @@ def relu6(x, name=None):
.. math::
\text{relu6}(x) = \min(\max(0,x), 6)
relu6(x) = min(max(0,x), 6)
Args:
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Expand All @@ -609,18 +609,16 @@ def relu6(x, name=None):
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
out = F.relu6(x) # [0, 0.3, 6]
paddle.disable_static()
x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
out = F.relu6(x) # [0, 0.3, 6]
"""
threshold = 6.0
if in_dygraph_mode():
Expand All @@ -646,11 +644,9 @@ def selu(x,
.. math::
\text{selu}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1))), \\
with\,alpha=1.6732632423543772848170429916717 and \\
scale=1.0507009873554804934193349852946
selu(x) = scale * (max(0,x) + min(0, alpha * (e^{x} - 1)))
Args:
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
scale (float, optional): The value of scale for selu. Default is 1.0507009873554804934193349852946
alpha (float, optional): The value of alpha for selu. Default is 1.6732632423543772848170429916717
Expand All @@ -661,18 +657,16 @@ def selu(x,
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([[0, 1],[2, 3]]))
out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]]
paddle.disable_static()
x = paddle.to_tensor(np.array([[0, 1],[2, 3]]))
out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]]
"""
if in_dygraph_mode():
return core.ops.selu(x, 'scale', scale, 'alpha', alpha)
Expand Down Expand Up @@ -856,10 +850,10 @@ def softplus(x, beta=1, threshold=20, name=None):
.. math::
\text{softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) \\
\text{For numerical stability, the implementation reverts to the linear function when :}\,x \times \beta > threshold.
softplus(x) = \\frac{1}{beta} * \\log(1 + e^{beta * x}) \\\\
\\text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}
Args:
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
beta (float, optional): The value of beta for softplus. Default is 1
threshold (float, optional): The value of threshold for softplus. Default is 20
Expand All @@ -870,18 +864,16 @@ def softplus(x, beta=1, threshold=20, name=None):
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
paddle.disable_static()
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
"""
if in_dygraph_mode():
return core.ops.softplus(x, 'beta', beta, 'threshold', threshold)
Expand All @@ -905,14 +897,13 @@ def softshrink(x, threshold=0.5, name=None):
.. math::
\text{softshrink}(x) =
\begin{cases}
x - threshold, & \text{ if } x > threshold \\
x + threshold, & \text{ if } x < -threshold \\
0, & \text{ otherwise }
\end{cases}
softshrink(x)= \\begin{cases}
x - threshold, \\text{if } x > threshold \\\\
x + threshold, \\text{if } x < -threshold \\\\
0, \\text{otherwise}
\\end{cases}
Args:
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
threshold (float, optional): The value of threshold (must be no less than zero) for softshrink. Default is 0.5
name (str, optional): Name for the operation (optional, default is None).
Expand All @@ -922,19 +913,22 @@ def softshrink(x, threshold=0.5, name=None):
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
paddle.disable_static()
x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
"""
if threshold < 0:
raise ValueError(
"The threshold must be no less than zero. Received: {}.".format(
threshold))

if in_dygraph_mode():
return core.ops.softshrink(x, 'lambda', threshold)

Expand All @@ -956,9 +950,9 @@ def softsign(x, name=None):
.. math::
\text{softsign}(x) = \frac{x}{1 + |x|}
softsign(x) = \\frac{x}{1 + |x|}
Args:
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Expand All @@ -967,18 +961,16 @@ def softsign(x, name=None):
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
paddle.disable_static()
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
"""
if in_dygraph_mode():
return core.ops.softsign(x)
Expand All @@ -997,7 +989,7 @@ def tanhshrink(x, name=None):
.. math::
\text{tanhshrink}(x) = x - \text{tanh}(x)
tanhshrink(x) = x - tanh(x)
Args:
x (Tensor): The input Tensor with data type float32, float64.
Expand All @@ -1008,18 +1000,16 @@ def tanhshrink(x, name=None):
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
paddle.disable_static()
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
paddle.disable_static()
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
"""
if in_dygraph_mode():
return core.ops.tanh_shrink(x)
Expand Down
Loading

0 comments on commit c11c83f

Please sign in to comment.