[Clean Fluid API] Remove API: lrn (remove directly) #47945

Merged
1 commit merged on Nov 21, 2022
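
This PR deletes the legacy fluid.layers.lrn API outright ("remove directly"), with no deprecation shim left behind. Below is a minimal migration sketch, assuming paddle.nn.functional.local_response_norm is the intended 2.x successor (the PR itself does not name a replacement); its size argument corresponds to the old n parameter:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 3, 112, 112], dtype="float32")  # NCHW input

    # Old (removed): fluid.layers.lrn(input=data, n=5, k=1.0, alpha=1e-4, beta=0.75)
    y = F.local_response_norm(x, size=5, alpha=1e-4, beta=0.75, k=1.0)
    print(y.shape)  # [2, 3, 112, 112]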
98 changes: 0 additions & 98 deletions python/paddle/fluid/layers/nn.py
@@ -109,7 +109,6 @@
'unsqueeze',
'lod_reset',
'lod_append',
'lrn',
'pad',
'pad_constant_like',
'label_smooth',
@@ -7332,103 +7331,6 @@ def lod_append(x, level):
return out


def lrn(
input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None, data_format='NCHW'
):
r"""
:alias_main: paddle.nn.functional.lrn
:alias: paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn
:old_api: paddle.fluid.layers.lrn

This operator implements the Local Response Normalization Layer.
This layer performs a type of "lateral inhibition" by normalizing over local input regions.
For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_

The formula is as follows:

.. math::

Output(i, x, y) = Input(i, x, y) / \left(k + \alpha \sum\limits^{\min(C-1, i + n/2)}_{j = \max(0, i - n/2)}(Input(j, x, y))^2\right)^{\beta}

In the above equation:

- :math:`n` : The number of channels to sum over.
- :math:`k` : The offset (to avoid division by zero).
- :math:`\alpha` : The scaling parameter.
- :math:`\beta` : The exponent parameter.


Args:
input (Variable): Input feature, a 4-D Tensor with shape [N, C, H, W] or [N, H, W, C],
where N is the batch size, C is the number of input channels, H is the height, and W is
the width. The data type is float32. The rank of this tensor must be 4, otherwise a ValueError is raised.
n (int, optional): The number of channels to sum over. Default: 5.
k (float, optional): An offset, positive. Default: 1.0.
alpha (float, optional): The scaling parameter, positive. Default: 1e-4.
beta (float, optional): The exponent, positive. Default: 0.75.
name (str, optional): The default value is None. Normally there is no need for the user to set
this property. For more information, please refer to :ref:`api_guide_Name`.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.

Returns:
Variable: A tensor variable storing the transformation result with the same shape and data type as input.


Examples:

.. code-block:: python

import paddle.fluid as fluid
data = fluid.data(
name="data", shape=[None, 3, 112, 112], dtype="float32")
lrn = fluid.layers.lrn(input=data)
print(lrn.shape) # [-1, 3, 112, 112]
print(lrn.dtype) # float32
"""
helper = LayerHelper('lrn', **locals())
check_variable_and_dtype(input, 'input', ['float32'], 'lrn')
dtype = helper.input_dtype()
input_shape = input.shape
dims = len(input_shape)

if dims != 4:
raise ValueError(
"Input's dimension size of Op(lrn) must be 4, but received %d."
% (dims)
)
if data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Attr(data_format) of Op(lrn) got wrong value: received "
+ data_format
+ " but only NCHW or NHWC supported."
)

mid_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True
)
lrn_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="lrn",
inputs={"X": input},
outputs={
"Out": lrn_out,
"MidOut": mid_out,
},
attrs={
"n": n,
"k": k,
"alpha": alpha,
"beta": beta,
"data_format": data_format,
},
)

return lrn_out


def pad(x, paddings, pad_value=0.0, name=None):
r"""
:alias_main: paddle.nn.functional.pad
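
For reference, the formula in the removed docstring can be transcribed directly in NumPy. This is a sketch of the documented behavior only — the channel windowing of the real lrn kernel may differ at the boundaries — and lrn_reference is a hypothetical helper name, not part of the codebase:

    import numpy as np

    def lrn_reference(x, n=5, k=1.0, alpha=1e-4, beta=0.75):
        # Direct transcription of the docstring formula for NCHW input:
        # out(i) = x(i) / (k + alpha * sum_{j in window(i)} x(j)^2)^beta
        N, C, H, W = x.shape
        out = np.empty_like(x)
        for i in range(C):
            lo = max(0, i - n // 2)      # lowest channel in the window
            hi = min(C - 1, i + n // 2)  # highest channel in the window
            square_sum = (x[:, lo:hi + 1] ** 2).sum(axis=1)
            out[:, i] = x[:, i] / (k + alpha * square_sum) ** beta
        return out

    x = np.random.rand(2, 3, 4, 4).astype("float32")
    print(lrn_reference(x).shape)  # (2, 3, 4, 4)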
7 changes: 0 additions & 7 deletions python/paddle/fluid/tests/unittests/test_layers.py
@@ -3373,13 +3373,6 @@ def make_space_to_depth(self):
)
return layers.space_to_depth(data, 3)

def make_lrn(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
data = self._get_data(name='data', shape=[6, 2, 2], dtype='float32')
return layers.lrn(data)

def make_get_places(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
50 changes: 0 additions & 50 deletions python/paddle/fluid/tests/unittests/test_lrn_op.py
@@ -106,56 +106,6 @@ def init_test_case(self):
self.data_format = 'NHWC'


class TestLRNAPI(unittest.TestCase):
def test_case(self):
data1 = fluid.data(name='data1', shape=[2, 4, 5, 5], dtype='float32')
data2 = fluid.data(name='data2', shape=[2, 5, 5, 4], dtype='float32')
out1 = fluid.layers.lrn(data1, data_format='NCHW')
out2 = fluid.layers.lrn(data2, data_format='NHWC')
data1_np = np.random.random((2, 4, 5, 5)).astype("float32")
data2_np = np.transpose(data1_np, [0, 2, 3, 1])

if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
results = exe.run(
fluid.default_main_program(),
feed={"data1": data1_np, "data2": data2_np},
fetch_list=[out1, out2],
return_numpy=True,
)

np.testing.assert_allclose(
results[0], np.transpose(results[1], (0, 3, 1, 2)), rtol=1e-05
)

def test_exception(self):
input1 = fluid.data(name="input1", shape=[2, 4, 5, 5], dtype="float32")
input2 = fluid.data(
name="input2", shape=[2, 4, 5, 5, 5], dtype="float32"
)

def _attr_data_format():
out = fluid.layers.lrn(input1, data_format='NDHW')

def _input_dim_size():
out = fluid.layers.lrn(input2)

self.assertRaises(ValueError, _attr_data_format)
self.assertRaises(ValueError, _input_dim_size)


class TestLRNOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# the input must be float32
in_w = fluid.data(name="in_w", shape=[None, 3, 3, 3], dtype="int64")
self.assertRaises(TypeError, fluid.layers.lrn, in_w)


class TestLocalResponseNormFAPI(unittest.TestCase):
def setUp(self):
np.random.seed(123)
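
The removed TestLRNAPI layout check could be re-expressed against the 2.x eager API roughly as follows — a hedged sketch, assuming paddle.nn.functional.local_response_norm is the successor op; this snippet is not part of the PR itself:

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    data_nchw = np.random.random((2, 4, 5, 5)).astype("float32")
    data_nhwc = np.transpose(data_nchw, (0, 2, 3, 1))

    out1 = F.local_response_norm(paddle.to_tensor(data_nchw), size=5, data_format="NCHW")
    out2 = F.local_response_norm(paddle.to_tensor(data_nhwc), size=5, data_format="NHWC")

    # The NHWC result, transposed back to NCHW, should match the NCHW result.
    np.testing.assert_allclose(
        out1.numpy(), np.transpose(out2.numpy(), (0, 3, 1, 2)), rtol=1e-05
    )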