[remove fluid.layers.relu] remove unit tests (part 1) #48975

Merged 5 commits on Dec 15, 2022.
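The change applied throughout is mechanical: each test gains `import paddle.nn.functional as F`, and every `fluid.layers.relu(...)` / `layers.relu(...)` call becomes `F.relu(...)`. A minimal sketch of the substitution, assuming an eager-mode tensor (illustrative only, not taken from the diff):

```python
import numpy as np

import paddle
import paddle.nn.functional as F  # replaces the removed fluid.layers.relu

x = paddle.to_tensor(np.array([-1.0, 0.0, 2.0], dtype='float32'))

# Before this PR the tests called: y = fluid.layers.relu(x)
y = F.relu(x)  # element-wise max(0, x), same semantics as the removed API
print(y.numpy())  # [0. 0. 2.]
```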
Changes from all commits
(file path not shown)
@@ -186,7 +186,7 @@ def func(self, place):

         x = layers.data('x', shape, False, dtype)
         x.persistable = True
-        y = layers.relu(x)
+        y = F.relu(x)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         x_arr[np.abs(x_arr) < 0.005] = 0.02

3 changes: 2 additions & 1 deletion python/paddle/fluid/tests/unittests/test_backward.py
@@ -18,6 +18,7 @@

 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 import paddle.static as static


@@ -285,7 +286,7 @@ def test_error(self):
         x = fluid.data(name='x', shape=[None, 2, 8, 8], dtype='float32')
         x.stop_gradient = False
         conv = fluid.layers.conv2d(x, 4, 1, bias_attr=False)
-        y = fluid.layers.relu(conv)
+        y = F.relu(conv)

         with self.assertRaises(TypeError):
             x_grad = fluid.gradients(y.name, x)

(file path not shown)
@@ -18,6 +18,7 @@

 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid import core

 paddle.enable_static()
@@ -160,7 +161,7 @@ def build_origin_program(
             data_layout='NHWC',
         )
         out = bn1 + bn2
-        out = fluid.layers.relu(out)
+        out = F.relu(out)
         prediction = fluid.layers.fc(
             input=out, size=10, act='softmax', param_attr=self.fc_param_attr
         )

(file path not shown)
@@ -22,6 +22,7 @@
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+import paddle.nn.functional as F


 class TestMNIST(TestParallelExecutorBase):
@@ -97,7 +98,7 @@ def build_program(self, main_program, startup_program):
         X = fluid.data(name="X", shape=[3, 3], dtype='float32')
         Y = fluid.data(name="Y", shape=[3, 3], dtype='float32')
         Out1 = X * 5
-        Out2 = fluid.layers.relu(Out1)
+        Out2 = F.relu(Out1)
         prediction = paddle.tensor.math._add_with_axis(Y, Out2, axis=1)
         loss = paddle.mean(prediction)
         sgd = fluid.optimizer.SGD(learning_rate=0.001)

(file path not shown)
@@ -20,6 +20,7 @@
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+import paddle.nn.functional as F


 def norm(*args, **kargs):
@@ -59,7 +60,7 @@ def simple_depthwise_net(use_feed):
     hidden = paddle.reshape(img, (-1, 1, 28, 28))
     for _ in range(4):
         hidden = sep_conv(hidden, channel=200, stride=2, filter=5)
-        hidden = fluid.layers.relu(hidden)
+        hidden = F.relu(hidden)
     prediction = fluid.layers.fc(hidden, size=10, act='softmax')
     loss = paddle.nn.functional.cross_entropy(
         input=prediction, label=label, reduction='none', use_softmax=False

(file path not shown)
@@ -17,6 +17,7 @@
 import numpy as np

 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid.reader import use_pinned_memory


@@ -45,7 +46,7 @@ def setUp(self):
     def iter_loader_data(self, loader):
         for _ in range(self.epoch_num):
             for image, label in loader():
-                relu = fluid.layers.relu(image)
+                relu = F.relu(image)
                 self.assertEqual(image.shape, [self.batch_size, 784])
                 self.assertEqual(label.shape, [self.batch_size, 1])
                 self.assertEqual(relu.shape, [self.batch_size, 784])

(file path not shown)
@@ -18,6 +18,7 @@
 import numpy as np

 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid import core


@@ -112,7 +113,7 @@ def __reader__():
         try:
             for _ in range(self.epoch_num):
                 for image, _ in loader():
-                    fluid.layers.relu(image)
+                    F.relu(image)
         except core.EnforceNotMet as ex:
             self.assertIn("Blocking queue is killed", str(ex))
             exception = ex

(file path not shown)
@@ -17,6 +17,7 @@
 import numpy as np

 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.io import DataLoader, Dataset


@@ -71,7 +72,7 @@ def prepare_data_loader(self):
     def run_one_epoch_with_break(self, loader):
         for step_id, data in enumerate(loader()):
             image, label = data
-            relu = fluid.layers.relu(image)
+            relu = F.relu(image)
             self.assertEqual(image.shape, [self.batch_size, 784])
             self.assertEqual(label.shape, [self.batch_size, 1])
             self.assertEqual(relu.shape, [self.batch_size, 784])

(file path not shown)
@@ -19,6 +19,7 @@

 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.wrapped_decorator import wrap_decorator
 from paddle.vision.models import resnet50, resnet101
@@ -317,8 +318,8 @@ def func_example_no_grad_vars(self):
         numel = x_np.size
         x.stop_gradient = False

-        y1 = fluid.layers.relu(x)
-        y2 = fluid.layers.relu(x)
+        y1 = F.relu(x)
+        y2 = F.relu(x)
         z = y1 + y2
         w = z * z

@@ -436,7 +437,7 @@ def func_example_with_gradient_accumulation_and_create_graph(self):
         numel = x_np.size
         x.stop_gradient = False

-        y = fluid.layers.relu(x)
+        y = F.relu(x)
         z = y + 1
         w = z * z

@@ -489,8 +490,8 @@ def func_example_with_gradient_accumulation_and_no_grad_vars(self):
         numel = x_np.size
         x.stop_gradient = False

-        y1 = fluid.layers.relu(x)
-        y2 = fluid.layers.relu(x)
+        y1 = F.relu(x)
+        y2 = F.relu(x)
         z = y1 + y2
         w = z * z

@@ -540,7 +541,7 @@ def func_example_with_gradient_accumulation_and_not_create_graph(self):
         numel = x_np.size
         x.stop_gradient = False

-        y = fluid.layers.relu(x)
+        y = F.relu(x)
         z = y + 1
         w = z * z

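The double-grad tests above change only the activation call; in dygraph, gradients flow through `F.relu` just as they did through `fluid.layers.relu`. A simplified sketch of the pattern these tests exercise (the variable names mirror the tests; the `paddle.grad` call here is illustrative):

```python
import numpy as np

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor(np.random.uniform(-1, 1, [4]).astype('float32'))
x.stop_gradient = False  # make x a leaf that receives gradients

y = F.relu(x)
z = y + 1
w = z * z

# dw/dx = 2 * (relu(x) + 1) * relu'(x), i.e. zero wherever x < 0
(dx,) = paddle.grad(outputs=[w], inputs=[x], create_graph=False)
print(dx.numpy())
```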
3 changes: 2 additions & 1 deletion python/paddle/fluid/tests/unittests/test_imperative_gnn.py
@@ -21,6 +21,7 @@
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+import paddle.nn.functional as F
 from paddle.fluid.dygraph.base import to_variable
 from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import AdamOptimizer
@@ -58,7 +59,7 @@ def __init__(self, name_scope, num_hidden):
         self.gc2 = GraphConv(self.full_name(), 32, 10)

     def forward(self, x, adj):
-        x = fluid.layers.relu(self.gc(x, adj))
+        x = F.relu(self.gc(x, adj))
         return self.gc2(x, adj)


(file path not shown)
@@ -19,6 +19,7 @@

 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid import core
 from paddle.fluid.dygraph.base import to_variable
 from paddle.fluid.dygraph.parallel import (
@@ -34,7 +35,7 @@ def __init__(self, name_scope):
         super().__init__(name_scope)

     def forward(self, inputs):
-        x = fluid.layers.relu(inputs)
+        x = F.relu(inputs)
         x = paddle.multiply(x, x)
         x = paddle.sum(x)
         return [x]

(file path not shown)
@@ -19,6 +19,7 @@

 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid import core
 from paddle.fluid.optimizer import SGDOptimizer

@@ -38,7 +39,7 @@ def forward(self, inputs):
         x = paddle.reshape(inputs, shape=[-1, 4])
         x = self.affine1(x)
         x = paddle.nn.functional.dropout(x, self.dropout_ratio)
-        x = fluid.layers.relu(x)
+        x = F.relu(x)
         action_scores = self.affine2(x)
         return paddle.nn.functional.softmax(action_scores, axis=1)

6 changes: 3 additions & 3 deletions python/paddle/fluid/tests/unittests/test_layers.py
@@ -344,19 +344,19 @@ def test_SyncBatchNorm(self):
     def test_relu(self):
         with self.static_graph():
             t = layers.data(name='t', shape=[3, 3], dtype='float32')
-            ret = layers.relu(t)
+            ret = F.relu(t)
             static_ret = self.get_static_graph_result(
                 feed={'t': np.ones([3, 3], dtype='float32')}, fetch_list=[ret]
             )[0]

         with self.dynamic_graph():
             with _test_eager_guard():
                 t = np.ones([3, 3], dtype='float32')
-                dy_eager_ret = layers.relu(base.to_variable(t))
+                dy_eager_ret = F.relu(base.to_variable(t))
                 dy_eager_ret_value = dy_eager_ret.numpy()

             t = np.ones([3, 3], dtype='float32')
-            dy_ret = layers.relu(base.to_variable(t))
+            dy_ret = F.relu(base.to_variable(t))
             dy_ret_value = dy_ret.numpy()

         np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)

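test_layers.py checks the replacement in both static-graph and dygraph modes, so `F.relu` must accept static Variables as well as eager Tensors. A hedged sketch of the static-graph path using public `paddle.static` APIs (the test itself goes through its own `static_graph()` helper):

```python
import numpy as np

import paddle
import paddle.nn.functional as F

paddle.enable_static()
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    t = paddle.static.data(name='t', shape=[3, 3], dtype='float32')
    ret = F.relu(t)  # works on static Variables too

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
(static_ret,) = exe.run(
    main_prog,
    feed={'t': np.ones([3, 3], dtype='float32')},
    fetch_list=[ret],
)
print(static_ret)  # all ones: relu(1.0) == 1.0
```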
(file path not shown)
@@ -18,6 +18,7 @@

 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F


 class TestMemoryReuseExcludeFeedVar(unittest.TestCase):
@@ -29,7 +30,7 @@ def main_impl(self, place):
         image = fluid.layers.data(
             name='image', shape=self.image_shape, dtype='float32'
         )
-        relu_image = fluid.layers.relu(image)
+        relu_image = F.relu(image)
         loss = paddle.mean(relu_image)

         build_strategy = fluid.BuildStrategy()

(file path not shown)
@@ -18,7 +18,7 @@

 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.layers as layers
+import paddle.nn.functional as F
 from paddle import _legacy_C_ops


@@ -66,7 +66,7 @@ def test_relu(self):
         a = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
         x = fluid.dygraph.to_variable(a)

-        res1 = layers.relu(x)
+        res1 = F.relu(x)
         res2 = _legacy_C_ops.relu(x)

         np.testing.assert_array_equal(res1.numpy(), res2.numpy())

(file path not shown)
@@ -19,6 +19,7 @@

 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.fluid.wrapped_decorator import wrap_decorator

@@ -220,7 +221,7 @@ def func_example_with_gradient_accumulation_and_create_graph(self):
         numel = x_np.size
         x.stop_gradient = False

-        y = fluid.layers.relu(x)
+        y = F.relu(x)
         z = y + 1
         w = z * z

@@ -261,8 +262,8 @@ def test_example_with_gradient_accumulation_and_no_grad_vars(self):
         numel = x_np.size
         x.stop_gradient = False

-        y1 = fluid.layers.relu(x)
-        y2 = fluid.layers.relu(x)
+        y1 = F.relu(x)
+        y2 = F.relu(x)
         z = y1 + y2
         w = z * z

@@ -308,7 +309,7 @@ def test_example_with_gradient_accumulation_and_not_create_graph(self):
         numel = x_np.size
         x.stop_gradient = False

-        y = fluid.layers.relu(x)
+        y = F.relu(x)
         z = y + 1
         w = z * z

(file path not shown)
@@ -18,6 +18,7 @@

 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F


 class TestInferencePartialFeed(unittest.TestCase):
@@ -38,9 +39,9 @@ def run_network(self, places, use_split, has_persistable):
         else:
             lr = fluid.data(name='lr', shape=[None], dtype='float32')

-        relu_x = fluid.layers.relu(x)
-        relu_y = fluid.layers.relu(y)
-        relu_lr = fluid.layers.relu(lr)
+        relu_x = F.relu(x)
+        relu_y = F.relu(y)
+        relu_lr = F.relu(lr)

         exe = fluid.Executor(places[0])
         exe.run(startup_prog)

5 changes: 3 additions & 2 deletions python/paddle/fluid/tests/unittests/test_var_base.py
@@ -20,6 +20,7 @@
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+import paddle.nn.functional as F
 from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard


@@ -653,7 +654,7 @@ def func_test_backward(self):
         with fluid.dygraph.guard():
             var = fluid.dygraph.to_variable(self.array)
             var.stop_gradient = False
-            loss = fluid.layers.relu(var)
+            loss = F.relu(var)
             loss.backward()
             grad_var = var._grad_ivar()
         self.assertEqual(grad_var.shape, self.shape)
@@ -667,7 +668,7 @@ def func_test_gradient(self):
         with fluid.dygraph.guard():
             var = fluid.dygraph.to_variable(self.array)
             var.stop_gradient = False
-            loss = fluid.layers.relu(var)
+            loss = F.relu(var)
             loss.backward()
             grad_var = var.gradient()
         self.assertEqual(grad_var.shape, self.array.shape)

3 changes: 2 additions & 1 deletion python/paddle/fluid/tests/unittests/test_while_loop_op.py
@@ -20,6 +20,7 @@
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.layers as layers
+import paddle.nn.functional as F
 from paddle.fluid.backward import append_backward
 from paddle.fluid.framework import Program, program_guard

@@ -96,7 +97,7 @@ def body(i, ten, test_dict, test_list, test_list_dict):
            test_list[0] = paddle.reshape(test_list[0], [2, -1]) + 1

            test_list_dict[0]["test_key"] += 1
-           test_list_dict[0]["test_key"] = fluid.layers.relu(
+           test_list_dict[0]["test_key"] = F.relu(
                test_list_dict[0]["test_key"]
            )
