From abac31fd95060c5d8b5a1808774a984c194e04e6 Mon Sep 17 00:00:00 2001
From: Zhan Rongrui <46243324+zrr1999@users.noreply.github.com>
Date: Mon, 20 Nov 2023 11:22:41 +0800
Subject: [PATCH] Migrate print into pir (#58780)

---
 .../pir/dialect/op_generator/ops_api_gen.py |   2 +-
 python/paddle/static/nn/control_flow.py     |  19 ++-
 test/legacy_test/test_index_put_op.py       |   6 +-
 test/legacy_test/test_print_op.py           | 111 +++++++++++-------
 4 files changed, 94 insertions(+), 44 deletions(-)

diff --git a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
index 1b78209d2223d..1075065cd0755 100644
--- a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
+++ b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
@@ -92,6 +92,7 @@
     'fc',
     'self_dp_attention',
     'get_tensor_from_selected_rows',
+    'print',
 ]
 
 NO_NEED_GEN_STATIC_ONLY_APIS = [
@@ -114,7 +115,6 @@
     'fused_scale_bias_relu_conv_bn',
     'fused_scale_bias_add_relu',
     'memcpy',
-    'print',
     'recv_v2',
     'rnn_',
     'seed',
diff --git a/python/paddle/static/nn/control_flow.py b/python/paddle/static/nn/control_flow.py
index 32b4f4407be23..0f00cf4915295 100644
--- a/python/paddle/static/nn/control_flow.py
+++ b/python/paddle/static/nn/control_flow.py
@@ -16,6 +16,7 @@
 from functools import partial, reduce
 
 import paddle
+from paddle import _C_ops
 from paddle.base import core
 from paddle.base.backward import _infer_var_data_type_shape_
 from paddle.base.framework import (
@@ -1745,8 +1746,24 @@ def Print(
         ['uint16', 'float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
         'paddle.static.Print',
     )
+    message = message or ""
+    helper = LayerHelper('print', **locals())
+
+    if in_pir_mode():
+        return _C_ops.print(
+            input,
+            first_n,
+            message,
+            summarize,
+            print_tensor_name,
+            print_tensor_type,
+            print_tensor_shape,
+            print_tensor_layout,
+            print_tensor_lod,
+            print_phase.upper(),
+            True,
+        )
 
-    helper = LayerHelper('print' + "_" + input.name, **locals())
     output = helper.create_variable_for_type_inference(input.dtype)
     helper.append_op(
         type='print',
diff --git a/test/legacy_test/test_index_put_op.py b/test/legacy_test/test_index_put_op.py
index 3d988462194cc..ca8b5389f8b37 100644
--- a/test/legacy_test/test_index_put_op.py
+++ b/test/legacy_test/test_index_put_op.py
@@ -18,6 +18,7 @@
 import numpy as np
 
 import paddle
+from paddle.pir_utils import test_with_pir_api
 
 
 def compute_index_put_ref(x_np, indices_np, value_np, accumulate=False):
@@ -143,7 +144,7 @@ def test_dygraph_forward(self):
             )
             np.testing.assert_allclose(ref_res, pd_res.numpy(), atol=1e-7)
 
-    # @test_with_pir_api
+    @test_with_pir_api
     def test_static_forward(self):
         paddle.enable_static()
         for place in self.place:
@@ -626,6 +627,7 @@ def setPlace(self):
         if paddle.is_compiled_with_cuda():
             self.place.append('gpu')
 
+    @test_with_pir_api
     def test_dygraph_forward(self):
         paddle.disable_static()
         for place in self.place:
@@ -934,7 +936,7 @@ def test_backward_all_false_bool_indice(self):
                 atol=1e-7,
             )
 
-    # @test_with_pir_api
+    @test_with_pir_api
     def test_backward_in_static(self):
         paddle.enable_static()
         exe = paddle.static.Executor()
diff --git a/test/legacy_test/test_print_op.py b/test/legacy_test/test_print_op.py
index c4390d76bb9ff..95c1dd420626d 100755
--- a/test/legacy_test/test_print_op.py
+++ b/test/legacy_test/test_print_op.py
@@ -19,8 +19,10 @@
 
 import paddle
 from paddle import base
+from paddle.autograd.ir_backward import grad
 from paddle.base import core
-from paddle.base.framework import switch_main_program
+from paddle.framework import in_dynamic_or_pir_mode
+from paddle.pir_utils import test_with_pir_api
 from paddle.static import Program, program_guard
 
 paddle.enable_static()
@@ -39,54 +41,81 @@ def build_network(self, only_forward, **kargs):
         x.stop_gradient = False
         paddle.static.Print(input=x, **kargs)
         loss = paddle.mean(x)
-        paddle.static.append_backward(loss=loss)
+
+        if in_dynamic_or_pir_mode():
+            dx = grad(loss, [x])
+        else:
+            paddle.static.append_backward(loss=loss)
         return loss
 
+    @test_with_pir_api
     def test_forward(self):
-        switch_main_program(Program())
-        printed = self.build_network(True, print_phase='forward')
-        exe = paddle.static.Executor(self.place)
-        outs = exe.run(
-            feed={'x': self.x_tensor}, fetch_list=[printed], return_numpy=False
-        )
+        with paddle.static.program_guard(
+            paddle.static.Program(), paddle.static.Program()
+        ):
+            printed = self.build_network(True, print_phase='forward')
+            exe = paddle.static.Executor(self.place)
+            outs = exe.run(
+                feed={'x': self.x_tensor},
+                fetch_list=[printed],
+                return_numpy=False,
+            )
 
+    @test_with_pir_api
     def test_backward(self):
-        switch_main_program(Program())
-        loss = self.build_network(False, print_phase='backward')
-        exe = paddle.static.Executor(self.place)
-        outs = exe.run(
-            feed={'x': self.x_tensor}, fetch_list=[loss], return_numpy=False
-        )
+        with paddle.static.program_guard(
+            paddle.static.Program(), paddle.static.Program()
+        ):
+            loss = self.build_network(False, print_phase='backward')
+            exe = paddle.static.Executor(self.place)
+            outs = exe.run(
+                feed={'x': self.x_tensor}, fetch_list=[loss], return_numpy=False
+            )
 
+    @test_with_pir_api
     def test_all_parameters(self):
-        x = paddle.static.data('x', shape=[-1, 3], dtype='float32', lod_level=1)
-        x.stop_gradient = False
-
-        for print_tensor_name in [True, False]:
-            for print_tensor_type in [True, False]:
-                for print_tensor_shape in [True, False]:
-                    for print_tensor_lod in [True, False]:
-                        paddle.static.Print(
-                            input=x,
-                            print_tensor_name=print_tensor_name,
-                            print_tensor_type=print_tensor_type,
-                            print_tensor_shape=print_tensor_shape,
-                            print_tensor_lod=print_tensor_lod,
-                        )
-        loss = paddle.mean(x)
-        paddle.static.append_backward(loss=loss)
-        exe = paddle.static.Executor(self.place)
-        outs = exe.run(
-            feed={'x': self.x_tensor}, fetch_list=[loss], return_numpy=False
-        )
+        prog = paddle.static.Program()
+        with paddle.static.program_guard(prog, paddle.static.Program()):
+            x = paddle.static.data(
+                'x', shape=[-1, 3], dtype='float32', lod_level=1
+            )
+            x.stop_gradient = False
+
+            for print_tensor_name in [True, False]:
+                for print_tensor_type in [True, False]:
+                    for print_tensor_shape in [True, False]:
+                        for print_tensor_lod in [True, False]:
+                            paddle.static.Print(
+                                input=x,
+                                print_tensor_name=print_tensor_name,
+                                print_tensor_type=print_tensor_type,
+                                print_tensor_shape=print_tensor_shape,
+                                print_tensor_lod=print_tensor_lod,
+                            )
+            loss = paddle.mean(x)
+            if in_dynamic_or_pir_mode():
+                dx = grad(loss, [x])
+            else:
+                paddle.static.append_backward(loss=loss)
+            exe = paddle.static.Executor(self.place)
+            outs = exe.run(
+                feed={'x': self.x_tensor}, fetch_list=[loss], return_numpy=False
+            )
 
+    @test_with_pir_api
     def test_no_summarize(self):
-        switch_main_program(Program())
-        printed = self.build_network(True, summarize=-1, print_phase='forward')
-        exe = paddle.static.Executor(self.place)
-        outs = exe.run(
-            feed={'x': self.x_tensor}, fetch_list=[printed], return_numpy=False
-        )
+        with paddle.static.program_guard(
+            paddle.static.Program(), paddle.static.Program()
+        ):
+            printed = self.build_network(
+                True, summarize=-1, print_phase='forward'
+            )
+            exe = paddle.static.Executor(self.place)
+            outs = exe.run(
+                feed={'x': self.x_tensor},
+                fetch_list=[printed],
+                return_numpy=False,
+            )
 
 
 class TestPrintOpError(unittest.TestCase):
@@ -137,6 +166,8 @@ def check_backward(self, use_cuda):
         feed_dict = {"image": img, "label": label}
         exe.run(binary, feed_dict)
 
+    # fc is not supported in pir
+    # @test_with_pir_api
    def test_fw_bw(self):
        if paddle.is_compiled_with_cuda():
            self.check_backward(use_cuda=True)
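
Usage note (not part of the patch): below is a minimal sketch of how the migrated API is exercised. It assumes a Paddle build containing this change, where `paddle.static.Print` dispatches to `_C_ops.print` when `in_pir_mode()` is true and otherwise falls back to the `LayerHelper` branch; the program names and feed values are illustrative only.

```python
# Minimal sketch: paddle.static.Print in a static-graph program. Under PIR
# (e.g. when run through the test_with_pir_api decorator used in the tests
# above), Print now returns the result of _C_ops.print(...) instead of
# appending a legacy 'print' op named after the input variable.
import numpy as np

import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data('x', shape=[2, 3], dtype='float32')
    x.stop_gradient = False
    # Print the tensor at runtime; print_phase='forward' restricts output to
    # the forward pass, mirroring what test_print_op.py checks.
    paddle.static.Print(x, message='x after feed:', print_phase='forward')
    loss = paddle.mean(x)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
(loss_val,) = exe.run(
    main_prog,
    feed={'x': np.ones([2, 3], dtype='float32')},
    fetch_list=[loss],
)
```

Run as-is, this goes through the legacy static-graph branch; with the PIR API enabled it exercises the new `_C_ops.print` path, which is why the tests wrap each case in `@test_with_pir_api` and build gradients via `paddle.autograd.ir_backward.grad` instead of `paddle.static.append_backward`.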