Migrate print into pir (#58780)
zrr1999 authored Nov 20, 2023
1 parent fd91052 commit abac31f
Showing 4 changed files with 94 additions and 44 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
@@ -92,6 +92,7 @@
'fc',
'self_dp_attention',
'get_tensor_from_selected_rows',
'print',
]

NO_NEED_GEN_STATIC_ONLY_APIS = [
@@ -114,7 +115,6 @@
'fused_scale_bias_relu_conv_bn',
'fused_scale_bias_add_relu',
'memcpy',
'print',
'recv_v2',
'rnn_',
'seed',
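In plain terms, this hunk moves 'print' out of NO_NEED_GEN_STATIC_ONLY_APIS and into the list of ops that do get a generated static-only Python binding (the name of that first list is cut off above the hunk and is assumed below). The sketch is a hypothetical illustration of that gating, not Paddle's actual generator code:

```python
# Hypothetical illustration only: how a generator might consult the two lists from
# ops_api_gen.py when deciding whether to emit a static-only Python binding for an op.
# NEED_GEN_STATIC_ONLY_APIS is an assumed name; only the entries shown in the diff are real.
NEED_GEN_STATIC_ONLY_APIS = ['fc', 'self_dp_attention', 'get_tensor_from_selected_rows', 'print']
NO_NEED_GEN_STATIC_ONLY_APIS = ['memcpy', 'recv_v2', 'rnn_', 'seed']  # 'print' removed by this commit

def gen_static_only_api(op_name: str) -> bool:
    """Assumed rule: emit a binding only for ops explicitly marked as needing one."""
    return op_name in NEED_GEN_STATIC_ONLY_APIS and op_name not in NO_NEED_GEN_STATIC_ONLY_APIS

print(gen_static_only_api('print'))  # True after this commit
```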
19 changes: 18 additions & 1 deletion python/paddle/static/nn/control_flow.py
@@ -16,6 +16,7 @@
from functools import partial, reduce

import paddle
from paddle import _C_ops
from paddle.base import core
from paddle.base.backward import _infer_var_data_type_shape_
from paddle.base.framework import (
@@ -1745,8 +1746,24 @@ def Print(
['uint16', 'float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
'paddle.static.Print',
)
message = message or ""
helper = LayerHelper('print', **locals())

if in_pir_mode():
return _C_ops.print(
input,
first_n,
message,
summarize,
print_tensor_name,
print_tensor_type,
print_tensor_shape,
print_tensor_layout,
print_tensor_lod,
print_phase.upper(),
True,
)

helper = LayerHelper('print' + "_" + input.name, **locals())
output = helper.create_variable_for_type_inference(input.dtype)
helper.append_op(
type='print',
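As a usage sketch (dummy data and a CPU executor assumed), this is how `paddle.static.Print` is typically invoked; after this change the same call dispatches directly to `_C_ops.print` whenever `in_pir_mode()` is true, instead of appending a legacy `print` op through `LayerHelper`:

```python
import numpy as np
import paddle

paddle.enable_static()
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[2, 3], dtype='float32')
    # Under PIR this now lowers straight to _C_ops.print; otherwise a 'print' op is appended.
    x = paddle.static.Print(x, message='value of x:', summarize=10)
    loss = paddle.mean(x)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
exe.run(main_prog, feed={'x': np.ones([2, 3], dtype='float32')}, fetch_list=[loss])
```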
6 changes: 4 additions & 2 deletions test/legacy_test/test_index_put_op.py
@@ -18,6 +18,7 @@
import numpy as np

import paddle
from paddle.pir_utils import test_with_pir_api


def compute_index_put_ref(x_np, indices_np, value_np, accumulate=False):
@@ -143,7 +144,7 @@ def test_dygraph_forward(self):
)
np.testing.assert_allclose(ref_res, pd_res.numpy(), atol=1e-7)

# @test_with_pir_api
@test_with_pir_api
def test_static_forward(self):
paddle.enable_static()
for place in self.place:
@@ -626,6 +627,7 @@ def setPlace(self):
if paddle.is_compiled_with_cuda():
self.place.append('gpu')

@test_with_pir_api
def test_dygraph_forward(self):
paddle.disable_static()
for place in self.place:
@@ -934,7 +936,7 @@ def test_backward_all_false_bool_indice(self):
atol=1e-7,
)

# @test_with_pir_api
@test_with_pir_api
def test_backward_in_static(self):
paddle.enable_static()
exe = paddle.static.Executor()
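The only change in this test file is enabling `test_with_pir_api` on the static-graph tests. A rough sketch of what such a decorator does is below, assuming it simply reruns the test body under the new IR via an `IrGuard` from `paddle.pir_utils`; the real implementation in `paddle/pir_utils.py` may differ.

```python
import functools

# Assumed helper: the test file imports test_with_pir_api from this module.
from paddle.pir_utils import IrGuard

def test_with_pir_api_sketch(test_fn):
    """Rough sketch: run the decorated test under the legacy IR, then again under PIR."""
    @functools.wraps(test_fn)
    def wrapper(*args, **kwargs):
        test_fn(*args, **kwargs)   # legacy static-graph program
        with IrGuard():            # switch the default programs to the new PIR representation
            test_fn(*args, **kwargs)
    return wrapper
```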
111 changes: 71 additions & 40 deletions test/legacy_test/test_print_op.py
@@ -19,8 +19,10 @@

import paddle
from paddle import base
from paddle.autograd.ir_backward import grad
from paddle.base import core
from paddle.base.framework import switch_main_program
from paddle.framework import in_dynamic_or_pir_mode
from paddle.pir_utils import test_with_pir_api
from paddle.static import Program, program_guard

paddle.enable_static()
@@ -39,54 +41,81 @@ def build_network(self, only_forward, **kargs):
x.stop_gradient = False
paddle.static.Print(input=x, **kargs)
loss = paddle.mean(x)
paddle.static.append_backward(loss=loss)

if in_dynamic_or_pir_mode():
dx = grad(loss, [x])
else:
paddle.static.append_backward(loss=loss)
return loss

@test_with_pir_api
def test_forward(self):
switch_main_program(Program())
printed = self.build_network(True, print_phase='forward')
exe = paddle.static.Executor(self.place)
outs = exe.run(
feed={'x': self.x_tensor}, fetch_list=[printed], return_numpy=False
)
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
printed = self.build_network(True, print_phase='forward')
exe = paddle.static.Executor(self.place)
outs = exe.run(
feed={'x': self.x_tensor},
fetch_list=[printed],
return_numpy=False,
)

@test_with_pir_api
def test_backward(self):
switch_main_program(Program())
loss = self.build_network(False, print_phase='backward')
exe = paddle.static.Executor(self.place)
outs = exe.run(
feed={'x': self.x_tensor}, fetch_list=[loss], return_numpy=False
)
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
loss = self.build_network(False, print_phase='backward')
exe = paddle.static.Executor(self.place)
outs = exe.run(
feed={'x': self.x_tensor}, fetch_list=[loss], return_numpy=False
)

@test_with_pir_api
def test_all_parameters(self):
x = paddle.static.data('x', shape=[-1, 3], dtype='float32', lod_level=1)
x.stop_gradient = False

for print_tensor_name in [True, False]:
for print_tensor_type in [True, False]:
for print_tensor_shape in [True, False]:
for print_tensor_lod in [True, False]:
paddle.static.Print(
input=x,
print_tensor_name=print_tensor_name,
print_tensor_type=print_tensor_type,
print_tensor_shape=print_tensor_shape,
print_tensor_lod=print_tensor_lod,
)
loss = paddle.mean(x)
paddle.static.append_backward(loss=loss)
exe = paddle.static.Executor(self.place)
outs = exe.run(
feed={'x': self.x_tensor}, fetch_list=[loss], return_numpy=False
)
prog = paddle.static.Program()
with paddle.static.program_guard(prog, paddle.static.Program()):
x = paddle.static.data(
'x', shape=[-1, 3], dtype='float32', lod_level=1
)
x.stop_gradient = False

for print_tensor_name in [True, False]:
for print_tensor_type in [True, False]:
for print_tensor_shape in [True, False]:
for print_tensor_lod in [True, False]:
paddle.static.Print(
input=x,
print_tensor_name=print_tensor_name,
print_tensor_type=print_tensor_type,
print_tensor_shape=print_tensor_shape,
print_tensor_lod=print_tensor_lod,
)
loss = paddle.mean(x)
if in_dynamic_or_pir_mode():
dx = grad(loss, [x])
else:
paddle.static.append_backward(loss=loss)
exe = paddle.static.Executor(self.place)
outs = exe.run(
feed={'x': self.x_tensor}, fetch_list=[loss], return_numpy=False
)

@test_with_pir_api
def test_no_summarize(self):
switch_main_program(Program())
printed = self.build_network(True, summarize=-1, print_phase='forward')
exe = paddle.static.Executor(self.place)
outs = exe.run(
feed={'x': self.x_tensor}, fetch_list=[printed], return_numpy=False
)
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
printed = self.build_network(
True, summarize=-1, print_phase='forward'
)
exe = paddle.static.Executor(self.place)
outs = exe.run(
feed={'x': self.x_tensor},
fetch_list=[printed],
return_numpy=False,
)


class TestPrintOpError(unittest.TestCase):
@@ -137,6 +166,8 @@ def check_backward(self, use_cuda):
feed_dict = {"image": img, "label": label}
exe.run(binary, feed_dict)

# fc is not supported in pir
# @test_with_pir_api
def test_fw_bw(self):
if paddle.is_compiled_with_cuda():
self.check_backward(use_cuda=True)
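The recurring pattern in these tests: gradients are now built with `paddle.autograd.ir_backward.grad` when the PIR program is active, and with `paddle.static.append_backward` otherwise. A condensed, standalone version of that pattern (dummy shapes, static mode assumed), mirroring the imports and branch used in the diff:

```python
import paddle
from paddle.autograd.ir_backward import grad
from paddle.framework import in_dynamic_or_pir_mode

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
    x = paddle.static.data(name='x', shape=[2, 3], dtype='float32')
    x.stop_gradient = False
    paddle.static.Print(x, message='forward value:')
    loss = paddle.mean(x)
    if in_dynamic_or_pir_mode():
        dx = grad(loss, [x])                      # PIR: construct gradients explicitly
    else:
        paddle.static.append_backward(loss=loss)  # legacy IR: append backward ops
```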
