From e75c651d150f7d131614bad32765ca2cf1b5f3cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kevin=E5=90=B4=E5=98=89=E6=96=87?= <417333277@qq.com>
Date: Wed, 7 Dec 2022 19:04:41 +0800
Subject: [PATCH] Remove redundant numpy output in Example code (1/3),
 test=document_fix (#48678)

---
 .../communication/stream/all_reduce.py     |  2 +-
 python/paddle/fft.py                       | 41 +++++++++---------
 .../incubate/nn/layer/fused_transformer.py |  4 +-
 python/paddle/nn/functional/activation.py  |  9 ++--
 python/paddle/nn/functional/conv.py        | 20 ++++-----
 python/paddle/nn/functional/distance.py    |  4 +-
 python/paddle/nn/functional/extension.py   |  9 ++--
 python/paddle/nn/functional/loss.py        | 41 ++++++++++--------
 python/paddle/nn/functional/vision.py      |  5 +--
 python/paddle/nn/initializer/constant.py   | 10 +++--
 python/paddle/nn/layer/conv.py             | 20 ++++-----
 python/paddle/nn/layer/distance.py         |  4 +-
 python/paddle/nn/layer/loss.py             | 43 +++++++++++--------
 python/paddle/nn/layer/vision.py           |  5 +--
 python/paddle/nn/quant/quant_layers.py     |  6 +--
 python/paddle/tensor/manipulation.py       |  5 ++-
 python/paddle/text/datasets/conll05.py     |  2 +-
 python/paddle/text/datasets/imdb.py        |  2 +-
 python/paddle/text/datasets/imikolov.py    |  2 +-
 python/paddle/text/datasets/movielens.py   |  2 +-
 20 files changed, 126 insertions(+), 110 deletions(-)

diff --git a/python/paddle/distributed/communication/stream/all_reduce.py b/python/paddle/distributed/communication/stream/all_reduce.py
index dd04ab0852bf3..16f69764f4e61 100644
--- a/python/paddle/distributed/communication/stream/all_reduce.py
+++ b/python/paddle/distributed/communication/stream/all_reduce.py
@@ -106,7 +106,7 @@ def all_reduce(
             data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
             task = dist.stream.all_reduce(data, sync_op=False)
             task.wait()
-            out = data.numpy()
+            out = data
             # [[5, 7, 9], [5, 7, 9]]
     """
     if _warn_cur_rank_not_in_group(group):
diff --git a/python/paddle/fft.py b/python/paddle/fft.py
index 9d2b4e2a1995e..7718e038c77c6 100644
--- a/python/paddle/fft.py
+++ b/python/paddle/fft.py
@@ -530,26 +530,27 @@ def fftn(x, s=None, axes=None, norm="backward", name=None):
             x = paddle.meshgrid(arr, arr, arr)[1]
             fftn_xp = paddle.fft.fftn(x, axes=(1, 2))
-            print(fftn_xp.numpy())
-            # [[[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-
-            #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-
-            #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]
-
-            #  [[24.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+8.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.+0.j 0.+0.j 0.+0.j 0.-0.j]
-            #   [-8.-8.j 0.+0.j 0.+0.j 0.-0.j]]]
+            print(fftn_xp)
+            # Tensor(shape=[4, 4, 4], dtype=complex128, place=Place(gpu:0), stop_gradient=True,
+            # [[[(24+0j), 0j , 0j , -0j ],
+            #   [(-8+8j), 0j , 0j , -0j ],
+            #   [(-8+0j), 0j , 0j , -0j ],
+            #   [(-8-8j), 0j , 0j , -0j ]],
+
+            #  [[(24+0j), 0j , 0j , -0j ],
+            #   [(-8+8j), 0j , 0j , -0j ],
+            #   [(-8+0j), 0j , 0j , -0j ],
+            #   [(-8-8j), 0j , 0j , -0j ]],
+
+            #  [[(24+0j), 0j , 0j , -0j ],
+            #   [(-8+8j), 0j , 0j , -0j ],
+            #   [(-8+0j), 0j , 0j , -0j ],
+            #   [(-8-8j), 0j , 0j , -0j ]],
+
+            #  [[(24+0j), 0j , 0j , -0j ],
+            #   [(-8+8j), 0j , 0j , -0j ],
+            #   [(-8+0j), 0j , 0j , -0j ],
+            #   [(-8-8j), 0j , 0j , -0j ]]])
     """
     if is_integer(x) or is_floating_point(x):
         return fftn_r2c(
diff --git a/python/paddle/incubate/nn/layer/fused_transformer.py b/python/paddle/incubate/nn/layer/fused_transformer.py
index ad96ab9669e67..2f745a3feb980 100644
--- a/python/paddle/incubate/nn/layer/fused_transformer.py
+++ b/python/paddle/incubate/nn/layer/fused_transformer.py
@@ -533,8 +533,8 @@ class FusedFeedForward(Layer):
             fused_feedforward_layer = FusedFeedForward(8, 8)
             x = paddle.rand((1, 8, 8))
             out = fused_feedforward_layer(x)
-            print(out.numpy().shape)
-            # (1, 8, 8)
+            print(out.shape)
+            # [1, 8, 8]
     """

     def __init__(
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index 6bf32317a461f..89bb63643f6a1 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -1677,11 +1677,12 @@ def glu(x, axis=-1, name=None):
             x = paddle.to_tensor(
                 [[-0.22014759, -1.76358426, 0.80566144, 0.04241343],
-                    [-1.94900405, -1.89956081, 0.17134808, -1.11280477]]
+                 [-1.94900405, -1.89956081, 0.17134808, -1.11280477]]
             )
-            print(F.glu(x).numpy())
-            # array([[-0.15216254, -0.9004892 ],
-            #        [-1.0577879 , -0.46985325]], dtype=float32)
+            print(F.glu(x))
+            # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [[-0.15216254, -0.90048921],
+            #  [-1.05778778, -0.46985325]])
     """
     check_variable_and_dtype(
diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py
index face92190c0f5..9b5f63254809b 100644
--- a/python/paddle/nn/functional/conv.py
+++ b/python/paddle/nn/functional/conv.py
@@ -657,10 +657,9 @@ def conv2d(
             w_var = paddle.randn((6, 3, 3, 3), dtype='float32')
             y_var = F.conv2d(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6]
     """
     # entry checks
     if data_format not in ["NCHW", "NHWC"]:
@@ -1234,10 +1233,9 @@ def conv2d_transpose(
             w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
             y_var = F.conv2d_transpose(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10]
     """
     if data_format not in ['NCHW', 'NHWC']:
@@ -1523,10 +1521,9 @@ def conv3d(
             w_var = paddle.randn((6, 3, 3, 3, 3), dtype='float32')
             y_var = F.conv3d(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6, 6]
     """
     # entry check
     if data_format not in ["NCDHW", "NDHWC"]:
@@ -1738,10 +1735,9 @@ def conv3d_transpose(
             w_var = paddle.randn((3, 6, 3, 3, 3), dtype='float32')
             y_var = F.conv3d_transpose(x_var, w_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10, 10]
     """
     # entry checks
     if data_format not in ["NCDHW", "NDHWC"]:
diff --git a/python/paddle/nn/functional/distance.py b/python/paddle/nn/functional/distance.py
index a931d3cb006ad..b9783c251be0c 100644
--- a/python/paddle/nn/functional/distance.py
+++ b/python/paddle/nn/functional/distance.py
@@ -63,7 +63,9 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
             x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype=paddle.float64)
             y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
             distance = paddle.nn.functional.pairwise_distance(x, y)
-            print(distance.numpy()) # [5. 5.]
+            print(distance)
+            # Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            # [4.99999860, 4.99999860])
     """
     check_type(p, 'porder', (float, int), 'PairwiseDistance')
diff --git a/python/paddle/nn/functional/extension.py b/python/paddle/nn/functional/extension.py
index e2327871bcfc3..f3d906be1f3ed 100644
--- a/python/paddle/nn/functional/extension.py
+++ b/python/paddle/nn/functional/extension.py
@@ -215,10 +215,11 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
             lengths = paddle.to_tensor([10, 9, 8])
             mask = paddle.nn.functional.sequence_mask(lengths)
-            print(mask.numpy())
-            # [[1 1 1 1 1 1 1 1 1 1]
-            #  [1 1 1 1 1 1 1 1 1 0]
-            #  [1 1 1 1 1 1 1 1 0 0]]
+            print(mask)
+            # Tensor(shape=[3, 10], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            # [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+            #  [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
+            #  [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])

     """
diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index dd5c2e128268c..83341a9dabc01 100755
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -1353,17 +1353,20 @@ def l1_loss(input, label, reduction='mean', name=None):
             label = paddle.to_tensor([[1.7, 1], [0.4, 0.5]])

             l1_loss = paddle.nn.functional.l1_loss(input, label)
-            print(l1_loss.numpy())
-            # [0.35]
+            print(l1_loss)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [0.34999999])

             l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='none')
-            print(l1_loss.numpy())
-            # [[0.20000005 0.19999999]
-            # [0.2 0.79999995]]
+            print(l1_loss)
+            # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [[0.20000005, 0.19999999],
+            # [0.20000000, 0.79999995]])

             l1_loss = paddle.nn.functional.l1_loss(input, label, reduction='sum')
-            print(l1_loss.numpy())
-            # [1.4]
+            print(l1_loss)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [1.39999998])
     """
     if reduction not in ['sum', 'mean', 'none']:
@@ -2530,9 +2533,11 @@ def cross_entropy(
             cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                 weight=weight, reduction=reduction)
             dy_ret = cross_entropy_loss(
-                    input,
-                    label)
-            print(dy_ret.numpy()) #[5.41993642]
+                input,
+                label)
+            print(dy_ret)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            # [5.34043430])

         .. code-block:: python
@@ -2550,13 +2555,15 @@ def cross_entropy(
             labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
             labels /= paddle.sum(labels, axis=axis, keepdim=True)
             paddle_loss_mean = paddle.nn.functional.cross_entropy(
-                    logits,
-                    labels,
-                    soft_label=True,
-                    axis=axis,
-                    weight=weight,
-                    reduction=reduction)
-            print(paddle_loss_mean.numpy()) #[1.12908343]
+                logits,
+                labels,
+                soft_label=True,
+                axis=axis,
+                weight=weight,
+                reduction=reduction)
+            print(paddle_loss_mean)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            # [1.11043464])

     """
diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py
index c01f962d79dbc..2cb448f9fdb2c 100644
--- a/python/paddle/nn/functional/vision.py
+++ b/python/paddle/nn/functional/vision.py
@@ -368,9 +368,8 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
             x = paddle.randn(shape=[2,9,4,4])
             out_var = F.pixel_shuffle(x, 3)
-            out = out_var.numpy()
-            print(out.shape)
-            # (2, 1, 12, 12)
+            print(out_var.shape)
+            # [2, 1, 12, 12]
     """
     if not isinstance(upscale_factor, int):
         raise TypeError("upscale factor must be int type")
diff --git a/python/paddle/nn/initializer/constant.py b/python/paddle/nn/initializer/constant.py
index 6a8ce4385fc5e..637ae6299005c 100644
--- a/python/paddle/nn/initializer/constant.py
+++ b/python/paddle/nn/initializer/constant.py
@@ -32,11 +32,13 @@ class Constant(ConstantInitializer):
             data = paddle.rand([30, 10, 2], dtype='float32')
             linear = nn.Linear(2,
-                                4,
-                                weight_attr=nn.initializer.Constant(value=2.0))
+                               4,
+                               weight_attr=nn.initializer.Constant(value=2.0))
             res = linear(data)
-            print(linear.weight.numpy())
-            #result is [[2. 2. 2. 2.],[2. 2. 2. 2.]]
+            print(linear.weight)
+            # Tensor(shape=[2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=False,
+            # [[2., 2., 2., 2.],
+            #  [2., 2., 2., 2.]])

     """
diff --git a/python/paddle/nn/layer/conv.py b/python/paddle/nn/layer/conv.py
index 81eef1091c1d6..a3d719f67c182 100644
--- a/python/paddle/nn/layer/conv.py
+++ b/python/paddle/nn/layer/conv.py
@@ -668,9 +668,8 @@ class Conv2D(_ConvNd):
             conv = nn.Conv2D(4, 6, (3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6]
     """

     def __init__(
@@ -841,9 +840,8 @@ class Conv2DTranspose(_ConvNd):
             conv = nn.Conv2DTranspose(4, 6, (3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10]
     """

     def __init__(
@@ -999,9 +997,8 @@ class Conv3D(_ConvNd):
             conv = nn.Conv3D(4, 6, (3, 3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 6, 6, 6)
+            print(y_var.shape)
+            # [2, 6, 6, 6, 6]
     """

     def __init__(
@@ -1181,9 +1178,8 @@ class Conv3DTranspose(_ConvNd):
             conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))
             y_var = conv(x_var)
-            y_np = y_var.numpy()
-            print(y_np.shape)
-            # (2, 6, 10, 10, 10)
+            print(y_var.shape)
+            # [2, 6, 10, 10, 10]
     """

     def __init__(
diff --git a/python/paddle/nn/layer/distance.py b/python/paddle/nn/layer/distance.py
index 72dea12b49a71..f63ce53c4e2b2 100644
--- a/python/paddle/nn/layer/distance.py
+++ b/python/paddle/nn/layer/distance.py
@@ -56,7 +56,9 @@ class PairwiseDistance(Layer):
             y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
             dist = paddle.nn.PairwiseDistance()
             distance = dist(x, y)
-            print(distance.numpy()) # [5. 5.]
+            print(distance)
+            # Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            # [4.99999860, 4.99999860])

     """
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index e88331676c525..2d5f57f2c585e 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -102,7 +102,9 @@ class BCEWithLogitsLoss(Layer):
             label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32")
             bce_logit_loss = paddle.nn.BCEWithLogitsLoss()
             output = bce_logit_loss(logit, label)
-            print(output.numpy()) # [0.45618808]
+            print(output)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [0.45618814])

     """
@@ -319,9 +321,11 @@ class CrossEntropyLoss(Layer):
             cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                 weight=weight, reduction=reduction)
             dy_ret = cross_entropy_loss(
-                    input,
-                    label)
-            print(dy_ret.numpy()) #[5.41993642]
+                input,
+                label)
+            print(dy_ret)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            # [5.34043430])

         .. code-block:: python

@@ -339,13 +343,15 @@ class CrossEntropyLoss(Layer):
             labels = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0)
             labels /= paddle.sum(labels, axis=axis, keepdim=True)
             paddle_loss_mean = paddle.nn.functional.cross_entropy(
-                    logits,
-                    labels,
-                    soft_label=True,
-                    axis=axis,
-                    weight=weight,
-                    reduction=reduction)
-            print(paddle_loss_mean.numpy()) #[1.12908343]
+                logits,
+                labels,
+                soft_label=True,
+                axis=axis,
+                weight=weight,
+                reduction=reduction)
+            print(paddle_loss_mean)
+            # Tensor(shape=[1], dtype=float64, place=Place(gpu:0), stop_gradient=True,
+            # [1.11043464])

     """
@@ -635,19 +641,22 @@ class L1Loss(Layer):
             l1_loss = paddle.nn.L1Loss()
             output = l1_loss(input, label)
-            print(output.numpy())
-            # [0.35]
+            print(output)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [0.34999999])

             l1_loss = paddle.nn.L1Loss(reduction='sum')
             output = l1_loss(input, label)
-            print(output.numpy())
-            # [1.4]
+            print(output)
+            # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [1.39999998])

             l1_loss = paddle.nn.L1Loss(reduction='none')
             output = l1_loss(input, label)
             print(output)
-            # [[0.20000005 0.19999999]
-            # [0.2 0.79999995]]
+            # Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            # [[0.20000005, 0.19999999],
+            # [0.20000000, 0.79999995]])

     """
diff --git a/python/paddle/nn/layer/vision.py b/python/paddle/nn/layer/vision.py
index bcc6ea77bb55c..8542e2b62111e 100644
--- a/python/paddle/nn/layer/vision.py
+++ b/python/paddle/nn/layer/vision.py
@@ -52,10 +52,9 @@ class PixelShuffle(Layer):
             x = paddle.randn(shape=[2,9,4,4])
             pixel_shuffle = nn.PixelShuffle(3)
-            out_var = pixel_shuffle(x)
-            out = out_var.numpy()
+            out = pixel_shuffle(x)
             print(out.shape)
-            # (2, 1, 12, 12)
+            # [2, 1, 12, 12]

     """
diff --git a/python/paddle/nn/quant/quant_layers.py b/python/paddle/nn/quant/quant_layers.py
index 8d81a61dd7921..9cb2db000d531 100644
--- a/python/paddle/nn/quant/quant_layers.py
+++ b/python/paddle/nn/quant/quant_layers.py
@@ -628,10 +628,8 @@ class QuantizedConv2DTranspose(Layer):
             conv_quantized = QuantizedConv2DTranspose(conv)
             y_quantized = conv_quantized(x_var)
             y_var = conv(x_var)
-            y_quantized_np = y_quantized.numpy()
-            y_np = y_var.numpy()
-            print(y_np.shape, y_quantized_np.shape)
-            # (2, 6, 10, 10), (2, 6, 10, 10)
+            print(y_var.shape, y_quantized.shape)
+            # [2, 6, 10, 10], [2, 6, 10, 10]

     """
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 8c47809d222a9..60272630b2199 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -2453,7 +2453,10 @@ def unique(
             x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
             unique = paddle.unique(x)
-            np_unique = unique.numpy() # [1 2 3 5]
+            print(unique)
+            # Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
+            # [1, 2, 3, 5])
+
             _, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
             print(indices)
             # Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
diff --git a/python/paddle/text/datasets/conll05.py b/python/paddle/text/datasets/conll05.py
index 10ef8f4edfb4e..7456030b6711f 100644
--- a/python/paddle/text/datasets/conll05.py
+++ b/python/paddle/text/datasets/conll05.py
@@ -88,7 +88,7 @@ def forward(self, pred_idx, mark, label):
             model = SimpleNet()

             pred_idx, mark, label= model(pred_idx, mark, label)
-            print(pred_idx.numpy(), mark.numpy(), label.numpy())
+            print(pred_idx, mark, label)

     """
diff --git a/python/paddle/text/datasets/imdb.py b/python/paddle/text/datasets/imdb.py
index 7aad2095c4118..abf4424e3f37b 100644
--- a/python/paddle/text/datasets/imdb.py
+++ b/python/paddle/text/datasets/imdb.py
@@ -67,7 +67,7 @@ def forward(self, doc, label):
             model = SimpleNet()

             image, label = model(doc, label)
-            print(doc.numpy().shape, label.numpy().shape)
+            print(doc.shape, label.shape)

     """
diff --git a/python/paddle/text/datasets/imikolov.py b/python/paddle/text/datasets/imikolov.py
index c9f04712c6fe1..d936bcb667881 100644
--- a/python/paddle/text/datasets/imikolov.py
+++ b/python/paddle/text/datasets/imikolov.py
@@ -67,7 +67,7 @@ def forward(self, src, trg):
             model = SimpleNet()

             src, trg = model(src, trg)
-            print(src.numpy().shape, trg.numpy().shape)
+            print(src.shape, trg.shape)

     """
diff --git a/python/paddle/text/datasets/movielens.py b/python/paddle/text/datasets/movielens.py
index 505863748caa1..b3048426b4299 100644
--- a/python/paddle/text/datasets/movielens.py
+++ b/python/paddle/text/datasets/movielens.py
@@ -134,7 +134,7 @@ def forward(self, category, title, rating):
             model = SimpleNet()

             category, title, rating = model(category, title, rating)
-            print(category.numpy().shape, title.numpy().shape, rating.numpy().shape)
+            print(category.shape, title.shape, rating.shape)

     """