From 8a097399b39c74f894474219598c1a5cb25e1fde Mon Sep 17 00:00:00 2001
From: HongyuJia
Date: Mon, 27 Feb 2023 15:51:43 +0800
Subject: [PATCH] [Tensor Operants & Prim] Tensor pow API uses elementwise_pow
 (#50886)

* [Tensor Operants & Prim] Tensor pow API uses elementwise_pow

* unittest change to fill_constant+elementwise_pow
---
 paddle/fluid/prim/api/api.yaml                |  2 -
 .../tensor_operants_gen.py                    | 24 +++++++++++
 paddle/fluid/prim/tests/CMakeLists.txt        |  1 +
 paddle/fluid/prim/tests/test_static_prim.cc   | 43 +++++++++++--------
 paddle/phi/api/include/tensor.h               |  3 +-
 .../api/yaml/generator/tensor_operants_gen.py | 42 +++++++++++++++++-
 paddle/phi/api/yaml/tensor_operants.yaml      |  9 ++--
 ...test_comp_get_grad_op_desc_prim_enabled.py |  3 +-
 .../unittests/prim/test_comp_skip_op_set.py   |  3 +-
 9 files changed, 100 insertions(+), 30 deletions(-)

diff --git a/paddle/fluid/prim/api/api.yaml b/paddle/fluid/prim/api/api.yaml
index 430b1a2412477..9f9c2763a4cc5 100644
--- a/paddle/fluid/prim/api/api.yaml
+++ b/paddle/fluid/prim/api/api.yaml
@@ -3,7 +3,6 @@
 - multiply
 - divide
 - unsqueeze
-- pow
 - exp
 - scale
 - matmul
@@ -25,5 +24,4 @@
 - scatter_nd_add
 - tile
 - transpose
-- subtract
 - pad
diff --git a/paddle/fluid/prim/api/auto_code_generated/tensor_operants_gen.py b/paddle/fluid/prim/api/auto_code_generated/tensor_operants_gen.py
index 51f0aeb3a4765..9d553f95305cc 100644
--- a/paddle/fluid/prim/api/auto_code_generated/tensor_operants_gen.py
+++ b/paddle/fluid/prim/api/auto_code_generated/tensor_operants_gen.py
@@ -64,6 +64,10 @@ class EagerTensorOperants : public TensorOperantsBase {

   Tensor divide(const Scalar& x, const Tensor& y);

+  Tensor pow(const Tensor& x, const Tensor& y);
+
+  Tensor pow(const Tensor& x, const Scalar& y);
+
 """

@@ -121,6 +125,14 @@ class EagerTensorOperants : public TensorOperantsBase {
   return ::divide_ad_func(::full_like_ad_func(y, x), y);
 }

+Tensor EagerTensorOperants::pow(const Tensor& x, const Tensor& y) {
+  return ::elementwise_pow_ad_func(x, y);
+}
+
+Tensor EagerTensorOperants::pow(const Tensor& x, const Scalar& y) {
+  return ::elementwise_pow_ad_func(x, ::full_like_ad_func(x, y));
+}
+
 """

@@ -176,6 +188,10 @@ class StaticTensorOperants : public TensorOperantsBase {

   Tensor divide(const Scalar& x, const Tensor& y);

+  Tensor pow(const Tensor& x, const Tensor& y);
+
+  Tensor pow(const Tensor& x, const Scalar& y);
+
 """

@@ -236,6 +252,14 @@ class StaticTensorOperants : public TensorOperantsBase {
   return paddle::prim::divide<DescTensor>(paddle::prim::full<DescTensor>(y.shape(), x, y.dtype(), y.place()), y);
 }

+Tensor StaticTensorOperants::pow(const Tensor& x, const Tensor& y) {
+  return paddle::prim::elementwise_pow<DescTensor>(x, y);
+}
+
+Tensor StaticTensorOperants::pow(const Tensor& x, const Scalar& y) {
+  return paddle::prim::elementwise_pow<DescTensor>(x, paddle::prim::full<DescTensor>(x.shape(), y, x.dtype(), x.place()));
+}
+
 """
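Both generators lower the new API the same way: with a tensor exponent, pow forwards straight to elementwise_pow; with a scalar exponent, the scalar is first broadcast to a tensor (full_like in eager mode, full in static mode) and only then fed to elementwise_pow. A minimal pure-Python sketch of that lowering rule, using list stand-ins rather than Paddle tensors:

    import math

    def full_like(x, v):
        # A "tensor" shaped like x, filled with the scalar v.
        return [v] * len(x)

    def elementwise_pow(x, y):
        # out[i] = x[i] ** y[i] -- the kernel both overloads bottom out in.
        return [math.pow(a, b) for a, b in zip(x, y)]

    def pow_tensor(x, y):
        # Tensor-exponent overload: forward as-is.
        return elementwise_pow(x, y)

    def pow_scalar(x, v):
        # Scalar-exponent overload: materialize the exponent, then forward.
        return elementwise_pow(x, full_like(x, v))

    assert pow_scalar([2.0, 3.0], 2.0) == [4.0, 9.0]
    assert pow_tensor([2.0, 3.0], [3.0, 2.0]) == [8.0, 9.0]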
diff --git a/paddle/fluid/prim/tests/CMakeLists.txt b/paddle/fluid/prim/tests/CMakeLists.txt
index 0118f5e127c76..07098c92e0523 100644
--- a/paddle/fluid/prim/tests/CMakeLists.txt
+++ b/paddle/fluid/prim/tests/CMakeLists.txt
@@ -30,6 +30,7 @@ cc_test_old(
   operator
   elementwise_mul_op
   elementwise_sub_op
+  elementwise_pow_op
   fill_constant_op
   activation_op
   phi_api
diff --git a/paddle/fluid/prim/tests/test_static_prim.cc b/paddle/fluid/prim/tests/test_static_prim.cc
index 32bdb01a218c5..b7376d1689edd 100644
--- a/paddle/fluid/prim/tests/test_static_prim.cc
+++ b/paddle/fluid/prim/tests/test_static_prim.cc
@@ -194,7 +194,7 @@ TEST(StaticPrim, TanhBackwardComposite) {
                                               target_block,
                                               grad_sub_block));
   ASSERT_EQ(target_block->AllOps().size(), static_cast<std::size_t>(1));
-  ASSERT_EQ(grad_ops.size(), static_cast<std::size_t>(4));
+  ASSERT_EQ(grad_ops.size(), static_cast<std::size_t>(5));
   ASSERT_EQ(target_block->AllOps()[0]->Type(), "tanh");
   ASSERT_EQ(target_block->AllOps()[0]->Inputs().at("X").size(),
             static_cast<std::size_t>(1));
@@ -204,36 +204,41 @@ TEST(StaticPrim, TanhBackwardComposite) {
   ASSERT_EQ(target_block->AllOps()[0]->Outputs().at("Out")[0], "b");
-  ASSERT_EQ(grad_ops[0]->Type(), "pow");
-  ASSERT_EQ(grad_ops[0]->Inputs().at("X").size(), static_cast<std::size_t>(1));
-  ASSERT_EQ(grad_ops[0]->Inputs().at("X")[0], "b");
-  ASSERT_EQ(PADDLE_GET_CONST(float, grad_ops[0]->GetAttr("factor")),
-            static_cast<float>(2.0));
+  ASSERT_EQ(grad_ops[0]->Type(), "fill_constant");
+  ASSERT_EQ(PADDLE_GET_CONST(int, grad_ops[0]->GetAttr("dtype")),
+            static_cast<int>(5));  // ProtoDataType::FP32
   ASSERT_EQ(grad_ops[0]->Outputs().at("Out").size(),
             static_cast<std::size_t>(1));

-  ASSERT_EQ(grad_ops[1]->Type(), "fill_constant");
-  ASSERT_EQ(PADDLE_GET_CONST(int, grad_ops[1]->GetAttr("dtype")),
-            static_cast<int>(5));  // ProtoDataType::FP32
-  ASSERT_EQ(grad_ops[1]->Outputs().at("Out").size(),
+  ASSERT_EQ(grad_ops[1]->Type(), "elementwise_pow");
+  ASSERT_EQ(grad_ops[1]->Inputs().at("X").size(), static_cast<std::size_t>(1));
+  ASSERT_EQ(grad_ops[1]->Inputs().at("Y").size(), static_cast<std::size_t>(1));
+  ASSERT_EQ(grad_ops[1]->Inputs().at("X")[0], "b");
+  ASSERT_EQ(grad_ops[1]->Outputs().at("Out").size(),
             static_cast<std::size_t>(1));

-  ASSERT_EQ(grad_ops[2]->Type(), "elementwise_sub");
-  ASSERT_EQ(grad_ops[2]->Inputs().at("X").size(), static_cast<std::size_t>(1));
-  ASSERT_EQ(grad_ops[2]->Inputs().at("Y").size(), static_cast<std::size_t>(1));
-  ASSERT_EQ(grad_ops[2]->Inputs().at("X")[0],
-            grad_ops[1]->Outputs().at("Out")[0]);
+  ASSERT_EQ(grad_ops[2]->Type(), "fill_constant");
+  ASSERT_EQ(PADDLE_GET_CONST(int, grad_ops[2]->GetAttr("dtype")),
+            static_cast<int>(5));  // ProtoDataType::FP32
   ASSERT_EQ(grad_ops[2]->Outputs().at("Out").size(),
             static_cast<std::size_t>(1));

-  ASSERT_EQ(grad_ops[3]->Type(), "elementwise_mul");
+  ASSERT_EQ(grad_ops[3]->Type(), "elementwise_sub");
   ASSERT_EQ(grad_ops[3]->Inputs().at("X").size(), static_cast<std::size_t>(1));
   ASSERT_EQ(grad_ops[3]->Inputs().at("Y").size(), static_cast<std::size_t>(1));
-  ASSERT_EQ(grad_ops[3]->Inputs().at("Y")[0],
+  ASSERT_EQ(grad_ops[3]->Inputs().at("X")[0],
             grad_ops[2]->Outputs().at("Out")[0]);
-  ASSERT_EQ(grad_ops[3]->Inputs().at("X")[0], "b@GRAD");
   ASSERT_EQ(grad_ops[3]->Outputs().at("Out").size(),
             static_cast<std::size_t>(1));
+
+  ASSERT_EQ(grad_ops[4]->Type(), "elementwise_mul");
+  ASSERT_EQ(grad_ops[4]->Inputs().at("X").size(), static_cast<std::size_t>(1));
+  ASSERT_EQ(grad_ops[4]->Inputs().at("Y").size(), static_cast<std::size_t>(1));
+  ASSERT_EQ(grad_ops[4]->Inputs().at("Y")[0],
+            grad_ops[3]->Outputs().at("Out")[0]);
+  ASSERT_EQ(grad_ops[4]->Inputs().at("X")[0], "b@GRAD");
+  ASSERT_EQ(grad_ops[4]->Outputs().at("Out").size(),
+            static_cast<std::size_t>(1));
 }

 TEST(StaticCompositeGradMaker, TestMutiInputMethod) {
@@ -376,7 +381,7 @@ TEST(StaticPrim, TestFlags) {
 USE_OP_ITSELF(fill_constant);
 USE_OP_ITSELF(tanh);
 USE_OP_ITSELF(tanh_grad);
-USE_OP_ITSELF(pow);
 USE_OP_ITSELF(elementwise_mul);
 USE_OP_ITSELF(elementwise_sub);
+USE_OP_ITSELF(elementwise_pow);
 USE_OP_ITSELF(scale);
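The expected op count rises from four to five because b**2, previously a single pow op carrying a factor=2.0 attribute, is now a fill_constant producing the exponent followed by elementwise_pow. Together the five ops compute the composite tanh backward dx = b@GRAD * (1 - b**2), where b = tanh(x). A quick standalone check of that identity against a finite difference (values arbitrary):

    import math

    x, dout = 0.3, 1.5  # input and incoming gradient b@GRAD
    b = math.tanh(x)

    # Five-op composite: full(2.0); b**2; full(1.0); 1 - b**2; dout * (1 - b**2).
    dx = dout * (1.0 - b ** 2.0)

    # Finite-difference reference for dout * d tanh(x)/dx.
    eps = 1e-6
    fd = dout * (math.tanh(x + eps) - math.tanh(x - eps)) / (2 * eps)

    assert abs(dx - fd) < 1e-8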
diff --git a/paddle/phi/api/include/tensor.h b/paddle/phi/api/include/tensor.h
index 7f50eea321202..4f64031ea059a 100644
--- a/paddle/phi/api/include/tensor.h
+++ b/paddle/phi/api/include/tensor.h
@@ -677,12 +677,13 @@ class PADDLE_API Tensor final {
   Tensor divide(const Scalar& y) const;
   Tensor multiply(const Scalar& y) const;
   Tensor subtract(const Scalar& y) const;
+  Tensor pow(const Tensor& y) const;
+  Tensor pow(const Scalar& y) const;

   Tensor exp() const;
   Tensor floor() const;
   Tensor gather_nd(const Tensor& index) const;
   Tensor log() const;
-  Tensor pow(const Scalar& y) const;
   Tensor roll(const IntArray& shifts, const std::vector<int64_t>& axis) const;
   Tensor scatter(const Tensor& index,
                  const Tensor& updates,
diff --git a/paddle/phi/api/yaml/generator/tensor_operants_gen.py b/paddle/phi/api/yaml/generator/tensor_operants_gen.py
index ad4df97295c43..c2fd879d31042 100644
--- a/paddle/phi/api/yaml/generator/tensor_operants_gen.py
+++ b/paddle/phi/api/yaml/generator/tensor_operants_gen.py
@@ -29,6 +29,8 @@

 indent = "  "

+specific_ops_map = {"elementwise_pow": "pow"}
+

 operants_base_include = """// Generated by paddle/phi/api/yaml/generator/tensor_operants_gen.py
@@ -68,6 +70,10 @@ class TensorOperantsBase {
   virtual Tensor multiply(const Scalar& x, const Tensor& y) = 0;

   virtual Tensor subtract(const Scalar& x, const Tensor& y) = 0;
+
+  virtual Tensor pow(const Tensor& x, const Tensor& y) = 0;
+
+  virtual Tensor pow(const Tensor& x, const Scalar& y) = 0;
 """

@@ -143,6 +149,14 @@ class TensorOperantsBase {
   return scale(-1.0, 0.0, true);
 }

+Tensor Tensor::pow(const Tensor& y) const {
+  return paddle::OperantsManager::Instance().pow(static_cast<const Tensor&>(*this), y);
+}
+
+Tensor Tensor::pow(const Scalar& y) const {
+  return paddle::OperantsManager::Instance().pow(static_cast<const Tensor&>(*this), y);
+}
+
 PADDLE_API Tensor operator+(const Scalar& x, const Tensor& y) {
   return paddle::OperantsManager::Instance().add(x, y);
 }
@@ -211,6 +225,10 @@ class PhiTensorOperants : public TensorOperantsBase {

   Tensor divide(const Scalar& x, const Tensor& y);

+  Tensor pow(const Tensor& x, const Tensor& y);
+
+  Tensor pow(const Tensor& x, const Scalar& y);
+
 """

@@ -267,6 +285,14 @@ class PhiTensorOperants : public TensorOperantsBase {
 Tensor PhiTensorOperants::divide(const Scalar& x, const Tensor& y) {
   return paddle::experimental::divide(paddle::experimental::full_like(y, x), y);
 }
+
+Tensor PhiTensorOperants::pow(const Tensor& x, const Tensor& y) {
+  return paddle::experimental::elementwise_pow(x, y);
+}
+
+Tensor PhiTensorOperants::pow(const Tensor& x, const Scalar& y) {
+  return paddle::experimental::elementwise_pow(x, paddle::experimental::full_like(x, y));
+}
 """

@@ -359,6 +385,10 @@ class OperantsManager {

   Tensor divide(const Scalar& x, const Tensor& y);

+  Tensor pow(const Tensor& x, const Tensor& y);
+
+  Tensor pow(const Tensor& x, const Scalar& y);
+
 """

@@ -512,8 +542,10 @@ def gene_operants_implementation(self):
 """

-    def gene_operants_manager_code(self):
+    def gene_operants_manager_code(self, is_specific_op=False):
         func_name = self.get_api_func_name()
+        if is_specific_op:
+            func_name = specific_ops_map[func_name]
         func_args = self.inputs['names'] + self.attrs['names']
         func_args_code = ", ".join(func_args)
         return f"""
@@ -552,11 +584,19 @@ def gene_operants_manager_code(self):
     def gene_operants_manager_implementation(self):
         func_name = self.get_api_func_name()
         final_code = ""
+        # Codes for arithmetic operants
         if func_name in ["add", "subtract", "multiply", "divide"]:
             final_code += f"""
 {self.get_return_type()} OperantsManager::{func_name}(const Tensor& x, const Scalar& y) {{{self.gene_operants_manager_code()}}}

 {self.get_return_type()} OperantsManager::{func_name}(const Scalar& x, const Tensor& y) {{{self.gene_operants_manager_code()}}}
+"""
+        # Codes for specific operants
+        if func_name in specific_ops_map.keys():
+            final_code += f"""
+{self.get_return_type()} OperantsManager::{specific_ops_map[func_name]}(const Tensor& x, const Tensor& y) {{{self.gene_operants_manager_code(is_specific_op=True)}}}
+
+{self.get_return_type()} OperantsManager::{specific_ops_map[func_name]}(const Tensor& x, const Scalar& y) {{{self.gene_operants_manager_code(is_specific_op=True)}}}
 """
         # func declaration
         if func_name[-1] != '_':
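specific_ops_map is the generator's rename table: a kernel keeps its YAML name (elementwise_pow) while the operants layer exposes it under a friendlier one (pow), and gene_operants_manager_code(is_specific_op=True) swaps the name before emitting the dispatch body. A stripped-down sketch of that renaming step, with simplified signatures rather than the real generator class:

    specific_ops_map = {"elementwise_pow": "pow"}

    def manager_method(kernel_name, is_specific_op=False):
        # Emit one OperantsManager method declaration for kernel_name.
        func_name = kernel_name
        if is_specific_op:
            # Only the public method name is rewritten; the body it
            # dispatches to still belongs to the elementwise_pow kernel.
            func_name = specific_ops_map[func_name]
        return f"Tensor OperantsManager::{func_name}(const Tensor& x, const Tensor& y);"

    assert "OperantsManager::pow(" in manager_method(
        "elementwise_pow", is_specific_op=True
    )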
diff --git a/paddle/phi/api/yaml/tensor_operants.yaml b/paddle/phi/api/yaml/tensor_operants.yaml
index 764fb1096c7bb..80eb4d12ffc57 100644
--- a/paddle/phi/api/yaml/tensor_operants.yaml
+++ b/paddle/phi/api/yaml/tensor_operants.yaml
@@ -1,14 +1,14 @@
 # Attach operants to Tensor, this file should be consistent with the declaration in `tensor.h`
+- add
+- subtract
+- multiply
+- divide
 - unsqueeze
-- pow
 - exp
 - scale
-- multiply
 - matmul
 - expand
-- divide
 - sum
-- add
 - abs
 - assign
 - elementwise_pow
@@ -22,4 +22,3 @@
 - scatter
 - scatter_nd_add
 - tile
-- subtract
diff --git a/python/paddle/fluid/tests/unittests/prim/test_comp_get_grad_op_desc_prim_enabled.py b/python/paddle/fluid/tests/unittests/prim/test_comp_get_grad_op_desc_prim_enabled.py
index fbcf526a56857..3d9c68a16f55e 100644
--- a/python/paddle/fluid/tests/unittests/prim/test_comp_get_grad_op_desc_prim_enabled.py
+++ b/python/paddle/fluid/tests/unittests/prim/test_comp_get_grad_op_desc_prim_enabled.py
@@ -42,7 +42,8 @@
         set(),
         tuple(),
         (
-            'pow',
+            'fill_constant',
+            'elementwise_pow',
             'fill_constant',
             'elementwise_sub',
             'elementwise_mul',
diff --git a/python/paddle/fluid/tests/unittests/prim/test_comp_skip_op_set.py b/python/paddle/fluid/tests/unittests/prim/test_comp_skip_op_set.py
index 2b1056bc56d30..1fab1a1f63649 100644
--- a/python/paddle/fluid/tests/unittests/prim/test_comp_skip_op_set.py
+++ b/python/paddle/fluid/tests/unittests/prim/test_comp_skip_op_set.py
@@ -28,7 +28,8 @@ def setUp(self):
         self.grad_sub_block = tuple()
         self.desired_ops = 'tanh_grad'
         self.desired_ops_no_skip = (
-            'pow',
+            'fill_constant',
+            'elementwise_pow',
             'fill_constant',
             'elementwise_sub',
             'elementwise_mul',
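End to end, the visible change in a decomposed program is the grad-op sequence, which both updated unittests pin down. Summarized as plain data: the op names are taken from the tests, while the per-op comments are inferred from the composite rule dx = b@GRAD * (1 - b**2) rather than asserted by the tests themselves:

    # Composite tanh backward, before and after this patch.
    before = ('pow', 'fill_constant', 'elementwise_sub', 'elementwise_mul')
    after = (
        'fill_constant',    # exponent tensor for b ** 2
        'elementwise_pow',  # b ** 2
        'fill_constant',    # constant 1
        'elementwise_sub',  # 1 - b ** 2
        'elementwise_mul',  # b@GRAD * (1 - b ** 2)
    )
    assert len(after) == len(before) + 1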