From 4b5a24da274f3e3154892956df8d7eb81d78b5d2 Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Mon, 21 Mar 2022 13:07:57 +0000 Subject: [PATCH 01/13] [Phi] trans logsumexp op --- .../operators/reduce_ops/logsumexp_op.cc | 92 +--------- .../fluid/operators/reduce_ops/logsumexp_op.h | 170 ------------------ paddle/phi/infermeta/unary.cc | 85 +++++++++ paddle/phi/infermeta/unary.h | 6 + .../phi/kernels/cpu/logsumexp_grad_kernel.cc | 24 +++ paddle/phi/kernels/cpu/logsumexp_kernel.cc | 28 +++ .../phi/kernels/gpu/logsumexp_grad_kernel.cu | 24 +++ .../kernels/gpu/logsumexp_kernel.cu} | 18 +- .../kernels/impl/logsumexp_grad_kernel_impl.h | 92 ++++++++++ .../phi/kernels/impl/logsumexp_kernel_impl.h | 100 +++++++++++ paddle/phi/kernels/logsumexp_grad_kernel.h | 33 ++++ .../kernels/logsumexp_kernel.h} | 20 ++- 12 files changed, 424 insertions(+), 268 deletions(-) delete mode 100644 paddle/fluid/operators/reduce_ops/logsumexp_op.h create mode 100644 paddle/phi/kernels/cpu/logsumexp_grad_kernel.cc create mode 100644 paddle/phi/kernels/cpu/logsumexp_kernel.cc create mode 100644 paddle/phi/kernels/gpu/logsumexp_grad_kernel.cu rename paddle/{fluid/operators/reduce_ops/logsumexp_op.part.cu => phi/kernels/gpu/logsumexp_kernel.cu} (55%) create mode 100644 paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h create mode 100644 paddle/phi/kernels/impl/logsumexp_kernel_impl.h create mode 100644 paddle/phi/kernels/logsumexp_grad_kernel.h rename paddle/{fluid/operators/reduce_ops/logsumexp_op.cu => phi/kernels/logsumexp_kernel.h} (55%) diff --git a/paddle/fluid/operators/reduce_ops/logsumexp_op.cc b/paddle/fluid/operators/reduce_ops/logsumexp_op.cc index 9f0ef19bd6299..889e13fa67de6 100644 --- a/paddle/fluid/operators/reduce_ops/logsumexp_op.cc +++ b/paddle/fluid/operators/reduce_ops/logsumexp_op.cc @@ -16,6 +16,10 @@ #include #include #include +#include "paddle/fluid/framework/infershape_utils.h" +#include "paddle/fluid/operators/reduce_ops/reduce_op_function.h" +#include "paddle/phi/core/infermeta_utils.h" +#include "paddle/phi/infermeta/unary.h" namespace paddle { namespace operators { @@ -23,80 +27,6 @@ namespace operators { class LogsumexpOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext* ctx) const override { - OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "logsumexp"); - OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "logsumexp"); - auto x_dims = ctx->GetInputDim("X"); - auto x_rank = x_dims.size(); - PADDLE_ENFORCE_LE(x_rank, 4, - platform::errors::InvalidArgument( - "The input tensor X's dimensions of logsumexp " - "should be less or equal than 4. But received X's " - "dimensions = %d, X's shape = [%s].", - x_rank, x_dims)); - auto axis = ctx->Attrs().Get>("axis"); - PADDLE_ENFORCE_GT( - axis.size(), 0, - platform::errors::InvalidArgument( - "The size of axis of logsumexp " - "should be greater than 0. But received the size of axis " - "of logsumexp is %d.", - axis.size())); - - for (size_t i = 0; i < axis.size(); i++) { - PADDLE_ENFORCE_LT(axis[i], x_rank, - platform::errors::InvalidArgument( - "axis[%d] should be in the " - "range [-D, D), where D is the dimensions of X and " - "D is %d. But received axis[%d] = %d.", - i, x_rank, i, axis[i])); - PADDLE_ENFORCE_GE(axis[i], -x_rank, - platform::errors::InvalidArgument( - "axis[%d] should be in the " - "range [-D, D), where D is the dimensions of X and " - "D is %d. 
But received axis[%d] = %d.", - i, x_rank, i, axis[i])); - if (axis[i] < 0) { - axis[i] += x_rank; - } - } - - bool keepdim = ctx->Attrs().Get("keepdim"); - bool reduce_all = ctx->Attrs().Get("reduce_all"); - auto dims_vector = vectorize(x_dims); - if (reduce_all) { - if (keepdim) - ctx->SetOutputDim("Out", - phi::make_ddim(std::vector(x_rank, 1))); - else - ctx->SetOutputDim("Out", {1}); - } else { - auto dims_vector = vectorize(x_dims); - if (keepdim) { - for (size_t i = 0; i < axis.size(); ++i) { - dims_vector[axis[i]] = 1; - } - } else { - const int kDelFlag = -1; - for (size_t i = 0; i < axis.size(); ++i) { - dims_vector[axis[i]] = kDelFlag; - } - dims_vector.erase( - std::remove(dims_vector.begin(), dims_vector.end(), kDelFlag), - dims_vector.end()); - } - if (!keepdim && dims_vector.size() == 0) { - dims_vector.push_back(1); - } - auto out_dims = phi::make_ddim(dims_vector); - ctx->SetOutputDim("Out", out_dims); - if (axis.size() > 0 && axis[0] != 0) { - // Only pass LoD when not reducing on the first dim. - ctx->ShareLoD("X", /*->*/ "Out"); - } - } - } }; class LogsumexpOpMaker : public framework::OpProtoAndCheckerMaker { @@ -164,16 +94,10 @@ class LogsumexpGradOpMaker : public framework::SingleGradOpMaker { } // namespace paddle namespace ops = paddle::operators; - +DECLARE_INFER_SHAPE_FUNCTOR(logsumexp, LogsumexpInferShapeFunctor, + PD_INFER_META(phi::LogsumexpInferMeta)); REGISTER_OPERATOR(logsumexp, ops::LogsumexpOp, ops::LogsumexpOpMaker, ops::LogsumexpGradOpMaker, - ops::LogsumexpGradOpMaker); + ops::LogsumexpGradOpMaker, + LogsumexpInferShapeFunctor); REGISTER_OPERATOR(logsumexp_grad, ops::LogsumexpGrapOp); - -REGISTER_OP_CPU_KERNEL( - logsumexp, ops::LogsumexpKernel, - ops::LogsumexpKernel); -REGISTER_OP_CPU_KERNEL( - logsumexp_grad, - ops::LogsumexpGradKernel, - ops::LogsumexpGradKernel); diff --git a/paddle/fluid/operators/reduce_ops/logsumexp_op.h b/paddle/fluid/operators/reduce_ops/logsumexp_op.h deleted file mode 100644 index 4490f08b2129a..0000000000000 --- a/paddle/fluid/operators/reduce_ops/logsumexp_op.h +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#pragma once - -#include -#include -#include "paddle/fluid/operators/reduce_ops/reduce_op_function.h" - -namespace paddle { -namespace operators { - -#define HANDLE_DIM(NDIM, RDIM) \ - if (ndim == NDIM && rdim == RDIM) { \ - paddle::operators::ReduceFunctor( \ - context.template device_context(), *input, output, \ - axis, keepdim); \ - } - -struct LogsumexpFunctor { - template - void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) { - auto x_dim = x->dimensions(); - auto t_dim = x_dim; - for (int i = 0; i < static_cast(dim.size()); i++) { - t_dim[dim[i]] = 1; - } - - auto r_dim = x_dim; - for (int i = 0; i < static_cast(r_dim.size()); i++) { - r_dim[i] = 1; - } - for (int i = 0; i < static_cast(dim.size()); i++) { - r_dim[dim[i]] = x_dim[dim[i]]; - } - - auto y_dim = y->dimensions(); - auto x_max = x->maximum(dim); - y->device(place) = - (x_max + - (*x - x_max.reshape(t_dim).broadcast(r_dim)).exp().sum(dim).log()) - .reshape(y_dim); - } -}; - -struct LogsumexpGradFunctor { - template - void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy, - const Dim& dim, int size) { - dx->device(place) = dy->broadcast(dim) * (*x - y->broadcast(dim)).exp(); - } -}; - -template -class LogsumexpKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* input = context.Input("X"); - auto* output = context.Output("Out"); - output->mutable_data(context.GetPlace()); - - auto axis = context.Attr>("axis"); - auto keepdim = context.Attr("keepdim"); - auto reduce_all = context.Attr("reduce_all"); - - const auto& input_dim_size = input->dims().size(); - // The dims has full dim, set the reduce_all is True - reduce_all |= (static_cast(axis.size()) == input_dim_size); - - if (reduce_all) { - // Flatten and reduce 1-D tensor - auto x = EigenVector::Flatten(*input); - auto out = EigenScalar::From(*output); - auto& place = - *context.template device_context().eigen_device(); - auto reduce_dim = Eigen::array({{0}}); - LogsumexpFunctor()(place, &x, &out, reduce_dim); - } else { - int ndim = input_dim_size; - int rdim = axis.size(); - // comments for accelerating compiling temporarily. 
- // HANDLE_DIM(6, 5); - // HANDLE_DIM(6, 4); - // HANDLE_DIM(6, 3); - // HANDLE_DIM(6, 2); - // HANDLE_DIM(6, 1); - // HANDLE_DIM(5, 4); - // HANDLE_DIM(5, 3); - // HANDLE_DIM(5, 2); - // HANDLE_DIM(5, 1); - HANDLE_DIM(4, 3); - HANDLE_DIM(4, 2); - HANDLE_DIM(4, 1); - HANDLE_DIM(3, 2); - HANDLE_DIM(3, 1); - HANDLE_DIM(2, 1); - } - } -}; - -template -class LogsumexpGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* input = context.Input("X"); - auto* output = context.Input("Out"); - auto* output_grad = context.Input(framework::GradVarName("Out")); - auto* input_grad = context.Output(framework::GradVarName("X")); - input_grad->mutable_data(context.GetPlace()); - - auto axis = context.Attr>("axis"); - auto reduce_all = context.Attr("reduce_all"); - const auto input_dim_size = context.Input("X")->dims().size(); - reduce_all |= (static_cast(axis.size()) == input_dim_size); - - if (reduce_all) { - auto x = EigenVector::Flatten(*input); - auto y = EigenVector::Flatten(*output); - auto dy = EigenVector::Flatten(*output_grad); - auto dx = EigenVector::Flatten(*input_grad); - auto& place = - *context.template device_context().eigen_device(); - auto broadcast_dim = - Eigen::array({{static_cast(input->numel())}}); - LogsumexpGradFunctor()(place, &x, &y, &dx, &dy, broadcast_dim, - broadcast_dim[0]); - } else { - int rank = input->dims().size(); - LogsumexpGradFunctor functor; - switch (rank) { - case 1: - ReduceGradFunctor( - context.template device_context(), *input, *output, - *output_grad, input_grad, functor, axis); - break; - case 2: - ReduceGradFunctor( - context.template device_context(), *input, *output, - *output_grad, input_grad, functor, axis); - break; - case 3: - ReduceGradFunctor( - context.template device_context(), *input, *output, - *output_grad, input_grad, functor, axis); - break; - case 4: - ReduceGradFunctor( - context.template device_context(), *input, *output, - *output_grad, input_grad, functor, axis); - break; - } - } - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc index 7c5f38744f892..a2edb4856dd75 100644 --- a/paddle/phi/infermeta/unary.cc +++ b/paddle/phi/infermeta/unary.cc @@ -646,6 +646,91 @@ void KthvalueInferMeta(const MetaTensor& x, indices->set_dtype(x.dtype()); } +void LogsumexpInferMeta(const MetaTensor& input, + const std::vector& axis, + bool keepdim, + bool reduce_all, + MetaTensor* out) { + auto x_dims = input.dims(); + auto x_rank = x_dims.size(); + std::vector formated_axis = axis; + PADDLE_ENFORCE_LE(x_rank, + 4, + errors::InvalidArgument( + "The input tensor X's dimensions of logsumexp " + "should be less or equal than 4. But received X's " + "dimensions = %d, X's shape = [%s].", + x_rank, + x_dims)); + PADDLE_ENFORCE_GT( + axis.size(), + 0, + errors::InvalidArgument( + "The size of axis of logsumexp " + "should be greater than 0. But received the size of axis " + "of logsumexp is %d.", + axis.size())); + + for (size_t i = 0; i < axis.size(); i++) { + PADDLE_ENFORCE_LT(axis[i], + x_rank, + errors::InvalidArgument( + "axis[%d] should be in the " + "range [-D, D), where D is the dimensions of X and " + "D is %d. But received axis[%d] = %d.", + i, + x_rank, + i, + axis[i])); + PADDLE_ENFORCE_GE(axis[i], + -x_rank, + errors::InvalidArgument( + "axis[%d] should be in the " + "range [-D, D), where D is the dimensions of X and " + "D is %d. 
But received axis[%d] = %d.", + i, + x_rank, + i, + axis[i])); + if (axis[i] < 0) { + formated_axis[i] += x_rank; + } + } + + auto dims_vector = vectorize(x_dims); + if (reduce_all) { + if (keepdim) + out->set_dims(phi::make_ddim(std::vector(x_rank, 1))); + else + out->set_dims({1}); + } else { + auto dims_vector = vectorize(x_dims); + if (keepdim) { + for (size_t i = 0; i < formated_axis.size(); ++i) { + dims_vector[formated_axis[i]] = 1; + } + } else { + const int kDelFlag = -1; + for (size_t i = 0; i < formated_axis.size(); ++i) { + dims_vector[formated_axis[i]] = kDelFlag; + } + dims_vector.erase( + std::remove(dims_vector.begin(), dims_vector.end(), kDelFlag), + dims_vector.end()); + } + if (!keepdim && dims_vector.size() == 0) { + dims_vector.push_back(1); + } + auto out_dims = phi::make_ddim(dims_vector); + out->set_dims(out_dims); + if (formated_axis.size() > 0 && formated_axis[0] != 0) { + // Only pass LoD when not reducing on the first dim. + out->share_lod(input); + } + } + out->set_dtype(input.dtype()); +} + void MatrixPowerInferMeta(const MetaTensor& x, int n, MetaTensor* out) { auto dims = x.dims(); auto n_dim = dims.size(); diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h index d84283a65c4d1..148c5cadceec3 100644 --- a/paddle/phi/infermeta/unary.h +++ b/paddle/phi/infermeta/unary.h @@ -112,6 +112,12 @@ void KthvalueInferMeta(const MetaTensor& x, MetaTensor* indices, MetaConfig = MetaConfig()); +void LogsumexpInferMeta(const MetaTensor& input, + const std::vector& axis, + bool keepdim, + bool reduce_all, + MetaTensor* out); + void MatrixPowerInferMeta(const MetaTensor& x, int n, MetaTensor* out); void MaxPoolWithIndexInferMeta(const MetaTensor& x, diff --git a/paddle/phi/kernels/cpu/logsumexp_grad_kernel.cc b/paddle/phi/kernels/cpu/logsumexp_grad_kernel.cc new file mode 100644 index 0000000000000..016c8929ea56d --- /dev/null +++ b/paddle/phi/kernels/cpu/logsumexp_grad_kernel.cc @@ -0,0 +1,24 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/logsumexp_grad_kernel.h" + +#include "paddle/phi/backends/cpu/cpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/funcs/diagonal.h" +#include "paddle/phi/kernels/funcs/eigen/common.h" +#include "paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h" + +PD_REGISTER_KERNEL( + logsumexp_grad, CPU, ALL_LAYOUT, phi::LogsumexpGradKernel, float, double) {} diff --git a/paddle/phi/kernels/cpu/logsumexp_kernel.cc b/paddle/phi/kernels/cpu/logsumexp_kernel.cc new file mode 100644 index 0000000000000..90c5241645b0d --- /dev/null +++ b/paddle/phi/kernels/cpu/logsumexp_kernel.cc @@ -0,0 +1,28 @@ +../ paddle / phi / kernels / cpu / + logsumexp_grad_kernel + .cc // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/logsumexp_kernel.h" + +#include "paddle/phi/backends/cpu/cpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/funcs/diagonal.h" +#include "paddle/phi/kernels/funcs/eigen/common.h" + +#include "paddle/phi/kernels/impl/logsumexp_kernel_impl.h" + + PD_REGISTER_KERNEL( + logsumexp, CPU, ALL_LAYOUT, phi::LogsumexpKernel, float, double) { +} diff --git a/paddle/phi/kernels/gpu/logsumexp_grad_kernel.cu b/paddle/phi/kernels/gpu/logsumexp_grad_kernel.cu new file mode 100644 index 0000000000000..d0e3b4d6445fe --- /dev/null +++ b/paddle/phi/kernels/gpu/logsumexp_grad_kernel.cu @@ -0,0 +1,24 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/logsumexp_grad_kernel.h" + +#include "paddle/phi/backends/cpu/cpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/funcs/diagonal.h" +#include "paddle/phi/kernels/funcs/eigen/common.h" +#include "paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h" + +PD_REGISTER_KERNEL( + logsumexp_grad, GPU, ALL_LAYOUT, phi::LogsumexpGradKernel, float, double) {} diff --git a/paddle/fluid/operators/reduce_ops/logsumexp_op.part.cu b/paddle/phi/kernels/gpu/logsumexp_kernel.cu similarity index 55% rename from paddle/fluid/operators/reduce_ops/logsumexp_op.part.cu rename to paddle/phi/kernels/gpu/logsumexp_kernel.cu index 81124e4f070a5..8ec9d316223fe 100644 --- a/paddle/fluid/operators/reduce_ops/logsumexp_op.part.cu +++ b/paddle/phi/kernels/gpu/logsumexp_kernel.cu @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,12 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// .part used to speed up nvcc compile -#include "paddle/fluid/operators/reduce_ops/logsumexp_op.h" +#include "paddle/phi/kernels/logsumexp_kernel.h" -namespace ops = paddle::operators; +#include "paddle/phi/backends/cpu/cpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/funcs/diagonal.h" +#include "paddle/phi/kernels/funcs/eigen/common.h" -REGISTER_OP_CUDA_KERNEL( - logsumexp_grad, - ops::LogsumexpGradKernel, - ops::LogsumexpGradKernel); +#include "paddle/phi/kernels/impl/logsumexp_kernel_impl.h" + +PD_REGISTER_KERNEL( + logsumexp, GPU, ALL_LAYOUT, phi::LogsumexpKernel, float, double) {} diff --git a/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h b/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h new file mode 100644 index 0000000000000..d6bb3d8126ff8 --- /dev/null +++ b/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h @@ -0,0 +1,92 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include + +#include "paddle/fluid/framework/tensor_util.h" +#include "paddle/phi/kernels/funcs/eigen/common.h" +#include "paddle/phi/kernels/funcs/eigen/eigen_function.h" +#include "paddle/phi/kernels/funcs/reduce_grad_functions.h" +#include "paddle/phi/kernels/logsumexp_grad_kernel.h" + +namespace phi { + +struct LogsumexpGradFunctor { + template + void operator()(const Context& place, + X* x, + Y* y, + DX* dx, + DY* dy, + const Dim& dim, + int size) { + dx->device(place) = dy->broadcast(dim) * (*x - y->broadcast(dim)).exp(); + } +}; + +template +void LogsumexpGradKernel(const Context& dev_ctx, + const DenseTensor& in, + const DenseTensor& out, + const DenseTensor& out_grad, + const std::vector& axis, + bool keepdim, + bool reduce_all, + DenseTensor* in_grad) { + dev_ctx.template Alloc(in_grad); + + const auto input_dim_size = in.dims().size(); + reduce_all |= (static_cast(axis.size()) == input_dim_size); + + if (reduce_all) { + auto x = phi::EigenVector::Flatten(in); + auto y = phi::EigenVector::Flatten(out); + auto dy = phi::EigenVector::Flatten(out_grad); + auto dx = phi::EigenVector::Flatten(*in_grad); + auto& place = *dev_ctx.eigen_device(); + auto broadcast_dim = Eigen::array({{static_cast(in.numel())}}); + LogsumexpGradFunctor()( + place, &x, &y, &dx, &dy, broadcast_dim, broadcast_dim[0]); + } else { + int rank = in.dims().size(); + LogsumexpGradFunctor functor; + switch (rank) { + case 1: + phi::funcs::ReduceGradFunctor( + dev_ctx, in, out, out_grad, in_grad, functor, axis); + break; + case 2: + phi::funcs::ReduceGradFunctor( + dev_ctx, in, out, out_grad, in_grad, functor, axis); + break; + case 3: + phi::funcs::ReduceGradFunctor( + dev_ctx, in, out, out_grad, in_grad, functor, axis); + break; + case 4: + phi::funcs::ReduceGradFunctor( + dev_ctx, in, out, out_grad, in_grad, functor, axis); + break; + } + } +} + +} // namespace phi diff --git a/paddle/phi/kernels/impl/logsumexp_kernel_impl.h b/paddle/phi/kernels/impl/logsumexp_kernel_impl.h new 
file mode 100644 index 0000000000000..7a9573ff522b0 --- /dev/null +++ b/paddle/phi/kernels/impl/logsumexp_kernel_impl.h @@ -0,0 +1,100 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include + +#include "paddle/phi/kernels/cpu/reduce.h" +#include "paddle/phi/kernels/funcs/eigen/common.h" +#include "paddle/phi/kernels/funcs/eigen/eigen_function.h" +#include "paddle/phi/kernels/logsumexp_kernel.h" + +namespace phi { + +#define HANDLE_DIM(NDIM, RDIM) \ + if (ndim == NDIM && rdim == RDIM) { \ + ReduceFunctor( \ + dev_ctx, x, out, axis, keepdim); \ + } + +struct LogsumexpFunctor { + template + void operator()(const Context& place, X* x, Y* y, const Dim& dim) { + auto x_dim = x->dimensions(); + auto t_dim = x_dim; + for (int i = 0; i < static_cast(dim.size()); i++) { + t_dim[dim[i]] = 1; + } + + auto r_dim = x_dim; + for (int i = 0; i < static_cast(r_dim.size()); i++) { + r_dim[i] = 1; + } + for (int i = 0; i < static_cast(dim.size()); i++) { + r_dim[dim[i]] = x_dim[dim[i]]; + } + + auto y_dim = y->dimensions(); + auto x_max = x->maximum(dim); + y->device(place) = + (x_max + + (*x - x_max.reshape(t_dim).broadcast(r_dim)).exp().sum(dim).log()) + .reshape(y_dim); + } +}; + +template +void LogsumexpKernel(const Context& dev_ctx, + const DenseTensor& x, + const std::vector& axis, + bool keepdim, + bool reduce_all, + DenseTensor* out) { + dev_ctx.template Alloc(out); + + const auto& input_dim_size = x.dims().size(); + // The dims has full dim, set the reduce_all is True + reduce_all |= (static_cast(axis.size()) == input_dim_size); + + if (reduce_all) { + // Flatten and reduce 1-D tensor + auto input = phi::EigenVector::Flatten(x); + auto output = phi::EigenScalar::From(*out); + auto& place = *dev_ctx.eigen_device(); + auto reduce_dim = Eigen::array({{0}}); + LogsumexpFunctor()(place, &input, &output, reduce_dim); + } else { + int ndim = input_dim_size; + int rdim = axis.size(); + // comments for accelerating compiling temporarily. + // HANDLE_DIM(6, 5); + // HANDLE_DIM(6, 4); + // HANDLE_DIM(6, 3); + // HANDLE_DIM(6, 2); + // HANDLE_DIM(6, 1); + // HANDLE_DIM(5, 4); + // HANDLE_DIM(5, 3); + // HANDLE_DIM(5, 2); + // HANDLE_DIM(5, 1); + HANDLE_DIM(4, 3); + HANDLE_DIM(4, 2); + HANDLE_DIM(4, 1); + HANDLE_DIM(3, 2); + HANDLE_DIM(3, 1); + HANDLE_DIM(2, 1); + } +} + +} // namespace phi diff --git a/paddle/phi/kernels/logsumexp_grad_kernel.h b/paddle/phi/kernels/logsumexp_grad_kernel.h new file mode 100644 index 0000000000000..01f68879055f7 --- /dev/null +++ b/paddle/phi/kernels/logsumexp_grad_kernel.h @@ -0,0 +1,33 @@ +../ paddle / phi / kernels / impl / + logsumexp_kernel_impl + .h // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/phi/core/dense_tensor.h" + + namespace phi { + + template + void LogsumexpGradKernel(const Context& ctx, + const DenseTensor& in, + const DenseTensor& out, + const DenseTensor& out_grad, + const std::vector& axis, + bool keepdim, + bool reduce_all, + DenseTensor* in_grad); + +} // namespace phi diff --git a/paddle/fluid/operators/reduce_ops/logsumexp_op.cu b/paddle/phi/kernels/logsumexp_kernel.h similarity index 55% rename from paddle/fluid/operators/reduce_ops/logsumexp_op.cu rename to paddle/phi/kernels/logsumexp_kernel.h index 86a31595ebaab..ba1b18230fa52 100644 --- a/paddle/fluid/operators/reduce_ops/logsumexp_op.cu +++ b/paddle/phi/kernels/logsumexp_kernel.h @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,10 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/reduce_ops/logsumexp_op.h" +#pragma once -namespace ops = paddle::operators; +#include "paddle/phi/core/dense_tensor.h" -REGISTER_OP_CUDA_KERNEL( - logsumexp, ops::LogsumexpKernel, - ops::LogsumexpKernel); +namespace phi { + +template +void LogsumexpKernel(const Context& ctx, + const DenseTensor& x, + const std::vector& axis, + bool keepdim, + bool reduce_all, + DenseTensor* out); + +} // namespace phi From 8b5ae9295e0bf989870f81ea1cd50101d33e2eaa Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Tue, 22 Mar 2022 05:42:23 +0000 Subject: [PATCH 02/13] fix bugs --- paddle/phi/kernels/logsumexp_grad_kernel.h | 24 ++++++++++------------ 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/paddle/phi/kernels/logsumexp_grad_kernel.h b/paddle/phi/kernels/logsumexp_grad_kernel.h index 01f68879055f7..d68c447aa65cb 100644 --- a/paddle/phi/kernels/logsumexp_grad_kernel.h +++ b/paddle/phi/kernels/logsumexp_grad_kernel.h @@ -1,6 +1,4 @@ -../ paddle / phi / kernels / impl / - logsumexp_kernel_impl - .h // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -18,16 +16,16 @@ #include "paddle/phi/core/dense_tensor.h" - namespace phi { +namespace phi { - template - void LogsumexpGradKernel(const Context& ctx, - const DenseTensor& in, - const DenseTensor& out, - const DenseTensor& out_grad, - const std::vector& axis, - bool keepdim, - bool reduce_all, - DenseTensor* in_grad); +template +void LogsumexpGradKernel(const Context& ctx, + const DenseTensor& in, + const DenseTensor& out, + const DenseTensor& out_grad, + const std::vector& axis, + bool keepdim, + bool reduce_all, + DenseTensor* in_grad); } // namespace phi From e1dc9bb0bd07307e2d17d100b898b335081020d6 Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Tue, 22 Mar 2022 05:48:55 +0000 Subject: [PATCH 03/13] fix bugs --- paddle/phi/kernels/cpu/logsumexp_kernel.cc | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/paddle/phi/kernels/cpu/logsumexp_kernel.cc b/paddle/phi/kernels/cpu/logsumexp_kernel.cc index 90c5241645b0d..4b0d537b56481 100644 --- a/paddle/phi/kernels/cpu/logsumexp_kernel.cc +++ b/paddle/phi/kernels/cpu/logsumexp_kernel.cc @@ -20,9 +20,14 @@ #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/diagonal.h" #include "paddle/phi/kernels/funcs/eigen/common.h" - #include "paddle/phi/kernels/impl/logsumexp_kernel_impl.h" - - PD_REGISTER_KERNEL( - logsumexp, CPU, ALL_LAYOUT, phi::LogsumexpKernel, float, double) { + PD_REGISTER_KERNEL(logsumexp_grad, + CPU, + ALL_LAYOUT, + phi::LogsumexpGradKernel, + float, + double) { } + +PD_REGISTER_KERNEL( + logsumexp, CPU, ALL_LAYOUT, phi::LogsumexpKernel, float, double) {} From 924f7a59b7b3af716dce1409c79410cb2f8c9981 Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Tue, 22 Mar 2022 05:50:11 +0000 Subject: [PATCH 04/13] fix bugs --- paddle/phi/kernels/cpu/logsumexp_kernel.cc | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/paddle/phi/kernels/cpu/logsumexp_kernel.cc b/paddle/phi/kernels/cpu/logsumexp_kernel.cc index 4b0d537b56481..7ce89064915b0 100644 --- a/paddle/phi/kernels/cpu/logsumexp_kernel.cc +++ b/paddle/phi/kernels/cpu/logsumexp_kernel.cc @@ -18,16 +18,12 @@ #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/core/kernel_registry.h" + #include "paddle/phi/kernels/funcs/diagonal.h" #include "paddle/phi/kernels/funcs/eigen/common.h" + #include "paddle/phi/kernels/impl/logsumexp_kernel_impl.h" - PD_REGISTER_KERNEL(logsumexp_grad, - CPU, - ALL_LAYOUT, - phi::LogsumexpGradKernel, - float, - double) { -} -PD_REGISTER_KERNEL( - logsumexp, CPU, ALL_LAYOUT, phi::LogsumexpKernel, float, double) {} + PD_REGISTER_KERNEL( + logsumexp, CPU, ALL_LAYOUT, phi::LogsumexpKernel, float, double) { +} From 2e402a5a14d3cce6d3e9912f70a2926b4f6f95be Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Tue, 22 Mar 2022 05:52:53 +0000 Subject: [PATCH 05/13] fix bugs --- paddle/phi/kernels/cpu/logsumexp_kernel.cc | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/paddle/phi/kernels/cpu/logsumexp_kernel.cc b/paddle/phi/kernels/cpu/logsumexp_kernel.cc index 7ce89064915b0..be679fea8e60c 100644 --- a/paddle/phi/kernels/cpu/logsumexp_kernel.cc +++ b/paddle/phi/kernels/cpu/logsumexp_kernel.cc @@ -1,6 +1,4 @@ -../ paddle / phi / kernels / cpu / - logsumexp_grad_kernel - .cc // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,6 +22,5 @@ #include "paddle/phi/kernels/impl/logsumexp_kernel_impl.h" - PD_REGISTER_KERNEL( - logsumexp, CPU, ALL_LAYOUT, phi::LogsumexpKernel, float, double) { -} +PD_REGISTER_KERNEL( + logsumexp, CPU, ALL_LAYOUT, phi::LogsumexpKernel, float, double) {} From 75c0c6eed3690b4ce2da3c0492a4017e28c2c47e Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Tue, 22 Mar 2022 05:53:11 +0000 Subject: [PATCH 06/13] fix bugs --- paddle/phi/kernels/cpu/logsumexp_kernel.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/phi/kernels/cpu/logsumexp_kernel.cc b/paddle/phi/kernels/cpu/logsumexp_kernel.cc index be679fea8e60c..622429ea3eac5 100644 --- a/paddle/phi/kernels/cpu/logsumexp_kernel.cc +++ b/paddle/phi/kernels/cpu/logsumexp_kernel.cc @@ -16,7 +16,6 @@ #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/core/kernel_registry.h" - #include "paddle/phi/kernels/funcs/diagonal.h" #include "paddle/phi/kernels/funcs/eigen/common.h" From 0e19fe9491008cb9198b3f0fff9b678535f5bf07 Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Tue, 22 Mar 2022 07:07:57 +0000 Subject: [PATCH 07/13] fix bugs --- paddle/fluid/operators/reduce_ops/logsumexp_op.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/fluid/operators/reduce_ops/logsumexp_op.cc b/paddle/fluid/operators/reduce_ops/logsumexp_op.cc index 889e13fa67de6..0602c73db6bbc 100644 --- a/paddle/fluid/operators/reduce_ops/logsumexp_op.cc +++ b/paddle/fluid/operators/reduce_ops/logsumexp_op.cc @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/reduce_ops/logsumexp_op.h" #include #include #include From 26c6fbbefbf98fda204ec21088c9272a0bccde78 Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Thu, 24 Mar 2022 03:16:03 +0000 Subject: [PATCH 08/13] add sig --- paddle/phi/ops/compat/logsumexp_sig.cc | 35 ++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 paddle/phi/ops/compat/logsumexp_sig.cc diff --git a/paddle/phi/ops/compat/logsumexp_sig.cc b/paddle/phi/ops/compat/logsumexp_sig.cc new file mode 100644 index 0000000000000..db2965c51795f --- /dev/null +++ b/paddle/phi/ops/compat/logsumexp_sig.cc @@ -0,0 +1,35 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/phi/core/compat/op_utils.h" + +namespace phi { + +KernelSignature LogsumexpOpArgumentMapping(const ArgumentMappingContext& ctx) { + return KernelSignature( + "logsumexp", {"X"}, {"axis", "keepdim", "reduce_all"}, {"Out"}); +} + +KernelSignature LogsumexpGradOpArgumentMapping( + const ArgumentMappingContext& ctx) { + return KernelSignature("logsumexp_grad", + {GradVarName("Out"), "X"}, + {"axis", "keepdim", "reduce_all"}, + {GradVarName("X")}); +} + +} // namespace phi + +PD_REGISTER_ARG_MAPPING_FN(logsumexp, phi::LogsumexpOpArgumentMapping); +PD_REGISTER_ARG_MAPPING_FN(logsumexp_grad, phi::LogsumexpGradOpArgumentMapping); From 3e965b1dff51fa4557e20a91a450532277ab5cdd Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Thu, 24 Mar 2022 06:22:50 +0000 Subject: [PATCH 09/13] fix sig bugs --- paddle/phi/ops/compat/logsumexp_sig.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/phi/ops/compat/logsumexp_sig.cc b/paddle/phi/ops/compat/logsumexp_sig.cc index db2965c51795f..a48d4baf600ed 100644 --- a/paddle/phi/ops/compat/logsumexp_sig.cc +++ b/paddle/phi/ops/compat/logsumexp_sig.cc @@ -24,7 +24,7 @@ KernelSignature LogsumexpOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature LogsumexpGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("logsumexp_grad", - {GradVarName("Out"), "X"}, + {GradVarName("Out"), "X", "Out"}, {"axis", "keepdim", "reduce_all"}, {GradVarName("X")}); } From fc67129424a06797383a2442b803c0252022e3fa Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Fri, 25 Mar 2022 10:48:40 +0800 Subject: [PATCH 10/13] fix sig bugs --- paddle/phi/ops/compat/logsumexp_sig.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/phi/ops/compat/logsumexp_sig.cc b/paddle/phi/ops/compat/logsumexp_sig.cc index a48d4baf600ed..2e28680059ec4 100644 --- a/paddle/phi/ops/compat/logsumexp_sig.cc +++ b/paddle/phi/ops/compat/logsumexp_sig.cc @@ -24,7 +24,7 @@ KernelSignature LogsumexpOpArgumentMapping(const ArgumentMappingContext& ctx) { KernelSignature LogsumexpGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("logsumexp_grad", - {GradVarName("Out"), "X", "Out"}, + {"X", "Out", GradVarName("Out")}, {"axis", "keepdim", "reduce_all"}, {GradVarName("X")}); } From 4d897d294e4e5e453a42639610b2d4e9913ccc8a Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Fri, 25 Mar 2022 13:51:22 +0800 Subject: [PATCH 11/13] fix xpu bugs --- paddle/fluid/operators/reduce_ops/logsumexp_op_xpu.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/operators/reduce_ops/logsumexp_op_xpu.cc b/paddle/fluid/operators/reduce_ops/logsumexp_op_xpu.cc index dcb849de0991b..6fb60fa179157 100644 --- a/paddle/fluid/operators/reduce_ops/logsumexp_op_xpu.cc +++ b/paddle/fluid/operators/reduce_ops/logsumexp_op_xpu.cc @@ -14,7 +14,7 @@ #ifdef PADDLE_WITH_XPU -#include "paddle/fluid/operators/reduce_ops/logsumexp_op.h" +#include "paddle/fluid/operators/reduce_ops/reduce_op_function.h" #include "paddle/fluid/platform/device/xpu/xpu_header.h" #include "paddle/fluid/platform/device_context.h" From 022021c220b3c211a35e4818bd0606c5d8e067f7 Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Mon, 28 Mar 2022 09:46:20 +0800 Subject: [PATCH 12/13] fix review bugs --- paddle/phi/kernels/cpu/logsumexp_grad_kernel.cc | 2 -- paddle/phi/kernels/cpu/logsumexp_kernel.cc | 2 -- paddle/phi/kernels/gpu/logsumexp_grad_kernel.cu | 2 
-- paddle/phi/kernels/gpu/logsumexp_kernel.cu | 2 -- paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h | 1 - paddle/phi/ops/compat/logsumexp_sig.cc | 6 ------ 6 files changed, 15 deletions(-) diff --git a/paddle/phi/kernels/cpu/logsumexp_grad_kernel.cc b/paddle/phi/kernels/cpu/logsumexp_grad_kernel.cc index 016c8929ea56d..e0ef67084b445 100644 --- a/paddle/phi/kernels/cpu/logsumexp_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/logsumexp_grad_kernel.cc @@ -16,8 +16,6 @@ #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/core/kernel_registry.h" -#include "paddle/phi/kernels/funcs/diagonal.h" -#include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h" PD_REGISTER_KERNEL( diff --git a/paddle/phi/kernels/cpu/logsumexp_kernel.cc b/paddle/phi/kernels/cpu/logsumexp_kernel.cc index 622429ea3eac5..06e0b30a9ca65 100644 --- a/paddle/phi/kernels/cpu/logsumexp_kernel.cc +++ b/paddle/phi/kernels/cpu/logsumexp_kernel.cc @@ -16,8 +16,6 @@ #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/core/kernel_registry.h" -#include "paddle/phi/kernels/funcs/diagonal.h" -#include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/impl/logsumexp_kernel_impl.h" diff --git a/paddle/phi/kernels/gpu/logsumexp_grad_kernel.cu b/paddle/phi/kernels/gpu/logsumexp_grad_kernel.cu index d0e3b4d6445fe..490b3e9404561 100644 --- a/paddle/phi/kernels/gpu/logsumexp_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/logsumexp_grad_kernel.cu @@ -16,8 +16,6 @@ #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/core/kernel_registry.h" -#include "paddle/phi/kernels/funcs/diagonal.h" -#include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h" PD_REGISTER_KERNEL( diff --git a/paddle/phi/kernels/gpu/logsumexp_kernel.cu b/paddle/phi/kernels/gpu/logsumexp_kernel.cu index 8ec9d316223fe..0f07a39ab113a 100644 --- a/paddle/phi/kernels/gpu/logsumexp_kernel.cu +++ b/paddle/phi/kernels/gpu/logsumexp_kernel.cu @@ -16,8 +16,6 @@ #include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/core/kernel_registry.h" -#include "paddle/phi/kernels/funcs/diagonal.h" -#include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/impl/logsumexp_kernel_impl.h" diff --git a/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h b/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h index d6bb3d8126ff8..c2583ce8d32df 100644 --- a/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/logsumexp_grad_kernel_impl.h @@ -16,7 +16,6 @@ #include #include -#include "paddle/fluid/framework/tensor_util.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/eigen/eigen_function.h" #include "paddle/phi/kernels/funcs/reduce_grad_functions.h" diff --git a/paddle/phi/ops/compat/logsumexp_sig.cc b/paddle/phi/ops/compat/logsumexp_sig.cc index 2e28680059ec4..ca7345dbe7049 100644 --- a/paddle/phi/ops/compat/logsumexp_sig.cc +++ b/paddle/phi/ops/compat/logsumexp_sig.cc @@ -16,11 +16,6 @@ namespace phi { -KernelSignature LogsumexpOpArgumentMapping(const ArgumentMappingContext& ctx) { - return KernelSignature( - "logsumexp", {"X"}, {"axis", "keepdim", "reduce_all"}, {"Out"}); -} - KernelSignature LogsumexpGradOpArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("logsumexp_grad", @@ -31,5 +26,4 @@ KernelSignature LogsumexpGradOpArgumentMapping( } // namespace phi 
-PD_REGISTER_ARG_MAPPING_FN(logsumexp, phi::LogsumexpOpArgumentMapping); PD_REGISTER_ARG_MAPPING_FN(logsumexp_grad, phi::LogsumexpGradOpArgumentMapping); From 6ecec2afbcb615ea15132e055cea1659cc4919a2 Mon Sep 17 00:00:00 2001 From: xingjing1 <2907374824@qq.com> Date: Mon, 28 Mar 2022 14:09:51 +0800 Subject: [PATCH 13/13] test=develop
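
The forward functor this series moves (LogsumexpFunctor, carried verbatim from logsumexp_op.h into logsumexp_kernel_impl.h) relies on the standard max-shift identity logsumexp(x) = max(x) + log(sum(exp(x - max(x)))). Below is a minimal standalone C++ sketch of that identity, not Paddle code; the function name LogSumExp and the use of std::vector are illustrative assumptions, and the input is assumed non-empty.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Sketch of the max-shift trick: subtracting the maximum keeps every exp()
// argument <= 0, so the sum cannot overflow even for large inputs.
double LogSumExp(const std::vector<double>& x) {
  const double x_max = *std::max_element(x.begin(), x.end());
  double sum = 0.0;
  for (const double v : x) sum += std::exp(v - x_max);
  return x_max + std::log(sum);
}

int main() {
  // A naive log(sum(exp(x))) overflows to inf here; the shifted form is fine.
  const std::vector<double> x = {1000.0, 1000.5, 999.0};
  std::printf("%.4f\n", LogSumExp(x));  // prints roughly 1001.1041
  return 0;
}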
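
The backward functor (LogsumexpGradFunctor) encodes the derivative of the reduction: for y = logsumexp(x), dL/dx_i = dL/dy * exp(x_i - y), and since exp(x_i - y) is softmax(x)_i, the entries of dx always sum to dy. A hedged standalone sketch of that rule follows; LogSumExpGrad is an illustrative name, not a Paddle symbol.

#include <cmath>
#include <vector>

// For y = logsumexp(x) and upstream gradient dy = dL/dy, the input gradient
// is dx_i = dy * exp(x_i - y), i.e. dy distributed by softmax(x).
std::vector<double> LogSumExpGrad(const std::vector<double>& x,
                                  double y,    // forward output logsumexp(x)
                                  double dy) { // upstream gradient dL/dy
  std::vector<double> dx(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    dx[i] = dy * std::exp(x[i] - y);
  }
  return dx;
}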
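
LogsumexpInferMeta (patch 01, unary.cc) reproduces the shape rules that the deleted InferShape implemented: input rank at most 4, at least one axis, negative axes wrapped into [0, rank), reduced axes kept as 1 under keepdim or erased otherwise, and a fully reduced output collapsing to shape [1]. The helper below is a hypothetical distillation of those rules over plain static dims — not the phi API; the real code operates on phi::DDim and also sets dtype and conditionally shares LoD.

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical helper mirroring LogsumexpInferMeta's output-shape logic.
std::vector<int64_t> InferReducedShape(std::vector<int64_t> dims,
                                       std::vector<int64_t> axis,
                                       bool keepdim, bool reduce_all) {
  const int64_t rank = static_cast<int64_t>(dims.size());
  if (reduce_all) {
    return keepdim ? std::vector<int64_t>(rank, 1) : std::vector<int64_t>{1};
  }
  for (int64_t& a : axis) {
    if (a < 0) a += rank;  // wrap negative axes into [0, rank)
  }
  const int64_t kDelFlag = -1;  // same sentinel the real code uses
  for (const int64_t a : axis) dims[a] = keepdim ? 1 : kDelFlag;
  if (!keepdim) {
    dims.erase(std::remove(dims.begin(), dims.end(), kDelFlag), dims.end());
    if (dims.empty()) dims.push_back(1);  // scalar result becomes [1]
  }
  return dims;
}

// e.g. InferReducedShape({2, 3, 4}, {-1}, false, false) yields {2, 3}.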
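
The HANDLE_DIM macro in logsumexp_kernel_impl.h (previously in logsumexp_op.h) exists because the Eigen-based ReduceFunctor needs the input rank and the number of reduced axes as compile-time template arguments, so the kernel expands one branch per supported (ndim, rdim) pair and selects the match at runtime; the rank-5 and rank-6 branches stay commented out upstream to keep compile times down. A toy sketch of the same dispatch pattern — ReduceImpl and Dispatch are stand-ins, not Paddle symbols.

#include <cstdio>

// Stand-in for the templated reduce call; NDim and RDim must be compile-time
// constants, which is why the dispatch is spelled out pair by pair instead of
// passing runtime values directly.
template <int NDim, int RDim>
void ReduceImpl() {
  std::printf("reduce a rank-%d tensor over %d axes\n", NDim, RDim);
}

#define HANDLE_DIM(NDIM, RDIM)        \
  if (ndim == NDIM && rdim == RDIM) { \
    ReduceImpl<NDIM, RDIM>();         \
  }

void Dispatch(int ndim, int rdim) {
  HANDLE_DIM(4, 3);
  HANDLE_DIM(4, 2);
  HANDLE_DIM(4, 1);
  HANDLE_DIM(3, 2);
  HANDLE_DIM(3, 1);
  HANDLE_DIM(2, 1);
}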