
[phi] transfer the selu_op and pass the CI #39819

Merged · 7 commits · Mar 1, 2022

Changes from 2 commits
8 changes: 0 additions & 8 deletions paddle/fluid/operators/selu_op.cc
@@ -12,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/fluid/operators/selu_op.h"
-
 #include <memory>
 #include <string>
 #include <unordered_map>
@@ -127,9 +125,3 @@ REGISTER_OPERATOR(selu, ops::SeluOp, ops::SeluOpMaker, ops::SeluOpInferVarType,
                   ops::SeluGradMaker<paddle::framework::OpDesc>,
                   ops::SeluGradMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(selu_grad, ops::SeluGradOp);
-REGISTER_OP_CPU_KERNEL(
-    selu, ops::SeluKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::SeluKernel<paddle::platform::CPUDeviceContext, double>);
-REGISTER_OP_CPU_KERNEL(
-    selu_grad, ops::SeluGradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::SeluGradKernel<paddle::platform::CPUDeviceContext, double>);
22 changes: 0 additions & 22 deletions paddle/fluid/operators/selu_op.cu

This file was deleted.

123 changes: 0 additions & 123 deletions paddle/fluid/operators/selu_op.h

This file was deleted.

21 changes: 21 additions & 0 deletions paddle/phi/kernels/cpu/selu_grad_kernel.cc
@@ -0,0 +1,21 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/selu_grad_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/selu_grad_kernel_impl.h"

PD_REGISTER_KERNEL(
    selu_grad, CPU, ALL_LAYOUT, phi::SeluGradKernel, float, double) {}
20 changes: 20 additions & 0 deletions paddle/phi/kernels/cpu/selu_kernel.cc
@@ -0,0 +1,20 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/selu_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/selu_kernel_impl.h"

PD_REGISTER_KERNEL(selu, CPU, ALL_LAYOUT, phi::SeluKernel, float, double) {}
21 changes: 21 additions & 0 deletions paddle/phi/kernels/gpu/selu_grad_kernel.cu
@@ -0,0 +1,21 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/selu_grad_kernel_impl.h"
#include "paddle/phi/kernels/selu_grad_kernel.h"
Contributor comment: "paddle/phi/kernels/selu_grad_kernel.h" should be placed at the top; the include order here was probably adjusted automatically by pre-commit.


PD_REGISTER_KERNEL(
    selu_grad, GPU, ALL_LAYOUT, phi::SeluGradKernel, float, double) {}
21 changes: 21 additions & 0 deletions paddle/phi/kernels/gpu/selu_kernel.cu
@@ -0,0 +1,21 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/selu_kernel_impl.h"
#include "paddle/phi/kernels/selu_kernel.h"
Contributor comment: Same as above ("paddle/phi/kernels/selu_kernel.h" should come first).


// TODO(add supported dtype.)
PD_REGISTER_KERNEL(selu, GPU, ALL_LAYOUT, phi::SeluKernel, float, double) {}
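Regarding the TODO above: a hedged sketch of how the dtype list might later be widened. Treating phi::dtype::float16 as supported here is an assumption for illustration; this PR registers only float and double.

```cpp
// Sketch only, not part of this PR: widening the registered dtypes means
// appending them to the macro, assuming the functor math compiles for
// phi::dtype::float16.
PD_REGISTER_KERNEL(selu,
                   GPU,
                   ALL_LAYOUT,
                   phi::SeluKernel,
                   float,
                   double,
                   phi::dtype::float16) {}
```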
33 changes: 33 additions & 0 deletions paddle/phi/kernels/impl/selu_grad_kernel_impl.h
@@ -0,0 +1,33 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/impl/selu_kernel_impl.h"
namespace phi {
template <typename T, typename Context>
void SeluGradKernel(const Context& dev_ctx,
                    const DenseTensor& out,
                    const DenseTensor& dout,
                    float scale,
                    float alpha,
                    DenseTensor* dx) {
  auto dx_ptr = dev_ctx.template Alloc<T>(dx);
  SeluGradFunctor<T> functor(
      out.data<T>(), dout.data<T>(), alpha, scale, dx_ptr);
  size_t limit = static_cast<size_t>(out.numel());
  paddle::platform::ForRange<Context> for_range(dev_ctx, limit);
  for_range(functor);
}
} // namespace phi
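Both the forward and backward kernels delegate elementwise iteration to paddle::platform::ForRange. Conceptually, the CPU specialization is just a serial loop over [0, limit); the sketch below shows that shape (a simplified stand-in, not the actual Paddle code — the GPU specialization instead launches a device kernel over the same index range):

```cpp
// Minimal stand-in for what ForRange does on CPU: apply the functor to
// every index in [0, limit).
template <typename Functor>
void ForRangeCPU(size_t limit, Functor func) {
  for (size_t i = 0; i < limit; ++i) {
    func(i);
  }
}
```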
91 changes: 91 additions & 0 deletions paddle/phi/kernels/impl/selu_kernel_impl.h
@@ -0,0 +1,91 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#if defined(__NVCC__) || defined(__HIPCC__)
// TODO( add GPU related file here )
#endif
#include <string>
#include "paddle/fluid/operators/math.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T>
struct SeluFunctor {
  SeluFunctor(const T* x_data_ptr, float alpha, float scale, T* y_data_ptr)
      : x_data_ptr_(x_data_ptr),
        alpha_(alpha),
        scale_(scale),
        y_data_ptr_(y_data_ptr) {}

  HOSTDEVICE void operator()(size_t idx) const {
    T x_ele = x_data_ptr_[idx];
    if (x_ele <= 0) {
      x_ele = alpha_ * paddle::operators::real_exp(x_ele) - alpha_;
    }
    y_data_ptr_[idx] = scale_ * x_ele;
  }

  const T* x_data_ptr_;
  const float alpha_;
  const float scale_;
  T* y_data_ptr_;
};
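// SeluFunctor (above) computes the standard SELU forward map:
//   selu(x) = scale * x                      for x > 0
//   selu(x) = scale * alpha * (exp(x) - 1)   for x <= 0
// Note that alpha_ * real_exp(x) - alpha_ == alpha_ * (exp(x) - 1).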

template <typename T>
struct SeluGradFunctor {
  SeluGradFunctor(const T* y_data_ptr,
                  const T* dy_data_ptr,
                  float alpha,
                  float scale,
                  T* dx_data_ptr)
      : y_data_ptr_(y_data_ptr),
        dy_data_ptr_(dy_data_ptr),
        alpha_(alpha),
        scale_(scale),
        la_(alpha * scale),
        dx_data_ptr_(dx_data_ptr) {}

  HOSTDEVICE void operator()(size_t idx) const {
    T y_ele = y_data_ptr_[idx];
    T dy_ele = dy_data_ptr_[idx];

    float tmp = scale_;
    if (y_ele <= 0) {
      tmp = y_ele + la_;
    }
    dx_data_ptr_[idx] = dy_ele * tmp;
  }

  const T* y_data_ptr_;
  const T* dy_data_ptr_;
  const float alpha_;
  const float scale_;
  const float la_;
  T* dx_data_ptr_;
};
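// SeluGradFunctor (above) recovers the derivative from the forward output
// alone. For x > 0 (equivalently y > 0), dy/dx = scale. For x <= 0,
//   y = scale * alpha * (exp(x) - 1)  =>  dy/dx = scale * alpha * exp(x)
//                                               = y + scale * alpha,
// which is exactly y_ele + la_, with la_ = alpha * scale cached in the
// constructor.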

template <typename T, typename Context>
void SeluKernel(const Context& dev_ctx,
                const DenseTensor& x,
                float scale,
                float alpha,
                DenseTensor* out) {
  auto out_ptr = dev_ctx.template Alloc<T>(out);
  SeluFunctor<T> functor(x.data<T>(), alpha, scale, out_ptr);
  size_t limit = static_cast<size_t>(x.numel());
  paddle::platform::ForRange<Context> for_range(dev_ctx, limit);
  for_range(functor);
}
} // namespace phi
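As a sanity check, here is a self-contained sketch that mirrors the two functors' math on plain host arrays. It is a standalone re-implementation for illustration, not Paddle code; the scale/alpha values are the SELU paper defaults (which Paddle's selu op also uses):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // SELU paper defaults.
  const float scale = 1.0507009873554805f;
  const float alpha = 1.6732632423543772f;
  const float la = alpha * scale;  // mirrors SeluGradFunctor::la_

  const float xs[] = {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f};
  for (float x : xs) {
    // Forward pass: mirrors SeluFunctor::operator().
    float y = (x <= 0) ? scale * (alpha * std::exp(x) - alpha) : scale * x;
    // Backward pass for upstream gradient dy = 1: mirrors
    // SeluGradFunctor::operator(), which reads only y.
    float dx = (y <= 0) ? (y + la) : scale;
    std::printf("x=% .2f  y=% .6f  dy/dx=% .6f\n", x, y, dx);
  }
  return 0;
}
```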