Skip to content

Commit

Permalink
[PHI] Migrate softmax kernel (PaddlePaddle#47339)
Browse files Browse the repository at this point in the history
* add extra attr property set

* add type_info for all context

* add onednn context to all context

* fix context compile error

* simplify conv kernel args

* pass runtime attr into dev_ctx

* fix macro error

* clear conv_grad_kernel extra args

* merge conv_grad_grad into conv_grad

* clear conv2d_grad_grad extra attrs

* remove redundant imports

* migrate softmax

* clear yaml and eager extra attr

* fix conv1d error

* change to thread local

* fix npu compile failed

* try to fix windows compile failed

* add conv2d onednn phi kernel

* fix ci bugs (xuewujiao#36)

* fix compile bugs (xuewujiao#38)

* fix extra input transform bug (xuewujiao#39)

* support dynamic created attr (xuewujiao#40)

* reset extra info gen code

* rm conv_grad_grad kernel

* reimpl pass attr adapting

* add int attr support

* remove vector inputnames creating

* merge dev

* fix map at error

* adjust attribute

* adapt funcs to PHI

Co-authored-by: Chen Weihang <[email protected]>
Co-authored-by: YuanRisheng <[email protected]>
  • Loading branch information
3 people authored Nov 3, 2022
1 parent f9a0605 commit b8ae385
Show file tree
Hide file tree
Showing 6 changed files with 72 additions and 116 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
#include "paddle/phi/core/kernel_registry.h"

USE_OP_ITSELF(softmax);
USE_OP_DEVICE_KERNEL(softmax, MKLDNN);
PD_DECLARE_KERNEL(softmax, OneDNN, ONEDNN);
USE_OP_ITSELF(elementwise_add);
USE_OP_DEVICE_KERNEL(elementwise_add, MKLDNN);
USE_OP_ITSELF(leaky_relu);
Expand Down
111 changes: 0 additions & 111 deletions paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc

This file was deleted.

2 changes: 1 addition & 1 deletion paddle/fluid/operators/mkldnn/test_mkldnn_caching.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ USE_OP_DEVICE_KERNEL(elementwise_mul, MKLDNN);
USE_OP_ITSELF(relu);
PD_DECLARE_KERNEL(relu, OneDNN, ONEDNN);
USE_OP_ITSELF(softmax);
USE_OP_DEVICE_KERNEL(softmax, MKLDNN);
PD_DECLARE_KERNEL(softmax, OneDNN, ONEDNN);
USE_OP_ITSELF(conv2d);
PD_DECLARE_KERNEL(conv2d, OneDNN, ONEDNN);

Expand Down
3 changes: 1 addition & 2 deletions paddle/fluid/operators/mkldnn/test_mkldnn_op_inplace.cc
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,7 @@ USE_OP_DEVICE_KERNEL(elementwise_add, MKLDNN);
USE_OP_ITSELF(relu);
PD_DECLARE_KERNEL(relu, OneDNN, ONEDNN);
USE_OP_ITSELF(softmax);
USE_OP_DEVICE_KERNEL(softmax, MKLDNN);

PD_DECLARE_KERNEL(softmax, OneDNN, ONEDNN);
PD_DECLARE_KERNEL(softmax, CPU, ALL_LAYOUT);

namespace paddle {
Expand Down
9 changes: 8 additions & 1 deletion paddle/phi/backends/onednn/onednn_reuse.h
Original file line number Diff line number Diff line change
Expand Up @@ -753,12 +753,19 @@ class SoftmaxOneDNNHandler
public:
SoftmaxOneDNNHandler(const dnnl::engine onednn_engine,
Place cpu_place,
int axis,
const DenseTensor* x,
int axis)
DenseTensor* out)
: OneDNNHandlerNoCachingT<T,
dnnl::softmax_forward,
dnnl::softmax_backward>(onednn_engine,
cpu_place) {
PADDLE_ENFORCE_EQ(
x->dims(),
out->dims(),
phi::errors::InvalidArgument(
"The shape of input and output tensor must be identical."));

const int canonical_axis = funcs::CanonicalAxis(axis, x->dims().size());
this->AcquireForwardPrimitiveDescriptor(
dnnl::prop_kind::forward_scoring, x->mem_desc(), canonical_axis);
Expand Down
61 changes: 61 additions & 0 deletions paddle/phi/kernels/onednn/softmax_kernel.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/softmax_kernel.h"

#include "paddle/phi/backends/onednn/onednn_reuse.h"
#include "paddle/phi/core/kernel_registry.h"

namespace phi {

// Forward softmax on the oneDNN (MKL-DNN) backend: computes softmax of `x`
// along dimension `axis` (negative values are canonicalized inside the
// handler) and writes the result into `out`.
template <typename T, typename Context>
void SoftmaxKernel(const Context& dev_ctx,
const DenseTensor& x,
int axis,
DenseTensor* out) {
// Handler builds the dnnl::softmax_forward primitive descriptor from the
// input's memory descriptor and the canonicalized axis.
funcs::SoftmaxOneDNNHandler<T> handler(
dev_ctx.GetEngine(), dev_ctx.GetPlace(), axis, &x, out);

auto src_memory_p = handler.AcquireSrcMemory(&x);
std::shared_ptr<dnnl::memory> dst_memory_p = nullptr;
if (x.IsSharedBufferWith(*out)) {
// In-place case: input and output alias the same buffer, so reuse the
// source memory object as the destination and just make sure `out` is
// marked as allocated.
dst_memory_p = src_memory_p;
dev_ctx.template Alloc<T>(out);
} else {
dst_memory_p = handler.AcquireDstMemory(out);
}
auto softmax_p = handler.AcquireForwardPrimitive();

// Execute on the thread-local oneDNN stream and block until the primitive
// finishes, since the post-processing below reads the output buffer.
auto& astream = OneDNNContext::tls().get_stream();
softmax_p->execute(
astream, {{DNNL_ARG_SRC, *src_memory_p}, {DNNL_ARG_DST, *dst_memory_p}});
astream.wait();

// "is_test" is an extra (runtime) attribute delivered via the device
// context rather than a kernel argument; default to false when absent.
bool is_test = dev_ctx.HasDnnAttr("is_test")
? PADDLE_GET_CONST(bool, dev_ctx.GetDnnAttr("is_test"))
: false;
if (!is_test) {
// Training mode: clamp outputs to at least exp(-64) — presumably to keep
// probabilities strictly positive for downstream ops (e.g. log in
// cross-entropy); TODO(review): confirm rationale against the original
// fluid softmax_mkldnn_op this kernel replaces.
T* out_data = dev_ctx.template Alloc<T>(out);
std::for_each(out_data, &out_data[out->numel()], [](T& val) {
val = std::max(val, static_cast<T>(exp(-64)));
});
}

// Propagate the oneDNN memory descriptor so consumers see the layout of
// the produced memory.
out->set_mem_desc(dst_memory_p->get_desc());
}

} // namespace phi

PD_REGISTER_KERNEL(
softmax, OneDNN, ONEDNN, phi::SoftmaxKernel, float, phi::dtype::bfloat16) {}

0 comments on commit b8ae385

Please sign in to comment.