[CustomOP Optional Inplace] Custom operator supports inplace optional vector Tensor input (#52421)

* [CustomOP Optional Inplace] Custom operator supports inplace optional vector Tensor input

* uncomment unit test code
jiahy0825 authored Apr 3, 2023
1 parent 0b60f28 commit 59c9d75
Showing 6 changed files with 493 additions and 75 deletions.
6 changes: 5 additions & 1 deletion paddle/fluid/framework/custom_operator.cc
@@ -833,7 +833,11 @@ static void RunInferDtypeFunc(
auto in_name = inplace_reverse_map.at(out_name);
// make sure ctx has valid inplace optional outputs
if (ctx->HasOutput(out_name)) {
ctx->SetOutputDataTypes(out_name, ctx->GetInputDataTypes(in_name));
size_t size = ctx->InputSize(in_name);
for (size_t i = 0; i < size; ++i) {
auto dtype = ctx->GetInputDataType(in_name, i);
ctx->SetOutputDataType(out_name, dtype, i);
}
} else {
PADDLE_ENFORCE(
detail::IsOptionalVar(out_name),
44 changes: 30 additions & 14 deletions paddle/phi/api/ext/op_meta_info.h
@@ -120,6 +120,8 @@ class PADDLE_API CustomOpKernelContext {
std::vector<Tensor> InputsBetween(size_t start, size_t end) const;
Tensor& MutableInputAt(size_t idx);
paddle::optional<Tensor> OptionalInputAt(size_t idx);
paddle::optional<std::vector<Tensor>> OptionalInputsBetween(size_t start,
size_t end);
const std::vector<paddle::any>& Attrs() const { return attrs_; }
const std::vector<std::pair<size_t, size_t>>& InputRange() {
return input_range_;
@@ -294,6 +296,34 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
}
};

// Handle args for inplace vector<Tensor> case
template <typename... Tail>
struct ComputeCallHelper<std::vector<Tensor>&, Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
auto& range = ctx->InputRangeAt(in_idx);
auto arg = ctx->InputsBetween(range.first, range.second);
ComputeCallHelper<
Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(ctx,
pargs...,
arg);
}
};

// Handle args for optional inplace vector<Tensor> case
template <typename... Tail>
struct ComputeCallHelper<paddle::optional<std::vector<Tensor>>&, Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
auto& range = ctx->InputRangeAt(in_idx);
auto arg = ctx->OptionalInputsBetween(range.first, range.second);
ComputeCallHelper<
Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(ctx,
pargs...,
arg);
}
};

PD_SPECIALIZE_ComputeCallHelper(bool);
PD_SPECIALIZE_ComputeCallHelper(int);
PD_SPECIALIZE_ComputeCallHelper(float);
@@ -358,20 +388,6 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
}
};

// Handle args for inplace vector<Tensor> case
template <typename... Tail>
struct ComputeCallHelper<std::vector<Tensor>&, Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
auto& range = ctx->InputRangeAt(in_idx);
auto arg = ctx->InputsBetween(range.first, range.second);
ComputeCallHelper<
Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(ctx,
pargs...,
arg);
}
};

template <int out_idx, typename T>
struct ComputeReturnHelper;

12 changes: 12 additions & 0 deletions paddle/phi/api/lib/op_meta_info.cc
@@ -110,6 +110,18 @@ paddle::optional<Tensor> CustomOpKernelContext::OptionalInputAt(size_t idx) {
return paddle::make_optional<paddle::Tensor>(inputs_.at(idx));
}

paddle::optional<std::vector<Tensor>>
CustomOpKernelContext::OptionalInputsBetween(size_t start, size_t end) {
std::vector<Tensor> rlt;
for (size_t i = start; i < end; ++i) {
if (!inputs_.at(i).is_initialized()) {
return paddle::none;
}
rlt.emplace_back(inputs_.at(i));
}
return paddle::make_optional<std::vector<Tensor>>(rlt);
}

Tensor* CustomOpKernelContext::MutableOutputAt(size_t idx) {
return &(outputs_.at(idx));
}
16 changes: 16 additions & 0 deletions python/paddle/utils/cpp_extension/extension_utils.py
@@ -1057,6 +1057,22 @@ def _gen_output_content(
if out_idx in inplace_reverse_idx:
in_idx = inplace_reverse_idx[out_idx]
if (
in_idx != -1
and "@VECTOR" in in_names[in_idx]
and "@OPTIONAL" in in_names[in_idx]
):
# inplace optional vector<Tensor> output case
lower_in_names = in_names[in_idx].split("@")[0].lower()
dynamic_content += f"""
{indent}if {lower_in_names} is not None:
{indent} outs['{out_name}'] = [core.eager.Tensor() for _ in range(len({lower_in_names}))]
{indent}else:
{indent} outs['{out_name}'] = core.eager.Tensor()
{indent}ctx.add_outputs(outs['{out_name}'])"""
static_content += f"""
{indent}if {lower_in_names} is not None:
{indent} outs['{out_name}'] = [helper.create_variable(dtype='float32') for _ in range(len({lower_in_names}))]"""
elif (
in_idx != -1 and "@VECTOR" in in_names[in_idx]
): # inplace vector<Tensor> output case
lower_in_names = in_names[in_idx].split("@")[0].lower()
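For readability, here is roughly what the dynamic-mode template above expands to for an op whose inplace map pairs Optional(Vec("Y")) with Optional(Vec("OutY")). This is a hand-rendered sketch, not standalone code: the real source is emitted by _gen_output_content, the dictionary key is the op's registered output name (shown here simply as 'OutY'), and core, ctx, outs, and y come from the surrounding generated function.

# Hand-rendered sketch of the generated dynamic-mode glue (illustrative names).
if y is not None:
    # One fresh eager Tensor per element of y, so OutY can alias Y element-wise.
    outs['OutY'] = [core.eager.Tensor() for _ in range(len(y))]
else:
    # Optional vector input absent: fall back to a single placeholder output.
    outs['OutY'] = core.eager.Tensor()
ctx.add_outputs(outs['OutY'])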
113 changes: 113 additions & 0 deletions test/custom_op/custom_optional.cc
@@ -300,3 +300,116 @@ PD_BUILD_GRAD_OP(custom_optional_inplace_add)
paddle::Grad(paddle::Optional("Y"))}})
.SetKernelFn(PD_KERNEL(AddOptionalInplaceBackward))
.SetInferShapeFn(PD_INFER_SHAPE(AddOptionalInplaceBackwardInferShape));

/*
if (y) {
outX = 2 * x + y[1...n];
outY[i] = x + y[i];
} else {
outX = 2 * x;
outY = None;
}
*/
std::vector<paddle::Tensor> AddOptionalInplaceVectorForward(
const paddle::Tensor& x,
paddle::optional<std::vector<paddle::Tensor>>& y) { // NOLINT
PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
paddle::Tensor outX = paddle::zeros(x.shape(), x.dtype(), x.place());

PD_DISPATCH_FLOATING_TYPES(
x.type(), "AddOptionalInplaceVectorForward", ([&] {
add_two_pointers<data_t>(
x.data<data_t>(), x.data<data_t>(), outX.data<data_t>(), x.size());
if (y) {
for (size_t i = 0; i < y->size(); ++i) {
add_one_pointer<data_t>(
y->at(i).data<data_t>(), outX.data<data_t>(), outX.size());
add_one_pointer<data_t>(
x.data<data_t>(), y->at(i).data<data_t>(), x.size());
}
}
}));
// No need to return y, because we set it as inplace input.
return {outX};
}

std::vector<paddle::DataType> AddOptionalInplaceVectorInferDtype(
const paddle::DataType& x_dtype,
const paddle::optional<std::vector<paddle::DataType>>& y_dtype) {
return {x_dtype};
}

std::vector<std::vector<int64_t>> AddOptionalInplaceVectorInferShape(
const std::vector<int64_t>& x_shape,
const paddle::optional<std::vector<std::vector<int64_t>>>& y_shape) {
return {x_shape};
}

/*
if (outy_grad) {
x_grad = outx_grad * 2 + outy_grad[1...n];
y_grad[i] = outx_grad + outy_grad[i];
} else {
x_grad = outx_grad * 2;
y_grad = None;
}
*/
std::vector<paddle::Tensor> AddOptionalInplaceVectorBackward(
const paddle::Tensor& x,
const paddle::optional<std::vector<paddle::Tensor>>& y,
const paddle::Tensor& outx_grad,
paddle::optional<std::vector<paddle::Tensor>>& outy_grad) { // NOLINT
PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");

paddle::Tensor x_grad = paddle::zeros(x.shape(), x.dtype(), x.place());

PD_DISPATCH_FLOATING_TYPES(
outx_grad.type(), "AddOptionalInplaceVectorBackward", ([&] {
add_two_pointers<data_t>(outx_grad.data<data_t>(),
outx_grad.data<data_t>(),
x_grad.data<data_t>(),
x_grad.size());
if (outy_grad) {
for (size_t i = 0; i < outy_grad->size(); ++i) {
add_one_pointer<data_t>(outy_grad->at(i).data<data_t>(),
x_grad.data<data_t>(),
x_grad.size());
add_one_pointer<data_t>(outx_grad.data<data_t>(),
outy_grad->at(i).data<data_t>(),
outx_grad.size());
}
}
}));

return {x_grad};
}

std::vector<std::vector<int64_t>> AddOptionalInplaceVectorBackwardInferShape(
const std::vector<int64_t>& x_shape,
const paddle::optional<std::vector<std::vector<int64_t>>>& y_shape,
const std::vector<int64_t>& x_grad_shape,
const paddle::optional<std::vector<std::vector<int64_t>>>& y_grad_shape) {
return {x_shape};
}

PD_BUILD_OP(custom_optional_inplace_add_vec)
.Inputs({"X", paddle::Optional(paddle::Vec("Y"))})
.Outputs({"OutX", paddle::Optional(paddle::Vec("OutY"))})
.SetInplaceMap({{paddle::Optional(paddle::Vec("Y")),
paddle::Optional(paddle::Vec("OutY"))}})
.SetKernelFn(PD_KERNEL(AddOptionalInplaceVectorForward))
.SetInferShapeFn(PD_INFER_SHAPE(AddOptionalInplaceVectorInferShape))
.SetInferDtypeFn(PD_INFER_DTYPE(AddOptionalInplaceVectorInferDtype));

PD_BUILD_GRAD_OP(custom_optional_inplace_add_vec)
.Inputs({"X",
paddle::Optional(paddle::Vec("Y")),
paddle::Grad("OutX"),
paddle::Grad(paddle::Optional(paddle::Vec("OutY")))})
.Outputs({paddle::Grad("X"),
paddle::Grad(paddle::Optional(paddle::Vec("Y")))})
.SetInplaceMap({{paddle::Grad(paddle::Optional(paddle::Vec("OutY"))),
paddle::Grad(paddle::Optional(paddle::Vec("Y")))}})
.SetKernelFn(PD_KERNEL(AddOptionalInplaceVectorBackward))
.SetInferShapeFn(
PD_INFER_SHAPE(AddOptionalInplaceVectorBackwardInferShape));
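For context, the newly registered custom_optional_inplace_add_vec op could be exercised from Python roughly as sketched below. This is a minimal sketch, assuming the file above is JIT-compiled with paddle.utils.cpp_extension.load; the module name is illustrative, the accompanying unit test is not shown in this excerpt, and the exact structure of the returned outputs depends on the generated Python wrapper.

# Minimal usage sketch (illustrative module name; wrapper details may differ).
import numpy as np
import paddle
from paddle.utils.cpp_extension import load

custom_ops = load(
    name="custom_optional_jit",      # illustrative name, not from the diff
    sources=["custom_optional.cc"],  # the file shown in this diff
)

x = paddle.to_tensor(np.random.rand(4, 8).astype("float32"))
y = [paddle.to_tensor(np.random.rand(4, 8).astype("float32")) for _ in range(2)]

# Optional vector input present: OutY aliases Y, so each y[i] is updated
# in place to x + y[i], and OutX holds 2 * x plus the sum of the original y.
outs_with_y = custom_ops.custom_optional_inplace_add_vec(x, y)

# Optional vector input absent (assuming the wrapper accepts None here):
# only OutX = 2 * x is computed and OutY stays None.
outs_without_y = custom_ops.custom_optional_inplace_add_vec(x, None)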