Skip to content

Commit

Permalink
Optimize attribute selection performance (#42294) (#42368)
Browse files Browse the repository at this point in the history
* opt attr eaque perf

* opt attr select code

* fix one hot infermeta

* polish get attr impl

* fix tests failed

* add testcases
  • Loading branch information
chenwhql authored Apr 28, 2022
1 parent d04a68d commit e0e534a
Show file tree
Hide file tree
Showing 10 changed files with 660 additions and 531 deletions.
11 changes: 5 additions & 6 deletions paddle/fluid/framework/attribute.h
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,7 @@ class AttrReader {
return *attr_value;
}

inline const Attribute& GetAttr(const std::string& name) const {
const Attribute* GetAttr(const std::string& name) const {
auto it = attrs_.find(name);
bool found = it != attrs_.end();
if (!found) {
Expand All @@ -251,11 +251,10 @@ class AttrReader {
found = it != default_attrs_->end();
}
}
PADDLE_ENFORCE_EQ(found, true,
platform::errors::NotFound(
"Attribute (%s) should be in AttributeMap.", name));

return it->second;
if (found) {
return &it->second;
}
return nullptr;
}

private:
Expand Down
434 changes: 238 additions & 196 deletions paddle/fluid/framework/infershape_utils.cc

Large diffs are not rendered by default.

349 changes: 198 additions & 151 deletions paddle/fluid/framework/operator.cc

Large diffs are not rendered by default.

372 changes: 206 additions & 166 deletions paddle/fluid/imperative/prepared_operator.h

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions paddle/phi/infermeta/unary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2945,7 +2945,7 @@ void UnStackInferMeta(const MetaTensor& x,
}

void OneHotRawInferMeta(const MetaTensor& x,
int32_t depth,
const Scalar& depth,
DataType dtype,
bool allow_out_of_range,
MetaTensor* out) {
Expand All @@ -2955,7 +2955,7 @@ void OneHotRawInferMeta(const MetaTensor& x,
1,
phi::errors::InvalidArgument("Rank of Input(X) should be at least 1."));
auto out_dims_vec = phi::vectorize(x_dims);
out_dims_vec.push_back(depth);
out_dims_vec.push_back(depth.to<int>());
auto out_dims = phi::make_ddim(out_dims_vec);
out->set_dims(out_dims);
out->share_lod(x);
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/unary.h
Original file line number Diff line number Diff line change
Expand Up @@ -426,7 +426,7 @@ void UnStackInferMeta(const MetaTensor& x,
std::vector<MetaTensor*> outs);

// Infers output meta (shape/dtype/lod) for the raw one-hot op.
// `depth` is a Scalar rather than a plain int32_t so it can carry a value
// supplied at runtime as well as a fixed attribute (see #42368).
void OneHotRawInferMeta(const MetaTensor& x,
                        const Scalar& depth,
                        DataType dtype,
                        bool allow_out_of_range,
                        MetaTensor* out);
Expand Down
7 changes: 4 additions & 3 deletions paddle/phi/kernels/cpu/one_hot_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -64,18 +64,19 @@ struct OneHotV2OpFunctor {
template <typename T, typename Context>
void OneHotRawKernel(const Context& dev_ctx,
const DenseTensor& x,
int32_t depth,
const Scalar& depth,
DataType dtype,
bool allow_out_of_range,
DenseTensor* out) {
auto depth_v = depth.to<int>();
auto out_dims = out->dims();
if (out_dims[out_dims.size() - 1] == -1) {
out_dims[out_dims.size() - 1] = depth;
out_dims[out_dims.size() - 1] = depth_v;
out->Resize(out_dims);
}

phi::VisitDataType(dtype,
OneHotV2OpFunctor<Context, T>(&x, out, depth, dev_ctx));
OneHotV2OpFunctor<Context, T>(&x, out, depth_v, dev_ctx));
}

} // namespace phi
Expand Down
7 changes: 4 additions & 3 deletions paddle/phi/kernels/gpu/one_hot_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -73,18 +73,19 @@ struct OneHotV2OpCUDAFunctor {
template <typename T, typename Context>
void OneHotRawKernel(const Context& dev_ctx,
const DenseTensor& x,
int32_t depth,
const Scalar& depth,
DataType dtype,
bool allow_out_of_range,
DenseTensor* out) {
auto depth_v = depth.to<int>();
auto out_dims = out->dims();
if (out_dims[out_dims.size() - 1] == -1) {
out_dims[out_dims.size() - 1] = depth;
out_dims[out_dims.size() - 1] = depth_v;
out->Resize(out_dims);
}

phi::VisitDataType(
dtype, OneHotV2OpCUDAFunctor<Context, T>(&x, out, depth, dev_ctx));
dtype, OneHotV2OpCUDAFunctor<Context, T>(&x, out, depth_v, dev_ctx));
}

} // namespace phi
Expand Down
3 changes: 1 addition & 2 deletions paddle/phi/kernels/one_hot_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,8 @@ void OneHotKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& num_classes_s,
DenseTensor* out) {
int num_classes = num_classes_s.to<int>();
OneHotRawKernel<T>(
dev_ctx, x, num_classes, phi::DataType::FLOAT32, false, out);
dev_ctx, x, num_classes_s, phi::DataType::FLOAT32, false, out);
}

} // namespace phi
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/one_hot_kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ void OneHotKernel(const Context& dev_ctx,
// Raw one-hot kernel declaration. `depth` is a Scalar rather than int32_t so
// it can carry a runtime-provided value as well as a fixed attribute
// (see #42368); CPU and GPU definitions resolve it with depth.to<int>().
template <typename T, typename Context>
void OneHotRawKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const Scalar& depth,
                     DataType dtype,
                     bool allow_out_of_range,
                     DenseTensor* out);
Expand Down

0 comments on commit e0e534a

Please sign in to comment.