Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Optimize attribute selection performance #42294

Merged
merged 7 commits into from
Apr 28, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 5 additions & 6 deletions paddle/fluid/framework/attribute.h
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,7 @@ class AttrReader {
return *attr_value;
}

inline const Attribute& GetAttr(const std::string& name) const {
const Attribute* GetAttr(const std::string& name) const {
auto it = attrs_.find(name);
bool found = it != attrs_.end();
if (!found) {
Expand All @@ -251,11 +251,10 @@ class AttrReader {
found = it != default_attrs_->end();
}
}
PADDLE_ENFORCE_EQ(found, true,
platform::errors::NotFound(
"Attribute (%s) should be in AttributeMap.", name));

return it->second;
if (found) {
return &it->second;
}
return nullptr;
}

private:
Expand Down
434 changes: 238 additions & 196 deletions paddle/fluid/framework/infershape_utils.cc

Large diffs are not rendered by default.

349 changes: 198 additions & 151 deletions paddle/fluid/framework/operator.cc

Large diffs are not rendered by default.

372 changes: 206 additions & 166 deletions paddle/fluid/imperative/prepared_operator.h

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions paddle/phi/infermeta/unary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -3011,7 +3011,7 @@ void UnStackInferMeta(const MetaTensor& x,
}

void OneHotRawInferMeta(const MetaTensor& x,
int32_t depth,
const Scalar& depth,
DataType dtype,
bool allow_out_of_range,
MetaTensor* out) {
Expand All @@ -3021,7 +3021,7 @@ void OneHotRawInferMeta(const MetaTensor& x,
1,
phi::errors::InvalidArgument("Rank of Input(X) should be at least 1."));
auto out_dims_vec = phi::vectorize(x_dims);
out_dims_vec.push_back(depth);
out_dims_vec.push_back(depth.to<int>());
auto out_dims = phi::make_ddim(out_dims_vec);
out->set_dims(out_dims);
out->share_lod(x);
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/unary.h
Original file line number Diff line number Diff line change
Expand Up @@ -431,7 +431,7 @@ void UnStackInferMeta(const MetaTensor& x,
std::vector<MetaTensor*> outs);

void OneHotRawInferMeta(const MetaTensor& x,
int32_t depth,
const Scalar& depth,
DataType dtype,
bool allow_out_of_range,
MetaTensor* out);
Expand Down
7 changes: 4 additions & 3 deletions paddle/phi/kernels/cpu/one_hot_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -64,18 +64,19 @@ struct OneHotV2OpFunctor {
template <typename T, typename Context>
void OneHotRawKernel(const Context& dev_ctx,
const DenseTensor& x,
int32_t depth,
const Scalar& depth,
DataType dtype,
bool allow_out_of_range,
DenseTensor* out) {
auto depth_v = depth.to<int>();
auto out_dims = out->dims();
if (out_dims[out_dims.size() - 1] == -1) {
out_dims[out_dims.size() - 1] = depth;
out_dims[out_dims.size() - 1] = depth_v;
out->Resize(out_dims);
}

phi::VisitDataType(dtype,
OneHotV2OpFunctor<Context, T>(&x, out, depth, dev_ctx));
OneHotV2OpFunctor<Context, T>(&x, out, depth_v, dev_ctx));
}

} // namespace phi
Expand Down
7 changes: 4 additions & 3 deletions paddle/phi/kernels/gpu/one_hot_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -73,18 +73,19 @@ struct OneHotV2OpCUDAFunctor {
template <typename T, typename Context>
void OneHotRawKernel(const Context& dev_ctx,
const DenseTensor& x,
int32_t depth,
const Scalar& depth,
DataType dtype,
bool allow_out_of_range,
DenseTensor* out) {
auto depth_v = depth.to<int>();
auto out_dims = out->dims();
if (out_dims[out_dims.size() - 1] == -1) {
out_dims[out_dims.size() - 1] = depth;
out_dims[out_dims.size() - 1] = depth_v;
out->Resize(out_dims);
}

phi::VisitDataType(
dtype, OneHotV2OpCUDAFunctor<Context, T>(&x, out, depth, dev_ctx));
dtype, OneHotV2OpCUDAFunctor<Context, T>(&x, out, depth_v, dev_ctx));
}

} // namespace phi
Expand Down
3 changes: 1 addition & 2 deletions paddle/phi/kernels/one_hot_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,8 @@ void OneHotKernel(const Context& dev_ctx,
const DenseTensor& x,
const Scalar& num_classes_s,
DenseTensor* out) {
int num_classes = num_classes_s.to<int>();
OneHotRawKernel<T>(
dev_ctx, x, num_classes, phi::DataType::FLOAT32, false, out);
dev_ctx, x, num_classes_s, phi::DataType::FLOAT32, false, out);
}

} // namespace phi
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/one_hot_kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ void OneHotKernel(const Context& dev_ctx,
template <typename T, typename Context>
void OneHotRawKernel(const Context& dev_ctx,
const DenseTensor& x,
int32_t depth,
const Scalar& depth,
DataType dtype,
bool allow_out_of_range,
DenseTensor* out);
Expand Down