Commit

clear extra attrs of condition op in opmaker (#46150)
zyfncg authored Sep 20, 2022
1 parent d13a4a2 commit f65a61a
Showing 3 changed files with 50 additions and 51 deletions.
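In short, this commit removes the skip_eager_deletion_vars extra attribute from the C++ OpMaker definitions of conditional_block and while, and declares it instead under the extra section of paddle/phi/api/yaml/op_compat.yaml. A minimal sketch of the resulting YAML entries, assembled from the additions in the diff below (two-space indentation is assumed to match the rest of the file):

# Reconstructed from the additions in this commit; indentation is assumed.
- op : conditional_block
  backward : conditional_block_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']

- op : while
  backward : while_grad
  extra :
    attrs : ['str[] skip_eager_deletion_vars = {}']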
5 changes: 0 additions & 5 deletions paddle/fluid/operators/controlflow/conditional_block_op.h
@@ -119,11 +119,6 @@ class ConditionalBlockOpProtoMaker : public framework::OpProtoAndCheckerMaker {
"The conditional variable (Cond) is used as scalar "
"condition.")
.SetDefault(false);
AddAttr<std::vector<std::string>>(ConditionalOp::kSkipEagerDeletionVars,
"Vars that would not be deleted when "
"garbage collection strategy enables")
.SetDefault(std::vector<std::string>())
.AsExtra();
AddComment(R"DOC(Conditional block operator
If `is_scalar_condition` is True, the conditional variable (Cond) is a scalar,
5 changes: 0 additions & 5 deletions paddle/fluid/operators/controlflow/while_op.cc
@@ -221,11 +221,6 @@ class WhileOpMaker : public framework::OpProtoAndCheckerMaker {
"(bool, default false) Set to true for inference only, false "
"for training. Some layers may run faster when this is true.")
.SetDefault(false);
AddAttr<std::vector<std::string>>(kSkipEagerDeletionVars,
"Vars that would skip eager deletion."
"Users should not set this manually.")
.SetDefault(std::vector<std::string>())
.AsExtra();
AddComment(R"DOC(
)DOC");
}
91 changes: 50 additions & 41 deletions paddle/phi/api/yaml/op_compat.yaml
@@ -93,6 +93,11 @@
extra :
attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]

- op : conditional_block
backward : conditional_block_grad
extra :
attrs : ['str[] skip_eager_deletion_vars = {}']

- op : conv2d
backward : conv2d_grad
extra :
@@ -249,6 +254,11 @@
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]

- op : expand (expand_v2)
backward : expand_grad (expand_v2_grad)
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : expm1
backward : expm1_grad
extra :
@@ -293,6 +303,15 @@
extra :
attrs : [bool use_mkldnn = false]

- op : full (fill_constant)
extra :
attrs : [bool use_mkldnn = false]

- op : gather
backward : gather_grad
extra :
attrs : [bool overwrite = true]

- op : gelu
backward : gelu_grad
extra :
@@ -392,6 +411,12 @@
str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}']

- op : matmul_with_flatten (mul)
backward : matmul_with_flatten_grad (mul_grad)
extra :
attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
float scale_out = 1.0f, bool force_fp32_output = false]

- op : maximum (elementwise_max)
backward : maximum_grad (elementwise_max_grad)
extra :
@@ -447,6 +472,17 @@
outputs :
out : Out

- op : pool2d
backward : pool2d_grad
extra :
attrs : [bool use_mkldnn = false, bool use_quantizer = false,
str mkldnn_data_type = "float32", bool is_test = false]

- op : pool3d
backward : pool3d_grad
extra :
attrs : [bool use_mkldnn = false]

- op : prelu
backward : prelu_grad
extra :
@@ -619,6 +655,11 @@
extra :
attrs : [bool use_mkldnn = false]

- op : stack
backward : stack_grad
extra :
attrs : [bool use_mkldnn = false]

- op : subtract (elementwise_sub)
backward : subtract_grad (elementwise_sub_grad)
extra :
@@ -656,6 +697,12 @@
outputs :
out : Out

- op : transpose (transpose2)
backward : transpose_grad (transpose2_grad)
extra :
attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
str mkldnn_data_type = "float32"]

- op : trilinear_interp (trilinear_interp_v2)
backward : trilinear_interp_grad (trilinear_interp_v2_grad)
extra :
@@ -667,45 +714,7 @@
outputs :
out : Out

- op : expand (expand_v2)
backward : expand_grad (expand_v2_grad)
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]

- op : full (fill_constant)
extra :
attrs : [bool use_mkldnn = false]

- op : gather
backward : gather_grad
extra :
attrs : [bool overwrite = true]

- op : matmul_with_flatten (mul)
backward : matmul_with_flatten_grad (mul_grad)
extra :
attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}',
float scale_out = 1.0f, bool force_fp32_output = false]

- op : pool2d
backward : pool2d_grad
- op : while
backward : while_grad
extra :
attrs : [bool use_mkldnn = false, bool use_quantizer = false,
str mkldnn_data_type = "float32", bool is_test = false]

- op : pool3d
backward : pool3d_grad
extra :
attrs : [bool use_mkldnn = false]

- op : stack
backward : stack_grad
extra :
attrs : [bool use_mkldnn = false]


- op : transpose (transpose2)
backward : transpose_grad (transpose2_grad)
extra :
attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
str mkldnn_data_type = "float32"]
attrs : ['str[] skip_eager_deletion_vars = {}']
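Note that the op entries deleted at the tail of op_compat.yaml in this hunk (expand, full, gather, matmul_with_flatten, pool2d, pool3d, stack, transpose) are not dropped: the same entries are re-added in alphabetical order earlier in the file, as the additions in the hunks above show. For example, the relocated transpose entry, reconstructed with assumed two-space indentation:

# Reconstructed from the addition in the @@ -656,6 +697,12 @@ hunk above; indentation is assumed.
- op : transpose (transpose2)
  backward : transpose_grad (transpose2_grad)
  extra :
    attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false,
             str mkldnn_data_type = "float32"]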
