From e05c1eb59ee6f76ad02c6021951e9f842dcab02f Mon Sep 17 00:00:00 2001 From: zyfncg Date: Mon, 19 Sep 2022 11:46:54 +0800 Subject: [PATCH 1/7] Clear extra attributes of some Op in OpMaker (Part4) (#46060) * clear extra attr of some ops in opmaker * revert clear use_cudnn for pool * fix test_operator_desc * fix Attr interface of OperatorBase --- paddle/phi/api/yaml/op_compat.yaml | 42 ++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+)

diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 516e03662d542..e82228d1c868f 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -254,6 +254,11 @@ extra : attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"] +- op : expand (expand_v2) + backward : expand_grad (expand_v2_grad) + extra : + attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"] + - op : expm1 backward : expm1_grad extra : @@ -302,6 +307,15 @@ extra : attrs : [bool use_mkldnn = false] +- op : full (fill_constant) + extra : + attrs : [bool use_mkldnn = false] + +- op : gather + backward : gather_grad + extra : + attrs : [bool overwrite = true] + - op : gather backward : gather_grad extra : @@ -412,6 +426,12 @@ attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}', float scale_out = 1.0f, bool force_fp32_output = false] +- op : matmul_with_flatten (mul) + backward : matmul_with_flatten_grad (mul_grad) + extra : + attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}', + float scale_out = 1.0f, bool force_fp32_output = false] + - op : maximum (elementwise_max) backward : maximum_grad (elementwise_max_grad) extra : @@ -473,6 +493,17 @@ attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32", bool is_test = false] +- op : pool2d + backward : pool2d_grad + extra : + attrs : [bool use_mkldnn = false, bool use_quantizer = false, + str mkldnn_data_type = "float32", bool is_test = false] + +- op : pool3d + backward : pool3d_grad + extra : + attrs : [bool use_mkldnn = false] + - op : pool3d backward : pool3d_grad extra : @@ -655,6 +686,11 @@ extra : attrs : [bool use_mkldnn = false] +- op : stack + backward : stack_grad + extra : + attrs : [bool use_mkldnn = false] + - op : subtract (elementwise_sub) backward : subtract_grad (elementwise_sub_grad) extra : @@ -698,6 +734,12 @@ attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false, str mkldnn_data_type = "float32"] +- op : transpose (transpose2) + backward : transpose_grad (transpose2_grad) + extra : + attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false, + str mkldnn_data_type = "float32"] + - op : trilinear_interp (trilinear_interp_v2) backward : trilinear_interp_grad (trilinear_interp_v2_grad) extra :
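A note on the yaml syntax above: inside a YAML flow sequence, the characters [, ], {, } and , are flow indicators and cannot appear in a plain (unquoted) scalar, which is why specs such as float[] scale_y = {1.0f} are single-quoted while bool use_mkldnn = false is not. A minimal sketch (illustrative only, not part of the patch; assumes PyYAML is available) showing that both forms load back as plain strings:

import yaml

# Both attr specs parse to plain strings; the bracketed one must be quoted
# because [, ], {, } and , are flow indicators inside a YAML flow sequence.
snippet = """
extra :
  attrs : [bool use_mkldnn = false, 'float[] scale_y = {1.0f}']
"""
print(yaml.safe_load(snippet)["extra"]["attrs"])
# ['bool use_mkldnn = false', 'float[] scale_y = {1.0f}']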
From fe44e6078485339ef99faa865def7f1e2e64afa4 Mon Sep 17 00:00:00 2001 From: zyfncg Date: Tue, 20 Sep 2022 10:31:33 +0800 Subject: [PATCH 2/7] clear extra attrs of condition op in opmaker (#46150) --- .../operators/controlflow/conditional_block_op.h | 5 ----- paddle/fluid/operators/controlflow/while_op.cc | 5 ----- paddle/phi/api/yaml/op_compat.yaml | 15 ++++++++++----- 3 files changed, 10 insertions(+), 15 deletions(-)

diff --git a/paddle/fluid/operators/controlflow/conditional_block_op.h b/paddle/fluid/operators/controlflow/conditional_block_op.h index d85eca8f5cb3a..f2407e9a3f05a 100644 --- a/paddle/fluid/operators/controlflow/conditional_block_op.h +++ b/paddle/fluid/operators/controlflow/conditional_block_op.h @@ -119,11 +119,6 @@ class ConditionalBlockOpProtoMaker : public framework::OpProtoAndCheckerMaker { "The conditional variable (Cond) is used as scalar " "condition.") .SetDefault(false); - AddAttr<std::vector<std::string>>(ConditionalOp::kSkipEagerDeletionVars, - "Vars that would not be deleted when " - "garbage collection strategy enables") - .SetDefault(std::vector<std::string>()) - .AsExtra(); AddComment(R"DOC(Conditional block operator If `is_scalar_condition` is True, the conditional variable (Cond) is a scalar,

diff --git a/paddle/fluid/operators/controlflow/while_op.cc b/paddle/fluid/operators/controlflow/while_op.cc index 4e0344b3b9391..10fa24b1bd4f5 100644 --- a/paddle/fluid/operators/controlflow/while_op.cc +++ b/paddle/fluid/operators/controlflow/while_op.cc @@ -221,11 +221,6 @@ class WhileOpMaker : public framework::OpProtoAndCheckerMaker { "(bool, default false) Set to true for inference only, false " "for training. Some layers may run faster when this is true.") .SetDefault(false); - AddAttr<std::vector<std::string>>(kSkipEagerDeletionVars, - "Vars that would skip eager deletion." - "Users should not set this manually.") - .SetDefault(std::vector<std::string>()) - .AsExtra(); AddComment(R"DOC( )DOC"); }

diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index e82228d1c868f..d8c3090299c08 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -93,6 +93,11 @@ extra : attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"] +- op : conditional_block + backward : conditional_block_grad + extra : + attrs : ['str[] skip_eager_deletion_vars = {}'] + - op : conv2d backward : conv2d_grad extra : @@ -504,11 +509,6 @@ extra : attrs : [bool use_mkldnn = false] -- op : pool3d - backward : pool3d_grad - extra : - attrs : [bool use_mkldnn = false] - - op : prelu backward : prelu_grad extra : @@ -750,3 +750,8 @@ x : X outputs : out : Out + +- op : while + backward : while_grad + extra : + attrs : ['str[] skip_eager_deletion_vars = {}']

From 0a317addeeaedcd209d8012d61bc03bd01c1d077 Mon Sep 17 00:00:00 2001 From: zyfncg Date: Thu, 22 Sep 2022 16:56:18 +0800 Subject: [PATCH 3/7] Clear extra attrs of lookup_table_v2 in OpMaker (#46321) * clear extra attrs of look_up_table_v2 in opmaker * fix bug --- paddle/fluid/operators/lookup_table_v2_op.cc | 34 ------------------- .../api/yaml/generator/ops_extra_info_gen.py | 2 +- paddle/phi/api/yaml/op_compat.yaml | 7 ++++ 3 files changed, 8 insertions(+), 35 deletions(-)

diff --git a/paddle/fluid/operators/lookup_table_v2_op.cc b/paddle/fluid/operators/lookup_table_v2_op.cc index 7baf76a1e1080..5f023fbad6a02 100644 --- a/paddle/fluid/operators/lookup_table_v2_op.cc +++ b/paddle/fluid/operators/lookup_table_v2_op.cc @@ -84,46 +84,12 @@ class LookupTableV2OpMaker : public framework::OpProtoAndCheckerMaker { "An input with type int64 " "contains the ids to be looked up in W."); AddOutput("Out", "The lookup results, which have the same type as W."); - AddAttr<bool>("is_sparse", - "(boolean, default false) " - "Sparse update.") - .SetDefault(false) - .AsExtra(); - AddAttr<bool>("is_distributed", - "(boolean, default false) distributed lookup table.") - .SetDefault(false) - .AsExtra(); AddAttr<int64_t>("padding_idx", "(int64, default -1) " "If the value is -1, it makes no effect to lookup. 
" "Otherwise the given value indicates padding the output " "with zeros whenever lookup encounters it in Ids.") .SetDefault(kNoPadding); - - // for parameter prefetch - AddAttr("remote_prefetch", "").SetDefault(false).AsExtra(); - AddAttr("trainer_id", "trainer id from 0 ~ worker_num.") - .SetDefault(0) - .AsExtra(); - AddAttr("slot", "slot of id").SetDefault(0).AsExtra(); - AddAttr>("height_sections", - "Height for each output SelectedRows.") - .SetDefault(std::vector({})) - .AsExtra(); - AddAttr>( - "epmap", - "(string vector, default 127.0.0.1:6164)" - "Server endpoints in the order of input variables for mapping") - .SetDefault({}) - .AsExtra(); - AddAttr>( - "table_names", - "(string vector, the split table names that will be fetched from " - "parameter server)" - "in the order of input variables for mapping") - .SetDefault({}) - .AsExtra(); - AddComment(R"DOC( Lookup Table V2 Operator. diff --git a/paddle/phi/api/yaml/generator/ops_extra_info_gen.py b/paddle/phi/api/yaml/generator/ops_extra_info_gen.py index 6f234e494f52d..b862d8bfe0a85 100644 --- a/paddle/phi/api/yaml/generator/ops_extra_info_gen.py +++ b/paddle/phi/api/yaml/generator/ops_extra_info_gen.py @@ -59,7 +59,7 @@ def map_code_template(attrs_str, attrs_checker_str): def parse_attr(attr_str): result = re.search( - r"(?P[a-z[\]]+)\s+(?P[a-zA-Z0-9_]+)\s*=\s*(?P\S+)", + r"(?P[a-zA-Z0-9_[\]]+)\s+(?P[a-zA-Z0-9_]+)\s*=\s*(?P\S+)", attr_str) return ATTR_TYPE_STRING_MAP[result.group('attr_type')], result.group( 'name'), result.group('default_val') diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index d8c3090299c08..11f1503558509 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -237,6 +237,13 @@ extra : attrs : [bool use_mkldnn = false] +- op : embedding (lookup_table_v2) + backward : embedding_grad (lookup_table_v2_grad) + extra : + attrs : [bool is_sparse = false, bool is_distributed = false, bool remote_prefetch = false, + int trainer_id = 0, int slot = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}', + 'str[] table_names = {}'] + - op : erf inputs : x : X From ef7caff5fe21d274e42304cbfbb94c5b38f1f050 Mon Sep 17 00:00:00 2001 From: zyfncg Date: Fri, 23 Sep 2022 11:02:58 +0800 Subject: [PATCH 4/7] clear extra attrs of quantize op in opmaker (#46418) --- paddle/fluid/operators/fake_quantize_op.cc | 90 -------------------- paddle/fluid/operators/quantize_linear_op.cc | 4 - paddle/phi/api/yaml/op_compat.yaml | 36 ++++++++ 3 files changed, 36 insertions(+), 94 deletions(-) diff --git a/paddle/fluid/operators/fake_quantize_op.cc b/paddle/fluid/operators/fake_quantize_op.cc index cb8263714a5e4..bf1f0103f768b 100644 --- a/paddle/fluid/operators/fake_quantize_op.cc +++ b/paddle/fluid/operators/fake_quantize_op.cc @@ -432,24 +432,6 @@ class FakeQuantOrWithDequantAbsMaxOpMaker "the received is %d", bit_length)); }); - AddAttr( - "round_type", - "(int, default 1) The round type of fp32 to int." - "0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2" - "1: rounding to nearest ties away from zero. 
From ef7caff5fe21d274e42304cbfbb94c5b38f1f050 Mon Sep 17 00:00:00 2001 From: zyfncg Date: Fri, 23 Sep 2022 11:02:58 +0800 Subject: [PATCH 4/7] clear extra attrs of quantize op in opmaker (#46418) --- paddle/fluid/operators/fake_quantize_op.cc | 90 -------------------- paddle/fluid/operators/quantize_linear_op.cc | 4 - paddle/phi/api/yaml/op_compat.yaml | 36 ++++++++ 3 files changed, 36 insertions(+), 94 deletions(-)

diff --git a/paddle/fluid/operators/fake_quantize_op.cc b/paddle/fluid/operators/fake_quantize_op.cc index cb8263714a5e4..bf1f0103f768b 100644 --- a/paddle/fluid/operators/fake_quantize_op.cc +++ b/paddle/fluid/operators/fake_quantize_op.cc @@ -432,24 +432,6 @@ class FakeQuantOrWithDequantAbsMaxOpMaker "the received is %d", bit_length)); }); - AddAttr<int>( - "round_type", - "(int, default 1) The round type of fp32 to int." - "0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2" - "1: rounding to nearest ties away from zero. Eg: round(1.5)=2, " - "round(2.5)=3") - .SetDefault(1) - .AddCustomChecker([](const int &round_type) { - PADDLE_ENFORCE_EQ( - round_type == 0 || round_type == 1, - true, - platform::errors::InvalidArgument( - "'round_type' should be 0 or 1, 0 rounding to " - "nearest ties to even and 1 is rounding to nearest " - "ties away from zero.but the received is %d", - round_type)); - }) - .AsExtra(); AddComment(R"DOC( This is a Base Op which supports FakeQuantAbsMaxOpMaker and FakeQuantDequantAbsMaxOpMaker. FakeQuantAbsMaxOp operator is used in the dynamic quantization. @@ -529,24 +511,6 @@ class FakeChannelWiseQuantizeAbsMaxOpMaker "the received is %d", bit_length)); }); - AddAttr<int>( - "round_type", - "(int, default 1) The round type of fp32 to int." - "0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2" - "1: rounding to nearest ties away from zero. Eg: round(1.5)=2, " - "round(2.5)=3") - .SetDefault(1) - .AddCustomChecker([](const int &round_type) { - PADDLE_ENFORCE_EQ( - round_type == 0 || round_type == 1, - true, - platform::errors::InvalidArgument( - "'round_type' should be 0 or 1, 0 rounding to " - "nearest ties to even and 1 is rounding to nearest " - "ties away from zero.but the received is %d", - round_type)); - }) - .AsExtra(); AddAttr<bool>("is_test", "(bool, default false) Set to true for inference only, false " "for training. Some layers may run faster when this is true.") .SetDefault(false); @@ -628,24 +592,6 @@ class FakeChannelWiseQuantizeDequantizeAbsMaxOpMaker "the received is %d", bit_length)); }); - AddAttr<int>( - "round_type", - "(int, default 1) The round type of fp32 to int." - "0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2" - "1: rounding to nearest ties away from zero. Eg: round(1.5)=2, " - "round(2.5)=3") - .SetDefault(1) - .AddCustomChecker([](const int &round_type) { - PADDLE_ENFORCE_EQ( - round_type == 0 || round_type == 1, - true, - platform::errors::InvalidArgument( - "'round_type' should be 0 or 1, 0 rounding to " - "nearest ties to even and 1 is rounding to nearest " - "ties away from zero.but the received is %d", - round_type)); - }) - .AsExtra(); AddComment(R"DOC( The scale of FakeChannelWiseQuantize operator is a vector. In detail, each channel of the input X has a scale value. @@ -715,24 +661,6 @@ class FakeQuantizeRangeAbsMaxOpMaker "the received is %d", bit_length)); }); - AddAttr<int>( - "round_type", - "(int, default 1) The round type of fp32 to int." - "0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2" - "1: rounding to nearest ties away from zero. Eg: round(1.5)=2, " - "round(2.5)=3") - .SetDefault(1) - .AddCustomChecker([](const int &round_type) { - PADDLE_ENFORCE_EQ( - round_type == 0 || round_type == 1, - true, - platform::errors::InvalidArgument( - "'round_type' should be 0 or 1, 0 rounding to " - "nearest ties to even and 1 is rounding to nearest " - "ties away from zero.but the received is %d", - round_type)); - }) - .AsExtra(); AddAttr<bool>("is_test", "(bool, default false) Set to true for inference only, false " "for training. Some layers may run faster when this is true.") .SetDefault(false); @@ -815,24 +743,6 @@ class FakeQuantOrWithDequantMovingAverageAbsMaxOpMaker "the received is %d", bit_length)); }); - AddAttr<int>( - "round_type", - "(int, default 1) The round type of fp32 to int." - "0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2" - "1: rounding to nearest ties away from zero. Eg: round(1.5)=2, " - "round(2.5)=3") - .SetDefault(1) - .AddCustomChecker([](const int &round_type) { - PADDLE_ENFORCE_EQ( - round_type == 0 || round_type == 1, - true, - platform::errors::InvalidArgument( - "'round_type' should be 0 or 1, 0 rounding to " - "nearest ties to even and 1 is rounding to nearest " - "ties away from zero.but the received is %d", - round_type)); - }) - .AsExtra(); AddAttr<bool>("is_test", "(bool, default false) Set to true for inference only, false " "for training. Some layers may run faster when this is true.") .SetDefault(false);

diff --git a/paddle/fluid/operators/quantize_linear_op.cc b/paddle/fluid/operators/quantize_linear_op.cc index 7012da3aeda94..d4cd685575eec 100644 --- a/paddle/fluid/operators/quantize_linear_op.cc +++ b/paddle/fluid/operators/quantize_linear_op.cc @@ -134,10 +134,6 @@ class QuantizeLinearOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("OutScale", "(Tensor) Current scale") .AsDispensable() .AsExtra(); // only qat use - AddAttr<float>("moving_rate", - "(float, default 0.9) moving rate.") // only qat use - .SetDefault(0.9) - .AsExtra(); AddAttr<int>("quant_axis", "(int, default 0) The axis for quantization. " "For conv2d, depthwise_conv2d, conv2d_transpose "

diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 11f1503558509..f7654f88dd510 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -179,6 +179,10 @@ str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()] +- op : dequantize_linear + extra : + attrs : [float moving_rate = 0.9] + - op : diag (diag_v2) backward : diag_grad (diag_v2_grad) inputs : @@ -276,6 +280,34 @@ extra : attrs : [bool use_mkldnn = false, bool use_cudnn = false] +- op : fake_channel_wise_quantize_abs_max + extra : + attrs : [int round_type = 1] + +- op : fake_channel_wise_quantize_dequantize_abs_max + extra : + attrs : [int round_type = 1] + +- op : fake_quantize_abs_max + extra : + attrs : [int round_type = 1] + +- op : fake_quantize_dequantize_abs_max + extra : + attrs : [int round_type = 1] + +- op : fake_quantize_dequantize_moving_average_abs_max + extra : + attrs : [int round_type = 1] + +- op : fake_quantize_moving_average_abs_max + extra : + attrs : [int round_type = 1] + +- op : fake_quantize_range_abs_max + extra : + attrs : [int round_type = 1] + - op : fft_c2c inputs: {x: X} outputs: {out: Out} @@ -521,6 +553,10 @@ extra : attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false] +- op : quantize_linear + extra : + attrs : [float moving_rate = 0.9] + - op : reciprocal backward : reciprocal_grad extra :
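All five removed checkers guarded the same pair of rounding modes, which now live in op_compat.yaml as the extra attr int round_type = 1. For reference (an illustrative sketch, not Paddle code): ties-to-even (round_type 0) is what Python's built-in round already does, and ties-away-from-zero (round_type 1, the default) can be expressed with floor, reproducing the examples from the removed doc strings:

import math

def round_away_from_zero(x):
    # round_type = 1: nearest integer, ties rounded away from zero
    return math.copysign(math.floor(abs(x) + 0.5), x)

for v in (1.5, 2.5, -2.5):
    print(v, round(v), round_away_from_zero(v))
# 1.5 -> 2 and 2.0; 2.5 -> 2 and 3.0; -2.5 -> -2 and -3.0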
From b7d9005667ae0cc7f5eb5399c8913cdd07300d91 Mon Sep 17 00:00:00 2001 From: zyfncg Date: Tue, 27 Sep 2022 09:45:12 +0000 Subject: [PATCH 5/7] delete repeated item --- paddle/phi/api/yaml/op_compat.yaml | 42 ------------------ 1 file changed, 42 deletions(-)

diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index f7654f88dd510..cee2540af7895 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -270,11 +270,6 @@ extra : attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"] -- op : expand (expand_v2) - backward : expand_grad (expand_v2_grad) - extra : - attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"] - - op : expm1 backward : expm1_grad extra : @@ -351,15 +346,6 @@ extra : attrs : [bool use_mkldnn = false] -- op : full (fill_constant) - extra : - attrs : [bool use_mkldnn = false] - -- op : gather - backward : gather_grad - extra : - attrs : [bool overwrite = true] - - op : gather backward : gather_grad extra : @@ -470,12 +456,6 @@ attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}', float scale_out = 1.0f, bool force_fp32_output = false] -- op : matmul_with_flatten (mul) - backward : matmul_with_flatten_grad (mul_grad) - extra : - attrs : [bool use_mkldnn = false, float scale_x = 1.0f, 'float[] scale_y = {1.0f}', - float scale_out = 1.0f, bool force_fp32_output = false] - - op : maximum (elementwise_max) backward : maximum_grad (elementwise_max_grad) extra : @@ -537,12 +517,6 @@ attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32", bool is_test = false] -- op : pool2d - backward : pool2d_grad - extra : - attrs : [bool use_mkldnn = false, bool use_quantizer = false, - str mkldnn_data_type = "float32", bool is_test = false] - - op : pool3d backward : pool3d_grad extra : @@ -724,16 +698,6 @@ extra : attrs : [bool use_mkldnn = false] -- op : stack - backward : stack_grad - extra : - attrs : [bool use_mkldnn = false] - -- op : stack - backward : stack_grad - extra : - attrs : [bool use_mkldnn = false] - - op : subtract (elementwise_sub) backward : subtract_grad (elementwise_sub_grad) extra : @@ -777,12 +741,6 @@ attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false, str mkldnn_data_type = "float32"] -- op : transpose (transpose2) - backward : transpose_grad (transpose2_grad) - extra : - attrs : [bool use_mkldnn = false, str data_format = "AnyLayout", bool use_quantizer = false, - str mkldnn_data_type = "float32"] - - op : trilinear_interp (trilinear_interp_v2) backward : trilinear_interp_grad (trilinear_interp_v2_grad) extra :
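The entries removed here are exact duplicates that Part4 (patch 1) re-added alongside pre-existing ones. Since op_compat.yaml is simply a top-level list of mappings keyed by op, a few lines of linting (a hypothetical, PyYAML-based sketch; not part of this series) would have flagged them:

import collections
import yaml

with open("paddle/phi/api/yaml/op_compat.yaml") as f:
    entries = yaml.safe_load(f)

# Count how often each op name appears; anything above 1 is a duplicate entry.
counts = collections.Counter(e["op"] for e in entries)
print([name for name, n in counts.items() if n > 1])
# before this commit, e.g.: ['expand (expand_v2)', 'full (fill_constant)', ...]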
From 8e80a986417ff09a7bf99f96d8b65a040fb44379 Mon Sep 17 00:00:00 2001 From: zyfncg Date: Mon, 26 Sep 2022 10:33:14 +0800 Subject: [PATCH 6/7] clear extra attrs of distribute op in opmaker (#46451) --- paddle/fluid/operators/nce_op.cc | 28 ------------------- .../pscore/distributed_push_sparse_op.cc | 5 ---- paddle/phi/api/yaml/op_compat.yaml | 10 +++++++ 3 files changed, 10 insertions(+), 33 deletions(-)

diff --git a/paddle/fluid/operators/nce_op.cc b/paddle/fluid/operators/nce_op.cc index c9c4d1a4c74f3..dd093729d1913 100644 --- a/paddle/fluid/operators/nce_op.cc +++ b/paddle/fluid/operators/nce_op.cc @@ -207,34 +207,6 @@ class NCEOpMaker : public framework::OpProtoAndCheckerMaker { // for parameter prefetch AddAttr<bool>("remote_prefetch", "").SetDefault(false); - AddAttr<int>("trainer_id", "trainer id from 0 ~ worker_num.") - .SetDefault(0) - .AsExtra(); - AddAttr<std::vector<int64_t>>("height_sections", - "Height for each output SelectedRows.") - .SetDefault(std::vector<int64_t>({})) - .AsExtra(); - AddAttr<std::vector<std::string>>( - "epmap", - "(string vector, default 127.0.0.1:6164)" - "Server endpoints in the order of input variables for mapping") - .SetDefault({}) - .AsExtra(); - AddAttr<std::vector<std::string>>( - "table_names", - "(string vector, the split table names that will be fetched from " - "parameter server)" - "in the order of input variables for mapping") - .SetDefault({}) - .AsExtra(); - - AddAttr<std::vector<int>>("custom_neg_classes", - "This attribute only be used in unitest. Classes " - "in this list wiil be used as negative classes " - "for every samples. 
Under normal conditions, " - "user should avoid setting this attribute.") - .SetDefault({}) - .AsExtra(); AddAttr<bool>("is_test", "(bool, default false) Set to true for inference " "only, false for training.") .SetDefault(false);

diff --git a/paddle/fluid/operators/pscore/distributed_push_sparse_op.cc b/paddle/fluid/operators/pscore/distributed_push_sparse_op.cc index 840e33939897f..a2bf63da10bd2 100644 --- a/paddle/fluid/operators/pscore/distributed_push_sparse_op.cc +++ b/paddle/fluid/operators/pscore/distributed_push_sparse_op.cc @@ -113,11 +113,6 @@ class DistributedPushSparseOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr<bool>("use_cvm_op", "(boolean, default false) Use cvm op or not.") .SetDefault(false); - AddAttr<std::vector<int>>("slots", - "[slot_id1, slot_id2] Slots array of Ids.") - .SetDefault({}) - .AsExtra(); - AddComment(R"DOC( Lookup Tablel Prefetch Operator. This operator is used to perform lookup on parameter W,

diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index cee2540af7895..0a309d8d5422c 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -208,6 +208,10 @@ outputs : out : Out +- op : distributed_push_sparse + extra : + attrs : ['int[] slots = {}'] + - op : divide (elementwise_div) backward : divide_grad (elementwise_div) extra : @@ -485,6 +489,12 @@ outputs : out : Out +- op : nce + backward : nce_grad + extra : + attrs : [int trainer_id = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}', + 'str[] table_names = {}', 'int[] custom_neg_classes = {}'] + - op : nearest_interp (nearest_interp_v2) backward : nearest_interp_grad (nearest_interp_v2_grad) extra :

From 43dd21f0ae473bb37e9b237cfcff3dd4cd0d4d44 Mon Sep 17 00:00:00 2001 From: zyfncg Date: Mon, 26 Sep 2022 11:28:15 +0800 Subject: [PATCH 7/7] clear extra atts of sequence_softmax in opmaker (#46457) --- .../fluid/operators/sequence_ops/sequence_softmax_op.cc | 8 -------- paddle/phi/api/yaml/op_compat.yaml | 5 +++++ 2 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc index 5417c20f3d419..5a6d2ab0820e2 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc @@ -73,14 +73,6 @@ class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { "(bool, default false) Only used in cudnn kernel, need install cudnn") .SetDefault(false) .AsExtra(); - AddAttr<std::string>( - "data_format", - "(string, default NCHW) Only used in " - "An optional string from: \"NHWC\", \"NCHW\". " - "Defaults to \"NHWC\". Specify the data format of the output data, " - "the input will be transformed automatically. ") - .SetDefault("AnyLayout") - .AsExtra(); AddComment(R"DOC( Sequence Softmax Operator.

diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 0a309d8d5422c..357781318a13e 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -632,6 +632,11 @@ extra : attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false] +- op : sequence_softmax + backward : sequence_softmax_grad + extra : + attrs : [str data_format = "AnyLayout"] + - op : shape extra : attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
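Taken together, the series moves these attributes out of the C++ OpMakers and into the extra blocks of op_compat.yaml, where each spec is a plain "type name = default" string. A minimal consumer sketch (illustrative only; the real code generation lives in paddle/phi/api/yaml/generator/ops_extra_info_gen.py) that turns one entry into a name -> (type, default) map using the regex patched above:

import re
import yaml

ATTR_RE = re.compile(
    r"(?P<attr_type>[a-zA-Z0-9_[\]]+)\s+(?P<name>[a-zA-Z0-9_]+)\s*=\s*(?P<default_val>\S+)"
)

def extra_attrs(entry):
    # name -> (type, default) for a single op_compat.yaml entry
    out = {}
    for spec in entry.get("extra", {}).get("attrs", []):
        m = ATTR_RE.search(spec)
        out[m.group("name")] = (m.group("attr_type"), m.group("default_val"))
    return out

entry = yaml.safe_load("""
- op : sequence_softmax
  extra :
    attrs : [str data_format = "AnyLayout"]
""")[0]
print(extra_attrs(entry))  # {'data_format': ('str', '"AnyLayout"')}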