add some ops for full_quantization
yghstill committed Mar 2, 2022
commit 423912c (parent 3fc698f)
Showing 2 changed files with 14 additions and 39 deletions.
2 changes: 0 additions & 2 deletions python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py
@@ -936,8 +936,6 @@ def analysis_and_save_info(op_node, out_var_name):
             if op.type in (
                     self._quantizable_op_type + self._out_scale_op_list):
                 out_var_names = _get_op_output_var_names(op)
-                assert len(out_var_names) == 1, "Post training " + \
-                    "quantization only support one output for " + op.type
                 for var_name in out_var_names:
                     analysis_and_save_info(op, var_name)

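The removed assert enforced exactly one output per op. Several of the ops this commit adds, split in particular, fill their output slot with multiple variables, so the loop now simply records a scale for each one. A minimal runnable sketch of the multi-output case (variable names are hypothetical):

# "split" fills its single output slot "Out" with several variables, so
# len(out_var_names) can legitimately exceed 1 after this commit.
out_var_names = ["split_0.tmp_0", "split_0.tmp_1", "split_0.tmp_2"]
for var_name in out_var_names:
    # each output variable gets its own scale analysis
    print("collect output scale for", var_name)
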
51 changes: 14 additions & 37 deletions python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
@@ -47,42 +47,14 @@
 ]
 
 _out_scale_op_list = [
-    "conv2d",
-    "depthwise_conv2d",
-    "mul",
-    "matmul",
-    "matmul_v2",
-    "relu",
-    "leaky_relu",
-    "relu6",
-    "sigmoid",
-    "tanh",
-    "prelu",
-    "swish",
-    "softmax",
-    "batch_norm",
-    "layer_norm",
-    "elementwise_add",
-    "pool2d",
-    "reshape2",
-    "transpose2",
-    "concat",
-    "elementwise_mul",
-    "scale",
-    "slice",
-    "hard_swish",
-    "hard_sigmoid",
-    "conv2d_transpose",
-    "gru",
-    "bilinear_interp",
-    "nearest_interp",
-    "trilinear_interp",
-    "flatten",
-    "flatten2",
-    "transpose",
-    "pad2d",
-    "reshape",
-    "layer_norm",
+    "conv2d", "depthwise_conv2d", "mul", "matmul", "matmul_v2", "relu",
+    "leaky_relu", "relu6", "sigmoid", "tanh", "prelu", "swish", "softmax",
+    "batch_norm", "layer_norm", "elementwise_add", "pool2d", "reshape2",
+    "transpose2", "concat", "elementwise_mul", "scale", "slice", "hard_swish",
+    "hard_sigmoid", "conv2d_transpose", "gru", "bilinear_interp",
+    "nearest_interp", "trilinear_interp", "flatten", "flatten2", "transpose",
+    "pad2d", "reshape", "layer_norm", "split", "flatten_contiguous_range",
+    "squeeze2", "nearest_interp_v2", "fill_constant_batch_size_like"
 ]
 
 # list op real input and output names, to avoid processing input such as AxisTensor.
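Membership in _out_scale_op_list is what triggers output-scale collection during post-training quantization, as the first hunk above shows. A small self-contained sketch of that gating (list contents abbreviated; names copied from the diff):

_quantizable_op_type = ["conv2d", "mul"]
_out_scale_op_list = ["relu", "split", "nearest_interp_v2"]

op_type = "split"  # newly covered by this commit
if op_type in (_quantizable_op_type + _out_scale_op_list):
    # mirrors the membership check in post_training_quantization.py
    print("record output scales for", op_type)
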
@@ -137,6 +109,10 @@
     "flatten2": [["X"], ["Out"]],
     "unsqueeze2": [["X"], ["Out"]],
     "flatten_contiguous_range": [['X'], ["Out"]],
+    "split": [['X'], ["Out"]],
+    "squeeze2": [['X'], ["Out"]],
+    "nearest_interp_v2": [['X'], ["Out"]],
+    "fill_constant_batch_size_like": [['Input'], ["Out"]],
 }
 
 _conv_ops = ['conv2d', 'depthwise_conv2d', 'conv2d_transpose']
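Per the comment above the mapping, only the listed slots count as real inputs and outputs, which keeps auxiliary inputs such as AxisTensor out of quantization. A standalone sketch (entries excerpted from the diff; the real_slots helper is hypothetical):

_op_real_in_out_name = {
    "split": [["X"], ["Out"]],
    "fill_constant_batch_size_like": [["Input"], ["Out"]],
}

def real_slots(op_type):
    # ops missing from the mapping contribute no quantizable slots
    ins, outs = _op_real_in_out_name.get(op_type, ([], []))
    return ins, outs

print(real_slots("split"))  # (['X'], ['Out']) -- AxisTensor is never listed
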
@@ -1804,7 +1780,8 @@ class AddQuantDequantPass(object):
         "squeeze", "elementwise_sub", "mul", "matmul", "relu", "relu6",
         "leaky_relu", "tanh", "swish", "scale", "transpose", "transpose2",
         "sigmoid", "pad2d", "flatten", "flatten2", "batch_norm", "layer_norm",
-        "matmul_v2"
+        "matmul_v2", "split", "flatten_contiguous_range", "squeeze2",
+        "nearest_interp_v2", "fill_constant_batch_size_like"
     ]
 
     # To be compatible with PaddleSlim, not remove _activation_type for now
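For context, a hedged usage sketch of the pass whose default op list was just extended; the constructor arguments are assumed from this file's AddQuantDequantPass definition and should be verified before use:

import paddle.fluid as fluid
from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass

place = fluid.CPUPlace()
scope = fluid.global_scope()
# assumed flag: with is_full_quantized=True the pass targets every op type
# in _supported_quantizable_op_type, including the newly added ops
quant_pass = AddQuantDequantPass(scope=scope, place=place,
                                 is_full_quantized=True)
# quant_pass.apply(graph)  # graph: an IrGraph built from the program desc
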
