Skip to content

Commit

Permalink
Refactor fuse-code handling
Browse files Browse the repository at this point in the history
  • Loading branch information
daquexian committed Nov 21, 2019
1 parent 36c15fb commit f779d6c
Show file tree
Hide file tree
Showing 10 changed files with 197 additions and 184 deletions.
14 changes: 7 additions & 7 deletions common/daq.fbs
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ table CONV_2D_Input {
padding_bottom: int;
stride_x: int;
stride_y: int;
fuse:FuseCode;
fuse_code: FuseCode;
}

table CONV_2D_Output {
Expand All @@ -93,7 +93,7 @@ table AVERAGE_POOL_2D_Input {
stride_y: int;
kernel_width: int;
kernel_height: int;
fuse:FuseCode;
fuse_code: FuseCode;
}

table AVERAGE_POOL_2D_Output {
Expand All @@ -115,7 +115,7 @@ table MAX_POOL_2D_Input {
stride_y: int;
kernel_width: int;
kernel_height: int;
fuse:FuseCode;
fuse_code: FuseCode;
}

table MAX_POOL_2D_Output {
Expand Down Expand Up @@ -158,7 +158,7 @@ table FULLY_CONNECTED_Input {
input: string;
weight: string;
bias: string;
fuse:FuseCode;
fuse_code: FuseCode;
}

table FULLY_CONNECTED_Output {
Expand All @@ -173,7 +173,7 @@ table FULLY_CONNECTED {
table ADD_Input {
input1: string;
input2: string;
fuse:FuseCode;
fuse_code: FuseCode;
}

table ADD_Output {
Expand Down Expand Up @@ -210,7 +210,7 @@ table DEPTHWISE_CONV_2D_Input {
stride_x: int;
stride_y: int;
depth_multiplier: int;
fuse:FuseCode;
fuse_code: FuseCode;
}

table DEPTHWISE_CONV_2D_Output {
Expand Down Expand Up @@ -273,7 +273,7 @@ table STRIDED_SLICE {
table MUL_Input {
input1: string;
input2: string;
fuse:FuseCode;
fuse_code: FuseCode;
}

table MUL_Output {
Expand Down
32 changes: 17 additions & 15 deletions dnnlibrary/DaqReader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -218,38 +218,38 @@ expected<Unit, std::string> AddLayers(const DNN::Model &model,
case DNN::LayerType::CONV_2D: {
UNPACK_LAYER_QUANT(CONV_2D, input, weight, bias, padding_left,
padding_right, padding_top, padding_bottom,
stride_x, stride_y, fuse);
stride_x, stride_y, fuse_code);
const dnn::optional<std::string> bias_right_type =
(bias == "") ? dnn::nullopt : dnn::make_optional(bias);

TRY(builder.AddLayer_CONV_2D(
input, weight, bias_right_type, padding_left, padding_right,
padding_top, padding_bottom, stride_x, stride_y, fuse,
padding_top, padding_bottom, stride_x, stride_y, fuse_code,
output, quant_info));
break;
}
case DNN::LayerType::AVERAGE_POOL_2D: {
UNPACK_LAYER_QUANT(AVERAGE_POOL_2D, input, padding_left,
padding_right, padding_top, padding_bottom,
stride_x, stride_y, kernel_width,
kernel_height, fuse);
kernel_height, fuse_code);

TRY(builder.AddLayer_AVERAGE_POOL_2D(
input, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, fuse, output, quant_info));
kernel_height, fuse_code, output, quant_info));
break;
}
case DNN::LayerType::MAX_POOL_2D: {
UNPACK_LAYER_QUANT(MAX_POOL_2D, input, padding_left,
padding_right, padding_top, padding_bottom,
stride_x, stride_y, kernel_width,
kernel_height, fuse);
kernel_height, fuse_code);

TRY(builder.AddLayer_MAX_POOL_2D(
input, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, fuse, output, quant_info));
kernel_height, fuse_code, output, quant_info));
break;
}
case DNN::LayerType::RELU: {
Expand All @@ -265,18 +265,20 @@ expected<Unit, std::string> AddLayers(const DNN::Model &model,
break;
}
case DNN::LayerType::FULLY_CONNECTED: {
UNPACK_LAYER_QUANT(FULLY_CONNECTED, input, weight, bias, fuse);
UNPACK_LAYER_QUANT(FULLY_CONNECTED, input, weight, bias,
fuse_code);
const dnn::optional<std::string> bias_right_type =
(bias == "") ? dnn::nullopt : dnn::make_optional(bias);

TRY(builder.AddLayer_FULLY_CONNECTED(
input, weight, bias_right_type, fuse, output, quant_info));
TRY(builder.AddLayer_FULLY_CONNECTED(input, weight,
bias_right_type, fuse_code,
output, quant_info));
break;
}
case DNN::LayerType::ADD: {
UNPACK_LAYER_QUANT(ADD, input1, input2, fuse);
UNPACK_LAYER_QUANT(ADD, input1, input2, fuse_code);

TRY(builder.AddLayer_ADD(input1, input2, fuse, output,
TRY(builder.AddLayer_ADD(input1, input2, fuse_code, output,
quant_info));
break;
}
Expand All @@ -290,14 +292,14 @@ expected<Unit, std::string> AddLayers(const DNN::Model &model,
UNPACK_LAYER_QUANT(DEPTHWISE_CONV_2D, input, weight, bias,
padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y,
depth_multiplier, fuse);
depth_multiplier, fuse_code);
const dnn::optional<std::string> bias_right_type =
(bias == "") ? dnn::nullopt : dnn::make_optional(bias);

TRY(builder.AddLayer_DEPTHWISE_CONV_2D(
input, weight, bias_right_type, padding_left, padding_right,
padding_top, padding_bottom, stride_x, stride_y,
depth_multiplier, fuse, output, quant_info));
depth_multiplier, fuse_code, output, quant_info));
break;
}
case DNN::LayerType::BATCH_TO_SPACE_ND: {
Expand All @@ -324,9 +326,9 @@ expected<Unit, std::string> AddLayers(const DNN::Model &model,
break;
}
case DNN::LayerType::MUL: {
UNPACK_LAYER_QUANT(MUL, input1, input2, fuse);
UNPACK_LAYER_QUANT(MUL, input1, input2, fuse_code);

TRY(builder.AddLayer_MUL(input1, input2, fuse, output,
TRY(builder.AddLayer_MUL(input1, input2, fuse_code, output,
quant_info));
break;
}
Expand Down
14 changes: 7 additions & 7 deletions dnnlibrary/ModelBuilderImpl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ expected<Unit, std::string> ModelBuilder::AddLayer_CONV_2D(
const std::string &input, const std::string &weight,
const dnn::optional<std::string> &bias, int32_t padding_left,
int32_t padding_right, int32_t padding_top, int32_t padding_bottom,
int32_t stride_x, int32_t stride_y, int32_t fuse_code,
int32_t stride_x, int32_t stride_y, FuseCode fuse_code,
const std::string &output,
const dnn::optional<QuantInfo> &output_quant_info) {
if (nnapi_->android_sdk_version < 27) {
Expand Down Expand Up @@ -69,7 +69,7 @@ expected<Unit, std::string> ModelBuilder::AddLayer_AVERAGE_POOL_2D(
const std::string &input, int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom, int32_t stride_x,
int32_t stride_y, int32_t kernel_width, int32_t kernel_height,
int32_t fuse_code, const std::string &output,
FuseCode fuse_code, const std::string &output,
const dnn::optional<QuantInfo> &output_quant_info) {
if (nnapi_->android_sdk_version < 27) {
return make_unexpected("AVERAGE_POOL_2D requires API 27");
Expand Down Expand Up @@ -97,7 +97,7 @@ expected<Unit, std::string> ModelBuilder::AddLayer_MAX_POOL_2D(
const std::string &input, int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom, int32_t stride_x,
int32_t stride_y, int32_t kernel_width, int32_t kernel_height,
int32_t fuse_code, const std::string &output,
FuseCode fuse_code, const std::string &output,
const dnn::optional<QuantInfo> &output_quant_info) {
if (nnapi_->android_sdk_version < 27) {
return make_unexpected("MAX_POOL_2D requires API 27");
Expand Down Expand Up @@ -162,7 +162,7 @@ expected<Unit, std::string> ModelBuilder::AddLayer_SOFTMAX(

expected<Unit, std::string> ModelBuilder::AddLayer_FULLY_CONNECTED(
const std::string &input, const std::string &weight,
const dnn::optional<std::string> &bias, int32_t fuse_code,
const dnn::optional<std::string> &bias, FuseCode fuse_code,
const std::string &output,
const dnn::optional<QuantInfo> &output_quant_info) {
if (nnapi_->android_sdk_version < 27) {
Expand Down Expand Up @@ -211,7 +211,7 @@ expected<Unit, std::string> ModelBuilder::AddLayer_FULLY_CONNECTED(
}

expected<Unit, std::string> ModelBuilder::AddLayer_ADD(
const std::string &input1, const std::string &input2, int32_t fuse_code,
const std::string &input1, const std::string &input2, FuseCode fuse_code,
const std::string &output,
const dnn::optional<QuantInfo> &output_quant_info) {
if (nnapi_->android_sdk_version < 27) {
Expand Down Expand Up @@ -262,7 +262,7 @@ expected<Unit, std::string> ModelBuilder::AddLayer_DEPTHWISE_CONV_2D(
const dnn::optional<std::string> &bias, int32_t padding_left,
int32_t padding_right, int32_t padding_top, int32_t padding_bottom,
int32_t stride_x, int32_t stride_y, int32_t depth_multiplier,
int32_t fuse_code, const std::string &output,
FuseCode fuse_code, const std::string &output,
const dnn::optional<QuantInfo> &output_quant_info) {
if (nnapi_->android_sdk_version < 27) {
return make_unexpected("DEPTHWISE_CONV_2D requires API 27");
Expand Down Expand Up @@ -402,7 +402,7 @@ expected<Unit, std::string> ModelBuilder::AddLayer_STRIDED_SLICE(
}

expected<Unit, std::string> ModelBuilder::AddLayer_MUL(
const std::string &input1, const std::string &input2, int32_t fuse_code,
const std::string &input1, const std::string &input2, FuseCode fuse_code,
const std::string &output,
const dnn::optional<QuantInfo> &output_quant_info) {
if (nnapi_->android_sdk_version < 27) {
Expand Down
25 changes: 13 additions & 12 deletions generate_code.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,10 @@ def add_tensor_operand(operand):
raise Exception('Unknown cpp_type {}'.format(operand['cpp_type']))


def has_fuse_code_attr(op: dict):
    """Return True if any of the op's inputs is the predefined 'fuse_code' input.

    Args:
        op: an op config dict with an 'input' list of input-descriptor dicts.

    Returns:
        bool: whether some input declares ``predefined == 'fuse_code'``.
    """
    # Generator expression short-circuits on the first match; .get() tolerates
    # input descriptors that carry no 'predefined' key at all.
    return any(x.get('predefined') == 'fuse_code' for x in op['input'])


def infer_cfg(cfg, target: Target):
next_pos = 0
for i, op in enumerate(cfg):
Expand Down Expand Up @@ -135,14 +139,10 @@ def infer_cfg(cfg, target: Target):
# op['nnapi'] = op['name'].upper()
# if 'dnn' not in op:
# op['dnn'] = op['name']
if 'fused' not in op:
op['fused'] = False
if target == Target.ModelBuilder and 'nnapi_input' in op:
op['input'].extend(op['nnapi_input'])
elif target == Target.OnnxConverter and 'dnn_input' in op:
op['input'].extend(op['dnn_input'])
if op['fused'] and target == Target.ModelBuilder:
op['input'].append({'name': 'fuse_code', 'nnapi_type': 'scalar', 'cpp_type': 'int32_t'})
if 'support_quant_asymm' not in op:
op['support_quant_asymm'] = False
if 'converter_simple' not in op:
Expand All @@ -160,6 +160,10 @@ def infer_cfg(cfg, target: Target):
ipt['cpp_type'] = 'optional_str'
ipt['is_onnx_attr'] = False
ipt['convert_func'] = 'OnnxToNnapiIdentity'
elif ipt['predefined'] == 'fuse_code':
ipt['name'] = 'fuse_code'
ipt['nnapi_type'] = 'scalar'
ipt['cpp_type'] = 'FuseCode'
if 'is_onnx_attr' not in ipt:
ipt['is_onnx_attr'] = True
if 'convert_func' not in ipt:
Expand Down Expand Up @@ -199,8 +203,8 @@ def generate_onnx_converter():
params = list(map(get_param, ipt_opt))
params_str = ', '.join(map(lambda param: "{} {}".format(*param), params))
cogoutl(f"void OnnxConverter::WriteDaqLayer_{op['nnapi']}{'' if op['converter_simple'] else 'Impl'}({params_str}) {{")
if op['fused']:
cogoutl(f"const auto activation = FindActivation(model_proto_, output);")
# if has_fuse_code_attr(op):
# cogoutl(f"const auto activation = FindActivation(model_proto_, output);")
for x in op['input']:
if not x['is_onnx_attr']:
if x['cpp_type'] == 'str':
Expand Down Expand Up @@ -247,13 +251,13 @@ def get_input_param(x):
return f"&{x['name']}_fb"
elif x['cpp_type'] == 'int32_list':
return f"&{x['name']}"
elif x['predefined'] == 'fuse_code':
return f"ConvertFuseCodeType({x['name']})"
else:
return x['name']

cogout(f"const auto input_param = DNN::Create{op['nnapi']}_InputDirect(builder_, ")
cogout(', '.join(list(map(get_input_param, op['input']))))
if op['fused']:
cogout(', ConvertFuseCodeType(activation.second)')
cogoutl(');')
# cogout(', ')
cogout(f"const auto output_param = DNN::Create{op['nnapi']}_OutputDirect(builder_, ")
Expand Down Expand Up @@ -287,8 +291,6 @@ def generate_daq_reader():
cogoutl(f"case DNN::LayerType::{op['nnapi']}: {{")

arg_names = [x['name'] for x in op['input']]
if op['fused']:
arg_names += ['fuse']
cogoutl(f"UNPACK_LAYER_QUANT({op['nnapi']}, {', '.join(arg_names)});")
arg_names += [x['name'] for x in op['output']]
for i, x in enumerate(op['input']):
Expand Down Expand Up @@ -318,13 +320,12 @@ def generate_fbs():
'optional_str': 'string',
'str_list': '[string]',
'float': 'float',
'FuseCode': 'FuseCode'
}
for i, op in enumerate(cfg):
cogoutl(f"table {op['nnapi']}_Input {{")
for x in op['input']:
cogoutl(f" {x['name']}: {d[x['cpp_type']]};")
if op['fused']:
cogoutl(' fuse:FuseCode;')
cogoutl('}')
cogoutl('')

Expand Down
Loading

0 comments on commit f779d6c

Please sign in to comment.