Skip to content

Commit

Permalink
Merge pull request #70 from JDAI-CV/add_ops
Browse files Browse the repository at this point in the history
Add three ops: sub, exp and abs, and update some code
  • Loading branch information
daquexian authored Jan 28, 2020
2 parents af84537 + e47f9df commit bac3cce
Show file tree
Hide file tree
Showing 10 changed files with 901 additions and 48 deletions.
47 changes: 47 additions & 0 deletions common/daq.fbs
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@ enum LayerType:byte {
MINIMUM,
MAXIMUM,
LOG,
ABS,
EXP,
SUB,
// Auto generated layer types end
}

Expand Down Expand Up @@ -439,6 +442,47 @@ table LOG {
output: LOG_Output;
}

// Input operand (blob name) for the element-wise ABS layer.
table ABS_Input {
input: string;
}

// Output operand (blob name) produced by ABS.
table ABS_Output {
output: string;
}

// ABS layer: element-wise absolute value.
// Lowered to ANEURALNETWORKS_ABS by ModelBuilder (requires API 29).
table ABS {
input: ABS_Input;
output: ABS_Output;
}

// Input operand (blob name) for the element-wise EXP layer.
table EXP_Input {
input: string;
}

// Output operand (blob name) produced by EXP.
table EXP_Output {
output: string;
}

// EXP layer: element-wise exponential.
// Lowered to ANEURALNETWORKS_EXP by ModelBuilder (requires API 29).
table EXP {
input: EXP_Input;
output: EXP_Output;
}

// Inputs for the element-wise SUB layer (output = input1 - input2).
table SUB_Input {
input1: string;
input2: string;
// Fused activation applied to the result (passed to NNAPI as a
// trailing scalar operand; see ModelBuilder::AddLayer_SUB_Impl).
fuse_code: FuseCode;
}

// Output operand (blob name) produced by SUB.
table SUB_Output {
output: string;
}

// SUB layer: element-wise subtraction with optional fused activation.
// Lowered to ANEURALNETWORKS_SUB on API 28+, emulated via MUL/ADD below that.
table SUB {
input: SUB_Input;
output: SUB_Output;
}

// Auto generated tables end


Expand Down Expand Up @@ -469,6 +513,9 @@ table Layer {
MINIMUM_param:MINIMUM;
MAXIMUM_param:MAXIMUM;
LOG_param:LOG;
ABS_param:ABS;
EXP_param:EXP;
SUB_param:SUB;
// Auto generated fields end
}

Expand Down
24 changes: 24 additions & 0 deletions dnnlibrary/DaqReader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,12 @@ std::string layer_type_to_str(DNN::LayerType type) {
return "MAXIMUM";
case DNN::LayerType::LOG:
return "LOG";
case DNN::LayerType::ABS:
return "ABS";
case DNN::LayerType::EXP:
return "EXP";
case DNN::LayerType::SUB:
return "SUB";
// DaqReader auto generated layer_type_to_str end
}
}
Expand Down Expand Up @@ -400,6 +406,24 @@ expected<Unit, std::string> AddLayers(const DNN::Model &model,

TRY(builder.AddLayer_LOG(input, output));
break;
}
case DNN::LayerType::ABS: {
UNPACK_LAYER_QUANT(ABS, input);

TRY(builder.AddLayer_ABS(input, output));
break;
}
case DNN::LayerType::EXP: {
UNPACK_LAYER_QUANT(EXP, input);

TRY(builder.AddLayer_EXP(input, output));
break;
}
case DNN::LayerType::SUB: {
UNPACK_LAYER_QUANT(SUB, input1, input2, fuse_code);

TRY(builder.AddLayer_SUB(input1, input2, fuse_code, output));
break;
}
// auto generated layer reader end
// case DNN::LayerType::CONV_2D: {
Expand Down
97 changes: 89 additions & 8 deletions dnnlibrary/ModelBuilderImpl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -709,6 +709,68 @@ expected<Unit, std::string> ModelBuilder::AddLayer_LOG(
return Unit();
}

expected<Unit, std::string> ModelBuilder::AddLayer_ABS(
    const std::string &input, const std::string &output) {
    // Element-wise absolute value, backed by ANEURALNETWORKS_ABS,
    // which is only available from Android API level 29.
    if (nnapi_->android_sdk_version < 29) {
        return make_unexpected("ABS requires API 29");
    }
    imm_blob_inputs_.insert(input);
    IndexSeq in_indexes;
    in_indexes.push_back(operand_indexes_.at(input));
    // Unary element-wise op: output shape equals input shape.
    shaper_.Identity(input, output);
    const OperandType out_type =
        GetOperandType(operand_types_.at(input).type, shaper_[output]);
    const auto out_idx =
        AddOperation(ANEURALNETWORKS_ABS, in_indexes, out_type)[0];
    RegisterOperand(output, out_idx, out_type);
    imm_blob_outputs_.insert(output);
    return Unit();
}

expected<Unit, std::string> ModelBuilder::AddLayer_EXP(
    const std::string &input, const std::string &output) {
    // Element-wise exponential, backed by ANEURALNETWORKS_EXP,
    // which is only available from Android API level 29.
    if (nnapi_->android_sdk_version < 29) {
        return make_unexpected("EXP requires API 29");
    }
    imm_blob_inputs_.insert(input);
    IndexSeq in_indexes;
    in_indexes.push_back(operand_indexes_.at(input));
    // Unary element-wise op: output shape equals input shape.
    shaper_.Identity(input, output);
    const OperandType out_type =
        GetOperandType(operand_types_.at(input).type, shaper_[output]);
    const auto out_idx =
        AddOperation(ANEURALNETWORKS_EXP, in_indexes, out_type)[0];
    RegisterOperand(output, out_idx, out_type);
    imm_blob_outputs_.insert(output);
    return Unit();
}

expected<Unit, std::string> ModelBuilder::AddLayer_SUB_Impl(
    const std::string &input1, const std::string &input2, FuseCode fuse_code,
    const std::string &output) {
    // Native element-wise subtraction: ANEURALNETWORKS_SUB exists
    // only from Android API level 28 (NNAPI 1.1).
    if (nnapi_->android_sdk_version < 28) {
        return make_unexpected("SUB requires API 28");
    }
    IndexSeq operand_ids;
    imm_blob_inputs_.insert(input1);
    operand_ids.push_back(operand_indexes_.at(input1));
    imm_blob_inputs_.insert(input2);
    operand_ids.push_back(operand_indexes_.at(input2));
    // The fused activation code is appended as a trailing scalar operand.
    AddScalarOperands(operand_ids, fuse_code);
    // Element-wise shape inference over the two inputs.
    shaper_.Eltwise(input1, input2, output);
    const OperandType out_type =
        GetOperandType(operand_types_.at(input1).type, shaper_[output]);
    const auto out_idx =
        AddOperation(ANEURALNETWORKS_SUB, operand_ids, out_type)[0];
    RegisterOperand(output, out_idx, out_type);
    imm_blob_outputs_.insert(output);
    return Unit();
}

// ModelBuilder auto generated methods end

expected<Unit, std::string> ModelBuilder::AddLayer_CONV_2D(
Expand Down Expand Up @@ -754,16 +816,35 @@ expected<Unit, std::string> ModelBuilder::AddLayer_PRELU(
// negative branch
float neg1_buf[1]{-1.f};
AddTensorFromBuffer(neg1_name, neg1_buf, {Type::TENSOR_FLOAT32, {1}});
AddLayer_MUL(input, neg1_name, FuseCode::NONE, imm2_name, dnn::nullopt);
AddLayer_RELU(imm2_name, imm3_name);
AddLayer_MUL(imm3_name, alpha, FuseCode::NONE, imm4_name, dnn::nullopt);
AddLayer_MUL(imm4_name, neg1_name, FuseCode::NONE, imm5_name,
dnn::nullopt);
TRY(AddLayer_MUL(input, neg1_name, FuseCode::NONE, imm2_name,
dnn::nullopt));
TRY(AddLayer_RELU(imm2_name, imm3_name));
TRY(AddLayer_MUL(imm3_name, alpha, FuseCode::NONE, imm4_name,
dnn::nullopt));
TRY(AddLayer_MUL(imm4_name, neg1_name, FuseCode::NONE, imm5_name,
dnn::nullopt));
// add two branches
AddLayer_ADD(imm1_name, imm5_name, FuseCode::NONE, output,
dnn::nullopt);
TRY(AddLayer_ADD(imm1_name, imm5_name, FuseCode::NONE, output,
dnn::nullopt));
} else {
TRY(AddLayer_PRELU_Impl(input, alpha, output));
}
return Unit();
}

expected<Unit, std::string> ModelBuilder::AddLayer_SUB(
    const std::string &input1, const std::string &input2, FuseCode fuse_code,
    const std::string &output) {
    // Computes output = fuse(input1 - input2).
    // ANEURALNETWORKS_SUB is only available from API 28, so on older
    // devices emulate it as input1 + (-1 * input2) via MUL and ADD.
    if (nnapi_->android_sdk_version < 28) {
        const auto neg1_name = output + "_neg1";
        const auto imm1_name = output + "_imm1";
        const float neg1_buf[1]{-1.f};
        AddTensorFromBuffer(neg1_name, neg1_buf, {Type::TENSOR_FLOAT32, {1}});
        TRY(AddLayer_MUL(input2, neg1_name, FuseCode::NONE, imm1_name,
                         dnn::nullopt));
        // Apply the fuse code on the final ADD so the emulated path
        // matches the fused semantics of the native SUB.
        TRY(AddLayer_ADD(input1, imm1_name, fuse_code, output));
    } else {
        // FIX: removed stray `AddLayer_PRELU_Impl(input, alpha, output);`
        // copied from AddLayer_PRELU -- `input` and `alpha` are not in
        // scope here, and the call has nothing to do with SUB.
        TRY(AddLayer_SUB_Impl(input1, input2, fuse_code, output));
    }
    return Unit();
}
Expand Down
Loading

0 comments on commit bac3cce

Please sign in to comment.