From e47f9dfc7760913473a8faa156e0d9352922c94d Mon Sep 17 00:00:00 2001 From: daquexian Date: Tue, 28 Jan 2020 20:08:41 +0800 Subject: [PATCH] Add three ops: sub, exp and abs, and update some code --- common/daq.fbs | 47 +++ dnnlibrary/DaqReader.cpp | 24 ++ dnnlibrary/ModelBuilderImpl.cpp | 97 ++++- include/common/daq_generated.h | 554 ++++++++++++++++++++++++- include/common/data_types.h | 10 +- include/dnnlibrary/ModelBuilder.h | 16 + include/tools/onnx2daq/OnnxConverter.h | 4 + ops.yml | 29 +- tools/onnx2daq/OnnxConverter.cpp | 75 +++- tools/onnx2daq/OnnxConverterImpl.cpp | 93 +++++ 10 files changed, 901 insertions(+), 48 deletions(-) diff --git a/common/daq.fbs b/common/daq.fbs index cb8722f..1aa1c56 100644 --- a/common/daq.fbs +++ b/common/daq.fbs @@ -31,6 +31,9 @@ enum LayerType:byte { MINIMUM, MAXIMUM, LOG, + ABS, + EXP, + SUB, // Auto generated layer types end } @@ -439,6 +442,47 @@ table LOG { output: LOG_Output; } +table ABS_Input { + input: string; +} + +table ABS_Output { + output: string; +} + +table ABS { + input: ABS_Input; + output: ABS_Output; +} + +table EXP_Input { + input: string; +} + +table EXP_Output { + output: string; +} + +table EXP { + input: EXP_Input; + output: EXP_Output; +} + +table SUB_Input { + input1: string; + input2: string; + fuse_code: FuseCode; +} + +table SUB_Output { + output: string; +} + +table SUB { + input: SUB_Input; + output: SUB_Output; +} + // Auto generated tables end @@ -469,6 +513,9 @@ table Layer { MINIMUM_param:MINIMUM; MAXIMUM_param:MAXIMUM; LOG_param:LOG; + ABS_param:ABS; + EXP_param:EXP; + SUB_param:SUB; // Auto generated fields end } diff --git a/dnnlibrary/DaqReader.cpp b/dnnlibrary/DaqReader.cpp index dc0a5b5..5246b40 100644 --- a/dnnlibrary/DaqReader.cpp +++ b/dnnlibrary/DaqReader.cpp @@ -70,6 +70,12 @@ std::string layer_type_to_str(DNN::LayerType type) { return "MAXIMUM"; case DNN::LayerType::LOG: return "LOG"; + case DNN::LayerType::ABS: + return "ABS"; + case DNN::LayerType::EXP: + return "EXP"; + case 
DNN::LayerType::SUB: + return "SUB"; // DaqReader auto generated layer_type_to_str end } } @@ -400,6 +406,24 @@ expected AddLayers(const DNN::Model &model, TRY(builder.AddLayer_LOG(input, output)); break; + } + case DNN::LayerType::ABS: { + UNPACK_LAYER_QUANT(ABS, input); + + TRY(builder.AddLayer_ABS(input, output)); + break; + } + case DNN::LayerType::EXP: { + UNPACK_LAYER_QUANT(EXP, input); + + TRY(builder.AddLayer_EXP(input, output)); + break; + } + case DNN::LayerType::SUB: { + UNPACK_LAYER_QUANT(SUB, input1, input2, fuse_code); + + TRY(builder.AddLayer_SUB(input1, input2, fuse_code, output)); + break; } // auto generated layer reader end // case DNN::LayerType::CONV_2D: { diff --git a/dnnlibrary/ModelBuilderImpl.cpp b/dnnlibrary/ModelBuilderImpl.cpp index baab307..1585e2a 100644 --- a/dnnlibrary/ModelBuilderImpl.cpp +++ b/dnnlibrary/ModelBuilderImpl.cpp @@ -709,6 +709,68 @@ expected ModelBuilder::AddLayer_LOG( return Unit(); } +expected ModelBuilder::AddLayer_ABS( + const std::string &input, const std::string &output) { + if (nnapi_->android_sdk_version < 29) { + return make_unexpected("ABS requires API 29"); + } + IndexSeq input_indexes; + imm_blob_inputs_.insert(input); + const auto input_idx = operand_indexes_.at(input); + input_indexes.push_back(input_idx); + shaper_.Identity(input, output); + const OperandType operand_type = + GetOperandType(operand_types_.at(input).type, shaper_[output]); + const auto output_idx = + AddOperation(ANEURALNETWORKS_ABS, input_indexes, operand_type)[0]; + RegisterOperand(output, output_idx, operand_type); + imm_blob_outputs_.insert(output); + return Unit(); +} + +expected ModelBuilder::AddLayer_EXP( + const std::string &input, const std::string &output) { + if (nnapi_->android_sdk_version < 29) { + return make_unexpected("EXP requires API 29"); + } + IndexSeq input_indexes; + imm_blob_inputs_.insert(input); + const auto input_idx = operand_indexes_.at(input); + input_indexes.push_back(input_idx); + shaper_.Identity(input, 
output); + const OperandType operand_type = + GetOperandType(operand_types_.at(input).type, shaper_[output]); + const auto output_idx = + AddOperation(ANEURALNETWORKS_EXP, input_indexes, operand_type)[0]; + RegisterOperand(output, output_idx, operand_type); + imm_blob_outputs_.insert(output); + return Unit(); +} + +expected ModelBuilder::AddLayer_SUB_Impl( + const std::string &input1, const std::string &input2, FuseCode fuse_code, + const std::string &output) { + if (nnapi_->android_sdk_version < 28) { + return make_unexpected("SUB requires API 28"); + } + IndexSeq input_indexes; + imm_blob_inputs_.insert(input1); + const auto input1_idx = operand_indexes_.at(input1); + input_indexes.push_back(input1_idx); + imm_blob_inputs_.insert(input2); + const auto input2_idx = operand_indexes_.at(input2); + input_indexes.push_back(input2_idx); + AddScalarOperands(input_indexes, fuse_code); + shaper_.Eltwise(input1, input2, output); + const OperandType operand_type = + GetOperandType(operand_types_.at(input1).type, shaper_[output]); + const auto output_idx = + AddOperation(ANEURALNETWORKS_SUB, input_indexes, operand_type)[0]; + RegisterOperand(output, output_idx, operand_type); + imm_blob_outputs_.insert(output); + return Unit(); +} + // ModelBuilder auto generated methods end expected ModelBuilder::AddLayer_CONV_2D( @@ -754,16 +816,35 @@ expected ModelBuilder::AddLayer_PRELU( // negative branch float neg1_buf[1]{-1.f}; AddTensorFromBuffer(neg1_name, neg1_buf, {Type::TENSOR_FLOAT32, {1}}); - AddLayer_MUL(input, neg1_name, FuseCode::NONE, imm2_name, dnn::nullopt); - AddLayer_RELU(imm2_name, imm3_name); - AddLayer_MUL(imm3_name, alpha, FuseCode::NONE, imm4_name, dnn::nullopt); - AddLayer_MUL(imm4_name, neg1_name, FuseCode::NONE, imm5_name, - dnn::nullopt); + TRY(AddLayer_MUL(input, neg1_name, FuseCode::NONE, imm2_name, + dnn::nullopt)); + TRY(AddLayer_RELU(imm2_name, imm3_name)); + TRY(AddLayer_MUL(imm3_name, alpha, FuseCode::NONE, imm4_name, + dnn::nullopt)); + 
TRY(AddLayer_MUL(imm4_name, neg1_name, FuseCode::NONE, imm5_name, + dnn::nullopt)); // add two branches - AddLayer_ADD(imm1_name, imm5_name, FuseCode::NONE, output, - dnn::nullopt); + TRY(AddLayer_ADD(imm1_name, imm5_name, FuseCode::NONE, output, + dnn::nullopt)); + } else { + TRY(AddLayer_PRELU_Impl(input, alpha, output)); + } + return Unit(); +} + +expected ModelBuilder::AddLayer_SUB( + const std::string &input1, const std::string &input2, FuseCode fuse_code, + const std::string &output) { + if (nnapi_->android_sdk_version < 28) { + const auto neg1_name = output + "_neg1"; + const auto imm1_name = output + "_imm1"; + const float neg1_buf[1]{-1.f}; + AddTensorFromBuffer(neg1_name, neg1_buf, {Type::TENSOR_FLOAT32, {1}}); + TRY(AddLayer_MUL(input2, neg1_name, FuseCode::NONE, imm1_name, + dnn::nullopt)); + TRY(AddLayer_ADD(input1, imm1_name, fuse_code, output, dnn::nullopt)); + } else { - AddLayer_PRELU_Impl(input, alpha, output); + TRY(AddLayer_SUB_Impl(input1, input2, fuse_code, output)); + } + return Unit(); +} diff --git a/include/common/daq_generated.h b/include/common/daq_generated.h index 881d338..87cb446 100644 --- a/include/common/daq_generated.h +++ b/include/common/daq_generated.h @@ -158,6 +158,24 @@ struct LOG_Output; struct LOG; +struct ABS_Input; + +struct ABS_Output; + +struct ABS; + +struct EXP_Input; + +struct EXP_Output; + +struct EXP; + +struct SUB_Input; + +struct SUB_Output; + +struct SUB; + struct Layer; struct Model; @@ -278,11 +296,14 @@ enum class LayerType : int8_t { MINIMUM = 21, MAXIMUM = 22, LOG = 23, + ABS = 24, + EXP = 25, + SUB = 26, MIN = CONV_2D, - MAX = LOG + MAX = SUB }; -inline const LayerType (&EnumValuesLayerType())[24] { +inline const LayerType (&EnumValuesLayerType())[27] { static const LayerType values[] = { LayerType::CONV_2D, LayerType::AVERAGE_POOL_2D, @@ -307,7 +328,10 @@ inline const LayerType (&EnumValuesLayerType())[24] { LayerType::NEG, LayerType::MINIMUM, LayerType::MAXIMUM, - LayerType::LOG + LayerType::LOG, + LayerType::ABS, +
LayerType::EXP, + LayerType::SUB }; return values; } @@ -338,13 +362,16 @@ inline const char * const *EnumNamesLayerType() { "MINIMUM", "MAXIMUM", "LOG", + "ABS", + "EXP", + "SUB", nullptr }; return names; } inline const char *EnumNameLayerType(LayerType e) { - if (e < LayerType::CONV_2D || e > LayerType::LOG) return ""; + if (e < LayerType::CONV_2D || e > LayerType::SUB) return ""; const size_t index = static_cast(e); return EnumNamesLayerType()[index]; } @@ -5127,6 +5154,488 @@ inline flatbuffers::Offset CreateLOG( return builder_.Finish(); } +struct ABS_Input FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INPUT = 4 + }; + const flatbuffers::String *input() const { + return GetPointer(VT_INPUT); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_INPUT) && + verifier.VerifyString(input()) && + verifier.EndTable(); + } +}; + +struct ABS_InputBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_input(flatbuffers::Offset input) { + fbb_.AddOffset(ABS_Input::VT_INPUT, input); + } + explicit ABS_InputBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ABS_InputBuilder &operator=(const ABS_InputBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateABS_Input( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset input = 0) { + ABS_InputBuilder builder_(_fbb); + builder_.add_input(input); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateABS_InputDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *input = nullptr) { + auto input__ = input ? 
_fbb.CreateString(input) : 0; + return DNN::CreateABS_Input( + _fbb, + input__); +} + +struct ABS_Output FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUT = 4 + }; + const flatbuffers::String *output() const { + return GetPointer(VT_OUTPUT); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_OUTPUT) && + verifier.VerifyString(output()) && + verifier.EndTable(); + } +}; + +struct ABS_OutputBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_output(flatbuffers::Offset output) { + fbb_.AddOffset(ABS_Output::VT_OUTPUT, output); + } + explicit ABS_OutputBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ABS_OutputBuilder &operator=(const ABS_OutputBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateABS_Output( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset output = 0) { + ABS_OutputBuilder builder_(_fbb); + builder_.add_output(output); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateABS_OutputDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *output = nullptr) { + auto output__ = output ? 
_fbb.CreateString(output) : 0; + return DNN::CreateABS_Output( + _fbb, + output__); +} + +struct ABS FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INPUT = 4, + VT_OUTPUT = 6 + }; + const ABS_Input *input() const { + return GetPointer(VT_INPUT); + } + const ABS_Output *output() const { + return GetPointer(VT_OUTPUT); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_INPUT) && + verifier.VerifyTable(input()) && + VerifyOffset(verifier, VT_OUTPUT) && + verifier.VerifyTable(output()) && + verifier.EndTable(); + } +}; + +struct ABSBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_input(flatbuffers::Offset input) { + fbb_.AddOffset(ABS::VT_INPUT, input); + } + void add_output(flatbuffers::Offset output) { + fbb_.AddOffset(ABS::VT_OUTPUT, output); + } + explicit ABSBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ABSBuilder &operator=(const ABSBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateABS( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset input = 0, + flatbuffers::Offset output = 0) { + ABSBuilder builder_(_fbb); + builder_.add_output(output); + builder_.add_input(input); + return builder_.Finish(); +} + +struct EXP_Input FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INPUT = 4 + }; + const flatbuffers::String *input() const { + return GetPointer(VT_INPUT); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_INPUT) && + verifier.VerifyString(input()) && + verifier.EndTable(); + } +}; + +struct EXP_InputBuilder { + 
flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_input(flatbuffers::Offset input) { + fbb_.AddOffset(EXP_Input::VT_INPUT, input); + } + explicit EXP_InputBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + EXP_InputBuilder &operator=(const EXP_InputBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEXP_Input( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset input = 0) { + EXP_InputBuilder builder_(_fbb); + builder_.add_input(input); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateEXP_InputDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *input = nullptr) { + auto input__ = input ? _fbb.CreateString(input) : 0; + return DNN::CreateEXP_Input( + _fbb, + input__); +} + +struct EXP_Output FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUT = 4 + }; + const flatbuffers::String *output() const { + return GetPointer(VT_OUTPUT); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_OUTPUT) && + verifier.VerifyString(output()) && + verifier.EndTable(); + } +}; + +struct EXP_OutputBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_output(flatbuffers::Offset output) { + fbb_.AddOffset(EXP_Output::VT_OUTPUT, output); + } + explicit EXP_OutputBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + EXP_OutputBuilder &operator=(const EXP_OutputBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEXP_Output( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset 
output = 0) { + EXP_OutputBuilder builder_(_fbb); + builder_.add_output(output); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateEXP_OutputDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *output = nullptr) { + auto output__ = output ? _fbb.CreateString(output) : 0; + return DNN::CreateEXP_Output( + _fbb, + output__); +} + +struct EXP FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INPUT = 4, + VT_OUTPUT = 6 + }; + const EXP_Input *input() const { + return GetPointer(VT_INPUT); + } + const EXP_Output *output() const { + return GetPointer(VT_OUTPUT); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_INPUT) && + verifier.VerifyTable(input()) && + VerifyOffset(verifier, VT_OUTPUT) && + verifier.VerifyTable(output()) && + verifier.EndTable(); + } +}; + +struct EXPBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_input(flatbuffers::Offset input) { + fbb_.AddOffset(EXP::VT_INPUT, input); + } + void add_output(flatbuffers::Offset output) { + fbb_.AddOffset(EXP::VT_OUTPUT, output); + } + explicit EXPBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + EXPBuilder &operator=(const EXPBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEXP( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset input = 0, + flatbuffers::Offset output = 0) { + EXPBuilder builder_(_fbb); + builder_.add_output(output); + builder_.add_input(input); + return builder_.Finish(); +} + +struct SUB_Input FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INPUT1 = 4, + VT_INPUT2 = 6, + VT_FUSE_CODE = 8 + }; + const 
flatbuffers::String *input1() const { + return GetPointer(VT_INPUT1); + } + const flatbuffers::String *input2() const { + return GetPointer(VT_INPUT2); + } + FuseCode fuse_code() const { + return static_cast(GetField(VT_FUSE_CODE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_INPUT1) && + verifier.VerifyString(input1()) && + VerifyOffset(verifier, VT_INPUT2) && + verifier.VerifyString(input2()) && + VerifyField(verifier, VT_FUSE_CODE) && + verifier.EndTable(); + } +}; + +struct SUB_InputBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_input1(flatbuffers::Offset input1) { + fbb_.AddOffset(SUB_Input::VT_INPUT1, input1); + } + void add_input2(flatbuffers::Offset input2) { + fbb_.AddOffset(SUB_Input::VT_INPUT2, input2); + } + void add_fuse_code(FuseCode fuse_code) { + fbb_.AddElement(SUB_Input::VT_FUSE_CODE, static_cast(fuse_code), 0); + } + explicit SUB_InputBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SUB_InputBuilder &operator=(const SUB_InputBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSUB_Input( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset input1 = 0, + flatbuffers::Offset input2 = 0, + FuseCode fuse_code = FuseCode::None) { + SUB_InputBuilder builder_(_fbb); + builder_.add_input2(input2); + builder_.add_input1(input1); + builder_.add_fuse_code(fuse_code); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSUB_InputDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *input1 = nullptr, + const char *input2 = nullptr, + FuseCode fuse_code = FuseCode::None) { + auto input1__ = input1 ? _fbb.CreateString(input1) : 0; + auto input2__ = input2 ? 
_fbb.CreateString(input2) : 0; + return DNN::CreateSUB_Input( + _fbb, + input1__, + input2__, + fuse_code); +} + +struct SUB_Output FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUT = 4 + }; + const flatbuffers::String *output() const { + return GetPointer(VT_OUTPUT); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_OUTPUT) && + verifier.VerifyString(output()) && + verifier.EndTable(); + } +}; + +struct SUB_OutputBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_output(flatbuffers::Offset output) { + fbb_.AddOffset(SUB_Output::VT_OUTPUT, output); + } + explicit SUB_OutputBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SUB_OutputBuilder &operator=(const SUB_OutputBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSUB_Output( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset output = 0) { + SUB_OutputBuilder builder_(_fbb); + builder_.add_output(output); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSUB_OutputDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *output = nullptr) { + auto output__ = output ? 
_fbb.CreateString(output) : 0; + return DNN::CreateSUB_Output( + _fbb, + output__); +} + +struct SUB FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INPUT = 4, + VT_OUTPUT = 6 + }; + const SUB_Input *input() const { + return GetPointer(VT_INPUT); + } + const SUB_Output *output() const { + return GetPointer(VT_OUTPUT); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_INPUT) && + verifier.VerifyTable(input()) && + VerifyOffset(verifier, VT_OUTPUT) && + verifier.VerifyTable(output()) && + verifier.EndTable(); + } +}; + +struct SUBBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_input(flatbuffers::Offset input) { + fbb_.AddOffset(SUB::VT_INPUT, input); + } + void add_output(flatbuffers::Offset output) { + fbb_.AddOffset(SUB::VT_OUTPUT, output); + } + explicit SUBBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + SUBBuilder &operator=(const SUBBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSUB( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset input = 0, + flatbuffers::Offset output = 0) { + SUBBuilder builder_(_fbb); + builder_.add_output(output); + builder_.add_input(input); + return builder_.Finish(); +} + struct Layer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_TYPE = 4, @@ -5153,7 +5662,10 @@ struct Layer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { VT_NEG_PARAM = 46, VT_MINIMUM_PARAM = 48, VT_MAXIMUM_PARAM = 50, - VT_LOG_PARAM = 52 + VT_LOG_PARAM = 52, + VT_ABS_PARAM = 54, + VT_EXP_PARAM = 56, + VT_SUB_PARAM = 58 }; LayerType type() const { return static_cast(GetField(VT_TYPE, 0)); @@ 
-5230,6 +5742,15 @@ struct Layer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { const LOG *LOG_param() const { return GetPointer(VT_LOG_PARAM); } + const ABS *ABS_param() const { + return GetPointer(VT_ABS_PARAM); + } + const EXP *EXP_param() const { + return GetPointer(VT_EXP_PARAM); + } + const SUB *SUB_param() const { + return GetPointer(VT_SUB_PARAM); + } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_TYPE) && @@ -5281,6 +5802,12 @@ struct Layer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { verifier.VerifyTable(MAXIMUM_param()) && VerifyOffset(verifier, VT_LOG_PARAM) && verifier.VerifyTable(LOG_param()) && + VerifyOffset(verifier, VT_ABS_PARAM) && + verifier.VerifyTable(ABS_param()) && + VerifyOffset(verifier, VT_EXP_PARAM) && + verifier.VerifyTable(EXP_param()) && + VerifyOffset(verifier, VT_SUB_PARAM) && + verifier.VerifyTable(SUB_param()) && verifier.EndTable(); } }; @@ -5363,6 +5890,15 @@ struct LayerBuilder { void add_LOG_param(flatbuffers::Offset LOG_param) { fbb_.AddOffset(Layer::VT_LOG_PARAM, LOG_param); } + void add_ABS_param(flatbuffers::Offset ABS_param) { + fbb_.AddOffset(Layer::VT_ABS_PARAM, ABS_param); + } + void add_EXP_param(flatbuffers::Offset EXP_param) { + fbb_.AddOffset(Layer::VT_EXP_PARAM, EXP_param); + } + void add_SUB_param(flatbuffers::Offset SUB_param) { + fbb_.AddOffset(Layer::VT_SUB_PARAM, SUB_param); + } explicit LayerBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); @@ -5401,8 +5937,14 @@ inline flatbuffers::Offset CreateLayer( flatbuffers::Offset NEG_param = 0, flatbuffers::Offset MINIMUM_param = 0, flatbuffers::Offset MAXIMUM_param = 0, - flatbuffers::Offset LOG_param = 0) { + flatbuffers::Offset LOG_param = 0, + flatbuffers::Offset ABS_param = 0, + flatbuffers::Offset EXP_param = 0, + flatbuffers::Offset SUB_param = 0) { LayerBuilder builder_(_fbb); + builder_.add_SUB_param(SUB_param); + 
builder_.add_EXP_param(EXP_param); + builder_.add_ABS_param(ABS_param); builder_.add_LOG_param(LOG_param); builder_.add_MAXIMUM_param(MAXIMUM_param); builder_.add_MINIMUM_param(MINIMUM_param); diff --git a/include/common/data_types.h b/include/common/data_types.h index 31a52d5..7afb056 100644 --- a/include/common/data_types.h +++ b/include/common/data_types.h @@ -17,10 +17,12 @@ using tl::make_unexpected; using Unit = tl::monostate; using tl::unexpected; -#define TRY(x) \ - const auto ret = (x); \ - if (!ret) { \ - return ret; \ +#define TRY(x) \ + { \ + const auto ret = (x); \ + if (!ret) { \ + return ret; \ + } \ } enum class FuseCode { NONE, RELU, RELU1, RELU6 }; diff --git a/include/dnnlibrary/ModelBuilder.h b/include/dnnlibrary/ModelBuilder.h index b293298..ab41993 100644 --- a/include/dnnlibrary/ModelBuilder.h +++ b/include/dnnlibrary/ModelBuilder.h @@ -220,6 +220,22 @@ class ModelBuilder { const std::string &output); expected AddLayer_LOG(const std::string &input, const std::string &output); + expected AddLayer_ABS(const std::string &input, + const std::string &output); + expected AddLayer_EXP(const std::string &input, + const std::string &output); + expected AddLayer_SUB(const std::string &input1, + const std::string &input2, + FuseCode fuse_code, + const std::string &output); + + private: + expected AddLayer_SUB_Impl(const std::string &input1, + const std::string &input2, + FuseCode fuse_code, + const std::string &output); + + public: // ModelBuilder auto generated methods end Index AddTensorFromPersistentBuffer( const std::string &name, const void *buffer, diff --git a/include/tools/onnx2daq/OnnxConverter.h b/include/tools/onnx2daq/OnnxConverter.h index 7e6e1bf..0fba27c 100644 --- a/include/tools/onnx2daq/OnnxConverter.h +++ b/include/tools/onnx2daq/OnnxConverter.h @@ -184,6 +184,10 @@ class OnnxConverter { const std::string &input2, const std::string &output); void WriteDaqLayer_LOG(const std::string &input, const std::string &output); + void 
WriteDaqLayer_ABS(const std::string &input, const std::string &output); + void WriteDaqLayer_EXP(const std::string &input, const std::string &output); + void WriteDaqLayer_SUB(const std::string &input1, const std::string &input2, + FuseCode fuse_code, const std::string &output); // OnnxConverter auto generated methods end /** diff --git a/ops.yml b/ops.yml index 0487dc7..a46a29d 100644 --- a/ops.yml +++ b/ops.yml @@ -431,16 +431,19 @@ nnapi: LOG shaper: Identity api: 29 -#- -# name: Mean -# input: -# - -# name: dims -# nnapi_type: tensor -# cpp_type: int32_list -# needed_by_shaper: true -# - -# name: keep_dims -# nnapi_type: scalar -# cpp_type: bool -# api: 28 + +- nnapi: ABS + shaper: Identity + api: 29 + +- nnapi: EXP + shaper: Identity + api: 29 + +- nnapi: SUB + base_input_num: 2 + input: + - predefined: fuse_code + builder_simple: false + shaper: Eltwise + api: 28 diff --git a/tools/onnx2daq/OnnxConverter.cpp b/tools/onnx2daq/OnnxConverter.cpp index a4e9bc7..3be1337 100644 --- a/tools/onnx2daq/OnnxConverter.cpp +++ b/tools/onnx2daq/OnnxConverter.cpp @@ -448,30 +448,52 @@ std::pair OnnxConverter::IsNodeSupported( #endif NodeAttrHelper helper(node); const auto &op = node.op_type(); - const std::vector supported_types{ - "Conv", "AveragePool", - "MaxPool", "GlobalAveragePool", - "GlobalMaxPool", "Relu", - "PRelu", "Add", - "Mul", "Gemm", - "Softmax", "Concat", - "Dropout", "BatchNormalization", - "Reshape", "LRN", - "Identity", "Tanh", - "Floor", "Sigmoid"}; - if (std::find(supported_types.begin(), supported_types.end(), op) == - supported_types.end()) { + std::map supported_ops{{"Conv", 27}, + {"AveragePool", 27}, + {"MaxPool", 27}, + {"GlobalAveragePool", 27}, + {"GlobalMaxPool", 27}, + {"Relu", 27}, + {"PRelu", 27}, + {"Add", 27}, + {"Mul", 27}, + {"Gemm", 27}, + {"Softmax", 27}, + {"Concat", 27}, + {"Dropout", 27}, + {"BatchNormalization", 27}, + {"Reshape", 27}, + {"LRN", 27}, + {"Identity", 27}, + {"Tanh", 27}, + {"Floor", 27}, + {"Sigmoid", 27}, + {"Abs", 
29}, + {"Exp", 29}, + {"Sub", 27}}; + if (supported_ops.find(op) == supported_ops.end()) { return {false, "Unsupported operator " + op}; } +#ifdef __ANDROID__ + if (supported_ops[op] > GetAndroidSdkVersion()) { + return {false, "Operator " + op + " is only supported on API >= " + + std::to_string(supported_ops[op])}; + } +#endif if (op == "Conv") { const auto strides = helper.get("strides", vector{1, 1}); const auto pads = helper.get("pads", vector{0, 0, 0, 0}); const auto dilations = helper.get("dilations", vector{1, 1}); const auto group = helper.get("group", 1); - if (dilations != vector{1, 1} && strides != vector{1, 1}) { - return {false, - "Both dilations and strides > 1 is not supported for now"}; + // if (dilations != vector{1, 1} && strides != vector{1, 1}) { +#ifdef __ANDROID__ + if (dilations != vector{1, 1} && GetAndroidSdkVersion() < 29) { + return { + false, + // "Both dilations and strides > 1 is not supported for now"}; + "Dilations > 1 is not supported for API < 29 now"}; } +#endif const auto weight_name = m(node.input(1)); if (onnx_tensors_.has(weight_name)) { const auto &onnx_weight = onnx_tensors_.at(weight_name); @@ -798,7 +820,6 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto, const auto output_name = m(node.output(0)); WriteDaqLayer_RELU(input_name, output_name); VLOG(5) << "Converting Relu completed"; - } else if (op == "PRelu") { VLOG(5) << "Start converting PRelu"; const auto input_name = m(node.input(0)); @@ -964,6 +985,26 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto, const auto output_name = m(node.output(0)); WriteDaqLayer_LOGISTIC(input_name, output_name); VLOG(5) << "Converting Sigmoid completed"; + } else if (op == "Abs") { + VLOG(5) << "Start converting Abs"; + const auto input_name = m(node.input(0)); + const auto output_name = m(node.output(0)); + WriteDaqLayer_ABS(input_name, output_name); + VLOG(5) << "Converting Abs completed"; + } else if (op == "Exp") { + VLOG(5) <<
"Start converting Exp"; + const auto input_name = m(node.input(0)); + const auto output_name = m(node.output(0)); + WriteDaqLayer_EXP(input_name, output_name); + VLOG(5) << "Converting Exp completed"; + } else if (op == "Sub") { + VLOG(5) << "Start converting Sub"; + const auto input1_name = m(node.input(0)); + const auto input2_name = m(node.input(1)); + const auto output_name = m(node.output(0)); + const auto act = FindActivation(model_proto_, output_name).second; + WriteDaqLayer_SUB(input1_name, input2_name, act, output_name); + VLOG(5) << "Converting Sub completed"; } else { throw std::invalid_argument("Unsupported operator " + op); } diff --git a/tools/onnx2daq/OnnxConverterImpl.cpp b/tools/onnx2daq/OnnxConverterImpl.cpp index ab9a0a1..784682e 100644 --- a/tools/onnx2daq/OnnxConverterImpl.cpp +++ b/tools/onnx2daq/OnnxConverterImpl.cpp @@ -906,6 +906,99 @@ void OnnxConverter::WriteDaqLayer_LOG(const std::string &input, layers_.push_back(layer); } +void OnnxConverter::WriteDaqLayer_ABS(const std::string &input, + const std::string &output) { + { + const auto name = input; + + if (onnx_tensors_.has(name)) { + const auto &onnx_tensor = onnx_tensors_.at(name); + const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor); + shaper_.AddShape(name, new_tensor.shape); + nnapi_tensors_[name] = new_tensor; + CreateTensorFb(name, new_tensor); + } + } + + shaper_.Identity(m(input), output); + const auto input_param = + DNN::CreateABS_InputDirect(builder_, m(input).c_str()); + const auto output_param = + DNN::CreateABS_OutputDirect(builder_, output.c_str()); + const auto param = DNN::CreateABS(builder_, input_param, output_param); + const auto layer = + DNN::CreateLayer(builder_, DNN::LayerType::ABS, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param); + layers_.push_back(layer); +} + +void OnnxConverter::WriteDaqLayer_EXP(const std::string &input, + const std::string &output) { + { + const auto name = input; + + if (onnx_tensors_.has(name)) { + 
const auto &onnx_tensor = onnx_tensors_.at(name); + const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor); + shaper_.AddShape(name, new_tensor.shape); + nnapi_tensors_[name] = new_tensor; + CreateTensorFb(name, new_tensor); + } + } + + shaper_.Identity(m(input), output); + const auto input_param = + DNN::CreateEXP_InputDirect(builder_, m(input).c_str()); + const auto output_param = + DNN::CreateEXP_OutputDirect(builder_, output.c_str()); + const auto param = DNN::CreateEXP(builder_, input_param, output_param); + const auto layer = DNN::CreateLayer(builder_, DNN::LayerType::EXP, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, param); + layers_.push_back(layer); +} + +void OnnxConverter::WriteDaqLayer_SUB(const std::string &input1, + const std::string &input2, + FuseCode fuse_code, + const std::string &output) { + { + const auto name = input1; + + if (onnx_tensors_.has(name)) { + const auto &onnx_tensor = onnx_tensors_.at(name); + const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor); + shaper_.AddShape(name, new_tensor.shape); + nnapi_tensors_[name] = new_tensor; + CreateTensorFb(name, new_tensor); + } + } + + { + const auto name = input2; + + if (onnx_tensors_.has(name)) { + const auto &onnx_tensor = onnx_tensors_.at(name); + const auto new_tensor = OnnxToNnapiAxes0231(onnx_tensor); + shaper_.AddShape(name, new_tensor.shape); + nnapi_tensors_[name] = new_tensor; + CreateTensorFb(name, new_tensor); + } + } + + shaper_.Eltwise(m(input1), m(input2), output); + const auto input_param = DNN::CreateSUB_InputDirect( + builder_, m(input1).c_str(), m(input2).c_str(), + ConvertFuseCodeType(fuse_code)); + const auto output_param = + DNN::CreateSUB_OutputDirect(builder_, output.c_str()); + const auto param = DNN::CreateSUB(builder_, input_param, output_param); + const auto layer = DNN::CreateLayer(builder_, DNN::LayerType::SUB, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, param); + 
layers_.push_back(layer); +} + // OnnxConverter auto generated methods end } // namespace dnn