Merge pull request #67 from JDAI-CV/update_script_add_new_ops
[WIP] Make most boilerplate code auto-generated & refactor
daquexian authored Jan 28, 2020
2 parents a5d5627 + 2980add commit ad11c32
Showing 30 changed files with 9,009 additions and 2,654 deletions.
2 changes: 1 addition & 1 deletion .daq_pm/configs/infer
@@ -1,6 +1,6 @@
name DNNLibrary
type cpp
target dnn_retrieve_result
build_dir build_infer
build_dir build
cmake_options -DCMAKE_SYSTEM_NAME=Android -DCMAKE_TOOLCHAIN_FILE=~/Android/Sdk/ndk-bundle/build/cmake/android.toolchain.cmake -DANDROID_CPP_FEATURES=exceptions -DANDROID_PLATFORM=android-28 -DANDROID_ABI=arm64-v8a -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
program_arguments ~/adb_push_and_run.sh binaries/dnn_retrieve_result
3 changes: 3 additions & 0 deletions .gitignore
@@ -17,3 +17,6 @@ dist/
onnx2daq.egg-info/
__pycache__/
.eggs/

.clangd/
venv/
31 changes: 16 additions & 15 deletions binaries/ex_model_builder.cpp
@@ -1,14 +1,14 @@
/**
* It is an example showing how to use the ModelBuilder API to build a model
*/
#include <chrono>
#include <iostream>
#include <vector>

#include <common/helper.h>
#include <dnnlibrary/ModelBuilder.h>
#include <glog/logging.h>

#include <chrono>
#include <iostream>
#include <vector>

using namespace android::nn::wrapper;
using dnn::ModelBuilder;

@@ -26,23 +26,24 @@ int main() {
{Type::TENSOR_QUANT8_ASYMM, {3, 1, 1, 3}, 0.1, 150});
builder.AddTensorFromBuffer("bias", bias_buf,
{Type::TENSOR_INT32, {3}, 0.1, 0});
builder.AddDepthWiseConv("data", 1, 1, 0, 0, 0, 0,
ModelBuilder::ACTIVATION_NONE, 1, "weight",
"bias", "conv_fwd",
std::make_optional<ModelBuilder::QuantInfo>(
{Type::TENSOR_QUANT8_ASYMM, {0.5}, 100}));
builder.AddReLU("conv_fwd", "relu_fwd");
builder.AddOperationAdd("data", "relu_fwd", "output",
std::make_optional<ModelBuilder::QuantInfo>(
{Type::TENSOR_QUANT8_ASYMM, {0.05}, 100}));
builder.AddLayer_DEPTHWISE_CONV_2D(
"data", "weight", "bias", 1, 1, 0, 0, 0, 0, 1, dnn::FuseCode::NONE,
"conv_fwd",
std::make_optional<ModelBuilder::QuantInfo>(
{Type::TENSOR_QUANT8_ASYMM, {0.5}, 100}));
builder.AddLayer_RELU("conv_fwd", "relu_fwd");
builder.AddLayer_ADD("data", "relu_fwd", dnn::FuseCode::NONE, "output",
std::make_optional<ModelBuilder::QuantInfo>(
{Type::TENSOR_QUANT8_ASYMM, {0.05}, 100}));
} else {
builder.AddInput("data", {Type::TENSOR_FLOAT32, {1, 224, 224, 3}});
builder.AddTensorFromBuffer("weight", weight_buf,
{Type::TENSOR_FLOAT32, {3, 1, 1, 3}});
builder.AddTensorFromBuffer("bias", bias_buf,
{Type::TENSOR_FLOAT32, {3}});
builder.AddConv("data", 1, 1, 0, 0, 0, 0, ModelBuilder::ACTIVATION_NONE,
"weight", "bias", "output");
builder.AddLayer_CONV_2D("data", "weight", "bias", 1, 1, 0, 0, 0, 0,
dnn::FuseCode::NONE, false, 1, 1, "output",
dnn::nullopt);
}
auto model = builder.AddOutput("output").Compile(
ModelBuilder::PREFERENCE_FAST_SINGLE_ANSWER);
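
For reference, a minimal sketch of the float (non-quantized) path using the new auto-generated AddLayer_* API, assembled only from calls visible in this diff. The function name BuildFloatExample and the assumption that weight_buf/bias_buf are float buffers are illustrative; the AddLayer_CONV_2D arguments are copied verbatim from the hunk above.

    #include <dnnlibrary/ModelBuilder.h>

    using android::nn::wrapper::Type;
    using dnn::ModelBuilder;

    // Sketch only: mirrors the calls shown in this diff. The buffer
    // parameters and their float element type are placeholder assumptions.
    void BuildFloatExample(ModelBuilder &builder, const float *weight_buf,
                           const float *bias_buf) {
        builder.AddInput("data", {Type::TENSOR_FLOAT32, {1, 224, 224, 3}});
        builder.AddTensorFromBuffer("weight", weight_buf,
                                    {Type::TENSOR_FLOAT32, {3, 1, 1, 3}});
        builder.AddTensorFromBuffer("bias", bias_buf,
                                    {Type::TENSOR_FLOAT32, {3}});
        // Old API: builder.AddConv("data", 1, 1, 0, 0, 0, 0,
        //                          ModelBuilder::ACTIVATION_NONE,
        //                          "weight", "bias", "output");
        // New generated API, as in the hunk above (fuse code, nchw flag and
        // dilations are now explicit, quant info is an optional last argument):
        builder.AddLayer_CONV_2D("data", "weight", "bias", 1, 1, 0, 0, 0, 0,
                                 dnn::FuseCode::NONE, false, 1, 1, "output",
                                 dnn::nullopt);
        auto model = builder.AddOutput("output").Compile(
            ModelBuilder::PREFERENCE_FAST_SINGLE_ANSWER);
    }
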
16 changes: 8 additions & 8 deletions ci/download_and_test_models.sh
@@ -2,19 +2,19 @@

set -e

wget "https://s3.amazonaws.com/onnx-model-zoo/squeezenet/squeezenet1.1/squeezenet1.1.tar.gz" -O squeezenet1.1.tar.gz
tar xvf squeezenet1.1.tar.gz
wget -q "https://s3.amazonaws.com/onnx-model-zoo/squeezenet/squeezenet1.1/squeezenet1.1.tar.gz" -O squeezenet1.1.tar.gz
tar xf squeezenet1.1.tar.gz
python3 ci/validate_onnx.py squeezenet1.1 build_dnnlibrary/binaries/dnn_retrieve_result

wget "https://s3.amazonaws.com/onnx-model-zoo/mobilenet/mobilenetv2-1.0/mobilenetv2-1.0.tar.gz" -O mobilenetv2-1.0.tar.gz
tar xvf mobilenetv2-1.0.tar.gz
wget -q "https://s3.amazonaws.com/onnx-model-zoo/mobilenet/mobilenetv2-1.0/mobilenetv2-1.0.tar.gz" -O mobilenetv2-1.0.tar.gz
tar xf mobilenetv2-1.0.tar.gz
python3 ci/validate_onnx.py mobilenetv2-1.0 build_dnnlibrary/binaries/dnn_retrieve_result

wget "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v2/resnet18v2.tar.gz" -O resnet18v2.tar.gz
tar xvf resnet18v2.tar.gz
wget -q "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet18v2/resnet18v2.tar.gz" -O resnet18v2.tar.gz
tar xf resnet18v2.tar.gz
python3 ci/validate_onnx.py resnet18v2 build_dnnlibrary/binaries/dnn_retrieve_result

wget "https://s3.amazonaws.com/download.onnx/models/opset_9/bvlc_googlenet.tar.gz" -O bvlc_googlenet.tar.gz
tar xvf bvlc_googlenet.tar.gz
wget -q "https://s3.amazonaws.com/download.onnx/models/opset_9/bvlc_googlenet.tar.gz" -O bvlc_googlenet.tar.gz
tar xf bvlc_googlenet.tar.gz
python3 ci/validate_onnx.py bvlc_googlenet build_dnnlibrary/binaries/dnn_retrieve_result

81 changes: 43 additions & 38 deletions common/Shaper.cpp
@@ -10,35 +10,39 @@ Shaper::len_t Shaper::total(const Shape &shape) {
return Product(shape);
}

/**
* strides: [stride_y, stride_x]
* paddings: [top, left, bottom, right]
*/
void Shaper::Conv(const std::string &input_name, const std::string &weight_name,
const std::vector<int32_t> paddings,
const std::vector<int32_t> strides,
const std::string &output_name) {
Shaper::Conv(input_name, strides[1], strides[0], 1, 1, paddings[1],
paddings[3], paddings[0], paddings[2], weight_name,
output_name);
}

void Shaper::Conv(const std::string &input_name,
const std::vector<int32_t> paddings,
const std::vector<int32_t> strides,
const std::vector<int32_t> dilations,
const std::string &weight_name,
const std::string &output_name) {
Shaper::Conv(input_name, strides[1], strides[0], dilations[1], dilations[0],
paddings[1], paddings[3], paddings[0], paddings[2],
weight_name, output_name);
}
// /**
// * strides: [stride_y, stride_x]
// * paddings: [top, left, bottom, right]
// */
// void Shaper::Conv(const std::string &input_name, const std::string
// &weight_name,
// const std::vector<int32_t> paddings,
// const std::vector<int32_t> strides,
// const std::string &output_name) {
// Shaper::Conv(input_name, strides[1], strides[0], 1, 1, paddings[1],
// paddings[3], paddings[0], paddings[2], weight_name,
// output_name);
// }
//
// void Shaper::Conv(const std::string &input_name,
// const std::vector<int32_t> paddings,
// const std::vector<int32_t> strides,
// const std::vector<int32_t> dilations,
// const std::string &weight_name,
// const std::string &output_name) {
// Shaper::Conv(input_name, strides[1], strides[0], dilations[1],
// dilations[0],
// paddings[1], paddings[3], paddings[0], paddings[2],
// weight_name, output_name);
// }
void Shaper::Conv(const std::string &input, const std::string &weight,
int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom, int32_t stride_x,
int32_t stride_y, const std::string &output) {
Conv(input, stride_x, stride_y, 1, 1, padding_left, padding_right,
padding_top, padding_bottom, weight, output);
int32_t stride_y, const bool nchw, const int32_t dilation_x,
const int32_t dilation_y, const std::string &output) {
DNN_ASSERT_EQ(nchw, false);
Conv(input, stride_x, stride_y, dilation_x, dilation_y, padding_left,
padding_right, padding_top, padding_bottom, weight, output);
}

void Shaper::Conv(const std::string &input_name, int32_t strideX,
@@ -63,17 +67,18 @@ void Shaper::Conv(const std::string &input_name, int32_t strideX,
shape_map_[output_name] = outputDimen;
}

void Shaper::DepthwiseConv(const std::string &input_name,
const std::vector<int32_t> paddings,
const std::vector<int32_t> strides,
const std::vector<int32_t> dilations,
const std::string &weight_name,
const std::string &output_name) {
Shaper::DepthwiseConv(input_name, strides[1], strides[0], dilations[1],
dilations[0], paddings[1], paddings[3], paddings[0],
paddings[2], weight_name, output_name);
}

// void Shaper::DepthwiseConv(const std::string &input_name,
// const std::vector<int32_t> paddings,
// const std::vector<int32_t> strides,
// const std::vector<int32_t> dilations,
// const std::string &weight_name,
// const std::string &output_name) {
// Shaper::DepthwiseConv(input_name, strides[1], strides[0], dilations[1],
// dilations[0], paddings[1], paddings[3],
// paddings[0], paddings[2], weight_name,
// output_name);
// }
//
void Shaper::DepthwiseConv(const std::string &input_name,
const std::string &weight_name, int32_t padding_left,
int32_t padding_right, int32_t padding_top,
@@ -185,7 +190,7 @@ void Shaper::Softmax(const std::string &input_name,
shape_map_[output_name] = shape_map_.at(input_name);
}

void Shaper::Relu(const std::string &input_name,
void Shaper::ReLU(const std::string &input_name,
const std::string &output_name) {
shape_map_[output_name] = shape_map_.at(input_name);
}
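
For context, the body of Shaper::Conv(input_name, strideX, ...) is elided above; it computes the output dimensions and stores them in shape_map_. A minimal sketch of the standard dilated-convolution output-size formula it is expected to apply per spatial axis (an illustration, not the verbatim DNNLibrary code):

    #include <cstdint>

    // Standard output extent of a dilated convolution along one spatial axis.
    // Illustrative only; shown to explain what the elided Shaper::Conv body
    // computes before assigning shape_map_[output_name].
    int32_t ConvOutputDim(int32_t input, int32_t kernel, int32_t pad_before,
                          int32_t pad_after, int32_t stride, int32_t dilation) {
        const int32_t effective_kernel = dilation * (kernel - 1) + 1;
        return (input + pad_before + pad_after - effective_kernel) / stride + 1;
    }

    // For an NHWC input {N, H, W, C_in} and an NNAPI-style weight
    // {C_out, KH, KW, C_in}, the output shape is
    // {N,
    //  ConvOutputDim(H, KH, padding_top, padding_bottom, stride_y, dilation_y),
    //  ConvOutputDim(W, KW, padding_left, padding_right, stride_x, dilation_x),
    //  C_out}.
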