Update api names in OnnxConverter
daquexian committed Nov 21, 2019
1 parent 512b539 commit 4cd010e
Showing 1 changed file with 20 additions and 20 deletions.
40 changes: 20 additions & 20 deletions tools/onnx2daq/OnnxConverter.cpp
@@ -718,13 +718,13 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
const auto &onnx_weight = onnx_tensors_.at(ori_weight_name);
if (group == 1) {
VLOG(5) << "Vanilla conv";
-WriteDaqLayerCONV_2D(input_name, ori_weight_name, bias_name,
+WriteDaqLayer_CONV_2D(input_name, ori_weight_name, bias_name,
onnx_pads[1], onnx_pads[3], onnx_pads[0],
onnx_pads[2], onnx_strides[1], onnx_strides[0],
output_name);
} else if (onnx_weight.shape[1] == 1) { // depthwise
VLOG(5) << "Depthwise conv";
-WriteDaqLayerDEPTHWISE_CONV_2D(
+WriteDaqLayer_DEPTHWISE_CONV_2D(
input_name, ori_weight_name, bias_name, onnx_pads[1],
onnx_pads[3], onnx_pads[0], onnx_pads[2], onnx_strides[1],
onnx_strides[0], onnx_weight.shape[0] / group, output_name);
@@ -767,12 +767,12 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
CHECK_EQ(nnapi_strides.size(), 2ul);
// kernel_shape of onnx model is [height, width]
if (op == "AveragePool") {
-WriteDaqLayerAVERAGE_POOL_2D(
+WriteDaqLayer_AVERAGE_POOL_2D(
input_name, onnx_pads[1], onnx_pads[3], onnx_pads[0],
onnx_pads[2], onnx_strides[1], onnx_strides[0],
kernel_shape[1], kernel_shape[0], output_name);
} else {
-WriteDaqLayerMAX_POOL_2D(
+WriteDaqLayer_MAX_POOL_2D(
input_name, onnx_pads[1], onnx_pads[3], onnx_pads[0],
onnx_pads[2], onnx_strides[1], onnx_strides[0],
kernel_shape[1], kernel_shape[0], output_name);
@@ -781,11 +781,11 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
const auto input_height = shaper_[input_name][1];
const auto input_width = shaper_[input_name][2];
if (op == "GlobalAveragePool") {
-WriteDaqLayerAVERAGE_POOL_2D(input_name, 0, 0, 0, 0, 1, 1,
+WriteDaqLayer_AVERAGE_POOL_2D(input_name, 0, 0, 0, 0, 1, 1,
input_width, input_height,
output_name);
} else {
-WriteDaqLayerMAX_POOL_2D(input_name, 0, 0, 0, 0, 1, 1,
+WriteDaqLayer_MAX_POOL_2D(input_name, 0, 0, 0, 0, 1, 1,
input_width, input_height, output_name);
}
}
@@ -794,29 +794,29 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
VLOG(5) << "Start converting Relu";
const auto input_name = m(node.input(0));
const auto output_name = m(node.output(0));
-WriteDaqLayerRELU(input_name, output_name);
+WriteDaqLayer_RELU(input_name, output_name);
VLOG(5) << "Converting Relu completed";

} else if (op == "PRelu") {
VLOG(5) << "Start converting PRelu";
const auto input_name = m(node.input(0));
const auto slope_name = m(node.input(1));
const auto output_name = m(node.output(0));
-WriteDaqLayerPRELU(input_name, slope_name, output_name);
+WriteDaqLayer_PRELU(input_name, slope_name, output_name);
VLOG(5) << "Converting PRelu completed";
} else if (op == "Add") {
VLOG(5) << "Start converting Add";
const auto input1_name = m(node.input(0));
const auto input2_name = m(node.input(1));
const auto output_name = m(node.output(0));
-WriteDaqLayerADD(input1_name, input2_name, output_name);
+WriteDaqLayer_ADD(input1_name, input2_name, output_name);
VLOG(5) << "Converting Add completed";
} else if (op == "Mul") {
VLOG(5) << "Start converting Mul";
const auto input1_name = m(node.input(0));
const auto input2_name = m(node.input(1));
const auto output_name = m(node.output(0));
-WriteDaqLayerMUL(input1_name, input2_name, output_name);
+WriteDaqLayer_MUL(input1_name, input2_name, output_name);
VLOG(5) << "Converting Mul completed";
} else if (op == "Gemm") {
VLOG(5) << "Start converting Gemm";
@@ -832,7 +832,7 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
const auto alpha = helper.get("alpha", 1.0f);
const auto beta = helper.get("beta", 1.0f);
if (transA == 0 && transB == 1 && alpha == 1.f && beta == 1.f) {
-WriteDaqLayerFULLY_CONNECTED(input_name, weight_name, bias_name,
+WriteDaqLayer_FULLY_CONNECTED(input_name, weight_name, bias_name,
output_name);
} else {
throw std::invalid_argument(
@@ -846,7 +846,7 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
VLOG(5) << "Start converting Softmax";
const auto input_name = m(node.input(0));
const auto output_name = m(node.output(0));
-WriteDaqLayerSOFTMAX(input_name, 1.f, output_name);
+WriteDaqLayer_SOFTMAX(input_name, 1.f, output_name);
VLOG(5) << "Converting Softmax completed";
} else if (op == "Concat") {
VLOG(5) << "Start converting Concat";
@@ -857,7 +857,7 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
const uint32_t axis_nchw_to_nhwc[4]{0, 3, 1, 2};
const auto axis = helper.get("axis", 1);
const auto output_name = m(node.output(0));
-WriteDaqLayerCONCATENATION(concat_inputs_str, axis_nchw_to_nhwc[axis],
+WriteDaqLayer_CONCATENATION(concat_inputs_str, axis_nchw_to_nhwc[axis],
output_name);
VLOG(5) << "Converting Concat completed";
} else if (op == "Dropout") {
@@ -909,8 +909,8 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
shaper_.AddShape(tensor_b_name, scale_tensor.shape);
tensors_.push_back(flat_tensor_a);
tensors_.push_back(flat_tensor_b);
-WriteDaqLayerMUL(input_name, tensor_a_name, tensor_imm_product_name);
-WriteDaqLayerADD(tensor_imm_product_name, tensor_b_name, output_name);
+WriteDaqLayer_MUL(input_name, tensor_a_name, tensor_imm_product_name);
+WriteDaqLayer_ADD(tensor_imm_product_name, tensor_b_name, output_name);

VLOG(5) << "Converting BatchNormalization completed";
} else if (op == "Reshape") {
@@ -935,26 +935,26 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
const auto radius = (size - 1) / 2;
alpha /= size; // The implementation of ONNX LRN is not the same as
// that of NNAPI LRN
-WriteDaqLayerLOCAL_RESPONSE_NORMALIZATION(node.input(0), radius, bias,
+WriteDaqLayer_LOCAL_RESPONSE_NORMALIZATION(node.input(0), radius, bias,
alpha, beta, node.output(0));
VLOG(5) << "Converting LRN completed";
} else if (op == "Tanh") {
VLOG(5) << "Start converting Tanh";
const auto input_name = m(node.input(0));
const auto output_name = m(node.output(0));
-WriteDaqLayerTANH(input_name, output_name);
+WriteDaqLayer_TANH(input_name, output_name);
VLOG(5) << "Converting Tanh completed";
} else if (op == "Floor") {
VLOG(5) << "Start converting Floor";
const auto input_name = m(node.input(0));
const auto output_name = m(node.output(0));
-WriteDaqLayerFLOOR(input_name, output_name);
+WriteDaqLayer_FLOOR(input_name, output_name);
VLOG(5) << "Converting Floor completed";
} else if (op == "Sigmoid") {
VLOG(5) << "Start converting Sigmoid";
const auto input_name = m(node.input(0));
const auto output_name = m(node.output(0));
-WriteDaqLayerLOGISTIC(input_name, output_name);
+WriteDaqLayer_LOGISTIC(input_name, output_name);
VLOG(5) << "Converting Sigmoid completed";
} else {
throw std::invalid_argument("Unsupported operator " + op);
@@ -964,7 +964,7 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
if (std::find(dequantize_after_.begin(), dequantize_after_.end(),
output) != dequantize_after_.end()) {
css dequant_output = output + "_dequant";
-WriteDaqLayerDEQUANTIZE(output, dequant_output);
+WriteDaqLayer_DEQUANTIZE(output, dequant_output);
name_map_[output] = dequant_output;
}
}
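
A note on the argument order in the conv/pool hunks above: ONNX stores pads as [top, left, bottom, right] and strides as [stride_h, stride_w], while the renamed WriteDaqLayer_* helpers are called as (left, right, top, bottom, stride_w, stride_h). A minimal sketch of that remapping follows; the DaqPadding struct and PadsFromOnnx helper are hypothetical names used for illustration, not repository code, and the signature is inferred from the call sites.

#include <cstdint>
#include <vector>

struct DaqPadding {
    int32_t left, right, top, bottom;
};

DaqPadding PadsFromOnnx(const std::vector<int32_t> &onnx_pads) {
    // onnx_pads = [x1_begin, x2_begin, x1_end, x2_end] for an NCHW input,
    // i.e. [top, left, bottom, right]; reorder to (left, right, top, bottom)
    // as passed to WriteDaqLayer_CONV_2D and the pooling helpers above.
    return DaqPadding{onnx_pads[1], onnx_pads[3], onnx_pads[0], onnx_pads[2]};
}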
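The BatchNormalization hunk works because mean and variance are constants at inference time, so y = scale * (x - mean) / sqrt(var + eps) + bias collapses into one per-channel multiply and one per-channel add. A minimal sketch of that folding, assuming a standalone helper (FoldBatchNorm is a hypothetical name):

#include <cmath>
#include <vector>

void FoldBatchNorm(const std::vector<float> &scale,
                   const std::vector<float> &bias,
                   const std::vector<float> &mean,
                   const std::vector<float> &var, float eps,
                   std::vector<float> &a,   // per-channel multiplier
                   std::vector<float> &b) { // per-channel addend
    const size_t channels = scale.size();
    a.resize(channels);
    b.resize(channels);
    for (size_t i = 0; i < channels; ++i) {
        a[i] = scale[i] / std::sqrt(var[i] + eps);
        b[i] = bias[i] - mean[i] * a[i];
    }
    // The converter then emits WriteDaqLayer_MUL(x, a, tmp) followed by
    // WriteDaqLayer_ADD(tmp, b, y), as in the diff above.
}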

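The alpha /= size line in the LRN hunk compensates for a real formula difference: ONNX LRN divides alpha by the window size internally, while NNAPI's LOCAL_RESPONSE_NORMALIZATION applies alpha to the raw squared sum. A minimal sketch of the two denominators (free functions for illustration, not repository code):

#include <cmath>

// ONNX LRN:  y = x / (bias + (alpha / size) * sum(x_i^2)) ^ beta
float OnnxLrnDenom(float bias, float alpha, float beta, int size,
                   float square_sum) {
    return std::pow(bias + alpha / size * square_sum, beta);
}

// NNAPI LRN: y = x / (bias + alpha * sum(x_i^2)) ^ beta
float NnapiLrnDenom(float bias, float alpha, float beta, float square_sum) {
    return std::pow(bias + alpha * square_sum, beta);
}

// OnnxLrnDenom(b, a, be, n, s) == NnapiLrnDenom(b, a / n, be, s), which is
// why the converter passes onnx_alpha / size as the NNAPI alpha.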