Commit

code style
Signed-off-by: Alexander Peskov <[email protected]>
apeskov committed May 27, 2022
1 parent 6913eaf commit 0372e45
Showing 3 changed files with 269 additions and 243 deletions.
150 changes: 75 additions & 75 deletions src/runtime/contrib/dnnl/dnnl_json_runtime.cc
@@ -74,7 +74,7 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
/* Thread safe implementation of Run. Keep runtime instance immutable */
void Run(const TVMArgs& args) const {
auto arg_data_provider = makeIODataProvider(args);
- auto mem_solver = tensor_registry_.makeSolver(arg_data_provider);
+ auto mem_solver = tensor_registry_.MakeSolver(arg_data_provider);
// Execute primitives one by one
for (const auto& act : net_) {
auto prim = std::get<0>(act);
@@ -222,15 +222,15 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
auto wgh_tr = getInput(nid, 1);
auto dst_tr = getOutput(nid, 0);
auto bias_tr = has_bias ? getInput(nid, 2) : getInput(nid, -1);
- auto strides = getAttr<std::vector<int64_t>>(node, "strides");
- auto dilates = getAttr<std::vector<int64_t>>(node, "dilation");
- auto padding = getAttr<std::vector<int64_t>>(node, "padding");
+ auto strides = GetAttr<std::vector<int64_t>>(node, "strides");
+ auto dilates = GetAttr<std::vector<int64_t>>(node, "dilation");
+ auto padding = GetAttr<std::vector<int64_t>>(node, "padding");
std::vector<int64_t> padding_l(padding.begin(), padding.begin() + padding.size() / 2);
std::vector<int64_t> padding_r(padding.begin() + padding.size() / 2, padding.end());
- auto groups = getAttr<int>(node, "groups");
- auto src_layout = getAttr<std::string>(node, "data_layout");
- auto dst_layout = getAttr<std::string>(node, "out_layout");
- auto wgh_layout = getAttr<std::string>(node, "kernel_layout");
+ auto groups = GetAttr<int>(node, "groups");
+ auto src_layout = GetAttr<std::string>(node, "data_layout");
+ auto dst_layout = GetAttr<std::string>(node, "out_layout");
+ auto wgh_layout = GetAttr<std::string>(node, "kernel_layout");

// dst_layout == "" means to use data_layout
if (dst_layout.empty()) dst_layout = src_layout;
@@ -239,34 +239,34 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
for (auto& d : dilates) d--;

// Take into account provided layout strings
- src_tr = src_tr.treatAs(src_layout);
- dst_tr = dst_tr.treatAs(dst_layout);
- wgh_tr = wgh_tr.treatAs(wgh_layout);
+ src_tr = src_tr.TreatAs(src_layout);
+ dst_tr = dst_tr.TreatAs(dst_layout);
+ wgh_tr = wgh_tr.TreatAs(wgh_layout);

// Should support G mixed with O. Like { G*O, I, H, W }
if (wgh_layout.find("G") == std::string::npos) {
auto w_dims = wgh_tr.dims();
w_dims[0] /= groups;
w_dims.insert(w_dims.begin(), groups);
- wgh_tr = wgh_tr.reshape(w_dims);
+ wgh_tr = wgh_tr.Reshape(w_dims);
}

// Assumption that bias is correct and can be squeezed to 1D
- bias_tr = bias_tr.reshape({dst_tr.dims()[1]});
+ bias_tr = bias_tr.Reshape({dst_tr.dims()[1]});

// Conv description.
auto conv_desc = dnnl::convolution_forward::desc(
dnnl::prop_kind::forward_inference, dnnl::algorithm::convolution_direct,
- src_tr.layoutAny().desc(), wgh_tr.layoutAny().desc(), bias_tr.layoutAny().desc(),
- dst_tr.layoutAny().desc(), strides, dilates, padding_l, padding_r);
+ src_tr.LayoutAny().desc(), wgh_tr.LayoutAny().desc(), bias_tr.LayoutAny().desc(),
+ dst_tr.LayoutAny().desc(), strides, dilates, padding_l, padding_r);

// Enable elementwise post-ops.
auto conv_prim_desc = dnnl::convolution_forward::primitive_desc(conv_desc, attr, engine_);

- src_tr = src_tr.requestLayout(conv_prim_desc.src_desc());
- wgh_tr = wgh_tr.requestLayout(conv_prim_desc.weights_desc());
- dst_tr = dst_tr.requestLayout(conv_prim_desc.dst_desc());
- bias_tr = bias_tr.requestLayout(conv_prim_desc.bias_desc());
+ src_tr = src_tr.RequestLayout(conv_prim_desc.src_desc());
+ wgh_tr = wgh_tr.RequestLayout(conv_prim_desc.weights_desc());
+ dst_tr = dst_tr.RequestLayout(conv_prim_desc.dst_desc());
+ bias_tr = bias_tr.RequestLayout(conv_prim_desc.bias_desc());

auto scratchpad_tr = TensorRequisite::AsIs(conv_prim_desc.scratchpad_desc());

@@ -290,15 +290,15 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
auto dst_tr = getOutput(nid, 0);
auto bias_tr = has_bias ? getInput(nid, 2) : getInput(nid, -1);

- auto strides = getAttr<std::vector<int64_t>>(node, "strides");
- auto dilates = getAttr<std::vector<int64_t>>(node, "dilation");
- auto padding = getAttr<std::vector<int64_t>>(node, "padding");
+ auto strides = GetAttr<std::vector<int64_t>>(node, "strides");
+ auto dilates = GetAttr<std::vector<int64_t>>(node, "dilation");
+ auto padding = GetAttr<std::vector<int64_t>>(node, "padding");
std::vector<int64_t> padding_l(padding.begin(), padding.begin() + padding.size() / 2);
std::vector<int64_t> padding_r(padding.begin() + padding.size() / 2, padding.end());
- auto groups = getAttr<int>(node, "groups");
- auto src_layout = getAttr<std::string>(node, "data_layout");
- auto dst_layout = getAttr<std::string>(node, "out_layout");
- auto wgh_layout = getAttr<std::string>(node, "kernel_layout");
+ auto groups = GetAttr<int>(node, "groups");
+ auto src_layout = GetAttr<std::string>(node, "data_layout");
+ auto dst_layout = GetAttr<std::string>(node, "out_layout");
+ auto wgh_layout = GetAttr<std::string>(node, "kernel_layout");

// dst_layout == "" means to use data_layout
if (dst_layout.empty()) dst_layout = src_layout;
@@ -312,34 +312,34 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
if (wgh_logic_layout == "GOIDHW") wgh_logic_layout = "GIODHW";

// Take into account provided layout strings
- src_tr = src_tr.treatAs(src_layout);
- dst_tr = dst_tr.treatAs(dst_layout);
- wgh_tr = wgh_tr.treatAs(wgh_layout, wgh_logic_layout);
+ src_tr = src_tr.TreatAs(src_layout);
+ dst_tr = dst_tr.TreatAs(dst_layout);
+ wgh_tr = wgh_tr.TreatAs(wgh_layout, wgh_logic_layout);

// Should support G mixed with O. Like { G*O, I, H, W }
if (wgh_layout.find("G") == std::string::npos) {
auto w_dims = wgh_tr.dims();
w_dims[0] /= groups;
w_dims.insert(w_dims.begin(), groups);
- wgh_tr = wgh_tr.reshape(w_dims);
+ wgh_tr = wgh_tr.Reshape(w_dims);
}

// Assumption that bias is correct and can be squeezed to 1D
- bias_tr = bias_tr.reshape({dst_tr.dims()[1]});
+ bias_tr = bias_tr.Reshape({dst_tr.dims()[1]});

// Conv description.
auto deconv_desc = dnnl::deconvolution_forward::desc(
dnnl::prop_kind::forward_inference, dnnl::algorithm::deconvolution_direct,
- src_tr.layoutAny().desc(), wgh_tr.layoutAny().desc(), bias_tr.layoutAny().desc(),
- dst_tr.layoutAny().desc(), strides, dilates, padding_l, padding_r);
+ src_tr.LayoutAny().desc(), wgh_tr.LayoutAny().desc(), bias_tr.LayoutAny().desc(),
+ dst_tr.LayoutAny().desc(), strides, dilates, padding_l, padding_r);

// Enable elementwise post-ops.
auto deconv_prim_desc = dnnl::deconvolution_forward::primitive_desc(deconv_desc, attr, engine_);

- src_tr = src_tr.requestLayout(deconv_prim_desc.src_desc());
- wgh_tr = wgh_tr.requestLayout(deconv_prim_desc.weights_desc());
- dst_tr = dst_tr.requestLayout(deconv_prim_desc.dst_desc());
- bias_tr = bias_tr.requestLayout(deconv_prim_desc.bias_desc());
+ src_tr = src_tr.RequestLayout(deconv_prim_desc.src_desc());
+ wgh_tr = wgh_tr.RequestLayout(deconv_prim_desc.weights_desc());
+ dst_tr = dst_tr.RequestLayout(deconv_prim_desc.dst_desc());
+ bias_tr = bias_tr.RequestLayout(deconv_prim_desc.bias_desc());

auto scratchpad_tr = TensorRequisite::AsIs(deconv_prim_desc.scratchpad_desc());

@@ -364,20 +364,20 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
auto bias_tr = has_bias ? getInput(nid, 2) : getInput(nid, -1);

// Assumption that bias is correct and can be squeezed to 1D
- bias_tr = bias_tr.reshape({dst_tr.dims()[1]});
+ bias_tr = bias_tr.Reshape({dst_tr.dims()[1]});

// Dense description.
auto dense_desc = dnnl::inner_product_forward::desc(
- dnnl::prop_kind::forward_inference, src_tr.layoutAny().desc(), wgh_tr.layoutAny().desc(),
- bias_tr.layoutAny().desc(), dst_tr.layoutAny().desc());
+ dnnl::prop_kind::forward_inference, src_tr.LayoutAny().desc(), wgh_tr.LayoutAny().desc(),
+ bias_tr.LayoutAny().desc(), dst_tr.LayoutAny().desc());

// Enable elementwise post-ops.
auto dense_prim_desc = dnnl::inner_product_forward::primitive_desc(dense_desc, attr, engine_);

- src_tr = src_tr.requestLayout(dense_prim_desc.src_desc());
- wgh_tr = wgh_tr.requestLayout(dense_prim_desc.weights_desc());
- dst_tr = dst_tr.requestLayout(dense_prim_desc.dst_desc());
- bias_tr = bias_tr.requestLayout(dense_prim_desc.bias_desc());
+ src_tr = src_tr.RequestLayout(dense_prim_desc.src_desc());
+ wgh_tr = wgh_tr.RequestLayout(dense_prim_desc.weights_desc());
+ dst_tr = dst_tr.RequestLayout(dense_prim_desc.dst_desc());
+ bias_tr = bias_tr.RequestLayout(dense_prim_desc.bias_desc());

auto scratchpad_tr = TensorRequisite::AsIs(dense_prim_desc.scratchpad_desc());

@@ -398,10 +398,10 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
auto var_tr = getInput(nid, 4);
auto dst_tr = getOutput(nid, 0);

- auto axis = getAttr<int>(node, "axis");
- auto epsilon = getAttr<float>(node, "epsilon");
- auto center = getAttr<bool>(node, "center");
- auto scale = getAttr<bool>(node, "scale");
+ auto axis = GetAttr<int>(node, "axis");
+ auto epsilon = GetAttr<float>(node, "epsilon");
+ auto center = GetAttr<bool>(node, "center");
+ auto scale = GetAttr<bool>(node, "scale");

ICHECK(axis == 1 && center && scale) << "Unimplemented BatchNorm case";

@@ -416,8 +416,8 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
ICHECK(sc_sh_dims.size() == 2);
ICHECK(sc_sh_dims[0] == 2);
sc_sh_dims[0] /= 2;
- auto scale_tr = scale_shift_tr.crop(sc_sh_dims, {0, 0}).squeeze();
- auto shift_tr = scale_shift_tr.crop(sc_sh_dims, {1, 0}).squeeze();
+ auto scale_tr = scale_shift_tr.Crop(sc_sh_dims, {0, 0}).Squeeze();
+ auto shift_tr = scale_shift_tr.Crop(sc_sh_dims, {1, 0}).Squeeze();

auto register_copy = [this](const TensorRequisite& src, const TensorRequisite& dst) {
dnnl::reorder::primitive_desc copy_pd(engine_, src.desc(), engine_, dst.desc());
@@ -441,14 +441,14 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
auto dst_tr = getOutput(nid, 0);

// Setup attributes.
- auto strides = getAttr<std::vector<int64_t>>(node, "strides");
- auto dilates = getAttr<std::vector<int64_t>>(node, "dilation");
- auto padding = getAttr<std::vector<int64_t>>(node, "padding");
+ auto strides = GetAttr<std::vector<int64_t>>(node, "strides");
+ auto dilates = GetAttr<std::vector<int64_t>>(node, "dilation");
+ auto padding = GetAttr<std::vector<int64_t>>(node, "padding");
std::vector<int64_t> padding_l(padding.begin(), padding.begin() + padding.size() / 2);
std::vector<int64_t> padding_r(padding.begin() + padding.size() / 2, padding.end());
- auto kernel = getAttr<std::vector<int64_t>>(node, "pool_size");
- auto src_layout = getAttr<std::string>(node, "layout");
- auto dst_layout = getAttr<std::string>(node, "out_layout");
+ auto kernel = GetAttr<std::vector<int64_t>>(node, "pool_size");
+ auto src_layout = GetAttr<std::string>(node, "layout");
+ auto dst_layout = GetAttr<std::string>(node, "out_layout");

// dst_layout == "" means to use data_layout
if (dst_layout.empty()) dst_layout = src_layout;
@@ -457,24 +457,24 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
for (auto& d : dilates) d--;

// Take into account provided layout strings
- src_tr = src_tr.treatAs(src_layout);
- dst_tr = dst_tr.treatAs(dst_layout);
+ src_tr = src_tr.TreatAs(src_layout);
+ dst_tr = dst_tr.TreatAs(dst_layout);

// Attributes related to AvgPool
if (algo == dnnl::algorithm::pooling_avg) {
- auto include_pad = getAttr<bool>(node, "count_include_pad");
+ auto include_pad = GetAttr<bool>(node, "count_include_pad");
algo = include_pad ? dnnl::algorithm::pooling_avg_include_padding
: dnnl::algorithm::pooling_avg_exclude_padding;
}

// Pooling description.
auto pool_desc = dnnl::pooling_v2_forward::desc(
dnnl::prop_kind::forward_inference, algo, src_tr.desc(), //<= Do not use any for src tensor
- dst_tr.layoutAny().desc(), strides, kernel, dilates, padding_l, padding_r);
+ dst_tr.LayoutAny().desc(), strides, kernel, dilates, padding_l, padding_r);
auto pool_prim_desc = dnnl::pooling_v2_forward::primitive_desc(pool_desc, engine_);

- src_tr = src_tr.requestLayout(pool_prim_desc.src_desc());
- dst_tr = dst_tr.requestLayout(pool_prim_desc.dst_desc());
+ src_tr = src_tr.RequestLayout(pool_prim_desc.src_desc());
+ dst_tr = dst_tr.RequestLayout(pool_prim_desc.dst_desc());

auto scratchpad_tr = TensorRequisite::AsIs(pool_prim_desc.scratchpad_desc());

@@ -492,10 +492,10 @@ class DNNLJSONRuntime : public JSONRuntimeBase {

float alpha = 0., beta = 0.;
if (op_name == "clip") {
- alpha = getAttr<float>(node, "a_min");
- beta = getAttr<float>(node, "a_max");
+ alpha = GetAttr<float>(node, "a_min");
+ beta = GetAttr<float>(node, "a_max");
} else if (op_name == "nn.leaky_relu") {
- alpha = getAttr<float>(node, "alpha");
+ alpha = GetAttr<float>(node, "alpha");
}

auto elt_desc = dnnl::eltwise_forward::desc(dnnl::prop_kind::forward_inference, algo,
@@ -512,7 +512,7 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
auto src_tr = getInput(nid, 0);
auto dst_tr = getOutput(nid, 0);

- auto axis = getAttr<int>(node, "axis");
+ auto axis = GetAttr<int>(node, "axis");
if (axis < 0) {
axis = src_tr.dims().size() + axis;
}
@@ -535,8 +535,8 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
auto rhs_tr = getInput(nid, 1);
auto dst_tr = getOutput(nid, 0);

- lhs_tr = lhs_tr.broadcast(dst_tr.dims());
- rhs_tr = rhs_tr.broadcast(dst_tr.dims());
+ lhs_tr = lhs_tr.Broadcast(dst_tr.dims());
+ rhs_tr = rhs_tr.Broadcast(dst_tr.dims());

auto binary_desc = dnnl::binary::desc(algo, lhs_tr.desc(), rhs_tr.desc(), dst_tr.desc());
auto binary_prim_desc = dnnl::binary::primitive_desc(binary_desc, engine_);
@@ -558,8 +558,8 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
auto eid = node_row_ptr_[data_entry.id_] + data_entry.index_;
auto const_dl_tensor = data_entry_[eid];

- auto desc = dnnl::memory::desc{utils::convert2dnnl(shape), convert2dnnl(dtype),
- utils::plainLayout(shape.size())};
+ auto desc = dnnl::memory::desc{utils::Convert2Dnnl(shape), Convert2Dnnl(dtype),
+ utils::PlainLayout(shape.size())};
TensorRequisite res;
if (const_dl_tensor) {
ICHECK(const_dl_tensor->data);
@@ -588,24 +588,24 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
dl_tensor.ndim = shape.size();
dl_tensor.shape = shape.data();

- auto desc = dnnl::memory::desc{utils::convert2dnnl(shape), convert2dnnl(dtype),
- utils::plainLayout(shape.size())};
+ auto desc = dnnl::memory::desc{utils::Convert2Dnnl(shape), Convert2Dnnl(dtype),
+ utils::PlainLayout(shape.size())};

- return TensorRequisite::AsIs(desc, eid).backward();
+ return TensorRequisite::AsIs(desc, eid).Backward();
}

// Helper function to register primitive into execution queue
void submit(const dnnl::primitive& prim,
const std::unordered_map<int, TensorRequisite>& tr_args) {
// Register all provided TR arguments
- std::unordered_map<int, TensorRegistry::ArgReq> prim_arg_id;
+ std::unordered_map<int, TensorRegistry::ArgId> prim_arg_id;
TensorRegistry::ActionQue post_prim_actions;
for (const auto& kvp : tr_args) {
const auto& key = kvp.first;
const auto& tr = kvp.second;

if (!tr.defined()) continue; // empty arg is admitted. Just skip it
- auto arg_id = tensor_registry_.registerTR(tr, tr.isReversed() ? &post_prim_actions : &net_);
+ auto arg_id = tensor_registry_.Register(tr, tr.IsReversed() ? &post_prim_actions : &net_);
prim_arg_id[key] = arg_id;
}

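Note on the change: this commit is a mechanical style pass that renames the DNNL runtime helper methods from camelCase (getAttr, treatAs, requestLayout, reshape, broadcast, crop, squeeze, layoutAny, backward, registerTR) to PascalCase (GetAttr, TreatAs, RequestLayout, Reshape, Broadcast, Crop, Squeeze, LayoutAny, Backward, Register), while cheap accessors such as dims() and desc() keep their lowercase form. Below is a minimal sketch of that convention, assuming a hypothetical TensorSketch class; it is not the real TVM TensorRequisite API.

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Hypothetical stand-in type used only to illustrate the naming convention.
class TensorSketch {
 public:
  explicit TensorSketch(std::vector<int64_t> dims) : dims_(std::move(dims)) {}

  // Cheap accessor: stays lowercase, mirroring dims()/desc() in the diff.
  const std::vector<int64_t>& dims() const { return dims_; }

  // Transforming helper: PascalCase after this change (was squeeze()).
  TensorSketch Squeeze() const {
    std::vector<int64_t> out;
    for (int64_t d : dims_) {
      if (d != 1) out.push_back(d);  // drop unit dimensions
    }
    return TensorSketch(std::move(out));
  }

 private:
  std::vector<int64_t> dims_;
};

int main() {
  TensorSketch t({1, 2, 3});
  auto s = t.Squeeze();  // before this commit the call would read t.squeeze()
  for (int64_t d : s.dims()) std::cout << d << ' ';  // prints: 2 3
  std::cout << '\n';
  return 0;
}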
