This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit 9599fab: Clang-format fix
mozga-intel committed Nov 5, 2021
1 parent: cc9b6d9

Showing 14 changed files with 49 additions and 47 deletions.
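
Apart from two guard comments added in onnx_to_tensorrt.h, every change in this commit is a whitespace-only reflow: clang-format re-indents the continuation lines of wrapped ternary (?:) expressions and other multi-line statements, so each affected line is deleted and re-added with only its leading whitespace changed. The sketch below illustrates that kind of reflow; the names (PickContext, GetIORunContext, GetRunContext) are hypothetical and the indent columns are chosen only for illustration, since the real columns are dictated by the repository's .clang-format settings.

#include <iostream>
#include <string>

// Hypothetical helpers, only here to make the sketch self-contained.
static std::string GetIORunContext(int dev) { return "io-stream:" + std::to_string(dev); }
static std::string GetRunContext(int dev) { return "compute-stream:" + std::to_string(dev); }

static std::string PickContext(bool is_copy, int dev) {
  // Hand-wrapped form, before running clang-format:
  //   auto rctx = is_copy ? GetIORunContext(dev) :
  //       GetRunContext(dev);
  // After clang-format, only the leading whitespace of the continuation
  // line changes; the tokens on the line stay identical:
  auto rctx = is_copy ? GetIORunContext(dev) :
                        GetRunContext(dev);
  return rctx;
}

int main() {
  std::cout << PickContext(true, 0) << "\n" << PickContext(false, 0) << "\n";
  return 0;
}
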

src/engine/threaded_engine_pooled.cc: 1 addition & 1 deletion
@@ -155,7 +155,7 @@ class ThreadedEnginePooled : public ThreadedEngine {
bool is_copy = (opr_block->opr->prop == FnProperty::kCopyFromGPU ||
opr_block->opr->prop == FnProperty::kCopyToGPU);
auto&& rctx = is_copy ? streams_->GetIORunContext(opr_block->ctx) :
- streams_->GetRunContext(opr_block->ctx);
+ streams_->GetRunContext(opr_block->ctx);
#if MXNET_USE_CUDA
CallbackOnStart on_start;
CallbackOnComplete callback;

src/kvstore/kvstore_dist.h: 4 additions & 4 deletions
@@ -508,16 +508,16 @@ class KVStoreDist : public KVStoreLocal {
const int dtype = recv_buf.dtype();
const int num_bytes = mshadow::mshadow_sizeof(dtype);
PSKV& pskv = (gradient_compression_->get_type() == CompressionType::kNone) ?
- EncodeDefaultKey(key, size, num_bytes) :
- EncodeCompressedKey(key, size, false, num_bytes);
- char* data = static_cast<char*>(recv_buf.data().dptr_);
+ EncodeDefaultKey(key, size, num_bytes) :
+ EncodeCompressedKey(key, size, false, num_bytes);
+ char* data = static_cast<char*>(recv_buf.data().dptr_);
// false means not to delete data when SArray is deleted
auto vals = new ps::SArray<char>(data, size * num_bytes, false);
// issue pull
RequestType mode = (gradient_compression_->get_type() != CompressionType::kNone) ?
RequestType::kCompressedPushPull :
RequestType::kDefaultPushPull;
- const int cmd = GetCommandType(mode, dtype);
+ const int cmd = GetCommandType(mode, dtype);
CHECK_NOTNULL(ps_worker_)->ZPull(pskv.keys, vals, &pskv.lens, cmd, [vals, cb]() {
delete vals;
cb();

src/operator/contrib/bilinear_resize-inl.h: 3 additions & 3 deletions
@@ -273,9 +273,9 @@ static bool BilinearSampleOpInferShape(const nnvm::NodeAttrs& attrs,
new_height = ((dshape[2] % 2) == 0) ?
(int16_t)(dshape[2] * param.scale_height.value()) :
(int16_t)((dshape[2] - 1) * param.scale_height.value()) + 1;
- new_width = ((dshape[3] % 2) == 0) ?
- (int16_t)(dshape[3] * param.scale_width.value()) :
- (int16_t)((dshape[3] - 1) * param.scale_width.value()) + 1;
+ new_width = ((dshape[3] % 2) == 0) ?
+ (int16_t)(dshape[3] * param.scale_width.value()) :
+ (int16_t)((dshape[3] - 1) * param.scale_width.value()) + 1;
break;
}
case bilinear_resize::like: {

src/operator/contrib/bounding_box.cu: 2 additions & 2 deletions
@@ -490,8 +490,8 @@ __launch_bounds__(NMS<DType>::THRESHOLD) __global__
for (int i = 0; i < n_threads / warp_size; ++i) {
uint32_t my_mask = my_next_mask;
my_next_mask = (((i + 1) < n_threads / warp_size) && (my_element_in_batch < topk)) ?
- nms_results[(i + 1) * topk * num_batches + my_element] :
- full_mask;
+ nms_results[(i + 1) * topk * num_batches + my_element] :
+ full_mask;
if (my_warp == i && !__all_sync(full_mask, my_mask == full_mask)) {
my_mask = my_mask | earlier_threads_mask;
// Loop over warp_size - 1 because the last

src/operator/contrib/multi_lamb.cc: 4 additions & 4 deletions
@@ -44,8 +44,8 @@ struct MultiLAMBKernelStep1 {
using namespace mshadow_op;
for (size_t index = 0; index < kernel_params.ntensors; ++index) {
if ((size_t)i < kernel_params.sizes[index]) {
- MPDType w = has_mixed_precision ? kernel_params.weights32[index][i] :
- MPDType(kernel_params.weights[index][i]);
+ MPDType w = has_mixed_precision ? kernel_params.weights32[index][i] :
+ MPDType(kernel_params.weights[index][i]);
MPDType scaled_grad = static_cast<MPDType>(kernel_params.grads[index][i]) * rescale_grad;
if (clip_gradient >= 0.0f)
scaled_grad = mshadow_op::clip::Map(scaled_grad, static_cast<MPDType>(clip_gradient));
@@ -93,8 +93,8 @@ struct MultiLAMBKernelStep2 {
if ((size_t)i < kernel_params.sizes[index]) {
MPDType w = has_mixed_precision ? kernel_params.weights32[index][i] :
MPDType(kernel_params.weights[index][i]);
- float r1 = sqrt(sum_sq_weigths[index]);
- float r2 = sqrt(sum_sq_temp_g[index]);
+ float r1 = sqrt(sum_sq_weigths[index]);
+ float r2 = sqrt(sum_sq_temp_g[index]);
if (lower_bound >= 0)
r1 = std::max(r1, lower_bound);
if (upper_bound >= 0)

src/operator/contrib/multi_lans.cc: 4 additions & 4 deletions
@@ -45,8 +45,8 @@ struct MultiLANSKernelStep1 {
using namespace mshadow_op;
for (size_t index = 0; index < kernel_params.ntensors; ++index) {
if ((size_t)i < kernel_params.sizes[index]) {
- MPDType w = has_mixed_precision ? kernel_params.weights32[index][i] :
- MPDType(kernel_params.weights[index][i]);
+ MPDType w = has_mixed_precision ? kernel_params.weights32[index][i] :
+ MPDType(kernel_params.weights[index][i]);
float g_norm = sqrt(g_sq_norm[index]);
MPDType scaled_grad = static_cast<MPDType>(kernel_params.grads[index][i]) * rescale_grad;
scaled_grad /= g_norm;
@@ -95,8 +95,8 @@ struct MultiLANSKernelStep2 {
const OpReqType req) {
for (size_t index = 0; index < kernel_params.ntensors; ++index) {
if ((size_t)i < kernel_params.sizes[index]) {
- MPDType w = has_mixed_precision ? kernel_params.weights32[index][i] :
- MPDType(kernel_params.weights[index][i]);
+ MPDType w = has_mixed_precision ? kernel_params.weights32[index][i] :
+ MPDType(kernel_params.weights[index][i]);
float r1 = sqrt(sum_sq_weigths[index]);
float r2_m = sqrt(sum_sq_temp_m[index]);
float r2_g = sqrt(sum_sq_temp_g[index]);

src/operator/nn/batch_norm.cu: 4 additions & 4 deletions
@@ -282,7 +282,7 @@ __launch_bounds__(inference_forward_threads) __global__

AType invstd = small_num_channels ? saved_invstd[my_channel] :
variance_to_invstd(runningVar[my_channel], epsilon);
- AType mean = small_num_channels ? saved_mean[my_channel] : runningMean[my_channel];
+ AType mean = small_num_channels ? saved_mean[my_channel] : runningMean[my_channel];
AType gamma =
small_num_channels ?
saved_weight[my_channel] :
@@ -349,8 +349,8 @@ __global__ void BatchNormalizationUpdateOutputKernel(DeviceTensor input,
const AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0) ?
ScalarConvert<DType, AccReal>::to(weight[plane]) :
ScalarConvert<int, AccReal>::to(1);
- const AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane]) :
- ScalarConvert<int, AccReal>::to(0);
+ const AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane]) :
+ ScalarConvert<int, AccReal>::to(0);
for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
const DType inp = input.get_ref(batch, plane, x);
@@ -651,7 +651,7 @@ static __global__ void BatchNormalizationBackwardKernel(const DeviceTensor input
const AccReal weightVal = ((flags & FIX_GAMMA_FLAG) == 0 && tensors.weight.numElements() > 0) ?
ScalarConvert<DType, AccReal>::to(tensors.weight[plane]) :
AccReal(1);
- const AccReal norm = AccReal(1) / N;
+ const AccReal norm = AccReal(1) / N;

// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(gradOutput)

src/operator/nn/dnnl/dnnl_base.cc: 6 additions & 6 deletions
@@ -242,19 +242,19 @@ const dnnl::memory* GetWeights(const NDArray& arr, int num_groups) {
tz = dnnl::memory::dims{arr.shape()[O], arr.shape()[I]};
format_tag = dnnl::memory::format_tag::oi;
} else if (ndim == 3) {
- tz = num_groups > 1 ?
- dnnl::memory::dims{
+ tz = num_groups > 1 ?
+ dnnl::memory::dims{
num_groups, arr.shape()[O] / num_groups, arr.shape()[I], arr.shape()[H]} :
- dnnl::memory::dims{arr.shape()[O], arr.shape()[I], arr.shape()[H]};
+ dnnl::memory::dims{arr.shape()[O], arr.shape()[I], arr.shape()[H]};
format_tag = num_groups > 1 ? dnnl::memory::format_tag::goiw : dnnl::memory::format_tag::oiw;
} else if (ndim == 4) {
- tz = num_groups > 1 ?
- dnnl::memory::dims{num_groups,
+ tz = num_groups > 1 ?
+ dnnl::memory::dims{num_groups,
arr.shape()[O] / num_groups,
arr.shape()[I],
arr.shape()[H],
arr.shape()[W]} :
- dnnl::memory::dims{arr.shape()[O], arr.shape()[I], arr.shape()[H], arr.shape()[W]};
+ dnnl::memory::dims{arr.shape()[O], arr.shape()[I], arr.shape()[H], arr.shape()[W]};
format_tag = num_groups > 1 ? dnnl::memory::format_tag::goihw : dnnl::memory::format_tag::oihw;
} else if (ndim == 5) {
tz = num_groups > 1 ?

src/operator/nn/dnnl/dnnl_rnn.cc: 14 additions & 14 deletions
@@ -197,14 +197,14 @@ RnnPrimitive GetRnnFwdPrim(const DNNLRnnLayerParam& layer_param,
auto src_cell_desc = memory::desc(layer_param.cell_dims, data_type, tag::ldnc);
auto weight_peep_desc = memory::desc();
auto weight_proj_desc = layer_param.proj_size > 0 ?
- memory::desc(layer_param.weight_proj_dims, weight_type, tag::any) :
- memory::desc();
- auto dst_state_desc = layer_param.state_outputs ?
- memory::desc(layer_param.state_dims, data_type, tag::ldnc) :
- memory::desc();
- auto dst_cell_desc = layer_param.state_outputs ?
- memory::desc(layer_param.cell_dims, data_type, tag::ldnc) :
- memory::desc();
+ memory::desc(layer_param.weight_proj_dims, weight_type, tag::any) :
+ memory::desc();
+ auto dst_state_desc = layer_param.state_outputs ?
+ memory::desc(layer_param.state_dims, data_type, tag::ldnc) :
+ memory::desc();
+ auto dst_cell_desc = layer_param.state_outputs ?
+ memory::desc(layer_param.cell_dims, data_type, tag::ldnc) :
+ memory::desc();

auto fwd = RnnPrimitive();
switch (mode) {
@@ -266,8 +266,8 @@ RnnBwdPrimitive GetRnnBwdPrim(const DNNLRnnForwardTraining& fwd,
memory::data_type weight_type = get_dnnl_type(params.dtype());
const prop_kind prop = prop_kind::backward;
rnn_direction dnnl_rnn_direction = layer_param.bidirectional ?
- rnn_direction::bidirectional_concat :
- rnn_direction::unidirectional;
+ rnn_direction::bidirectional_concat :
+ rnn_direction::unidirectional;

auto src_layer_desc = memory::desc(layer_param.src_dims, data_type, tag::tnc);
auto weight_layer_desc = memory::desc(layer_param.weight_layer_dims, weight_type, tag::any);
@@ -276,8 +276,8 @@ RnnBwdPrimitive GetRnnBwdPrim(const DNNLRnnForwardTraining& fwd,
auto dst_layer_desc = memory::desc(layer_param.dst_dims, data_type, tag::tnc);
auto src_state_desc = memory::desc(layer_param.state_dims, data_type, tag::ldnc);
auto dst_state_desc = layer_param.state_outputs ?
- memory::desc(layer_param.state_dims, data_type, tag::ldnc) :
- memory::desc();
+ memory::desc(layer_param.state_dims, data_type, tag::ldnc) :
+ memory::desc();

const void* fwd_pd = fwd.GetPrimDesc();
auto bwd = RnnBwdPrimitive();
@@ -1127,8 +1127,8 @@ void DNNLRnnOp::Forward(const OpContext& ctx,
const int batch_size = default_param.batch_size_;
const int state_size = default_param.state_size;
const int iter_size = default_param.projection_size.has_value() ?
- default_param.projection_size.value() :
- default_param.state_size;
+ default_param.projection_size.value() :
+ default_param.state_size;
const int directions = default_param.bidirectional ? 2 : 1;
dnnl::memory::desc dst_desc({seq_length, batch_size, directions * iter_size},
get_dnnl_type(data_dtype),

src/operator/nn/softmax-inl.h: 2 additions & 2 deletions
@@ -853,8 +853,8 @@ __global__ void masked_softmax_grad_kernel(OType* out,
for (index_t i = x; i < M; i += x_size) {
bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask];
final_result = negate ? -OP2::Map(ograd[base + i * sa], out[base + i * sa], ssum) :
- OP2::Map(ograd[base + i * sa], out[base + i * sa], ssum);
- final_result = mask_value ? final_result / static_cast<DType>(temperature) : DType(0.0f);
+ OP2::Map(ograd[base + i * sa], out[base + i * sa], ssum);
+ final_result = mask_value ? final_result / static_cast<DType>(temperature) : DType(0.0f);
KERNEL_ASSIGN(igrad[base + i * sa], Req, final_result);
}
}

src/operator/optimizer_op.cc: 1 addition & 1 deletion
@@ -229,7 +229,7 @@ struct AdamStdDnsRspDnsKernel<req, cpu> {
for (index_t j = 0; j < row_length; j++) {
const index_t data_i = row_i + j;
DType grad_rescaled = non_zero ? static_cast<DType>(grad_data[grad_i + j] * rescale_grad) :
- static_cast<DType>(0);
+ static_cast<DType>(0);
if (clip_gradient >= 0.0f) {
grad_rescaled = clip::Map(grad_rescaled, clip_gradient);
}

src/operator/optimizer_op.cu: 1 addition & 1 deletion
@@ -164,7 +164,7 @@ struct AdamStdDnsRspDnsKernel<req, gpu> {
(row_id == 0) ? prefix_sum[0] > 0 : prefix_sum[row_id] > prefix_sum[row_id - 1];
const RType grad_offset = (prefix_sum[row_id] - 1) * row_length + col_id;
DType grad_rescaled = non_zero ? static_cast<DType>(grad_data[grad_offset] * rescale_grad) :
- static_cast<DType>(0);
+ static_cast<DType>(0);
if (clip_gradient >= 0.0f) {
grad_rescaled = clip::Map(grad_rescaled, clip_gradient);
}

src/operator/subgraph/dnnl/dnnl_conv.cc: 1 addition & 1 deletion
@@ -472,7 +472,7 @@ static void SgDNNLConvParamParser(nnvm::NodeAttrs* attrs) {
auto& post_act_param = (param_.full_conv_param.dnnl_param.with_act && !with_act) ?
param_.full_conv_param.act_param :
param_.full_conv_param.postsum_act_param;
- with_act = true;
+ with_act = true;
if (node_name == "Activation") {
const auto act_param = nnvm::get<ActivationParam>(node->attrs.parsed);
post_act_param.alg = GetDNNLActAlgo(act_param);

src/operator/subgraph/tensorrt/onnx_to_tensorrt.h: 2 additions & 0 deletions
@@ -73,11 +73,13 @@ class TRT_Logger : public nvinfer1::ILogger {
time_t rawtime = std::time(0);
char buf[256];
strftime(&buf[0], 256, "%Y-%m-%d %H:%M:%S", std::gmtime(&rawtime));
+ // clang-format off
const char* sevstr = (severity == Severity::kINTERNAL_ERROR ? " BUG" :
severity == Severity::kERROR ? " ERROR" :
severity == Severity::kWARNING ? "WARNING" :
severity == Severity::kINFO ? " INFO" :
"UNKNOWN");
+ // clang-format on
(*_ostream) << "[" << buf << " " << sevstr << "] " << msg << std::endl;
}
}
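
The two lines added to onnx_to_tensorrt.h are the only non-whitespace change in the commit and account for the two-line surplus of additions over deletions (49 vs. 47). The // clang-format off and // clang-format on comments tell clang-format to leave the enclosed region untouched, which preserves the hand-aligned nested ternary that maps a TensorRT severity value to a padded label. A small self-contained sketch of the same guard pattern follows; the function name and alignment are illustrative, not copied from the MXNet sources.

#include <iostream>

// Illustrative stand-in for the logger's severity-to-label mapping; the
// guard comments keep clang-format from collapsing the manual column
// alignment of the nested ternary.
static const char* SeverityName(int severity) {
  // clang-format off
  const char* sevstr = (severity == 0 ? "    BUG" :
                        severity == 1 ? "  ERROR" :
                        severity == 2 ? "WARNING" :
                        severity == 3 ? "   INFO" :
                                        "UNKNOWN");
  // clang-format on
  return sevstr;
}

int main() {
  std::cout << "[" << SeverityName(2) << "] sample message\n";
  return 0;
}
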
