【Hackathon 6th Fundable Projects 2 No.22】cppcoreguidelines-pro-type-member-init_3-part #64035

Merged · 6 commits · May 22, 2024
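This change set applies one pattern throughout: local aggregates (struct stat, FsChannelConfig, the TensorRT nvinfer1 structs, std::array, and similar) that were previously default-initialized, and therefore held indeterminate values until assigned, are now value-initialized with an empty brace initializer so that the clang-tidy check cppcoreguidelines-pro-type-member-init no longer flags them. A minimal sketch of the before/after, not taken from the PR, using `struct stat` and `std::array` as stand-ins:

```cpp
#include <array>
#include <cstdio>
#include <sys/stat.h>

int main() {
  // Before: default-initialized, members/elements are indeterminate until
  // a later syscall or loop fills them in (flagged by the check).
  //   struct stat sb;
  //   std::array<void*, 100> call_stack;

  // After: empty-brace initialization value-initializes (zeroes) everything.
  struct stat sb = {};
  std::array<void*, 100> call_stack = {};

  std::printf("size=%lld first=%p\n",
              static_cast<long long>(sb.st_size),  // 0
              call_stack[0]);                      // nullptr
  return 0;
}
```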
2 changes: 1 addition & 1 deletion paddle/common/enforce.cc
@@ -95,7 +95,7 @@ std::string GetCurrentTraceBackString(bool for_signal) {
#if !defined(_WIN32) && !defined(PADDLE_WITH_MUSL)
static constexpr int TRACE_STACK_LIMIT = 100;

std::array<void*, TRACE_STACK_LIMIT> call_stack;
std::array<void*, TRACE_STACK_LIMIT> call_stack = {};
auto size = backtrace(call_stack.data(), TRACE_STACK_LIMIT);
auto symbols = backtrace_symbols(call_stack.data(), size);
Dl_info info;
12 changes: 6 additions & 6 deletions paddle/fluid/distributed/ps/table/memory_sparse_table.cc
@@ -148,7 +148,7 @@ int32_t MemorySparseTable::Load(const std::string &path,
omp_set_num_threads(thread_num);
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < _real_local_shard_num; ++i) {
FsChannelConfig channel_config;
FsChannelConfig channel_config = {};
channel_config.path = file_list[file_start_idx + i];
VLOG(1) << "MemorySparseTable::load begin load " << channel_config.path
<< " into local shard " << i;
@@ -227,7 +227,7 @@ int32_t MemorySparseTable::LoadPatch(const std::vector<std::string> &file_list,
omp_set_num_threads(thread_num);
#pragma omp parallel for schedule(dynamic)
for (int i = start_idx; i < end_idx; ++i) {
FsChannelConfig channel_config;
FsChannelConfig channel_config = {};
channel_config.path = file_list[i];
channel_config.converter = _value_accessor->Converter(load_param).converter;
channel_config.deconverter =
@@ -351,7 +351,7 @@ int32_t MemorySparseTable::Save(const std::string &dirname,
omp_set_num_threads(thread_num);
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < _real_local_shard_num; ++i) {
FsChannelConfig channel_config;
FsChannelConfig channel_config = {};
if (_config.compress_in_save() && (save_param == 0 || save_param == 3)) {
channel_config.path =
::paddle::string::format_string("%s/part-%03d-%05d.gz",
@@ -495,7 +495,7 @@ int32_t MemorySparseTable::Save_v2(const std::string &dirname,

#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < _real_local_shard_num; ++i) {
FsChannelConfig channel_config;
FsChannelConfig channel_config = {};
FsChannelConfig channel_config_for_slot_feature;

if (_config.compress_in_save() && (save_param == 0 || save_param == 3)) {
@@ -670,7 +670,7 @@ int32_t MemorySparseTable::SavePatch(const std::string &path, int save_param) {
omp_set_num_threads(thread_num);
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < _m_real_local_shard_num; ++i) {
FsChannelConfig channel_config;
FsChannelConfig channel_config = {};
channel_config.path = ::paddle::string::format_string("%s/part-%03d-%05d",
table_path.c_str(),
_shard_idx,
@@ -870,7 +870,7 @@ int32_t MemorySparseTable::SaveCache(
_afs_client.remove(::paddle::string::format_string(
"%s/part-%03d", table_path.c_str(), _shard_idx));
uint32_t feasign_size = 0;
FsChannelConfig channel_config;
FsChannelConfig channel_config = {};
// not compress cache model
channel_config.path = ::paddle::string::format_string(
"%s/part-%03d", table_path.c_str(), _shard_idx);
2 changes: 1 addition & 1 deletion paddle/fluid/framework/data_feed.cc
@@ -1660,7 +1660,7 @@ bool MultiSlotFileInstantDataFeed::Preprocess(const std::string& filename) {
"Fail to open file: %s in MultiSlotFileInstantDataFeed.",
filename.c_str()));

struct stat sb;
struct stat sb = {};
fstat(fd_, &sb);
end_ = static_cast<size_t>(sb.st_size);

2 changes: 1 addition & 1 deletion paddle/fluid/framework/io/fs.cc
@@ -147,7 +147,7 @@ std::shared_ptr<FILE> localfs_open_append_write(std::string path,
}

int64_t localfs_file_size(const std::string& path) {
struct stat buf;
struct stat buf = {};
if (0 != stat(path.c_str(), &buf)) {
PADDLE_THROW(platform::errors::External(
"Failed to get file status via stat function."));
@@ -58,7 +58,7 @@ class DeformableConvOpConverter : public OpConverter {
PADDLE_GET_CONST(int, op_desc.GetAttr("deformable_groups"));
auto im2col_step = PADDLE_GET_CONST(int, op_desc.GetAttr("im2col_step"));

nvinfer1::Weights weights;
nvinfer1::Weights weights = {};
weights.count = filter_tensor->numel();
// TODO(bukejiyu): deformable_conv currently does not support fp16
// mode,will be supported in the future.
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/transpose_op.cc
@@ -35,7 +35,7 @@ class TransposeOpConverter : public OpConverter {
axis[i]--;
}
}
nvinfer1::Permutation perm;
nvinfer1::Permutation perm = {};
for (int i = 0; i < dims; i++) {
int j = engine_->with_dynamic_shape() ? i : i + 1;
perm.order[i] = axis[j];
28 changes: 14 additions & 14 deletions paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc
@@ -41,7 +41,7 @@ class ExprWrapper {
friend ExprWrapper BinaryOp(const ExprWrapper& a,
const ExprWrapper& b,
nvinfer1::DimensionOperation op) {
ExprWrapper result;
ExprWrapper result = {};
if (a.expr_builder) {
result.expr_builder = a.expr_builder;
}
@@ -57,7 +57,7 @@ class ExprWrapper {
int b_value,
nvinfer1::DimensionOperation op) {
assert(a.expr_builder);
ExprWrapper b;
ExprWrapper b = {};
b.expr_builder = a.expr_builder;
b.expr = b.expr_builder->constant(b_value);
return BinaryOp(a, b, op);
@@ -129,7 +129,7 @@ static std::vector<ExprWrapper> DimsExprs2VecExprWrapper(

static nvinfer1::DimsExprs VecExprWrapper2DimsExprs(
const std::vector<ExprWrapper>& output_dims_wrapper) {
nvinfer1::DimsExprs output_dims;
nvinfer1::DimsExprs output_dims = {};
output_dims.nbDims = output_dims_wrapper.size();
for (int i = 0; i < output_dims.nbDims; i++) {
output_dims.d[i] = output_dims_wrapper[i].extract_expr();
@@ -163,7 +163,7 @@ nvinfer1::DimsExprs GatherNdInferMeta(
}
}

nvinfer1::DimsExprs output;
nvinfer1::DimsExprs output = {};
output.nbDims = result_dims.size();
for (int i = 0; i < output.nbDims; i++) {
output.d[i] = result_dims[i];
@@ -196,7 +196,7 @@ nvinfer1::DimsExprs YoloBoxInferMeta(
nvinfer1::DimensionOperation::kPROD, *dim_x.d[2], *dim_x.d[3]),
*expr_builder.constant(anchor_num));

nvinfer1::DimsExprs output;
nvinfer1::DimsExprs output = {};
output.nbDims = 3;
if (output_index == 0) {
output.d[0] = dim_x.d[0];
@@ -314,7 +314,7 @@ nvinfer1::DimsExprs UnfoldInferMeta(
nvinfer1::DimensionOperation::kPROD, *output_height, *output_width);

out_dims.push_back(output_col_length);
nvinfer1::DimsExprs output;
nvinfer1::DimsExprs output = {};
output.nbDims = out_dims.size();
for (size_t i = 0; i < out_dims.size(); i++) output.d[i] = out_dims[i];
return output;
@@ -368,7 +368,7 @@ nvinfer1::DimsExprs Pad3dInferMeta(
const framework::OpDesc& op_desc) {
const nvinfer1::DimsExprs x_dim = inputs[0];

nvinfer1::DimsExprs out_dims;
nvinfer1::DimsExprs out_dims = {};
out_dims.nbDims = x_dim.nbDims;

out_dims.d[0] = x_dim.d[0];
@@ -496,7 +496,7 @@ nvinfer1::DimsExprs PNormInferMeta(
}
x_dim.d[axis] = expr_builder.constant(1);

nvinfer1::DimsExprs output;
nvinfer1::DimsExprs output = {};
if (keepdim) {
output = x_dim;
} else {
@@ -515,7 +515,7 @@ nvinfer1::DimsExprs GridSamplerInferMeta(
const nvinfer1::DimsExprs x_dims = inputs[0];
const nvinfer1::DimsExprs grid_dims = inputs[1];

nvinfer1::DimsExprs output;
nvinfer1::DimsExprs output = {};
if (grid_dims.nbDims == 4) {
output.nbDims = 4;
output.d[0] = x_dims.d[0];
@@ -684,7 +684,7 @@ nvinfer1::DimsExprs LookupTableV2InferMeta(
const auto x_dims = inputs[0];
const auto weight_dims = inputs[1];

nvinfer1::DimsExprs output;
nvinfer1::DimsExprs output = {};
output.nbDims = x_dims.nbDims + 1;
for (int i = 0; i < x_dims.nbDims; ++i) {
output.d[i] = x_dims.d[i];
@@ -714,13 +714,13 @@ nvinfer1::DimsExprs MemoryEfficientAttentionInferMeta(
if (output_index == 0) {
return inputs[0];
} else if (output_index == 1) {
nvinfer1::DimsExprs output;
nvinfer1::DimsExprs output = {};
output.nbDims = 2;
output.d[0] = inputs[0].d[0];
output.d[1] = inputs[0].d[2];
return output;
} else {
nvinfer1::DimsExprs output;
nvinfer1::DimsExprs output = {};
output.nbDims = 1;
output.d[0] = expr_builder.constant(2);
return output;
@@ -815,7 +815,7 @@ nvinfer1::DimsExprs PadInferMeta(
auto paddings =
PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("paddings"));

nvinfer1::DimsExprs output;
nvinfer1::DimsExprs output = {};
output.nbDims = x_dims.nbDims;
for (int i = 0; i < x_dims.nbDims; ++i) {
output.d[i] = expr_builder.operation(
@@ -852,7 +852,7 @@ nvinfer1::DimsExprs ArgsortInferMeta(
nvinfer1::IExprBuilder& expr_builder, // NOLINT
const framework::OpDesc& op_desc) {
const nvinfer1::DimsExprs input_dims = inputs[0];
nvinfer1::DimsExprs output;
nvinfer1::DimsExprs output = {};
output.nbDims = input_dims.nbDims;
for (int i = 0; i < input_dims.nbDims; ++i) {
output.d[i] = input_dims.d[i];
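For context on the dynamic_shape_infermeta.cc changes above: nvinfer1::DimsExprs, nvinfer1::Permutation, and nvinfer1::Weights are plain aggregates in the TensorRT headers whose fields (nbDims, d[], order[], count, and so on) are indeterminate unless explicitly initialized, and the converter code typically fills in only the first nbDims entries afterwards. A small sketch with a hypothetical stand-in struct (not the real TensorRT type) showing why the added `= {}` matters:

```cpp
#include <cassert>

// Hypothetical stand-in shaped like nvinfer1::DimsExprs; the real type is
// declared in the TensorRT headers and is used the same way below.
struct DimsLike {
  int nbDims;
  const void* d[8];
};

int main() {
  DimsLike output = {};  // value-initialized: nbDims == 0, every d[i] null
  output.nbDims = 2;     // the calling code then fills only d[0..nbDims)
  int x = 0, y = 0;
  output.d[0] = &x;
  output.d[1] = &y;

  // Without "= {}", the unfilled tail of d[] would be indeterminate; with it,
  // every untouched slot is a well-defined nullptr.
  for (int i = output.nbDims; i < 8; ++i) assert(output.d[i] == nullptr);
  return 0;
}
```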
2 changes: 1 addition & 1 deletion paddle/fluid/inference/utils/table_printer.cc
@@ -65,7 +65,7 @@ TablePrinter::TablePrinter(const std::vector<std::string>& header) {
terminal_width = csbi.dwSize.X;
}
#else
struct winsize terminal_size;
struct winsize terminal_size = {};
int status = ioctl(STDOUT_FILENO, TIOCGWINSZ, &terminal_size);
if (status == 0 && terminal_size.ws_col != 0) {
terminal_width = terminal_size.ws_col;
8 changes: 4 additions & 4 deletions paddle/fluid/platform/gen_comm_id_helper.cc
@@ -163,7 +163,7 @@ int CreateListenSocket(const std::string& ep) {
int opt = 1;

// NOTE. The linger is used for skipping TIME-WAIT status forcefully.
linger ling;
linger ling = {};
ling.l_onoff = 1;
ling.l_linger = 0;

@@ -185,7 +185,7 @@ int CreateListenSocket(const std::string& ep) {
"setsockopt");
#endif

struct sockaddr_in address;
struct sockaddr_in address = {};
address.sin_family = AF_INET;
address.sin_addr.s_addr = INADDR_ANY;
address.sin_port = htons(port);
@@ -219,7 +219,7 @@ static int SocketAccept(int server_fd, const CommHead head) {
static_assert(sizeof(CommHead) <= 1024,
"sizeof(CommHead) must <= buffer size");

struct sockaddr_in client_addr;
struct sockaddr_in client_addr = {};
socklen_t addr_length = sizeof(client_addr);
std::array<char, 1024> buffer{0};
int conn = -1;
@@ -255,7 +255,7 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
std::string host = addr[0];
int port = std::stoi(addr[1]);

struct sockaddr_in server_addr;
struct sockaddr_in server_addr = {};
memset(&server_addr, 0, sizeof(server_addr));
server_addr.sin_family = AF_INET;
server_addr.sin_port = htons(port);
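One note on gen_comm_id_helper.cc: in ConnectAddr the new `= {}` sits directly above an existing memset of the same sockaddr_in, so the two zero the struct redundantly; presumably the brace initializer is there to satisfy the check at the point of declaration rather than to change behavior. A minimal sketch, assuming POSIX headers, of the equivalence:

```cpp
#include <cassert>
#include <cstring>
#include <netinet/in.h>

int main() {
  struct sockaddr_in a = {};       // value-initialized: every member is zero
  struct sockaddr_in b;            // members indeterminate here...
  std::memset(&b, 0, sizeof(b));   // ...until memset clears the whole object

  assert(a.sin_family == 0 && a.sin_port == 0 && a.sin_addr.s_addr == 0);
  assert(b.sin_family == 0 && b.sin_port == 0 && b.sin_addr.s_addr == 0);
  return 0;
}
```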
2 changes: 1 addition & 1 deletion paddle/fluid/platform/init.cc
@@ -342,7 +342,7 @@ void DisableSignalHandler() {
#ifndef _WIN32
for (const auto &SignalErrorString : SignalErrorStrings) {
int signal_number = SignalErrorString.signal_number;
struct sigaction sig_action;
struct sigaction sig_action = {};
memset(&sig_action, 0, sizeof(sig_action));
sigemptyset(&sig_action.sa_mask);
sig_action.sa_handler = SIG_DFL;
2 changes: 1 addition & 1 deletion paddle/phi/backends/device_code.cc
@@ -177,7 +177,7 @@ static std::string FindCUDAIncludePath() {
return pos != std::string::npos && pos == (str.length() - substr.length());
};

struct stat st;
struct stat st = {};
std::string cuda_include_path;
if (!FLAGS_cuda_dir.empty()) {
cuda_include_path = FLAGS_cuda_dir;
6 changes: 3 additions & 3 deletions paddle/phi/common/port.cc
@@ -75,7 +75,7 @@ int gettimeofday(struct timeval *tp, void *tzp) {
#endif // !_WIN32

void ExecShellCommand(const std::string &cmd, std::string *message) {
std::array<char, 128> buffer;
std::array<char, 128> buffer = {};
#if !defined(_WIN32)
std::shared_ptr<FILE> pipe(popen(cmd.c_str(), "r"), pclose);
#else
@@ -94,7 +94,7 @@ void ExecShellCommand(const std::string &cmd, std::string *message) {

bool PathExists(const std::string &path) {
#if !defined(_WIN32)
struct stat statbuf;
struct stat statbuf = {};
if (stat(path.c_str(), &statbuf) != -1) {
if (S_ISDIR(statbuf.st_mode)) {
return true;
@@ -119,7 +119,7 @@ constexpr char kSEP = '\\';

bool FileExists(const std::string &filepath) {
#if !defined(_WIN32)
struct stat buffer;
struct stat buffer = {};
return (stat(filepath.c_str(), &buffer) == 0);
#else
struct _stat buffer;
6 changes: 3 additions & 3 deletions paddle/phi/core/distributed/store/socket.cpp
@@ -34,7 +34,7 @@ static int _get_sockname_of_win(int sock, char* out, int out_len) {
}
#else
static int _get_sockname(int sock, char *out, int out_len) {
struct sockaddr_in addr;
struct sockaddr_in addr = {};
socklen_t s_len = sizeof(addr);

if (::getpeername(sock, reinterpret_cast<sockaddr *>(&addr), &s_len)) {
@@ -43,7 +43,7 @@ static int _get_sockname(int sock, char *out, int out_len) {
return -1;
}

std::array<char, 128> ip;
std::array<char, 128> ip = {};
int port = 0;

// deal with both IPv4 and IPv6:
@@ -71,7 +71,7 @@ int GetSockName(int sock, char* out, int out_len) {
}

std::string GetSockName(int fd) {
std::array<char, 256> out;
std::array<char, 256> out = {};
GetSockName(fd, out.data(), sizeof(out));
return std::string(out.data());
}
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/eigvals_kernel.cc
@@ -215,7 +215,7 @@ void EigvalsKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
std::vector<DenseTensor> out_vectors = out->Split(1, 0);

// query workspace size
T qwork;
T qwork = T();
int info = 0;
funcs::lapackEig<T, dtype::Real<T>>('N',
'N',
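In eigvals_kernel.cc the variable's type is the template parameter T (a real or complex scalar), so the patch writes `T qwork = T();` rather than `= {}`; both value-initialize, producing zero for arithmetic types and (0,0) for std::complex. A short sketch of the idiom:

```cpp
#include <complex>
#include <iostream>

template <typename T>
T QueryWorkspace() {
  T qwork = T();  // value-initialized: 0 for float/double, (0,0) for complex
  // "T qwork;" would leave an arithmetic T indeterminate and be flagged.
  return qwork;
}

int main() {
  std::cout << QueryWorkspace<double>() << "\n";               // 0
  std::cout << QueryWorkspace<std::complex<float>>() << "\n";  // (0,0)
  return 0;
}
```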
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/fill_diagonal_tensor_kernel.cc
@@ -88,8 +88,8 @@ void FillDiagonalTensorKernel(const Context &ctx,
auto matdims = y.dims();
auto fill_dims = common::flatten_to_2d(matdims, matdims.size() - 1);

std::array<int64_t, 2> new_dims;
std::array<int64_t, 2> strides;
std::array<int64_t, 2> new_dims = {};
std::array<int64_t, 2> strides = {};
std::vector<int64_t> matdim;
matdim.resize(fill_dims[0]);
CalMatDims(out_dims,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/index_put_grad_kernel.cc
@@ -79,7 +79,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx,
bool accumulate,
DenseTensor* value_grad,
DenseTensor* x_grad) {
std::array<const int64_t*, 7> pd_indices;
std::array<const int64_t*, 7> pd_indices = {};
for (size_t i = 0; i < indices.size(); ++i) {
pd_indices[i] = indices[i]->data<int64_t>();
}
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/yolo_box_kernel.cc
@@ -67,7 +67,7 @@ void YoloBoxKernel(const Context& dev_ctx,

memset(scores_data, 0, scores->numel() * sizeof(T));

std::array<T, 4> box;
std::array<T, 4> box = {};
for (int i = 0; i < n; i++) {
int img_height = imgsize_data[2 * i];
int img_width = imgsize_data[2 * i + 1];
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/yolo_loss_kernel.cc
@@ -60,7 +60,7 @@ static inline Box<T> GetYoloBox(const T* x,
int stride,
float scale,
float bias) {
Box<T> b;
Box<T> b = {};
b.x = (i + sigmoid<T>(x[index]) * scale + bias) / grid_size;
b.y = (j + sigmoid<T>(x[index + stride]) * scale + bias) / grid_size;
b.w = std::exp(x[index + 2 * stride]) * anchors[2 * an_idx] / input_size;
@@ -307,7 +307,7 @@ void YoloLossKernel(const Context& dev_ctx,
// for positive sample, all losses should be calculated, and for
// other samples, only objectness loss is required.
for (int an_idx = 0; an_idx < an_num; an_idx++) {
Box<T> an_box;
Box<T> an_box = {};
an_box.x = 0.0;
an_box.y = 0.0;
an_box.w = anchors[2 * an_idx] / static_cast<T>(input_size);