diff --git a/paddle/common/enforce.cc b/paddle/common/enforce.cc
index 6dd4f0372e2b3..843b4d7278515 100644
--- a/paddle/common/enforce.cc
+++ b/paddle/common/enforce.cc
@@ -95,7 +95,7 @@ std::string GetCurrentTraceBackString(bool for_signal) {
 #if !defined(_WIN32) && !defined(PADDLE_WITH_MUSL)
   static constexpr int TRACE_STACK_LIMIT = 100;
 
-  std::array<void*, TRACE_STACK_LIMIT> call_stack;
+  std::array<void*, TRACE_STACK_LIMIT> call_stack = {};
   auto size = backtrace(call_stack.data(), TRACE_STACK_LIMIT);
   auto symbols = backtrace_symbols(call_stack.data(), size);
   Dl_info info;
diff --git a/paddle/fluid/distributed/ps/table/memory_sparse_table.cc b/paddle/fluid/distributed/ps/table/memory_sparse_table.cc
index a2f8ff346ffca..9f0835b306696 100644
--- a/paddle/fluid/distributed/ps/table/memory_sparse_table.cc
+++ b/paddle/fluid/distributed/ps/table/memory_sparse_table.cc
@@ -148,7 +148,7 @@ int32_t MemorySparseTable::Load(const std::string &path,
   omp_set_num_threads(thread_num);
 #pragma omp parallel for schedule(dynamic)
   for (int i = 0; i < _real_local_shard_num; ++i) {
-    FsChannelConfig channel_config;
+    FsChannelConfig channel_config = {};
     channel_config.path = file_list[file_start_idx + i];
     VLOG(1) << "MemorySparseTable::load begin load " << channel_config.path
             << " into local shard " << i;
@@ -227,7 +227,7 @@ int32_t MemorySparseTable::LoadPatch(const std::vector<std::string> &file_list,
   omp_set_num_threads(thread_num);
 #pragma omp parallel for schedule(dynamic)
   for (int i = start_idx; i < end_idx; ++i) {
-    FsChannelConfig channel_config;
+    FsChannelConfig channel_config = {};
     channel_config.path = file_list[i];
     channel_config.converter = _value_accessor->Converter(load_param).converter;
     channel_config.deconverter =
@@ -351,7 +351,7 @@ int32_t MemorySparseTable::Save(const std::string &dirname,
   omp_set_num_threads(thread_num);
 #pragma omp parallel for schedule(dynamic)
   for (int i = 0; i < _real_local_shard_num; ++i) {
-    FsChannelConfig channel_config;
+    FsChannelConfig channel_config = {};
     if (_config.compress_in_save() && (save_param == 0 || save_param == 3)) {
       channel_config.path =
           ::paddle::string::format_string("%s/part-%03d-%05d.gz",
@@ -495,7 +495,7 @@ int32_t MemorySparseTable::Save_v2(const std::string &dirname,
 
 #pragma omp parallel for schedule(dynamic)
   for (int i = 0; i < _real_local_shard_num; ++i) {
-    FsChannelConfig channel_config;
+    FsChannelConfig channel_config = {};
     FsChannelConfig channel_config_for_slot_feature;
 
     if (_config.compress_in_save() && (save_param == 0 || save_param == 3)) {
@@ -670,7 +670,7 @@ int32_t MemorySparseTable::SavePatch(const std::string &path, int save_param) {
   omp_set_num_threads(thread_num);
 #pragma omp parallel for schedule(dynamic)
   for (int i = 0; i < _m_real_local_shard_num; ++i) {
-    FsChannelConfig channel_config;
+    FsChannelConfig channel_config = {};
     channel_config.path = ::paddle::string::format_string("%s/part-%03d-%05d",
                                                            table_path.c_str(),
                                                            _shard_idx,
@@ -870,7 +870,7 @@ int32_t MemorySparseTable::SaveCache(
   _afs_client.remove(::paddle::string::format_string(
       "%s/part-%03d", table_path.c_str(), _shard_idx));
   uint32_t feasign_size = 0;
-  FsChannelConfig channel_config;
+  FsChannelConfig channel_config = {};
   // not compress cache model
   channel_config.path = ::paddle::string::format_string(
       "%s/part-%03d", table_path.c_str(), _shard_idx);
diff --git a/paddle/fluid/framework/data_feed.cc b/paddle/fluid/framework/data_feed.cc
index fd6f4efca9c6c..8822120ab1781 100644
--- a/paddle/fluid/framework/data_feed.cc
+++ b/paddle/fluid/framework/data_feed.cc
@@ -1660,7 +1660,7 @@ bool MultiSlotFileInstantDataFeed::Preprocess(const std::string& filename) {
           "Fail to open file: %s in MultiSlotFileInstantDataFeed.",
           filename.c_str()));
 
-  struct stat sb;
+  struct stat sb = {};
   fstat(fd_, &sb);
   end_ = static_cast(sb.st_size);
 
diff --git a/paddle/fluid/framework/io/fs.cc b/paddle/fluid/framework/io/fs.cc
index 4a689409d412b..71c0b69e01817 100644
--- a/paddle/fluid/framework/io/fs.cc
+++ b/paddle/fluid/framework/io/fs.cc
@@ -147,7 +147,7 @@ std::shared_ptr<FILE> localfs_open_append_write(std::string path,
 }
 
 int64_t localfs_file_size(const std::string& path) {
-  struct stat buf;
+  struct stat buf = {};
   if (0 != stat(path.c_str(), &buf)) {
     PADDLE_THROW(platform::errors::External(
         "Failed to get file status via stat function."));
diff --git a/paddle/fluid/inference/tensorrt/convert/deformable_conv_op.cc b/paddle/fluid/inference/tensorrt/convert/deformable_conv_op.cc
index 4df2abb1a32ea..3983363455650 100644
--- a/paddle/fluid/inference/tensorrt/convert/deformable_conv_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/deformable_conv_op.cc
@@ -58,7 +58,7 @@ class DeformableConvOpConverter : public OpConverter {
         PADDLE_GET_CONST(int, op_desc.GetAttr("deformable_groups"));
     auto im2col_step = PADDLE_GET_CONST(int, op_desc.GetAttr("im2col_step"));
 
-    nvinfer1::Weights weights;
+    nvinfer1::Weights weights = {};
     weights.count = filter_tensor->numel();
     // TODO(bukejiyu): deformable_conv currently does not support fp16
     // mode,will be supported in the future.
diff --git a/paddle/fluid/inference/tensorrt/convert/transpose_op.cc b/paddle/fluid/inference/tensorrt/convert/transpose_op.cc
index b16e8c2968714..62ef6edd2230b 100644
--- a/paddle/fluid/inference/tensorrt/convert/transpose_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/transpose_op.cc
@@ -35,7 +35,7 @@ class TransposeOpConverter : public OpConverter {
         axis[i]--;
       }
     }
-    nvinfer1::Permutation perm;
+    nvinfer1::Permutation perm = {};
     for (int i = 0; i < dims; i++) {
       int j = engine_->with_dynamic_shape() ? i : i + 1;
       perm.order[i] = axis[j];
diff --git a/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc b/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc
index 942eecc6e0fe6..ea333754f894b 100644
--- a/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc
+++ b/paddle/fluid/inference/tensorrt/dynamic_shape_infermeta.cc
@@ -41,7 +41,7 @@ class ExprWrapper {
   friend ExprWrapper BinaryOp(const ExprWrapper& a,
                               const ExprWrapper& b,
                               nvinfer1::DimensionOperation op) {
-    ExprWrapper result;
+    ExprWrapper result = {};
     if (a.expr_builder) {
       result.expr_builder = a.expr_builder;
     }
@@ -57,7 +57,7 @@ class ExprWrapper {
                               int b_value,
                               nvinfer1::DimensionOperation op) {
     assert(a.expr_builder);
-    ExprWrapper b;
+    ExprWrapper b = {};
     b.expr_builder = a.expr_builder;
     b.expr = b.expr_builder->constant(b_value);
     return BinaryOp(a, b, op);
@@ -129,7 +129,7 @@ static std::vector<ExprWrapper> DimsExprs2VecExprWrapper(
 
 static nvinfer1::DimsExprs VecExprWrapper2DimsExprs(
     const std::vector<ExprWrapper>& output_dims_wrapper) {
-  nvinfer1::DimsExprs output_dims;
+  nvinfer1::DimsExprs output_dims = {};
   output_dims.nbDims = output_dims_wrapper.size();
   for (int i = 0; i < output_dims.nbDims; i++) {
     output_dims.d[i] = output_dims_wrapper[i].extract_expr();
@@ -163,7 +163,7 @@ nvinfer1::DimsExprs GatherNdInferMeta(
     }
   }
 
-  nvinfer1::DimsExprs output;
+  nvinfer1::DimsExprs output = {};
   output.nbDims = result_dims.size();
   for (int i = 0; i < output.nbDims; i++) {
     output.d[i] = result_dims[i];
@@ -196,7 +196,7 @@ nvinfer1::DimsExprs YoloBoxInferMeta(
           nvinfer1::DimensionOperation::kPROD, *dim_x.d[2], *dim_x.d[3]),
       *expr_builder.constant(anchor_num));
 
-  nvinfer1::DimsExprs output;
+  nvinfer1::DimsExprs output = {};
   output.nbDims = 3;
   if (output_index == 0) {
     output.d[0] = dim_x.d[0];
@@ -314,7 +314,7 @@ nvinfer1::DimsExprs UnfoldInferMeta(
       nvinfer1::DimensionOperation::kPROD, *output_height, *output_width);
   out_dims.push_back(output_col_length);
 
-  nvinfer1::DimsExprs output;
+  nvinfer1::DimsExprs output = {};
   output.nbDims = out_dims.size();
   for (size_t i = 0; i < out_dims.size(); i++) output.d[i] = out_dims[i];
   return output;
@@ -368,7 +368,7 @@ nvinfer1::DimsExprs Pad3dInferMeta(
     const framework::OpDesc& op_desc) {
   const nvinfer1::DimsExprs x_dim = inputs[0];
 
-  nvinfer1::DimsExprs out_dims;
+  nvinfer1::DimsExprs out_dims = {};
   out_dims.nbDims = x_dim.nbDims;
   out_dims.d[0] = x_dim.d[0];
 
@@ -496,7 +496,7 @@ nvinfer1::DimsExprs PNormInferMeta(
   }
   x_dim.d[axis] = expr_builder.constant(1);
 
-  nvinfer1::DimsExprs output;
+  nvinfer1::DimsExprs output = {};
   if (keepdim) {
     output = x_dim;
   } else {
@@ -515,7 +515,7 @@ nvinfer1::DimsExprs GridSamplerInferMeta(
   const nvinfer1::DimsExprs x_dims = inputs[0];
   const nvinfer1::DimsExprs grid_dims = inputs[1];
 
-  nvinfer1::DimsExprs output;
+  nvinfer1::DimsExprs output = {};
   if (grid_dims.nbDims == 4) {
     output.nbDims = 4;
     output.d[0] = x_dims.d[0];
@@ -684,7 +684,7 @@ nvinfer1::DimsExprs LookupTableV2InferMeta(
   const auto x_dims = inputs[0];
   const auto weight_dims = inputs[1];
 
-  nvinfer1::DimsExprs output;
+  nvinfer1::DimsExprs output = {};
   output.nbDims = x_dims.nbDims + 1;
   for (int i = 0; i < x_dims.nbDims; ++i) {
     output.d[i] = x_dims.d[i];
@@ -714,13 +714,13 @@ nvinfer1::DimsExprs MemoryEfficientAttentionInferMeta(
   if (output_index == 0) {
     return inputs[0];
   } else if (output_index == 1) {
-    nvinfer1::DimsExprs output;
+    nvinfer1::DimsExprs output = {};
     output.nbDims = 2;
     output.d[0] = inputs[0].d[0];
     output.d[1] = inputs[0].d[2];
     return output;
   } else {
-    nvinfer1::DimsExprs output;
+    nvinfer1::DimsExprs output = {};
     output.nbDims = 1;
     output.d[0] = expr_builder.constant(2);
     return output;
@@ -815,7 +815,7 @@ nvinfer1::DimsExprs PadInferMeta(
   auto paddings =
       PADDLE_GET_CONST(std::vector<int>, op_desc.GetAttr("paddings"));
 
-  nvinfer1::DimsExprs output;
+  nvinfer1::DimsExprs output = {};
   output.nbDims = x_dims.nbDims;
   for (int i = 0; i < x_dims.nbDims; ++i) {
     output.d[i] = expr_builder.operation(
@@ -852,7 +852,7 @@ nvinfer1::DimsExprs ArgsortInferMeta(
     nvinfer1::IExprBuilder& expr_builder,  // NOLINT
     const framework::OpDesc& op_desc) {
   const nvinfer1::DimsExprs input_dims = inputs[0];
-  nvinfer1::DimsExprs output;
+  nvinfer1::DimsExprs output = {};
   output.nbDims = input_dims.nbDims;
   for (int i = 0; i < input_dims.nbDims; ++i) {
     output.d[i] = input_dims.d[i];
diff --git a/paddle/fluid/inference/utils/table_printer.cc b/paddle/fluid/inference/utils/table_printer.cc
index 19b4a94834a17..a9b6633217ad0 100644
--- a/paddle/fluid/inference/utils/table_printer.cc
+++ b/paddle/fluid/inference/utils/table_printer.cc
@@ -65,7 +65,7 @@ TablePrinter::TablePrinter(const std::vector& header) {
     terminal_width = csbi.dwSize.X;
   }
 #else
-  struct winsize terminal_size;
+  struct winsize terminal_size = {};
  int status = ioctl(STDOUT_FILENO, TIOCGWINSZ, &terminal_size);
   if (status == 0 && terminal_size.ws_col != 0) {
     terminal_width = terminal_size.ws_col;
diff --git a/paddle/fluid/platform/gen_comm_id_helper.cc b/paddle/fluid/platform/gen_comm_id_helper.cc
index 7d16fc368d166..b461080d07af4 100644
--- a/paddle/fluid/platform/gen_comm_id_helper.cc
+++ b/paddle/fluid/platform/gen_comm_id_helper.cc
@@ -163,7 +163,7 @@ int CreateListenSocket(const std::string& ep) {
 
   int opt = 1;
   // NOTE. The linger is used for skipping TIME-WAIT status forcefully.
-  linger ling;
+  linger ling = {};
   ling.l_onoff = 1;
   ling.l_linger = 0;
 
@@ -185,7 +185,7 @@ int CreateListenSocket(const std::string& ep) {
                    "setsockopt");
 #endif
 
-  struct sockaddr_in address;
+  struct sockaddr_in address = {};
   address.sin_family = AF_INET;
   address.sin_addr.s_addr = INADDR_ANY;
   address.sin_port = htons(port);
@@ -219,7 +219,7 @@ static int SocketAccept(int server_fd, const CommHead head) {
   static_assert(sizeof(CommHead) <= 1024,
                 "sizeof(CommHead) must <= buffer size");
 
-  struct sockaddr_in client_addr;
+  struct sockaddr_in client_addr = {};
   socklen_t addr_length = sizeof(client_addr);
   std::array<char, 1024> buffer{0};
   int conn = -1;
@@ -255,7 +255,7 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
   std::string host = addr[0];
   int port = std::stoi(addr[1]);
 
-  struct sockaddr_in server_addr;
+  struct sockaddr_in server_addr = {};
   memset(&server_addr, 0, sizeof(server_addr));
   server_addr.sin_family = AF_INET;
   server_addr.sin_port = htons(port);
diff --git a/paddle/fluid/platform/init.cc b/paddle/fluid/platform/init.cc
index 39ab6e97caba1..833e01c146b37 100644
--- a/paddle/fluid/platform/init.cc
+++ b/paddle/fluid/platform/init.cc
@@ -342,7 +342,7 @@ void DisableSignalHandler() {
 #ifndef _WIN32
   for (const auto &SignalErrorString : SignalErrorStrings) {
     int signal_number = SignalErrorString.signal_number;
-    struct sigaction sig_action;
+    struct sigaction sig_action = {};
     memset(&sig_action, 0, sizeof(sig_action));
     sigemptyset(&sig_action.sa_mask);
     sig_action.sa_handler = SIG_DFL;
diff --git a/paddle/phi/backends/device_code.cc b/paddle/phi/backends/device_code.cc
index eceec2723ad35..8c07131fe9c42 100644
--- a/paddle/phi/backends/device_code.cc
+++ b/paddle/phi/backends/device_code.cc
@@ -177,7 +177,7 @@ static std::string FindCUDAIncludePath() {
     return pos != std::string::npos && pos == (str.length() - substr.length());
   };
 
-  struct stat st;
+  struct stat st = {};
   std::string cuda_include_path;
   if (!FLAGS_cuda_dir.empty()) {
     cuda_include_path = FLAGS_cuda_dir;
diff --git a/paddle/phi/common/port.cc b/paddle/phi/common/port.cc
index 8c94232260aef..513088decf497 100644
--- a/paddle/phi/common/port.cc
+++ b/paddle/phi/common/port.cc
@@ -75,7 +75,7 @@ int gettimeofday(struct timeval *tp, void *tzp) {
 #endif  // !_WIN32
 
 void ExecShellCommand(const std::string &cmd, std::string *message) {
-  std::array buffer;
+  std::array buffer = {};
 #if !defined(_WIN32)
   std::shared_ptr<FILE> pipe(popen(cmd.c_str(), "r"), pclose);
 #else
@@ -94,7 +94,7 @@ void ExecShellCommand(const std::string &cmd, std::string *message) {
 
 bool PathExists(const std::string &path) {
 #if !defined(_WIN32)
-  struct stat statbuf;
+  struct stat statbuf = {};
   if (stat(path.c_str(), &statbuf) != -1) {
     if (S_ISDIR(statbuf.st_mode)) {
       return true;
@@ -119,7 +119,7 @@ constexpr char kSEP = '\\';
 
 bool FileExists(const std::string &filepath) {
 #if !defined(_WIN32)
-  struct stat buffer;
+  struct stat buffer = {};
   return (stat(filepath.c_str(), &buffer) == 0);
 #else
   struct _stat buffer;
diff --git a/paddle/phi/core/distributed/store/socket.cpp b/paddle/phi/core/distributed/store/socket.cpp
index c61144ecfe515..8b260e9da202b 100644
--- a/paddle/phi/core/distributed/store/socket.cpp
+++ b/paddle/phi/core/distributed/store/socket.cpp
@@ -34,7 +34,7 @@ static int _get_sockname_of_win(int sock, char* out, int out_len) {
 }
 #else
 static int _get_sockname(int sock, char *out, int out_len) {
-  struct sockaddr_in addr;
+  struct sockaddr_in addr = {};
   socklen_t s_len = sizeof(addr);
 
   if (::getpeername(sock, reinterpret_cast<struct sockaddr *>(&addr), &s_len)) {
@@ -43,7 +43,7 @@ static int _get_sockname(int sock, char *out, int out_len) {
     return -1;
   }
 
-  std::array ip;
+  std::array ip = {};
   int port = 0;
 
   // deal with both IPv4 and IPv6:
@@ -71,7 +71,7 @@ int GetSockName(int sock, char* out, int out_len) {
 }
 
 std::string GetSockName(int fd) {
-  std::array out;
+  std::array out = {};
   GetSockName(fd, out.data(), sizeof(out));
   return std::string(out.data());
 }
diff --git a/paddle/phi/kernels/cpu/eigvals_kernel.cc b/paddle/phi/kernels/cpu/eigvals_kernel.cc
index f716a4de53922..a104a1c9a1805 100644
--- a/paddle/phi/kernels/cpu/eigvals_kernel.cc
+++ b/paddle/phi/kernels/cpu/eigvals_kernel.cc
@@ -215,7 +215,7 @@ void EigvalsKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
   std::vector out_vectors = out->Split(1, 0);
 
   // query workspace size
-  T qwork;
+  T qwork = T();
   int info = 0;
   funcs::lapackEig>('N',
                     'N',
diff --git a/paddle/phi/kernels/cpu/fill_diagonal_tensor_kernel.cc b/paddle/phi/kernels/cpu/fill_diagonal_tensor_kernel.cc
index c612b7a6a3f5d..5d0fa3c8b5753 100644
--- a/paddle/phi/kernels/cpu/fill_diagonal_tensor_kernel.cc
+++ b/paddle/phi/kernels/cpu/fill_diagonal_tensor_kernel.cc
@@ -88,8 +88,8 @@ void FillDiagonalTensorKernel(const Context &ctx,
   auto matdims = y.dims();
   auto fill_dims = common::flatten_to_2d(matdims, matdims.size() - 1);
 
-  std::array new_dims;
-  std::array strides;
+  std::array new_dims = {};
+  std::array strides = {};
   std::vector matdim;
   matdim.resize(fill_dims[0]);
   CalMatDims(out_dims,
diff --git a/paddle/phi/kernels/cpu/index_put_grad_kernel.cc b/paddle/phi/kernels/cpu/index_put_grad_kernel.cc
index 8a100af33f018..83c161c2f0bb2 100644
--- a/paddle/phi/kernels/cpu/index_put_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/index_put_grad_kernel.cc
@@ -79,7 +79,7 @@ void LaunchIndexPutGradKernel(const Context& dev_ctx,
                               bool accumulate,
                               DenseTensor* value_grad,
                               DenseTensor* x_grad) {
-  std::array pd_indices;
+  std::array pd_indices = {};
   for (size_t i = 0; i < indices.size(); ++i) {
     pd_indices[i] = indices[i]->data();
   }
diff --git a/paddle/phi/kernels/cpu/yolo_box_kernel.cc b/paddle/phi/kernels/cpu/yolo_box_kernel.cc
index c6b1948f3c9a9..c80e99e9ea8bd 100644
--- a/paddle/phi/kernels/cpu/yolo_box_kernel.cc
+++ b/paddle/phi/kernels/cpu/yolo_box_kernel.cc
@@ -67,7 +67,7 @@ void YoloBoxKernel(const Context& dev_ctx,
 
   memset(scores_data, 0, scores->numel() * sizeof(T));
 
-  std::array box;
+  std::array box = {};
   for (int i = 0; i < n; i++) {
     int img_height = imgsize_data[2 * i];
     int img_width = imgsize_data[2 * i + 1];
diff --git a/paddle/phi/kernels/cpu/yolo_loss_kernel.cc b/paddle/phi/kernels/cpu/yolo_loss_kernel.cc
index 280ac791d049b..9ee91e9dd87d4 100644
--- a/paddle/phi/kernels/cpu/yolo_loss_kernel.cc
+++ b/paddle/phi/kernels/cpu/yolo_loss_kernel.cc
@@ -60,7 +60,7 @@ static inline Box GetYoloBox(const T* x,
                              int stride,
                              float scale,
                              float bias) {
-  Box b;
+  Box b = {};
   b.x = (i + sigmoid(x[index]) * scale + bias) / grid_size;
   b.y = (j + sigmoid(x[index + stride]) * scale + bias) / grid_size;
   b.w = std::exp(x[index + 2 * stride]) * anchors[2 * an_idx] / input_size;
@@ -307,7 +307,7 @@ void YoloLossKernel(const Context& dev_ctx,
       // for positive sample, all losses should be calculated, and for
       // other samples, only objectness loss is required.
       for (int an_idx = 0; an_idx < an_num; an_idx++) {
-        Box an_box;
+        Box an_box = {};
         an_box.x = 0.0;
         an_box.y = 0.0;
         an_box.w = anchors[2 * an_idx] / static_cast<T>(input_size);
diff --git a/test/cpp/phi/kernels/strided_memcpy_test.cc b/test/cpp/phi/kernels/strided_memcpy_test.cc
index 6fb0014956c46..3f29d09a1e686 100644
--- a/test/cpp/phi/kernels/strided_memcpy_test.cc
+++ b/test/cpp/phi/kernels/strided_memcpy_test.cc
@@ -32,7 +32,7 @@ TEST(StridedMemcpy, CPUCrop) {
 
   phi::DDim src_stride({5, 1});
 
-  std::array dst;
+  std::array dst = {};
   phi::DDim dst_dim({2, 2});
   phi::DDim dst_stride({2, 1});
 
@@ -54,7 +54,7 @@ TEST(StridedMemcpy, CPUConcat) {
   };
   // clang-format on
 
-  std::array dst;
+  std::array dst = {};
   phi::DDim src_stride({2, 1});
   phi::DDim dst_dim({2, 2});
   phi::DDim dst_stride({4, 1});
@@ -100,7 +100,7 @@ TEST(StridedMemcpy, GPUCrop) {
 
   phi::DDim src_stride({5, 1});
 
-  std::array dst;
+  std::array dst = {};
   auto dst_allocation = phi::memory_utils::Alloc(gpu0, sizeof(dst));
   int* gpu_dst = reinterpret_cast<int*>(dst_allocation->ptr());
 
@@ -139,7 +139,7 @@ TEST(StridedMemcpy, GPUConcat) {
   memory_utils::Copy(
       gpu0, gpu_src, cpu, src.data(), sizeof(src), ctx->stream());
 
-  std::array dst;
+  std::array dst = {};
   auto gpu_dst_allocation = phi::memory_utils::Alloc(gpu0, sizeof(dst));
   int* gpu_dst = reinterpret_cast<int*>(gpu_dst_allocation->ptr());
 
diff --git a/test/cpp/phi/kernels/test_cpu_vec.cc b/test/cpp/phi/kernels/test_cpu_vec.cc
index 88e9d16b87b2b..d11c28bbc6b05 100644
--- a/test/cpp/phi/kernels/test_cpu_vec.cc
+++ b/test/cpp/phi/kernels/test_cpu_vec.cc
@@ -25,7 +25,7 @@ namespace phi {
 namespace tests {
 
 inline double GetCurrentUS() {
-  struct timeval time;
+  struct timeval time = {};
   gettimeofday(&time, nullptr);
   return 1e+6 * time.tv_sec + time.tv_usec;  // NOLINT
 }