
Commit

part-2
walkalone20 committed May 5, 2024
1 parent e9394a6 commit b7148e7
Showing 3 changed files with 9 additions and 9 deletions.
6 changes: 3 additions & 3 deletions paddle/phi/core/distributed/store/socket.cpp
@@ -34,7 +34,7 @@ static int _get_sockname_of_win(int sock, char* out, int out_len) {
 }
 #else
 static int _get_sockname(int sock, char *out, int out_len) {
-  struct sockaddr_in addr;
+  struct sockaddr_in addr = {};
   socklen_t s_len = sizeof(addr);
 
   if (::getpeername(sock, reinterpret_cast<sockaddr *>(&addr), &s_len)) {
@@ -43,7 +43,7 @@ static int _get_sockname(int sock, char *out, int out_len) {
     return -1;
   }
 
-  std::array<char, 128> ip;
+  std::array<char, 128> ip = {};
   int port = 0;
 
   // deal with both IPv4 and IPv6:
@@ -71,7 +71,7 @@ int GetSockName(int sock, char* out, int out_len) {
 }
 
 std::string GetSockName(int fd) {
-  std::array<char, 256> out;
+  std::array<char, 256> out = {};
   GetSockName(fd, out.data(), sizeof(out));
   return std::string(out.data());
 }
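The pattern in this file is value-initialization: locals whose contents would otherwise be indeterminate are declared with "= {}", so the sockaddr struct and the character buffers are zero-filled before they are handed to C APIs. A minimal sketch of the idea follows; print_peer() is a hypothetical stand-in and not a function from the patch.

// Sketch only: illustrates why the buffers above are value-initialized.
// print_peer() is a hypothetical helper, not code from this commit.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <array>
#include <cstdio>

void print_peer(const sockaddr_in &addr) {
  std::array<char, INET_ADDRSTRLEN> ip = {};  // zero-filled, so ip.data() is
                                              // always a valid C string
  if (::inet_ntop(AF_INET, &addr.sin_addr, ip.data(), ip.size()) == nullptr) {
    // On failure nothing is written, but the buffer still holds "" rather
    // than indeterminate bytes, so printing it below stays well-defined.
  }
  std::printf("%s:%d\n", ip.data(), ntohs(addr.sin_port));
}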
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/fill_diagonal_tensor_kernel.cc
@@ -88,8 +88,8 @@ void FillDiagonalTensorKernel(const Context &ctx,
   auto matdims = y.dims();
   auto fill_dims = common::flatten_to_2d(matdims, matdims.size() - 1);
 
-  std::array<int64_t, 2> new_dims;
-  std::array<int64_t, 2> strides;
+  std::array<int64_t, 2> new_dims = {};
+  std::array<int64_t, 2> strides = {};
   std::vector<int64_t> matdim;
   matdim.resize(fill_dims[0]);
   CalMatDims(out_dims,
8 changes: 4 additions & 4 deletions test/cpp/phi/kernels/strided_memcpy_test.cc
@@ -32,7 +32,7 @@ TEST(StridedMemcpy, CPUCrop) {
 
   phi::DDim src_stride({5, 1});
 
-  std::array<int, 4> dst;
+  std::array<int, 4> dst = {};
   phi::DDim dst_dim({2, 2});
   phi::DDim dst_stride({2, 1});
 
@@ -54,7 +54,7 @@ TEST(StridedMemcpy, CPUConcat) {
   };
   // clang-format on
 
-  std::array<int, 8> dst;
+  std::array<int, 8> dst = {};
   phi::DDim src_stride({2, 1});
   phi::DDim dst_dim({2, 2});
   phi::DDim dst_stride({4, 1});
@@ -100,7 +100,7 @@ TEST(StridedMemcpy, GPUCrop) {
 
   phi::DDim src_stride({5, 1});
 
-  std::array<int, 4> dst;
+  std::array<int, 4> dst = {};
   auto dst_allocation = phi::memory_utils::Alloc(gpu0, sizeof(dst));
   int* gpu_dst = reinterpret_cast<int*>(dst_allocation->ptr());
 
@@ -139,7 +139,7 @@ TEST(StridedMemcpy, GPUConcat) {
   memory_utils::Copy(
       gpu0, gpu_src, cpu, src.data(), sizeof(src), ctx->stream());
 
-  std::array<int, 8> dst;
+  std::array<int, 8> dst = {};
   auto gpu_dst_allocation = phi::memory_utils::Alloc(gpu0, sizeof(dst));
   int* gpu_dst = reinterpret_cast<int*>(gpu_dst_allocation->ptr());
 
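In the tests the same change makes the destination buffers deterministic: std::array is an aggregate, so "= {}" zero-initializes every element, while sizeof(dst) still reports the full byte size of the storage, which is what the GPU tests pass to the allocator. A small self-contained sketch, under the assumptions noted in its comments and not taken from the patch:

// Sketch only, not part of the patch. Shows that "= {}" zero-fills a
// std::array and that sizeof() covers the whole element storage (the size
// identity below holds on mainstream implementations, where std::array
// carries no extra padding).
#include <array>
#include <cassert>

int main() {
  std::array<int, 8> dst = {};              // every element is 0
  for (int v : dst) assert(v == 0);
  assert(sizeof(dst) == 8 * sizeof(int));   // same footprint as a raw int[8]
  return 0;
}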
