#1111: rdma: fix missing allow MPI calls causing test failure
lifflander committed Oct 16, 2020
1 parent 87d5d2e commit a6bef90
Showing 3 changed files with 18 additions and 0 deletions.
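This commit adds `VT_ALLOW_MPI_CALLS;` statements (from `vt/runtime/mpi_access.h`) immediately before direct MPI calls in the RDMA handle code, and extends the wrapper generator's pattern list with `MPI_Win_(lock|unlock)`. As a rough illustration only, the guard behind such a macro could be an RAII object tied to the enclosing scope; the sketch below is an assumption for context, not the actual definition in `src/vt/runtime/mpi_access.h`:

```cpp
// Illustrative stand-in only: one plausible shape for the guard behind
// VT_ALLOW_MPI_CALLS. The real macro is defined in src/vt/runtime/mpi_access.h
// and may differ in detail.
struct ScopedMPIAllow {
  static inline int depth = 0;                      // >0 means MPI calls are permitted here
  ScopedMPIAllow()  noexcept { ++depth; }
  ~ScopedMPIAllow() noexcept { --depth; }
  static bool allowed() noexcept { return depth > 0; }
};

// Each `VT_ALLOW_MPI_CALLS;` in the diff would then introduce one guard whose
// lifetime ends with the enclosing scope (function body or lambda body).
#define VT_ALLOW_MPI_CALLS ::ScopedMPIAllow _vt_mpi_allow_guard{}
```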
1 change: 1 addition & 0 deletions src/vt/pmpi/generate_mpi_wrappers.pl
@@ -127,6 +127,7 @@ sub extract_defs {
MPI_Comm_rank
MPI_Comm_size
MPI_Get_.*(?<!accumulate)
MPI_Win_(lock|unlock)
);

sub should_guard_call {
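The generator `generate_mpi_wrappers.pl` emits PMPI-based interception wrappers, and `should_guard_call` consults the pattern list above to decide which MPI entry points are checked. Purely as an illustration of the wrapper shape such a generator can emit (the function chosen here and the check are assumptions; the real generated output may differ):

```cpp
#include <mpi.h>
#include <cassert>

namespace {
// Stand-in predicate for vt's runtime check; not the real implementation.
bool mpi_calls_allowed() { return true; }
}

// PMPI interposition: the wrapper runs the check, then forwards to the real MPI
// entry point via its profiling name.
extern "C" int MPI_Barrier(MPI_Comm comm) {
  assert(mpi_calls_allowed() && "MPI_Barrier issued outside an allowed scope");
  return PMPI_Barrier(comm);
}
```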
15 changes: 15 additions & 0 deletions src/vt/rdmahandle/holder.impl.h
@@ -46,6 +46,7 @@
#define INCLUDED_VT_RDMAHANDLE_HOLDER_IMPL_H

#include "vt/config.h"
#include "vt/runtime/mpi_access.h"

namespace vt { namespace rdma {

@@ -100,6 +101,7 @@ void Holder<T,E>::allocateDataWindow(std::size_t const in_len) {

template <typename T, HandleEnum E>
std::size_t Holder<T,E>::getCount(vt::NodeType node, Lock l) {
VT_ALLOW_MPI_CALLS;
uint64_t result = 0;
auto mpi_type = TypeMPI<uint64_t>::getType();
{
@@ -116,6 +118,7 @@ std::size_t Holder<T,E>::getCount(vt::NodeType node, Lock l) {

template <typename T, HandleEnum E>
void Holder<T,E>::deallocate() {
VT_ALLOW_MPI_CALLS;
if (E == HandleEnum::StaticSize and ready_) {
MPI_Win_free(&data_window_);
MPI_Free_mem(data_base_);
@@ -155,6 +158,7 @@ RequestHolder Holder<T,E>::rget(
"MPI_Get({}, {}, {}, {}, {}, {}, {}, window);\n",
print_ptr(ptr), len, mpi_type_str, node, offset, len, mpi_type_str
);
VT_ALLOW_MPI_CALLS;
MPI_Get(ptr, len, mpi_type, node, offset, len, mpi_type, data_window_);
});
} else {
@@ -164,6 +168,7 @@
"MPI_Rget({}, {}, {}, {}, {}, {}, {}, window);\n",
print_ptr(ptr), len, mpi_type_str, node, offset, len, mpi_type_str
);
VT_ALLOW_MPI_CALLS;
MPI_Rget(ptr, len, mpi_type, node, offset, len, mpi_type, data_window_, r.add());
}
return r;
@@ -191,6 +196,7 @@ RequestHolder Holder<T,E>::rput(
"MPI_Put({}, {}, {}, {}, {}, {}, {}, window);\n",
print_ptr(ptr), len, mpi_type_str, node, offset, len, mpi_type_str
);
VT_ALLOW_MPI_CALLS;
MPI_Put(ptr, len, mpi_type, node, offset, len, mpi_type, data_window_);
});
} else {
@@ -200,6 +206,7 @@
"MPI_Rput({}, {}, {}, {}, {}, {}, {}, window);\n",
print_ptr(ptr), len, mpi_type_str, node, offset, len, mpi_type_str
);
VT_ALLOW_MPI_CALLS;
MPI_Rput(ptr, len, mpi_type, node, offset, len, mpi_type, data_window_, r.add());
}
return r;
@@ -224,6 +231,7 @@ T Holder<T,E>::fetchOp(vt::NodeType node, Lock l, T in, int offset, MPI_Op op) {
"MPI_Fetch_and_op({}, {}, {}, {}, {}, window);\n",
in, print_ptr(&out), mpi_type_str, node, offset
);
VT_ALLOW_MPI_CALLS;
MPI_Fetch_and_op(&in, &out, mpi_type, node, offset, op, data_window_);
}
return out;
@@ -245,6 +253,7 @@ RequestHolder Holder<T,E>::raccum(
"MPI_Accumulate({}, {}, {}, {}, {}, {}, {}, window);\n",
print_ptr(ptr), len, mpi_type_str, node, offset, len, mpi_type_str
);
VT_ALLOW_MPI_CALLS;
MPI_Accumulate(
ptr, len, mpi_type, node, offset, len, mpi_type, op, data_window_
);
@@ -256,6 +265,7 @@
"MPI_Raccumulate({}, {}, {}, {}, {}, {}, {}, window);\n",
print_ptr(ptr), len, mpi_type_str, node, offset, len, mpi_type_str
);
VT_ALLOW_MPI_CALLS;
MPI_Raccumulate(
ptr, len, mpi_type, node, offset, len, mpi_type, op, data_window_, r.add()
);
@@ -273,26 +283,31 @@ void Holder<T,E>::accum(

template <typename T, HandleEnum E>
void Holder<T,E>::fence(int assert) {
VT_ALLOW_MPI_CALLS;
MPI_Win_fence(assert, data_window_);
}

template <typename T, HandleEnum E>
void Holder<T,E>::sync() {
VT_ALLOW_MPI_CALLS;
MPI_Win_sync(data_window_);
}

template <typename T, HandleEnum E>
void Holder<T,E>::flush(vt::NodeType node) {
VT_ALLOW_MPI_CALLS;
MPI_Win_flush(node, data_window_);
}

template <typename T, HandleEnum E>
void Holder<T,E>::flushLocal(vt::NodeType node) {
VT_ALLOW_MPI_CALLS;
MPI_Win_flush_local(node, data_window_);
}

template <typename T, HandleEnum E>
void Holder<T,E>::flushAll() {
VT_ALLOW_MPI_CALLS;
MPI_Win_flush_all(data_window_);
}

2 changes: 2 additions & 0 deletions src/vt/rdmahandle/request_holder.cc
@@ -47,6 +47,7 @@

#include "vt/config.h"
#include "vt/rdmahandle/request_holder.h"
#include "vt/runtime/mpi_access.h"

namespace vt { namespace rdma {

@@ -60,6 +61,7 @@ MPI_Request* RequestHolder::add() {
}

bool RequestHolder::test() {
VT_ALLOW_MPI_CALLS;
std::vector<MPI_Request> new_reqs;
std::vector<MPI_Status> stats;
stats.resize(reqs_.size());
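`RequestHolder::test()` now opens a `VT_ALLOW_MPI_CALLS` scope before probing its pending requests. The diff only shows the vectors being prepared; as a hedged sketch of the kind of completion loop such a method might run over a request list (the actual body is not shown in this diff), consider:

```cpp
#include <mpi.h>
#include <utility>
#include <vector>

// Sketch of testing a set of outstanding MPI requests and keeping only the ones
// still in flight; this approximates the structure suggested by the visible
// declarations (new_reqs, stats) and is not the real RequestHolder::test() body.
bool testAll(std::vector<MPI_Request>& reqs) {
  std::vector<MPI_Request> new_reqs;        // requests that have not yet completed
  std::vector<MPI_Status> stats(reqs.size());
  for (std::size_t i = 0; i < reqs.size(); ++i) {
    int flag = 0;
    MPI_Test(&reqs[i], &flag, &stats[i]);   // must run inside a VT_ALLOW_MPI_CALLS scope
    if (not flag) {
      new_reqs.push_back(reqs[i]);
    }
  }
  reqs = std::move(new_reqs);
  return reqs.empty();                      // true once every request has completed
}
```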
