
Merge pull request #1997 from DARMA-tasking/1932-reduce-calls-to-thecontext

1932 reduce calls to thecontext
nlslatt authored Oct 20, 2022
2 parents e6b6724 + af8d214 commit c9e686a
Showing 4 changed files with 20 additions and 27 deletions.
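
Every hunk in this commit applies the same pattern: rather than calling theContext()->getNode() at each use site, the messaging and termination components read a NodeType member, this_node_, that caches the node id. The diff does not show where this_node_ is initialized, so the sketch below is only an illustration of the pattern with made-up names (a stub Context, an ExampleMessenger); it is not the vt implementation.

// Minimal sketch of the refactoring pattern, not vt code. The stub Context
// stands in for vt::theContext(), and ExampleMessenger for components such
// as ActiveMessenger that now read a cached this_node_ member.
#include <cstdint>
#include <iostream>

using NodeType = int16_t;  // assumption: an integral node id, like vt's NodeType

struct Context {
  NodeType getNode() const { return 3; }  // fixed id, enough for the sketch
};

Context* theContext() { static Context ctx; return &ctx; }

class ExampleMessenger {
public:
  // Before: every call site invoked theContext()->getNode().
  // After: the id is fetched once at startup and reused through this_node_.
  void startup() { this_node_ = theContext()->getNode(); }

  bool isSelfSend(NodeType dest) const { return dest == this_node_; }

private:
  NodeType this_node_ = -1;  // cached node id, mirroring this_node_ in the diff
};

int main() {
  ExampleMessenger m;
  m.startup();
  std::cout << "self-send to node 3? " << m.isSelfSend(3) << "\n";  // prints 1
}
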
src/vt/messaging/active.cc: 20 changes (9 additions, 11 deletions)
@@ -151,9 +151,8 @@ void ActiveMessenger::initialize() {
 }
 
 void ActiveMessenger::startup() {
-  auto const this_node = theContext()->getNode();
   bare_handler_dummy_elm_id_for_lb_data_ =
-    elm::ElmIDBits::createBareHandler(this_node);
+    elm::ElmIDBits::createBareHandler(this_node_);
 
 #if vt_check_enabled(lblite)
   // Hook to collect LB data about objgroups
@@ -225,7 +224,7 @@ EventType ActiveMessenger::sendMsgBytesWithPut(
   }
 
   vtWarnIf(
-    dest == theContext()->getNode() &&
+    dest == this_node_ &&
     not is_bcast &&
     not theConfig()->vt_lb_self_migration,
     fmt::format("Destination {} should != this node", dest)
@@ -375,7 +374,6 @@ EventType ActiveMessenger::sendMsgMPI(
      "sendMsgMPI: (multi): size={}\n", msg_size
    );
    auto tag = allocateNewTag();
-   auto this_node = theContext()->getNode();
 
    // Send the actual data in multiple chunks
    PtrLenPairType tup = std::make_tuple(untyped_msg, msg_size);
@@ -387,7 +385,7 @@
    mpi_event->setManagedMessage(base.to<ShortMessage>());
 
    // Send the control message to receive the multiple chunks of data
-   auto m = makeMessage<MultiMsg>(info, this_node, msg_size);
+   auto m = makeMessage<MultiMsg>(info, this_node_, msg_size);
    sendMsg<MultiMsg, chunkedMultiMsg>(dest, m);
 
    return event_id;
@@ -491,9 +489,8 @@ EventType ActiveMessenger::doMessageSend(
   );
 
   // Don't go through MPI with self-send, schedule the message locally instead
-  auto const this_node = theContext()->getNode();
   if (deliver) {
-    if (dest != this_node) {
+    if (dest != this_node_) {
       sendMsgBytesWithPut(dest, base, send_tag);
     } else {
       recordLBDataCommForSend(dest, base, base.size());
@@ -611,7 +608,7 @@ std::tuple<EventType, int> ActiveMessenger::sendDataMPI(
   }
 
   if (events.size() > 1) {
-    ret_event = theEvent()->createParentEvent(theContext()->getNode());
+    ret_event = theEvent()->createParentEvent(this_node_);
     auto& holder = theEvent()->getEventHolder(ret_event);
     for (auto&& child_event : events) {
       holder.get_event()->addEventToList(child_event);
@@ -864,8 +861,9 @@ void ActiveMessenger::recordLBDataCommForSend(
   NodeType const dest, MsgSharedPtr<BaseMsgType> const& base,
   MsgSizeType const msg_size
 ) {
-  if (theContext()->getTask() != nullptr) {
-    auto lb = theContext()->getTask()->get<ctx::LBData>();
+  auto the_task = theContext()->getTask();
+  if (the_task != nullptr) {
+    auto lb = the_task->get<ctx::LBData>();
 
     if (lb) {
       auto const& msg = base.get();
@@ -874,7 +872,7 @@
 
       if (not already_recorded) {
         auto dest_elm_id = elm::ElmIDBits::createBareHandler(dest);
-        theContext()->getTask()->send(dest_elm_id, msg_size);
+        the_task->send(dest_elm_id, msg_size);
       }
     }
   }
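
The recordLBDataCommForSend hunks above apply the same idea at function scope: theContext()->getTask() is looked up once into a local, the_task, and reused for the null check, the LBData lookup, and the send, instead of being re-evaluated each time. A schematic before/after, using a hypothetical currentTask() accessor rather than the real vt ctx API:

#include <iostream>

struct Task {
  void send(int elm_id, int bytes) { std::cout << elm_id << ": " << bytes << " bytes\n"; }
};

// Hypothetical accessor standing in for theContext()->getTask().
Task* currentTask() { static Task task; return &task; }

// Before: the accessor is evaluated twice per call.
void recordSendBefore(int elm_id, int bytes) {
  if (currentTask() != nullptr) {
    currentTask()->send(elm_id, bytes);
  }
}

// After: one lookup, hoisted into a local and reused (as with the_task).
void recordSendAfter(int elm_id, int bytes) {
  auto the_task = currentTask();
  if (the_task != nullptr) {
    the_task->send(elm_id, bytes);
  }
}

int main() {
  recordSendBefore(7, 128);
  recordSendAfter(7, 128);
}
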
src/vt/messaging/active.impl.h: 2 changes (1 addition, 1 deletion)
@@ -169,7 +169,7 @@ ActiveMessenger::PendingSendType ActiveMessenger::sendMsgCopyableImpl(
   }
 
   if (is_bcast) {
-    dest = theContext()->getNode();
+    dest = this_node_;
   }
   if (tag != no_tag) {
     envelopeSetTag(rawMsg->env, tag);
src/vt/termination/termination.cc: 23 changes (9 additions, 14 deletions)
@@ -154,12 +154,11 @@ TerminationDetector::getDSTerm(EpochType epoch, bool is_root) {
   if (isDS(epoch)) {
     auto iter = term_.find(epoch);
     if (iter == term_.end()) {
-      auto const this_node = theContext()->getNode();
       term_.emplace(
         std::piecewise_construct,
         std::forward_as_tuple(epoch),
         std::forward_as_tuple(
-          TerminatorType{epoch,is_root,this_node}
+          TerminatorType{epoch,is_root,this_node_}
         )
       );
       iter = term_.find(epoch);
@@ -278,11 +277,11 @@ std::shared_ptr<TerminationDetector::EpochGraph> TerminationDetector::makeGraph(
   auto root = std::make_shared<EpochGraph>(any_epoch_state_.getEpoch(), glabel);
   // Collect non-rooted epochs, just collective, excluding DS or other rooted
   // epochs (info about them is localized on the creation node)
-  auto const this_node = theContext()->getNode();
+
   for (auto const& elm : epoch_state_) {
     auto const ep = elm.first;
     bool const rooted = epoch::EpochManip::isRooted(ep);
-    if (not rooted or (rooted and epoch::EpochManip::node(ep) == this_node)) {
+    if (not rooted or (rooted and epoch::EpochManip::node(ep) == this_node_)) {
       if (not isEpochTerminated(elm.first)) {
         auto label = elm.second.getLabel();
         live_epochs[ep] = std::make_shared<EpochGraph>(ep, label);
@@ -292,7 +291,7 @@
   for (auto const& elm : term_) {
     // Only include DS epochs that are created here. Other nodes do not have
     // proper successor info about the rooted, DS epochs
-    if (epoch::EpochManip::node(elm.first) == this_node) {
+    if (epoch::EpochManip::node(elm.first) == this_node_) {
       if (not isEpochTerminated(elm.first)) {
         auto label = elm.second.getLabel();
         live_epochs[elm.first] = std::make_shared<EpochGraph>(
@@ -668,13 +667,12 @@ void TerminationDetector::epochTerminated(EpochType const& epoch, CallFromEnum f
 
   // Matching consume on global epoch once a nested epoch terminates
   if (epoch != any_epoch_sentinel) {
-    auto const this_node = theContext()->getNode();
     bool const is_rooted = isRooted(epoch);
     bool const is_ds = isDS(epoch);
     if (
       not is_rooted or
       is_ds or
-      (is_rooted and epoch::EpochManip::node(epoch) == this_node)
+      (is_rooted and epoch::EpochManip::node(epoch) == this_node_)
     ) {
       consumeOnGlobal(epoch);
     }
@@ -686,10 +684,9 @@ void TerminationDetector::inquireTerminated(
 ) {
   auto const& is_rooted = epoch::EpochManip::isRooted(epoch);
   auto const& epoch_root_node = epoch::EpochManip::node(epoch);
-  auto const& this_node = theContext()->getNode();
 
   vtAssertInfo(
-    !is_rooted || epoch_root_node == this_node,
+    !is_rooted || epoch_root_node == this_node_,
     "Must be not rooted or this is root node",
     is_rooted, epoch_root_node, epoch, from
   );
@@ -757,9 +754,8 @@ TermStatusEnum TerminationDetector::testEpochTerminated(EpochType epoch) {
   if (theEpoch()->getTerminatedWindow(epoch)->isTerminated(epoch)) {
     status = TermStatusEnum::Terminated;
   } else if (is_rooted_epoch) {
-    auto const& this_node = theContext()->getNode();
     auto const& root = epoch::EpochManip::node(epoch);
-    if (root == this_node) {
+    if (root == this_node_) {
       /*
        * The idea here is that if this is executed on the root, it must have
        * valid info on whether the rooted live or terminated
* Send a message to the root node to find out whether this epoch is
* terminated or not
*/
auto msg = makeMessage<TermTerminatedMsg>(epoch,this_node);
auto msg = makeMessage<TermTerminatedMsg>(epoch,this_node_);
theMsg()->sendMsg<TermTerminatedMsg,inquireEpochTerminated>(root, msg);
epoch_wait_status_.insert(epoch);
}
@@ -1123,8 +1119,7 @@ void TerminationDetector::activateEpoch(EpochType const& epoch) {
     "activateEpoch: epoch={:x}\n", epoch
   );
 
-  auto const this_node = theContext()->getNode();
-  if (isRooted(epoch) and epoch::EpochManip::node(epoch) == this_node) {
+  if (isRooted(epoch) and epoch::EpochManip::node(epoch) == this_node_) {
     produceOnGlobal(epoch);
   }
 
src/vt/termination/termination.impl.h: 2 changes (1 addition, 1 deletion)
@@ -120,7 +120,7 @@ inline void TerminationDetector::produceConsume(
 
   // If a node is not passed, use the current node (self-prod/cons)
   if (node == uninitialized_destination) {
-    node = this_node_;
+    node = this_node_;
   }
 
   if (produce) {
