diff --git a/src/kvstore/raftex/Host.cpp b/src/kvstore/raftex/Host.cpp
index 2027a746914..84079966175 100644
--- a/src/kvstore/raftex/Host.cpp
+++ b/src/kvstore/raftex/Host.cpp
@@ -131,9 +131,7 @@ folly::Future<cpp2::AppendLogResponse> Host::appendLogs(
         if (cachingPromise_.size() <= FLAGS_max_outstanding_requests) {
             pendingReq_ = std::make_tuple(term,
                                           logId,
-                                          committedLogId,
-                                          prevLogTerm,
-                                          prevLogId);
+                                          committedLogId);
             return cachingPromise_.getFuture();
         } else {
             PLOG_EVERY_N(INFO, 200) << idStr_
@@ -155,14 +153,17 @@ folly::Future<cpp2::AppendLogResponse> Host::appendLogs(
         VLOG(2) << idStr_ << "About to send the AppendLog request";
 
         // No request is ongoing, let's send a new request
+        if (UNLIKELY(lastLogIdSent_ == 0 && lastLogTermSent_ == 0)) {
+            LOG(INFO) << idStr_ << "This is the first time to send the logs to this host";
+            lastLogIdSent_ = prevLogId;
+            lastLogTermSent_ = prevLogTerm;
+        }
         CHECK_GE(prevLogTerm, lastLogTermSent_);
         CHECK_GE(prevLogId, lastLogIdSent_);
         logTermToSend_ = term;
         logIdToSend_ = logId;
-        lastLogTermSent_ = prevLogTerm;
-        lastLogIdSent_ = prevLogId;
         committedLogId_ = committedLogId;
-        pendingReq_ = std::make_tuple(0, 0, 0, 0, 0);
+        pendingReq_ = std::make_tuple(0, 0, 0);
         promise_ = std::move(cachingPromise_);
         cachingPromise_ = folly::SharedPromise<cpp2::AppendLogResponse>();
         ret = promise_.getFuture();
@@ -183,7 +184,7 @@ void Host::setResponse(const cpp2::AppendLogResponse& r) {
     promise_.setValue(r);
     cachingPromise_.setValue(r);
     cachingPromise_ = folly::SharedPromise<cpp2::AppendLogResponse>();
-    pendingReq_ = std::make_tuple(0, 0, 0, 0, 0);
+    pendingReq_ = std::make_tuple(0, 0, 0);
     requestOnGoing_ = false;
 }
 
@@ -257,7 +258,7 @@ void Host::appendLogsInternal(folly::EventBase* eb,
 
                 self->promise_ = std::move(self->cachingPromise_);
                 self->cachingPromise_ = folly::SharedPromise<cpp2::AppendLogResponse>();
-                self->pendingReq_ = std::make_tuple(0, 0, 0, 0, 0);
+                self->pendingReq_ = std::make_tuple(0, 0, 0);
             }
         }
 
@@ -437,7 +438,7 @@ folly::Future<cpp2::AppendLogResponse> Host::sendAppendLogRequest(
 
 bool Host::noRequest() const {
     CHECK(!lock_.try_lock());
-    static auto emptyTup = std::make_tuple(0, 0, 0, 0, 0);
+    static auto emptyTup = std::make_tuple(0, 0, 0);
     return pendingReq_ == emptyTup;
 }
 
diff --git a/src/kvstore/raftex/Host.h b/src/kvstore/raftex/Host.h
index 021f39abac1..7a76867db42 100644
--- a/src/kvstore/raftex/Host.h
+++ b/src/kvstore/raftex/Host.h
@@ -97,8 +97,8 @@ class Host final : public std::enable_shared_from_this<Host> {
     }
 
 private:
-    // <term, logId, committedLogId, prevLogTerm, prevLogId>
-    using Request = std::tuple<TermID, LogID, LogID, TermID, LogID>;
+    // <term, logId, committedLogId>
+    using Request = std::tuple<TermID, LogID, LogID>;
 
     std::shared_ptr<RaftPart> part_;
     const HostAddr addr_;
@@ -115,7 +115,7 @@ class Host final : public std::enable_shared_from_this<Host> {
 
     folly::SharedPromise<cpp2::AppendLogResponse> promise_;
     folly::SharedPromise<cpp2::AppendLogResponse> cachingPromise_;
-    Request pendingReq_{0, 0, 0, 0, 0};
+    Request pendingReq_{0, 0, 0};
 
     // These logId and term pointing to the latest log we need to send
     LogID logIdToSend_{0};
diff --git a/src/kvstore/raftex/RaftPart.cpp b/src/kvstore/raftex/RaftPart.cpp
index e38775a744e..e89b4045726 100644
--- a/src/kvstore/raftex/RaftPart.cpp
+++ b/src/kvstore/raftex/RaftPart.cpp
@@ -793,6 +793,9 @@ bool RaftPart::needToStartElection() {
         role_ == Role::FOLLOWER &&
         (lastMsgRecvDur_.elapsedInSec() >= FLAGS_raft_heartbeat_interval_secs ||
          term_ == 0)) {
+        LOG(INFO) << idStr_ << "Start leader election, reason: lastMsgDur "
+                  << lastMsgRecvDur_.elapsedInSec()
+                  << ", term " << term_;
         role_ = Role::CANDIDATE;
     }
 
diff --git a/src/kvstore/raftex/test/LogCommandTest.cpp b/src/kvstore/raftex/test/LogCommandTest.cpp
index 4d266088bf2..7fbd76532f7 100644
--- a/src/kvstore/raftex/test/LogCommandTest.cpp
+++ b/src/kvstore/raftex/test/LogCommandTest.cpp
@@ -55,7 +55,7 @@ TEST_F(LogCommandTest, CommandInMiddle) {
     ASSERT_EQ(3, leader_->commitTimes_);
 
     // need to sleep a bit more
-    sleep(1);
+    sleep(FLAGS_raft_heartbeat_interval_secs + 1);
     checkConsensus(copies_, 0, 9, msgs);
 }
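
For readers skimming the diff, here is a minimal, self-contained sketch of the two behaviours being changed: the pending request shrinks from a five-field tuple to `<term, logId, committedLogId>`, with the all-zero tuple still serving as the "no pending request" sentinel (as in `Host::noRequest()`), and the first request ever sent to a host now seeds `lastLogIdSent_`/`lastLogTermSent_` from `prevLogId`/`prevLogTerm` instead of overwriting them on every call. This is an illustration only, not the real `Host` class; the `HostSketch` struct, the `maybeSeedFirstSend` helper, and the `TermID`/`LogID` aliases are assumptions made for the sketch.

```cpp
// Standalone sketch of the tuple slimming and first-send seeding in this diff.
#include <cstdint>
#include <iostream>
#include <tuple>

using TermID = int64_t;   // assumed alias for the sketch
using LogID  = int64_t;   // assumed alias for the sketch

// <term, logId, committedLogId>
using Request = std::tuple<TermID, LogID, LogID>;

struct HostSketch {
    Request pendingReq_{0, 0, 0};
    TermID  lastLogTermSent_{0};
    LogID   lastLogIdSent_{0};

    // Mirrors Host::noRequest(): an all-zero tuple means nothing is queued.
    bool noRequest() const {
        static const auto emptyTup = std::make_tuple(0, 0, 0);
        return pendingReq_ == emptyTup;
    }

    // Mirrors the UNLIKELY branch added in appendLogs(): on the very first
    // send to this host, adopt the leader's prevLogId/prevLogTerm as the
    // last values sent, so the CHECK_GE assertions that follow hold.
    void maybeSeedFirstSend(TermID prevLogTerm, LogID prevLogId) {
        if (lastLogIdSent_ == 0 && lastLogTermSent_ == 0) {
            lastLogIdSent_   = prevLogId;
            lastLogTermSent_ = prevLogTerm;
        }
    }
};

int main() {
    HostSketch h;
    std::cout << std::boolalpha << h.noRequest() << "\n";   // true: sentinel tuple

    h.maybeSeedFirstSend(/*prevLogTerm=*/3, /*prevLogId=*/42);
    h.pendingReq_ = std::make_tuple(TermID{3}, LogID{43}, LogID{42});
    std::cout << h.noRequest() << "\n";                     // false: a request is cached
    return 0;
}
```

Note that the sentinel built with `std::make_tuple(0, 0, 0)` (a tuple of plain `int`) still compares correctly against `Request`, because `std::tuple`'s `operator==` works element-wise across tuples of different but comparable element types.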