diff --git a/src/graph/executor/admin/SubmitJobExecutor.cpp b/src/graph/executor/admin/SubmitJobExecutor.cpp
index 64946b394b8..0d78361d60f 100644
--- a/src/graph/executor/admin/SubmitJobExecutor.cpp
+++ b/src/graph/executor/admin/SubmitJobExecutor.cpp
@@ -109,7 +109,8 @@ Value SubmitJobExecutor::convertJobTimestampToDateTime(int64_t timestamp) {
 nebula::DataSet SubmitJobExecutor::buildShowResultData(
     const nebula::meta::cpp2::JobDesc &jd, const std::vector<nebula::meta::cpp2::TaskDesc> &td) {
-  if (jd.get_cmd() == meta::cpp2::AdminCmd::DATA_BALANCE) {
+  if (jd.get_cmd() == meta::cpp2::AdminCmd::DATA_BALANCE ||
+      jd.get_cmd() == meta::cpp2::AdminCmd::ZONE_BALANCE) {
     nebula::DataSet v(
         {"Job Id(spaceId:partId)", "Command(src->dst)", "Status", "Start Time", "Stop Time"});
     const auto &paras = jd.get_paras();
diff --git a/src/graph/validator/AdminJobValidator.h b/src/graph/validator/AdminJobValidator.h
index 7d755da88e7..1f8ff4e5a70 100644
--- a/src/graph/validator/AdminJobValidator.h
+++ b/src/graph/validator/AdminJobValidator.h
@@ -38,6 +38,7 @@ class AdminJobValidator final : public Validator {
       case meta::cpp2::AdminCmd::FLUSH:
       case meta::cpp2::AdminCmd::DATA_BALANCE:
       case meta::cpp2::AdminCmd::LEADER_BALANCE:
+      case meta::cpp2::AdminCmd::ZONE_BALANCE:
         return true;
       // TODO: Also space related, but not available in CreateJobExecutor now.
       case meta::cpp2::AdminCmd::DOWNLOAD:
diff --git a/src/interface/meta.thrift b/src/interface/meta.thrift
index 6b3fe96a08c..d870f0ef8e2 100644
--- a/src/interface/meta.thrift
+++ b/src/interface/meta.thrift
@@ -229,6 +229,7 @@ enum AdminCmd {
     DOWNLOAD = 7,
     INGEST = 8,
     LEADER_BALANCE = 9,
+    ZONE_BALANCE = 10,
     UNKNOWN = 99,
 } (cpp.enum_strict)
diff --git a/src/meta/CMakeLists.txt b/src/meta/CMakeLists.txt
index 89bb7a28411..837e101f9a4 100644
--- a/src/meta/CMakeLists.txt
+++ b/src/meta/CMakeLists.txt
@@ -67,6 +67,8 @@ nebula_add_library(
     processors/job/AdminJobProcessor.cpp
     processors/job/ReportTaskProcessor.cpp
     processors/job/JobUtils.cpp
+    processors/job/StorageJobExecutor.cpp
+    processors/job/JobExecutor.cpp
     processors/job/MetaJobExecutor.cpp
     processors/job/SimpleConcurrentJobExecutor.cpp
     processors/job/CompactJobExecutor.cpp
diff --git a/src/meta/processors/job/BalanceJobExecutor.cpp b/src/meta/processors/job/BalanceJobExecutor.cpp
index ae38d659d92..02f2cd19a85 100644
--- a/src/meta/processors/job/BalanceJobExecutor.cpp
+++ b/src/meta/processors/job/BalanceJobExecutor.cpp
@@ -18,18 +18,11 @@ DEFINE_double(leader_balance_deviation,
 namespace nebula {
 namespace meta {
-std::atomic_bool BalanceJobExecutor::running_ = false;
-std::atomic_bool LeaderBalanceJobExecutor::inLeaderBalance_ = false;
-std::unique_ptr<BalancePlan> DataBalanceJobExecutor::plan_ = nullptr;
-std::mutex BalanceJobExecutor::lock_;
 BalanceJobExecutor::BalanceJobExecutor(JobID jobId,
                                        kvstore::KVStore* kvstore,
                                        AdminClient* adminClient,
                                        const std::vector<std::string>& paras)
-    : MetaJobExecutor(jobId, kvstore, adminClient, paras) {
-  executor_.reset(new folly::CPUThreadPoolExecutor(1));
-  toHost_ = TargetHosts::NONE;
-}
+    : MetaJobExecutor(jobId, kvstore, adminClient, paras) {}

 bool BalanceJobExecutor::check() { return !paras_.empty(); }

@@ -40,20 +33,9 @@ nebula::cpp2::ErrorCode BalanceJobExecutor::stop() {
   return nebula::cpp2::ErrorCode::SUCCEEDED;
 }

-folly::Future<Status> BalanceJobExecutor::executeInternal(HostAddr&& address,
-                                                          std::vector<PartitionID>&& parts) {
-  UNUSED(address);
-  UNUSED(parts);
-  return Status::OK();
-}
-
-bool BalanceJobExecutor::runInMeta() { return true; }
+folly::Future<Status> BalanceJobExecutor::executeInternal() { return Status::OK(); }

 nebula::cpp2::ErrorCode BalanceJobExecutor::recovery() {
-  return nebula::cpp2::ErrorCode::SUCCEEDED;
-}
-
-nebula::cpp2::ErrorCode DataBalanceJobExecutor::recovery() {
   if (kvstore_ == nullptr) {
     return nebula::cpp2::ErrorCode::SUCCEEDED;
   }
@@ -74,28 +56,29 @@ nebula::cpp2::ErrorCode DataBalanceJobExecutor::recovery() {
   auto optJobRet = JobDescription::makeJobDescription(jobKey, value);
   auto optJob = nebula::value(optJobRet);
   plan_.reset(new BalancePlan(optJob, kvstore_, adminClient_));
-  plan_->onFinished_ = [this]() {
-    std::lock_guard<std::mutex> lg(lock_);
+  plan_->setFinishCallBack([this](meta::cpp2::JobStatus status) {
     if (LastUpdateTimeMan::update(kvstore_, time::WallClock::fastNowInMilliSec()) !=
         nebula::cpp2::ErrorCode::SUCCEEDED) {
       LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed";
     }
-    finishInternal();
-  };
+    onFinished_(status);
+  });
   auto recRet = plan_->recovery();
   if (recRet != nebula::cpp2::ErrorCode::SUCCEEDED) {
     plan_.reset(nullptr);
     return recRet;
   }
+  plan_->saveInStore();
   return nebula::cpp2::ErrorCode::SUCCEEDED;
 }

 nebula::cpp2::ErrorCode BalanceJobExecutor::finish(bool ret) {
   UNUSED(ret);
+  plan_.reset(nullptr);
   return nebula::cpp2::ErrorCode::SUCCEEDED;
 }

-nebula::cpp2::ErrorCode BalanceJobExecutor::getAllSpaces(
+nebula::cpp2::ErrorCode LeaderBalanceJobExecutor::getAllSpaces(
     std::vector<std::tuple<GraphSpaceID, int32_t, bool>>& spaces) {
   // Get all spaces
   folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock());
@@ -130,371 +113,164 @@ nebula::cpp2::ErrorCode BalanceJobExecutor::save(const std::string& k, const std
   return rc;
 }

-nebula::cpp2::ErrorCode DataBalanceJobExecutor::buildBalancePlan() {
-  if (plan_ != nullptr) {
-    LOG(ERROR) << "Balance plan should be nullptr now";
-    return nebula::cpp2::ErrorCode::E_BALANCED;
-  }
-  std::vector<std::tuple<GraphSpaceID, int32_t, bool>> spaces;
-  auto spacesRet = getAllSpaces(spaces);
-  if (spacesRet != nebula::cpp2::ErrorCode::SUCCEEDED) {
-    LOG(ERROR) << "Can't get all spaces";
-    return spacesRet;
-  }
-
-  plan_.reset(new BalancePlan(jobDescription_, kvstore_, adminClient_));
-  for (const auto& spaceInfo : spaces) {
-    auto spaceId = std::get<0>(spaceInfo);
-    auto spaceReplica = std::get<1>(spaceInfo);
-    auto dependentOnZone = std::get<2>(spaceInfo);
-    LOG(INFO) << "Balance Space " << spaceId;
-    auto taskRet = genTasks(spaceId, spaceReplica, dependentOnZone, lostHosts_);
-    if (!ok(taskRet)) {
-      LOG(ERROR) << "Generate tasks on space " << std::get<0>(spaceInfo) << " failed";
-      return error(taskRet);
-    }
-
-    auto tasks = std::move(value(taskRet));
-    for (auto& task : tasks) {
-      plan_->addTask(std::move(task));
+folly::Future<Status> DataBalanceJobExecutor::executeInternal() {
+  if (plan_ == nullptr) {
+    Status status = buildBalancePlan();
+    if (status != Status::OK()) {
+      return status;
     }
   }
-
-  plan_->onFinished_ = [this]() {
-    std::lock_guard<std::mutex> lg(lock_);
+  plan_->setFinishCallBack([this](meta::cpp2::JobStatus status) {
     if (LastUpdateTimeMan::update(kvstore_, time::WallClock::fastNowInMilliSec()) !=
         nebula::cpp2::ErrorCode::SUCCEEDED) {
       LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed";
     }
-    finishInternal();
-  };
-  if (plan_->tasks_.empty()) {
-    return nebula::cpp2::ErrorCode::E_BALANCED;
-  }
-  return plan_->saveInStore();
+    onFinished_(status);
+  });
+  plan_->invoke();
+  return Status::OK();
 }

-ErrorOr<nebula::cpp2::ErrorCode, std::vector<BalanceTask>> DataBalanceJobExecutor::genTasks(
-    GraphSpaceID spaceId,
-    int32_t spaceReplica,
-    bool dependentOnZone,
-    std::vector<HostAddr>& lostHosts) {
-  HostParts hostParts;
-  int32_t totalParts = 0;
-  // hostParts is current part allocation map
-  auto result = getHostParts(spaceId,
dependentOnZone, hostParts, totalParts); - if (!nebula::ok(result)) { - return nebula::error(result); - } - - auto retVal = nebula::value(result); - if (!retVal || totalParts == 0 || hostParts.empty()) { - LOG(ERROR) << "Invalid space " << spaceId; - return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; - } - - auto fetchHostPartsRet = fetchHostParts(spaceId, dependentOnZone, hostParts, lostHosts); - if (!nebula::ok(fetchHostPartsRet)) { - LOG(ERROR) << "Fetch hosts and parts failed"; - return nebula::error(fetchHostPartsRet); +Status DataBalanceJobExecutor::buildBalancePlan() { + std::map> lostZoneHost; + std::map> activeSortedHost; + for (auto& p : spaceInfo_.zones_) { + for (auto& ph : p.second.hosts_) { + activeSortedHost[p.first].push_back(&ph.second); + } } - - auto hostPartsRet = nebula::value(fetchHostPartsRet); - auto confirmedHostParts = hostPartsRet.first; - auto activeHosts = hostPartsRet.second; - LOG(INFO) << "Now, try to balance the confirmedHostParts"; - - // We have two parts need to balance, the first one is parts on lost hosts and - // deleted hosts The seconds one is parts on unbalanced host in - // confirmedHostParts. - std::vector tasks; - // 1. Iterate through all hosts that would not be included in - // confirmedHostParts, - // move all parts in them to host with minimum part in confirmedHostParts - for (auto& lostHost : lostHosts) { - auto& lostParts = hostParts[lostHost]; - for (auto& partId : lostParts) { - LOG(INFO) << "Try balance part " << partId << " for lost host " << lostHost; - // check whether any peers which is alive - auto alive = checkReplica(hostParts, activeHosts, spaceReplica, partId); - if (!alive.ok()) { - LOG(ERROR) << "Check Replica failed: " << alive << " Part: " << partId; - return nebula::cpp2::ErrorCode::E_NO_VALID_HOST; - } - - auto retCode = - transferLostHost(tasks, confirmedHostParts, lostHost, spaceId, partId, dependentOnZone); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Transfer lost host " << lostHost << " failed"; - return retCode; + for (HostAddr ha : lostHosts_) { + if (!spaceInfo_.hasHost(ha)) { + return Status::Error( + "Host %s does not belong to space %d", ha.toString().c_str(), spaceInfo_.spaceId_); + } + for (auto& p : spaceInfo_.zones_) { + auto it = p.second.hosts_.find(ha); + if (it != p.second.hosts_.end()) { + lostZoneHost[p.first].push_back(&it->second); + std::vector& hvec = activeSortedHost[p.first]; + hvec.erase(std::find(hvec.begin(), hvec.end(), &it->second)); + break; } } } - - // 2. 
Make all hosts in confirmedHostParts balanced - if (balanceParts(spaceId, confirmedHostParts, totalParts, tasks, dependentOnZone)) { - return tasks; - } else { - return nebula::cpp2::ErrorCode::E_BAD_BALANCE_PLAN; - } -} - -nebula::cpp2::ErrorCode DataBalanceJobExecutor::transferLostHost(std::vector& tasks, - HostParts& confirmedHostParts, - const HostAddr& source, - GraphSpaceID spaceId, - PartitionID partId, - bool dependentOnZone) { - // find a host with minimum parts which doesn't have this part - ErrorOr result; - if (dependentOnZone) { - result = hostWithMinimalPartsForZone(source, confirmedHostParts, partId); - } else { - result = hostWithMinimalParts(confirmedHostParts, partId); - } - - if (!nebula::ok(result)) { - LOG(ERROR) << "Can't find a host which doesn't have part: " << partId; - return nebula::error(result); - } - const auto& targetHost = nebula::value(result); - confirmedHostParts[targetHost].emplace_back(partId); - tasks.emplace_back(plan_->id(), spaceId, partId, source, targetHost, kvstore_, adminClient_); - zoneParts_[targetHost].second.emplace_back(partId); - auto zoneIt = - std::find(zoneParts_[source].second.begin(), zoneParts_[source].second.end(), partId); - if (zoneIt == zoneParts_[source].second.end()) { - LOG(ERROR) << "part not find " << partId << " at " << source; - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -ErrorOr>> -DataBalanceJobExecutor::fetchHostParts(GraphSpaceID spaceId, - bool dependentOnZone, - const HostParts& hostParts, - std::vector& lostHosts) { - ErrorOr> activeHostsRet; - if (dependentOnZone) { - activeHostsRet = ActiveHostsMan::getActiveHostsWithZones(kvstore_, spaceId); - } else { - activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); - } - - if (!nebula::ok(activeHostsRet)) { - return nebula::error(activeHostsRet); - } - - std::vector expand; - auto activeHosts = nebula::value(activeHostsRet); - calDiff(hostParts, activeHosts, expand, lostHosts); - // confirmedHostParts is new part allocation map after balance, it would - // include newlyAdded and exclude lostHosts - HostParts confirmedHostParts(hostParts); - for (const auto& h : expand) { - LOG(INFO) << "Found new host " << h; - confirmedHostParts.emplace(h, std::vector()); - } - for (const auto& h : lostHosts) { - LOG(INFO) << "Lost host " << h; - confirmedHostParts.erase(h); + for (auto& p : activeSortedHost) { + std::vector& hvec = p.second; + std::sort(hvec.begin(), hvec.end(), [](Host*& l, Host*& r) -> bool { + return l->parts_.size() < r->parts_.size(); + }); } - return std::make_pair(confirmedHostParts, activeHosts); -} - -bool DataBalanceJobExecutor::balanceParts(GraphSpaceID spaceId, - HostParts& confirmedHostParts, - int32_t totalParts, - std::vector& tasks, - bool dependentOnZone) { - auto avgLoad = static_cast(totalParts) / confirmedHostParts.size(); - VLOG(3) << "The expect avg load is " << avgLoad; - int32_t minLoad = std::floor(avgLoad); - int32_t maxLoad = std::ceil(avgLoad); - VLOG(3) << "The min load is " << minLoad << " max load is " << maxLoad; - - auto sortedHosts = sortedHostsByParts(confirmedHostParts); - if (sortedHosts.empty()) { - LOG(ERROR) << "Host is empty"; - return false; + plan_.reset(new BalancePlan(jobDescription_, kvstore_, adminClient_)); + for (auto& p : lostZoneHost) { + std::vector& hvec = activeSortedHost[p.first]; + for (Host* h : p.second) { + for (PartitionID partId : h->parts_) { + Host* dstHost = hvec.front(); + dstHost->parts_.insert(partId); + plan_->addTask(BalanceTask( + jobId_, spaceInfo_.spaceId_, partId, h->ha_, 
dstHost->ha_, kvstore_, adminClient_)); + for (size_t i = 0; i < hvec.size() - 1; i++) { + if (hvec[i]->parts_.size() > hvec[i + 1]->parts_.size()) { + std::swap(hvec[i], hvec[i + 1]); + } else { + break; + } + } + } + h->parts_.clear(); + } } - - auto maxPartsHost = sortedHosts.back(); - auto minPartsHost = sortedHosts.front(); - auto& sourceHost = maxPartsHost.first; - auto& targetHost = minPartsHost.first; - if (innerBalance_) { - LOG(INFO) << "maxPartsHost.first " << maxPartsHost.first << " minPartsHost.first " - << minPartsHost.first; - while (!checkZoneLegal(maxPartsHost.first, minPartsHost.first)) { - sortedHosts.pop_back(); - maxPartsHost = sortedHosts.back(); + lostZoneHost.clear(); + auto balanceHostVec = [this](std::vector& hostVec) -> std::vector { + size_t totalPartNum = 0; + size_t avgPartNum = 0; + for (Host* h : hostVec) { + totalPartNum += h->parts_.size(); } - - auto& source = maxPartsHost.first; - auto iter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { - return source == pair.first; - }); - - auto& zoneName = iter->second.first; - int32_t hostsSize = zoneHosts_[zoneName].size(); - int32_t totalPartsZone = 0; - for (auto& host : zoneHosts_[zoneName]) { - auto it = confirmedHostParts.find(host); - if (it == confirmedHostParts.end()) { - LOG(ERROR) << "Host " << host << "not in confirmedHostParts"; - continue; + avgPartNum = totalPartNum / hostVec.size(); + size_t remainder = totalPartNum - avgPartNum * hostVec.size(); + size_t leftBegin = 0; + size_t leftEnd = 0; + size_t rightBegin = 0; + size_t rightEnd = hostVec.size(); + std::vector tasks; + for (size_t i = 0; i < hostVec.size(); i++) { + if (avgPartNum <= hostVec[i]->parts_.size()) { + leftEnd = i; + break; } - totalPartsZone += it->second.size(); } - - avgLoad = static_cast(totalPartsZone) / hostsSize; - minLoad = std::floor(avgLoad); - maxLoad = std::ceil(avgLoad); - LOG(INFO) << "Update min and max loading Total parts in zone " << totalPartsZone - << ", total hosts " << hostsSize << " The expect avg load is " << avgLoad - << " The min load is " << minLoad << " max load is " << maxLoad; - } - - while (maxPartsHost.second > maxLoad || minPartsHost.second < minLoad) { - auto& partsFrom = confirmedHostParts[maxPartsHost.first]; - auto& partsTo = confirmedHostParts[minPartsHost.first]; - std::sort(partsFrom.begin(), partsFrom.end()); - std::sort(partsTo.begin(), partsTo.end()); - - LOG(INFO) << maxPartsHost.first << ":" << partsFrom.size() << " -> " << minPartsHost.first - << ":" << partsTo.size(); - std::vector diff; - std::set_difference(partsFrom.begin(), - partsFrom.end(), - partsTo.begin(), - partsTo.end(), - std::inserter(diff, diff.begin())); - bool noAction = true; - for (auto& partId : diff) { - LOG(INFO) << "partsFrom size " << partsFrom.size() << " partsTo size " << partsTo.size() - << " minLoad " << minLoad << " maxLoad " << maxLoad; - if (partsFrom.size() == partsTo.size() + 1 || - partsFrom.size() == static_cast(minLoad) || - partsTo.size() == static_cast(maxLoad)) { - VLOG(3) << "No need to move any parts from " << maxPartsHost.first << " to " - << minPartsHost.first; + for (size_t i = 0; i < hostVec.size(); i++) { + if (avgPartNum < hostVec[i]->parts_.size()) { + rightBegin = i; break; } - - LOG(INFO) << "[space:" << spaceId << ", part:" << partId << "] " << maxPartsHost.first << "->" - << minPartsHost.first; - auto it = std::find(partsFrom.begin(), partsFrom.end(), partId); - if (it == partsFrom.end()) { - LOG(ERROR) << "Part " << partId << " not found in partsFrom"; 
- return false; + } + for (size_t right = rightBegin; right < rightEnd;) { + Host* srcHost = hostVec[right]; + if (srcHost->parts_.size() == avgPartNum + 1 && remainder) { + right++; + remainder--; + continue; } - - if (std::find(partsTo.begin(), partsTo.end(), partId) != partsTo.end()) { - LOG(ERROR) << "Part " << partId << " already existed in partsTo"; - return false; + if (srcHost->parts_.size() == avgPartNum) { + right++; + continue; } - - if (dependentOnZone) { - if (!checkZoneLegal(sourceHost, targetHost)) { - LOG(INFO) << "sourceHost " << sourceHost << " targetHost " << targetHost - << " not same zone"; - - auto& parts = relatedParts_[targetHost]; - auto minIt = std::find(parts.begin(), parts.end(), partId); - if (minIt != parts.end()) { - LOG(INFO) << "Part " << partId << " have existed"; - continue; - } - } - - auto& sourceNoneName = zoneParts_[sourceHost].first; - auto sourceHosts = zoneHosts_.find(sourceNoneName); - for (auto& sh : sourceHosts->second) { - auto& parts = relatedParts_[sh]; - auto maxIt = std::find(parts.begin(), parts.end(), partId); - if (maxIt == parts.end()) { - LOG(INFO) << "Part " << partId << " not found on " << sh; - continue; - } - parts.erase(maxIt); - } - - auto& targetNoneName = zoneParts_[targetHost].first; - auto targetHosts = zoneHosts_.find(targetNoneName); - for (auto& th : targetHosts->second) { - relatedParts_[th].emplace_back(partId); + PartitionID partId = *(srcHost->parts_.begin()); + hostVec[leftBegin]->parts_.insert(partId); + srcHost->parts_.erase(partId); + tasks.emplace_back(jobId_, + spaceInfo_.spaceId_, + partId, + srcHost->ha_, + hostVec[leftBegin]->ha_, + kvstore_, + adminClient_); + size_t leftIndex = leftBegin; + for (; leftIndex < leftEnd - 1; leftIndex++) { + if (hostVec[leftIndex]->parts_.size() > hostVec[leftIndex + 1]->parts_.size()) { + std::swap(hostVec[leftIndex], hostVec[leftIndex + 1]); + } else { + break; } } - - partsFrom.erase(it); - partsTo.emplace_back(partId); - tasks.emplace_back( - jobId_, spaceId, partId, maxPartsHost.first, minPartsHost.first, kvstore_, adminClient_); - noAction = false; - } - - if (noAction) { - LOG(INFO) << "Here is no action"; - break; - } - sortedHosts = sortedHostsByParts(confirmedHostParts); - maxPartsHost = sortedHosts.back(); - minPartsHost = sortedHosts.front(); - if (innerBalance_) { - while (!checkZoneLegal(maxPartsHost.first, minPartsHost.first)) { - sortedHosts.pop_back(); - maxPartsHost = sortedHosts.back(); + if (leftIndex == leftEnd - 1 && hostVec[leftIndex]->parts_.size() >= avgPartNum) { + leftEnd--; } - - auto& source = maxPartsHost.first; - auto iter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { - return source == pair.first; - }); - - auto& zoneName = iter->second.first; - int32_t hostsSize = zoneHosts_[zoneName].size(); - int32_t totalPartsZone = 0; - for (auto& host : zoneHosts_[zoneName]) { - auto it = confirmedHostParts.find(host); - if (it == confirmedHostParts.end()) { - LOG(ERROR) << "Host " << host << "not in confirmedHostParts"; - continue; - } - totalPartsZone += it->second.size(); + if (leftBegin == leftEnd) { + leftEnd = rightBegin; } - - avgLoad = static_cast(totalPartsZone) / hostsSize; - minLoad = std::floor(avgLoad); - maxLoad = std::ceil(avgLoad); - LOG(INFO) << "Update min and max loading Total parts in zone " << totalPartsZone - << ", total hosts " << hostsSize << " The expect avg load is " << avgLoad - << " The min load is " << minLoad << " max load is " << maxLoad; + } + return tasks; + }; + for (auto& p : 
activeSortedHost) { + std::vector& hvec = p.second; + std::vector tasks = balanceHostVec(hvec); + for (BalanceTask& task : tasks) { + plan_->addTask(std::move(task)); } } - LOG(INFO) << "Balance tasks num: " << tasks.size(); - for (auto& task : tasks) { - LOG(INFO) << task.taskIdStr(); + if (plan_->tasks().empty()) { + return Status::Balanced(); } - - relatedParts_.clear(); - return true; + nebula::cpp2::ErrorCode rc = plan_->saveInStore(); + if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { + return Status::Error("save balance zone plan failed"); + } + return Status::OK(); } nebula::cpp2::ErrorCode DataBalanceJobExecutor::stop() { - std::lock_guard lg(lock_); - if (!running_) { - return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; - } stopped_ = true; plan_->stop(); return nebula::cpp2::ErrorCode::SUCCEEDED; } -ErrorOr BalanceJobExecutor::getHostParts(GraphSpaceID spaceId, - bool dependentOnZone, - HostParts& hostParts, - int32_t& totalParts) { +ErrorOr LeaderBalanceJobExecutor::getHostParts(GraphSpaceID spaceId, + bool dependentOnZone, + HostParts& hostParts, + int32_t& totalParts) { folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); const auto& prefix = MetaKeyUtils::partPrefix(spaceId); std::unique_ptr iter; @@ -540,8 +316,6 @@ ErrorOr BalanceJobExecutor::getHostParts(GraphSpa auto zoneNames = properties.get_zone_names(); int32_t zoneSize = zoneNames.size(); LOG(INFO) << "Zone Size " << zoneSize; - innerBalance_ = (replica == zoneSize); - auto activeHostsRet = ActiveHostsMan::getActiveHostsWithZones(kvstore_, spaceId); if (!nebula::ok(activeHostsRet)) { return nebula::error(activeHostsRet); @@ -574,7 +348,7 @@ ErrorOr BalanceJobExecutor::getHostParts(GraphSpa return true; } -nebula::cpp2::ErrorCode BalanceJobExecutor::assembleZoneParts( +nebula::cpp2::ErrorCode LeaderBalanceJobExecutor::assembleZoneParts( const std::vector& zoneNames, HostParts& hostParts) { // zoneHosts use to record this host belong to zone's hosts std::unordered_map, std::vector> zoneHosts; @@ -646,10 +420,10 @@ nebula::cpp2::ErrorCode BalanceJobExecutor::assembleZoneParts( return nebula::cpp2::ErrorCode::SUCCEEDED; } -void BalanceJobExecutor::calDiff(const HostParts& hostParts, - const std::vector& activeHosts, - std::vector& expand, - std::vector& lost) { +void LeaderBalanceJobExecutor::calDiff(const HostParts& hostParts, + const std::vector& activeHosts, + std::vector& expand, + std::vector& lost) { for (auto it = hostParts.begin(); it != hostParts.end(); it++) { VLOG(1) << "Original Host " << it->first << ", parts " << it->second.size(); if (std::find(activeHosts.begin(), activeHosts.end(), it->first) == activeHosts.end() && @@ -665,174 +439,317 @@ void BalanceJobExecutor::calDiff(const HostParts& hostParts, } } -std::vector> DataBalanceJobExecutor::sortedHostsByParts( - const HostParts& hostParts) { - std::vector> hosts; - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - LOG(INFO) << "Host " << it->first << " parts " << it->second.size(); - hosts.emplace_back(it->first, it->second.size()); - } - std::sort(hosts.begin(), hosts.end(), [](const auto& l, const auto& r) { - if (l.second != r.second) { - return l.second < r.second; - } else { - return l.first.host < r.first.host; - } - }); - return hosts; +nebula::cpp2::ErrorCode DataBalanceJobExecutor::prepare() { + auto spaceRet = getSpaceIdFromName(paras_.back()); + if (!nebula::ok(spaceRet)) { + LOG(ERROR) << "Can't find the space: " << paras_.back(); + return nebula::error(spaceRet); + } + GraphSpaceID spaceId = nebula::value(spaceRet); 
+ nebula::cpp2::ErrorCode rc = spaceInfo_.getInfo(spaceId, kvstore_); + if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { + return rc; + } + lostHosts_.reserve(paras_.size() - 1); + for (size_t i = 0; i < paras_.size() - 1; i++) { + lostHosts_.emplace_back(HostAddr::fromString(paras_[i])); + } + return nebula::cpp2::ErrorCode::SUCCEEDED; } -Status DataBalanceJobExecutor::checkReplica(const HostParts& hostParts, - const std::vector& activeHosts, - int32_t replica, - PartitionID partId) { - // check host hold the part and alive - auto checkPart = [&](const auto& entry) { - auto& host = entry.first; - auto& parts = entry.second; - return std::find(parts.begin(), parts.end(), partId) != parts.end() && - std::find(activeHosts.begin(), activeHosts.end(), host) != activeHosts.end(); - }; - auto aliveReplica = std::count_if(hostParts.begin(), hostParts.end(), checkPart); - if (aliveReplica >= replica / 2 + 1) { - return Status::OK(); +nebula::cpp2::ErrorCode ZoneBalanceJobExecutor::prepare() { + auto spaceRet = getSpaceIdFromName(paras_.back()); + if (!nebula::ok(spaceRet)) { + LOG(ERROR) << "Can't find the space: " << paras_.back(); + return nebula::error(spaceRet); + } + GraphSpaceID spaceId = nebula::value(spaceRet); + nebula::cpp2::ErrorCode rc = spaceInfo_.getInfo(spaceId, kvstore_); + if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { + return rc; } - return Status::Error("Not enough alive host hold the part %d", partId); + lostZones_.reserve(paras_.size() - 1); + for (size_t i = 0; i < paras_.size() - 1; i++) { + lostZones_.emplace_back(paras_[i]); + } + return nebula::cpp2::ErrorCode::SUCCEEDED; } -ErrorOr DataBalanceJobExecutor::hostWithMinimalParts( - const HostParts& hostParts, PartitionID partId) { - auto hosts = sortedHostsByParts(hostParts); - for (auto& h : hosts) { - auto it = hostParts.find(h.first); - if (it == hostParts.end()) { - LOG(ERROR) << "Host " << h.first << " not found"; - return nebula::cpp2::ErrorCode::E_NO_HOSTS; - } +nebula::cpp2::ErrorCode ZoneBalanceJobExecutor::stop() { + plan_->stop(); + return nebula::cpp2::ErrorCode::SUCCEEDED; +} - if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end()) { - return h.first; +folly::Future ZoneBalanceJobExecutor::executeInternal() { + if (plan_ == nullptr) { + Status status = buildBalancePlan(); + if (status != Status::OK()) { + return status; } } - return nebula::cpp2::ErrorCode::E_NO_HOSTS; -} - -ErrorOr DataBalanceJobExecutor::hostWithMinimalPartsForZone( - const HostAddr& source, const HostParts& hostParts, PartitionID partId) { - auto hosts = sortedHostsByParts(hostParts); - for (auto& h : hosts) { - auto it = hostParts.find(h.first); - if (it == hostParts.end()) { - LOG(ERROR) << "Host " << h.first << " not found"; - return nebula::cpp2::ErrorCode::E_NO_HOSTS; + plan_->setFinishCallBack([this](meta::cpp2::JobStatus status) { + if (LastUpdateTimeMan::update(kvstore_, time::WallClock::fastNowInMilliSec()) != + nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed"; } - - LOG(INFO) << "source " << source << " h.first " << h.first; - if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end()) { - return h.first; + if (status == meta::cpp2::JobStatus::FINISHED) { + nebula::cpp2::ErrorCode ret = updateMeta(); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + status = meta::cpp2::JobStatus::FAILED; + } } + onFinished_(status); + }); + plan_->invoke(); + return Status::OK(); +} + +nebula::cpp2::ErrorCode 
ZoneBalanceJobExecutor::updateMeta() { + std::string spaceKey = MetaKeyUtils::spaceKey(spaceInfo_.spaceId_); + std::string spaceVal; + kvstore_->get(kDefaultSpaceId, kDefaultPartId, spaceKey, &spaceVal); + meta::cpp2::SpaceDesc properties = MetaKeyUtils::parseSpace(spaceVal); + std::vector zones; + for (std::string& zn : lostZones_) { + spaceInfo_.zones_.erase(zn); + } + for (auto& p : spaceInfo_.zones_) { + zones.push_back(p.first); } - return nebula::cpp2::ErrorCode::E_NO_HOSTS; + properties.set_zone_names(std::move(zones)); + std::vector data; + data.emplace_back(MetaKeyUtils::spaceKey(spaceInfo_.spaceId_), + MetaKeyUtils::spaceVal(properties)); + folly::Baton baton; + auto ret = nebula::cpp2::ErrorCode::SUCCEEDED; + kvstore_->asyncMultiPut(kDefaultSpaceId, + kDefaultPartId, + std::move(data), + [&baton, &ret](nebula::cpp2::ErrorCode code) { + if (nebula::cpp2::ErrorCode::SUCCEEDED != code) { + ret = code; + LOG(ERROR) << "Can't write the kvstore, ret = " + << static_cast(code); + } + baton.post(); + }); + baton.wait(); + return ret; } -bool DataBalanceJobExecutor::checkZoneLegal(const HostAddr& source, const HostAddr& target) { - VLOG(3) << "Check " << source << " : " << target; - auto sourceIter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { - return source == pair.first; - }); +Status ZoneBalanceJobExecutor::buildBalancePlan() { + for (std::string& zn : lostZones_) { + if (!spaceInfo_.zones_.count(zn)) { + return Status::Error("space %s does not have zone %s", spaceInfo_.name_.c_str(), zn.c_str()); + } + } - if (sourceIter == zoneParts_.end()) { - LOG(INFO) << "Source " << source << " not found"; - return false; + std::map activeZones; + std::map lostZones; + for (auto& p : spaceInfo_.zones_) { + activeZones.emplace(p.first, &p.second); } + for (std::string& zn : lostZones_) { + auto it = activeZones.find(zn); + if (it != activeZones.end()) { + lostZones.emplace(it->first, it->second); + activeZones.erase(it); + } + } + int32_t activeSize = activeZones.size(); + if (activeSize < spaceInfo_.replica_) { + return Status::Error("Not enough alive zone hold replica"); + } + std::vector tasks; - auto targetIter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&target](const auto& pair) { - return target == pair.first; + std::vector sortedActiveZones; + sortedActiveZones.reserve(activeZones.size()); + std::map> sortedZoneHosts; + std::for_each(activeZones.begin(), + activeZones.end(), + [&sortedActiveZones, &sortedZoneHosts](std::pair& p) { + sortedActiveZones.push_back(p.second); + std::vector& hosts = sortedZoneHosts[p.first]; + for (auto& ph : p.second->hosts_) { + hosts.push_back(&ph.second); + } + std::sort(hosts.begin(), hosts.end(), [](Host*& l, Host*& r) -> bool { + return l->parts_.size() < r->parts_.size(); + }); + sortedActiveZones.back()->calPartNum(); + }); + std::sort(sortedActiveZones.begin(), sortedActiveZones.end(), [](Zone*& l, Zone*& r) -> bool { + return l->partNum_ < r->partNum_; }); - if (targetIter == zoneParts_.end()) { - LOG(INFO) << "Target " << target << " not found"; - return false; - } + auto insertPartIntoZone = [&sortedZoneHosts](Zone* zone, PartitionID partId) -> HostAddr { + std::vector& sortedHosts = sortedZoneHosts[zone->zoneName_]; + sortedHosts.front()->parts_.emplace(partId); + zone->partNum_++; + HostAddr ha = sortedHosts.front()->ha_; + for (size_t i = 0; i < sortedHosts.size() - 1; i++) { + if (sortedHosts[i]->parts_.size() >= sortedHosts[i + 1]->parts_.size()) { + std::swap(sortedHosts[i], sortedHosts[i + 1]); 
+ } else { + break; + } + } + return ha; + }; - LOG(INFO) << sourceIter->second.first << " : " << targetIter->second.first; - return sourceIter->second.first == targetIter->second.first; -} + auto chooseZoneInsert = [&insertPartIntoZone, + &sortedActiveZones](PartitionID partId) -> HostAddr { + size_t index = 0; + for (size_t i = 0; i < sortedActiveZones.size(); i++) { + if (!sortedActiveZones[i]->partExist(partId)) { + index = i; + break; + } + } + HostAddr ha = insertPartIntoZone(sortedActiveZones[index], partId); + for (size_t i = index; i < sortedActiveZones.size() - 1; i++) { + if (sortedActiveZones[i]->partNum_ >= sortedActiveZones[i + 1]->partNum_) { + std::swap(sortedActiveZones[i], sortedActiveZones[i + 1]); + } else { + break; + } + } + return ha; + }; -nebula::cpp2::ErrorCode DataBalanceJobExecutor::prepare() { - auto activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); - if (!nebula::ok(activeHostsRet)) { - auto retCode = nebula::error(activeHostsRet); - LOG(ERROR) << "Get active hosts failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; + for (auto& p : lostZones) { + Zone* zone = p.second; + for (auto& ph : zone->hosts_) { + for (PartitionID partId : ph.second.parts_) { + HostAddr dst = chooseZoneInsert(partId); + tasks.emplace_back( + jobId_, spaceInfo_.spaceId_, partId, ph.first, dst, kvstore_, adminClient_); + } + ph.second.parts_.clear(); + } + zone->calPartNum(); } - auto hosts = std::move(nebula::value(activeHostsRet)); - if (hosts.empty()) { - LOG(ERROR) << "There is no active hosts"; - return nebula::cpp2::ErrorCode::E_NO_HOSTS; + int32_t totalPartNum = 0; + int32_t avgPartNum = 0; + for (auto& z : sortedActiveZones) { + totalPartNum += z->partNum_; } - lostHosts_.reserve(paras_.size() - 1); - for (size_t i = 0; i < paras_.size() - 1; i++) { - lostHosts_.emplace_back(HostAddr::fromString(paras_[i])); + avgPartNum = totalPartNum / sortedActiveZones.size(); + int32_t remainder = totalPartNum - avgPartNum * sortedActiveZones.size(); + int32_t leftBegin = 0; + int32_t leftEnd = 0; + int32_t rightBegin = 0; + int32_t rightEnd = sortedActiveZones.size(); + for (size_t i = 0; i < sortedActiveZones.size(); i++) { + if (avgPartNum <= sortedActiveZones[i]->partNum_) { + leftEnd = i; + break; + } } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -nebula::cpp2::ErrorCode DataBalanceJobExecutor::finish(bool ret) { - std::lock_guard lg(lock_); - return finishInternal(ret); -} - -nebula::cpp2::ErrorCode DataBalanceJobExecutor::finishInternal(bool ret) { - CHECK(!lock_.try_lock()); - plan_.reset(nullptr); - running_ = false; - auto rc = onFinished_(ret); - if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { - return rc; + for (size_t i = 0; i < sortedActiveZones.size(); i++) { + if (avgPartNum < sortedActiveZones[i]->partNum_) { + rightBegin = i; + break; + } } - return ret ? 
nebula::cpp2::ErrorCode::SUCCEEDED : nebula::cpp2::ErrorCode::E_BALANCER_FAILURE; -} - -folly::Future DataBalanceJobExecutor::executeInternal(HostAddr&& address, - std::vector&& parts) { - UNUSED(address); - UNUSED(parts); - std::unique_lock lg(lock_); - if (!running_) { - if (plan_ == nullptr) { - auto retCode = buildBalancePlan(); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - if (retCode == nebula::cpp2::ErrorCode::E_BALANCED) { - finishInternal(true); - return Status::OK(); - } else { - return Status::Error(apache::thrift::util::enumNameSafe(retCode)); + for (int32_t right = rightBegin; right < rightEnd;) { + Zone* srcZone = sortedActiveZones[right]; + if (srcZone->partNum_ == avgPartNum + 1 && remainder) { + right++; + remainder--; + continue; + } + if (srcZone->partNum_ == avgPartNum) { + right++; + continue; + } + std::vector& sortedHosts = sortedZoneHosts[srcZone->zoneName_]; + int32_t hostIndex = sortedHosts.size() - 1; + for (; hostIndex >= 0; hostIndex--) { + std::set& hostParts = sortedHosts[hostIndex]->parts_; + PartitionID movePart = -1; + for (PartitionID partId : hostParts) { + bool matched = false; + for (int32_t leftIndex = leftBegin; leftIndex < leftEnd; leftIndex++) { + if (!sortedActiveZones[leftIndex]->partExist(partId)) { + HostAddr dst = insertPartIntoZone(sortedActiveZones[leftIndex], partId); + tasks.emplace_back(jobId_, + spaceInfo_.spaceId_, + partId, + sortedHosts[hostIndex]->ha_, + dst, + kvstore_, + adminClient_); + movePart = partId; + int32_t newLeftIndex = leftIndex; + for (; newLeftIndex < leftEnd - 1; newLeftIndex++) { + if (sortedActiveZones[newLeftIndex]->partNum_ > + sortedActiveZones[newLeftIndex + 1]->partNum_) { + std::swap(sortedActiveZones[newLeftIndex], sortedActiveZones[newLeftIndex + 1]); + } else { + break; + } + } + if (newLeftIndex == leftEnd - 1 && + sortedActiveZones[newLeftIndex]->partNum_ >= avgPartNum) { + leftEnd--; + } + if (leftBegin == leftEnd) { + leftEnd = rightBegin; + } + matched = true; + break; + } + } + if (matched) { + break; } } + if (movePart != -1) { + hostParts.erase(movePart); + srcZone->partNum_--; + break; + } } - LOG(INFO) << "Start to invoke balance plan " << plan_->id(); - running_ = true; - auto fut = folly::via(executor_.get(), std::bind(&BalancePlan::invoke, plan_.get())); - lg.unlock(); - fut.wait(); - return Status::OK(); + for (int32_t i = hostIndex; i > 0; i--) { + if (sortedHosts[i]->parts_.size() <= sortedHosts[i - 1]->parts_.size()) { + std::swap(sortedHosts[i], sortedHosts[i - 1]); + } else { + break; + } + } + } + if (tasks.empty()) { + return Status::Balanced(); } - CHECK(plan_ != nullptr); - LOG(INFO) << "Balance job " << plan_->id() << " is still running"; - return Status::Error(folly::sformat("Balance job {} is still running", plan_->id())); + plan_.reset(new BalancePlan(jobDescription_, kvstore_, adminClient_)); + for (BalanceTask& task : tasks) { + plan_->addTask(std::move(task)); + } + nebula::cpp2::ErrorCode rc = plan_->saveInStore(); + if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { + return Status::Error("save balance zone plan failed"); + } + return Status::OK(); } -folly::Future LeaderBalanceJobExecutor::executeInternal(HostAddr&& address, - std::vector&& parts) { - UNUSED(address); - UNUSED(parts); - if (running_.load(std::memory_order_acquire)) { - LOG(INFO) << "Balance process still running"; - return Status::OK(); - } +LeaderBalanceJobExecutor::LeaderBalanceJobExecutor(JobID jobId, + kvstore::KVStore* kvstore, + AdminClient* adminClient, + const std::vector& params) + : 
MetaJobExecutor(jobId, kvstore, adminClient, params), + inLeaderBalance_(false), + hostLeaderMap_(nullptr) { + executor_.reset(new folly::CPUThreadPoolExecutor(1)); +} +nebula::cpp2::ErrorCode LeaderBalanceJobExecutor::finish(bool ret) { + UNUSED(ret); + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +folly::Future LeaderBalanceJobExecutor::executeInternal() { folly::Promise promise; auto future = promise.getFuture(); // Space ID, Replica Factor and Dependent On Group @@ -891,12 +808,12 @@ folly::Future LeaderBalanceJobExecutor::executeInternal(HostAddr&& addre inLeaderBalance_ = false; if (failed != 0) { - LOG(ERROR) << failed << " partiton failed to transfer leader"; + return Status::Error("partiton failed to transfer leader"); } - onFinished_(false); - return Status::Error("partiton failed to transfer leader"); + onFinished_(meta::cpp2::JobStatus::FINISHED); + return Status::OK(); } - onFinished_(true); + onFinished_(meta::cpp2::JobStatus::FINISHED); return Status::OK(); } @@ -1150,5 +1067,82 @@ void LeaderBalanceJobExecutor::simplifyLeaderBalnacePlan(GraphSpaceID spaceId, } } +nebula::cpp2::ErrorCode SpaceInfo::getInfo(GraphSpaceID spaceId, kvstore::KVStore* kvstore) { + spaceId_ = spaceId; + std::string spaceKey = MetaKeyUtils::spaceKey(spaceId); + std::string spaceVal; + kvstore->get(kDefaultSpaceId, kDefaultPartId, spaceKey, &spaceVal); + meta::cpp2::SpaceDesc properties = MetaKeyUtils::parseSpace(spaceVal); + name_ = properties.get_space_name(); + replica_ = properties.get_replica_factor(); + const std::vector& zones = properties.get_zone_names(); + for (const std::string& zoneName : zones) { + std::string zoneValue; + auto zoneKey = MetaKeyUtils::zoneKey(zoneName); + auto retCode = kvstore->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get zone " << zoneName + << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + std::vector hosts = MetaKeyUtils::parseZoneHosts(std::move(zoneValue)); + Zone zone(zoneName); + for (HostAddr& ha : hosts) { + zone.hosts_.emplace(ha, Host(ha)); + } + zones_.emplace(zoneName, zone); + } + const auto& prefix = MetaKeyUtils::partPrefix(spaceId); + std::unique_ptr iter; + auto retCode = kvstore->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId << " " + << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + for (; iter->valid(); iter->next()) { + auto key = iter->key(); + PartitionID partId; + memcpy(&partId, key.data() + prefix.size(), sizeof(PartitionID)); + std::vector partHosts = MetaKeyUtils::parsePartVal(iter->val()); + for (HostAddr& ha : partHosts) { + for (auto& [zn, zone] : zones_) { + auto it = zone.hosts_.find(ha); + if (it != zone.hosts_.end()) { + it->second.parts_.emplace(partId); + } + } + } + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +int32_t Zone::calPartNum() { + int32_t num = 0; + for (auto& p : hosts_) { + num += p.second.parts_.size(); + } + partNum_ = num; + return partNum_; +} + +bool Zone::partExist(PartitionID partId) { + for (auto& p : hosts_) { + if (p.second.parts_.count(partId)) { + return true; + } + } + return false; +} + +bool SpaceInfo::hasHost(const HostAddr& ha) { + for (auto p : zones_) { + if (p.second.hasHost(ha)) { + return true; + } + } + return false; +} + } // namespace meta } // namespace nebula diff --git 
a/src/meta/processors/job/BalanceJobExecutor.h b/src/meta/processors/job/BalanceJobExecutor.h index 3832122fce1..e8f111b2111 100644 --- a/src/meta/processors/job/BalanceJobExecutor.h +++ b/src/meta/processors/job/BalanceJobExecutor.h @@ -8,20 +8,45 @@ #include "meta/processors/job/BalancePlan.h" #include "meta/processors/job/BalanceTask.h" +#include "meta/processors/job/MetaJobExecutor.h" #include "meta/processors/job/SimpleConcurrentJobExecutor.h" namespace nebula { namespace meta { - -using ZoneParts = std::pair>; using HostParts = std::unordered_map>; using PartAllocation = std::unordered_map>; using LeaderBalancePlan = std::vector>; using ZoneNameAndParts = std::pair>; -class BalanceJobExecutor : public MetaJobExecutor { - friend void testRestBlancer(); +struct Host { + explicit Host(const HostAddr& ha) : ha_(ha) {} + Host() = default; + + HostAddr ha_; + std::set parts_; +}; +struct Zone { + Zone() = default; + explicit Zone(const std::string name) : zoneName_(name) {} + bool hasHost(const HostAddr& ha) { return hosts_.find(ha) != hosts_.end(); } + int32_t calPartNum(); + bool partExist(PartitionID partId); + + std::string zoneName_; + std::map hosts_; + int32_t partNum_; +}; +struct SpaceInfo { + nebula::cpp2::ErrorCode getInfo(GraphSpaceID spaceId, kvstore::KVStore* kvstore); + bool hasHost(const HostAddr& ha); + + std::string name_; + GraphSpaceID spaceId_; + int32_t replica_; + std::map zones_; +}; +class BalanceJobExecutor : public MetaJobExecutor { public: BalanceJobExecutor(JobID jobId, kvstore::KVStore* kvstore, @@ -36,70 +61,53 @@ class BalanceJobExecutor : public MetaJobExecutor { nebula::cpp2::ErrorCode finish(bool ret = true) override; - folly::Future executeInternal(HostAddr&& address, - std::vector&& parts) override; - - bool runInMeta() override; - nebula::cpp2::ErrorCode recovery() override; protected: - nebula::cpp2::ErrorCode getAllSpaces( - std::vector>& spaces); + nebula::cpp2::ErrorCode save(const std::string& k, const std::string& v); - ErrorOr getHostParts(GraphSpaceID spaceId, - bool dependentOnGroup, - HostParts& hostParts, - int32_t& totalParts); + folly::Future executeInternal() override; - void calDiff(const HostParts& hostParts, - const std::vector& activeHosts, - std::vector& expand, - std::vector& lost); + virtual Status buildBalancePlan() { return Status::OK(); } - nebula::cpp2::ErrorCode assembleZoneParts(const std::vector& zoneNames, - HostParts& hostParts); + protected: + std::unique_ptr plan_; + std::unique_ptr executor_; + SpaceInfo spaceInfo_; +}; - nebula::cpp2::ErrorCode save(const std::string& k, const std::string& v); +class ZoneBalanceJobExecutor : public BalanceJobExecutor { + FRIEND_TEST(BalanceTest, RemoveZonePlanTest); + FRIEND_TEST(BalanceTest, BalanceZonePlanTest); + FRIEND_TEST(BalanceTest, BalanceZoneRemainderPlanTest); + FRIEND_TEST(BalanceTest, NormalZoneTest); + FRIEND_TEST(BalanceTest, StopPlanTest); + + public: + ZoneBalanceJobExecutor(JobDescription jobDescription, + kvstore::KVStore* kvstore, + AdminClient* adminClient, + const std::vector& params) + : BalanceJobExecutor(jobDescription.getJobId(), kvstore, adminClient, params), + jobDescription_(jobDescription) {} + nebula::cpp2::ErrorCode prepare() override; + nebula::cpp2::ErrorCode stop() override; protected: - static std::atomic_bool running_; - static std::mutex lock_; - bool innerBalance_ = false; - std::unique_ptr executor_; - std::unordered_map zoneParts_; - std::unordered_map> zoneHosts_; - std::unordered_map> relatedParts_; + folly::Future executeInternal() override; 
+ Status buildBalancePlan() override; + nebula::cpp2::ErrorCode updateMeta(); + + private: + std::vector lostZones_; + JobDescription jobDescription_; }; class DataBalanceJobExecutor : public BalanceJobExecutor { - FRIEND_TEST(BalanceTest, BalancePartsTest); - FRIEND_TEST(BalanceTest, NormalTest); - FRIEND_TEST(BalanceTest, SimpleTestWithZone); - FRIEND_TEST(BalanceTest, SpecifyHostTest); - FRIEND_TEST(BalanceTest, SpecifyMultiHostTest); - FRIEND_TEST(BalanceTest, MockReplaceMachineTest); - FRIEND_TEST(BalanceTest, SingleReplicaTest); - FRIEND_TEST(BalanceTest, TryToRecoveryTest); + FRIEND_TEST(BalanceTest, BalanceDataPlanTest); + FRIEND_TEST(BalanceTest, NormalDataTest); FRIEND_TEST(BalanceTest, RecoveryTest); FRIEND_TEST(BalanceTest, StopPlanTest); - FRIEND_TEST(BalanceTest, CleanLastInvalidBalancePlanTest); - FRIEND_TEST(BalanceTest, LeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, SimpleLeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, LeaderBalanceTest); - FRIEND_TEST(BalanceTest, ManyHostsLeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, LeaderBalanceWithZoneTest); - FRIEND_TEST(BalanceTest, LeaderBalanceWithLargerZoneTest); - FRIEND_TEST(BalanceTest, LeaderBalanceWithComplexZoneTest); - FRIEND_TEST(BalanceTest, ExpansionZoneTest); - FRIEND_TEST(BalanceTest, ExpansionHostIntoZoneTest); - FRIEND_TEST(BalanceTest, ShrinkZoneTest); - FRIEND_TEST(BalanceTest, ShrinkHostFromZoneTest); - FRIEND_TEST(BalanceTest, BalanceWithComplexZoneTest); - FRIEND_TEST(BalanceIntegrationTest, LeaderBalanceTest); - FRIEND_TEST(BalanceIntegrationTest, BalanceTest); - friend void testRestBlancer(); public: DataBalanceJobExecutor(JobDescription jobDescription, @@ -108,99 +116,37 @@ class DataBalanceJobExecutor : public BalanceJobExecutor { const std::vector& params) : BalanceJobExecutor(jobDescription.getJobId(), kvstore, adminClient, params), jobDescription_(jobDescription) {} - nebula::cpp2::ErrorCode recovery() override; nebula::cpp2::ErrorCode prepare() override; - nebula::cpp2::ErrorCode finish(bool ret = true) override; nebula::cpp2::ErrorCode stop() override; protected: - folly::Future executeInternal(HostAddr&& address, - std::vector&& parts) override; - nebula::cpp2::ErrorCode buildBalancePlan(); - ErrorOr> genTasks( - GraphSpaceID spaceId, - int32_t spaceReplica, - bool dependentOnZone, - std::vector& lostHosts); - ErrorOr hostWithMinimalParts(const HostParts& hostParts, - PartitionID partId); - - ErrorOr hostWithMinimalPartsForZone(const HostAddr& source, - const HostParts& hostParts, - PartitionID partId); - bool balanceParts(GraphSpaceID spaceId, - HostParts& confirmedHostParts, - int32_t totalParts, - std::vector& tasks, - bool dependentOnZone); - - ErrorOr>> fetchHostParts( - GraphSpaceID spaceId, - bool dependentOnGroup, - const HostParts& hostParts, - std::vector& lostHosts); - - nebula::cpp2::ErrorCode transferLostHost(std::vector& tasks, - HostParts& confirmedHostParts, - const HostAddr& source, - GraphSpaceID spaceId, - PartitionID partId, - bool dependentOnZone); - - Status checkReplica(const HostParts& hostParts, - const std::vector& activeHosts, - int32_t replica, - PartitionID partId); - - std::vector> sortedHostsByParts(const HostParts& hostParts); - bool checkZoneLegal(const HostAddr& source, const HostAddr& target); - nebula::cpp2::ErrorCode finishInternal(bool ret = true); + folly::Future executeInternal() override; + Status buildBalancePlan() override; private: - static std::unique_ptr plan_; std::vector 
lostHosts_; JobDescription jobDescription_; }; -class LeaderBalanceJobExecutor : public BalanceJobExecutor { - FRIEND_TEST(BalanceTest, BalancePartsTest); - FRIEND_TEST(BalanceTest, NormalTest); - FRIEND_TEST(BalanceTest, SimpleTestWithZone); - FRIEND_TEST(BalanceTest, SpecifyHostTest); - FRIEND_TEST(BalanceTest, SpecifyMultiHostTest); - FRIEND_TEST(BalanceTest, MockReplaceMachineTest); - FRIEND_TEST(BalanceTest, SingleReplicaTest); - FRIEND_TEST(BalanceTest, TryToRecoveryTest); - FRIEND_TEST(BalanceTest, RecoveryTest); - FRIEND_TEST(BalanceTest, StopPlanTest); - FRIEND_TEST(BalanceTest, CleanLastInvalidBalancePlanTest); - FRIEND_TEST(BalanceTest, LeaderBalancePlanTest); +class LeaderBalanceJobExecutor : public MetaJobExecutor { FRIEND_TEST(BalanceTest, SimpleLeaderBalancePlanTest); FRIEND_TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, LeaderBalanceTest); FRIEND_TEST(BalanceTest, ManyHostsLeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, LeaderBalanceTest); FRIEND_TEST(BalanceTest, LeaderBalanceWithZoneTest); FRIEND_TEST(BalanceTest, LeaderBalanceWithLargerZoneTest); FRIEND_TEST(BalanceTest, LeaderBalanceWithComplexZoneTest); - FRIEND_TEST(BalanceTest, ExpansionZoneTest); - FRIEND_TEST(BalanceTest, ExpansionHostIntoZoneTest); - FRIEND_TEST(BalanceTest, ShrinkZoneTest); - FRIEND_TEST(BalanceTest, ShrinkHostFromZoneTest); - FRIEND_TEST(BalanceTest, BalanceWithComplexZoneTest); - FRIEND_TEST(BalanceIntegrationTest, LeaderBalanceTest); - FRIEND_TEST(BalanceIntegrationTest, BalanceTest); - friend void testRestBlancer(); public: LeaderBalanceJobExecutor(JobID jobId, kvstore::KVStore* kvstore, AdminClient* adminClient, - const std::vector& params) - : BalanceJobExecutor(jobId, kvstore, adminClient, params) {} + const std::vector& params); + + nebula::cpp2::ErrorCode finish(bool ret = true) override; protected: - folly::Future executeInternal(HostAddr&& address, - std::vector&& parts) override; + folly::Future executeInternal() override; ErrorOr buildLeaderBalancePlan(HostLeaderMap* hostLeaderMap, GraphSpaceID spaceId, @@ -226,10 +172,30 @@ class LeaderBalanceJobExecutor : public BalanceJobExecutor { void simplifyLeaderBalnacePlan(GraphSpaceID spaceId, LeaderBalancePlan& plan); + nebula::cpp2::ErrorCode getAllSpaces( + std::vector>& spaces); + + ErrorOr getHostParts(GraphSpaceID spaceId, + bool dependentOnGroup, + HostParts& hostParts, + int32_t& totalParts); + + void calDiff(const HostParts& hostParts, + const std::vector& activeHosts, + std::vector& expand, + std::vector& lost); + + nebula::cpp2::ErrorCode assembleZoneParts(const std::vector& zoneNames, + HostParts& hostParts); + private: - static std::atomic_bool inLeaderBalance_; + std::atomic_bool inLeaderBalance_; std::unique_ptr hostLeaderMap_; std::unordered_map> hostBounds_; + std::unordered_map zoneParts_; + std::unordered_map> zoneHosts_; + std::unordered_map> relatedParts_; + std::unique_ptr executor_; }; } // namespace meta diff --git a/src/meta/processors/job/BalancePlan.cpp b/src/meta/processors/job/BalancePlan.cpp index db1533f0f31..8153e6f5d61 100644 --- a/src/meta/processors/job/BalancePlan.cpp +++ b/src/meta/processors/job/BalancePlan.cpp @@ -68,7 +68,9 @@ void BalancePlan::invoke() { if (finished) { CHECK_EQ(j, this->buckets_[i].size() - 1); saveInStore(true); - onFinished_(); + onFinished_(stopped ? meta::cpp2::JobStatus::STOPPED + : failed_ ? 
meta::cpp2::JobStatus::FAILED + : meta::cpp2::JobStatus::FINISHED); } else if (j + 1 < this->buckets_[i].size()) { auto& task = this->tasks_[this->buckets_[i][j + 1]]; if (stopped) { @@ -83,6 +85,7 @@ void BalancePlan::invoke() { { std::lock_guard lg(lock_); finishedTaskNum_++; + failed_ = true; VLOG(1) << "Balance " << id() << " has completed " << finishedTaskNum_ << " task"; setStatus(meta::cpp2::JobStatus::FAILED); if (finishedTaskNum_ == tasks_.size()) { @@ -93,7 +96,7 @@ void BalancePlan::invoke() { } if (finished) { CHECK_EQ(j, this->buckets_[i].size() - 1); - onFinished_(); + onFinished_(stopped ? meta::cpp2::JobStatus::STOPPED : meta::cpp2::JobStatus::FAILED); } else if (j + 1 < this->buckets_[i].size()) { auto& task = this->tasks_[this->buckets_[i][j + 1]]; if (tasks_[taskIndex].spaceId_ == task.spaceId_ && @@ -121,7 +124,6 @@ void BalancePlan::invoke() { nebula::cpp2::ErrorCode BalancePlan::saveInStore(bool onlyPlan) { CHECK_NOTNULL(kv_); std::vector data; - data.emplace_back(jobDescription_.jobKey(), jobDescription_.jobVal()); if (!onlyPlan) { for (auto& task : tasks_) { data.emplace_back(MetaKeyUtils::balanceTaskKey( @@ -149,7 +151,7 @@ nebula::cpp2::ErrorCode BalancePlan::saveInStore(bool onlyPlan) { ErrorOr> BalancePlan::show( JobID jobId, kvstore::KVStore* kv, AdminClient* client) { - auto ret = getBalanceTasks(jobId, kv, client, true); + auto ret = getBalanceTasks(jobId, kv, client, false); if (!ok(ret)) { return error(ret); } @@ -180,6 +182,10 @@ ErrorOr> BalancePlan::sh return thriftTasks; } +void BalancePlan::setFinishCallBack(std::function func) { + onFinished_ = func; +} + ErrorOr> BalancePlan::getBalanceTasks( JobID jobId, kvstore::KVStore* kv, AdminClient* client, bool resume) { CHECK_NOTNULL(kv); @@ -213,7 +219,7 @@ ErrorOr> BalancePlan::getBalan task.endTimeMs_ = std::get<3>(tup); if (resume && task.ret_ != BalanceTaskResult::SUCCEEDED) { // Resume the failed task, skip the in-progress and invalid tasks - if (task.ret_ == BalanceTaskResult::FAILED) { + if (task.ret_ == BalanceTaskResult::FAILED || task.ret_ == BalanceTaskResult::INVALID) { task.ret_ = BalanceTaskResult::IN_PROGRESS; } task.status_ = BalanceTaskStatus::START; @@ -229,6 +235,7 @@ ErrorOr> BalancePlan::getBalan task.ret_ = BalanceTaskResult::INVALID; } } + task.endTimeMs_ = 0; } } tasks.emplace_back(std::move(task)); diff --git a/src/meta/processors/job/BalancePlan.h b/src/meta/processors/job/BalancePlan.h index 2dc9b969e52..96914cda544 100644 --- a/src/meta/processors/job/BalancePlan.h +++ b/src/meta/processors/job/BalancePlan.h @@ -82,6 +82,8 @@ class BalancePlan { kvstore::KVStore* kv, AdminClient* client); + void setFinishCallBack(std::function func); + private: JobDescription jobDescription_; kvstore::KVStore* kv_ = nullptr; @@ -89,8 +91,9 @@ class BalancePlan { std::vector tasks_; std::mutex lock_; size_t finishedTaskNum_ = 0; - std::function onFinished_; + std::function onFinished_; bool stopped_ = false; + bool failed_ = false; // List of task index in tasks_; using Bucket = std::vector; diff --git a/src/meta/processors/job/BalanceTask.cpp b/src/meta/processors/job/BalanceTask.cpp index 49653bf8b78..fabf2c25344 100644 --- a/src/meta/processors/job/BalanceTask.cpp +++ b/src/meta/processors/job/BalanceTask.cpp @@ -26,7 +26,8 @@ void BalanceTask::invoke() { if (ret_ == BalanceTaskResult::INVALID) { endTimeMs_ = time::WallClock::fastNowInSec(); saveInStore(); - LOG(ERROR) << taskIdStr_ << " Task invalid, status " << static_cast(status_); + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Task 
invalid, status " + << static_cast(status_); // When a plan is stopped or dst is not alive any more, a task will be // marked as INVALID, the task will not be executed again. Balancer will // start a new plan instead. @@ -35,11 +36,12 @@ void BalanceTask::invoke() { } else if (ret_ == BalanceTaskResult::FAILED) { endTimeMs_ = time::WallClock::fastNowInSec(); saveInStore(); - LOG(ERROR) << taskIdStr_ << " Task failed, status " << static_cast(status_); + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Task failed, status " + << static_cast(status_); onError_(); return; } else { - VLOG(3) << taskIdStr_ << " still in processing"; + VLOG(3) << taskIdStr_ + "," + commandStr_ << " still in processing"; } switch (status_) { @@ -50,7 +52,8 @@ void BalanceTask::invoke() { SAVE_STATE(); client_->checkPeers(spaceId_, partId_).thenValue([this](auto&& resp) { if (!resp.ok()) { - LOG(ERROR) << taskIdStr_ << " Check the peers failed, status " << resp; + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Check the peers failed, status " + << resp; ret_ = BalanceTaskResult::FAILED; } else { status_ = BalanceTaskStatus::CHANGE_LEADER; @@ -61,7 +64,7 @@ void BalanceTask::invoke() { } // fallthrough case BalanceTaskStatus::CHANGE_LEADER: { - LOG(INFO) << taskIdStr_ << " Ask the src to give up the leadership."; + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " Ask the src to give up the leadership."; SAVE_STATE(); auto srcLivedRet = ActiveHostsMan::isLived(kv_, src_); if (nebula::ok(srcLivedRet) && nebula::value(srcLivedRet)) { @@ -73,7 +76,8 @@ void BalanceTask::invoke() { LOG(WARNING) << "Can't find part " << partId_ << " on " << src_; status_ = BalanceTaskStatus::ADD_PART_ON_DST; } else { - LOG(ERROR) << taskIdStr_ << " Transfer leader failed, status " << resp; + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Transfer leader failed, status " + << resp; ret_ = BalanceTaskResult::FAILED; } } else { @@ -83,17 +87,18 @@ void BalanceTask::invoke() { }); break; } else { - LOG(INFO) << taskIdStr_ << " Src host has been lost, so no need to transfer leader"; + LOG(INFO) << taskIdStr_ + "," + commandStr_ + << " Src host has been lost, so no need to transfer leader"; status_ = BalanceTaskStatus::ADD_PART_ON_DST; } } // fallthrough case BalanceTaskStatus::ADD_PART_ON_DST: { - LOG(INFO) << taskIdStr_ << " Open the part as learner on dst."; + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " Open the part as learner on dst."; SAVE_STATE(); client_->addPart(spaceId_, partId_, dst_, true).thenValue([this](auto&& resp) { if (!resp.ok()) { - LOG(ERROR) << taskIdStr_ << " Open part failed, status " << resp; + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Open part failed, status " << resp; ret_ = BalanceTaskResult::FAILED; } else { status_ = BalanceTaskStatus::ADD_LEARNER; @@ -103,11 +108,11 @@ void BalanceTask::invoke() { break; } case BalanceTaskStatus::ADD_LEARNER: { - LOG(INFO) << taskIdStr_ << " Add learner dst."; + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " Add learner dst."; SAVE_STATE(); client_->addLearner(spaceId_, partId_, dst_).thenValue([this](auto&& resp) { if (!resp.ok()) { - LOG(ERROR) << taskIdStr_ << " Add learner failed, status " << resp; + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Add learner failed, status " << resp; ret_ = BalanceTaskResult::FAILED; } else { status_ = BalanceTaskStatus::CATCH_UP_DATA; @@ -117,11 +122,11 @@ void BalanceTask::invoke() { break; } case BalanceTaskStatus::CATCH_UP_DATA: { - LOG(INFO) << taskIdStr_ << " Waiting for the data catch up."; + LOG(INFO) << 
taskIdStr_ + "," + commandStr_ << " Waiting for the data catch up."; SAVE_STATE(); client_->waitingForCatchUpData(spaceId_, partId_, dst_).thenValue([this](auto&& resp) { if (!resp.ok()) { - LOG(ERROR) << taskIdStr_ << " Catchup data failed, status " << resp; + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Catchup data failed, status " << resp; ret_ = BalanceTaskResult::FAILED; } else { status_ = BalanceTaskStatus::MEMBER_CHANGE_ADD; @@ -131,12 +136,12 @@ void BalanceTask::invoke() { break; } case BalanceTaskStatus::MEMBER_CHANGE_ADD: { - LOG(INFO) << taskIdStr_ << " Send member change request to the leader" + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " Send member change request to the leader" << ", it will add the new member on dst host"; SAVE_STATE(); client_->memberChange(spaceId_, partId_, dst_, true).thenValue([this](auto&& resp) { if (!resp.ok()) { - LOG(ERROR) << taskIdStr_ << " Add peer failed, status " << resp; + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Add peer failed, status " << resp; ret_ = BalanceTaskResult::FAILED; } else { status_ = BalanceTaskStatus::MEMBER_CHANGE_REMOVE; @@ -146,12 +151,12 @@ void BalanceTask::invoke() { break; } case BalanceTaskStatus::MEMBER_CHANGE_REMOVE: { - LOG(INFO) << taskIdStr_ << " Send member change request to the leader" + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " Send member change request to the leader" << ", it will remove the old member on src host"; SAVE_STATE(); client_->memberChange(spaceId_, partId_, src_, false).thenValue([this](auto&& resp) { if (!resp.ok()) { - LOG(ERROR) << taskIdStr_ << " Remove peer failed, status " << resp; + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Remove peer failed, status " << resp; ret_ = BalanceTaskResult::FAILED; } else { status_ = BalanceTaskStatus::UPDATE_PART_META; @@ -161,16 +166,16 @@ void BalanceTask::invoke() { break; } case BalanceTaskStatus::UPDATE_PART_META: { - LOG(INFO) << taskIdStr_ << " Update meta for part."; + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " Update meta for part."; SAVE_STATE(); client_->updateMeta(spaceId_, partId_, src_, dst_).thenValue([this](auto&& resp) { // The callback will be called inside raft set value. So don't call // invoke directly here. 
if (!resp.ok()) { - LOG(ERROR) << taskIdStr_ << " Update meta failed, status " << resp; + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Update meta failed, status " << resp; ret_ = BalanceTaskResult::FAILED; } else { - LOG(INFO) << taskIdStr_ << " Update meta succeeded!"; + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " Update meta succeeded!"; status_ = BalanceTaskStatus::REMOVE_PART_ON_SRC; } invoke(); @@ -179,12 +184,12 @@ void BalanceTask::invoke() { } case BalanceTaskStatus::REMOVE_PART_ON_SRC: { auto srcLivedRet = ActiveHostsMan::isLived(kv_, src_); - LOG(INFO) << taskIdStr_ << " Close part on src host, srcLived."; + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " Close part on src host, srcLived."; SAVE_STATE(); if (nebula::ok(srcLivedRet) && nebula::value(srcLivedRet)) { client_->removePart(spaceId_, partId_, src_).thenValue([this](auto&& resp) { if (!resp.ok()) { - LOG(ERROR) << taskIdStr_ << " Remove part failed, status " << resp; + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Remove part failed, status " << resp; ret_ = BalanceTaskResult::FAILED; } else { status_ = BalanceTaskStatus::CHECK; @@ -193,17 +198,18 @@ void BalanceTask::invoke() { }); break; } else { - LOG(INFO) << taskIdStr_ << " Don't remove part on src " << src_; + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " Don't remove part on src " << src_; status_ = BalanceTaskStatus::CHECK; } } // fallthrough case BalanceTaskStatus::CHECK: { - LOG(INFO) << taskIdStr_ << " Check the peers..."; + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " Check the peers..."; SAVE_STATE(); client_->checkPeers(spaceId_, partId_).thenValue([this](auto&& resp) { if (!resp.ok()) { - LOG(ERROR) << taskIdStr_ << " Check the peers failed, status " << resp; + LOG(ERROR) << taskIdStr_ + "," + commandStr_ << " Check the peers failed, status " + << resp; ret_ = BalanceTaskResult::FAILED; } else { status_ = BalanceTaskStatus::END; @@ -213,7 +219,7 @@ void BalanceTask::invoke() { break; } case BalanceTaskStatus::END: { - LOG(INFO) << taskIdStr_ << " Part has been moved successfully!"; + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " Part has been moved successfully!"; endTimeMs_ = time::WallClock::fastNowInSec(); ret_ = BalanceTaskResult::SUCCEEDED; SAVE_STATE(); @@ -250,6 +256,9 @@ bool BalanceTask::saveInStore() { baton.post(); }); baton.wait(); + if (ret_ == BalanceTaskResult::INVALID) + LOG(INFO) << taskIdStr_ + "," + commandStr_ << " save task: " << static_cast(status_) + << " " << static_cast(ret_); return ret; } diff --git a/src/meta/processors/job/BalanceTask.h b/src/meta/processors/job/BalanceTask.h index 5add8dc1c45..404a9de8037 100644 --- a/src/meta/processors/job/BalanceTask.h +++ b/src/meta/processors/job/BalanceTask.h @@ -46,9 +46,11 @@ class BalanceTask { partId_(partId), src_(src), dst_(dst), - taskIdStr_(buildTaskId()), kv_(kv), - client_(client) {} + client_(client) { + taskIdStr_ = buildTaskId(); + commandStr_ = buildCommand(); + } const std::string& taskIdStr() const { return taskIdStr_; } diff --git a/src/meta/processors/job/JobExecutor.cpp b/src/meta/processors/job/JobExecutor.cpp new file mode 100644 index 00000000000..a075c605f47 --- /dev/null +++ b/src/meta/processors/job/JobExecutor.cpp @@ -0,0 +1,82 @@ +/* Copyright (c) 2019 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#include "common/network/NetworkUtils.h" +#include "common/utils/MetaKeyUtils.h" +#include "common/utils/Utils.h" +#include "interface/gen-cpp2/common_types.h" +#include "meta/ActiveHostsMan.h" +#include "meta/common/MetaCommon.h" +#include "meta/processors/Common.h" +#include "meta/processors/admin/AdminClient.h" +#include "meta/processors/job/BalanceJobExecutor.h" +#include "meta/processors/job/CompactJobExecutor.h" +#include "meta/processors/job/FlushJobExecutor.h" +#include "meta/processors/job/RebuildEdgeJobExecutor.h" +#include "meta/processors/job/RebuildFTJobExecutor.h" +#include "meta/processors/job/RebuildTagJobExecutor.h" +#include "meta/processors/job/StatsJobExecutor.h" +#include "meta/processors/job/StorageJobExecutor.h" +#include "meta/processors/job/TaskDescription.h" + +DECLARE_int32(heartbeat_interval_secs); +DECLARE_uint32(expired_time_factor); + +namespace nebula { +namespace meta { + +ErrorOr JobExecutor::getSpaceIdFromName( + const std::string& spaceName) { + auto indexKey = MetaKeyUtils::indexSpaceKey(spaceName); + std::string val; + auto retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, indexKey, &val); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get space failed, space name: " << spaceName + << " error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + return *reinterpret_cast(val.c_str()); +} + +std::unique_ptr JobExecutorFactory::createJobExecutor(const JobDescription& jd, + kvstore::KVStore* store, + AdminClient* client) { + std::unique_ptr ret; + switch (jd.getCmd()) { + case cpp2::AdminCmd::COMPACT: + ret.reset(new CompactJobExecutor(jd.getJobId(), store, client, jd.getParas())); + break; + case cpp2::AdminCmd::DATA_BALANCE: + ret.reset(new DataBalanceJobExecutor(jd, store, client, jd.getParas())); + break; + case cpp2::AdminCmd::ZONE_BALANCE: + ret.reset(new ZoneBalanceJobExecutor(jd, store, client, jd.getParas())); + break; + case cpp2::AdminCmd::LEADER_BALANCE: + ret.reset(new LeaderBalanceJobExecutor(jd.getJobId(), store, client, jd.getParas())); + break; + case cpp2::AdminCmd::FLUSH: + ret.reset(new FlushJobExecutor(jd.getJobId(), store, client, jd.getParas())); + break; + case cpp2::AdminCmd::REBUILD_TAG_INDEX: + ret.reset(new RebuildTagJobExecutor(jd.getJobId(), store, client, jd.getParas())); + break; + case cpp2::AdminCmd::REBUILD_EDGE_INDEX: + ret.reset(new RebuildEdgeJobExecutor(jd.getJobId(), store, client, jd.getParas())); + break; + case cpp2::AdminCmd::REBUILD_FULLTEXT_INDEX: + ret.reset(new RebuildFTJobExecutor(jd.getJobId(), store, client, jd.getParas())); + break; + case cpp2::AdminCmd::STATS: + ret.reset(new StatsJobExecutor(jd.getJobId(), store, client, jd.getParas())); + break; + default: + break; + } + return ret; +} + +} // namespace meta +} // namespace nebula diff --git a/src/meta/processors/job/JobExecutor.h b/src/meta/processors/job/JobExecutor.h new file mode 100644 index 00000000000..9c482dd272b --- /dev/null +++ b/src/meta/processors/job/JobExecutor.h @@ -0,0 +1,70 @@ +/* Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#ifndef META_JOBEXECUTOR_H_ +#define META_JOBEXECUTOR_H_ + +#include + +#include "common/base/ErrorOr.h" +#include "kvstore/KVStore.h" +#include "meta/processors/admin/AdminClient.h" +#include "meta/processors/job/JobDescription.h" + +namespace nebula { +namespace meta { + +class JobExecutor { + public: + JobExecutor() = default; + explicit JobExecutor(kvstore::KVStore* kv) : kvstore_(kv) {} + virtual ~JobExecutor() = default; + + // Check the arguments about the job. + virtual bool check() = 0; + + // Prepare the Job info from the arguments. + virtual nebula::cpp2::ErrorCode prepare() = 0; + + // The skeleton to run the job. + // You should rewrite the executeInternal to trigger the calling. + virtual nebula::cpp2::ErrorCode execute() = 0; + + // Stop the job when the user cancel it. + virtual nebula::cpp2::ErrorCode stop() = 0; + + virtual nebula::cpp2::ErrorCode finish(bool) = 0; + + virtual nebula::cpp2::ErrorCode recovery() = 0; + + virtual void setSpaceId(GraphSpaceID spaceId) = 0; + + virtual bool isMetaJob() = 0; + + virtual void setFinishCallBack( + std::function func) { + UNUSED(func); + } + + virtual nebula::cpp2::ErrorCode saveSpecialTaskStatus(const cpp2::ReportTaskReq&) = 0; + + protected: + ErrorOr getSpaceIdFromName(const std::string& spaceName); + + protected: + kvstore::KVStore* kvstore_{nullptr}; +}; + +class JobExecutorFactory { + public: + static std::unique_ptr createJobExecutor(const JobDescription& jd, + kvstore::KVStore* store, + AdminClient* client); +}; + +} // namespace meta +} // namespace nebula + +#endif // META_JOBEXECUTOR_H_ diff --git a/src/meta/processors/job/JobManager.cpp b/src/meta/processors/job/JobManager.cpp index ca2c116e719..d6b708c541a 100644 --- a/src/meta/processors/job/JobManager.cpp +++ b/src/meta/processors/job/JobManager.cpp @@ -43,8 +43,7 @@ bool JobManager::init(nebula::kvstore::KVStore* store) { if (store == nullptr) { return false; } - std::lock_guard lk(statusGuard_); - if (status_ != JbmgrStatus::NOT_START) { + if (status_.load(std::memory_order_acquire) != JbmgrStatus::NOT_START) { return false; } kvStore_ = store; @@ -52,7 +51,7 @@ bool JobManager::init(nebula::kvstore::KVStore* store) { lowPriorityQueue_ = std::make_unique, true>>(); highPriorityQueue_ = std::make_unique, true>>(); - status_ = JbmgrStatus::IDLE; + status_.store(JbmgrStatus::IDLE, std::memory_order_release); if (handleRemainingJobs() != nebula::cpp2::ErrorCode::SUCCEEDED) { return false; } @@ -82,10 +81,10 @@ nebula::cpp2::ErrorCode JobManager::handleRemainingJobs() { auto optJobRet = JobDescription::makeJobDescription(iter->key(), iter->val()); if (nebula::ok(optJobRet)) { auto optJob = nebula::value(optJobRet); - std::unique_ptr je = - MetaJobExecutorFactory::createMetaJobExecutor(optJob, kvStore_, adminClient_); + std::unique_ptr je = + JobExecutorFactory::createJobExecutor(optJob, kvStore_, adminClient_); // Only balance has been recovered - if (optJob.getStatus() == cpp2::JobStatus::RUNNING && je->runInMeta()) { + if (optJob.getStatus() == cpp2::JobStatus::RUNNING && je->isMetaJob()) { jds.emplace_back(optJob); } } @@ -99,24 +98,22 @@ nebula::cpp2::ErrorCode JobManager::handleRemainingJobs() { void JobManager::shutDown() { LOG(INFO) << "JobManager::shutDown() begin"; - if (status_ == JbmgrStatus::STOPPED) { // in case of shutdown more than once + if (status_.load(std::memory_order_acquire) == + JbmgrStatus::STOPPED) { // in case of shutdown more than once LOG(INFO) << "JobManager not running, exit"; return; } - { - std::lock_guard lk(statusGuard_); - 
status_ = JbmgrStatus::STOPPED; - } + status_.store(JbmgrStatus::STOPPED, std::memory_order_release); bgThread_.join(); LOG(INFO) << "JobManager::shutDown() end"; } void JobManager::scheduleThread() { LOG(INFO) << "JobManager::runJobBackground() enter"; - while (status_ != JbmgrStatus::STOPPED) { + while (status_.load(std::memory_order_acquire) != JbmgrStatus::STOPPED) { std::pair opJobId; - while (status_ == JbmgrStatus::BUSY || !try_dequeue(opJobId)) { - if (status_ == JbmgrStatus::STOPPED) { + while (status_.load(std::memory_order_acquire) == JbmgrStatus::BUSY || !try_dequeue(opJobId)) { + if (status_.load(std::memory_order_acquire) == JbmgrStatus::STOPPED) { LOG(INFO) << "[JobManager] detect shutdown called, exit"; break; } @@ -129,17 +126,12 @@ void JobManager::scheduleThread() { continue; // leader change or archive happened } auto jobDesc = nebula::value(jobDescRet); - if (!jobDesc.setStatus(cpp2::JobStatus::RUNNING)) { + if (!jobDesc.setStatus(cpp2::JobStatus::RUNNING, opJobId.first == JbOp::RECOVER)) { LOG(INFO) << "[JobManager] skip job " << opJobId.second; continue; } save(jobDesc.jobKey(), jobDesc.jobVal()); - { - std::lock_guard lk(statusGuard_); - if (status_ == JbmgrStatus::IDLE) { - status_ = JbmgrStatus::BUSY; - } - } + compareChangeStatus(JbmgrStatus::IDLE, JbmgrStatus::BUSY); if (!runJobInternal(jobDesc, opJobId.first)) { jobFinished(opJobId.second, cpp2::JobStatus::FAILED); } @@ -148,8 +140,11 @@ void JobManager::scheduleThread() { // @return: true if all task dispatched, else false bool JobManager::runJobInternal(const JobDescription& jobDesc, JbOp op) { - std::unique_ptr jobExec = - MetaJobExecutorFactory::createMetaJobExecutor(jobDesc, kvStore_, adminClient_); + std::lock_guard lk(muJobFinished_); + std::unique_ptr je = + JobExecutorFactory::createJobExecutor(jobDesc, kvStore_, adminClient_); + JobExecutor* jobExec = je.get(); + runningJobs_.emplace(jobDesc.getJobId(), std::move(je)); if (jobExec == nullptr) { LOG(ERROR) << "unreconized job cmd " << apache::thrift::util::enumNameSafe(jobDesc.getCmd()); return false; @@ -172,22 +167,14 @@ bool JobManager::runJobInternal(const JobDescription& jobDesc, JbOp op) { if (op == JbOp::RECOVER) { jobExec->recovery(); } - if (jobExec->runInMeta()) { - jobExec->setFinishCallBack([this, &jobDesc](bool ret) { - SCOPE_EXIT { cleanJob(jobDesc.getJobId()); }; - if (ret) { - JobDescription jd = jobDesc; - if (!jd.setStatus(cpp2::JobStatus::FINISHED)) { - return nebula::cpp2::ErrorCode::E_SAVE_JOB_FAILURE; - } - statusGuard_.lock(); - if (status_ == JbmgrStatus::BUSY) { - status_ = JbmgrStatus::IDLE; - } - statusGuard_.unlock(); - return save(jd.jobKey(), jd.jobVal()); - } else { + if (jobExec->isMetaJob()) { + jobExec->setFinishCallBack([this, &jobDesc](meta::cpp2::JobStatus status) { + if (status == meta::cpp2::JobStatus::STOPPED) { + std::lock_guard lk(muJobFinished_); + cleanJob(jobDesc.getJobId()); return nebula::cpp2::ErrorCode::SUCCEEDED; + } else { + return jobFinished(jobDesc.getJobId(), status); } }); } @@ -205,6 +192,10 @@ void JobManager::cleanJob(JobID jobId) { if (it != inFlightJobs_.end()) { inFlightJobs_.erase(it); } + auto itr = runningJobs_.find(jobId); + if (itr != runningJobs_.end()) { + runningJobs_.erase(itr); + } } nebula::cpp2::ErrorCode JobManager::jobFinished(JobID jobId, cpp2::JobStatus jobStatus) { @@ -212,7 +203,6 @@ nebula::cpp2::ErrorCode JobManager::jobFinished(JobID jobId, cpp2::JobStatus job "{}, jobId={}, result={}", __func__, jobId, apache::thrift::util::enumNameSafe(jobStatus)); // normal job finish 
may race to job stop std::lock_guard lk(muJobFinished_); - SCOPE_EXIT { cleanJob(jobId); }; auto optJobDescRet = JobDescription::loadJobDescription(jobId, kvStore_); if (!nebula::ok(optJobDescRet)) { LOG(WARNING) << folly::sformat("can't load job, jobId={}", jobId); @@ -220,10 +210,7 @@ nebula::cpp2::ErrorCode JobManager::jobFinished(JobID jobId, cpp2::JobStatus job // there is a rare condition, that when job finished, // the job description is deleted(default more than a week) // but stop an invalid job should not set status to idle. - std::lock_guard statusLk(statusGuard_); - if (status_ == JbmgrStatus::BUSY) { - status_ = JbmgrStatus::IDLE; - } + compareChangeStatus(JbmgrStatus::BUSY, JbmgrStatus::IDLE); } return nebula::error(optJobDescRet); } @@ -234,23 +221,18 @@ nebula::cpp2::ErrorCode JobManager::jobFinished(JobID jobId, cpp2::JobStatus job // job already been set as finished, failed or stopped return nebula::cpp2::ErrorCode::E_SAVE_JOB_FAILURE; } - { - std::lock_guard statusLk(statusGuard_); - if (status_ == JbmgrStatus::BUSY) { - status_ = JbmgrStatus::IDLE; - } - } + compareChangeStatus(JbmgrStatus::BUSY, JbmgrStatus::IDLE); auto rc = save(optJobDesc.jobKey(), optJobDesc.jobVal()); if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { return rc; } - auto jobExec = MetaJobExecutorFactory::createMetaJobExecutor(optJobDesc, kvStore_, adminClient_); - - if (!jobExec) { - LOG(WARNING) << folly::sformat("unable to create jobExecutor, jobId={}", jobId); + auto it = runningJobs_.find(jobId); + if (it == runningJobs_.end()) { + LOG(WARNING) << folly::sformat("can't find jobExecutor, jobId={}", jobId); return nebula::cpp2::ErrorCode::E_UNKNOWN; } + std::unique_ptr& jobExec = it->second; if (!optJobDesc.getParas().empty()) { auto spaceName = optJobDesc.getParas().back(); auto spaceIdRet = getSpaceId(spaceName); @@ -268,9 +250,13 @@ nebula::cpp2::ErrorCode JobManager::jobFinished(JobID jobId, cpp2::JobStatus job jobExec->setSpaceId(spaceId); } if (jobStatus == cpp2::JobStatus::STOPPED) { - return jobExec->stop(); + jobExec->stop(); + if (!jobExec->isMetaJob()) { + cleanJob(jobId); + } } else { jobExec->finish(jobStatus == cpp2::JobStatus::FINISHED); + cleanJob(jobId); } return nebula::cpp2::ErrorCode::SUCCEEDED; @@ -293,7 +279,7 @@ nebula::cpp2::ErrorCode JobManager::saveTaskStatus(TaskDescription& td, } auto optJobDesc = nebula::value(optJobDescRet); - auto jobExec = MetaJobExecutorFactory::createMetaJobExecutor(optJobDesc, kvStore_, adminClient_); + auto jobExec = JobExecutorFactory::createJobExecutor(optJobDesc, kvStore_, adminClient_); if (!jobExec) { LOG(WARNING) << folly::sformat("createMetaJobExecutor failed(), jobId={}", jobId); @@ -322,6 +308,11 @@ nebula::cpp2::ErrorCode JobManager::saveTaskStatus(TaskDescription& td, return jobExec->saveSpecialTaskStatus(req); } +void JobManager::compareChangeStatus(JbmgrStatus expected, JbmgrStatus despire) { + JbmgrStatus ex = expected; + status_.compare_exchange_strong(ex, despire, std::memory_order_acq_rel); +} + /** * @brief * client should retry if any persist attempt @@ -333,7 +324,8 @@ nebula::cpp2::ErrorCode JobManager::reportTaskFinish(const cpp2::ReportTaskReq& auto jobId = req.get_job_id(); auto taskId = req.get_task_id(); // only an active job manager will accept task finish report - if (status_ == JbmgrStatus::STOPPED || status_ == JbmgrStatus::NOT_START) { + if (status_.load(std::memory_order_acquire) == JbmgrStatus::STOPPED || + status_.load(std::memory_order_acquire) == JbmgrStatus::NOT_START) { LOG(INFO) << folly::sformat( "report to 
an in-active job manager, job={}, task={}", jobId, taskId); return nebula::cpp2::ErrorCode::E_UNKNOWN; @@ -571,7 +563,8 @@ JobManager::showJob(JobID iJob, const std::string& spaceName) { ret.second.emplace_back(td.toTaskDesc()); } } - if (ret.first.get_cmd() == meta::cpp2::AdminCmd::DATA_BALANCE) { + if (ret.first.get_cmd() == meta::cpp2::AdminCmd::DATA_BALANCE || + ret.first.get_cmd() == meta::cpp2::AdminCmd::ZONE_BALANCE) { auto res = BalancePlan::show(iJob, kvStore_, adminClient_); if (ok(res)) { std::vector thriftTasks = value(res); @@ -651,7 +644,9 @@ ErrorOr JobManager::recoverJob( if (optJob.getParas().back() != spaceName) { continue; } - if (optJob.getStatus() == cpp2::JobStatus::QUEUE) { + if (optJob.getStatus() == cpp2::JobStatus::QUEUE || + (jobIds.size() && (optJob.getStatus() == cpp2::JobStatus::FAILED || + optJob.getStatus() == cpp2::JobStatus::STOPPED))) { // Check if the job exists JobID jId = 0; auto jobExist = checkJobExist(optJob.getCmd(), optJob.getParas(), jId); diff --git a/src/meta/processors/job/JobManager.h b/src/meta/processors/job/JobManager.h index 8d9a2e8d188..cf9ce60e557 100644 --- a/src/meta/processors/job/JobManager.h +++ b/src/meta/processors/job/JobManager.h @@ -18,7 +18,7 @@ #include "kvstore/NebulaStore.h" #include "meta/processors/job/JobDescription.h" #include "meta/processors/job/JobStatus.h" -#include "meta/processors/job/MetaJobExecutor.h" +#include "meta/processors/job/StorageJobExecutor.h" #include "meta/processors/job/TaskDescription.h" namespace nebula { @@ -138,23 +138,25 @@ class JobManager : public nebula::cpp::NonCopyable, public nebula::cpp::NonMovab nebula::cpp2::ErrorCode saveTaskStatus(TaskDescription& td, const cpp2::ReportTaskReq& req); + void compareChangeStatus(JbmgrStatus expected, JbmgrStatus despire); + private: // Todo(pandasheep) // When folly is upgraded, PriorityUMPSCQueueSet can be used // Use two queues to simulate priority queue, Divide by job cmd std::unique_ptr, true>> lowPriorityQueue_; std::unique_ptr, true>> highPriorityQueue_; + std::map> runningJobs_; // The job in running or queue folly::ConcurrentHashMap inFlightJobs_; std::thread bgThread_; - std::mutex statusGuard_; - JbmgrStatus status_{JbmgrStatus::NOT_START}; nebula::kvstore::KVStore* kvStore_{nullptr}; AdminClient* adminClient_{nullptr}; std::mutex muReportFinish_; std::mutex muJobFinished_; + std::atomic status_ = JbmgrStatus::NOT_START; }; } // namespace meta diff --git a/src/meta/processors/job/MetaJobExecutor.cpp b/src/meta/processors/job/MetaJobExecutor.cpp index 4a1c648ed6a..e05cf0030c8 100644 --- a/src/meta/processors/job/MetaJobExecutor.cpp +++ b/src/meta/processors/job/MetaJobExecutor.cpp @@ -5,247 +5,49 @@ #include "meta/processors/job/MetaJobExecutor.h" -#include "common/network/NetworkUtils.h" -#include "common/utils/MetaKeyUtils.h" #include "common/utils/Utils.h" -#include "interface/gen-cpp2/common_types.h" -#include "meta/ActiveHostsMan.h" -#include "meta/common/MetaCommon.h" -#include "meta/processors/Common.h" -#include "meta/processors/admin/AdminClient.h" -#include "meta/processors/job/BalanceJobExecutor.h" -#include "meta/processors/job/CompactJobExecutor.h" -#include "meta/processors/job/FlushJobExecutor.h" -#include "meta/processors/job/RebuildEdgeJobExecutor.h" -#include "meta/processors/job/RebuildFTJobExecutor.h" -#include "meta/processors/job/RebuildTagJobExecutor.h" -#include "meta/processors/job/StatsJobExecutor.h" -#include "meta/processors/job/TaskDescription.h" DECLARE_int32(heartbeat_interval_secs); 
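// Editor's sketch (illustrative, not part of the patch): the JobManager changes above
// replace the statusGuard_ mutex with an std::atomic<JbmgrStatus> status_ plus the
// compareChangeStatus() helper. The standalone snippet below shows the same idea in
// isolation; the enum values and memory orders mirror JobManager, while the free
// function and global variable are stand-ins used only for illustration.
#include <atomic>

enum class JbmgrStatus { NOT_START, IDLE, BUSY, STOPPED };

std::atomic<JbmgrStatus> status{JbmgrStatus::NOT_START};

// Transition only when the current value equals `expected`; otherwise leave it alone.
// This reproduces the old "lock; if (status_ == expected) status_ = desired;" block
// without taking a mutex on the scheduling path.
void compareChangeStatus(JbmgrStatus expected, JbmgrStatus desired) {
  JbmgrStatus ex = expected;
  status.compare_exchange_strong(ex, desired, std::memory_order_acq_rel);
}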
DECLARE_uint32(expired_time_factor); namespace nebula { namespace meta { +bool MetaJobExecutor::check() { return true; } -std::unique_ptr MetaJobExecutorFactory::createMetaJobExecutor( - const JobDescription& jd, kvstore::KVStore* store, AdminClient* client) { - std::unique_ptr ret; - switch (jd.getCmd()) { - case cpp2::AdminCmd::COMPACT: - ret.reset(new CompactJobExecutor(jd.getJobId(), store, client, jd.getParas())); - break; - case cpp2::AdminCmd::DATA_BALANCE: - ret.reset(new DataBalanceJobExecutor(jd, store, client, jd.getParas())); - break; - case cpp2::AdminCmd::LEADER_BALANCE: - ret.reset(new LeaderBalanceJobExecutor(jd.getJobId(), store, client, jd.getParas())); - break; - case cpp2::AdminCmd::FLUSH: - ret.reset(new FlushJobExecutor(jd.getJobId(), store, client, jd.getParas())); - break; - case cpp2::AdminCmd::REBUILD_TAG_INDEX: - ret.reset(new RebuildTagJobExecutor(jd.getJobId(), store, client, jd.getParas())); - break; - case cpp2::AdminCmd::REBUILD_EDGE_INDEX: - ret.reset(new RebuildEdgeJobExecutor(jd.getJobId(), store, client, jd.getParas())); - break; - case cpp2::AdminCmd::REBUILD_FULLTEXT_INDEX: - ret.reset(new RebuildFTJobExecutor(jd.getJobId(), store, client, jd.getParas())); - break; - case cpp2::AdminCmd::STATS: - ret.reset(new StatsJobExecutor(jd.getJobId(), store, client, jd.getParas())); - break; - default: - break; - } - return ret; -} - -ErrorOr MetaJobExecutor::getSpaceIdFromName( - const std::string& spaceName) { - auto indexKey = MetaKeyUtils::indexSpaceKey(spaceName); - std::string val; - auto retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, indexKey, &val); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get space failed, space name: " << spaceName - << " error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - return *reinterpret_cast(val.c_str()); -} - -ErrOrHosts MetaJobExecutor::getTargetHost(GraphSpaceID spaceId) { - std::unique_ptr iter; - const auto& partPrefix = MetaKeyUtils::partPrefix(spaceId); - auto retCode = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, partPrefix, &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Fetch Parts Failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } +// Prepare the Job info from the arguments. +nebula::cpp2::ErrorCode MetaJobExecutor::prepare() { return nebula::cpp2::ErrorCode::SUCCEEDED; } - // use vector instead of set because this can convenient for next step - std::unordered_map> hostAndPart; - std::vector>> hosts; - while (iter->valid()) { - auto part = MetaKeyUtils::parsePartKeyPartId(iter->key()); - auto targets = MetaKeyUtils::parsePartVal(iter->val()); - for (auto& target : targets) { - hostAndPart[target].emplace_back(part); - } - iter->next(); - } - for (auto it = hostAndPart.begin(); it != hostAndPart.end(); it++) { - hosts.emplace_back(std::pair(it->first, it->second)); +// The skeleton to run the job. +// You should rewrite the executeInternal to trigger the calling. 
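// Editor's sketch (hedged, not part of the patch): after this refactor a meta-side job
// only overrides executeInternal(); the execute() skeleton that follows blocks on the
// returned future and maps a non-OK Status to E_ADD_JOB_FAILURE. A hypothetical minimal
// subclass could look like the snippet below -- NoopMetaJob and its members are invented
// here purely for illustration and do not exist in this change.
class NoopMetaJob : public MetaJobExecutor {
 public:
  using MetaJobExecutor::MetaJobExecutor;

 protected:
  // Do the in-meta work here; returning a non-OK Status makes execute() report failure.
  folly::Future<Status> executeInternal() override {
    return Status::OK();
  }
};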
+nebula::cpp2::ErrorCode MetaJobExecutor::execute() { + folly::SemiFuture future = executeInternal(); + auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; + future.wait(); + if (!future.value().ok()) { + LOG(ERROR) << future.value().toString(); + rc = nebula::cpp2::ErrorCode::E_ADD_JOB_FAILURE; } - return hosts; + return rc; } -ErrOrHosts MetaJobExecutor::getLeaderHost(GraphSpaceID space) { - const auto& hostPrefix = MetaKeyUtils::leaderPrefix(space); - std::unique_ptr leaderIter; - auto retCode = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, hostPrefix, &leaderIter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get space " << space - << "'s part failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } +// Stop the job when the user cancel it. +nebula::cpp2::ErrorCode MetaJobExecutor::stop() { return nebula::cpp2::ErrorCode::SUCCEEDED; } - std::vector>> hosts; - HostAddr host; - nebula::cpp2::ErrorCode code; - for (; leaderIter->valid(); leaderIter->next()) { - auto spaceAndPart = MetaKeyUtils::parseLeaderKeyV3(leaderIter->key()); - auto partId = spaceAndPart.second; - std::tie(host, std::ignore, code) = MetaKeyUtils::parseLeaderValV3(leaderIter->val()); - if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { - continue; - } - auto it = - std::find_if(hosts.begin(), hosts.end(), [&](auto& item) { return item.first == host; }); - if (it == hosts.end()) { - hosts.emplace_back(std::make_pair(host, std::vector{partId})); - } else { - it->second.emplace_back(partId); - } - } - return hosts; -} +nebula::cpp2::ErrorCode MetaJobExecutor::finish(bool) { return nebula::cpp2::ErrorCode::SUCCEEDED; } -ErrOrHosts MetaJobExecutor::getListenerHost(GraphSpaceID space, cpp2::ListenerType type) { - const auto& prefix = MetaKeyUtils::listenerPrefix(space, type); - std::unique_ptr iter; - auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get space " << space - << "'s listener failed, error: " << apache::thrift::util::enumNameSafe(ret); - return ret; - } +void MetaJobExecutor::setSpaceId(GraphSpaceID spaceId) { space_ = spaceId; } - auto activeHostsRet = - ActiveHostsMan::getActiveHosts(kvstore_, - FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor, - cpp2::HostRole::LISTENER); - if (!nebula::ok(activeHostsRet)) { - return nebula::error(activeHostsRet); - } +bool MetaJobExecutor::isMetaJob() { return true; } - auto activeHosts = std::move(nebula::value(activeHostsRet)); - std::vector>> hosts; +nebula::cpp2::ErrorCode MetaJobExecutor::recovery() { return nebula::cpp2::ErrorCode::SUCCEEDED; } - while (iter->valid()) { - auto host = MetaKeyUtils::deserializeHostAddr(iter->val()); - auto part = MetaKeyUtils::parseListenerPart(iter->key()); - if (std::find(activeHosts.begin(), activeHosts.end(), host) == activeHosts.end()) { - LOG(ERROR) << "Invalid host : " << network::NetworkUtils::toHostsStr({host}); - return nebula::cpp2::ErrorCode::E_INVALID_HOST; - } - auto it = std::find_if( - hosts.begin(), hosts.end(), [&host](auto& item) { return item.first == host; }); - if (it == hosts.end()) { - hosts.emplace_back(std::make_pair(host, std::vector{part})); - } else { - it->second.emplace_back(part); - } - iter->next(); - } - if (hosts.empty()) { - return nebula::cpp2::ErrorCode::E_LISTENER_NOT_FOUND; - } - return hosts; +void MetaJobExecutor::setFinishCallBack( + std::function func) { + onFinished_ = func; } -nebula::cpp2::ErrorCode MetaJobExecutor::execute() { - 
ErrOrHosts addressesRet; - switch (toHost_) { - case TargetHosts::LEADER: { - addressesRet = getLeaderHost(space_); - break; - } - case TargetHosts::LISTENER: { - addressesRet = getListenerHost(space_, cpp2::ListenerType::ELASTICSEARCH); - break; - } - case TargetHosts::NONE: { - addressesRet = {{HostAddr(), {}}}; - break; - } - case TargetHosts::DEFAULT: { - addressesRet = getTargetHost(space_); - break; - } - } - - if (!nebula::ok(addressesRet)) { - LOG(ERROR) << "Can't get hosts"; - return nebula::error(addressesRet); - } - - std::vector parts; - auto addresses = nebula::value(addressesRet); - - // write all tasks first. - if (toHost_ != TargetHosts::NONE) { - for (auto i = 0U; i != addresses.size(); ++i) { - TaskDescription task(jobId_, i, addresses[i].first); - std::vector data{{task.taskKey(), task.taskVal()}}; - folly::Baton baton; - auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; - kvstore_->asyncMultiPut( - kDefaultSpaceId, kDefaultPartId, std::move(data), [&](nebula::cpp2::ErrorCode code) { - rc = code; - baton.post(); - }); - baton.wait(); - if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(INFO) << "write to kv store failed, error: " << apache::thrift::util::enumNameSafe(rc); - return rc; - } - } - } - - std::vector> futures; - for (auto& address : addresses) { - // transform to the admin host - auto h = Utils::getAdminAddrFromStoreAddr(address.first); - futures.emplace_back(executeInternal(std::move(h), std::move(address.second))); - } - - auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; - auto tries = folly::collectAll(std::move(futures)).get(); - for (auto& t : tries) { - if (t.hasException()) { - LOG(ERROR) << t.exception().what(); - rc = nebula::cpp2::ErrorCode::E_RPC_FAILURE; - continue; - } - if (!t.value().ok()) { - LOG(ERROR) << t.value().toString(); - rc = nebula::cpp2::ErrorCode::E_RPC_FAILURE; - continue; - } - } - return rc; +nebula::cpp2::ErrorCode MetaJobExecutor::saveSpecialTaskStatus(const cpp2::ReportTaskReq&) { + return nebula::cpp2::ErrorCode::SUCCEEDED; } } // namespace meta diff --git a/src/meta/processors/job/MetaJobExecutor.h b/src/meta/processors/job/MetaJobExecutor.h index bd10d0af110..1491e8a40ca 100644 --- a/src/meta/processors/job/MetaJobExecutor.h +++ b/src/meta/processors/job/MetaJobExecutor.h @@ -12,90 +12,63 @@ #include "kvstore/KVStore.h" #include "meta/processors/admin/AdminClient.h" #include "meta/processors/job/JobDescription.h" +#include "meta/processors/job/JobExecutor.h" namespace nebula { namespace meta { -using PartsOfHost = std::pair>; -using ErrOrHosts = ErrorOr>; - -class MetaJobExecutor { +class MetaJobExecutor : public JobExecutor { public: - enum class TargetHosts { LEADER = 0, LISTENER, NONE, DEFAULT }; - MetaJobExecutor(JobID jobId, kvstore::KVStore* kvstore, AdminClient* adminClient, const std::vector& paras) - : jobId_(jobId), kvstore_(kvstore), adminClient_(adminClient), paras_(paras) { - onFinished_ = [](bool) { return nebula::cpp2::ErrorCode::SUCCEEDED; }; + : JobExecutor(kvstore), jobId_(jobId), adminClient_(adminClient), paras_(paras) { + onFinished_ = [](meta::cpp2::JobStatus) { return nebula::cpp2::ErrorCode::SUCCEEDED; }; } virtual ~MetaJobExecutor() = default; // Check the arguments about the job. - virtual bool check() = 0; + bool check() override; // Prepare the Job info from the arguments. - virtual nebula::cpp2::ErrorCode prepare() = 0; + nebula::cpp2::ErrorCode prepare() override; // The skeleton to run the job. // You should rewrite the executeInternal to trigger the calling. 
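// Editor's note (orientation summary, not part of the patch): as far as this diff shows,
// the old MetaJobExecutor responsibilities are split into a small hierarchy, roughly:
//
//   JobExecutor           -- common interface (check/prepare/execute/stop/finish,
//                            recovery, setSpaceId, isMetaJob, saveSpecialTaskStatus)
//     MetaJobExecutor     -- runs inside metad, isMetaJob() == true, subclasses override
//                            executeInternal(); the balance executors
//                            (DataBalanceJobExecutor, ZoneBalanceJobExecutor,
//                            LeaderBalanceJobExecutor) build on it
//     StorageJobExecutor  -- keeps the old TargetHosts fan-out to storaged hosts,
//                            isMetaJob() == false; RebuildJobExecutor, StatsJobExecutor
//                            and SimpleConcurrentJobExecutor now derive from it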
- nebula::cpp2::ErrorCode execute(); - - void interruptExecution(JobID jobId); + nebula::cpp2::ErrorCode execute() override; // Stop the job when the user cancel it. - virtual nebula::cpp2::ErrorCode stop() = 0; + nebula::cpp2::ErrorCode stop() override; - virtual nebula::cpp2::ErrorCode finish(bool) { return nebula::cpp2::ErrorCode::SUCCEEDED; } + nebula::cpp2::ErrorCode finish(bool) override; - void setSpaceId(GraphSpaceID spaceId) { space_ = spaceId; } + void setSpaceId(GraphSpaceID spaceId) override; - virtual nebula::cpp2::ErrorCode saveSpecialTaskStatus(const cpp2::ReportTaskReq&) { - return nebula::cpp2::ErrorCode::SUCCEEDED; - } + bool isMetaJob() override; - virtual bool runInMeta() { return false; } + nebula::cpp2::ErrorCode recovery() override; - virtual nebula::cpp2::ErrorCode recovery() { return nebula::cpp2::ErrorCode::SUCCEEDED; } + void setFinishCallBack( + std::function func) override; - void setFinishCallBack(std::function func) { - onFinished_ = func; - } + nebula::cpp2::ErrorCode saveSpecialTaskStatus(const cpp2::ReportTaskReq&) override; protected: - ErrorOr getSpaceIdFromName(const std::string& spaceName); - - ErrOrHosts getTargetHost(GraphSpaceID space); - - ErrOrHosts getLeaderHost(GraphSpaceID space); - - ErrOrHosts getListenerHost(GraphSpaceID space, cpp2::ListenerType type); - - virtual folly::Future executeInternal(HostAddr&& address, - std::vector&& parts) = 0; + virtual folly::Future executeInternal() = 0; protected: JobID jobId_{INT_MIN}; TaskID taskId_{0}; - kvstore::KVStore* kvstore_{nullptr}; AdminClient* adminClient_{nullptr}; GraphSpaceID space_; std::vector paras_; - TargetHosts toHost_{TargetHosts::DEFAULT}; int32_t concurrency_{INT_MAX}; volatile bool stopped_{false}; std::mutex muInterrupt_; std::condition_variable condInterrupt_; - std::function onFinished_; -}; - -class MetaJobExecutorFactory { - public: - static std::unique_ptr createMetaJobExecutor(const JobDescription& jd, - kvstore::KVStore* store, - AdminClient* client); + std::function onFinished_; }; } // namespace meta diff --git a/src/meta/processors/job/RebuildJobExecutor.h b/src/meta/processors/job/RebuildJobExecutor.h index 8488f6f5f2a..4832b724c67 100644 --- a/src/meta/processors/job/RebuildJobExecutor.h +++ b/src/meta/processors/job/RebuildJobExecutor.h @@ -8,18 +8,18 @@ #include "interface/gen-cpp2/common_types.h" #include "meta/processors/admin/AdminClient.h" -#include "meta/processors/job/MetaJobExecutor.h" +#include "meta/processors/job/StorageJobExecutor.h" namespace nebula { namespace meta { -class RebuildJobExecutor : public MetaJobExecutor { +class RebuildJobExecutor : public StorageJobExecutor { public: RebuildJobExecutor(JobID jobId, kvstore::KVStore* kvstore, AdminClient* adminClient, const std::vector& paras) - : MetaJobExecutor(jobId, kvstore, adminClient, paras) { + : StorageJobExecutor(jobId, kvstore, adminClient, paras) { toHost_ = TargetHosts::LEADER; } diff --git a/src/meta/processors/job/SimpleConcurrentJobExecutor.cpp b/src/meta/processors/job/SimpleConcurrentJobExecutor.cpp index 4de1254f247..a69ea88c9e1 100644 --- a/src/meta/processors/job/SimpleConcurrentJobExecutor.cpp +++ b/src/meta/processors/job/SimpleConcurrentJobExecutor.cpp @@ -15,7 +15,7 @@ SimpleConcurrentJobExecutor::SimpleConcurrentJobExecutor(JobID jobId, kvstore::KVStore* kvstore, AdminClient* adminClient, const std::vector& paras) - : MetaJobExecutor(jobId, kvstore, adminClient, paras) {} + : StorageJobExecutor(jobId, kvstore, adminClient, paras) {} bool SimpleConcurrentJobExecutor::check() { auto 
parasNum = paras_.size(); diff --git a/src/meta/processors/job/SimpleConcurrentJobExecutor.h b/src/meta/processors/job/SimpleConcurrentJobExecutor.h index b6301b0090e..6400379bb01 100644 --- a/src/meta/processors/job/SimpleConcurrentJobExecutor.h +++ b/src/meta/processors/job/SimpleConcurrentJobExecutor.h @@ -7,12 +7,12 @@ #define META_SIMPLECONCURRENTJOBEXECUTOR_H_ #include "interface/gen-cpp2/common_types.h" -#include "meta/processors/job/MetaJobExecutor.h" +#include "meta/processors/job/StorageJobExecutor.h" namespace nebula { namespace meta { -class SimpleConcurrentJobExecutor : public MetaJobExecutor { +class SimpleConcurrentJobExecutor : public StorageJobExecutor { public: SimpleConcurrentJobExecutor(JobID jobId, kvstore::KVStore* kvstore, diff --git a/src/meta/processors/job/StatsJobExecutor.h b/src/meta/processors/job/StatsJobExecutor.h index a7e3e23ab2f..bfbc3d84478 100644 --- a/src/meta/processors/job/StatsJobExecutor.h +++ b/src/meta/processors/job/StatsJobExecutor.h @@ -8,18 +8,18 @@ #include "interface/gen-cpp2/meta_types.h" #include "meta/processors/admin/AdminClient.h" -#include "meta/processors/job/MetaJobExecutor.h" +#include "meta/processors/job/StorageJobExecutor.h" namespace nebula { namespace meta { -class StatsJobExecutor : public MetaJobExecutor { +class StatsJobExecutor : public StorageJobExecutor { public: StatsJobExecutor(JobID jobId, kvstore::KVStore* kvstore, AdminClient* adminClient, const std::vector& paras) - : MetaJobExecutor(jobId, kvstore, adminClient, paras) { + : StorageJobExecutor(jobId, kvstore, adminClient, paras) { toHost_ = TargetHosts::LEADER; } diff --git a/src/meta/processors/job/StorageJobExecutor.cpp b/src/meta/processors/job/StorageJobExecutor.cpp new file mode 100644 index 00000000000..40922456dc3 --- /dev/null +++ b/src/meta/processors/job/StorageJobExecutor.cpp @@ -0,0 +1,199 @@ +/* Copyright (c) 2019 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#include "meta/processors/job/StorageJobExecutor.h" + +#include "common/network/NetworkUtils.h" +#include "common/utils/MetaKeyUtils.h" +#include "common/utils/Utils.h" +#include "interface/gen-cpp2/common_types.h" +#include "meta/ActiveHostsMan.h" +#include "meta/common/MetaCommon.h" +#include "meta/processors/Common.h" +#include "meta/processors/admin/AdminClient.h" +#include "meta/processors/job/BalanceJobExecutor.h" +#include "meta/processors/job/CompactJobExecutor.h" +#include "meta/processors/job/FlushJobExecutor.h" +#include "meta/processors/job/RebuildEdgeJobExecutor.h" +#include "meta/processors/job/RebuildFTJobExecutor.h" +#include "meta/processors/job/RebuildTagJobExecutor.h" +#include "meta/processors/job/StatsJobExecutor.h" +#include "meta/processors/job/TaskDescription.h" + +DECLARE_int32(heartbeat_interval_secs); +DECLARE_uint32(expired_time_factor); + +namespace nebula { +namespace meta { + +ErrOrHosts StorageJobExecutor::getTargetHost(GraphSpaceID spaceId) { + std::unique_ptr iter; + const auto& partPrefix = MetaKeyUtils::partPrefix(spaceId); + auto retCode = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, partPrefix, &iter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Fetch Parts Failed, error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + // use vector instead of set because this can convenient for next step + std::unordered_map> hostAndPart; + std::vector>> hosts; + while (iter->valid()) { + auto part = MetaKeyUtils::parsePartKeyPartId(iter->key()); + auto targets = MetaKeyUtils::parsePartVal(iter->val()); + for (auto& target : targets) { + hostAndPart[target].emplace_back(part); + } + iter->next(); + } + for (auto it = hostAndPart.begin(); it != hostAndPart.end(); it++) { + hosts.emplace_back(std::pair(it->first, it->second)); + } + return hosts; +} + +ErrOrHosts StorageJobExecutor::getLeaderHost(GraphSpaceID space) { + const auto& hostPrefix = MetaKeyUtils::leaderPrefix(space); + std::unique_ptr leaderIter; + auto retCode = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, hostPrefix, &leaderIter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get space " << space + << "'s part failed, error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + std::vector>> hosts; + HostAddr host; + nebula::cpp2::ErrorCode code; + for (; leaderIter->valid(); leaderIter->next()) { + auto spaceAndPart = MetaKeyUtils::parseLeaderKeyV3(leaderIter->key()); + auto partId = spaceAndPart.second; + std::tie(host, std::ignore, code) = MetaKeyUtils::parseLeaderValV3(leaderIter->val()); + if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { + continue; + } + auto it = + std::find_if(hosts.begin(), hosts.end(), [&](auto& item) { return item.first == host; }); + if (it == hosts.end()) { + hosts.emplace_back(std::make_pair(host, std::vector{partId})); + } else { + it->second.emplace_back(partId); + } + } + return hosts; +} + +ErrOrHosts StorageJobExecutor::getListenerHost(GraphSpaceID space, cpp2::ListenerType type) { + const auto& prefix = MetaKeyUtils::listenerPrefix(space, type); + std::unique_ptr iter; + auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get space " << space + << "'s listener failed, error: " << apache::thrift::util::enumNameSafe(ret); + return ret; + } + + auto activeHostsRet = + ActiveHostsMan::getActiveHosts(kvstore_, + FLAGS_heartbeat_interval_secs * 
FLAGS_expired_time_factor, + cpp2::HostRole::LISTENER); + if (!nebula::ok(activeHostsRet)) { + return nebula::error(activeHostsRet); + } + + auto activeHosts = std::move(nebula::value(activeHostsRet)); + std::vector>> hosts; + + while (iter->valid()) { + auto host = MetaKeyUtils::deserializeHostAddr(iter->val()); + auto part = MetaKeyUtils::parseListenerPart(iter->key()); + if (std::find(activeHosts.begin(), activeHosts.end(), host) == activeHosts.end()) { + LOG(ERROR) << "Invalid host : " << network::NetworkUtils::toHostsStr({host}); + return nebula::cpp2::ErrorCode::E_INVALID_HOST; + } + auto it = std::find_if( + hosts.begin(), hosts.end(), [&host](auto& item) { return item.first == host; }); + if (it == hosts.end()) { + hosts.emplace_back(std::make_pair(host, std::vector{part})); + } else { + it->second.emplace_back(part); + } + iter->next(); + } + if (hosts.empty()) { + return nebula::cpp2::ErrorCode::E_LISTENER_NOT_FOUND; + } + return hosts; +} + +nebula::cpp2::ErrorCode StorageJobExecutor::execute() { + ErrOrHosts addressesRet; + switch (toHost_) { + case TargetHosts::LEADER: { + addressesRet = getLeaderHost(space_); + break; + } + case TargetHosts::LISTENER: { + addressesRet = getListenerHost(space_, cpp2::ListenerType::ELASTICSEARCH); + break; + } + case TargetHosts::DEFAULT: { + addressesRet = getTargetHost(space_); + break; + } + } + + if (!nebula::ok(addressesRet)) { + LOG(ERROR) << "Can't get hosts"; + return nebula::error(addressesRet); + } + + std::vector parts; + auto addresses = nebula::value(addressesRet); + + // write all tasks first. + for (auto i = 0U; i != addresses.size(); ++i) { + TaskDescription task(jobId_, i, addresses[i].first); + std::vector data{{task.taskKey(), task.taskVal()}}; + folly::Baton baton; + auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; + kvstore_->asyncMultiPut( + kDefaultSpaceId, kDefaultPartId, std::move(data), [&](nebula::cpp2::ErrorCode code) { + rc = code; + baton.post(); + }); + baton.wait(); + if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(INFO) << "write to kv store failed, error: " << apache::thrift::util::enumNameSafe(rc); + return rc; + } + } + + std::vector> futures; + for (auto& address : addresses) { + // transform to the admin host + auto h = Utils::getAdminAddrFromStoreAddr(address.first); + futures.emplace_back(executeInternal(std::move(h), std::move(address.second))); + } + + auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; + auto tries = folly::collectAll(std::move(futures)).get(); + for (auto& t : tries) { + if (t.hasException()) { + LOG(ERROR) << t.exception().what(); + rc = nebula::cpp2::ErrorCode::E_RPC_FAILURE; + continue; + } + if (!t.value().ok()) { + LOG(ERROR) << t.value().toString(); + rc = nebula::cpp2::ErrorCode::E_RPC_FAILURE; + continue; + } + } + return rc; +} + +} // namespace meta +} // namespace nebula diff --git a/src/meta/processors/job/StorageJobExecutor.h b/src/meta/processors/job/StorageJobExecutor.h new file mode 100644 index 00000000000..02e51418882 --- /dev/null +++ b/src/meta/processors/job/StorageJobExecutor.h @@ -0,0 +1,89 @@ +/* Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#ifndef META_STORAGEJOBEXECUTOR_H_ +#define META_STORAGEJOBEXECUTOR_H_ + +#include + +#include "common/base/ErrorOr.h" +#include "kvstore/KVStore.h" +#include "meta/processors/admin/AdminClient.h" +#include "meta/processors/job/JobDescription.h" +#include "meta/processors/job/JobExecutor.h" + +namespace nebula { +namespace meta { + +using PartsOfHost = std::pair>; +using ErrOrHosts = ErrorOr>; + +class StorageJobExecutor : public JobExecutor { + public: + enum class TargetHosts { LEADER = 0, LISTENER, DEFAULT }; + + StorageJobExecutor(JobID jobId, + kvstore::KVStore* kvstore, + AdminClient* adminClient, + const std::vector& paras) + : JobExecutor(kvstore), jobId_(jobId), adminClient_(adminClient), paras_(paras) { + } + + virtual ~StorageJobExecutor() = default; + + // Check the arguments about the job. + bool check() override { return true; } + + // Prepare the Job info from the arguments. + nebula::cpp2::ErrorCode prepare() override { return nebula::cpp2::ErrorCode::SUCCEEDED; } + + // The skeleton to run the job. + // You should rewrite the executeInternal to trigger the calling. + nebula::cpp2::ErrorCode execute() override; + + void interruptExecution(JobID jobId); + + // Stop the job when the user cancel it. + nebula::cpp2::ErrorCode stop() override { return nebula::cpp2::ErrorCode::SUCCEEDED; } + + nebula::cpp2::ErrorCode finish(bool) override { return nebula::cpp2::ErrorCode::SUCCEEDED; } + + void setSpaceId(GraphSpaceID spaceId) override { space_ = spaceId; } + + nebula::cpp2::ErrorCode saveSpecialTaskStatus(const cpp2::ReportTaskReq&) override { + return nebula::cpp2::ErrorCode::SUCCEEDED; + } + + bool isMetaJob() override { return false; } + + nebula::cpp2::ErrorCode recovery() override { return nebula::cpp2::ErrorCode::SUCCEEDED; } + + protected: + ErrOrHosts getTargetHost(GraphSpaceID space); + + ErrOrHosts getLeaderHost(GraphSpaceID space); + + ErrOrHosts getListenerHost(GraphSpaceID space, cpp2::ListenerType type); + + virtual folly::Future executeInternal(HostAddr&& address, + std::vector&& parts) = 0; + + protected: + JobID jobId_{INT_MIN}; + TaskID taskId_{0}; + AdminClient* adminClient_{nullptr}; + GraphSpaceID space_; + std::vector paras_; + TargetHosts toHost_{TargetHosts::DEFAULT}; + int32_t concurrency_{INT_MAX}; + volatile bool stopped_{false}; + std::mutex muInterrupt_; + std::condition_variable condInterrupt_; +}; + +} // namespace meta +} // namespace nebula + +#endif // META_STORAGEJOBEXECUTOR_H_ diff --git a/src/meta/test/BalancerTest.cpp b/src/meta/test/BalancerTest.cpp index 3ddbc6a5544..fbdc9d03773 100644 --- a/src/meta/test/BalancerTest.cpp +++ b/src/meta/test/BalancerTest.cpp @@ -90,6 +90,310 @@ TEST(BalanceTest, BalanceTaskTest) { LOG(INFO) << "Test finished!"; } +SpaceInfo createSpaceInfo( + const std::string& name, + GraphSpaceID spaceId, + int32_t replica, + const std::vector< + std::pair>>>>& + zones) { + SpaceInfo spaceInfo; + spaceInfo.name_ = name; + spaceInfo.spaceId_ = spaceId; + spaceInfo.replica_ = replica; + for (const auto& z : zones) { + Zone zone(z.first); + for (const auto& h : z.second) { + Host host(h.first); + for (const auto& p : h.second) { + host.parts_.insert(p); + } + zone.hosts_.emplace(host.ha_, host); + } + spaceInfo.zones_.emplace(zone.zoneName_, zone); + } + return spaceInfo; +} + +void checkZoneAvg(const Zone& zone) { + int32_t avg = zone.partNum_ / zone.hosts_.size(); + for (const auto& p : zone.hosts_) { + EXPECT_EQ(p.second.parts_.size() - avg <= 1, true); + } +} + +void checkConflic(const Zone& zone) { + int32_t 
totalNum = 0; + for (const auto& p : zone.hosts_) { + totalNum += p.second.parts_.size(); + } + EXPECT_EQ(totalNum, zone.partNum_); +} + +TEST(BalanceTest, RemoveZonePlanTest) { + fs::TempDir rootPath("/tmp/RemoveZoneTest.XXXXXX"); + std::unique_ptr store = MockCluster::initMetaKV(rootPath.path()); + SpaceInfo spaceInfo = createSpaceInfo( + "space1", + 1, + 3, + {{"zone1", + {{{"127.0.0.1", 11}, {5}}, + {{"127.0.0.1", 12}, {10, 15}}, + {{"127.0.0.1", 13}, {12, 13, 14}}}}, + {"zone2", + {{{"127.0.0.1", 21}, {3, 4}}, {{"127.0.0.1", 22}, {8}}, {{"127.0.0.1", 23}, {15}}}}, + {"zone3", + {{{"127.0.0.1", 31}, {1, 2}}, + {{"127.0.0.1", 32}, {6, 7, 8, 9, 10}}, + {{"127.0.0.1", 33}, {11, 12}}}}, + {"zone4", + {{{"127.0.0.1", 41}, {1, 2, 3}}, + {{"127.0.0.1", 42}, {6, 7, 11}}, + {{"127.0.0.1", 43}, {12, 13, 14}}}}, + {"zone5", + {{{"127.0.0.1", 51}, {3, 4, 5}}, + {{"127.0.0.1", 52}, {9, 10, 11}}, + {{"127.0.0.1", 53}, {13, 14, 15}}}}, + {"zone6", {{{"127.0.0.1", 61}, {4}}, {{"127.0.0.1", 62}, {8, 9}}}}, + {"zone7", + {{{"127.0.0.1", 71}, {1, 2}}, {{"127.0.0.1", 72}, {6, 7}}, {{"127.0.0.1", 73}, {5}}}}}); + ZoneBalanceJobExecutor balancer(JobDescription(), store.get(), nullptr, {}); + balancer.lostZones_ = {"zone6", "zone7"}; + balancer.spaceInfo_ = spaceInfo; + Status status = balancer.buildBalancePlan(); + EXPECT_EQ(status, Status::OK()); + checkZoneAvg(balancer.spaceInfo_.zones_["zone1"]); + checkZoneAvg(balancer.spaceInfo_.zones_["zone2"]); + + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone2"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone3"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone4"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone5"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone6"].partNum_, 0); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone7"].partNum_, 0); + checkConflic(balancer.spaceInfo_.zones_["zone1"]); + checkConflic(balancer.spaceInfo_.zones_["zone2"]); + checkConflic(balancer.spaceInfo_.zones_["zone3"]); + checkConflic(balancer.spaceInfo_.zones_["zone4"]); + checkConflic(balancer.spaceInfo_.zones_["zone5"]); + checkConflic(balancer.spaceInfo_.zones_["zone6"]); + checkConflic(balancer.spaceInfo_.zones_["zone7"]); +} + +TEST(BalanceTest, BalanceZonePlanTest) { + fs::TempDir rootPath("/tmp/BalanceZoneTest.XXXXXX"); + std::unique_ptr store = MockCluster::initMetaKV(rootPath.path()); + SpaceInfo spaceInfo = createSpaceInfo( + "space1", + 1, + 3, + { + {"zone1", + {{{"127.0.0.1", 11}, {5}}, + {{"127.0.0.1", 12}, {10, 15}}, + {{"127.0.0.1", 13}, {12, 13, 14}}}}, + {"zone2", + {{{"127.0.0.1", 21}, {3, 4}}, {{"127.0.0.1", 22}, {8}}, {{"127.0.0.1", 23}, {15}}}}, + {"zone3", + {{{"127.0.0.1", 31}, {1, 2}}, + {{"127.0.0.1", 32}, {6, 7, 8, 9, 10}}, + {{"127.0.0.1", 33}, {11, 12}}}}, + {"zone4", + {{{"127.0.0.1", 41}, {1, 2, 3, 4, 5}}, + {{"127.0.0.1", 42}, {6, 7, 8, 9, 11}}, + {{"127.0.0.1", 43}, {12, 13, 14}}}}, + {"zone5", + {{{"127.0.0.1", 51}, {1, 2, 3, 4, 5}}, + {{"127.0.0.1", 52}, {6, 7, 9, 10, 11}}, + {{"127.0.0.1", 53}, {13, 14, 15}}}}, + }); + ZoneBalanceJobExecutor balancer(JobDescription(), store.get(), nullptr, {}); + balancer.spaceInfo_ = spaceInfo; + Status status = balancer.buildBalancePlan(); + EXPECT_EQ(status, Status::OK()); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone2"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone3"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone4"].partNum_, 9); + 
EXPECT_EQ(balancer.spaceInfo_.zones_["zone5"].partNum_, 9); + checkZoneAvg(balancer.spaceInfo_.zones_["zone1"]); + checkZoneAvg(balancer.spaceInfo_.zones_["zone2"]); + checkZoneAvg(balancer.spaceInfo_.zones_["zone4"]); + checkZoneAvg(balancer.spaceInfo_.zones_["zone5"]); + checkConflic(balancer.spaceInfo_.zones_["zone1"]); + checkConflic(balancer.spaceInfo_.zones_["zone2"]); + checkConflic(balancer.spaceInfo_.zones_["zone3"]); + checkConflic(balancer.spaceInfo_.zones_["zone4"]); + checkConflic(balancer.spaceInfo_.zones_["zone5"]); + balancer.lostZones_ = {"zone4", "zone5"}; + status = balancer.buildBalancePlan(); + EXPECT_EQ(status, Status::OK()); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].partNum_, 15); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone2"].partNum_, 15); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone3"].partNum_, 15); + balancer.lostZones_ = {}; + status = balancer.buildBalancePlan(); + EXPECT_EQ(status, Status::OK()); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone2"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone3"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone4"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone5"].partNum_, 9); +} + +TEST(BalanceTest, BalanceZoneRemainderPlanTest) { + fs::TempDir rootPath("/tmp/BalanceZoneTest.XXXXXX"); + std::unique_ptr store = MockCluster::initMetaKV(rootPath.path()); + SpaceInfo spaceInfo = createSpaceInfo( + "space1", + 1, + 3, + { + {"zone1", + {{{"127.0.0.1", 11}, {5}}, + {{"127.0.0.1", 12}, {10, 15}}, + {{"127.0.0.1", 13}, {12, 13, 14}}}}, + {"zone2", + {{{"127.0.0.1", 21}, {3, 4}}, {{"127.0.0.1", 22}, {8, 16}}, {{"127.0.0.1", 23}, {15}}}}, + {"zone3", + {{{"127.0.0.1", 31}, {1, 2}}, + {{"127.0.0.1", 32}, {6, 7, 8, 9, 10}}, + {{"127.0.0.1", 33}, {11, 12}}}}, + {"zone4", + {{{"127.0.0.1", 41}, {1, 2, 3, 4, 5}}, + {{"127.0.0.1", 42}, {6, 7, 8, 9, 11}}, + {{"127.0.0.1", 43}, {12, 13, 14, 16}}}}, + {"zone5", + {{{"127.0.0.1", 51}, {1, 2, 3, 4, 5}}, + {{"127.0.0.1", 52}, {6, 7, 9, 10, 11}}, + {{"127.0.0.1", 53}, {13, 14, 15, 16}}}}, + }); + ZoneBalanceJobExecutor balancer(JobDescription(), store.get(), nullptr, {}); + balancer.spaceInfo_ = spaceInfo; + Status status = balancer.buildBalancePlan(); + EXPECT_EQ(status, Status::OK()); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone2"].partNum_, 10); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone3"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone4"].partNum_, 10); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone5"].partNum_, 10); + checkZoneAvg(balancer.spaceInfo_.zones_["zone1"]); + checkZoneAvg(balancer.spaceInfo_.zones_["zone2"]); + checkZoneAvg(balancer.spaceInfo_.zones_["zone4"]); + checkZoneAvg(balancer.spaceInfo_.zones_["zone5"]); + checkConflic(balancer.spaceInfo_.zones_["zone1"]); + checkConflic(balancer.spaceInfo_.zones_["zone2"]); + checkConflic(balancer.spaceInfo_.zones_["zone3"]); + checkConflic(balancer.spaceInfo_.zones_["zone4"]); + checkConflic(balancer.spaceInfo_.zones_["zone5"]); + + spaceInfo = createSpaceInfo( + "space1", + 1, + 3, + { + {"zone1", + {{{"127.0.0.1", 11}, {5}}, + {{"127.0.0.1", 12}, {10, 15}}, + {{"127.0.0.1", 13}, {12, 13, 14}}}}, + {"zone2", + {{{"127.0.0.1", 21}, {3, 4}}, {{"127.0.0.1", 22}, {8}}, {{"127.0.0.1", 23}, {15}}}}, + {"zone3", + {{{"127.0.0.1", 31}, {1, 2}}, + {{"127.0.0.1", 32}, {6, 7, 8, 9, 10}}, + {{"127.0.0.1", 33}, {11, 12, 16}}}}, + {"zone4", + {{{"127.0.0.1", 41}, {1, 2, 3, 4, 5}}, 
+ {{"127.0.0.1", 42}, {6, 7, 8, 9, 11}}, + {{"127.0.0.1", 43}, {12, 13, 14, 16}}}}, + {"zone5", + {{{"127.0.0.1", 51}, {1, 2, 3, 4, 5}}, + {{"127.0.0.1", 52}, {6, 7, 9, 10, 11}}, + {{"127.0.0.1", 53}, {13, 14, 15, 16}}}}, + }); + balancer.spaceInfo_ = spaceInfo; + status = balancer.buildBalancePlan(); + EXPECT_EQ(status, Status::OK()); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone2"].partNum_, 9); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone3"].partNum_, 10); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone4"].partNum_, 10); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone5"].partNum_, 10); + checkZoneAvg(balancer.spaceInfo_.zones_["zone1"]); + checkZoneAvg(balancer.spaceInfo_.zones_["zone2"]); + checkZoneAvg(balancer.spaceInfo_.zones_["zone4"]); + checkZoneAvg(balancer.spaceInfo_.zones_["zone5"]); + checkConflic(balancer.spaceInfo_.zones_["zone1"]); + checkConflic(balancer.spaceInfo_.zones_["zone2"]); + checkConflic(balancer.spaceInfo_.zones_["zone3"]); + checkConflic(balancer.spaceInfo_.zones_["zone4"]); + checkConflic(balancer.spaceInfo_.zones_["zone5"]); + status = balancer.buildBalancePlan(); + EXPECT_EQ(status, Status::Balanced()); +} + +TEST(BalanceTest, BalanceDataPlanTest) { + fs::TempDir rootPath("/tmp/BalanceZoneTest.XXXXXX"); + std::unique_ptr store = MockCluster::initMetaKV(rootPath.path()); + SpaceInfo spaceInfo = createSpaceInfo( + "space1", + 1, + 3, + { + {"zone1", + {{{"127.0.0.1", 11}, {1, 2, 3, 53, 54}}, + {{"127.0.0.1", 12}, {4, 5}}, + {{"127.0.0.1", 13}, {6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}, + {{"127.0.0.1", 14}, {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}}, + {{"127.0.0.1", 15}, {31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52}}}}, + }); + DataBalanceJobExecutor balancer(JobDescription(), store.get(), nullptr, {}); + balancer.spaceInfo_ = spaceInfo; + Status status = balancer.buildBalancePlan(); + EXPECT_EQ(status, Status::OK()); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].hosts_[HostAddr("127.0.0.1", 11)].parts_.size(), + 11); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].hosts_[HostAddr("127.0.0.1", 12)].parts_.size(), + 11); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].hosts_[HostAddr("127.0.0.1", 13)].parts_.size(), + 10); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].hosts_[HostAddr("127.0.0.1", 14)].parts_.size(), + 11); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].hosts_[HostAddr("127.0.0.1", 15)].parts_.size(), + 11); + + spaceInfo = createSpaceInfo("space1", + 1, + 3, + {{"zone1", + {{{"127.0.0.1", 11}, {5, 6, 7, 8, 9, 10}}, + {{"127.0.0.1", 12}, {11, 12, 13, 17, 18, 19, 20}}, + {{"127.0.0.1", 13}, {21, 22, 23, 28, 29, 30}}, + {{"127.0.0.1", 14}, {31, 32, 33, 34, 35, 36, 37, 38, 39, 40}}, + {{"127.0.0.1", 15}, {41, 42, 43, 44, 45, 46, 47, 48, 49, 50}}, + {{"127.0.0.1", 16}, {51, 52, 53, 54, 14, 15, 16}}, + {{"127.0.0.1", 17}, {1, 2, 3, 4, 24, 25, 26, 27}}}}}); + balancer.spaceInfo_ = spaceInfo; + balancer.lostHosts_ = {{"127.0.0.1", 16}, {"127.0.0.1", 17}}; + status = balancer.buildBalancePlan(); + EXPECT_EQ(status, Status::OK()); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].hosts_[HostAddr("127.0.0.1", 11)].parts_.size(), + 11); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].hosts_[HostAddr("127.0.0.1", 12)].parts_.size(), + 11); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].hosts_[HostAddr("127.0.0.1", 13)].parts_.size(), + 11); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].hosts_[HostAddr("127.0.0.1", 14)].parts_.size(), + 
11); + EXPECT_EQ(balancer.spaceInfo_.zones_["zone1"].hosts_[HostAddr("127.0.0.1", 15)].parts_.size(), + 10); + status = balancer.buildBalancePlan(); + EXPECT_EQ(status, Status::Balanced()); +} + void showHostLoading(kvstore::KVStore* kv, GraphSpaceID spaceId) { auto prefix = MetaKeyUtils::partPrefix(spaceId); std::unique_ptr iter; @@ -116,656 +420,6 @@ void showHostLoading(kvstore::KVStore* kv, GraphSpaceID spaceId) { } } -HostParts assignHostParts(kvstore::KVStore* kv, GraphSpaceID spaceId) { - auto prefix = MetaKeyUtils::partPrefix(spaceId); - std::unique_ptr iter; - kv->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - HostParts hostPart; - while (iter->valid()) { - auto key = iter->key(); - PartitionID partId; - memcpy(&partId, key.data() + prefix.size(), sizeof(PartitionID)); - auto hs = MetaKeyUtils::parsePartVal(iter->val()); - for (auto h : hs) { - hostPart[h].emplace_back(partId); - } - iter->next(); - } - return hostPart; -} - -void testRestBlancer() { - DataBalanceJobExecutor::plan_.reset(nullptr); - BalanceJobExecutor::lock_.unlock(); - BalanceJobExecutor::running_ = false; - LeaderBalanceJobExecutor::inLeaderBalance_ = false; -} - -TEST(BalanceTest, SimpleTestWithZone) { - fs::TempDir rootPath("/tmp/SimpleTestWithZone.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - FLAGS_heartbeat_interval_secs = 1; - { - std::vector hosts; - for (int i = 0; i < 4; i++) { - hosts.emplace_back(std::to_string(i), i); - } - TestUtils::createSomeHosts(kv, hosts); - TestUtils::registerHB(kv, hosts); - - // create zone and group - ZoneInfo zoneInfo = {{"zone_0", {{"0", 0}}}, - {"zone_1", {{"1", 1}}}, - {"zone_2", {{"2", 2}}}, - {"zone_3", {{"3", 3}}}}; - TestUtils::assembleZone(kv, zoneInfo); - } - { - cpp2::SpaceDesc properties; - properties.set_space_name("default_space"); - properties.set_partition_num(4); - properties.set_replica_factor(3); - std::vector zones = {"zone_0", "zone_1", "zone_2", "zone_3"}; - properties.set_zone_names(std::move(zones)); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(1, resp.get_id().get_space_id()); - } - sleep(1); - { - HostParts hostParts; - hostParts.emplace(HostAddr("0", 0), std::vector{1, 2, 3, 4}); - hostParts.emplace(HostAddr("1", 1), std::vector{1, 2, 3, 4}); - hostParts.emplace(HostAddr("2", 2), std::vector{1, 2, 3, 4}); - hostParts.emplace(HostAddr("3", 3), std::vector{}); - int32_t totalParts = 12; - std::vector tasks; - NiceMock client; - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - std::vector zones = {"zone_0", "zone_1", "zone_2", "zone_3"}; - auto code = balancer.assembleZoneParts(zones, hostParts); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); - balancer.balanceParts(0, hostParts, totalParts, tasks, true); - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - EXPECT_EQ(3, it->second.size()); - } - EXPECT_EQ(3, tasks.size()); - } - testRestBlancer(); -} - -TEST(BalanceTest, ExpansionZoneTest) { - fs::TempDir rootPath("/tmp/ExpansionZoneTest.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - FLAGS_heartbeat_interval_secs = 1; - { - 
std::vector hosts; - for (int i = 0; i < 3; i++) { - hosts.emplace_back(std::to_string(i), i); - } - TestUtils::createSomeHosts(kv, hosts); - TestUtils::registerHB(kv, hosts); - - // create zone and group - ZoneInfo zoneInfo = {{"zone_0", {{"0", 0}}}, {"zone_1", {{"1", 1}}}, {"zone_2", {{"2", 2}}}}; - TestUtils::assembleZone(kv, zoneInfo); - } - { - cpp2::SpaceDesc properties; - properties.set_space_name("default_space"); - properties.set_partition_num(4); - properties.set_replica_factor(3); - std::vector zones = {"zone_0", "zone_1", "zone_2"}; - properties.set_zone_names(std::move(zones)); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(1, resp.get_id().get_space_id()); - } - - DefaultValue>::SetFactory( - [] { return folly::Future(Status::OK()); }); - NiceMock client; - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - { - std::vector hosts; - for (int i = 0; i < 4; i++) { - hosts.emplace_back(std::to_string(i), i); - } - TestUtils::createSomeHosts(kv, hosts); - TestUtils::registerHB(kv, hosts); - ZoneInfo zoneInfo = {{"zone_0", {{"0", 0}}}, - {"zone_1", {{"1", 1}}}, - {"zone_2", {{"2", 2}}}, - {"zone_3", {{"3", 3}}}}; - TestUtils::assembleZone(kv, zoneInfo); - } - { - cpp2::SpaceDesc properties; - properties.set_space_name("default_space"); - properties.set_partition_num(4); - properties.set_replica_factor(3); - std::vector zones = {"zone_0", "zone_1", "zone_2", "zone_3"}; - properties.set_zone_names(std::move(zones)); - std::vector data; - data.emplace_back(MetaKeyUtils::spaceKey(1), MetaKeyUtils::spaceVal(properties)); - folly::Baton baton; - kv->asyncMultiPut(0, 0, std::move(data), [&](nebula::cpp2::ErrorCode code) { - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); - baton.post(); - }); - baton.wait(); - } - { - HostParts hostParts; - int32_t totalParts = 0; - auto result = balancer.getHostParts(1, true, hostParts, totalParts); - ASSERT_TRUE(nebula::ok(result)); - std::vector tasks; - hostParts.emplace(HostAddr("3", 3), std::vector{}); - balancer.balanceParts(0, hostParts, totalParts, tasks, true); - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - EXPECT_EQ(3, it->second.size()); - } - EXPECT_EQ(3, tasks.size()); - } - testRestBlancer(); -} - -TEST(BalanceTest, ExpansionHostIntoZoneTest) { - fs::TempDir rootPath("/tmp/ExpansionHostIntoZoneTest.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - FLAGS_heartbeat_interval_secs = 1; - { - std::vector hosts; - for (int i = 0; i < 6; i++) { - hosts.emplace_back(std::to_string(i), i); - } - TestUtils::createSomeHosts(kv, hosts); - TestUtils::registerHB(kv, hosts); - - // create zone and group - ZoneInfo zoneInfo = {{"zone_0", {{"0", 0}}}, {"zone_1", {{"1", 1}}}, {"zone_2", {{"2", 2}}}}; - TestUtils::assembleZone(kv, zoneInfo); - } - { - cpp2::SpaceDesc properties; - properties.set_space_name("default_space"); - properties.set_partition_num(4); - properties.set_replica_factor(3); - std::vector zones = {"zone_0", "zone_1", "zone_2"}; - properties.set_zone_names(std::move(zones)); - cpp2::CreateSpaceReq 
req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(1, resp.get_id().get_space_id()); - } - - DefaultValue>::SetFactory( - [] { return folly::Future(Status::OK()); }); - NiceMock client; - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - { - std::vector hosts; - for (int i = 0; i < 6; i++) { - hosts.emplace_back(std::to_string(i), i); - } - TestUtils::createSomeHosts(kv, hosts); - TestUtils::registerHB(kv, hosts); - ZoneInfo zoneInfo = {{"zone_0", {{"0", 0}, {"3", 3}}}, - {"zone_1", {{"1", 1}, {"4", 4}}}, - {"zone_2", {{"2", 2}, {"5", 5}}}}; - TestUtils::assembleZone(kv, zoneInfo); - } - { - HostParts hostParts; - int32_t totalParts = 0; - auto result = balancer.getHostParts(1, true, hostParts, totalParts); - ASSERT_TRUE(nebula::ok(result)); - - std::vector tasks; - hostParts.emplace(HostAddr("3", 3), std::vector{}); - hostParts.emplace(HostAddr("4", 4), std::vector{}); - hostParts.emplace(HostAddr("5", 5), std::vector{}); - - balancer.balanceParts(0, hostParts, totalParts, tasks, true); - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - EXPECT_EQ(2, it->second.size()); - } - EXPECT_EQ(6, tasks.size()); - } - testRestBlancer(); -} - -TEST(BalanceTest, ShrinkZoneTest) { - fs::TempDir rootPath("/tmp/ShrinkZoneTest.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - FLAGS_heartbeat_interval_secs = 1; - { - std::vector hosts; - for (int i = 0; i < 4; i++) { - hosts.emplace_back(std::to_string(i), i); - } - - TestUtils::createSomeHosts(kv, hosts); - TestUtils::registerHB(kv, hosts); - // create zone and group - ZoneInfo zoneInfo = {{"zone_0", {{"0", 0}}}, - {"zone_1", {{"1", 1}}}, - {"zone_2", {{"2", 2}}}, - {"zone_3", {{"3", 3}}}}; - TestUtils::assembleZone(kv, zoneInfo); - } - { - cpp2::SpaceDesc properties; - properties.set_space_name("default_space"); - properties.set_partition_num(4); - properties.set_replica_factor(3); - std::vector zones = {"zone_0", "zone_1", "zone_2", "zone_3"}; - properties.set_zone_names(std::move(zones)); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(1, resp.get_id().get_space_id()); - } - - DefaultValue>::SetFactory( - [] { return folly::Future(Status::OK()); }); - NiceMock client; - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - balancer.lostHosts_ = {{"3", 3}}; - ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - testRestBlancer(); -} - -TEST(BalanceTest, ShrinkHostFromZoneTest) { - fs::TempDir rootPath("/tmp/ShrinkHostFromZoneTest.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - 
FLAGS_heartbeat_interval_secs = 1; - { - std::vector hosts; - for (int i = 0; i < 6; i++) { - hosts.emplace_back(std::to_string(i), i); - } - TestUtils::createSomeHosts(kv, hosts); - TestUtils::registerHB(kv, hosts); - - // create zone and group - ZoneInfo zoneInfo = {{"zone_0", {{"0", 0}, {"3", 3}}}, - {"zone_1", {{"1", 1}, {"4", 4}}}, - {"zone_2", {{"2", 2}, {"5", 5}}}}; - TestUtils::assembleZone(kv, zoneInfo); - } - { - cpp2::SpaceDesc properties; - properties.set_space_name("default_space"); - properties.set_partition_num(4); - properties.set_replica_factor(3); - std::vector zones = {"zone_0", "zone_1", "zone_2"}; - properties.set_zone_names(std::move(zones)); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(1, resp.get_id().get_space_id()); - } - - DefaultValue>::SetFactory( - [] { return folly::Future(Status::OK()); }); - NiceMock client; - JobDescription jd(0L, cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - testRestBlancer(); - showHostLoading(kv, 1); - - { - ZoneInfo zoneInfo = { - {"zone_0", {{"0", 0}}}, {"zone_1", {{"1", 1}, {"4", 4}}}, {"zone_2", {{"2", 2}, {"5", 5}}}}; - TestUtils::assembleZone(kv, zoneInfo); - } - balancer.lostHosts_ = {{"3", 3}}; - ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); -} - -TEST(BalanceTest, BalanceWithComplexZoneTest) { - fs::TempDir rootPath("/tmp/LeaderBalanceWithComplexZoneTest.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - FLAGS_heartbeat_interval_secs = 1; - std::vector hosts; - for (int i = 0; i < 18; i++) { - hosts.emplace_back(std::to_string(i), i); - } - TestUtils::createSomeHosts(kv, hosts); - TestUtils::registerHB(kv, hosts); - - { - ZoneInfo zoneInfo = { - {"zone_0", {HostAddr("0", 0), HostAddr("1", 1)}}, - {"zone_1", {HostAddr("2", 2), HostAddr("3", 3)}}, - {"zone_2", {HostAddr("4", 4), HostAddr("5", 5)}}, - {"zone_3", {HostAddr("6", 6), HostAddr("7", 7)}}, - {"zone_4", {HostAddr("8", 8), HostAddr("9", 9)}}, - {"zone_5", {HostAddr("10", 10), HostAddr("11", 11)}}, - {"zone_6", {HostAddr("12", 12), HostAddr("13", 13)}}, - {"zone_7", {HostAddr("14", 14), HostAddr("15", 15)}}, - {"zone_8", {HostAddr("16", 16), HostAddr("17", 17)}}, - }; - TestUtils::assembleZone(kv, zoneInfo); - } - { - { - cpp2::SpaceDesc properties; - properties.set_space_name("default_space"); - properties.set_partition_num(18); - properties.set_replica_factor(3); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(1, resp.get_id().get_space_id()); - LOG(INFO) << "Show host about space " << resp.get_id().get_space_id(); - showHostLoading(kv, resp.get_id().get_space_id()); - } - { - cpp2::SpaceDesc properties; - properties.set_space_name("space_on_group_0"); - properties.set_partition_num(64); - properties.set_replica_factor(3); - std::vector zones = {"zone_0", "zone_1", "zone_2", "zone_3", "zone_4"}; - 
properties.set_zone_names(std::move(zones)); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(2, resp.get_id().get_space_id()); - LOG(INFO) << "Show host about space " << resp.get_id().get_space_id(); - showHostLoading(kv, resp.get_id().get_space_id()); - } - { - cpp2::SpaceDesc properties; - properties.set_space_name("space_on_group_1"); - properties.set_partition_num(81); - properties.set_replica_factor(3); - std::vector zones = { - "zone_0", "zone_1", "zone_2", "zone_3", "zone_4", "zone_5", "zone_6", "zone_7", "zone_8"}; - properties.set_zone_names(std::move(zones)); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(3, resp.get_id().get_space_id()); - LOG(INFO) << "Show host about space " << resp.get_id().get_space_id(); - showHostLoading(kv, resp.get_id().get_space_id()); - } - } - sleep(1); - - DefaultValue>::SetFactory( - [] { return folly::Future(Status::OK()); }); - NiceMock client; - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - { - int32_t totalParts = 18 * 3; - std::vector tasks; - auto hostParts = assignHostParts(kv, 1); - balancer.balanceParts(1, hostParts, totalParts, tasks, true); - } - { - int32_t totalParts = 64 * 3; - std::vector tasks; - auto hostParts = assignHostParts(kv, 2); - std::vector zones = {"zone_0", "zone_1", "zone_2", "zone_3", "zone_4"}; - auto code = balancer.assembleZoneParts(zones, hostParts); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); - balancer.balanceParts(2, hostParts, totalParts, tasks, true); - } - { - auto dump = [](const HostParts& hostParts, const std::vector& tasks) { - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - std::stringstream ss; - ss << it->first << ": "; - for (auto partId : it->second) { - ss << partId << ", "; - } - LOG(INFO) << ss.str() << " size " << it->second.size(); - } - for (const auto& task : tasks) { - LOG(INFO) << task.taskIdStr(); - } - }; - - HostParts hostParts; - std::vector parts; - for (int32_t i = 1; i <= 81; i++) { - parts.emplace_back(i); - } - - for (int32_t i = 0; i < 18; i++) { - if (i == 10 || i == 12 || i == 14) { - hostParts.emplace(HostAddr(std::to_string(i), i), parts); - } else { - hostParts.emplace(HostAddr(std::to_string(i), i), std::vector{}); - } - } - - LOG(INFO) << "=== original map ===="; - int32_t totalParts = 243; - std::vector tasks; - dump(hostParts, tasks); - std::vector zones = { - "zone_0", "zone_1", "zone_2", "zone_3", "zone_4", "zone_5", "zone_6", "zone_7", "zone_8"}; - auto code = balancer.assembleZoneParts(zones, hostParts); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); - balancer.balanceParts(3, hostParts, totalParts, tasks, true); - - LOG(INFO) << "=== new map ===="; - dump(hostParts, tasks); - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - EXPECT_GE(it->second.size(), 5); - EXPECT_LE(it->second.size(), 24); - - LOG(INFO) << "Host " << it->first << " Part Size " << it->second.size(); - } - showHostLoading(kv, 3); 
- } -} - -TEST(BalanceTest, BalancePartsTest) { - fs::TempDir rootPath("/tmp/BalancePartsTest.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - - DefaultValue>::SetFactory( - [] { return folly::Future(Status::OK()); }); - NiceMock client; - - auto dump = [](const HostParts& hostParts, const std::vector& tasks) { - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - std::stringstream ss; - ss << it->first << ": "; - for (auto partId : it->second) { - ss << partId << ", "; - } - VLOG(1) << ss.str(); - } - for (const auto& task : tasks) { - VLOG(1) << task.taskIdStr(); - } - }; - { - HostParts hostParts; - hostParts.emplace(HostAddr("0", 0), std::vector{1, 2, 3, 4}); - hostParts.emplace(HostAddr("1", 0), std::vector{1, 2, 3, 4}); - hostParts.emplace(HostAddr("2", 0), std::vector{1, 2, 3, 4}); - hostParts.emplace(HostAddr("3", 0), std::vector{}); - int32_t totalParts = 12; - std::vector tasks; - VLOG(1) << "=== original map ===="; - dump(hostParts, tasks); - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - balancer.balanceParts(0, hostParts, totalParts, tasks, false); - VLOG(1) << "=== new map ===="; - dump(hostParts, tasks); - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - EXPECT_EQ(3, it->second.size()); - } - EXPECT_EQ(3, tasks.size()); - } - { - HostParts hostParts; - hostParts.emplace(HostAddr("0", 0), std::vector{1, 2, 3, 4, 5}); - hostParts.emplace(HostAddr("1", 0), std::vector{1, 2, 4, 5}); - hostParts.emplace(HostAddr("2", 0), std::vector{2, 3, 4, 5}); - hostParts.emplace(HostAddr("3", 0), std::vector{1, 3}); - int32_t totalParts = 15; - std::vector tasks; - VLOG(1) << "=== original map ===="; - dump(hostParts, tasks); - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - balancer.balanceParts(0, hostParts, totalParts, tasks, false); - VLOG(1) << "=== new map ===="; - dump(hostParts, tasks); - EXPECT_EQ(4, hostParts[HostAddr("0", 0)].size()); - EXPECT_EQ(4, hostParts[HostAddr("1", 0)].size()); - EXPECT_EQ(4, hostParts[HostAddr("2", 0)].size()); - EXPECT_EQ(3, hostParts[HostAddr("3", 0)].size()); - EXPECT_EQ(1, tasks.size()); - } - { - HostParts hostParts; - hostParts.emplace(HostAddr("0", 0), std::vector{1, 2, 3, 4}); - hostParts.emplace(HostAddr("1", 0), std::vector{1, 2, 4, 5}); - hostParts.emplace(HostAddr("2", 0), std::vector{2, 3, 4, 5}); - hostParts.emplace(HostAddr("3", 0), std::vector{1, 3, 5}); - int32_t totalParts = 15; - std::vector tasks; - VLOG(1) << "=== original map ===="; - dump(hostParts, tasks); - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - balancer.balanceParts(0, hostParts, totalParts, tasks, false); - VLOG(1) << "=== new map ===="; - dump(hostParts, tasks); - EXPECT_EQ(4, hostParts[HostAddr("0", 0)].size()); - EXPECT_EQ(4, hostParts[HostAddr("1", 0)].size()); - EXPECT_EQ(4, hostParts[HostAddr("2", 0)].size()); - EXPECT_EQ(3, hostParts[HostAddr("3", 0)].size()); - EXPECT_EQ(0, tasks.size()); - } - { - HostParts hostParts; - hostParts.emplace(HostAddr("0", 0), std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - hostParts.emplace(HostAddr("1", 0), std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - hostParts.emplace(HostAddr("2", 0), std::vector{1, 
2, 3, 4, 5, 6, 7, 8, 9}); - hostParts.emplace(HostAddr("3", 0), std::vector{}); - hostParts.emplace(HostAddr("4", 0), std::vector{}); - hostParts.emplace(HostAddr("5", 0), std::vector{}); - hostParts.emplace(HostAddr("6", 0), std::vector{}); - hostParts.emplace(HostAddr("7", 0), std::vector{}); - hostParts.emplace(HostAddr("8", 0), std::vector{}); - int32_t totalParts = 27; - std::vector tasks; - VLOG(1) << "=== original map ===="; - dump(hostParts, tasks); - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - balancer.balanceParts(0, hostParts, totalParts, tasks, false); - VLOG(1) << "=== new map ===="; - dump(hostParts, tasks); - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - EXPECT_EQ(3, it->second.size()); - } - EXPECT_EQ(18, tasks.size()); - } - { - HostParts hostParts; - hostParts.emplace(HostAddr("0", 0), std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - hostParts.emplace(HostAddr("1", 0), std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - hostParts.emplace(HostAddr("2", 0), std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - hostParts.emplace(HostAddr("3", 0), std::vector{}); - hostParts.emplace(HostAddr("4", 0), std::vector{}); - hostParts.emplace(HostAddr("5", 0), std::vector{}); - hostParts.emplace(HostAddr("6", 0), std::vector{}); - hostParts.emplace(HostAddr("7", 0), std::vector{}); - int32_t totalParts = 27; - std::vector tasks; - VLOG(1) << "=== original map ===="; - dump(hostParts, tasks); - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - balancer.balanceParts(0, hostParts, totalParts, tasks, false); - VLOG(1) << "=== new map ===="; - dump(hostParts, tasks); - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - EXPECT_GE(4, it->second.size()); - EXPECT_LE(3, it->second.size()); - } - EXPECT_EQ(18, tasks.size()); - } -} - TEST(BalanceTest, DispatchTasksTest) { { FLAGS_task_concurrency = 10; @@ -878,7 +532,7 @@ TEST(BalanceTest, BalancePlanTest) { plan.addTask(std::move(task)); } folly::Baton b; - plan.onFinished_ = [&plan, &b]() { + plan.onFinished_ = [&plan, &b](meta::cpp2::JobStatus) { ASSERT_EQ(meta::cpp2::JobStatus::FINISHED, plan.status()); ASSERT_EQ(10, plan.finishedTaskNum_); b.post(); @@ -909,7 +563,7 @@ TEST(BalanceTest, BalancePlanTest) { plan.addTask(std::move(task)); } folly::Baton b; - plan.onFinished_ = [&plan, &b]() { + plan.onFinished_ = [&plan, &b](meta::cpp2::JobStatus) { ASSERT_EQ(meta::cpp2::JobStatus::FINISHED, plan.status()); ASSERT_EQ(10, plan.finishedTaskNum_); b.post(); @@ -950,7 +604,7 @@ TEST(BalanceTest, BalancePlanTest) { } TestUtils::registerHB(kv, hosts); folly::Baton b; - plan.onFinished_ = [&plan, &b]() { + plan.onFinished_ = [&plan, &b](meta::cpp2::JobStatus) { ASSERT_EQ(meta::cpp2::JobStatus::FAILED, plan.status()); ASSERT_EQ(10, plan.finishedTaskNum_); b.post(); @@ -960,17 +614,6 @@ TEST(BalanceTest, BalancePlanTest) { } } -void verifyBalancePlan(kvstore::KVStore* kv, JobID jobId, meta::cpp2::JobStatus jobStatus) { - std::string key = JobDescription::makeJobKey(jobId); - std::string value; - auto retcode = kv->get(kDefaultSpaceId, kDefaultPartId, key, &value); - EXPECT_EQ(retcode, nebula::cpp2::ErrorCode::SUCCEEDED); - auto optJobRet = JobDescription::makeJobDescription(key, value); - EXPECT_TRUE(nebula::ok(optJobRet)); - auto optJob = nebula::value(optJobRet); - EXPECT_EQ(jobStatus, 
optJob.getStatus()); -} - void verifyBalanceTask(kvstore::KVStore* kv, JobID jobId, BalanceTaskStatus status, @@ -980,227 +623,130 @@ void verifyBalanceTask(kvstore::KVStore* kv, const auto& prefix = MetaKeyUtils::balanceTaskPrefix(jobId); std::unique_ptr iter; auto code = kv->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - ASSERT_EQ(code, nebula::cpp2::ErrorCode::SUCCEEDED); + EXPECT_EQ(code, nebula::cpp2::ErrorCode::SUCCEEDED); int32_t num = 0; while (iter->valid()) { auto keyTuple = MetaKeyUtils::parseBalanceTaskKey(iter->key()); - ASSERT_EQ(jobId, std::get<0>(keyTuple)); - ASSERT_EQ(1, std::get<1>(keyTuple)); + EXPECT_EQ(jobId, std::get<0>(keyTuple)); + EXPECT_EQ(1, std::get<1>(keyTuple)); partCount[std::get<3>(keyTuple)]--; partCount[std::get<4>(keyTuple)]++; auto valueTuple = MetaKeyUtils::parseBalanceTaskVal(iter->val()); - ASSERT_EQ(status, std::get<0>(valueTuple)); - ASSERT_EQ(result, std::get<1>(valueTuple)); - ASSERT_LT(0, std::get<2>(valueTuple)); - ASSERT_LT(0, std::get<3>(valueTuple)); + EXPECT_EQ(status, std::get<0>(valueTuple)); + EXPECT_EQ(result, std::get<1>(valueTuple)); + EXPECT_LT(0, std::get<2>(valueTuple)); + EXPECT_LT(0, std::get<3>(valueTuple)); num++; iter->next(); } if (exceptNumber != 0) { - ASSERT_EQ(exceptNumber, num); + EXPECT_EQ(exceptNumber, num); } } -TEST(BalanceTest, NormalTest) { - fs::TempDir rootPath("/tmp/NormalTest.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - FLAGS_heartbeat_interval_secs = 1; - TestUtils::createSomeHosts(kv); - TestUtils::assembleSpace(kv, 1, 8, 3, 4); - std::unordered_map partCount; - - DefaultValue>::SetFactory( - [] { return folly::Future(Status::OK()); }); - NiceMock client; - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - testRestBlancer(); - sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); - LOG(INFO) << "Now, we lost host " << HostAddr("3", 3); - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - sleep(1); - LOG(INFO) << "Rebalance finished!"; - verifyBalanceTask( - kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 6); +void verifyMetaZone(kvstore::KVStore* kv, + GraphSpaceID spaceId, + const std::vector& zones) { + std::string spaceKey = MetaKeyUtils::spaceKey(spaceId); + std::string spaceVal; + kv->get(kDefaultSpaceId, kDefaultPartId, spaceKey, &spaceVal); + meta::cpp2::SpaceDesc properties = MetaKeyUtils::parseSpace(spaceVal); + const std::vector& zns = properties.get_zone_names(); + std::set zoneSet; + for (const std::string& zoneName : zns) { + zoneSet.emplace(zoneName); + } + std::set expectZones; + for (const std::string& zoneName : zones) { + expectZones.emplace(zoneName); + } + EXPECT_EQ(zoneSet, expectZones); } -TEST(BalanceTest, SpecifyHostTest) { - fs::TempDir rootPath("/tmp/SpecifyHostTest.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - FLAGS_heartbeat_interval_secs = 1; - TestUtils::createSomeHosts(kv, {{"0", 0}, {"1", 1}, {"2", 2}, {"3", 3}}); - TestUtils::assembleSpace(kv, 1, 8, 3, 4); - std::unordered_map partCount; - - DefaultValue>::SetFactory( - [] { return folly::Future(Status::OK()); }); - NiceMock client; - 
JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - - sleep(1); - LOG(INFO) << "Now, we remove host {3, 3}"; - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}, {"3", 3}}); - balancer.lostHosts_ = {{"3", 3}}; - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - testRestBlancer(); - LOG(INFO) << "Rebalance finished!"; - verifyBalanceTask( - kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 6); +JobDescription makeJobDescription(kvstore::KVStore* kv, cpp2::AdminCmd cmd) { + JobDescription jd(testJobId.fetch_add(1, std::memory_order_relaxed), cmd, {}); + std::vector data; + data.emplace_back(jd.jobKey(), jd.jobVal()); + folly::Baton baton; + kv->asyncMultiPut(0, 0, std::move(data), [&](nebula::cpp2::ErrorCode code) { + ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + baton.post(); + }); + baton.wait(); + return jd; } -TEST(BalanceTest, SpecifyMultiHostTest) { - fs::TempDir rootPath("/tmp/SpecifyMultiHostTest.XXXXXX"); +TEST(BalanceTest, NormalZoneTest) { + fs::TempDir rootPath("/tmp/NormalZoneTest.XXXXXX"); auto store = MockCluster::initMetaKV(rootPath.path()); auto* kv = dynamic_cast(store.get()); FLAGS_heartbeat_interval_secs = 1; - TestUtils::createSomeHosts(kv, {{"0", 0}, {"1", 1}, {"2", 2}, {"3", 3}, {"4", 4}, {"5", 5}}); - TestUtils::assembleSpace(kv, 1, 12, 3, 6); + TestUtils::assembleSpaceWithZone(kv, 1, 8, 3, 8, 24); std::unordered_map partCount; - for (int32_t i = 0; i < 6; i++) { - partCount[HostAddr(std::to_string(i), i)] = 6; - } DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - - sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); - LOG(INFO) << "Now, we want to remove host {2, 2}/{3, 3}"; - // If {"2", 2} and {"3", 3} are both dead, minority hosts for some part are - // alive, it would lead to a fail - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"4", 4}, {"5", 5}}); - balancer.lostHosts_ = {{"2", 2}, {"3", 3}}; - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(apache::thrift::util::enumNameSafe(nebula::cpp2::ErrorCode::E_NO_VALID_HOST), - ret.value().message()); - // If {"2", 2} is dead, {"3", 3} still alive, each part has majority hosts - // alive - testRestBlancer(); - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"3", 3}, {"4", 4}, {"5", 5}}); - balancer.lostHosts_ = {{"2", 2}, {"3", 3}}; - ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - testRestBlancer(); - sleep(1); - LOG(INFO) << "Rebalance finished!"; - - // In theory, there should be only 12 tasks, but in some environment, 13 tasks - // is generated. A partition is moved more than once from A -> B -> C, actually - // A -> C is enough. 
- verifyBalanceTask( - kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount); - ASSERT_EQ(9, partCount[HostAddr("0", 0)]); - ASSERT_EQ(9, partCount[HostAddr("1", 1)]); - ASSERT_EQ(0, partCount[HostAddr("2", 2)]); - ASSERT_EQ(0, partCount[HostAddr("3", 3)]); - ASSERT_EQ(9, partCount[HostAddr("4", 4)]); - ASSERT_EQ(9, partCount[HostAddr("5", 5)]); -} - -TEST(BalanceTest, MockReplaceMachineTest) { - fs::TempDir rootPath("/tmp/MockReplaceMachineTest.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - FLAGS_heartbeat_interval_secs = 1; - TestUtils::createSomeHosts(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - TestUtils::assembleSpace(kv, 1, 12, 3, 3); - - DefaultValue>::SetFactory( - [] { return folly::Future(Status::OK()); }); - NiceMock client; - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - - // add a new machine - TestUtils::createSomeHosts(kv, {{"0", 0}, {"1", 1}, {"2", 2}, {"3", 3}}); - LOG(INFO) << "Now, we want to replace host {2, 2} with {3, 3}"; - // Because for all parts majority hosts still alive, we could balance - sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); - // {2, 2} should be offline now - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"3", 3}}); - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - testRestBlancer(); - sleep(1); - LOG(INFO) << "Rebalance finished!"; - std::unordered_map partCount; + JobDescription jd = makeJobDescription(kv, cpp2::AdminCmd::ZONE_BALANCE); + ZoneBalanceJobExecutor balancer(jd, kv, &client, {}); + balancer.spaceInfo_.getInfo(1, kv); + auto ret = balancer.executeInternal(); + EXPECT_EQ(Status::Balanced(), ret.value()); + balancer.finish(); + balancer.lostZones_ = {"5", "6", "7", "8"}; + folly::Baton baton; + balancer.setFinishCallBack([&](meta::cpp2::JobStatus) { + baton.post(); + return nebula::cpp2::ErrorCode::SUCCEEDED; + }); + ret = balancer.executeInternal(); + baton.wait(); + EXPECT_EQ(Status::OK(), ret.value()); + verifyMetaZone(kv, balancer.spaceInfo_.spaceId_, {"1", "2", "3", "4"}); verifyBalanceTask( kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 12); } -TEST(BalanceTest, SingleReplicaTest) { - fs::TempDir rootPath("/tmp/SingleReplicaTest.XXXXXX"); +TEST(BalanceTest, NormalDataTest) { + fs::TempDir rootPath("/tmp/NormalDataTest.XXXXXX"); auto store = MockCluster::initMetaKV(rootPath.path()); auto* kv = dynamic_cast(store.get()); FLAGS_heartbeat_interval_secs = 1; - TestUtils::createSomeHosts(kv, {{"0", 0}, {"1", 1}, {"2", 2}, {"3", 3}, {"4", 4}, {"5", 5}}); - TestUtils::assembleSpace(kv, 1, 12, 1, 6); + TestUtils::assembleSpaceWithZone(kv, 1, 8, 3, 1, 8); std::unordered_map partCount; - for (int32_t i = 0; i < 6; i++) { - partCount[HostAddr(std::to_string(i), i)] = 2; - } DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + JobDescription jd = makeJobDescription(kv, cpp2::AdminCmd::DATA_BALANCE); DataBalanceJobExecutor balancer(jd, kv, &client, {}); - - sleep(1); - LOG(INFO) << "Now, we want to remove host {2, 2} and {3, 3}"; - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}, {"3", 3}, {"4", 4}, {"5", 5}}); - - balancer.lostHosts_ = {{"2", 2}, {"3", 3}}; - auto ret = 
balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - testRestBlancer(); - sleep(1); - LOG(INFO) << "Rebalance finished!"; - + balancer.spaceInfo_.getInfo(1, kv); + auto ret = balancer.executeInternal(); + EXPECT_EQ(Status::Balanced(), ret.value()); + balancer.finish(); + balancer.lostHosts_ = {{"127.0.0.1", 1}, {"127.0.0.1", 8}}; + folly::Baton baton; + balancer.setFinishCallBack([&](meta::cpp2::JobStatus) { + baton.post(); + return nebula::cpp2::ErrorCode::SUCCEEDED; + }); + ret = balancer.executeInternal(); + baton.wait(); + EXPECT_EQ(Status::OK(), ret.value()); verifyBalanceTask( - kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 4); - ASSERT_EQ(3, partCount[HostAddr("0", 0)]); - ASSERT_EQ(3, partCount[HostAddr("1", 1)]); - ASSERT_EQ(0, partCount[HostAddr("2", 2)]); - ASSERT_EQ(0, partCount[HostAddr("3", 3)]); - ASSERT_EQ(3, partCount[HostAddr("4", 4)]); - ASSERT_EQ(3, partCount[HostAddr("5", 5)]); + kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 6); } -TEST(BalanceTest, TryToRecoveryTest) { +TEST(BalanceTest, RecoveryTest) { fs::TempDir rootPath("/tmp/TryToRecoveryTest.XXXXXX"); auto store = MockCluster::initMetaKV(rootPath.path()); auto* kv = dynamic_cast(store.get()); - FLAGS_heartbeat_interval_secs = 1; - TestUtils::createSomeHosts(kv); - TestUtils::assembleSpace(kv, 1, 8, 3, 4); - - sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); - LOG(INFO) << "Now, we lost host " << HostAddr("3", 3); - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - + TestUtils::assembleSpaceWithZone(kv, 1, 24, 1, 1, 8); DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - // first 6 call is the failed case, since we can't recover the plan, so only 6 - // call EXPECT_CALL(client, waitingForCatchUpData(_, _, _)) - .Times(6) + .Times(12) .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))) .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))) .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))) @@ -1208,13 +754,18 @@ TEST(BalanceTest, TryToRecoveryTest) { .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))) .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))); - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + JobDescription jd = makeJobDescription(kv, cpp2::AdminCmd::DATA_BALANCE); DataBalanceJobExecutor balancer(jd, kv, &client, {}); - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - testRestBlancer(); - sleep(1); + balancer.spaceInfo_.getInfo(1, kv); + balancer.lostHosts_ = {{"127.0.0.1", 1}, {"127.0.0.1", 8}}; + folly::Baton baton; + balancer.setFinishCallBack([&](meta::cpp2::JobStatus) { + baton.post(); + return nebula::cpp2::ErrorCode::SUCCEEDED; + }); + auto ret = balancer.executeInternal(); + baton.wait(); + EXPECT_EQ(Status::OK(), ret.value()); std::unordered_map partCount; verifyBalanceTask(kv, balancer.jobId_, @@ -1222,148 +773,102 @@ TEST(BalanceTest, TryToRecoveryTest) { BalanceTaskResult::FAILED, partCount, 6); - - sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); - LOG(INFO) << "Now let's try to recovery it. 
Since all host would be regarded " - "as offline, " - << "so all task will be invalid"; balancer.recovery(); - ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - testRestBlancer(); - sleep(1); verifyBalanceTask( - kv, balancer.jobId_, BalanceTaskStatus::START, BalanceTaskResult::INVALID, partCount, 6); -} - -TEST(BalanceTest, RecoveryTest) { - FLAGS_task_concurrency = 1; - fs::TempDir rootPath("/tmp/RecoveryTest.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - FLAGS_heartbeat_interval_secs = 1; - TestUtils::createSomeHosts(kv); - TestUtils::assembleSpace(kv, 1, 8, 3, 4); - - DefaultValue>::SetFactory( - [] { return folly::Future(Status::OK()); }); - NiceMock client; - // first 6 call is the failed case, the later call will return default value - // In gtest release 1.8.0 we can only write as follows: - EXPECT_CALL(client, waitingForCatchUpData(_, _, _)) - .Times(AtLeast(12)) - .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))) - .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))) - .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))) - .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))) - .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))) - .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))); - - sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); - LOG(INFO) << "Now, we lost host " << HostAddr("3", 3); - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &client, {}); - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - testRestBlancer(); - sleep(1); - std::unordered_map partCount; - verifyBalanceTask(kv, - balancer.jobId_, - BalanceTaskStatus::CATCH_UP_DATA, - BalanceTaskResult::FAILED, - partCount, - 6); - - // register hb again to prevent from regarding src as offline - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - LOG(INFO) << "Now let's try to recovery it."; - balancer.recovery(); - ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - sleep(1); + kv, balancer.jobId_, BalanceTaskStatus::START, BalanceTaskResult::IN_PROGRESS, partCount, 6); + baton.reset(); + balancer.setFinishCallBack([&](meta::cpp2::JobStatus) { + baton.post(); + return nebula::cpp2::ErrorCode::SUCCEEDED; + }); + ret = balancer.executeInternal(); + baton.wait(); verifyBalanceTask( kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 6); } TEST(BalanceTest, StopPlanTest) { - FLAGS_task_concurrency = 1; fs::TempDir rootPath("/tmp/StopAndRecoverTest.XXXXXX"); auto store = MockCluster::initMetaKV(rootPath.path()); auto* kv = dynamic_cast(store.get()); FLAGS_heartbeat_interval_secs = 1; TestUtils::createSomeHosts(kv); - TestUtils::assembleSpace(kv, 1, 8, 3, 4); - - // {3, 3} is lost for now - sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - + TestUtils::assembleSpaceWithZone(kv, 1, 24, 3, 5, 5); DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock delayClient; EXPECT_CALL(delayClient, waitingForCatchUpData(_, _, _)) - // first task in first plan will be blocked, all other tasks 
will be - // skipped, - .Times(1) + .Times(8) + .WillOnce( + Return(ByMove(folly::makeFuture(Status::OK()).delayed(std::chrono::seconds(3))))) + .WillOnce( + Return(ByMove(folly::makeFuture(Status::OK()).delayed(std::chrono::seconds(3))))) + .WillOnce( + Return(ByMove(folly::makeFuture(Status::OK()).delayed(std::chrono::seconds(3))))) + .WillOnce( + Return(ByMove(folly::makeFuture(Status::OK()).delayed(std::chrono::seconds(3))))) + .WillOnce( + Return(ByMove(folly::makeFuture(Status::OK()).delayed(std::chrono::seconds(3))))) + .WillOnce( + Return(ByMove(folly::makeFuture(Status::OK()).delayed(std::chrono::seconds(3))))) + .WillOnce( + Return(ByMove(folly::makeFuture(Status::OK()).delayed(std::chrono::seconds(3))))) .WillOnce( Return(ByMove(folly::makeFuture(Status::OK()).delayed(std::chrono::seconds(3))))); - - JobDescription jd( - testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); - DataBalanceJobExecutor balancer(jd, kv, &delayClient, {}); - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - sleep(1); - LOG(INFO) << "Rebalance should still in progress"; - - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); + FLAGS_task_concurrency = 8; + JobDescription jd = makeJobDescription(kv, cpp2::AdminCmd::DATA_BALANCE); + ZoneBalanceJobExecutor balancer(jd, kv, &delayClient, {}); + balancer.spaceInfo_.getInfo(1, kv); + balancer.lostZones_ = {"4", "5"}; + folly::Baton baton; + balancer.setFinishCallBack([&](meta::cpp2::JobStatus) { + baton.post(); + return nebula::cpp2::ErrorCode::SUCCEEDED; + }); + auto ret = balancer.executeInternal(); + EXPECT_EQ(Status::OK(), ret.value()); auto stopRet = balancer.stop(); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, stopRet); + baton.wait(); + const auto& prefix = MetaKeyUtils::balanceTaskPrefix(balancer.jobId_); + std::unique_ptr iter; + auto retcode = kv->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + ASSERT_EQ(retcode, nebula::cpp2::ErrorCode::SUCCEEDED); + int32_t taskEnded = 0; + int32_t taskStopped = 0; + int32_t invalid = 0; + int32_t success = 0; + int32_t progress = 0; + while (iter->valid()) { + BalanceTask task; + { + auto tup = MetaKeyUtils::parseBalanceTaskVal(iter->val()); + task.status_ = std::get<0>(tup); + task.ret_ = std::get<1>(tup); + task.startTimeMs_ = std::get<2>(tup); + task.endTimeMs_ = std::get<3>(tup); + + if (task.ret_ == BalanceTaskResult::SUCCEEDED) { + success++; + } else if (task.ret_ == BalanceTaskResult::INVALID) { + invalid++; + } else if (task.ret_ == BalanceTaskResult::IN_PROGRESS) { + progress++; + } - // wait until the only IN_PROGRESS task finished; - sleep(3); - { - const auto& prefix = MetaKeyUtils::balanceTaskPrefix(balancer.jobId_); - std::unique_ptr iter; - auto retcode = kv->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - ASSERT_EQ(retcode, nebula::cpp2::ErrorCode::SUCCEEDED); - int32_t taskEnded = 0; - int32_t taskStopped = 0; - while (iter->valid()) { - BalanceTask task; - // PartitionID partId = - // std::get<2>(BalanceTask::MetaServiceUtils(iter->key())); - { - auto tup = MetaKeyUtils::parseBalanceTaskVal(iter->val()); - task.status_ = std::get<0>(tup); - task.ret_ = std::get<1>(tup); - task.startTimeMs_ = std::get<2>(tup); - task.endTimeMs_ = std::get<3>(tup); - - if (task.status_ == BalanceTaskStatus::END) { - taskEnded++; - } else { - taskStopped++; - } + if (task.status_ == BalanceTaskStatus::END) { + taskEnded++; + } else { + taskStopped++; } - iter->next(); } - ASSERT_EQ(1, taskEnded); - 
ASSERT_EQ(5, taskStopped); + iter->next(); } - - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - NiceMock normalClient; - - balancer.adminClient_ = &normalClient; - testRestBlancer(); - ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::OK(), ret.value()); - testRestBlancer(); - sleep(1); + EXPECT_EQ(8, taskEnded); + EXPECT_EQ(22, taskStopped); + EXPECT_EQ(22, invalid); + EXPECT_EQ(8, success); + EXPECT_EQ(0, progress); } void verifyLeaderBalancePlan(HostLeaderMap& hostLeaderMap, @@ -1630,8 +1135,8 @@ TEST(BalanceTest, LeaderBalanceTest) { LeaderBalanceJobExecutor balancer( testJobId.fetch_add(1, std::memory_order_relaxed), kv, &client, {}); - auto ret = balancer.executeInternal(HostAddr(), {}); - ASSERT_EQ(Status::Error("partiton failed to transfer leader"), ret.value()); + auto ret = balancer.executeInternal(); + ASSERT_EQ(Status::OK(), ret.value()); } TEST(BalanceTest, LeaderBalanceWithZoneTest) { diff --git a/src/meta/test/TestUtils.h b/src/meta/test/TestUtils.h index 2440486c3e1..978fd4f41d8 100644 --- a/src/meta/test/TestUtils.h +++ b/src/meta/test/TestUtils.h @@ -208,6 +208,62 @@ class TestUtils { baton.wait(); } + static void assembleSpaceWithZone(kvstore::KVStore* kv, + GraphSpaceID id, + int32_t partitionNum, + int32_t replica, + int32_t zoneNum, + int32_t totalHost) { + cpp2::SpaceDesc properties; + properties.set_space_name("test_space"); + properties.set_partition_num(partitionNum); + properties.set_replica_factor(replica); + auto spaceVal = MetaKeyUtils::spaceVal(properties); + std::vector data; + data.emplace_back(MetaKeyUtils::indexSpaceKey("test_space"), + std::string(reinterpret_cast(&id), sizeof(GraphSpaceID))); + std::vector>> zones; + std::vector zoneNames; + std::map zonePartNum; + for (int32_t i = 0; i < zoneNum; i++) { + zones.push_back({std::to_string(i + 1), {}}); + zonePartNum[std::to_string(i + 1)] = 0; + zoneNames.push_back(std::to_string(i + 1)); + } + properties.set_zone_names(zoneNames); + data.emplace_back(MetaKeyUtils::spaceKey(id), MetaKeyUtils::spaceVal(properties)); + std::vector allHosts; + for (int32_t i = 0; i < totalHost; i++) { + zones[i % zoneNum].second.emplace_back("127.0.0.1", i + 1); + allHosts.emplace_back("127.0.0.1", i + 1); + data.emplace_back(nebula::MetaKeyUtils::machineKey("127.0.0.1", i + 1), ""); + data.emplace_back(nebula::MetaKeyUtils::hostKey("127.0.0.1", i + 1), + HostInfo::encodeV2(HostInfo( + time::WallClock::fastNowInMilliSec(), cpp2::HostRole::STORAGE, ""))); + } + for (auto& p : zones) { + data.emplace_back(MetaKeyUtils::zoneKey(p.first), MetaKeyUtils::zoneVal(p.second)); + } + for (auto partId = 1; partId <= partitionNum; partId++) { + std::vector hosts; + size_t idx = partId; + for (int32_t i = 0; i < replica; i++, idx++) { + std::string zoneName = zones[idx % zoneNum].first; + std::vector& zoneHosts = zones[idx % zoneNum].second; + int32_t hostIndex = zonePartNum[zoneName] % zoneHosts.size(); + hosts.push_back(zoneHosts[hostIndex]); + zonePartNum[zoneName]++; + } + data.emplace_back(MetaKeyUtils::partKey(id, partId), MetaKeyUtils::partVal(hosts)); + } + folly::Baton baton; + kv->asyncMultiPut(0, 0, std::move(data), [&](nebula::cpp2::ErrorCode code) { + ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); + baton.post(); + }); + baton.wait(); + } + static void mockTag(kvstore::KVStore* kv, int32_t tagNum, SchemaVer version = 0, diff --git a/src/parser/AdminSentences.cpp b/src/parser/AdminSentences.cpp index e09bb6e5811..cd774e27894 100644 --- a/src/parser/AdminSentences.cpp +++ 
b/src/parser/AdminSentences.cpp
@@ -247,6 +247,17 @@ std::string AdminJobSentence::toString() const {
             }
             return str;
           }
+        case meta::cpp2::AdminCmd::ZONE_BALANCE:
+          if (paras_.empty()) {
+            return "SUBMIT JOB BALANCE ZONE";
+          } else {
+            std::string str = "SUBMIT JOB BALANCE ZONE REMOVE";
+            for (size_t i = 0; i < paras_.size(); i++) {
+              auto &s = paras_[i];
+              str += i == 0 ? " " + s : ", " + s;
+            }
+            return str;
+          }
         case meta::cpp2::AdminCmd::LEADER_BALANCE:
           return "SUBMIT JOB BALANCE LEADER";
         case meta::cpp2::AdminCmd::UNKNOWN:
diff --git a/src/parser/parser.yy b/src/parser/parser.yy
index 93c38013add..ffc32ae3534 100644
--- a/src/parser/parser.yy
+++ b/src/parser/parser.yy
@@ -3140,6 +3140,22 @@ admin_job_sentence
         delete hl;
         $$ = sentence;
     }
+    | KW_SUBMIT KW_JOB KW_BALANCE KW_ZONE {
+        auto sentence = new AdminJobSentence(meta::cpp2::AdminJobOp::ADD,
+                                             meta::cpp2::AdminCmd::ZONE_BALANCE);
+        $$ = sentence;
+    }
+    | KW_SUBMIT KW_JOB KW_BALANCE KW_ZONE KW_REMOVE zone_name_list {
+        auto sentence = new AdminJobSentence(meta::cpp2::AdminJobOp::ADD,
+                                             meta::cpp2::AdminCmd::ZONE_BALANCE);
+        ZoneNameList* nl = $6;
+        std::vector<std::string> names = nl->zoneNames();
+        for (std::string& name: names) {
+            sentence->addPara(name);
+        }
+        delete nl;
+        $$ = sentence;
+    }
     ;

 job_concurrency
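Illustrative usage (not part of the patch; the zone names below are hypothetical): the two grammar alternatives above let statements of the following form parse into an ADD job carrying AdminCmd::ZONE_BALANCE, and the new ZONE_BALANCE branch of AdminJobSentence::toString() renders the equivalent text back:

    SUBMIT JOB BALANCE ZONE;
    SUBMIT JOB BALANCE ZONE REMOVE zone_1, zone_2;

The plain form submits a zone-level balance for the space; the REMOVE form passes each listed zone name to addPara(), so the submitted job also carries the zones to be removed (compare the lostZones_ set directly on the executor in NormalZoneTest above).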