diff --git a/src/clients/storage/InternalStorageClient.cpp b/src/clients/storage/InternalStorageClient.cpp index b2537b20ab5..e6fc0f3cb5f 100644 --- a/src/clients/storage/InternalStorageClient.cpp +++ b/src/clients/storage/InternalStorageClient.cpp @@ -88,7 +88,8 @@ void InternalStorageClient::chainAddEdges(cpp2::AddEdgesRequest& directReq, auto partId = directReq.get_parts().begin()->first; auto optLeader = getLeader(directReq.get_space_id(), partId); if (!optLeader.ok()) { - LOG(WARNING) << folly::sformat("failed to get leader, space {}, part {}", spaceId, partId); + LOG(WARNING) << folly::sformat("failed to get leader, space {}, part {}", spaceId, partId) + << optLeader.status(); p.setValue(::nebula::cpp2::ErrorCode::E_SPACE_NOT_FOUND); return; } @@ -189,5 +190,47 @@ InternalStorageClient::getPartLeader( return clusters; } +void InternalStorageClient::chainDeleteEdges(cpp2::DeleteEdgesRequest& req, + const std::string& txnId, + TermID termId, + folly::Promise&& p, + folly::EventBase* evb) { + auto spaceId = req.get_space_id(); + auto partId = req.get_parts().begin()->first; + auto optLeader = getLeader(req.get_space_id(), partId); + if (!optLeader.ok()) { + LOG(WARNING) << folly::sformat("failed to get leader, space {}, part {}", spaceId, partId) + << optLeader.status(); + p.setValue(::nebula::cpp2::ErrorCode::E_SPACE_NOT_FOUND); + return; + } + HostAddr& leader = optLeader.value(); + leader.port += kInternalPortOffset; + VLOG(2) << "leader host: " << leader; + + cpp2::ChainDeleteEdgesRequest chainReq; + chainReq.space_id_ref() = req.get_space_id(); + chainReq.parts_ref() = req.get_parts(); + chainReq.txn_id_ref() = txnId; + chainReq.term_ref() = termId; + auto resp = getResponse( + evb, + std::make_pair(leader, chainReq), + [](cpp2::InternalStorageServiceAsyncClient* client, const cpp2::ChainDeleteEdgesRequest& r) { + return client->future_chainDeleteEdges(r); + }); + + std::move(resp).thenTry([=, p = std::move(p)](auto&& t) mutable { + auto code = 
getErrorCode(t); + if (code == nebula::cpp2::ErrorCode::E_LEADER_CHANGED) { + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + chainDeleteEdges(req, txnId, termId, std::move(p), evb); + } else { + p.setValue(code); + } + return; + }); +} + } // namespace storage } // namespace nebula diff --git a/src/clients/storage/InternalStorageClient.h b/src/clients/storage/InternalStorageClient.h index e964430450f..eb53904a803 100644 --- a/src/clients/storage/InternalStorageClient.h +++ b/src/clients/storage/InternalStorageClient.h @@ -54,6 +54,12 @@ class InternalStorageClient std::unordered_map> data, folly::EventBase* evb = nullptr); + virtual void chainDeleteEdges(cpp2::DeleteEdgesRequest& req, + const std::string& txnId, + TermID termId, + folly::Promise<::nebula::cpp2::ErrorCode>&& p, + folly::EventBase* evb = nullptr); + private: cpp2::ChainAddEdgesRequest makeChainAddReq(const cpp2::AddEdgesRequest& req, TermID termId, diff --git a/src/clients/storage/StorageClient.cpp b/src/clients/storage/StorageClient.cpp index 61381598225..6c1a81f5a8d 100644 --- a/src/clients/storage/StorageClient.cpp +++ b/src/clients/storage/StorageClient.cpp @@ -268,8 +268,10 @@ StorageRpcRespFuture StorageClient::deleteEdges( return collectResponse(param.evb, std::move(requests), - [](ThriftClientType* client, const cpp2::DeleteEdgesRequest& r) { - return client->future_deleteEdges(r); + [useToss = param.useExperimentalFeature]( + ThriftClientType* client, const cpp2::DeleteEdgesRequest& r) { + return useToss ? 
client->future_chainDeleteEdges(r) + : client->future_deleteEdges(r); }); } diff --git a/src/common/utils/Types.h b/src/common/utils/Types.h index 88e74106edc..e365fb4270e 100644 --- a/src/common/utils/Types.h +++ b/src/common/utils/Types.h @@ -19,6 +19,8 @@ enum class NebulaKeyType : uint32_t { kOperation = 0x00000005, kKeyValue = 0x00000006, kVertex = 0x00000007, + kPrime = 0x00000008, // used in TOSS, if we write a lock succeed + kDoublePrime = 0x00000009, // used in TOSS, if we get RPC back from remote. }; enum class NebulaSystemKeyType : uint32_t { diff --git a/src/graph/executor/mutate/DeleteExecutor.cpp b/src/graph/executor/mutate/DeleteExecutor.cpp index 3cb1c670c0b..bcee155e38e 100644 --- a/src/graph/executor/mutate/DeleteExecutor.cpp +++ b/src/graph/executor/mutate/DeleteExecutor.cpp @@ -9,6 +9,7 @@ #include "graph/context/QueryContext.h" #include "graph/executor/mutate/DeleteExecutor.h" #include "graph/planner/plan/Mutate.h" +#include "graph/service/GraphFlags.h" #include "graph/util/SchemaUtil.h" using nebula::storage::StorageClient; @@ -208,6 +209,7 @@ folly::Future DeleteEdgesExecutor::deleteEdges() { auto plan = qctx()->plan(); StorageClient::CommonRequestParam param( spaceId, qctx()->rctx()->session()->id(), plan->id(), plan->isProfileEnabled()); + param.useExperimentalFeature = FLAGS_enable_experimental_feature; return qctx() ->getStorageClient() ->deleteEdges(param, std::move(edgeKeys)) diff --git a/src/interface/storage.thrift b/src/interface/storage.thrift index 1d698c8db51..7eade231fda 100644 --- a/src/interface/storage.thrift +++ b/src/interface/storage.thrift @@ -684,6 +684,7 @@ service GraphStorageService { UpdateResponse chainUpdateEdge(1: UpdateEdgeRequest req); ExecResponse chainAddEdges(1: AddEdgesRequest req); + ExecResponse chainDeleteEdges(1: DeleteEdgesRequest req); KVGetResponse get(1: KVGetRequest req); ExecResponse put(1: KVPutRequest req); @@ -884,7 +885,6 @@ struct ChainAddEdgesRequest { 3: list prop_names, // if true, when edge 
already exists, do nothing 4: bool if_not_exists, - // 5: map term_of_parts, 5: i64 term 6: optional i64 edge_version // 6: optional map>( @@ -900,10 +900,20 @@ struct ChainUpdateEdgeRequest { 5: required list parts, } +struct ChainDeleteEdgesRequest { + 1: common.GraphSpaceID space_id, + // partId => edgeKeys + 2: map> + (cpp.template = "std::unordered_map") parts, + 3: binary txn_id + 4: i64 term, +} + service InternalStorageService { ExecResponse chainAddEdges(1: ChainAddEdgesRequest req); UpdateResponse chainUpdateEdge(1: ChainUpdateEdgeRequest req); // Interfaces for log storage ExecResponse syncData(1: SyncDataRequest req); + ExecResponse chainDeleteEdges(1: ChainDeleteEdgesRequest req); } diff --git a/src/kvstore/Part.cpp b/src/kvstore/Part.cpp index ec10f7c9ed7..87f64f96bf5 100644 --- a/src/kvstore/Part.cpp +++ b/src/kvstore/Part.cpp @@ -252,7 +252,7 @@ std::tuple Part::commitLogs( // Make the number of values are an even number DCHECK_EQ((kvs.size() + 1) / 2, kvs.size() / 2); for (size_t i = 0; i < kvs.size(); i += 2) { - VLOG(1) << "OP_MULTI_PUT " << folly::hexlify(kvs[i]) + VLOG(2) << "OP_MULTI_PUT " << folly::hexlify(kvs[i]) << ", val = " << folly::hexlify(kvs[i + 1]); auto code = batch->put(kvs[i], kvs[i + 1]); if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { @@ -295,7 +295,7 @@ std::tuple Part::commitLogs( case OP_BATCH_WRITE: { auto data = decodeBatchValue(log); for (auto& op : data) { - VLOG(1) << "OP_BATCH_WRITE: " << folly::hexlify(op.second.first) + VLOG(2) << "OP_BATCH_WRITE: " << folly::hexlify(op.second.first) << ", val=" << folly::hexlify(op.second.second); auto code = nebula::cpp2::ErrorCode::SUCCEEDED; if (op.first == BatchLogType::OP_BATCH_PUT) { diff --git a/src/kvstore/raftex/RaftPart.cpp b/src/kvstore/raftex/RaftPart.cpp index f8571c775b4..1b845a89f40 100644 --- a/src/kvstore/raftex/RaftPart.cpp +++ b/src/kvstore/raftex/RaftPart.cpp @@ -2026,7 +2026,6 @@ void RaftPart::checkRemoteListeners(const std::set& expected) { } } } - bool 
RaftPart::leaseValid() { std::lock_guard g(raftLock_); if (hosts_.empty()) { diff --git a/src/mock/MockData.cpp b/src/mock/MockData.cpp index 6553dd07da5..78811aead2f 100644 --- a/src/mock/MockData.cpp +++ b/src/mock/MockData.cpp @@ -744,7 +744,7 @@ std::vector MockData::mockPlayerVerticeIds() { return ret; } -std::vector MockData::mockEdges(bool upper) { +std::vector MockData::mockEdges(bool upper, bool hasInEdges) { std::vector ret; // Use serve data, positive edgeType is 101, reverse edgeType is -101 for (auto& serve : serves_) { @@ -788,7 +788,9 @@ std::vector MockData::mockEdges(bool upper) { positiveEdge.props_ = std::move(props); auto reverseData = getReverseEdge(positiveEdge); ret.emplace_back(std::move(positiveEdge)); - ret.emplace_back(std::move(reverseData)); + if (hasInEdges) { + ret.emplace_back(std::move(reverseData)); + } } return ret; } @@ -947,11 +949,13 @@ nebula::storage::cpp2::DeleteVerticesRequest MockData::mockDeleteVerticesReq(int return req; } -nebula::storage::cpp2::AddEdgesRequest MockData::mockAddEdgesReq(bool upper, int32_t parts) { +nebula::storage::cpp2::AddEdgesRequest MockData::mockAddEdgesReq(bool upper, + int32_t parts, + bool hasInEdges) { nebula::storage::cpp2::AddEdgesRequest req; req.space_id_ref() = 1; req.if_not_exists_ref() = true; - auto retRecs = mockEdges(upper); + auto retRecs = mockEdges(upper, hasInEdges); for (auto& rec : retRecs) { nebula::storage::cpp2::NewEdge newEdge; nebula::storage::cpp2::EdgeKey edgeKey; diff --git a/src/mock/MockData.h b/src/mock/MockData.h index 80899aad8e5..d96f87ab864 100644 --- a/src/mock/MockData.h +++ b/src/mock/MockData.h @@ -115,7 +115,8 @@ class MockData { static std::vector> mockPlayerIndexKeys(bool upper = false); // generate serve edge - static std::vector mockEdges(bool upper = false); + // param: includeInEdges, if the return set has both out and in edges + static std::vector mockEdges(bool upper = false, bool includeInEdges = true); static std::vector> mockServeIndexKeys(); @@ 
-169,7 +170,8 @@ class MockData { int32_t parts = 6); static nebula::storage::cpp2::AddEdgesRequest mockAddEdgesReq(bool upper = false, - int32_t parts = 6); + int32_t parts = 6, + bool hasInEdges = true); static nebula::storage::cpp2::DeleteVerticesRequest mockDeleteVerticesReq(int32_t parts = 6); diff --git a/src/storage/CMakeLists.txt b/src/storage/CMakeLists.txt index 15f57a3f71f..7faf4be2082 100644 --- a/src/storage/CMakeLists.txt +++ b/src/storage/CMakeLists.txt @@ -72,17 +72,22 @@ nebula_add_library( storage_transaction_executor OBJECT transaction/TransactionManager.cpp transaction/ConsistUtil.cpp - transaction/ChainUpdateEdgeProcessorLocal.cpp - transaction/ChainUpdateEdgeProcessorRemote.cpp + transaction/ChainUpdateEdgeLocalProcessor.cpp + transaction/ChainUpdateEdgeRemoteProcessor.cpp transaction/ChainResumeProcessor.cpp transaction/ChainAddEdgesGroupProcessor.cpp - transaction/ChainAddEdgesProcessorLocal.cpp - transaction/ChainAddEdgesProcessorRemote.cpp + transaction/ChainAddEdgesLocalProcessor.cpp + transaction/ChainAddEdgesRemoteProcessor.cpp transaction/ResumeAddEdgeProcessor.cpp transaction/ResumeAddEdgeRemoteProcessor.cpp transaction/ResumeUpdateProcessor.cpp transaction/ResumeUpdateRemoteProcessor.cpp transaction/ChainProcessorFactory.cpp + transaction/ChainDeleteEdgesGroupProcessor.cpp + transaction/ChainDeleteEdgesLocalProcessor.cpp + transaction/ChainDeleteEdgesRemoteProcessor.cpp + transaction/ChainDeleteEdgesResumeProcessor.cpp + transaction/ChainDeleteEdgesResumeRemoteProcessor.cpp ) nebula_add_library( diff --git a/src/storage/GraphStorageServiceHandler.cpp b/src/storage/GraphStorageServiceHandler.cpp index 579d0fd2a68..bebabfd1987 100644 --- a/src/storage/GraphStorageServiceHandler.cpp +++ b/src/storage/GraphStorageServiceHandler.cpp @@ -21,7 +21,8 @@ #include "storage/query/ScanEdgeProcessor.h" #include "storage/query/ScanVertexProcessor.h" #include "storage/transaction/ChainAddEdgesGroupProcessor.h" -#include 
"storage/transaction/ChainUpdateEdgeProcessorLocal.h" +#include "storage/transaction/ChainDeleteEdgesGroupProcessor.h" +#include "storage/transaction/ChainUpdateEdgeLocalProcessor.h" #define RETURN_FUTURE(processor) \ auto f = processor->getFuture(); \ @@ -112,7 +113,7 @@ folly::Future GraphStorageServiceHandler::future_updateEdg folly::Future GraphStorageServiceHandler::future_chainUpdateEdge( const cpp2::UpdateEdgeRequest& req) { - auto* proc = ChainUpdateEdgeProcessorLocal::instance(env_); + auto* proc = ChainUpdateEdgeLocalProcessor::instance(env_); RETURN_FUTURE(proc); } @@ -160,6 +161,12 @@ folly::Future GraphStorageServiceHandler::future_chainAddEdg RETURN_FUTURE(processor); } +folly::Future GraphStorageServiceHandler::future_chainDeleteEdges( + const cpp2::DeleteEdgesRequest& req) { + auto* processor = ChainDeleteEdgesGroupProcessor::instance(env_); + RETURN_FUTURE(processor); +} + folly::Future GraphStorageServiceHandler::future_put( const cpp2::KVPutRequest& req) { auto* processor = PutProcessor::instance(env_); diff --git a/src/storage/GraphStorageServiceHandler.h b/src/storage/GraphStorageServiceHandler.h index d4e806d9bc5..9c4b7a2e898 100644 --- a/src/storage/GraphStorageServiceHandler.h +++ b/src/storage/GraphStorageServiceHandler.h @@ -57,6 +57,9 @@ class GraphStorageServiceHandler final : public cpp2::GraphStorageServiceSvIf { folly::Future future_scanVertex(const cpp2::ScanVertexRequest& req) override; + folly::Future future_chainDeleteEdges( + const cpp2::DeleteEdgesRequest& req) override; + folly::Future future_scanEdge(const cpp2::ScanEdgeRequest& req) override; folly::Future future_getUUID(const cpp2::GetUUIDReq& req) override; diff --git a/src/storage/InternalStorageServiceHandler.cpp b/src/storage/InternalStorageServiceHandler.cpp index 11f544e902c..3cde41e53b9 100644 --- a/src/storage/InternalStorageServiceHandler.cpp +++ b/src/storage/InternalStorageServiceHandler.cpp @@ -6,8 +6,9 @@ #include "storage/InternalStorageServiceHandler.h" 
#include "storage/kv/SyncDataProcessor.h" -#include "storage/transaction/ChainAddEdgesProcessorRemote.h" -#include "storage/transaction/ChainUpdateEdgeProcessorRemote.h" +#include "storage/transaction/ChainAddEdgesRemoteProcessor.h" +#include "storage/transaction/ChainDeleteEdgesRemoteProcessor.h" +#include "storage/transaction/ChainUpdateEdgeRemoteProcessor.h" #define RETURN_FUTURE(processor) \ auto f = processor->getFuture(); \ @@ -38,13 +39,19 @@ InternalStorageServiceHandler::InternalStorageServiceHandler(StorageEnv* env) : folly::Future InternalStorageServiceHandler::future_chainAddEdges( const cpp2::ChainAddEdgesRequest& req) { - auto* processor = ChainAddEdgesProcessorRemote::instance(env_); + auto* processor = ChainAddEdgesRemoteProcessor::instance(env_); RETURN_FUTURE(processor); } folly::Future InternalStorageServiceHandler::future_chainUpdateEdge( const cpp2::ChainUpdateEdgeRequest& req) { - auto* processor = ChainUpdateEdgeProcessorRemote::instance(env_); + auto* processor = ChainUpdateEdgeRemoteProcessor::instance(env_); + RETURN_FUTURE(processor); +} + +folly::Future InternalStorageServiceHandler::future_chainDeleteEdges( + const cpp2::ChainDeleteEdgesRequest& req) { + auto* processor = ChainDeleteEdgesRemoteProcessor::instance(env_); RETURN_FUTURE(processor); } diff --git a/src/storage/InternalStorageServiceHandler.h b/src/storage/InternalStorageServiceHandler.h index 92a6d30cd81..8f32bf7b937 100644 --- a/src/storage/InternalStorageServiceHandler.h +++ b/src/storage/InternalStorageServiceHandler.h @@ -30,6 +30,9 @@ class InternalStorageServiceHandler final : public cpp2::InternalStorageServiceS folly::Future future_syncData(const cpp2::SyncDataRequest& req) override; + folly::Future future_chainDeleteEdges( + const cpp2::ChainDeleteEdgesRequest& p_req) override; + private: StorageEnv* env_{nullptr}; std::shared_ptr readerPool_; diff --git a/src/storage/mutate/AddEdgesProcessor.h b/src/storage/mutate/AddEdgesProcessor.h index b8f75b6caec..cec28b69e3c 100644 
--- a/src/storage/mutate/AddEdgesProcessor.h +++ b/src/storage/mutate/AddEdgesProcessor.h @@ -19,7 +19,7 @@ extern ProcessorCounters kAddEdgesCounters; class AddEdgesProcessor : public BaseProcessor { friend class TransactionManager; - friend class ChainAddEdgesProcessorLocal; + friend class ChainAddEdgesLocalProcessor; public: static AddEdgesProcessor* instance(StorageEnv* env, diff --git a/src/storage/mutate/DeleteEdgesProcessor.cpp b/src/storage/mutate/DeleteEdgesProcessor.cpp index ac6071436cd..0761fe8e7ca 100644 --- a/src/storage/mutate/DeleteEdgesProcessor.cpp +++ b/src/storage/mutate/DeleteEdgesProcessor.cpp @@ -77,8 +77,22 @@ void DeleteEdgesProcessor::process(const cpp2::DeleteEdgesRequest& req) { handleAsync(spaceId_, partId, code); continue; } - doRemove(spaceId_, partId, std::move(keys)); - stats::StatsManager::addValue(kNumEdgesDeleted, keys.size()); + + HookFuncPara para; + if (tossHookFunc_) { + para.keys.emplace(&keys); + (*tossHookFunc_)(para); + } + if (para.result) { + env_->kvstore_->asyncAppendBatch( + spaceId_, + partId, + std::move(para.result.value()), + [partId, this](nebula::cpp2::ErrorCode rc) { handleAsync(spaceId_, partId, rc); }); + } else { + stats::StatsManager::addValue(kNumEdgesDeleted, keys.size()); + doRemove(spaceId_, partId, std::move(keys)); + } } } else { for (auto& part : partEdges) { @@ -198,6 +212,11 @@ ErrorOr DeleteEdgesProcessor::deleteEdges( } } + if (tossHookFunc_) { + HookFuncPara para; + para.batch.emplace(batchHolder.get()); + (*tossHookFunc_)(para); + } return encodeBatchValue(batchHolder->getBatch()); } diff --git a/src/storage/mutate/DeleteEdgesProcessor.h b/src/storage/mutate/DeleteEdgesProcessor.h index 2950a8f63ae..273399bd1e7 100644 --- a/src/storage/mutate/DeleteEdgesProcessor.h +++ b/src/storage/mutate/DeleteEdgesProcessor.h @@ -9,6 +9,7 @@ #include "common/base/Base.h" #include "kvstore/LogEncoder.h" #include "storage/BaseProcessor.h" +#include "storage/transaction/ConsistTypes.h" namespace nebula 
namespace storage { @@ -24,6 +25,11 @@ class DeleteEdgesProcessor : public BaseProcessor { void process(const cpp2::DeleteEdgesRequest& req); + using HookFunction = std::function; + void setHookFunc(HookFunction func) { + tossHookFunc_ = func; + } + private: DeleteEdgesProcessor(StorageEnv* env, const ProcessorCounters* counters) : BaseProcessor(env, counters) {} @@ -34,6 +40,11 @@ class DeleteEdgesProcessor : public BaseProcessor { private: GraphSpaceID spaceId_; std::vector> indexes_; + + protected: + // TOSS use this hook function to append some delete operation + // or may append some put operation + std::optional tossHookFunc_; }; } // namespace storage diff --git a/src/storage/test/CMakeLists.txt b/src/storage/test/CMakeLists.txt index 636d929e5db..a1e3cc248a4 100644 --- a/src/storage/test/CMakeLists.txt +++ b/src/storage/test/CMakeLists.txt @@ -759,6 +759,21 @@ nebula_add_executable( gtest ) +nebula_add_test( + NAME + chain_delete_edge_test + SOURCES + ChainDeleteEdgesTest.cpp + OBJECTS + ${storage_test_deps} + LIBRARIES + ${ROCKSDB_LIBRARIES} + ${THRIFT_LIBRARIES} + ${PROXYGEN_LIBRARIES} + wangle + gtest +) + nebula_add_executable( NAME storage_index_write_bm diff --git a/src/storage/test/ChainAddEdgesTest.cpp b/src/storage/test/ChainAddEdgesTest.cpp index 8f693d4d4bc..3881e0cc671 100644 --- a/src/storage/test/ChainAddEdgesTest.cpp +++ b/src/storage/test/ChainAddEdgesTest.cpp @@ -18,7 +18,7 @@ #include "storage/test/ChainTestUtils.h" #include "storage/test/TestUtils.h" #include "storage/transaction/ChainAddEdgesGroupProcessor.h" -#include "storage/transaction/ChainAddEdgesProcessorLocal.h" +#include "storage/transaction/ChainAddEdgesLocalProcessor.h" #include "storage/transaction/ConsistUtil.h" namespace nebula { @@ -34,11 +34,11 @@ TEST(ChainAddEdgesTest, TestUtilsTest) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = 
MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, fackTerm); - auto* processor = new FakeChainAddEdgesProcessorLocal(env); + auto* processor = new FakeChainAddEdgesLocalProcessor(env); processor->rcPrepareLocal = nebula::cpp2::ErrorCode::SUCCEEDED; processor->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; @@ -63,10 +63,10 @@ TEST(ChainAddEdgesTest, prepareLocalSucceedTest) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, fackTerm); - auto* proc = new FakeChainAddEdgesProcessorLocal(env); + auto* proc = new FakeChainAddEdgesLocalProcessor(env); proc->rcProcessRemote = nebula::cpp2::ErrorCode::E_RPC_FAILURE; @@ -91,10 +91,10 @@ TEST(ChainAddEdgesTest, processRemoteSucceededTest) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto* proc = new FakeChainAddEdgesProcessorLocal(env); + auto* proc = new FakeChainAddEdgesLocalProcessor(env); MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, fackTerm); proc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; @@ -122,11 +122,11 @@ TEST(ChainAddEdgesTest, processRemoteFailedTest) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); MetaClientTestUpdater::addPartTerm(env->metaClient_, 
mockSpaceId, mockPartNum, fackTerm); - auto* proc = new FakeChainAddEdgesProcessorLocal(env); + auto* proc = new FakeChainAddEdgesLocalProcessor(env); proc->rcProcessRemote = nebula::cpp2::ErrorCode::E_OUTDATED_TERM; LOG(INFO) << "Build AddEdgesRequest..."; @@ -144,6 +144,8 @@ TEST(ChainAddEdgesTest, processRemoteFailedTest) { // prime key should be deleted EXPECT_EQ(0, numOfKey(req, util.genPrime, env)); EXPECT_EQ(0, numOfKey(req, util.genDoublePrime, env)); + + // env->txnMan_->stop(); } TEST(ChainAddEdgesTest, processRemoteUnknownTest) { @@ -151,11 +153,11 @@ TEST(ChainAddEdgesTest, processRemoteUnknownTest) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, fackTerm); - auto* proc = new FakeChainAddEdgesProcessorLocal(env); + auto* proc = new FakeChainAddEdgesLocalProcessor(env); proc->rcProcessRemote = nebula::cpp2::ErrorCode::E_RPC_FAILURE; @@ -182,12 +184,12 @@ TEST(ChainAddEdgesTest, processRemoteTest) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, fackTerm); - auto* proc = new FakeChainAddEdgesProcessorLocal(env); + auto* proc = new FakeChainAddEdgesLocalProcessor(env); LOG(INFO) << "Build AddEdgesRequest..."; cpp2::AddEdgesRequest req = mock::MockData::mockAddEdgesReq(false, 1); diff --git a/src/storage/test/ChainDeleteEdgesTest.cpp b/src/storage/test/ChainDeleteEdgesTest.cpp new file mode 100644 index 00000000000..91ef7a00597 --- /dev/null +++ 
b/src/storage/test/ChainDeleteEdgesTest.cpp @@ -0,0 +1,521 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "common/fs/TempDir.h" +#include "mock/MockCluster.h" +#include "mock/MockData.h" +#include "storage/CommonUtils.h" +#include "storage/StorageFlags.h" +#include "storage/test/ChainTestUtils.h" +#include "storage/test/TestUtils.h" +#include "storage/transaction/ChainDeleteEdgesGroupProcessor.h" +#include "storage/transaction/ChainDeleteEdgesLocalProcessor.h" +#include "storage/transaction/ConsistUtil.h" + +namespace nebula { +namespace storage { + +constexpr int32_t mockSpaceId = 1; +constexpr int32_t mockPartNum = 1; +constexpr int32_t gTerm = 1; + +class GlobalCluster { + public: + static mock::MockCluster* get() { + static mock::MockCluster cluster; + static fs::TempDir rootPath("/tmp/DeleteEdgesTest.XXXXXX"); + static bool init = false; + if (!init) { + cluster.initStorageKV(rootPath.path()); + init = true; + } + return &cluster; + } +}; + +// class FakeChainDeleteEdgesProcessor; +class FakeChainDeleteEdgesProcessor : public ChainDeleteEdgesLocalProcessor { + public: + explicit FakeChainDeleteEdgesProcessor(StorageEnv* env); + folly::SemiFuture prepareLocal() override; + folly::SemiFuture processRemote(Code code) override; + folly::SemiFuture processLocal(Code code) override; + + cpp2::DeleteEdgesRequest makeDelRequest(cpp2::AddEdgesRequest, + int32_t limit = std::numeric_limits::max()); + + public: + folly::Optional rcPrepareLocal; + folly::Optional rcProcessRemote; + folly::Optional rcProcessLocal; +}; + +// make sure test utils works +TEST(ChainDeleteEdgesTest, TestUtilsTest) { + fs::TempDir rootPath("/tmp/DeleteEdgesTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto mClient = MetaClientTestUpdater::makeDefault(); 
+ env->metaClient_ = mClient.get(); + MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, gTerm); + + auto* proc = new FakeChainDeleteEdgesProcessor(env); + + proc->rcPrepareLocal = nebula::cpp2::ErrorCode::SUCCEEDED; + proc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + proc->rcProcessLocal = nebula::cpp2::ErrorCode::SUCCEEDED; + + LOG(INFO) << "Build DeleteEdgesReq..."; + auto req = mock::MockData::mockDeleteEdgesReq(mockPartNum); + + LOG(INFO) << "Run DeleteEdgesReq..."; + auto fut = proc->getFuture(); + proc->process(req); + auto resp = std::move(fut).get(); + // EXPECT_EQ(0, resp.result.failed_parts.size()); + + LOG(INFO) << "Check data in kv store..."; + // sleep(1); + // The number of data in serve is 334 + // checkAddEdgesData(req, env, 0, 0); +} + +// delete a not exist edge +TEST(ChainDeleteEdgesTest, Test2) { + fs::TempDir rootPath("/tmp/DeleteEdgesTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto mClient = MetaClientTestUpdater::makeDefault(); + env->metaClient_ = mClient.get(); + MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, gTerm); + + auto* proc = new FakeChainDeleteEdgesProcessor(env); + + // proc->rcPrepareLocal = nebula::cpp2::ErrorCode::SUCCEEDED; + proc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + // proc->rcProcessLocal = nebula::cpp2::ErrorCode::SUCCEEDED; + + LOG(INFO) << "Build DeleteEdgesReq..."; + auto req = mock::MockData::mockDeleteEdgesReq(mockPartNum); + + LOG(INFO) << "Run DeleteEdgesReq..."; + LOG(INFO) << "spaceId = " << req.get_space_id(); + auto fut = proc->getFuture(); + proc->process(req); + auto resp = std::move(fut).get(); + + // we need this sleep to ensure processor deleted before transaction manager + std::this_thread::sleep_for(std::chrono::milliseconds(300)); +} + +// add some edges, then delete it, all phase succeed +TEST(ChainDeleteEdgesTest, Test3) { + 
fs::TempDir rootPath("/tmp/DeleteEdgesTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto mClient = MetaClientTestUpdater::makeDefault(); + env->metaClient_ = mClient.get(); + MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, gTerm); + + auto* addProc = new FakeChainAddEdgesLocalProcessor(env); + addProc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + + bool upperPropVal = false; + int32_t partNum = 1; + bool hasInEdges = false; + auto addReq = mock::MockData::mockAddEdgesReq(upperPropVal, partNum, hasInEdges); + + LOG(INFO) << "Run FakeChainAddEdgesLocalProcessor..."; + auto fut = addProc->getFuture(); + addProc->process(addReq); + auto resp = std::move(fut).get(); + + ChainTestUtils util; + auto edgeKeys = util.genEdgeKeys(addReq, util.genKey); + auto num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + EXPECT_EQ(num, 167); + LOG(INFO) << "after add(), edge num = " << num; + + auto* delProc = new FakeChainDeleteEdgesProcessor(env); + auto delReq = delProc->makeDelRequest(addReq); + // delProc->rcPrepareLocal = nebula::cpp2::ErrorCode::SUCCEEDED; + delProc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + // delProc->rcProcessLocal = nebula::cpp2::ErrorCode::SUCCEEDED; + + LOG(INFO) << "Build DeleteEdgesReq..."; + // auto req = mock::MockData::mockDeleteEdgesReq(mockPartNum); + + LOG(INFO) << "Run DeleteEdgesReq..."; + auto futDel = delProc->getFuture(); + delProc->process(delReq); + std::move(futDel).get(); + + num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + LOG(INFO) << "after del(), edge num = " << num; + EXPECT_EQ(num, 0); + std::this_thread::sleep_for(std::chrono::milliseconds(300)); +} + +// add some edges, then delete one of them, all phase succeed +TEST(ChainDeleteEdgesTest, Test4) { + fs::TempDir rootPath("/tmp/DeleteEdgesTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = 
cluster.storageEnv_.get(); + auto mClient = MetaClientTestUpdater::makeDefault(); + env->metaClient_ = mClient.get(); + MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, gTerm); + + auto* addProc = new FakeChainAddEdgesLocalProcessor(env); + addProc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + + bool upperPropVal = false; + int32_t partNum = 1; + bool hasInEdges = false; + auto addReq = mock::MockData::mockAddEdgesReq(upperPropVal, partNum, hasInEdges); + + LOG(INFO) << "Run FakeChainAddEdgesLocalProcessor..."; + auto fut = addProc->getFuture(); + addProc->process(addReq); + auto resp = std::move(fut).get(); + + ChainTestUtils util; + auto edgeKeys = util.genEdgeKeys(addReq, util.genKey); + auto num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + EXPECT_EQ(num, 167); + LOG(INFO) << "after add(), edge num = " << num; + + auto* delProc = new FakeChainDeleteEdgesProcessor(env); + int32_t limit = 1; + auto delReq = delProc->makeDelRequest(addReq, limit); + // delProc->rcPrepareLocal = nebula::cpp2::ErrorCode::SUCCEEDED; + delProc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + // delProc->rcProcessLocal = nebula::cpp2::ErrorCode::SUCCEEDED; + + LOG(INFO) << "Build DeleteEdgesReq..."; + // auto req = mock::MockData::mockDeleteEdgesReq(mockPartNum); + + LOG(INFO) << "Run DeleteEdgesReq..."; + auto futDel = delProc->getFuture(); + delProc->process(delReq); + std::move(futDel).get(); + + num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + LOG(INFO) << "after del(), edge num = " << num; + EXPECT_EQ(num, 166); + std::this_thread::sleep_for(std::chrono::milliseconds(300)); +} + +// add some edges, then delete one of them, not execute local commit +TEST(ChainDeleteEdgesTest, Test5) { + fs::TempDir rootPath("/tmp/DeleteEdgesTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto mClient = MetaClientTestUpdater::makeDefault(); + env->metaClient_ 
= mClient.get(); + MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, gTerm); + + auto* addProc = new FakeChainAddEdgesLocalProcessor(env); + addProc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + + bool upperPropVal = false; + int32_t partNum = 1; + bool hasInEdges = false; + auto addReq = mock::MockData::mockAddEdgesReq(upperPropVal, partNum, hasInEdges); + + LOG(INFO) << "Run FakeChainAddEdgesLocalProcessor..."; + auto fut = addProc->getFuture(); + addProc->process(addReq); + auto resp = std::move(fut).get(); + + ChainTestUtils util; + auto edgeKeys = util.genEdgeKeys(addReq, util.genKey); + auto num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + EXPECT_EQ(num, 167); + LOG(INFO) << "after add(), edge num = " << num; + + auto* delProc = new FakeChainDeleteEdgesProcessor(env); + auto delReq = delProc->makeDelRequest(addReq); + // delProc->rcPrepareLocal = nebula::cpp2::ErrorCode::SUCCEEDED; + delProc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + delProc->rcProcessLocal = nebula::cpp2::ErrorCode::SUCCEEDED; + + LOG(INFO) << "Build DeleteEdgesReq..."; + // auto req = mock::MockData::mockDeleteEdgesReq(mockPartNum); + + LOG(INFO) << "Run DeleteEdgesReq..."; + auto futDel = delProc->getFuture(); + delProc->process(delReq); + std::move(futDel).get(); + + num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + LOG(INFO) << "after del(), edge num = " << num; + EXPECT_EQ(num, 167); + + env->txnMan_->scanAll(); + auto* iClient = FakeInternalStorageClient::instance(env, nebula::cpp2::ErrorCode::SUCCEEDED); + FakeInternalStorageClient::hookInternalStorageClient(env, iClient); + ChainResumeProcessor resumeProc(env); + resumeProc.process(); + num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + EXPECT_EQ(num, 0); + std::this_thread::sleep_for(std::chrono::milliseconds(300)); + + delete iClient; +} + +// add some edges, then delete all of them, not execute local commit +TEST(ChainDeleteEdgesTest, Test6) { + fs::TempDir 
rootPath("/tmp/DeleteEdgesTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto mClient = MetaClientTestUpdater::makeDefault(); + env->metaClient_ = mClient.get(); + MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, gTerm); + + auto* addProc = new FakeChainAddEdgesLocalProcessor(env); + addProc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + + bool upperPropVal = false; + int32_t partNum = 1; + bool hasInEdges = false; + auto addReq = mock::MockData::mockAddEdgesReq(upperPropVal, partNum, hasInEdges); + + LOG(INFO) << "Run FakeChainAddEdgesLocalProcessor..."; + auto fut = addProc->getFuture(); + addProc->process(addReq); + auto resp = std::move(fut).get(); + + ChainTestUtils util; + auto edgeKeys = util.genEdgeKeys(addReq, util.genKey); + auto num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + EXPECT_EQ(num, 167); + LOG(INFO) << "after add(), edge num = " << num; + + auto* delProc = new FakeChainDeleteEdgesProcessor(env); + auto delReq = delProc->makeDelRequest(addReq); + delProc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + delProc->rcProcessLocal = nebula::cpp2::ErrorCode::SUCCEEDED; + + LOG(INFO) << "Run DeleteEdgesReq..."; + auto futDel = delProc->getFuture(); + delProc->process(delReq); + std::move(futDel).get(); + + num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + LOG(INFO) << "after del(), edge num = " << num; + EXPECT_EQ(num, 167); + + env->txnMan_->scanAll(); + auto* iClient = FakeInternalStorageClient::instance(env, nebula::cpp2::ErrorCode::SUCCEEDED); + FakeInternalStorageClient::hookInternalStorageClient(env, iClient); + ChainResumeProcessor resumeProc(env); + resumeProc.process(); + num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + EXPECT_EQ(num, 0); + std::this_thread::sleep_for(std::chrono::milliseconds(300)); + + delete iClient; +} + +// add some edges, delete one of them, rpc failure 
+TEST(ChainDeleteEdgesTest, Test7) { + fs::TempDir rootPath("/tmp/DeleteEdgesTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto mClient = MetaClientTestUpdater::makeDefault(); + env->metaClient_ = mClient.get(); + MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, gTerm); + + auto* addProc = new FakeChainAddEdgesLocalProcessor(env); + addProc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + + bool upperPropVal = false; + int32_t partNum = 1; + bool hasInEdges = false; + auto addReq = mock::MockData::mockAddEdgesReq(upperPropVal, partNum, hasInEdges); + + LOG(INFO) << "Run FakeChainAddEdgesLocalProcessor..."; + auto fut = addProc->getFuture(); + addProc->process(addReq); + auto resp = std::move(fut).get(); + + ChainTestUtils util; + auto edgeKeys = util.genEdgeKeys(addReq, util.genKey); + auto num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + EXPECT_EQ(num, 167); + LOG(INFO) << "after add(), edge num = " << num; + + auto* delProc = new FakeChainDeleteEdgesProcessor(env); + int32_t limit = 1; + auto delReq = delProc->makeDelRequest(addReq, limit); + delProc->rcProcessRemote = nebula::cpp2::ErrorCode::E_RPC_FAILURE; + + LOG(INFO) << "Run DeleteEdgesReq..."; + auto futDel = delProc->getFuture(); + delProc->process(delReq); + std::move(futDel).get(); + + num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + LOG(INFO) << "after del(), edge num = " << num; + EXPECT_EQ(num, 166); + + env->txnMan_->scanAll(); + auto* iClient = FakeInternalStorageClient::instance(env, nebula::cpp2::ErrorCode::SUCCEEDED); + FakeInternalStorageClient::hookInternalStorageClient(env, iClient); + ChainResumeProcessor resumeProc(env); + resumeProc.process(); + LOG(INFO) << "after recover()"; + num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + EXPECT_EQ(num, 166); + std::this_thread::sleep_for(std::chrono::milliseconds(300)); + + delete iClient; +} + +// add some 
edges, then delete all of them, rpc failure +TEST(ChainDeleteEdgesTest, Test8) { + fs::TempDir rootPath("/tmp/DeleteEdgesTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto mClient = MetaClientTestUpdater::makeDefault(); + env->metaClient_ = mClient.get(); + MetaClientTestUpdater::addPartTerm(env->metaClient_, mockSpaceId, mockPartNum, gTerm); + + auto* addProc = new FakeChainAddEdgesLocalProcessor(env); + addProc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; + + bool upperPropVal = false; + int32_t partNum = 1; + bool hasInEdges = false; + auto addReq = mock::MockData::mockAddEdgesReq(upperPropVal, partNum, hasInEdges); + + LOG(INFO) << "Run FakeChainAddEdgesLocalProcessor..."; + auto fut = addProc->getFuture(); + addProc->process(addReq); + auto resp = std::move(fut).get(); + + ChainTestUtils util; + auto edgeKeys = util.genEdgeKeys(addReq, util.genKey); + auto num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + EXPECT_EQ(num, 167); + LOG(INFO) << "after add(), edge num = " << num; + + auto* delProc = new FakeChainDeleteEdgesProcessor(env); + int32_t limit = num; + auto delReq = delProc->makeDelRequest(addReq, limit); + delProc->rcProcessRemote = nebula::cpp2::ErrorCode::E_RPC_FAILURE; + + LOG(INFO) << "Run DeleteEdgesReq..."; + auto futDel = delProc->getFuture(); + delProc->process(delReq); + std::move(futDel).get(); + + num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + LOG(INFO) << "after del(), edge num = " << num; + EXPECT_EQ(num, 0); + + env->txnMan_->scanAll(); + auto* iClient = FakeInternalStorageClient::instance(env, nebula::cpp2::ErrorCode::SUCCEEDED); + FakeInternalStorageClient::hookInternalStorageClient(env, iClient); + ChainResumeProcessor resumeProc(env); + resumeProc.process(); + num = util.checkNumOfKey(env, mockSpaceId, edgeKeys); + EXPECT_EQ(num, 0); + std::this_thread::sleep_for(std::chrono::milliseconds(300)); + + delete iClient; +} + +} // 
namespace storage +} // namespace nebula + +int main(int argc, char** argv) { + FLAGS_trace_toss = true; + + testing::InitGoogleTest(&argc, argv); + folly::init(&argc, &argv, false); + google::SetStderrLogging(google::INFO); + return RUN_ALL_TESTS(); +} + +namespace nebula { +namespace storage { + +FakeChainDeleteEdgesProcessor::FakeChainDeleteEdgesProcessor(StorageEnv* env) + : ChainDeleteEdgesLocalProcessor(env) { + spaceVidLen_ = 32; +} + +folly::SemiFuture FakeChainDeleteEdgesProcessor::prepareLocal() { + LOG(INFO) << "FakeChainDeleteEdgesProcessor::" << __func__ << "()"; + if (rcPrepareLocal) { + LOG(INFO) << "Fake return " << apache::thrift::util::enumNameSafe(*rcPrepareLocal); + return *rcPrepareLocal; + } + LOG(INFO) << "forward to ChainDeleteEdgesLocalProcessor::prepareLocal()"; + return ChainDeleteEdgesLocalProcessor::prepareLocal(); +} + +folly::SemiFuture FakeChainDeleteEdgesProcessor::processRemote(Code code) { + LOG(INFO) << "FakeChainDeleteEdgesProcessor::" << __func__ << "()"; + if (rcProcessRemote) { + LOG(INFO) << "processRemote() fake return " + << apache::thrift::util::enumNameSafe(*rcProcessRemote); + return *rcProcessRemote; + } + LOG(INFO) << "forward to ChainDeleteEdgesLocalProcessor::processRemote()"; + return ChainDeleteEdgesLocalProcessor::processRemote(code); +} + +folly::SemiFuture FakeChainDeleteEdgesProcessor::processLocal(Code code) { + LOG(INFO) << "FakeChainDeleteEdgesProcessor::" << __func__ << "()"; + if (rcProcessLocal) { + LOG(INFO) << "Fake return " << apache::thrift::util::enumNameSafe(*rcProcessLocal); + return *rcProcessLocal; + } + LOG(INFO) << "forward to ChainDeleteEdgesLocalProcessor::processLocal()"; + return ChainDeleteEdgesLocalProcessor::processLocal(code); +} + +// make DeleteEdgesRequest according to an AddEdgesRequest +cpp2::DeleteEdgesRequest FakeChainDeleteEdgesProcessor::makeDelRequest(cpp2::AddEdgesRequest req, + int32_t limit) { + cpp2::DeleteEdgesRequest ret; + int32_t num = 0; + // 
ret.set_space_id(req.get_space_id()); + ret.space_id_ref() = req.get_space_id(); + for (auto& partAndEdges : req.get_parts()) { + auto partId = partAndEdges.first; + for (auto& newEdge : partAndEdges.second) { + ret.parts_ref().value()[partId].emplace_back(newEdge.get_key()); + if (++num == limit) { + break; + } + } + if (num == limit) { + break; + } + } + return ret; +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/test/ChainResumeEdgeTest.cpp b/src/storage/test/ChainResumeEdgeTest.cpp index 61e305d0fbd..9c985a8462d 100644 --- a/src/storage/test/ChainResumeEdgeTest.cpp +++ b/src/storage/test/ChainResumeEdgeTest.cpp @@ -19,7 +19,7 @@ #include "storage/test/QueryTestUtils.h" #include "storage/test/TestUtils.h" #include "storage/transaction/ChainAddEdgesGroupProcessor.h" -#include "storage/transaction/ChainAddEdgesProcessorLocal.h" +#include "storage/transaction/ChainAddEdgesLocalProcessor.h" #include "storage/transaction/ChainResumeProcessor.h" #include "storage/transaction/ConsistUtil.h" @@ -27,7 +27,7 @@ namespace nebula { namespace storage { constexpr int32_t mockSpaceId = 1; -constexpr int32_t mockPartNum = 6; +constexpr int32_t mockPartNum = 1; constexpr int32_t mockSpaceVidLen = 32; ChainTestUtils gTestUtil; @@ -47,16 +47,16 @@ TEST(ChainResumeEdgesTest, resumeTest1) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto* proc = new FakeChainAddEdgesProcessorLocal(env); + auto* proc = new FakeChainAddEdgesLocalProcessor(env); proc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; proc->rcProcessLocal = nebula::cpp2::ErrorCode::SUCCEEDED; LOG(INFO) << "Build AddEdgesRequest..."; - cpp2::AddEdgesRequest req = mock::MockData::mockAddEdgesReq(false, 1); + cpp2::AddEdgesRequest req = mock::MockData::mockAddEdgesReq(false, 
mockPartNum); auto fut = proc->getFuture(); proc->process(req); @@ -66,7 +66,9 @@ TEST(ChainResumeEdgesTest, resumeTest1) { EXPECT_EQ(334, numOfKey(req, gTestUtil.genPrime, env)); EXPECT_EQ(0, numOfKey(req, gTestUtil.genDoublePrime, env)); - env->txnMan_->scanPrimes(1, 1); + for (int32_t i = 1; i <= mockPartNum; ++i) { + env->txnMan_->scanPrimes(1, i); + } auto* iClient = FakeInternalStorageClient::instance(env); FakeInternalStorageClient::hookInternalStorageClient(env, iClient); @@ -76,6 +78,8 @@ TEST(ChainResumeEdgesTest, resumeTest1) { EXPECT_EQ(334, numOfKey(req, gTestUtil.genKey, env)); EXPECT_EQ(0, numOfKey(req, gTestUtil.genPrime, env)); EXPECT_EQ(0, numOfKey(req, gTestUtil.genDoublePrime, env)); + + delete iClient; } /** @@ -92,16 +96,16 @@ TEST(ChainResumeEdgesTest, resumeTest2) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto* proc = new FakeChainAddEdgesProcessorLocal(env); + auto* proc = new FakeChainAddEdgesLocalProcessor(env); proc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; proc->rcProcessLocal = nebula::cpp2::ErrorCode::SUCCEEDED; LOG(INFO) << "Build AddEdgesRequest..."; - cpp2::AddEdgesRequest req = mock::MockData::mockAddEdgesReq(false, 1); + cpp2::AddEdgesRequest req = mock::MockData::mockAddEdgesReq(false, mockPartNum); LOG(INFO) << "Test AddEdgesProcessor..."; auto fut = proc->getFuture(); @@ -122,26 +126,28 @@ TEST(ChainResumeEdgesTest, resumeTest2) { EXPECT_EQ(0, numOfKey(req, util.genKey, env)); EXPECT_EQ(334, numOfKey(req, util.genPrime, env)); EXPECT_EQ(0, numOfKey(req, util.genDoublePrime, env)); + + delete iClient; } /** - * @brief resumePrimeTest3 (resume insert prime outdated) + * @brief resumeTest3 (resume insert prime outdated) */ -TEST(ChainResumeEdgesTest, resumePrimeTest3) { +TEST(ChainResumeEdgesTest, 
resumeTest3) { fs::TempDir rootPath("/tmp/AddEdgesTest.XXXXXX"); mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto* proc = new FakeChainAddEdgesProcessorLocal(env); + auto* proc = new FakeChainAddEdgesLocalProcessor(env); proc->rcProcessRemote = nebula::cpp2::ErrorCode::SUCCEEDED; proc->rcProcessLocal = nebula::cpp2::ErrorCode::SUCCEEDED; LOG(INFO) << "Build AddEdgesRequest..."; - cpp2::AddEdgesRequest req = mock::MockData::mockAddEdgesReq(false, 1); + cpp2::AddEdgesRequest req = mock::MockData::mockAddEdgesReq(false, mockPartNum); LOG(INFO) << "Test AddEdgesProcessor..."; auto fut = proc->getFuture(); @@ -157,7 +163,11 @@ TEST(ChainResumeEdgesTest, resumePrimeTest3) { auto error = nebula::cpp2::ErrorCode::E_RPC_FAILURE; auto* iClient = FakeInternalStorageClient::instance(env, error); FakeInternalStorageClient::hookInternalStorageClient(env, iClient); - env->txnMan_->scanPrimes(1, 1); + + for (auto i = 1; i <= mockPartNum; ++i) { + env->txnMan_->scanPrimes(1, i); + } + ChainResumeProcessor resumeProc(env); resumeProc.process(); @@ -165,6 +175,8 @@ TEST(ChainResumeEdgesTest, resumePrimeTest3) { EXPECT_EQ(334, numOfKey(req, util.genKey, env)); EXPECT_EQ(0, numOfKey(req, util.genPrime, env)); EXPECT_EQ(334, numOfKey(req, util.genDoublePrime, env)); + + delete iClient; } /** @@ -181,10 +193,10 @@ TEST(ChainResumeEdgesTest, resumeTest4) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto* proc = new FakeChainAddEdgesProcessorLocal(env); + auto* proc = new FakeChainAddEdgesLocalProcessor(env); proc->rcProcessRemote = 
nebula::cpp2::ErrorCode::E_RPC_FAILURE; @@ -212,6 +224,8 @@ TEST(ChainResumeEdgesTest, resumeTest4) { EXPECT_EQ(334, numOfKey(req, gTestUtil.genKey, env)); EXPECT_EQ(0, numOfKey(req, gTestUtil.genPrime, env)); EXPECT_EQ(334, numOfKey(req, gTestUtil.genDoublePrime, env)); + + delete iClient; } /** @@ -222,10 +236,10 @@ TEST(ChainResumeEdgesTest, resumeTest5) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto* proc = new FakeChainAddEdgesProcessorLocal(env); + auto* proc = new FakeChainAddEdgesLocalProcessor(env); proc->rcProcessRemote = nebula::cpp2::ErrorCode::E_RPC_FAILURE; @@ -252,6 +266,8 @@ TEST(ChainResumeEdgesTest, resumeTest5) { EXPECT_EQ(334, numOfKey(req, util.genKey, env)); EXPECT_EQ(0, numOfKey(req, util.genPrime, env)); EXPECT_EQ(334, numOfKey(req, util.genDoublePrime, env)); + + delete iClient; } /** @@ -262,10 +278,10 @@ TEST(ChainResumeEdgesTest, resumeTest6) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto* proc = new FakeChainAddEdgesProcessorLocal(env); + auto* proc = new FakeChainAddEdgesLocalProcessor(env); proc->rcProcessRemote = nebula::cpp2::ErrorCode::E_RPC_FAILURE; @@ -285,13 +301,19 @@ TEST(ChainResumeEdgesTest, resumeTest6) { auto* iClient = FakeInternalStorageClient::instance(env); FakeInternalStorageClient::hookInternalStorageClient(env, iClient); - env->txnMan_->scanPrimes(1, 1); + + for (auto i = 1; i <= mockPartNum; ++i) { + env->txnMan_->scanPrimes(1, i); + } + ChainResumeProcessor resumeProc(env); resumeProc.process(); EXPECT_EQ(334, numOfKey(req, util.genKey, env)); EXPECT_EQ(0, numOfKey(req, 
util.genPrime, env)); EXPECT_EQ(0, numOfKey(req, util.genDoublePrime, env)); + + delete iClient; } // resume an update left prime, check resume succeeded @@ -300,10 +322,12 @@ TEST(ChainUpdateEdgeTest, resumeTest7) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto parts = cluster.getTotalParts(); + // auto parts = cluster.getTotalParts(); + auto parts = mockPartNum; + LOG(INFO) << "total parts: " << parts; EXPECT_TRUE(QueryTestUtils::mockEdgeData(env, parts, mockSpaceVidLen)); LOG(INFO) << "Test UpdateEdgeRequest..."; @@ -326,13 +350,19 @@ TEST(ChainUpdateEdgeTest, resumeTest7) { auto* iClient = FakeInternalStorageClient::instance(env); FakeInternalStorageClient::hookInternalStorageClient(env, iClient); - env->txnMan_->scanPrimes(1, 1); + + for (auto i = 1; i <= mockPartNum; ++i) { + env->txnMan_->scanPrimes(1, i); + } + ChainResumeProcessor resumeProc(env); resumeProc.process(); EXPECT_TRUE(helper.edgeExist(env, req)); EXPECT_FALSE(helper.primeExist(env, req)); EXPECT_FALSE(helper.doublePrimeExist(env, req)); + + delete iClient; } // resume an update left prime, resume failed @@ -341,10 +371,11 @@ TEST(ChainUpdateEdgeTest, resumeTest8) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto parts = cluster.getTotalParts(); + // auto parts = cluster.getTotalParts(); + auto parts = mockPartNum; EXPECT_TRUE(QueryTestUtils::mockEdgeData(env, parts, mockSpaceVidLen)); LOG(INFO) << "Test UpdateEdgeRequest..."; @@ -373,6 +404,8 @@ TEST(ChainUpdateEdgeTest, resumeTest8) { EXPECT_TRUE(helper.edgeExist(env, req)); EXPECT_TRUE(helper.primeExist(env, 
req)); EXPECT_FALSE(helper.doublePrimeExist(env, req)); + + delete iClient; } // resume an update left prime, resume outdated @@ -381,10 +414,11 @@ TEST(ChainUpdateEdgeTest, resumeTest9) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto parts = cluster.getTotalParts(); + // auto parts = cluster.getTotalParts(); + auto parts = mockPartNum; EXPECT_TRUE(QueryTestUtils::mockEdgeData(env, parts, mockSpaceVidLen)); LOG(INFO) << "Test UpdateEdgeRequest..."; @@ -414,6 +448,8 @@ TEST(ChainUpdateEdgeTest, resumeTest9) { EXPECT_TRUE(helper.edgeExist(env, req)); EXPECT_FALSE(helper.primeExist(env, req)); EXPECT_TRUE(helper.doublePrimeExist(env, req)); + + delete iClient; } // resume an update left prime, check resume succeeded @@ -422,10 +458,11 @@ TEST(ChainUpdateEdgeTest, resumeTest10) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto parts = cluster.getTotalParts(); + // auto parts = cluster.getTotalParts(); + auto parts = mockPartNum; EXPECT_TRUE(QueryTestUtils::mockEdgeData(env, parts, mockSpaceVidLen)); LOG(INFO) << "Test UpdateEdgeRequest..."; @@ -452,6 +489,8 @@ TEST(ChainUpdateEdgeTest, resumeTest10) { EXPECT_TRUE(helper.edgeExist(env, req)); EXPECT_FALSE(helper.primeExist(env, req)); EXPECT_FALSE(helper.doublePrimeExist(env, req)); + + delete iClient; } // resume an update left prime, resume failed @@ -460,10 +499,11 @@ TEST(ChainUpdateEdgeTest, resumeTest11) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto 
mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto parts = cluster.getTotalParts(); + // auto parts = cluster.getTotalParts(); + auto parts = mockPartNum; EXPECT_TRUE(QueryTestUtils::mockEdgeData(env, parts, mockSpaceVidLen)); LOG(INFO) << "Test UpdateEdgeRequest..."; @@ -491,6 +531,8 @@ TEST(ChainUpdateEdgeTest, resumeTest11) { EXPECT_TRUE(helper.edgeExist(env, req)); EXPECT_FALSE(helper.primeExist(env, req)); EXPECT_TRUE(helper.doublePrimeExist(env, req)); + + delete iClient; } // resume an update left prime, resume outdated @@ -499,10 +541,11 @@ TEST(ChainUpdateEdgeTest, resumeTest12) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); - auto parts = cluster.getTotalParts(); + // auto parts = cluster.getTotalParts(); + auto parts = mockPartNum; EXPECT_TRUE(QueryTestUtils::mockEdgeData(env, parts, mockSpaceVidLen)); LOG(INFO) << "Test UpdateEdgeRequest..."; @@ -530,6 +573,8 @@ TEST(ChainUpdateEdgeTest, resumeTest12) { EXPECT_TRUE(helper.edgeExist(env, req)); EXPECT_FALSE(helper.primeExist(env, req)); EXPECT_TRUE(helper.doublePrimeExist(env, req)); + + delete iClient; } } // namespace storage } // namespace nebula diff --git a/src/storage/test/ChainTestUtils.h b/src/storage/test/ChainTestUtils.h index 13da0bf8473..d94f30b2a74 100644 --- a/src/storage/test/ChainTestUtils.h +++ b/src/storage/test/ChainTestUtils.h @@ -7,8 +7,8 @@ #include "storage/CommonUtils.h" #include "storage/transaction/ChainResumeProcessor.h" -#include "storage/transaction/ChainUpdateEdgeProcessorLocal.h" -#include "storage/transaction/ChainUpdateEdgeProcessorRemote.h" +#include "storage/transaction/ChainUpdateEdgeLocalProcessor.h" +#include "storage/transaction/ChainUpdateEdgeRemoteProcessor.h" namespace nebula { namespace storage { @@ -36,6 +36,38 
@@ class ChainTestUtils { }; } + std::vector genEdgeKeys(const cpp2::AddEdgesRequest& req, KeyGenerator gen) { + std::vector ret; + for (auto& partAndEdges : *req.parts_ref()) { + auto partId = partAndEdges.first; + auto& edgeVec = partAndEdges.second; + for (auto& edge : edgeVec) { + auto key = gen(partId, edge); + ret.emplace_back(std::move(key)); + } + } + return ret; + } + + // return the actual num of keys in nebula store. + int32_t checkNumOfKey(StorageEnv* env, + GraphSpaceID spaceId, + const std::vector& keys) { + int32_t ret = 0; + + std::unique_ptr iter; + for (auto& key : keys) { + iter.reset(); + auto partId = NebulaKeyUtils::getPart(key); + auto rc = env->kvstore_->prefix(spaceId, partId, key, &iter); + if (rc == Code::SUCCEEDED && iter && iter->valid()) { + ++ret; + } + } + + return ret; + } + public: int32_t spaceVidLen_{32}; KeyGenerator genKey; @@ -57,6 +89,7 @@ int numOfKey(const cpp2::AddEdgesRequest& req, KeyGenerator gen, StorageEnv* env std::unique_ptr iter; EXPECT_EQ(Code::SUCCEEDED, env->kvstore_->prefix(spaceId, partId, key, &iter)); if (iter && iter->valid()) { + // LOG(INFO) << "key = " << key; ++numOfEdges; } else { // LOG(INFO) << "key: " << key << " not exist"; @@ -82,7 +115,7 @@ bool keyExist(StorageEnv* env, GraphSpaceID spaceId, PartitionID partId, std::st return rc == Code::SUCCEEDED; } -class FakeChainAddEdgesProcessorLocal : public ChainAddEdgesProcessorLocal { +class FakeChainAddEdgesLocalProcessor : public ChainAddEdgesLocalProcessor { FRIEND_TEST(ChainAddEdgesTest, prepareLocalSucceededTest); FRIEND_TEST(ChainAddEdgesTest, processRemoteSucceededTest); FRIEND_TEST(ChainAddEdgesTest, processRemoteFailedTest); @@ -90,44 +123,44 @@ class FakeChainAddEdgesProcessorLocal : public ChainAddEdgesProcessorLocal { // all the above will test succeeded path of process local // the failed path of process local will be tested in resume test public: - explicit FakeChainAddEdgesProcessorLocal(StorageEnv* env) : ChainAddEdgesProcessorLocal(env) 
{ + explicit FakeChainAddEdgesLocalProcessor(StorageEnv* env) : ChainAddEdgesLocalProcessor(env) { spaceVidLen_ = 32; } folly::SemiFuture prepareLocal() override { - LOG(INFO) << "FakeChainAddEdgesProcessorLocal::" << __func__ << "()"; + LOG(INFO) << "FakeChainAddEdgesLocalProcessor::" << __func__ << "()"; if (rcPrepareLocal) { LOG(INFO) << "Fake return " << apache::thrift::util::enumNameSafe(*rcPrepareLocal); return *rcPrepareLocal; } - LOG(INFO) << "forward to ChainAddEdgesProcessorLocal::prepareLocal()"; - return ChainAddEdgesProcessorLocal::prepareLocal(); + LOG(INFO) << "forward to ChainAddEdgesLocalProcessor::prepareLocal()"; + return ChainAddEdgesLocalProcessor::prepareLocal(); } folly::SemiFuture processRemote(Code code) override { - LOG(INFO) << "FakeChainAddEdgesProcessorLocal::" << __func__ << "()"; + LOG(INFO) << "FakeChainAddEdgesLocalProcessor::" << __func__ << "()"; if (rcProcessRemote) { LOG(INFO) << "processRemote() fake return " << apache::thrift::util::enumNameSafe(*rcProcessRemote); LOG_IF(FATAL, code != Code::SUCCEEDED) << "cheat must base on truth"; return *rcProcessRemote; } - LOG(INFO) << "forward to ChainAddEdgesProcessorLocal::processRemote()"; - return ChainAddEdgesProcessorLocal::processRemote(code); + LOG(INFO) << "forward to ChainAddEdgesLocalProcessor::processRemote()"; + return ChainAddEdgesLocalProcessor::processRemote(code); } folly::SemiFuture processLocal(Code code) override { - LOG(INFO) << "FakeChainAddEdgesProcessorLocal::" << __func__ << "()"; + LOG(INFO) << "FakeChainAddEdgesLocalProcessor::" << __func__ << "()"; if (rcProcessLocal) { LOG(INFO) << "Fake return " << apache::thrift::util::enumNameSafe(*rcProcessLocal); return *rcProcessLocal; } - LOG(INFO) << "forward to ChainAddEdgesProcessorLocal::processLocal()"; - return ChainAddEdgesProcessorLocal::processLocal(code); + LOG(INFO) << "forward to ChainAddEdgesLocalProcessor::processLocal()"; + return ChainAddEdgesLocalProcessor::processLocal(code); } cpp2::AddEdgesRequest 
reverseRequestForward(const cpp2::AddEdgesRequest& req) { - return ChainAddEdgesProcessorLocal::reverseRequest(req); + return ChainAddEdgesLocalProcessor::reverseRequest(req); } folly::Optional rcPrepareLocal; @@ -137,9 +170,9 @@ class FakeChainAddEdgesProcessorLocal : public ChainAddEdgesProcessorLocal { folly::Optional rcProcessLocal; }; -class FakeChainUpdateProcessor : public ChainUpdateEdgeProcessorLocal { +class FakeChainUpdateProcessor : public ChainUpdateEdgeLocalProcessor { public: - explicit FakeChainUpdateProcessor(StorageEnv* env) : ChainUpdateEdgeProcessorLocal(env) { + explicit FakeChainUpdateProcessor(StorageEnv* env) : ChainUpdateEdgeLocalProcessor(env) { spaceVidLen_ = 32; } @@ -149,8 +182,8 @@ class FakeChainUpdateProcessor : public ChainUpdateEdgeProcessorLocal { LOG(INFO) << "Fake return " << apache::thrift::util::enumNameSafe(*rcPrepareLocal); return *rcPrepareLocal; } - LOG(INFO) << "forward to ChainUpdateEdgeProcessorLocal::prepareLocal()"; - return ChainUpdateEdgeProcessorLocal::prepareLocal(); + LOG(INFO) << "forward to ChainUpdateEdgeLocalProcessor::prepareLocal()"; + return ChainUpdateEdgeLocalProcessor::prepareLocal(); } folly::SemiFuture processRemote(Code code) override { @@ -161,8 +194,8 @@ class FakeChainUpdateProcessor : public ChainUpdateEdgeProcessorLocal { LOG_IF(FATAL, code != Code::SUCCEEDED) << "cheat must base on truth"; return *rcProcessRemote; } - LOG(INFO) << "forward to ChainUpdateEdgeProcessorLocal::processRemote()"; - return ChainUpdateEdgeProcessorLocal::processRemote(code); + LOG(INFO) << "forward to ChainUpdateEdgeLocalProcessor::processRemote()"; + return ChainUpdateEdgeLocalProcessor::processRemote(code); } folly::SemiFuture processLocal(Code code) override { @@ -172,8 +205,8 @@ class FakeChainUpdateProcessor : public ChainUpdateEdgeProcessorLocal { << apache::thrift::util::enumNameSafe(*rcProcessLocal); return *rcProcessLocal; } - LOG(INFO) << "forward to ChainUpdateEdgeProcessorLocal::processLocal()"; - return 
ChainUpdateEdgeProcessorLocal::processLocal(code); + LOG(INFO) << "forward to ChainUpdateEdgeLocalProcessor::processLocal()"; + return ChainUpdateEdgeLocalProcessor::processLocal(code); } void wrapAddUnfinishedEdge(ResumeType type) { @@ -216,7 +249,7 @@ class MetaClientTestUpdater { pCache->termOfPartition_[partId] = termId; } - static std::unique_ptr makeDefaultMetaClient() { + static std::unique_ptr makeDefault() { auto exec = std::make_shared(3); std::vector addrs(1); meta::MetaClientOptions options; @@ -254,7 +287,7 @@ class FakeInternalStorageClient : public InternalStorageClient { chainReq.update_edge_request_ref() = req; chainReq.term_ref() = termOfSrc; - auto* proc = ChainUpdateEdgeProcessorRemote::instance(env_); + auto* proc = ChainUpdateEdgeRemoteProcessor::instance(env_); auto f = proc->getFuture(); proc->process(chainReq); auto resp = std::move(f).get(); @@ -280,9 +313,23 @@ class FakeInternalStorageClient : public InternalStorageClient { UNUSED(evb); } + void chainDeleteEdges(cpp2::DeleteEdgesRequest& req, + const std::string& txnId, + TermID termId, + folly::Promise<::nebula::cpp2::ErrorCode>&& p, + folly::EventBase* evb = nullptr) override { + UNUSED(req); + UNUSED(txnId); + UNUSED(termId); + p.setValue(code_); + UNUSED(evb); + } + static FakeInternalStorageClient* instance(StorageEnv* env, Code fakeCode = Code::SUCCEEDED) { auto pool = std::make_shared(3); return new FakeInternalStorageClient(env, pool, fakeCode); + // static FakeInternalStorageClient client(env, pool, fakeCode); + // return &client; } static void hookInternalStorageClient(StorageEnv* env, InternalStorageClient* client) { @@ -351,7 +398,7 @@ struct ChainUpdateEdgeTestHelper { } cpp2::UpdateEdgeRequest reverseRequest(StorageEnv* env, const cpp2::UpdateEdgeRequest& req) { - ChainUpdateEdgeProcessorLocal proc(env); + ChainUpdateEdgeLocalProcessor proc(env); return proc.reverseRequest(req); } @@ -481,12 +528,12 @@ struct ChainUpdateEdgeTestHelper { // public: // explicit 
ChainResumeProcessorTestHelper(ChainResumeProcessor* proc) : proc_(proc) {} -// void setAddEdgeProc(ChainAddEdgesProcessorLocal* proc) { +// void setAddEdgeProc(ChainAddEdgesLocalProcessor* proc) { // proc_->addProc = proc; // } // // setUpdProc -// void setUpdProc(ChainUpdateEdgeProcessorLocal* proc) { +// void setUpdProc(ChainUpdateEdgeLocalProcessor* proc) { // proc_->updProc = proc; // } diff --git a/src/storage/test/ChainUpdateEdgeTest.cpp b/src/storage/test/ChainUpdateEdgeTest.cpp index 048bd0fb8b0..6249dac0bdf 100644 --- a/src/storage/test/ChainUpdateEdgeTest.cpp +++ b/src/storage/test/ChainUpdateEdgeTest.cpp @@ -20,9 +20,9 @@ #include "storage/test/QueryTestUtils.h" #include "storage/test/TestUtils.h" #include "storage/transaction/ChainAddEdgesGroupProcessor.h" -#include "storage/transaction/ChainAddEdgesProcessorLocal.h" +#include "storage/transaction/ChainAddEdgesLocalProcessor.h" #include "storage/transaction/ChainResumeProcessor.h" -#include "storage/transaction/ChainUpdateEdgeProcessorRemote.h" +#include "storage/transaction/ChainUpdateEdgeRemoteProcessor.h" #include "storage/transaction/ConsistUtil.h" namespace nebula { @@ -42,7 +42,7 @@ TEST(ChainUpdateEdgeTest, updateTest1) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); auto parts = cluster.getTotalParts(); @@ -73,7 +73,7 @@ TEST(ChainUpdateEdgeTest, updateTest2) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); auto parts = cluster.getTotalParts(); @@ -105,7 +105,7 @@ TEST(ChainUpdateEdgeTest, updateTest3) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* 
env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); auto parts = cluster.getTotalParts(); @@ -134,7 +134,7 @@ TEST(ChainUpdateEdgeTest, updateTest4) { mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); auto* env = cluster.storageEnv_.get(); - auto mClient = MetaClientTestUpdater::makeDefaultMetaClient(); + auto mClient = MetaClientTestUpdater::makeDefault(); env->metaClient_ = mClient.get(); auto parts = cluster.getTotalParts(); diff --git a/src/storage/transaction/ChainAddEdgesGroupProcessor.cpp b/src/storage/transaction/ChainAddEdgesGroupProcessor.cpp index 56323288c25..eb6c11f64c5 100644 --- a/src/storage/transaction/ChainAddEdgesGroupProcessor.cpp +++ b/src/storage/transaction/ChainAddEdgesGroupProcessor.cpp @@ -7,7 +7,7 @@ #include "storage/StorageFlags.h" #include "storage/mutate/AddEdgesProcessor.h" -#include "storage/transaction/ChainAddEdgesProcessorLocal.h" +#include "storage/transaction/ChainAddEdgesLocalProcessor.h" #include "storage/transaction/ConsistUtil.h" #include "storage/transaction/TransactionManager.h" @@ -23,7 +23,7 @@ void ChainAddEdgesGroupProcessor::process(const cpp2::AddEdgesRequest& req) { auto delegateProcess = [&](auto& item) { auto localPartId = item.first.first; - auto* proc = ChainAddEdgesProcessorLocal::instance(env_); + auto* proc = ChainAddEdgesLocalProcessor::instance(env_); proc->setRemotePartId(item.first.second); proc->getFuture().thenValue([=](auto&& resp) { auto code = resp.get_result().get_failed_parts().empty() diff --git a/src/storage/transaction/ChainAddEdgesProcessorLocal.cpp b/src/storage/transaction/ChainAddEdgesLocalProcessor.cpp similarity index 82% rename from src/storage/transaction/ChainAddEdgesProcessorLocal.cpp rename to src/storage/transaction/ChainAddEdgesLocalProcessor.cpp index 72e983302f1..0a3eed149db 100644 --- 
a/src/storage/transaction/ChainAddEdgesProcessorLocal.cpp +++ b/src/storage/transaction/ChainAddEdgesLocalProcessor.cpp @@ -3,7 +3,7 @@ * This source code is licensed under Apache 2.0 License. */ -#include "storage/transaction/ChainAddEdgesProcessorLocal.h" +#include "storage/transaction/ChainAddEdgesLocalProcessor.h" #include @@ -17,7 +17,7 @@ namespace nebula { namespace storage { -void ChainAddEdgesProcessorLocal::process(const cpp2::AddEdgesRequest& req) { +void ChainAddEdgesLocalProcessor::process(const cpp2::AddEdgesRequest& req) { if (!prepareRequest(req)) { finish(); return; @@ -31,7 +31,7 @@ void ChainAddEdgesProcessorLocal::process(const cpp2::AddEdgesRequest& req) { * 2. set mem lock * 3. write edge prime(key = edge prime, val = ) */ -folly::SemiFuture ChainAddEdgesProcessorLocal::prepareLocal() { +folly::SemiFuture ChainAddEdgesLocalProcessor::prepareLocal() { if (FLAGS_trace_toss) { uuid_ = ConsistUtil::strUUID(); readableEdgeDesc_ = makeReadableEdge(req_); @@ -73,7 +73,7 @@ folly::SemiFuture ChainAddEdgesProcessorLocal::prepareLocal() { return std::move(fut); } -folly::SemiFuture ChainAddEdgesProcessorLocal::processRemote(Code code) { +folly::SemiFuture ChainAddEdgesLocalProcessor::processRemote(Code code) { VLOG(1) << uuid_ << " prepareLocal(), code = " << apache::thrift::util::enumNameSafe(code); if (code != Code::SUCCEEDED) { return code; @@ -86,7 +86,7 @@ folly::SemiFuture ChainAddEdgesProcessorLocal::processRemote(Code code) { return std::move(fut); } -folly::SemiFuture ChainAddEdgesProcessorLocal::processLocal(Code code) { +folly::SemiFuture ChainAddEdgesLocalProcessor::processLocal(Code code) { if (FLAGS_trace_toss) { VLOG(1) << uuid_ << " processRemote(), code = " << apache::thrift::util::enumNameSafe(code); } @@ -103,9 +103,10 @@ folly::SemiFuture ChainAddEdgesProcessorLocal::processLocal(Code code) { code_ = code; } - if (!checkTerm(req_)) { - LOG(WARNING) << "E_OUTDATED_TERM"; - code_ = Code::E_OUTDATED_TERM; + auto currTerm = 
env_->txnMan_->getTerm(spaceId_, localPartId_); + if (currTerm.first != term_) { + LOG(WARNING) << "E_LEADER_CHANGED during prepare and commit local"; + code_ = Code::E_LEADER_CHANGED; } if (code == Code::E_RPC_FAILURE) { @@ -124,17 +125,17 @@ folly::SemiFuture ChainAddEdgesProcessorLocal::processLocal(Code code) { return code_; } -void ChainAddEdgesProcessorLocal::addUnfinishedEdge(ResumeType type) { +void ChainAddEdgesLocalProcessor::addUnfinishedEdge(ResumeType type) { if (lk_ != nullptr) { lk_->forceUnlock(); } - auto keys = sEdgeKey(req_); + auto keys = toStrKeys(req_); for (auto& key : keys) { env_->txnMan_->addPrime(spaceId_, key, type); } } -bool ChainAddEdgesProcessorLocal::prepareRequest(const cpp2::AddEdgesRequest& req) { +bool ChainAddEdgesLocalProcessor::prepareRequest(const cpp2::AddEdgesRequest& req) { CHECK_EQ(req.get_parts().size(), 1); req_ = req; spaceId_ = req_.get_space_id(); @@ -147,12 +148,12 @@ bool ChainAddEdgesProcessorLocal::prepareRequest(const cpp2::AddEdgesRequest& re } localPartId_ = req.get_parts().begin()->first; replaceNullWithDefaultValue(req_); - auto part = env_->kvstore_->part(spaceId_, localPartId_); - if (!nebula::ok(part)) { - pushResultCode(nebula::error(part), localPartId_); + + std::tie(term_, code_) = env_->txnMan_->getTerm(spaceId_, localPartId_); + if (code_ != Code::SUCCEEDED) { + LOG(INFO) << "get term failed"; return false; } - restrictTerm_ = (nebula::value(part))->termId(); auto vidLen = env_->schemaMan_->getSpaceVidLen(spaceId_); if (!vidLen.ok()) { @@ -165,7 +166,7 @@ bool ChainAddEdgesProcessorLocal::prepareRequest(const cpp2::AddEdgesRequest& re return true; } -folly::SemiFuture ChainAddEdgesProcessorLocal::forwardToDelegateProcessor() { +folly::SemiFuture ChainAddEdgesLocalProcessor::forwardToDelegateProcessor() { auto* proc = AddEdgesProcessor::instance(env_, nullptr); proc->consistOp_ = [&](kvstore::BatchHolder& a, std::vector* b) { callbackOfChainOp(a, b); @@ -198,7 +199,7 @@ folly::SemiFuture 
ChainAddEdgesProcessorLocal::forwardToDelegateProcessor( return std::move(fut); } -Code ChainAddEdgesProcessorLocal::extractRpcError(const cpp2::ExecResponse& resp) { +Code ChainAddEdgesLocalProcessor::extractRpcError(const cpp2::ExecResponse& resp) { Code ret = Code::SUCCEEDED; auto& respComn = resp.get_result(); for (auto& part : respComn.get_failed_parts()) { @@ -207,7 +208,7 @@ Code ChainAddEdgesProcessorLocal::extractRpcError(const cpp2::ExecResponse& resp return ret; } -void ChainAddEdgesProcessorLocal::doRpc(folly::Promise&& promise, +void ChainAddEdgesLocalProcessor::doRpc(folly::Promise&& promise, cpp2::AddEdgesRequest&& req, int retry) noexcept { if (retry > retryLimit_) { @@ -217,7 +218,7 @@ void ChainAddEdgesProcessorLocal::doRpc(folly::Promise&& promise, auto* iClient = env_->txnMan_->getInternalClient(); folly::Promise p; auto f = p.getFuture(); - iClient->chainAddEdges(req, restrictTerm_, edgeVer_, std::move(p)); + iClient->chainAddEdges(req, term_, edgeVer_, std::move(p)); std::move(f).thenTry([=, p = std::move(promise)](auto&& t) mutable { auto code = t.hasValue() ? 
t.value() : Code::E_RPC_FAILURE; @@ -233,7 +234,7 @@ void ChainAddEdgesProcessorLocal::doRpc(folly::Promise&& promise, }); } -void ChainAddEdgesProcessorLocal::callbackOfChainOp(kvstore::BatchHolder& batch, +void ChainAddEdgesLocalProcessor::callbackOfChainOp(kvstore::BatchHolder& batch, std::vector* pData) { if (pData != nullptr) { for (auto& kv : *pData) { @@ -248,7 +249,7 @@ void ChainAddEdgesProcessorLocal::callbackOfChainOp(kvstore::BatchHolder& batch, } } -folly::SemiFuture ChainAddEdgesProcessorLocal::abort() { +folly::SemiFuture ChainAddEdgesLocalProcessor::abort() { if (kvErased_.empty()) { return Code::SUCCEEDED; } @@ -279,7 +280,7 @@ folly::SemiFuture ChainAddEdgesProcessorLocal::abort() { return std::move(fut); } -std::vector ChainAddEdgesProcessorLocal::makePrime() { +std::vector ChainAddEdgesLocalProcessor::makePrime() { std::vector ret; for (auto& edge : req_.get_parts().begin()->second) { auto key = ConsistUtil::primeKey(spaceVidLen_, localPartId_, edge.get_key()); @@ -294,7 +295,7 @@ std::vector ChainAddEdgesProcessorLocal::makePrime() { return ret; } -std::vector ChainAddEdgesProcessorLocal::makeDoublePrime() { +std::vector ChainAddEdgesLocalProcessor::makeDoublePrime() { std::vector ret; for (auto& edge : req_.get_parts().begin()->second) { auto key = ConsistUtil::doublePrime(spaceVidLen_, localPartId_, edge.get_key()); @@ -309,7 +310,7 @@ std::vector ChainAddEdgesProcessorLocal::makeDoublePrime() { return ret; } -void ChainAddEdgesProcessorLocal::erasePrime() { +void ChainAddEdgesLocalProcessor::erasePrime() { auto fn = [&](const cpp2::NewEdge& edge) { auto key = ConsistUtil::primeKey(spaceVidLen_, localPartId_, edge.get_key()); return key; @@ -319,17 +320,7 @@ void ChainAddEdgesProcessorLocal::erasePrime() { } } -void ChainAddEdgesProcessorLocal::eraseDoublePrime() { - auto fn = [&](const cpp2::NewEdge& edge) { - auto key = ConsistUtil::doublePrime(spaceVidLen_, localPartId_, edge.get_key()); - return key; - }; - for (auto& edge : 
req_.get_parts().begin()->second) { - kvErased_.push_back(fn(edge)); - } -} - -bool ChainAddEdgesProcessorLocal::lockEdges(const cpp2::AddEdgesRequest& req) { +bool ChainAddEdgesLocalProcessor::lockEdges(const cpp2::AddEdgesRequest& req) { auto partId = req.get_parts().begin()->first; auto* lockCore = env_->txnMan_->getLockCore(req.get_space_id(), partId); if (!lockCore) { @@ -344,40 +335,7 @@ bool ChainAddEdgesProcessorLocal::lockEdges(const cpp2::AddEdgesRequest& req) { return lk_->isLocked(); } -// we need to check term at both remote phase and local commit -bool ChainAddEdgesProcessorLocal::checkTerm(const cpp2::AddEdgesRequest& req) { - auto space = req.get_space_id(); - auto partId = req.get_parts().begin()->first; - - auto part = env_->kvstore_->part(space, partId); - if (!nebula::ok(part)) { - pushResultCode(nebula::error(part), localPartId_); - return false; - } - auto curTerm = (nebula::value(part))->termId(); - if (restrictTerm_ != curTerm) { - VLOG(1) << folly::sformat( - "check term failed, restrictTerm_={}, currTerm={}", restrictTerm_, curTerm); - return false; - } - return true; -} - -// check if current edge is not newer than the one trying to resume. 
-// this function only take effect in resume mode -bool ChainAddEdgesProcessorLocal::checkVersion(const cpp2::AddEdgesRequest& req) { - auto part = req.get_parts().begin()->first; - auto sKeys = sEdgeKey(req); - auto currVer = ConsistUtil::getMultiEdgeVers(env_->kvstore_, spaceId_, part, sKeys); - for (auto i = 0U; i != currVer.size(); ++i) { - if (currVer[i] < resumedEdgeVer_) { - return false; - } - } - return true; -} - -std::vector ChainAddEdgesProcessorLocal::sEdgeKey(const cpp2::AddEdgesRequest& req) { +std::vector ChainAddEdgesLocalProcessor::toStrKeys(const cpp2::AddEdgesRequest& req) { std::vector ret; for (auto& edgesOfPart : req.get_parts()) { auto partId = edgesOfPart.first; @@ -388,7 +346,7 @@ std::vector ChainAddEdgesProcessorLocal::sEdgeKey(const cpp2::AddEd return ret; } -cpp2::AddEdgesRequest ChainAddEdgesProcessorLocal::reverseRequest( +cpp2::AddEdgesRequest ChainAddEdgesLocalProcessor::reverseRequest( const cpp2::AddEdgesRequest& req) { cpp2::AddEdgesRequest reversedRequest; for (auto& edgesOfPart : *req.parts_ref()) { @@ -398,20 +356,20 @@ cpp2::AddEdgesRequest ChainAddEdgesProcessorLocal::reverseRequest( ConsistUtil::reverseEdgeKeyInplace(*newEdgeRef.key_ref()); } } - reversedRequest.space_id_ref() = (req.get_space_id()); - reversedRequest.prop_names_ref() = (req.get_prop_names()); - reversedRequest.if_not_exists_ref() = (req.get_if_not_exists()); + reversedRequest.space_id_ref() = req.get_space_id(); + reversedRequest.prop_names_ref() = req.get_prop_names(); + reversedRequest.if_not_exists_ref() = req.get_if_not_exists(); return reversedRequest; } -void ChainAddEdgesProcessorLocal::finish() { +void ChainAddEdgesLocalProcessor::finish() { VLOG(1) << uuid_ << " commitLocal(), code_ = " << apache::thrift::util::enumNameSafe(code_); pushResultCode(code_, localPartId_); finished_.setValue(code_); onFinished(); } -cpp2::AddEdgesRequest ChainAddEdgesProcessorLocal::makeSingleEdgeRequest( +cpp2::AddEdgesRequest 
ChainAddEdgesLocalProcessor::makeSingleEdgeRequest( PartitionID partId, const cpp2::NewEdge& edge) { cpp2::AddEdgesRequest req; req.space_id_ref() = (req_.get_space_id()); @@ -425,7 +383,7 @@ cpp2::AddEdgesRequest ChainAddEdgesProcessorLocal::makeSingleEdgeRequest( return req; } -int64_t ChainAddEdgesProcessorLocal::toInt(const ::nebula::Value& val) { +int64_t ChainAddEdgesLocalProcessor::toInt(const ::nebula::Value& val) { if (spaceVidType_ == nebula::cpp2::PropertyType::FIXED_STRING) { auto str = val.toString(); if (str.size() < 3) { @@ -439,7 +397,7 @@ int64_t ChainAddEdgesProcessorLocal::toInt(const ::nebula::Value& val) { return 0; } -std::string ChainAddEdgesProcessorLocal::makeReadableEdge(const cpp2::AddEdgesRequest& req) { +std::string ChainAddEdgesLocalProcessor::makeReadableEdge(const cpp2::AddEdgesRequest& req) { if (req.get_parts().size() != 1) { LOG(INFO) << req.get_parts().size(); return ""; @@ -461,6 +419,16 @@ std::string ChainAddEdgesProcessorLocal::makeReadableEdge(const cpp2::AddEdgesRe return oss.str(); } +void ChainAddEdgesLocalProcessor::eraseDoublePrime() { + auto fn = [&](const cpp2::NewEdge& edge) { + auto key = ConsistUtil::doublePrime(spaceVidLen_, localPartId_, edge.get_key()); + return key; + }; + for (auto& edge : req_.get_parts().begin()->second) { + kvErased_.push_back(fn(edge)); + } +} + /*** consider the following case: * * create edge known(kdate datetime default datetime(), degree int); @@ -473,7 +441,7 @@ std::string ChainAddEdgesProcessorLocal::makeReadableEdge(const cpp2::AddEdgesRe * that's why we need to replace the inconsistency prone value * at the moment the request comes * */ -void ChainAddEdgesProcessorLocal::replaceNullWithDefaultValue(cpp2::AddEdgesRequest& req) { +void ChainAddEdgesLocalProcessor::replaceNullWithDefaultValue(cpp2::AddEdgesRequest& req) { auto& edgesOfPart = *req.parts_ref(); if (edgesOfPart.empty()) { return; diff --git a/src/storage/transaction/ChainAddEdgesProcessorLocal.h 
b/src/storage/transaction/ChainAddEdgesLocalProcessor.h similarity index 89% rename from src/storage/transaction/ChainAddEdgesProcessorLocal.h rename to src/storage/transaction/ChainAddEdgesLocalProcessor.h index 68333e7d210..06695e29677 100644 --- a/src/storage/transaction/ChainAddEdgesProcessorLocal.h +++ b/src/storage/transaction/ChainAddEdgesLocalProcessor.h @@ -14,15 +14,15 @@ namespace nebula { namespace storage { -class ChainAddEdgesProcessorLocal : public BaseProcessor, +class ChainAddEdgesLocalProcessor : public BaseProcessor, public ChainBaseProcessor { friend class ChainResumeProcessorTestHelper; // for test friendly public: - static ChainAddEdgesProcessorLocal* instance(StorageEnv* env) { - return new ChainAddEdgesProcessorLocal(env); + static ChainAddEdgesLocalProcessor* instance(StorageEnv* env) { + return new ChainAddEdgesLocalProcessor(env); } - virtual ~ChainAddEdgesProcessorLocal() = default; + virtual ~ChainAddEdgesLocalProcessor() = default; virtual void process(const cpp2::AddEdgesRequest& req); @@ -39,7 +39,7 @@ class ChainAddEdgesProcessorLocal : public BaseProcessor, void finish() override; protected: - explicit ChainAddEdgesProcessorLocal(StorageEnv* env) : BaseProcessor(env) {} + explicit ChainAddEdgesLocalProcessor(StorageEnv* env) : BaseProcessor(env) {} bool prepareRequest(const cpp2::AddEdgesRequest& req); @@ -53,10 +53,6 @@ class ChainAddEdgesProcessorLocal : public BaseProcessor, bool lockEdges(const cpp2::AddEdgesRequest& req); - bool checkTerm(const cpp2::AddEdgesRequest& req); - - bool checkVersion(const cpp2::AddEdgesRequest& req); - /** * @brief This is a call back function, to let AddEdgesProcessor so some * addition thing for chain operation @@ -68,7 +64,7 @@ class ChainAddEdgesProcessorLocal : public BaseProcessor, /** * @brief helper function to generate string form of keys of request */ - std::vector sEdgeKey(const cpp2::AddEdgesRequest& req); + std::vector toStrKeys(const cpp2::AddEdgesRequest& req); /** * @brief normally, 
the prime/double prime keys will be deleted at AddEdgeProcessor @@ -134,8 +130,9 @@ class ChainAddEdgesProcessorLocal : public BaseProcessor, cpp2::AddEdgesRequest req_; std::unique_ptr lk_{nullptr}; int retryLimit_{10}; - // need to restrict all the phase in the same term. - TermID restrictTerm_{-1}; + // term at prepareLocal, not allowed to change during execution + TermID term_{-1}; + // set to true when prime insert succeed // in processLocal(), we check this to determine if need to do abort() bool primeInserted_{false}; @@ -145,6 +142,7 @@ class ChainAddEdgesProcessorLocal : public BaseProcessor, folly::Optional edgeVer_{folly::none}; int64_t resumedEdgeVer_{-1}; + // for debug / trace purpose std::string uuid_; // for debug, edge "100"->"101" will print like 2231303022->2231303122 diff --git a/src/storage/transaction/ChainAddEdgesProcessorRemote.cpp b/src/storage/transaction/ChainAddEdgesRemoteProcessor.cpp similarity index 59% rename from src/storage/transaction/ChainAddEdgesProcessorRemote.cpp rename to src/storage/transaction/ChainAddEdgesRemoteProcessor.cpp index df2973c17cc..94dfce48417 100644 --- a/src/storage/transaction/ChainAddEdgesProcessorRemote.cpp +++ b/src/storage/transaction/ChainAddEdgesRemoteProcessor.cpp @@ -3,7 +3,7 @@ * This source code is licensed under Apache 2.0 License. 
*/ -#include "storage/transaction/ChainAddEdgesProcessorRemote.h" +#include "storage/transaction/ChainAddEdgesRemoteProcessor.h" #include "storage/mutate/AddEdgesProcessor.h" #include "storage/transaction/ConsistUtil.h" @@ -12,22 +12,22 @@ namespace nebula { namespace storage { -void ChainAddEdgesProcessorRemote::process(const cpp2::ChainAddEdgesRequest& req) { - if (FLAGS_trace_toss) { - uuid_ = ConsistUtil::strUUID(); - } - VLOG(1) << uuid_ << ConsistUtil::dumpParts(req.get_parts()); - auto partId = req.get_parts().begin()->first; +void ChainAddEdgesRemoteProcessor::process(const cpp2::ChainAddEdgesRequest& req) { + uuid_ = ConsistUtil::strUUID(); + auto spaceId = req.get_space_id(); + auto edgeKey = req.get_parts().begin()->second.back().key(); + auto localPartId = NebulaKeyUtils::getPart(edgeKey->dst_ref()->getStr()); + auto localTerm = req.get_term(); + auto remotePartId = req.get_parts().begin()->first; auto code = nebula::cpp2::ErrorCode::SUCCEEDED; do { - if (!checkTerm(req)) { - LOG(WARNING) << uuid_ << " invalid term, incoming part " << partId + if (!env_->txnMan_->checkTermFromCache(spaceId, localPartId, localTerm)) { + LOG(WARNING) << uuid_ << " invalid term, incoming part " << remotePartId << ", term = " << req.get_term(); code = nebula::cpp2::ErrorCode::E_OUTDATED_TERM; break; } - auto spaceId = req.get_space_id(); auto vIdLen = env_->metaClient_->getSpaceVidLen(spaceId); if (!vIdLen.ok()) { code = Code::E_INVALID_SPACEVIDLEN; @@ -45,19 +45,14 @@ void ChainAddEdgesProcessorRemote::process(const cpp2::ChainAddEdgesRequest& req LOG(INFO) << uuid_ << ", key = " << folly::hexlify(key); } } - forwardRequest(req); + commit(req); } else { - pushResultCode(code, partId); + pushResultCode(code, remotePartId); onFinished(); } } -bool ChainAddEdgesProcessorRemote::checkTerm(const cpp2::ChainAddEdgesRequest& req) { - auto partId = req.get_parts().begin()->first; - return env_->txnMan_->checkTerm(req.get_space_id(), partId, req.get_term()); -} - -void 
ChainAddEdgesProcessorRemote::forwardRequest(const cpp2::ChainAddEdgesRequest& req) { +void ChainAddEdgesRemoteProcessor::commit(const cpp2::ChainAddEdgesRequest& req) { auto spaceId = req.get_space_id(); auto* proc = AddEdgesProcessor::instance(env_); proc->getFuture().thenValue([=](auto&& resp) { @@ -73,25 +68,7 @@ void ChainAddEdgesProcessorRemote::forwardRequest(const cpp2::ChainAddEdgesReque proc->process(ConsistUtil::toAddEdgesRequest(req)); } -bool ChainAddEdgesProcessorRemote::checkVersion(const cpp2::ChainAddEdgesRequest& req) { - if (!req.edge_version_ref()) { - return true; - } - auto spaceId = req.get_space_id(); - auto partId = req.get_parts().begin()->first; - auto strEdgeKeys = getStrEdgeKeys(req); - auto currVer = ConsistUtil::getMultiEdgeVers(env_->kvstore_, spaceId, partId, strEdgeKeys); - auto edgeVer = *req.edge_version_ref(); - for (auto i = 0U; i != currVer.size(); ++i) { - if (currVer[i] > edgeVer) { - LOG(WARNING) << "currVer[i]=" << currVer[i] << ", edgeVer=" << edgeVer; - return false; - } - } - return true; -} - -std::vector ChainAddEdgesProcessorRemote::getStrEdgeKeys( +std::vector ChainAddEdgesRemoteProcessor::getStrEdgeKeys( const cpp2::ChainAddEdgesRequest& req) { std::vector ret; for (auto& edgesOfPart : req.get_parts()) { diff --git a/src/storage/transaction/ChainAddEdgesProcessorRemote.h b/src/storage/transaction/ChainAddEdgesRemoteProcessor.h similarity index 58% rename from src/storage/transaction/ChainAddEdgesProcessorRemote.h rename to src/storage/transaction/ChainAddEdgesRemoteProcessor.h index 6f67f202bb7..56c8c2bf6fb 100644 --- a/src/storage/transaction/ChainAddEdgesProcessorRemote.h +++ b/src/storage/transaction/ChainAddEdgesRemoteProcessor.h @@ -11,22 +11,18 @@ namespace nebula { namespace storage { -class ChainAddEdgesProcessorRemote : public BaseProcessor { +class ChainAddEdgesRemoteProcessor : public BaseProcessor { public: - static ChainAddEdgesProcessorRemote* instance(StorageEnv* env) { - return new 
ChainAddEdgesProcessorRemote(env); + static ChainAddEdgesRemoteProcessor* instance(StorageEnv* env) { + return new ChainAddEdgesRemoteProcessor(env); } void process(const cpp2::ChainAddEdgesRequest& req); private: - explicit ChainAddEdgesProcessorRemote(StorageEnv* env) : BaseProcessor(env) {} + explicit ChainAddEdgesRemoteProcessor(StorageEnv* env) : BaseProcessor(env) {} - bool checkTerm(const cpp2::ChainAddEdgesRequest& req); - - bool checkVersion(const cpp2::ChainAddEdgesRequest& req); - - void forwardRequest(const cpp2::ChainAddEdgesRequest& req); + void commit(const cpp2::ChainAddEdgesRequest& req); std::vector getStrEdgeKeys(const cpp2::ChainAddEdgesRequest& req); diff --git a/src/storage/transaction/ChainDeleteEdgesGroupProcessor.cpp b/src/storage/transaction/ChainDeleteEdgesGroupProcessor.cpp new file mode 100644 index 00000000000..760505a41cb --- /dev/null +++ b/src/storage/transaction/ChainDeleteEdgesGroupProcessor.cpp @@ -0,0 +1,75 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#include "storage/transaction/ChainDeleteEdgesGroupProcessor.h" + +#include "storage/StorageFlags.h" +#include "storage/transaction/ChainDeleteEdgesLocalProcessor.h" +#include "storage/transaction/ConsistUtil.h" +#include "storage/transaction/TransactionManager.h" + +namespace nebula { +namespace storage { +using ChainID = std::pair; +using SplitedRequest = std::unordered_map; + +void ChainDeleteEdgesGroupProcessor::process(const cpp2::DeleteEdgesRequest& req) { + auto spaceId = req.get_space_id(); + auto localPartId = req.get_parts().begin()->first; + auto stSplitRequest = splitRequest(req); + if (!stSplitRequest.ok()) { + // TODO(liuyu): change this when error code done + pushResultCode(Code::E_PART_NOT_FOUND, localPartId); + onFinished(); + } + + SplitedRequest splitedRequest = stSplitRequest.value(); + + callingNum_ = splitedRequest.size(); + + auto fnSplit = [&](auto& request) { + auto* proc = ChainDeleteEdgesLocalProcessor::instance(env_); + proc->getFuture().thenValue([=](auto&& resp) { + auto code = resp.get_result().get_failed_parts().empty() + ? 
nebula::cpp2::ErrorCode::SUCCEEDED + : resp.get_result().get_failed_parts().begin()->get_code(); + handleAsync(spaceId, localPartId, code); + }); + proc->process(request.second); + }; + + std::for_each(splitedRequest.begin(), splitedRequest.end(), fnSplit); +} + +StatusOr ChainDeleteEdgesGroupProcessor::splitRequest( + const cpp2::DeleteEdgesRequest& req) { + SplitedRequest ret; + auto numOfPart = env_->metaClient_->partsNum(req.get_space_id()); + if (!numOfPart.ok()) { + return numOfPart.status(); + } + auto partNum = numOfPart.value(); + + for (auto& onePart : req.get_parts()) { + auto localPartId = onePart.first; + for (auto& edgeKey : onePart.second) { + auto& remoteVid = edgeKey.get_dst().getStr(); + auto remotePartId = env_->metaClient_->partId(partNum, remoteVid); + auto key = std::make_pair(localPartId, remotePartId); + if (ret.count(key) == 0) { + ret[key].space_id_ref() = req.get_space_id(); + if (req.common_ref()) { + ret[key].common_ref() = req.common_ref().value(); + } + } + ret[key].parts_ref().value()[localPartId].emplace_back(edgeKey); + } + } + + return ret; +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/transaction/ChainDeleteEdgesGroupProcessor.h b/src/storage/transaction/ChainDeleteEdgesGroupProcessor.h new file mode 100644 index 00000000000..ea195b700fc --- /dev/null +++ b/src/storage/transaction/ChainDeleteEdgesGroupProcessor.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#pragma once + +#include "storage/BaseProcessor.h" +#include "storage/transaction/ChainBaseProcessor.h" +#include "storage/transaction/ConsistUtil.h" +#include "storage/transaction/TransactionManager.h" + +namespace nebula { +namespace storage { + +class ChainDeleteEdgesGroupProcessor : public BaseProcessor { + public: + static ChainDeleteEdgesGroupProcessor* instance(StorageEnv* env) { + return new ChainDeleteEdgesGroupProcessor(env); + } + + void process(const cpp2::DeleteEdgesRequest& req); + + protected: + explicit ChainDeleteEdgesGroupProcessor(StorageEnv* env) + : BaseProcessor(env) {} + + using ChainID = std::pair; + using SplitedRequest = std::unordered_map; + StatusOr splitRequest(const cpp2::DeleteEdgesRequest& src); +}; + +} // namespace storage +} // namespace nebula diff --git a/src/storage/transaction/ChainDeleteEdgesLocalProcessor.cpp b/src/storage/transaction/ChainDeleteEdgesLocalProcessor.cpp new file mode 100644 index 00000000000..f9c9e1951b8 --- /dev/null +++ b/src/storage/transaction/ChainDeleteEdgesLocalProcessor.cpp @@ -0,0 +1,360 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#include "storage/transaction/ChainDeleteEdgesLocalProcessor.h" + +#include + +#include "common/utils/DefaultValueContext.h" +#include "kvstore/Part.h" +#include "storage/StorageFlags.h" +#include "storage/mutate/DeleteEdgesProcessor.h" +#include "storage/transaction/ConsistUtil.h" +#include "storage/transaction/TransactionManager.h" + +namespace nebula { +namespace storage { + +void ChainDeleteEdgesLocalProcessor::process(const cpp2::DeleteEdgesRequest& req) { + auto rc = checkRequest(req); + if (rc != Code::SUCCEEDED) { + pushResultCode(rc, localPartId_); + finish(); + return; + } + env_->txnMan_->addChainTask(this); +} + +folly::SemiFuture ChainDeleteEdgesLocalProcessor::prepareLocal() { + txnId_ = ConsistUtil::strUUID(); + VLOG(1) << txnId_ << " prepareLocal(): " << DeleteEdgesRequestHelper::explain(req_); + + if (!lockEdges(req_)) { + return Code::E_WRITE_WRITE_CONFLICT; + } + + primes_ = makePrime(req_); + + std::vector primes(primes_); + + auto [pro, fut] = folly::makePromiseContract(); + env_->kvstore_->asyncMultiPut( + spaceId_, localPartId_, std::move(primes), [p = std::move(pro), this](auto rc) mutable { + if (rc == nebula::cpp2::ErrorCode::SUCCEEDED) { + setPrime_ = true; + } else { + LOG(WARNING) << txnId_ << "kvstore err: " << apache::thrift::util::enumNameSafe(rc); + } + + p.setValue(rc); + }); + return std::move(fut); +} + +folly::SemiFuture ChainDeleteEdgesLocalProcessor::processRemote(Code code) { + VLOG(1) << txnId_ << " prepareLocal(), code = " << apache::thrift::util::enumNameSafe(code); + if (code != Code::SUCCEEDED) { + return code; + } + DCHECK_EQ(req_.get_parts().size(), 1); + auto reversedRequest = reverseRequest(req_); + DCHECK_EQ(reversedRequest.get_parts().size(), 1); + auto [pro, fut] = folly::makePromiseContract(); + doRpc(std::move(pro), std::move(reversedRequest)); + return std::move(fut); +} + +folly::SemiFuture ChainDeleteEdgesLocalProcessor::processLocal(Code code) { + VLOG(1) << txnId_ << " processRemote(), code = " << 
apache::thrift::util::enumNameSafe(code); + + bool remoteFailed{false}; + if (code == Code::SUCCEEDED) { + // do nothing + } else if (code == Code::E_RPC_FAILURE) { + code_ = Code::SUCCEEDED; + } else { + code_ = code; + remoteFailed = true; + } + + auto [currTerm, suc] = env_->txnMan_->getTerm(spaceId_, localPartId_); + if (currTerm != term_) { + LOG(WARNING) << "E_LEADER_CHANGED during prepare and commit local"; + code_ = Code::E_LEADER_CHANGED; + } + + if (code == Code::E_RPC_FAILURE) { + for (auto& kv : primes_) { + auto key = + ConsistUtil::doublePrimeTable().append(kv.first.substr(ConsistUtil::primeTable().size())); + setDoublePrime_ = true; + doublePrimes_.emplace_back(key, kv.second); + } + reportFailed(ResumeType::RESUME_REMOTE); + } + + if (code_ == Code::SUCCEEDED) { + return commitLocal(); + } else { + if (setPrime_ && remoteFailed) { + return abort(); + } + } + + return code_; +} + +void ChainDeleteEdgesLocalProcessor::reportFailed(ResumeType type) { + if (lk_ != nullptr) { + lk_->forceUnlock(); + } + for (auto& edgesOfPart : req_.get_parts()) { + auto partId = edgesOfPart.first; + for (auto& key : edgesOfPart.second) { + auto strKey = ConsistUtil::edgeKey(spaceVidLen_, partId, key); + env_->txnMan_->addPrime(spaceId_, strKey, type); + } + } +} + +std::vector ChainDeleteEdgesLocalProcessor::makePrime( + const cpp2::DeleteEdgesRequest& req) { + std::vector ret; + std::vector requests; + + for (auto& partOfKeys : req.get_parts()) { + auto partId = partOfKeys.first; + for (auto& key : partOfKeys.second) { + requests.emplace_back(); + requests.back().space_id_ref() = req_.get_space_id(); + std::unordered_map> parts; + parts[partId].emplace_back(key); + requests.back().parts_ref() = parts; + requests.back().common_ref().copy_from(req_.common_ref()); + } + } + + for (auto& singleReq : requests) { + std::string val; + apache::thrift::CompactSerializer::serialize(singleReq, &val); + val += ConsistUtil::deleteIdentifier(); + auto partId = 
singleReq.get_parts().begin()->first; + auto& edgeKey = singleReq.get_parts().begin()->second.back(); + auto key = ConsistUtil::primeTable(); + key += ConsistUtil::edgeKey(spaceVidLen_, partId, edgeKey); + ret.emplace_back(std::make_pair(key, val)); + } + return ret; +} + +Code ChainDeleteEdgesLocalProcessor::checkRequest(const cpp2::DeleteEdgesRequest& req) { + CHECK_EQ(req.get_parts().size(), 1); + req_ = req; + DCHECK(!req_.get_parts().empty()); + spaceId_ = req_.get_space_id(); + + auto vidType = env_->metaClient_->getSpaceVidType(spaceId_); + if (!vidType.ok()) { + LOG(WARNING) << "can't get vidType, spaceId_ = " << spaceId_; + return Code::E_SPACE_NOT_FOUND; + } else { + spaceVidType_ = vidType.value(); + } + localPartId_ = req.get_parts().begin()->first; + auto part = env_->kvstore_->part(spaceId_, localPartId_); + if (!nebula::ok(part)) { + pushResultCode(nebula::error(part), localPartId_); + return Code::E_SPACE_NOT_FOUND; + } + auto stPartNum = env_->metaClient_->partsNum(spaceId_); + if (!stPartNum.ok()) { + pushResultCode(nebula::error(part), localPartId_); + return Code::E_PART_NOT_FOUND; + } + + auto& oneEdgeKey = req.get_parts().begin()->second.front(); + auto& remoteVid = oneEdgeKey.get_dst().getStr(); + remotePartId_ = env_->metaClient_->partId(stPartNum.value(), remoteVid); + + term_ = (nebula::value(part))->termId(); + + auto vidLen = env_->schemaMan_->getSpaceVidLen(spaceId_); + if (!vidLen.ok()) { + LOG(ERROR) << "getSpaceVidLen failed, spaceId_: " << spaceId_ + << ", status: " << vidLen.status(); + return Code::E_INVALID_SPACEVIDLEN; + } + spaceVidLen_ = vidLen.value(); + return Code::SUCCEEDED; +} + +folly::SemiFuture ChainDeleteEdgesLocalProcessor::commitLocal() { + auto* proc = DeleteEdgesProcessor::instance(env_, nullptr); + auto fn = std::bind(&ChainDeleteEdgesLocalProcessor::hookFunc, this, std::placeholders::_1); + proc->setHookFunc(fn); + + auto futProc = proc->getFuture(); + auto [pro, fut] = folly::makePromiseContract(); + 
std::move(futProc).thenValue([&, p = std::move(pro)](auto&& resp) mutable { + auto rc = ConsistUtil::getErrorCode(resp); + VLOG(1) << txnId_ << " commitLocal() " << apache::thrift::util::enumNameSafe(rc); + if (rc == Code::SUCCEEDED) { + // do nothing + } else { + reportFailed(ResumeType::RESUME_CHAIN); + } + p.setValue(rc); + }); + proc->process(req_); + return std::move(fut); +} + +void ChainDeleteEdgesLocalProcessor::doRpc(folly::Promise&& promise, + cpp2::DeleteEdgesRequest&& req, + int retry) noexcept { + if (retry > retryLimit_) { + promise.setValue(Code::E_LEADER_CHANGED); + return; + } + auto* iClient = env_->txnMan_->getInternalClient(); + folly::Promise p; + auto f = p.getFuture(); + iClient->chainDeleteEdges(req, txnId_, term_, std::move(p)); + + std::move(f).thenTry([=, p = std::move(promise)](auto&& t) mutable { + auto code = t.hasValue() ? t.value() : Code::E_RPC_FAILURE; + switch (code) { + case Code::E_LEADER_CHANGED: + doRpc(std::move(p), std::move(req), ++retry); + break; + default: + p.setValue(code); + break; + } + return code; + }); +} + +/** + * @brief input para may be varies according to if the edge has index + * if yes, DeleteEdgeProcessor will use batch, + * else it will use a simple vector of keys + * |-------------------|--------------------------|------------------------------------| + * | | input keys | input a batch | + * |-------------------|--------------------------|------------------------------------| + * | double prime (N) | del edge, prime keys | bat.remove(prime) | + * | double prime (Y) | transform to batchHolder | bat.remove(prime) & put(double p.) 
| + */ +void ChainDeleteEdgesLocalProcessor::hookFunc(HookFuncPara& para) { + std::string ret; + + if (setDoublePrime_) { + if (para.keys) { + kvstore::BatchHolder bat; + for (auto& edgeKey : *para.keys.value()) { + bat.remove(std::string(edgeKey)); + } + for (auto& kv : primes_) { + bat.remove(std::string(kv.first)); + } + for (auto& kv : doublePrimes_) { + bat.put(std::string(kv.first), std::string(kv.second)); + } + para.result.emplace(kvstore::encodeBatchValue(bat.getBatch())); + } else if (para.batch) { + for (auto& kv : primes_) { + para.batch.value()->remove(std::string(kv.first)); + } + for (auto& kv : doublePrimes_) { + para.batch.value()->put(std::string(kv.first), std::string(kv.second)); + } + } else { + LOG(ERROR) << "not supposed runs here"; + } + } else { // there is no double prime + if (para.keys) { + for (auto& kv : primes_) { + para.keys.value()->emplace_back(kv.first); + } + } else if (para.batch) { + for (auto& kv : primes_) { + para.batch.value()->remove(std::string(kv.first)); + } + } else { + LOG(ERROR) << "not supposed runs here"; + } + } +} + +folly::SemiFuture ChainDeleteEdgesLocalProcessor::abort() { + if (setPrime_) { + return Code::SUCCEEDED; + } + + std::vector keyRemoved; + for (auto& key : primes_) { + keyRemoved.emplace_back(key.first); + } + + auto [pro, fut] = folly::makePromiseContract(); + env_->kvstore_->asyncMultiRemove( + req_.get_space_id(), + localPartId_, + std::move(keyRemoved), + [p = std::move(pro), this](auto rc) mutable { + VLOG(1) << txnId_ << " abort()=" << apache::thrift::util::enumNameSafe(rc); + if (rc == Code::SUCCEEDED) { + // do nothing + } else { + reportFailed(ResumeType::RESUME_CHAIN); + } + p.setValue(rc); + }); + return std::move(fut); +} + +bool ChainDeleteEdgesLocalProcessor::lockEdges(const cpp2::DeleteEdgesRequest& req) { + auto* lockCore = env_->txnMan_->getLockCore(req.get_space_id(), localPartId_); + if (!lockCore) { + VLOG(1) << txnId_ << "get lock failed."; + return false; + } + + std::vector 
keys; + for (auto& key : req.get_parts().begin()->second) { + auto eKey = ConsistUtil::edgeKey(spaceVidLen_, localPartId_, key); + keys.emplace_back(std::move(eKey)); + } + bool dedup = true; + lk_ = std::make_unique(lockCore, keys, dedup); + if (!lk_->isLocked()) { + VLOG(1) << txnId_ << " conflict " << ConsistUtil::readableKey(spaceVidLen_, lk_->conflictKey()); + } + return lk_->isLocked(); +} + +cpp2::DeleteEdgesRequest ChainDeleteEdgesLocalProcessor::reverseRequest( + const cpp2::DeleteEdgesRequest& req) { + cpp2::DeleteEdgesRequest reversedRequest; + reversedRequest.space_id_ref() = req.get_space_id(); + reversedRequest.common_ref().copy_from(req.common_ref()); + for (auto& keysOfPart : *req.parts_ref()) { + for (auto& edgeKey : keysOfPart.second) { + auto rEdgeKey = ConsistUtil::reverseEdgeKey(edgeKey); + (*reversedRequest.parts_ref())[remotePartId_].emplace_back(rEdgeKey); + } + } + return reversedRequest; +} + +void ChainDeleteEdgesLocalProcessor::finish() { + VLOG(1) << txnId_ << " commitLocal(), code_ = " << apache::thrift::util::enumNameSafe(code_); + pushResultCode(code_, localPartId_); + finished_.setValue(code_); + onFinished(); +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/transaction/ChainDeleteEdgesLocalProcessor.h b/src/storage/transaction/ChainDeleteEdgesLocalProcessor.h new file mode 100644 index 00000000000..2c4f467b3d3 --- /dev/null +++ b/src/storage/transaction/ChainDeleteEdgesLocalProcessor.h @@ -0,0 +1,118 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#pragma once + +#include "interface/gen-cpp2/common_types.h" +#include "interface/gen-cpp2/storage_types.h" +#include "kvstore/LogEncoder.h" +#include "storage/BaseProcessor.h" +#include "storage/transaction/ConsistUtil.h" +#include "storage/transaction/TransactionManager.h" + +namespace nebula { +namespace storage { + +class ChainDeleteEdgesLocalProcessor : public BaseProcessor, + public ChainBaseProcessor { + friend class ChainResumeProcessorTestHelper; // for test friendly + public: + static ChainDeleteEdgesLocalProcessor* instance(StorageEnv* env) { + return new ChainDeleteEdgesLocalProcessor(env); + } + + virtual ~ChainDeleteEdgesLocalProcessor() = default; + + virtual void process(const cpp2::DeleteEdgesRequest& req); + + folly::SemiFuture prepareLocal() override; + + folly::SemiFuture processRemote(Code code) override; + + folly::SemiFuture processLocal(Code code) override; + + void finish() override; + + protected: + explicit ChainDeleteEdgesLocalProcessor(StorageEnv* env) + : BaseProcessor(env) {} + + Code checkRequest(const cpp2::DeleteEdgesRequest& req); + + void doRpc(folly::Promise&& pro, cpp2::DeleteEdgesRequest&& req, int retry = 0) noexcept; + + bool lockEdges(const cpp2::DeleteEdgesRequest& req); + + /** + * @brief This is a hook function, inject to DeleteEdgesProcessor, + * called before DeleteEdgesProcessor ready to commit something + */ + void hookFunc(HookFuncPara& para); + + /** + * @brief if remote side explicit reported faild, called this + */ + folly::SemiFuture abort(); + + /** + * @brief call DeleteEdgesProcessor to do the real thing + */ + folly::SemiFuture commitLocal(); + + std::vector makePrime(const cpp2::DeleteEdgesRequest& req); + + /** + * @brief generate reversed request of the incoming req. 
+ */ + cpp2::DeleteEdgesRequest reverseRequest(const cpp2::DeleteEdgesRequest& req); + + /** + * @brief wrapper function to get error code from ExecResponse + */ + Code extractRpcError(const cpp2::ExecResponse& resp); + + /** + * @brief if any operation failed or can not determined(RPC error) + * call this to leave a record in transaction manager + * the record can be scanned by the background resume thread + * then will do fail over logic + */ + void reportFailed(ResumeType type); + + protected: + GraphSpaceID spaceId_; + PartitionID localPartId_; + PartitionID remotePartId_; + cpp2::DeleteEdgesRequest req_; + std::unique_ptr lk_{nullptr}; + int retryLimit_{10}; + /** + * @brief this is the term when prepare called, + * and must be kept during the whole execution + * if not, will return OUT_OF_TERM ERROR + */ + TermID term_{-1}; + + // set to true when prime insert succeed + // in processLocal(), we check this to determine if need to do abort() + bool setPrime_{false}; + + bool setDoublePrime_{false}; + + std::vector primes_; + + std::vector doublePrimes_; + + std::string txnId_; + + ::nebula::cpp2::PropertyType spaceVidType_{::nebula::cpp2::PropertyType::UNKNOWN}; + + // for debug, edge "100"->"101" will print like 2231303022->2231303122 + // which is hard to recognize. Transform to human readable format + std::string readableEdgeDesc_; +}; + +} // namespace storage +} // namespace nebula diff --git a/src/storage/transaction/ChainDeleteEdgesRemoteProcessor.cpp b/src/storage/transaction/ChainDeleteEdgesRemoteProcessor.cpp new file mode 100644 index 00000000000..0311019f23f --- /dev/null +++ b/src/storage/transaction/ChainDeleteEdgesRemoteProcessor.cpp @@ -0,0 +1,72 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#include "storage/transaction/ChainDeleteEdgesRemoteProcessor.h" + +#include "storage/StorageFlags.h" +#include "storage/mutate/DeleteEdgesProcessor.h" +#include "storage/transaction/ConsistUtil.h" +#include "storage/transaction/TransactionManager.h" + +namespace nebula { +namespace storage { + +void ChainDeleteEdgesRemoteProcessor::process(const cpp2::ChainDeleteEdgesRequest& chianReq) { + txnId_ = chianReq.get_txn_id(); + cpp2::DeleteEdgesRequest req = DeleteEdgesRequestHelper::toDeleteEdgesRequest(chianReq); + auto term = chianReq.get_term(); + txnId_ = chianReq.get_txn_id(); + auto partId = req.get_parts().begin()->first; + auto code = nebula::cpp2::ErrorCode::SUCCEEDED; + do { + auto spaceId = req.get_space_id(); + if (!env_->txnMan_->checkTermFromCache(spaceId, partId, term)) { + LOG(WARNING) << txnId_ << "outdate term, incoming part " << partId << ", term = " << term; + code = nebula::cpp2::ErrorCode::E_OUTDATED_TERM; + break; + } + + auto vIdLen = env_->metaClient_->getSpaceVidLen(spaceId); + if (!vIdLen.ok()) { + code = Code::E_INVALID_SPACEVIDLEN; + break; + } else { + spaceVidLen_ = vIdLen.value(); + } + } while (0); + + if (code == nebula::cpp2::ErrorCode::SUCCEEDED) { + if (FLAGS_trace_toss) { + // need to do this after set spaceVidLen_ + auto keys = ConsistUtil::toStrKeys(req, spaceVidLen_); + for (auto& key : keys) { + VLOG(1) << txnId_ << ", key = " << folly::hexlify(key); + } + } + commit(req); + } else { + pushResultCode(code, partId); + onFinished(); + } +} + +void ChainDeleteEdgesRemoteProcessor::commit(const cpp2::DeleteEdgesRequest& req) { + auto spaceId = req.get_space_id(); + auto* proc = DeleteEdgesProcessor::instance(env_); + proc->getFuture().thenValue([=](auto&& resp) { + Code rc = Code::SUCCEEDED; + for (auto& part : resp.get_result().get_failed_parts()) { + rc = part.code; + handleErrorCode(part.code, spaceId, part.get_part_id()); + } + VLOG(1) << txnId_ << " " << apache::thrift::util::enumNameSafe(rc); + this->result_ = 
resp.get_result(); + this->onFinished(); + }); + proc->process(req); +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/transaction/ChainDeleteEdgesRemoteProcessor.h b/src/storage/transaction/ChainDeleteEdgesRemoteProcessor.h new file mode 100644 index 00000000000..1c142b6ac59 --- /dev/null +++ b/src/storage/transaction/ChainDeleteEdgesRemoteProcessor.h @@ -0,0 +1,35 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ + +#pragma once + +#include "storage/BaseProcessor.h" +#include "storage/transaction/ChainBaseProcessor.h" + +namespace nebula { +namespace storage { + +class ChainDeleteEdgesRemoteProcessor : public BaseProcessor { + public: + static ChainDeleteEdgesRemoteProcessor* instance(StorageEnv* env) { + return new ChainDeleteEdgesRemoteProcessor(env); + } + + void process(const cpp2::ChainDeleteEdgesRequest& req); + + private: + explicit ChainDeleteEdgesRemoteProcessor(StorageEnv* env) + : BaseProcessor(env) {} + + void commit(const cpp2::DeleteEdgesRequest& req); + + cpp2::DeleteEdgesRequest toDeleteEdgesRequest(const cpp2::ChainDeleteEdgesRequest& req); + + private: + std::string txnId_; +}; + +} // namespace storage +} // namespace nebula diff --git a/src/storage/transaction/ChainDeleteEdgesResumeProcessor.cpp b/src/storage/transaction/ChainDeleteEdgesResumeProcessor.cpp new file mode 100644 index 00000000000..19698798cff --- /dev/null +++ b/src/storage/transaction/ChainDeleteEdgesResumeProcessor.cpp @@ -0,0 +1,59 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#include "storage/transaction/ChainDeleteEdgesResumeProcessor.h" + +#include +#include + +#include "storage/StorageFlags.h" + +namespace nebula { +namespace storage { + +ChainDeleteEdgesResumeProcessor::ChainDeleteEdgesResumeProcessor(StorageEnv* env, + const std::string& val) + : ChainDeleteEdgesLocalProcessor(env) { + req_ = DeleteEdgesRequestHelper::parseDeleteEdgesRequest(val); + + VLOG(1) << "explain req_: " << DeleteEdgesRequestHelper::explain(req_); +} + +folly::SemiFuture ChainDeleteEdgesResumeProcessor::prepareLocal() { + code_ = checkRequest(req_); + primes_ = makePrime(req_); + setPrime_ = true; + return code_; +} + +folly::SemiFuture ChainDeleteEdgesResumeProcessor::processRemote(Code code) { + VLOG(1) << txnId_ << " prepareLocal() " << apache::thrift::util::enumNameSafe(code); + return ChainDeleteEdgesLocalProcessor::processRemote(code); +} + +folly::SemiFuture ChainDeleteEdgesResumeProcessor::processLocal(Code code) { + VLOG(1) << txnId_ << " processRemote() " << apache::thrift::util::enumNameSafe(code); + setErrorCode(code); + + if (code == Code::E_RPC_FAILURE) { + for (auto& kv : primes_) { + auto key = + ConsistUtil::doublePrimeTable().append(kv.first.substr(ConsistUtil::primeTable().size())); + doublePrimes_.emplace_back(key, kv.second); + } + } + + if (code == Code::E_RPC_FAILURE || code == Code::SUCCEEDED) { + // if there are something wrong other than rpc failure + // we need to keep the resume retry(by not remove those prime key) + code_ = commitLocal().get(); + return code_; + } + + return code_; +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/transaction/ChainDeleteEdgesResumeProcessor.h b/src/storage/transaction/ChainDeleteEdgesResumeProcessor.h new file mode 100644 index 00000000000..d7ff4060260 --- /dev/null +++ b/src/storage/transaction/ChainDeleteEdgesResumeProcessor.h @@ -0,0 +1,32 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. 
+ * + * This source code is licensed under Apache 2.0 License. + */ + +#pragma once + +#include "storage/transaction/ChainDeleteEdgesLocalProcessor.h" + +namespace nebula { +namespace storage { + +class ChainDeleteEdgesResumeProcessor : public ChainDeleteEdgesLocalProcessor { + public: + static ChainDeleteEdgesResumeProcessor* instance(StorageEnv* env, const std::string& val) { + return new ChainDeleteEdgesResumeProcessor(env, val); + } + + folly::SemiFuture prepareLocal() override; + + folly::SemiFuture processRemote(nebula::cpp2::ErrorCode code) override; + + folly::SemiFuture processLocal(nebula::cpp2::ErrorCode code) override; + + virtual ~ChainDeleteEdgesResumeProcessor() = default; + + protected: + ChainDeleteEdgesResumeProcessor(StorageEnv* env, const std::string& val); +}; + +} // namespace storage +} // namespace nebula diff --git a/src/storage/transaction/ChainDeleteEdgesResumeRemoteProcessor.cpp b/src/storage/transaction/ChainDeleteEdgesResumeRemoteProcessor.cpp new file mode 100644 index 00000000000..a0e0cdbc84f --- /dev/null +++ b/src/storage/transaction/ChainDeleteEdgesResumeRemoteProcessor.cpp @@ -0,0 +1,65 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#include "storage/transaction/ChainDeleteEdgesResumeRemoteProcessor.h" + +#include "storage/StorageFlags.h" + +namespace nebula { +namespace storage { + +ChainDeleteEdgesResumeRemoteProcessor::ChainDeleteEdgesResumeRemoteProcessor(StorageEnv* env, + const std::string& val) + : ChainDeleteEdgesLocalProcessor(env) { + req_ = DeleteEdgesRequestHelper::parseDeleteEdgesRequest(val); +} + +folly::SemiFuture ChainDeleteEdgesResumeRemoteProcessor::prepareLocal() { + code_ = checkRequest(req_); + return code_; +} + +folly::SemiFuture ChainDeleteEdgesResumeRemoteProcessor::processRemote(Code code) { + VLOG(1) << txnId_ << " prepareLocal() " << apache::thrift::util::enumNameSafe(code); + + return ChainDeleteEdgesLocalProcessor::processRemote(code); +} + +folly::SemiFuture ChainDeleteEdgesResumeRemoteProcessor::processLocal(Code code) { + VLOG(1) << txnId_ << " processRemote() " << apache::thrift::util::enumNameSafe(code); + + setErrorCode(code); + + if (code == Code::E_RPC_FAILURE) { + return code_; + } + + if (code == Code::SUCCEEDED) { + // if there are something wrong other than rpc failure + // we need to keep the resume retry(by not remove double prime key) + std::vector doublePrimeKeys; + for (auto& partOfKeys : req_.get_parts()) { + std::string key; + for (auto& edgeKey : partOfKeys.second) { + doublePrimeKeys.emplace_back(); + doublePrimeKeys.back() = ConsistUtil::doublePrimeTable().append( + ConsistUtil::edgeKey(spaceVidLen_, localPartId_, edgeKey)); + } + } + + folly::Baton baton; + env_->kvstore_->asyncMultiRemove( + spaceId_, localPartId_, std::move(doublePrimeKeys), [this, &baton](auto&& rc) { + this->code_ = rc; + baton.post(); + }); + baton.wait(); + } + + return code_; +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/transaction/ChainDeleteEdgesResumeRemoteProcessor.h b/src/storage/transaction/ChainDeleteEdgesResumeRemoteProcessor.h new file mode 100644 index 00000000000..31c091f5962 --- /dev/null +++ 
b/src/storage/transaction/ChainDeleteEdgesResumeRemoteProcessor.h @@ -0,0 +1,32 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ + +#pragma once + +#include "storage/transaction/ChainDeleteEdgesLocalProcessor.h" + +namespace nebula { +namespace storage { + +class ChainDeleteEdgesResumeRemoteProcessor : public ChainDeleteEdgesLocalProcessor { + public: + static ChainDeleteEdgesResumeRemoteProcessor* instance(StorageEnv* env, const std::string& val) { + return new ChainDeleteEdgesResumeRemoteProcessor(env, val); + } + + folly::SemiFuture prepareLocal() override; + + folly::SemiFuture processRemote(nebula::cpp2::ErrorCode code) override; + + folly::SemiFuture processLocal(nebula::cpp2::ErrorCode code) override; + + virtual ~ChainDeleteEdgesResumeRemoteProcessor() = default; + + protected: + ChainDeleteEdgesResumeRemoteProcessor(StorageEnv* env, const std::string& val); +}; + +} // namespace storage +} // namespace nebula diff --git a/src/storage/transaction/ChainProcessorFactory.cpp b/src/storage/transaction/ChainProcessorFactory.cpp index 16d1dbf656e..b8a60e60e4d 100644 --- a/src/storage/transaction/ChainProcessorFactory.cpp +++ b/src/storage/transaction/ChainProcessorFactory.cpp @@ -5,6 +5,8 @@ #include "storage/transaction/ChainProcessorFactory.h" +#include "storage/transaction/ChainDeleteEdgesResumeProcessor.h" +#include "storage/transaction/ChainDeleteEdgesResumeRemoteProcessor.h" #include "storage/transaction/ConsistUtil.h" #include "storage/transaction/ResumeAddEdgeProcessor.h" #include "storage/transaction/ResumeAddEdgeRemoteProcessor.h" @@ -51,6 +53,22 @@ ChainBaseProcessor* ChainProcessorFactory::makeProcessor(StorageEnv* env, } break; } + case RequestType::DELETE: { + switch (options.resumeType) { + case ResumeType::RESUME_CHAIN: { + ret = ChainDeleteEdgesResumeProcessor::instance(env, options.primeValue); + break; + } + case ResumeType::RESUME_REMOTE: { + ret = 
ChainDeleteEdgesResumeRemoteProcessor::instance(env, options.primeValue); + break; + } + case ResumeType::UNKNOWN: { + LOG(FATAL) << "ResumeType::UNKNOWN: not supposed run here"; + } + } + break; + } case RequestType::UNKNOWN: { LOG(FATAL) << "RequestType::UNKNOWN: not supposed run here"; } diff --git a/src/storage/transaction/ChainResumeProcessor.cpp b/src/storage/transaction/ChainResumeProcessor.cpp index 8a0bc3dfd11..4fad8f13749 100644 --- a/src/storage/transaction/ChainResumeProcessor.cpp +++ b/src/storage/transaction/ChainResumeProcessor.cpp @@ -5,9 +5,9 @@ #include "storage/transaction/ChainResumeProcessor.h" -#include "storage/transaction/ChainAddEdgesProcessorLocal.h" +#include "storage/transaction/ChainAddEdgesLocalProcessor.h" #include "storage/transaction/ChainProcessorFactory.h" -#include "storage/transaction/ChainUpdateEdgeProcessorLocal.h" +#include "storage/transaction/ChainUpdateEdgeLocalProcessor.h" #include "storage/transaction/ConsistUtil.h" #include "storage/transaction/TransactionManager.h" @@ -15,7 +15,7 @@ namespace nebula { namespace storage { void ChainResumeProcessor::process() { - auto* table = env_->txnMan_->getReserveTable(); + auto* table = env_->txnMan_->getDangleEdges(); std::unique_ptr iter; for (auto it = table->begin(); it != table->end(); ++it) { auto spaceId = *reinterpret_cast(const_cast(it->first.c_str())); @@ -40,10 +40,14 @@ void ChainResumeProcessor::process() { env_->txnMan_->delPrime(spaceId, edgeKey); } continue; + } else if (rc == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) { + // raft may rollback want we scanned. 
+ env_->txnMan_->delPrime(spaceId, edgeKey); } else { LOG(WARNING) << "kvstore->get() failed, " << apache::thrift::util::enumNameSafe(rc); continue; } + ResumeOptions opt(it->second, val); auto* proc = ChainProcessorFactory::makeProcessor(env_, opt); auto fut = proc->getFinished(); @@ -52,6 +56,8 @@ void ChainResumeProcessor::process() { .thenValue([=](auto&& code) { if (code == Code::SUCCEEDED) { env_->txnMan_->delPrime(spaceId, edgeKey); + } else { + VLOG(1) << "recover failed: " << apache::thrift::util::enumNameSafe(rc); } }) .get(); diff --git a/src/storage/transaction/ChainResumeProcessor.h b/src/storage/transaction/ChainResumeProcessor.h index 1e5d90aa336..ac3572e319f 100644 --- a/src/storage/transaction/ChainResumeProcessor.h +++ b/src/storage/transaction/ChainResumeProcessor.h @@ -7,9 +7,9 @@ #include "clients/storage/InternalStorageClient.h" #include "common/utils/NebulaKeyUtils.h" -#include "storage/transaction/ChainAddEdgesProcessorLocal.h" +#include "storage/transaction/ChainAddEdgesLocalProcessor.h" #include "storage/transaction/ChainBaseProcessor.h" -#include "storage/transaction/ChainUpdateEdgeProcessorLocal.h" +#include "storage/transaction/ChainUpdateEdgeLocalProcessor.h" #include "storage/transaction/TransactionManager.h" namespace nebula { diff --git a/src/storage/transaction/ChainUpdateEdgeProcessorLocal.cpp b/src/storage/transaction/ChainUpdateEdgeLocalProcessor.cpp similarity index 67% rename from src/storage/transaction/ChainUpdateEdgeProcessorLocal.cpp rename to src/storage/transaction/ChainUpdateEdgeLocalProcessor.cpp index 443da4093fa..d2246ecb002 100644 --- a/src/storage/transaction/ChainUpdateEdgeProcessorLocal.cpp +++ b/src/storage/transaction/ChainUpdateEdgeLocalProcessor.cpp @@ -3,7 +3,7 @@ * This source code is licensed under Apache 2.0 License. 
*/ -#include "storage/transaction/ChainUpdateEdgeProcessorLocal.h" +#include "storage/transaction/ChainUpdateEdgeLocalProcessor.h" #include @@ -15,7 +15,7 @@ namespace nebula { namespace storage { -void ChainUpdateEdgeProcessorLocal::process(const cpp2::UpdateEdgeRequest& req) { +void ChainUpdateEdgeLocalProcessor::process(const cpp2::UpdateEdgeRequest& req) { if (!prepareRequest(req)) { onFinished(); } @@ -23,22 +23,19 @@ void ChainUpdateEdgeProcessorLocal::process(const cpp2::UpdateEdgeRequest& req) env_->txnMan_->addChainTask(this); } -bool ChainUpdateEdgeProcessorLocal::prepareRequest(const cpp2::UpdateEdgeRequest& req) { +bool ChainUpdateEdgeLocalProcessor::prepareRequest(const cpp2::UpdateEdgeRequest& req) { req_ = req; spaceId_ = req.get_space_id(); - partId_ = req_.get_part_id(); + localPartId_ = req_.get_part_id(); auto rc = getSpaceVidLen(spaceId_); if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { - pushResultCode(rc, partId_); + pushResultCode(rc, localPartId_); return false; } - auto __term = env_->txnMan_->getTerm(req_.get_space_id(), partId_); - if (__term.ok()) { - termOfPrepare_ = __term.value(); - } else { - pushResultCode(Code::E_PART_NOT_FOUND, partId_); + std::tie(term_, code_) = env_->txnMan_->getTerm(spaceId_, localPartId_); + if (code_ != Code::SUCCEEDED) { return false; } return true; @@ -48,13 +45,13 @@ bool ChainUpdateEdgeProcessorLocal::prepareRequest(const cpp2::UpdateEdgeRequest * 1. set mem lock * 2. 
set edge prime * */ -folly::SemiFuture ChainUpdateEdgeProcessorLocal::prepareLocal() { +folly::SemiFuture ChainUpdateEdgeLocalProcessor::prepareLocal() { if (!setLock()) { LOG(INFO) << "set lock failed, return E_WRITE_WRITE_CONFLICT"; return Code::E_WRITE_WRITE_CONFLICT; } - auto key = ConsistUtil::primeKey(spaceVidLen_, partId_, req_.get_edge_key()); + auto key = ConsistUtil::primeKey(spaceVidLen_, localPartId_, req_.get_edge_key()); std::string val; apache::thrift::CompactSerializer::serialize(req_, &val); @@ -63,7 +60,7 @@ folly::SemiFuture ChainUpdateEdgeProcessorLocal::prepareLocal() { std::vector data{{key, val}}; auto c = folly::makePromiseContract(); env_->kvstore_->asyncMultiPut( - spaceId_, partId_, std::move(data), [p = std::move(c.first), this](auto rc) mutable { + spaceId_, localPartId_, std::move(data), [p = std::move(c.first), this](auto rc) mutable { if (rc == nebula::cpp2::ErrorCode::SUCCEEDED) { primeInserted_ = true; } else { @@ -74,7 +71,7 @@ folly::SemiFuture ChainUpdateEdgeProcessorLocal::prepareLocal() { return std::move(c.second); } -folly::SemiFuture ChainUpdateEdgeProcessorLocal::processRemote(Code code) { +folly::SemiFuture ChainUpdateEdgeLocalProcessor::processRemote(Code code) { LOG(INFO) << "prepareLocal()=" << apache::thrift::util::enumNameSafe(code); if (code != Code::SUCCEEDED) { return code; @@ -84,15 +81,16 @@ folly::SemiFuture ChainUpdateEdgeProcessorLocal::processRemote(Code code) return std::move(fut); } -folly::SemiFuture ChainUpdateEdgeProcessorLocal::processLocal(Code code) { +folly::SemiFuture ChainUpdateEdgeLocalProcessor::processLocal(Code code) { LOG(INFO) << "processRemote(), code = " << apache::thrift::util::enumNameSafe(code); if (code != Code::SUCCEEDED && code_ == Code::SUCCEEDED) { code_ = code; } - if (!checkTerm()) { - LOG(WARNING) << "checkTerm() failed"; - return Code::E_OUTDATED_TERM; + auto currTerm = env_->txnMan_->getTerm(spaceId_, localPartId_); + if (currTerm.first != term_) { + LOG(WARNING) << 
"E_LEADER_CHANGED during prepare and commit local"; + code_ = Code::E_LEADER_CHANGED; } if (code == Code::E_RPC_FAILURE) { @@ -112,7 +110,7 @@ folly::SemiFuture ChainUpdateEdgeProcessorLocal::processLocal(Code code) { return code_; } -void ChainUpdateEdgeProcessorLocal::doRpc(folly::Promise&& promise, int retry) noexcept { +void ChainUpdateEdgeLocalProcessor::doRpc(folly::Promise&& promise, int retry) noexcept { try { if (retry > retryLimit_) { promise.setValue(Code::E_LEADER_CHANGED); @@ -123,7 +121,7 @@ void ChainUpdateEdgeProcessorLocal::doRpc(folly::Promise&& promise, int re auto reversedReq = reverseRequest(req_); auto f = p.getFuture(); - iClient->chainUpdateEdge(reversedReq, termOfPrepare_, ver_, std::move(p)); + iClient->chainUpdateEdge(reversedReq, term_, ver_, std::move(p)); std::move(f) .thenTry([=, p = std::move(promise)](auto&& t) mutable { auto code = t.hasValue() ? t.value() : Code::E_RPC_FAILURE; @@ -144,20 +142,20 @@ void ChainUpdateEdgeProcessorLocal::doRpc(folly::Promise&& promise, int re } } -void ChainUpdateEdgeProcessorLocal::erasePrime() { - auto key = ConsistUtil::primeKey(spaceVidLen_, partId_, req_.get_edge_key()); +void ChainUpdateEdgeLocalProcessor::erasePrime() { + auto key = ConsistUtil::primeKey(spaceVidLen_, localPartId_, req_.get_edge_key()); kvErased_.emplace_back(std::move(key)); } -void ChainUpdateEdgeProcessorLocal::appendDoublePrime() { - auto key = ConsistUtil::doublePrime(spaceVidLen_, partId_, req_.get_edge_key()); +void ChainUpdateEdgeLocalProcessor::appendDoublePrime() { + auto key = ConsistUtil::doublePrime(spaceVidLen_, localPartId_, req_.get_edge_key()); std::string val; apache::thrift::CompactSerializer::serialize(req_, &val); val += ConsistUtil::updateIdentifier(); kvAppend_.emplace_back(std::make_pair(std::move(key), std::move(val))); } -void ChainUpdateEdgeProcessorLocal::forwardToDelegateProcessor() { +void ChainUpdateEdgeLocalProcessor::forwardToDelegateProcessor() { kUpdateEdgeCounters.init("update_edge"); 
UpdateEdgeProcessor::ContextAdjuster fn = [=](EdgeContext& ctx) { ctx.kvAppend = std::move(kvAppend_); @@ -176,37 +174,22 @@ void ChainUpdateEdgeProcessorLocal::forwardToDelegateProcessor() { std::swap(resp_, resp); } -Code ChainUpdateEdgeProcessorLocal::checkAndBuildContexts(const cpp2::UpdateEdgeRequest&) { +Code ChainUpdateEdgeLocalProcessor::checkAndBuildContexts(const cpp2::UpdateEdgeRequest&) { return Code::SUCCEEDED; } -std::string ChainUpdateEdgeProcessorLocal::sEdgeKey(const cpp2::UpdateEdgeRequest& req) { +std::string ChainUpdateEdgeLocalProcessor::sEdgeKey(const cpp2::UpdateEdgeRequest& req) { return ConsistUtil::edgeKey(spaceVidLen_, req.get_part_id(), req.get_edge_key()); } -void ChainUpdateEdgeProcessorLocal::finish() { - LOG(INFO) << "ChainUpdateEdgeProcessorLocal::finish()"; +void ChainUpdateEdgeLocalProcessor::finish() { + LOG(INFO) << "ChainUpdateEdgeLocalProcessor::finish()"; pushResultCode(code_, req_.get_part_id()); onFinished(); } -bool ChainUpdateEdgeProcessorLocal::checkTerm() { - return env_->txnMan_->checkTerm(req_.get_space_id(), req_.get_part_id(), termOfPrepare_); -} - -bool ChainUpdateEdgeProcessorLocal::checkVersion() { - if (!ver_) { - return true; - } - auto [ver, rc] = ConsistUtil::versionOfUpdateReq(env_, req_); - if (rc != Code::SUCCEEDED) { - return false; - } - return *ver_ == ver; -} - -void ChainUpdateEdgeProcessorLocal::abort() { - auto key = ConsistUtil::primeKey(spaceVidLen_, partId_, req_.get_edge_key()); +void ChainUpdateEdgeLocalProcessor::abort() { + auto key = ConsistUtil::primeKey(spaceVidLen_, localPartId_, req_.get_edge_key()); kvErased_.emplace_back(std::move(key)); folly::Baton baton; @@ -221,7 +204,7 @@ void ChainUpdateEdgeProcessorLocal::abort() { baton.wait(); } -cpp2::UpdateEdgeRequest ChainUpdateEdgeProcessorLocal::reverseRequest( +cpp2::UpdateEdgeRequest ChainUpdateEdgeLocalProcessor::reverseRequest( const cpp2::UpdateEdgeRequest& req) { cpp2::UpdateEdgeRequest reversedRequest(req); auto reversedEdgeKey = 
ConsistUtil::reverseEdgeKey(req.get_edge_key()); @@ -236,7 +219,7 @@ cpp2::UpdateEdgeRequest ChainUpdateEdgeProcessorLocal::reverseRequest( return reversedRequest; } -bool ChainUpdateEdgeProcessorLocal::setLock() { +bool ChainUpdateEdgeLocalProcessor::setLock() { auto spaceId = req_.get_space_id(); auto* lockCore = env_->txnMan_->getLockCore(spaceId, req_.get_part_id()); if (lockCore == nullptr) { @@ -247,20 +230,7 @@ bool ChainUpdateEdgeProcessorLocal::setLock() { return lk_->isLocked(); } -int64_t ChainUpdateEdgeProcessorLocal::getVersion(const cpp2::UpdateEdgeRequest& req) { - int64_t invalidVer = -1; - auto spaceId = req.get_space_id(); - auto vIdLen = env_->metaClient_->getSpaceVidLen(spaceId); - if (!vIdLen.ok()) { - LOG(WARNING) << vIdLen.status().toString(); - return invalidVer; - } - auto partId = req.get_part_id(); - auto key = ConsistUtil::edgeKey(vIdLen.value(), partId, req.get_edge_key()); - return ConsistUtil::getSingleEdgeVer(env_->kvstore_, spaceId, partId, key); -} - -nebula::cpp2::ErrorCode ChainUpdateEdgeProcessorLocal::getErrorCode( +nebula::cpp2::ErrorCode ChainUpdateEdgeLocalProcessor::getErrorCode( const cpp2::UpdateResponse& resp) { auto& respCommon = resp.get_result(); auto& parts = respCommon.get_failed_parts(); @@ -270,7 +240,7 @@ nebula::cpp2::ErrorCode ChainUpdateEdgeProcessorLocal::getErrorCode( return parts.front().get_code(); } -void ChainUpdateEdgeProcessorLocal::addUnfinishedEdge(ResumeType type) { +void ChainUpdateEdgeLocalProcessor::addUnfinishedEdge(ResumeType type) { LOG(INFO) << "addUnfinishedEdge()"; if (lk_ != nullptr) { lk_->forceUnlock(); diff --git a/src/storage/transaction/ChainUpdateEdgeProcessorLocal.h b/src/storage/transaction/ChainUpdateEdgeLocalProcessor.h similarity index 86% rename from src/storage/transaction/ChainUpdateEdgeProcessorLocal.h rename to src/storage/transaction/ChainUpdateEdgeLocalProcessor.h index ecf61e8dc6d..2f84f343a83 100644 --- a/src/storage/transaction/ChainUpdateEdgeProcessorLocal.h +++ 
b/src/storage/transaction/ChainUpdateEdgeLocalProcessor.h @@ -16,15 +16,15 @@ namespace nebula { namespace storage { -class ChainUpdateEdgeProcessorLocal +class ChainUpdateEdgeLocalProcessor : public QueryBaseProcessor, public ChainBaseProcessor { friend struct ChainUpdateEdgeTestHelper; public: using Code = ::nebula::cpp2::ErrorCode; - static ChainUpdateEdgeProcessorLocal* instance(StorageEnv* env) { - return new ChainUpdateEdgeProcessorLocal(env); + static ChainUpdateEdgeLocalProcessor* instance(StorageEnv* env) { + return new ChainUpdateEdgeLocalProcessor(env); } void process(const cpp2::UpdateEdgeRequest& req) override; @@ -39,20 +39,16 @@ class ChainUpdateEdgeProcessorLocal void finish() override; - virtual ~ChainUpdateEdgeProcessorLocal() = default; + virtual ~ChainUpdateEdgeLocalProcessor() = default; protected: - explicit ChainUpdateEdgeProcessorLocal(StorageEnv* env) + explicit ChainUpdateEdgeLocalProcessor(StorageEnv* env) : QueryBaseProcessor(env, nullptr) {} std::string edgeKey(const cpp2::UpdateEdgeRequest& req); void doRpc(folly::Promise&& promise, int retry = 0) noexcept; - bool checkTerm(); - - bool checkVersion(); - folly::SemiFuture processNormalLocal(Code code); void abort(); @@ -82,9 +78,9 @@ class ChainUpdateEdgeProcessorLocal protected: cpp2::UpdateEdgeRequest req_; std::unique_ptr lk_; - PartitionID partId_; + PartitionID localPartId_; int retryLimit_{10}; - TermID termOfPrepare_{-1}; + TermID term_{-1}; // set to true when prime insert succeed // in processLocal(), we check this to determine if need to do abort() diff --git a/src/storage/transaction/ChainUpdateEdgeProcessorRemote.cpp b/src/storage/transaction/ChainUpdateEdgeRemoteProcessor.cpp similarity index 50% rename from src/storage/transaction/ChainUpdateEdgeProcessorRemote.cpp rename to src/storage/transaction/ChainUpdateEdgeRemoteProcessor.cpp index d52cbf8087d..1dbfb1daeaf 100644 --- a/src/storage/transaction/ChainUpdateEdgeProcessorRemote.cpp +++ 
b/src/storage/transaction/ChainUpdateEdgeRemoteProcessor.cpp @@ -3,7 +3,7 @@ * This source code is licensed under Apache 2.0 License. */ -#include "storage/transaction/ChainUpdateEdgeProcessorRemote.h" +#include "storage/transaction/ChainUpdateEdgeRemoteProcessor.h" #include "storage/mutate/UpdateEdgeProcessor.h" #include "storage/transaction/ConsistUtil.h" @@ -14,18 +14,16 @@ namespace storage { using Code = ::nebula::cpp2::ErrorCode; -void ChainUpdateEdgeProcessorRemote::process(const cpp2::ChainUpdateEdgeRequest& req) { +void ChainUpdateEdgeRemoteProcessor::process(const cpp2::ChainUpdateEdgeRequest& req) { auto rc = Code::SUCCEEDED; - if (!checkTerm(req)) { + auto spaceId = req.get_space_id(); + auto localPartId = getLocalPart(req); + auto localTerm = req.get_term(); + if (!env_->txnMan_->checkTermFromCache(spaceId, localPartId, localTerm)) { LOG(WARNING) << "invalid term"; rc = Code::E_OUTDATED_TERM; } - if (!checkVersion(req)) { - LOG(WARNING) << "invalid term"; - rc = Code::E_OUTDATED_EDGE; - } - auto& updateRequest = req.get_update_edge_request(); if (rc != Code::SUCCEEDED) { pushResultCode(rc, updateRequest.get_part_id()); @@ -35,26 +33,13 @@ void ChainUpdateEdgeProcessorRemote::process(const cpp2::ChainUpdateEdgeRequest& onFinished(); } -bool ChainUpdateEdgeProcessorRemote::checkTerm(const cpp2::ChainUpdateEdgeRequest& req) { - auto partId = req.get_update_edge_request().get_part_id(); - return env_->txnMan_->checkTerm(req.get_space_id(), partId, req.get_term()); -} - -bool ChainUpdateEdgeProcessorRemote::checkVersion(const cpp2::ChainUpdateEdgeRequest& req) { - if (!req.edge_version_ref()) { - return true; - } - auto verExpected = *req.edge_version_ref(); - auto& updateRequest = req.get_update_edge_request(); - auto [verActually, rc] = ConsistUtil::versionOfUpdateReq(env_, updateRequest); - if (rc != Code::SUCCEEDED) { - return false; - } - return verExpected >= verActually; +PartitionID ChainUpdateEdgeRemoteProcessor::getLocalPart(const 
cpp2::ChainUpdateEdgeRequest& req) { + auto& edgeKey = req.get_update_edge_request().get_edge_key(); + return NebulaKeyUtils::getPart(edgeKey.dst()->getStr()); } // forward to UpdateEdgeProcessor -void ChainUpdateEdgeProcessorRemote::updateEdge(const cpp2::ChainUpdateEdgeRequest& req) { +void ChainUpdateEdgeRemoteProcessor::updateEdge(const cpp2::ChainUpdateEdgeRequest& req) { auto* proc = UpdateEdgeProcessor::instance(env_, counters_); auto f = proc->getFuture(); proc->process(req.get_update_edge_request()); diff --git a/src/storage/transaction/ChainUpdateEdgeProcessorRemote.h b/src/storage/transaction/ChainUpdateEdgeRemoteProcessor.h similarity index 64% rename from src/storage/transaction/ChainUpdateEdgeProcessorRemote.h rename to src/storage/transaction/ChainUpdateEdgeRemoteProcessor.h index 0170c54d964..b3034fc9d3d 100644 --- a/src/storage/transaction/ChainUpdateEdgeProcessorRemote.h +++ b/src/storage/transaction/ChainUpdateEdgeRemoteProcessor.h @@ -12,24 +12,22 @@ namespace nebula { namespace storage { -class ChainUpdateEdgeProcessorRemote : public BaseProcessor { +class ChainUpdateEdgeRemoteProcessor : public BaseProcessor { public: - static ChainUpdateEdgeProcessorRemote* instance(StorageEnv* env) { - return new ChainUpdateEdgeProcessorRemote(env); + static ChainUpdateEdgeRemoteProcessor* instance(StorageEnv* env) { + return new ChainUpdateEdgeRemoteProcessor(env); } void process(const cpp2::ChainUpdateEdgeRequest& req); private: - explicit ChainUpdateEdgeProcessorRemote(StorageEnv* env) + explicit ChainUpdateEdgeRemoteProcessor(StorageEnv* env) : BaseProcessor(env) {} - bool checkTerm(const cpp2::ChainUpdateEdgeRequest& req); - - bool checkVersion(const cpp2::ChainUpdateEdgeRequest& req); - void updateEdge(const cpp2::ChainUpdateEdgeRequest& req); + PartitionID getLocalPart(const cpp2::ChainUpdateEdgeRequest& req); + private: std::unique_ptr lk_; }; diff --git a/src/storage/transaction/ConsistTypes.h b/src/storage/transaction/ConsistTypes.h new file mode 
100644 index 00000000000..eb0cb2c89db --- /dev/null +++ b/src/storage/transaction/ConsistTypes.h @@ -0,0 +1,41 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ + +#pragma once + +#include + +#include "kvstore/LogEncoder.h" + +namespace nebula { +namespace storage { + +enum class RequestType { + UNKNOWN = 0, + INSERT, + UPDATE, + DELETE, +}; + +enum class ResumeType { + UNKNOWN = 0, + RESUME_CHAIN, + RESUME_REMOTE, +}; + +struct ResumeOptions { + ResumeOptions(ResumeType tp, std::string val) : resumeType(tp), primeValue(std::move(val)) {} + ResumeType resumeType; + std::string primeValue; +}; + +struct HookFuncPara { + std::optional*> keys; + std::optional<::nebula::kvstore::BatchHolder*> batch; + std::optional result; +}; + +} // namespace storage +} // namespace nebula diff --git a/src/storage/transaction/ConsistUtil.cpp b/src/storage/transaction/ConsistUtil.cpp index e720012bd3b..d80d288b2f7 100644 --- a/src/storage/transaction/ConsistUtil.cpp +++ b/src/storage/transaction/ConsistUtil.cpp @@ -65,6 +65,8 @@ RequestType ConsistUtil::parseType(folly::StringPiece val) { return RequestType::UPDATE; case 'a': return RequestType::INSERT; + case 'd': + return RequestType::DELETE; default: LOG(FATAL) << "should not happen, identifier is " << identifier; } @@ -98,43 +100,6 @@ std::string ConsistUtil::ConsistUtil::edgeKey(size_t vIdLen, (*key.dst_ref()).getStr()); } -std::vector ConsistUtil::getMultiEdgeVers(kvstore::KVStore* store, - GraphSpaceID spaceId, - PartitionID partId, - const std::vector& keys) { - std::vector ret(keys.size()); - std::vector _keys(keys); - auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; - std::vector status; - std::vector vals; - std::tie(rc, status) = store->multiGet(spaceId, partId, std::move(_keys), &vals); - if (rc != nebula::cpp2::ErrorCode::SUCCEEDED && rc != nebula::cpp2::ErrorCode::E_PARTIAL_RESULT) { - return ret; - } - for (auto i = 0U; i != ret.size(); ++i) { - 
ret[i] = getTimestamp(vals[i]); - } - return ret; -} - -// return -1 if edge version not exist -int64_t ConsistUtil::getSingleEdgeVer(kvstore::KVStore* store, - GraphSpaceID spaceId, - PartitionID partId, - const std::string& key) { - static int64_t invalidEdgeVer = -1; - std::string val; - auto rc = store->get(spaceId, partId, key, &val); - if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { - return invalidEdgeVer; - } - return getTimestamp(val); -} - -int64_t ConsistUtil::getTimestamp(const std::string& val) noexcept { - return *reinterpret_cast(val.data() + (val.size() - sizeof(int64_t))); -} - cpp2::AddEdgesRequest ConsistUtil::toAddEdgesRequest(const cpp2::ChainAddEdgesRequest& req) { cpp2::AddEdgesRequest ret; ret.space_id_ref() = req.get_space_id(); @@ -157,62 +122,71 @@ void ConsistUtil::reverseEdgeKeyInplace(cpp2::EdgeKey& edgeKey) { *edgeKey.edge_type_ref() = 0 - edgeKey.get_edge_type(); } -std::pair ConsistUtil::versionOfUpdateReq( - StorageEnv* env, const cpp2::UpdateEdgeRequest& req) { - int64_t ver = -1; - auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; +int64_t ConsistUtil::toInt(const ::nebula::Value& val) { + // return ConsistUtil::toInt2(val.toString()); + auto str = val.toString(); + if (str.size() < 3) { + return 0; + } + return *reinterpret_cast(const_cast(str.data() + 1)); +} - do { - auto spaceId = req.get_space_id(); - auto stVidLen = env->metaClient_->getSpaceVidLen(spaceId); - if (!stVidLen.ok()) { - rc = nebula::cpp2::ErrorCode::E_SPACE_NOT_FOUND; - break; - } - auto vIdLen = stVidLen.value(); - auto partId = req.get_part_id(); - auto key = ConsistUtil::edgeKey(vIdLen, partId, req.get_edge_key()); - ver = ConsistUtil::getSingleEdgeVer(env->kvstore_, spaceId, partId, key); - } while (0); +int64_t ConsistUtil::toInt2(const std::string& str) { + if (str.size() < 8) { + return 0; + } + return *reinterpret_cast(const_cast(str.data())); +} - return std::make_pair(ver, rc); +std::string ConsistUtil::readableKey(size_t vidLen, const std::string& 
rawKey) { + auto src = NebulaKeyUtils::getSrcId(vidLen, rawKey); + auto dst = NebulaKeyUtils::getDstId(vidLen, rawKey); + auto rank = NebulaKeyUtils::getRank(vidLen, rawKey); + std::stringstream ss; + ss << ConsistUtil::toInt2(src.str()) << "->" << ConsistUtil::toInt2(dst.str()) << "@" << rank; + return ss.str(); } -std::string ConsistUtil::dumpAddEdgeReq(const cpp2::AddEdgesRequest& req) { - std::stringstream oss; - oss << "prop_names.size() = " << req.get_prop_names().size() << " "; - for (auto& name : req.get_prop_names()) { - oss << name << " "; - } - oss << " "; - for (auto& part : req.get_parts()) { - // oss << dumpParts(part.second); - for (auto& edge : part.second) { - oss << " edge: " << folly::hexlify(edge.get_key().get_src().toString()) << "->" - << folly::hexlify(edge.get_key().get_dst().toString()) - << ", type=" << edge.get_key().get_edge_type() - << ", rank=" << edge.get_key().get_ranking() << ", vals: "; - for (auto& val : edge.get_props()) { - oss << val.toString() << ", "; - } - oss << "\n"; +std::vector ConsistUtil::toStrKeys(const cpp2::DeleteEdgesRequest& req, int vIdLen) { + std::vector ret; + for (auto& edgesOfPart : req.get_parts()) { + auto partId = edgesOfPart.first; + for (auto& key : edgesOfPart.second) { + ret.emplace_back(ConsistUtil::edgeKey(vIdLen, partId, key)); } } - return oss.str(); + return ret; +} + +::nebula::cpp2::ErrorCode ConsistUtil::getErrorCode(const cpp2::ExecResponse& resp) { + auto ret = ::nebula::cpp2::ErrorCode::SUCCEEDED; + auto& respComn = resp.get_result(); + for (auto& part : respComn.get_failed_parts()) { + ret = part.code; + } + return ret; +} + +cpp2::DeleteEdgesRequest DeleteEdgesRequestHelper::toDeleteEdgesRequest( + const cpp2::ChainDeleteEdgesRequest& req) { + cpp2::DeleteEdgesRequest ret; + ret.space_id_ref() = req.get_space_id(); + ret.parts_ref() = req.get_parts(); + return ret; +} + +cpp2::DeleteEdgesRequest DeleteEdgesRequestHelper::parseDeleteEdgesRequest(const std::string& val) { + 
cpp2::DeleteEdgesRequest req; + apache::thrift::CompactSerializer::deserialize(val, req); + return req; } -std::string ConsistUtil::dumpParts(const Parts& parts) { +std::string DeleteEdgesRequestHelper::explain(const cpp2::DeleteEdgesRequest& req) { std::stringstream oss; - for (auto& part : parts) { - for (auto& edge : part.second) { - oss << " edge: " << folly::hexlify(edge.get_key().get_src().toString()) << "->" - << folly::hexlify(edge.get_key().get_dst().toString()) - << ", type=" << edge.get_key().get_edge_type() - << ", rank=" << edge.get_key().get_ranking() << ", vals: "; - for (auto& val : edge.get_props()) { - oss << val.toString() << ", "; - } - oss << "\n"; + for (auto& partOfKeys : req.get_parts()) { + for (auto& key : partOfKeys.second) { + oss << ConsistUtil::toInt(key.get_src()) << "->" << ConsistUtil::toInt(key.get_dst()) << "@" + << key.get_ranking() << ", "; } } return oss.str(); diff --git a/src/storage/transaction/ConsistUtil.h b/src/storage/transaction/ConsistUtil.h index a507802bdf0..0ca2fc918d5 100644 --- a/src/storage/transaction/ConsistUtil.h +++ b/src/storage/transaction/ConsistUtil.h @@ -11,34 +11,20 @@ #include "interface/gen-cpp2/storage_types.h" #include "kvstore/KVStore.h" #include "storage/CommonUtils.h" +#include "storage/transaction/ConsistTypes.h" namespace nebula { namespace storage { - -enum class RequestType { - UNKNOWN, - INSERT, - UPDATE, -}; - -enum class ResumeType { - UNKNOWN = 0, - RESUME_CHAIN, - RESUME_REMOTE, -}; - -struct ResumeOptions { - ResumeOptions(ResumeType tp, std::string val) : resumeType(tp), primeValue(std::move(val)) {} - ResumeType resumeType; - std::string primeValue; -}; - class ConsistUtil final { public: static std::string primeTable(); static std::string doublePrimeTable(); + static std::string deletePrimeTable(); + + static std::string deleteDoublePrimeTable(); + static std::string edgeKey(size_t vIdLen, PartitionID partId, const cpp2::EdgeKey& key); static std::string primeKey(size_t vIdLen, 
PartitionID partId, const cpp2::EdgeKey& edgeKey); @@ -75,21 +61,6 @@ class ConsistUtil final { static std::string strUUID(); - static std::string tempRequestTable(); - - static std::vector getMultiEdgeVers(kvstore::KVStore* store, - GraphSpaceID spaceId, - PartitionID partId, - const std::vector& keys); - - // return -1 if edge version not exist - static int64_t getSingleEdgeVer(kvstore::KVStore* store, - GraphSpaceID spaceId, - PartitionID partId, - const std::string& key); - - static int64_t getTimestamp(const std::string& val) noexcept; - static cpp2::AddEdgesRequest toAddEdgesRequest(const cpp2::ChainAddEdgesRequest& req); static cpp2::EdgeKey reverseEdgeKey(const cpp2::EdgeKey& edgeKey); @@ -104,13 +75,36 @@ class ConsistUtil final { return "u"; } - static std::pair versionOfUpdateReq( - StorageEnv* env, const cpp2::UpdateEdgeRequest& req); + static std::string deleteIdentifier() noexcept { + return "d"; + } + + /** + * @brief if the vid of space is created as "Fixed string" + * when trying to print this vid, it will show a hex string + * This function trying to transform it to human readable format. 
+ * @return -1 if failed + */ + static int64_t toInt(const ::nebula::Value& val); + + static int64_t toInt2(const std::string& val); + + static std::string readableKey(size_t vidLen, const std::string& rawKey); + + static std::vector toStrKeys(const cpp2::DeleteEdgesRequest& req, int vidLen); + + static ::nebula::cpp2::ErrorCode getErrorCode(const cpp2::ExecResponse& resp); +}; + +struct DeleteEdgesRequestHelper final { + static cpp2::DeleteEdgesRequest toDeleteEdgesRequest(const cpp2::ChainDeleteEdgesRequest& req); + + static cpp2::ChainDeleteEdgesRequest toChainDeleteEdgesRequest( + const cpp2::DeleteEdgesRequest& req); - static std::string dumpAddEdgeReq(const cpp2::AddEdgesRequest& req); + static cpp2::DeleteEdgesRequest parseDeleteEdgesRequest(const std::string& val); - using Parts = std::unordered_map>; - static std::string dumpParts(const Parts& parts); + static std::string explain(const cpp2::DeleteEdgesRequest& req); }; } // namespace storage diff --git a/src/storage/transaction/ResumeAddEdgeProcessor.cpp b/src/storage/transaction/ResumeAddEdgeProcessor.cpp index a23b34aad29..3ca1bfb18c5 100644 --- a/src/storage/transaction/ResumeAddEdgeProcessor.cpp +++ b/src/storage/transaction/ResumeAddEdgeProcessor.cpp @@ -9,13 +9,13 @@ namespace nebula { namespace storage { ResumeAddEdgeProcessor::ResumeAddEdgeProcessor(StorageEnv* env, const std::string& val) - : ChainAddEdgesProcessorLocal(env) { + : ChainAddEdgesLocalProcessor(env) { req_ = ConsistUtil::parseAddRequest(val); uuid_ = ConsistUtil::strUUID(); readableEdgeDesc_ = makeReadableEdge(req_); VLOG(1) << uuid_ << " resume prime " << readableEdgeDesc_; - ChainAddEdgesProcessorLocal::prepareRequest(req_); + ChainAddEdgesLocalProcessor::prepareRequest(req_); } folly::SemiFuture ResumeAddEdgeProcessor::prepareLocal() { @@ -28,32 +28,31 @@ folly::SemiFuture ResumeAddEdgeProcessor::prepareLocal( return Code::E_SPACE_NOT_FOUND; } auto& parts = req_.get_parts(); + auto& srcId = 
parts.begin()->second.back().get_key().get_src().getStr(); auto& dstId = parts.begin()->second.back().get_key().get_dst().getStr(); + localPartId_ = env_->metaClient_->partId(numOfPart.value(), srcId); remotePartId_ = env_->metaClient_->partId(numOfPart.value(), dstId); - std::vector keys = sEdgeKey(req_); - auto vers = ConsistUtil::getMultiEdgeVers(env_->kvstore_, spaceId, localPartId_, keys); - edgeVer_ = vers.front(); - - return Code::SUCCEEDED; + return code_; } folly::SemiFuture ResumeAddEdgeProcessor::processRemote(Code code) { VLOG(1) << uuid_ << " prepareLocal() " << apache::thrift::util::enumNameSafe(code); - return ChainAddEdgesProcessorLocal::processRemote(code); + return ChainAddEdgesLocalProcessor::processRemote(code); } folly::SemiFuture ResumeAddEdgeProcessor::processLocal(Code code) { VLOG(1) << uuid_ << " processRemote() " << apache::thrift::util::enumNameSafe(code); setErrorCode(code); - if (!checkTerm(req_)) { - LOG(WARNING) << this << "E_OUTDATED_TERM"; - return Code::E_OUTDATED_TERM; + auto currTerm = env_->txnMan_->getTerm(spaceId_, localPartId_); + if (currTerm.first != term_) { + LOG(WARNING) << "E_LEADER_CHANGED during prepare and commit local"; + code_ = Code::E_LEADER_CHANGED; } if (code == Code::E_RPC_FAILURE) { - kvAppend_ = ChainAddEdgesProcessorLocal::makeDoublePrime(); + kvAppend_ = ChainAddEdgesLocalProcessor::makeDoublePrime(); } if (code == Code::E_RPC_FAILURE || code == Code::SUCCEEDED) { diff --git a/src/storage/transaction/ResumeAddEdgeProcessor.h b/src/storage/transaction/ResumeAddEdgeProcessor.h index 5608cd60360..797bf7979aa 100644 --- a/src/storage/transaction/ResumeAddEdgeProcessor.h +++ b/src/storage/transaction/ResumeAddEdgeProcessor.h @@ -5,12 +5,12 @@ #pragma once -#include "storage/transaction/ChainAddEdgesProcessorLocal.h" +#include "storage/transaction/ChainAddEdgesLocalProcessor.h" namespace nebula { namespace storage { -class ResumeAddEdgeProcessor : public ChainAddEdgesProcessorLocal { +class 
ResumeAddEdgeProcessor : public ChainAddEdgesLocalProcessor { public: static ResumeAddEdgeProcessor* instance(StorageEnv* env, const std::string& val) { return new ResumeAddEdgeProcessor(env, val); diff --git a/src/storage/transaction/ResumeAddEdgeRemoteProcessor.cpp b/src/storage/transaction/ResumeAddEdgeRemoteProcessor.cpp index bdd1be3b664..21259f74afa 100644 --- a/src/storage/transaction/ResumeAddEdgeRemoteProcessor.cpp +++ b/src/storage/transaction/ResumeAddEdgeRemoteProcessor.cpp @@ -9,21 +9,15 @@ namespace nebula { namespace storage { ResumeAddEdgeRemoteProcessor::ResumeAddEdgeRemoteProcessor(StorageEnv* env, const std::string& val) - : ChainAddEdgesProcessorLocal(env) { + : ChainAddEdgesLocalProcessor(env) { req_ = ConsistUtil::parseAddRequest(val); - LOG(WARNING) << ConsistUtil::dumpAddEdgeReq(req_); - ChainAddEdgesProcessorLocal::prepareRequest(req_); + ChainAddEdgesLocalProcessor::prepareRequest(req_); } folly::SemiFuture ResumeAddEdgeRemoteProcessor::prepareLocal() { - if (!checkTerm(req_)) { - LOG(WARNING) << this << "E_OUTDATED_TERM"; - return Code::E_OUTDATED_TERM; - } - - if (!checkVersion(req_)) { - LOG(WARNING) << this << "E_OUTDATED_EDGE"; - return Code::E_OUTDATED_EDGE; + std::tie(term_, code_) = env_->txnMan_->getTerm(spaceId_, localPartId_); + if (code_ != Code::SUCCEEDED) { + return code_; } auto spaceId = req_.get_space_id(); @@ -34,26 +28,19 @@ folly::SemiFuture ResumeAddEdgeRemoteProcessor::prepare auto& parts = req_.get_parts(); auto& dstId = parts.begin()->second.back().get_key().get_dst().getStr(); remotePartId_ = env_->metaClient_->partId(numOfPart.value(), dstId); - std::vector keys = sEdgeKey(req_); - auto vers = ConsistUtil::getMultiEdgeVers(env_->kvstore_, spaceId, localPartId_, keys); - edgeVer_ = vers.front(); return Code::SUCCEEDED; } folly::SemiFuture ResumeAddEdgeRemoteProcessor::processRemote(Code code) { - return ChainAddEdgesProcessorLocal::processRemote(code); + return ChainAddEdgesLocalProcessor::processRemote(code); } 
folly::SemiFuture ResumeAddEdgeRemoteProcessor::processLocal(Code code) { - if (!checkTerm(req_)) { - LOG(WARNING) << this << "E_OUTDATED_TERM"; - return Code::E_OUTDATED_TERM; - } - - if (!checkVersion(req_)) { - LOG(WARNING) << this << "E_OUTDATED_EDGE"; - return Code::E_OUTDATED_EDGE; + auto currTerm = env_->txnMan_->getTerm(spaceId_, localPartId_); + if (currTerm.first != term_) { + LOG(WARNING) << "E_LEADER_CHANGED during prepare and commit local"; + code_ = Code::E_LEADER_CHANGED; } if (code == Code::E_OUTDATED_TERM) { @@ -70,7 +57,7 @@ folly::SemiFuture ResumeAddEdgeRemoteProcessor::processLocal(Code code) { if (code == Code::SUCCEEDED) { // if there are something wrong other than rpc failure // we need to keep the resume retry(by not remove those prime key) - ChainAddEdgesProcessorLocal::eraseDoublePrime(); + ChainAddEdgesLocalProcessor::eraseDoublePrime(); code_ = forwardToDelegateProcessor().get(); return code_; } diff --git a/src/storage/transaction/ResumeAddEdgeRemoteProcessor.h b/src/storage/transaction/ResumeAddEdgeRemoteProcessor.h index 9e5aed28351..a9046814064 100644 --- a/src/storage/transaction/ResumeAddEdgeRemoteProcessor.h +++ b/src/storage/transaction/ResumeAddEdgeRemoteProcessor.h @@ -5,12 +5,12 @@ #pragma once -#include "storage/transaction/ChainAddEdgesProcessorLocal.h" +#include "storage/transaction/ChainAddEdgesLocalProcessor.h" namespace nebula { namespace storage { -class ResumeAddEdgeRemoteProcessor : public ChainAddEdgesProcessorLocal { +class ResumeAddEdgeRemoteProcessor : public ChainAddEdgesLocalProcessor { public: static ResumeAddEdgeRemoteProcessor* instance(StorageEnv* env, const std::string& val) { return new ResumeAddEdgeRemoteProcessor(env, val); diff --git a/src/storage/transaction/ResumeUpdateProcessor.cpp b/src/storage/transaction/ResumeUpdateProcessor.cpp index ee883e86d01..075d0c10a2d 100644 --- a/src/storage/transaction/ResumeUpdateProcessor.cpp +++ b/src/storage/transaction/ResumeUpdateProcessor.cpp @@ -13,34 +13,29 @@ 
namespace nebula { namespace storage { ResumeUpdateProcessor::ResumeUpdateProcessor(StorageEnv* env, const std::string& val) - : ChainUpdateEdgeProcessorLocal(env) { + : ChainUpdateEdgeLocalProcessor(env) { req_ = ConsistUtil::parseUpdateRequest(val); - ChainUpdateEdgeProcessorLocal::prepareRequest(req_); + ChainUpdateEdgeLocalProcessor::prepareRequest(req_); } folly::SemiFuture ResumeUpdateProcessor::prepareLocal() { - ver_ = getVersion(req_); - - return Code::SUCCEEDED; + std::tie(term_, code_) = env_->txnMan_->getTerm(spaceId_, localPartId_); + return code_; } folly::SemiFuture ResumeUpdateProcessor::processRemote(Code code) { - LOG_IF(INFO, FLAGS_trace_toss) << "prepareLocal()=" << apache::thrift::util::enumNameSafe(code); - return ChainUpdateEdgeProcessorLocal::processRemote(code); + VLOG(1) << "prepareLocal()=" << apache::thrift::util::enumNameSafe(code); + return ChainUpdateEdgeLocalProcessor::processRemote(code); } folly::SemiFuture ResumeUpdateProcessor::processLocal(Code code) { - LOG_IF(INFO, FLAGS_trace_toss) << "processRemote()=" << apache::thrift::util::enumNameSafe(code); + VLOG(1) << "processRemote()=" << apache::thrift::util::enumNameSafe(code); setErrorCode(code); - if (!checkTerm()) { - LOG(WARNING) << "E_OUTDATED_TERM"; - return Code::E_OUTDATED_TERM; - } - - if (!checkVersion()) { - LOG(WARNING) << "E_OUTDATED_EDGE"; - return Code::E_OUTDATED_EDGE; + auto currTerm = env_->txnMan_->getTerm(spaceId_, localPartId_); + if (currTerm.first != term_) { + LOG(WARNING) << "E_LEADER_CHANGED during prepare and commit local"; + code_ = Code::E_LEADER_CHANGED; } if (code == Code::E_RPC_FAILURE) { @@ -50,7 +45,7 @@ folly::SemiFuture ResumeUpdateProcessor::processLocal(Code code) { if (code == Code::E_RPC_FAILURE || code == Code::SUCCEEDED) { // if there are something wrong other than rpc failure // we need to keep the resume retry(by not remove those prime key) - auto key = ConsistUtil::primeKey(spaceVidLen_, partId_, req_.get_edge_key()); + auto key = 
ConsistUtil::primeKey(spaceVidLen_, localPartId_, req_.get_edge_key()); kvErased_.emplace_back(std::move(key)); forwardToDelegateProcessor(); return code_; @@ -60,7 +55,7 @@ folly::SemiFuture ResumeUpdateProcessor::processLocal(Code code) { } void ResumeUpdateProcessor::finish() { - LOG_IF(INFO, FLAGS_trace_toss) << "commitLocal()=" << apache::thrift::util::enumNameSafe(code_); + VLOG(1) << "commitLocal()=" << apache::thrift::util::enumNameSafe(code_); finished_.setValue(code_); onFinished(); } diff --git a/src/storage/transaction/ResumeUpdateProcessor.h b/src/storage/transaction/ResumeUpdateProcessor.h index ea6272e43ef..557e351b4ed 100644 --- a/src/storage/transaction/ResumeUpdateProcessor.h +++ b/src/storage/transaction/ResumeUpdateProcessor.h @@ -5,7 +5,7 @@ #pragma once -#include "storage/transaction/ChainUpdateEdgeProcessorLocal.h" +#include "storage/transaction/ChainUpdateEdgeLocalProcessor.h" namespace nebula { namespace storage { @@ -15,7 +15,7 @@ namespace storage { * if the TxnManager background resume thread found a prime key * it will create this processor to resume the complete update process */ -class ResumeUpdateProcessor : public ChainUpdateEdgeProcessorLocal { +class ResumeUpdateProcessor : public ChainUpdateEdgeLocalProcessor { public: static ResumeUpdateProcessor* instance(StorageEnv* env, const std::string& val) { return new ResumeUpdateProcessor(env, val); diff --git a/src/storage/transaction/ResumeUpdateRemoteProcessor.cpp b/src/storage/transaction/ResumeUpdateRemoteProcessor.cpp index aca10ccad0c..5bfa6ed2a65 100644 --- a/src/storage/transaction/ResumeUpdateRemoteProcessor.cpp +++ b/src/storage/transaction/ResumeUpdateRemoteProcessor.cpp @@ -11,36 +11,33 @@ namespace nebula { namespace storage { ResumeUpdateRemoteProcessor::ResumeUpdateRemoteProcessor(StorageEnv* env, const std::string& val) - : ChainUpdateEdgeProcessorLocal(env) { + : ChainUpdateEdgeLocalProcessor(env) { req_ = ConsistUtil::parseUpdateRequest(val); - 
ChainUpdateEdgeProcessorLocal::prepareRequest(req_); + ChainUpdateEdgeLocalProcessor::prepareRequest(req_); } folly::SemiFuture ResumeUpdateRemoteProcessor::prepareLocal() { - return Code::SUCCEEDED; + std::tie(term_, code_) = env_->txnMan_->getTerm(spaceId_, localPartId_); + return code_; } folly::SemiFuture ResumeUpdateRemoteProcessor::processRemote(Code code) { - return ChainUpdateEdgeProcessorLocal::processRemote(code); + return ChainUpdateEdgeLocalProcessor::processRemote(code); } folly::SemiFuture ResumeUpdateRemoteProcessor::processLocal(Code code) { setErrorCode(code); - if (!checkTerm()) { - LOG(WARNING) << "E_OUTDATED_TERM"; - return Code::E_OUTDATED_TERM; - } - - if (!checkVersion()) { - LOG(WARNING) << "E_OUTDATED_EDGE"; - return Code::E_OUTDATED_EDGE; + auto currTerm = env_->txnMan_->getTerm(spaceId_, localPartId_); + if (currTerm.first != term_) { + LOG(WARNING) << "E_LEADER_CHANGED during prepare and commit local"; + code_ = Code::E_LEADER_CHANGED; } if (code == Code::SUCCEEDED) { // if there are something wrong other than rpc failure // we need to keep the resume retry(by not remove those prime key) - auto key = ConsistUtil::doublePrime(spaceVidLen_, partId_, req_.get_edge_key()); + auto key = ConsistUtil::doublePrime(spaceVidLen_, localPartId_, req_.get_edge_key()); kvErased_.emplace_back(std::move(key)); forwardToDelegateProcessor(); return code; diff --git a/src/storage/transaction/ResumeUpdateRemoteProcessor.h b/src/storage/transaction/ResumeUpdateRemoteProcessor.h index d1ce5d93438..bb3171d061b 100644 --- a/src/storage/transaction/ResumeUpdateRemoteProcessor.h +++ b/src/storage/transaction/ResumeUpdateRemoteProcessor.h @@ -5,7 +5,7 @@ #pragma once -#include "storage/transaction/ChainUpdateEdgeProcessorLocal.h" +#include "storage/transaction/ChainUpdateEdgeLocalProcessor.h" namespace nebula { namespace storage { @@ -15,7 +15,7 @@ namespace storage { * if the TxnManager background resume thread found a prime key * it will create this processor to 
resume the complete update process */ -class ResumeUpdateRemoteProcessor : public ChainUpdateEdgeProcessorLocal { +class ResumeUpdateRemoteProcessor : public ChainUpdateEdgeLocalProcessor { public: static ResumeUpdateRemoteProcessor* instance(StorageEnv* env, const std::string& val) { return new ResumeUpdateRemoteProcessor(env, val); diff --git a/src/storage/transaction/TransactionManager.cpp b/src/storage/transaction/TransactionManager.cpp index 3c42694adc1..2c91b20b8de 100644 --- a/src/storage/transaction/TransactionManager.cpp +++ b/src/storage/transaction/TransactionManager.cpp @@ -39,7 +39,7 @@ TransactionManager::LockCore* TransactionManager::getLockCore(GraphSpaceID space GraphSpaceID partId, bool checkWhiteList) { if (checkWhiteList) { - if (whiteListParts_.find(std::make_pair(spaceId, partId)) == whiteListParts_.end()) { + if (scannedParts_.find(std::make_pair(spaceId, partId)) == scannedParts_.end()) { return nullptr; } } @@ -52,16 +52,27 @@ TransactionManager::LockCore* TransactionManager::getLockCore(GraphSpaceID space return item.first->second.get(); } -StatusOr TransactionManager::getTerm(GraphSpaceID spaceId, PartitionID partId) { - return env_->metaClient_->getTermFromCache(spaceId, partId); +std::pair TransactionManager::getTerm(GraphSpaceID spaceId, PartitionID partId) { + TermID termId = -1; + auto rc = Code::SUCCEEDED; + auto part = env_->kvstore_->part(spaceId, partId); + if (nebula::ok(part)) { + termId = nebula::value(part)->termId(); + } else { + rc = nebula::error(part); + } + return std::make_pair(termId, rc); } -bool TransactionManager::checkTerm(GraphSpaceID spaceId, PartitionID partId, TermID term) { +bool TransactionManager::checkTermFromCache(GraphSpaceID spaceId, + PartitionID partId, + TermID termId) { auto termOfMeta = env_->metaClient_->getTermFromCache(spaceId, partId); if (termOfMeta.ok()) { - if (term < termOfMeta.value()) { + if (termId < termOfMeta.value()) { LOG(WARNING) << "checkTerm() failed: " - << "spaceId=" << spaceId 
<< ", partId=" << partId << ", in-coming term=" << term + << "spaceId=" << spaceId << ", partId=" << partId + << ", in-coming term=" << termId << ", term in meta cache=" << termOfMeta.value(); return false; } @@ -69,12 +80,12 @@ bool TransactionManager::checkTerm(GraphSpaceID spaceId, PartitionID partId, Ter auto partUUID = std::make_pair(spaceId, partId); auto it = cachedTerms_.find(partUUID); if (it != cachedTerms_.cend()) { - if (term < it->second) { + if (termId < it->second) { LOG(WARNING) << "term < it->second"; return false; } } - cachedTerms_.assign(partUUID, term); + cachedTerms_.assign(partUUID, termId); return true; } @@ -115,23 +126,21 @@ std::string TransactionManager::getEdgeKey(const std::string& lockKey) { } void TransactionManager::addPrime(GraphSpaceID spaceId, const std::string& edge, ResumeType type) { - LOG_IF(INFO, FLAGS_trace_toss) << "addPrime() space=" << spaceId - << ", hex=" << folly::hexlify(edge) - << ", ResumeType=" << static_cast(type); + VLOG(1) << "addPrime() space=" << spaceId << ", hex=" << folly::hexlify(edge) + << ", ResumeType=" << static_cast(type); auto key = makeLockKey(spaceId, edge); - reserveTable_.insert(std::make_pair(key, type)); + dangleEdges_.insert(std::make_pair(key, type)); } void TransactionManager::delPrime(GraphSpaceID spaceId, const std::string& edge) { - LOG_IF(INFO, FLAGS_trace_toss) << "delPrime() space=" << spaceId - << ", hex=" << folly::hexlify(edge); + VLOG(1) << "delPrime() space=" << spaceId << ", hex=" << folly::hexlify(edge) << ", readable " + << ConsistUtil::readableKey(8, edge); auto key = makeLockKey(spaceId, edge); - reserveTable_.erase(key); + dangleEdges_.erase(key); auto partId = NebulaKeyUtils::getPart(edge); auto* lk = getLockCore(spaceId, partId, false); - auto lockKey = makeLockKey(spaceId, edge); - lk->unlock(lockKey); + lk->unlock(edge); } void TransactionManager::scanAll() { @@ -148,6 +157,7 @@ void TransactionManager::scanAll() { scanPrimes(spaceId, partId); } } + LOG(INFO) << "finish 
scanAll()"; } void TransactionManager::onNewPartAdded(std::shared_ptr& part) { @@ -165,7 +175,8 @@ void TransactionManager::onLeaderLostWrapper(const ::nebula::kvstore::Part::Call opt.spaceId, opt.partId, opt.term); - whiteListParts_.erase(std::make_pair(opt.spaceId, opt.partId)); + scannedParts_.erase(std::make_pair(opt.spaceId, opt.partId)); + dangleEdges_.clear(); } void TransactionManager::onLeaderElectedWrapper( @@ -183,9 +194,10 @@ void TransactionManager::scanPrimes(GraphSpaceID spaceId, PartitionID partId) { if (rc == nebula::cpp2::ErrorCode::SUCCEEDED) { for (; iter->valid(); iter->next()) { auto edgeKey = ConsistUtil::edgeKeyFromPrime(iter->key()); - VLOG(1) << "scanned edgekey: " << folly::hexlify(edgeKey); + VLOG(1) << "scanned edgekey: " << folly::hexlify(edgeKey) + << ", readable: " << ConsistUtil::readableKey(8, edgeKey.str()); auto lockKey = makeLockKey(spaceId, edgeKey.str()); - auto insSucceed = reserveTable_.insert(std::make_pair(lockKey, ResumeType::RESUME_CHAIN)); + auto insSucceed = dangleEdges_.insert(std::make_pair(lockKey, ResumeType::RESUME_CHAIN)); if (!insSucceed.second) { LOG(ERROR) << "not supposed to insert fail: " << folly::hexlify(edgeKey); } @@ -201,13 +213,14 @@ void TransactionManager::scanPrimes(GraphSpaceID spaceId, PartitionID partId) { return; } } + prefix = ConsistUtil::doublePrimePrefix(partId); rc = env_->kvstore_->prefix(spaceId, partId, prefix, &iter); if (rc == nebula::cpp2::ErrorCode::SUCCEEDED) { for (; iter->valid(); iter->next()) { auto edgeKey = ConsistUtil::edgeKeyFromDoublePrime(iter->key()); auto lockKey = makeLockKey(spaceId, edgeKey.str()); - auto insSucceed = reserveTable_.insert(std::make_pair(lockKey, ResumeType::RESUME_REMOTE)); + auto insSucceed = dangleEdges_.insert(std::make_pair(lockKey, ResumeType::RESUME_REMOTE)); if (!insSucceed.second) { LOG(ERROR) << "not supposed to insert fail: " << folly::hexlify(edgeKey); } @@ -224,13 +237,13 @@ void TransactionManager::scanPrimes(GraphSpaceID spaceId, 
PartitionID partId) { } } auto partOfSpace = std::make_pair(spaceId, partId); - auto insRet = whiteListParts_.insert(std::make_pair(partOfSpace, 0)); + auto insRet = scannedParts_.insert(std::make_pair(partOfSpace, 0)); LOG(INFO) << "insert space=" << spaceId << ", part=" << partId - << ", into white list suc=" << insRet.second; + << ", into white list suc=" << std::boolalpha << insRet.second; } -folly::ConcurrentHashMap* TransactionManager::getReserveTable() { - return &reserveTable_; +folly::ConcurrentHashMap* TransactionManager::getDangleEdges() { + return &dangleEdges_; } } // namespace storage diff --git a/src/storage/transaction/TransactionManager.h b/src/storage/transaction/TransactionManager.h index 83441b958ea..acfc2517506 100644 --- a/src/storage/transaction/TransactionManager.h +++ b/src/storage/transaction/TransactionManager.h @@ -33,7 +33,9 @@ class TransactionManager { public: explicit TransactionManager(storage::StorageEnv* env); - ~TransactionManager() = default; + ~TransactionManager() { + stop(); + } void addChainTask(ChainBaseProcessor* proc) { folly::async([=] { @@ -49,19 +51,25 @@ class TransactionManager { return exec_.get(); } + bool start(); + + void stop(); + LockCore* getLockCore(GraphSpaceID spaceId, PartitionID partId, bool checkWhiteList = true); InternalStorageClient* getInternalClient() { return iClient_; } - StatusOr getTerm(GraphSpaceID spaceId, PartitionID partId); + // get term of part from kvstore, may fail if this part is not exist + std::pair getTerm(GraphSpaceID spaceId, PartitionID partId); - bool checkTerm(GraphSpaceID spaceId, PartitionID partId, TermID term); + // check get term from local term cache + // this is used by Chain...RemoteProcessor, + // to avoid an old leader request overrider a newer leader's + bool checkTermFromCache(GraphSpaceID spaceId, PartitionID partId, TermID termId); - bool start(); - - void stop(); + void reportFailed(); // leave a record for (double)prime edge, to let resume processor there is one 
dangling edge void addPrime(GraphSpaceID spaceId, const std::string& edgeKey, ResumeType type); @@ -70,11 +78,7 @@ class TransactionManager { bool checkUnfinishedEdge(GraphSpaceID spaceId, const folly::StringPiece& key); - // return false if there is no "edge" in reserveTable_ - // true if there is, and also erase the edge from reserveTable_. - bool takeDanglingEdge(GraphSpaceID spaceId, const std::string& edge); - - folly::ConcurrentHashMap* getReserveTable(); + folly::ConcurrentHashMap* getDangleEdges(); void scanPrimes(GraphSpaceID spaceId, PartitionID partId); @@ -106,18 +110,16 @@ class TransactionManager { std::unique_ptr resumeThread_; /** - * an update request may re-entered to an existing (double)prime key - * and wants to have its own (double)prime. - * also MVCC doesn't work. - * because (double)prime can't judge if remote side succeeded. - * to prevent insert/update re + * edges need to recover will put into this, + * resume processor will get edge from this then do resume. * */ - folly::ConcurrentHashMap reserveTable_; + folly::ConcurrentHashMap dangleEdges_; /** - * @brief only part in this white list allowed to get lock + * @brief every raft part need to do a scan, + * only scanned part allowed to insert edges */ - folly::ConcurrentHashMap, int> whiteListParts_; + folly::ConcurrentHashMap, int> scannedParts_; }; } // namespace storage