From a891780c36a069a38348c33d79242add3f5e267b Mon Sep 17 00:00:00 2001
From: V_Galaxy <1904821183@qq.com>
Date: Mon, 7 Aug 2023 23:15:28 +0800
Subject: [PATCH] refact: merge pd into hugegraph (#2270)

* init(pd): history merged until 2023.5.6

* refact: unify LF line separator

Change-Id: I3f38685af534468a51b79b7f45d24fdb30a74f34

* refact: add header & format in test & service & grpc & dist modules

Change-Id: Id2fa1374d7fa85635906a7e75655e7dad56f1807

* refact: add header/format in core & common & clitools & client modules

Change-Id: Id8ec0a0d754f646c59f7251158dd89de669b2016

* refact: format root/parent modules & pd-service package name & rearrange code

Change-Id: Id8ec0a0d754f646c59f7251158dd89de669b2016

* chore: search & replace to "org.apache" manually

- also replace "a/b/c" regex
- replace pom version to ${revision}

Change-Id: Ia3a9ce891fcff58cef4a5a1f95adb44ef8ee8049

* chore: fix some code problems

- lack `this` when accessing an instance field
- lack {} in 'if/while/for'

Change-Id: I37d21f1ab94529a8122f4598636fbe02b2264904

* refact: update code to Jun 2 2023

GraphPlatform-2020 fix cache bug when direct put

Change-Id: I52428d8d5d4bb977ee7796c0fe1e4ced07aff12d

GraphPlatform-2020 fix cache bug when direct put

Change-Id: Id57427a94732e12f2d96429a2b677d5c5f71e8ff

GraphPlatform-1771 update code

Change-Id: Ic3b95ca22900453adbc3571734e1da2c97e28355

* build: add flatten plugin and fix dependency version

1. Added the [flatten](https://github.com/apache/incubator-hugegraph/pull/2004) plugin.
2. Unified the global version to 1.5.0.
3. Modified the version of `jraft-core`.

---------

Co-authored-by: imbajin

* build: fix build for community edition compatibility (WIP)

Please refer to https://hugegraph.feishu.cn/wiki/Y6d2wys9KiWf24kpzNKct0Yknnr for detailed fix documentation.

---------

Co-authored-by: imbajin

* refact: adapt for jraft v1.3.13

* refact: GP-2141 handle low speed import - 6.9

from inner commit 0a95b1e587e65ea6a0a06774d53c80868f36af5b

---------

Co-authored-by: V_Galaxy <1904821183@qq.com>

* refact: GP-2141 handle low speed import - Compile OK

from inner commit 0a95b1e587e65ea6a0a06774d53c80868f36af5b

fix store compile problem

https://hugegraph.feishu.cn/wiki/Y6d2wys9KiWf24kpzNKct0Yknnr

---------

Co-authored-by: V_Galaxy <1904821183@qq.com>

* chore: enhance pom & start file

---------

Co-authored-by: V_Galaxy <1904821183@qq.com>

* refact: prepare before merge

* chore: fix pom for submodules in hugegraph-pd

* chore: add apache-rat-plugin for hugegraph-pd and add some license headers

* chore: clean some comment/code

---------

Co-authored-by: imbajin
Co-authored-by: What is broken can be reforged <34939118+GYXkeep@users.noreply.github.com>

--- hugegraph-pd/.gitattributes | 1 + hugegraph-pd/.gitignore | 13 + .../.mvn/wrapper/MavenWrapperDownloader.java | 121 ++ hugegraph-pd/.mvn/wrapper/maven-wrapper.jar | Bin 0 -> 50710 bytes .../.mvn/wrapper/maven-wrapper.properties | 18 + hugegraph-pd/README.md | 940 +++++++++ hugegraph-pd/build-pre.sh | 54 + hugegraph-pd/build.sh | 39 + hugegraph-pd/ci.yml | 43 + hugegraph-pd/conf/hugegraph.license | Bin 0 -> 856 bytes hugegraph-pd/conf/verify-license.json | 6 + hugegraph-pd/deploy-release.sh | 25 + hugegraph-pd/deploy-snapshot.sh | 25 + hugegraph-pd/hg-pd-client/pom.xml | 73 + .../hugegraph/pd/client/AbstractClient.java | 265 +++ .../pd/client/AbstractClientStubProxy.java | 72 + .../apache/hugegraph/pd/client/Channels.java | 44 + .../hugegraph/pd/client/ClientCache.java | 338 ++++ .../hugegraph/pd/client/Discoverable.java | 30 + .../hugegraph/pd/client/DiscoveryClient.java
| 221 ++ .../pd/client/DiscoveryClientImpl.java | 140 ++ .../apache/hugegraph/pd/client/KvClient.java | 352 ++++ .../hugegraph/pd/client/LicenseClient.java | 71 + .../apache/hugegraph/pd/client/PDClient.java | 1347 +++++++++++++ .../apache/hugegraph/pd/client/PDConfig.java | 82 + .../apache/hugegraph/pd/client/PDPulse.java | 152 ++ .../hugegraph/pd/client/PDPulseImpl.java | 196 ++ .../apache/hugegraph/pd/client/PDWatch.java | 137 ++ .../hugegraph/pd/client/PDWatchImpl.java | 202 ++ .../hugegraph/pd/pulse/PartitionNotice.java | 49 + .../hugegraph/pd/pulse/PulseServerNotice.java | 35 + .../apache/hugegraph/pd/watch/NodeEvent.java | 99 + .../apache/hugegraph/pd/watch/PDWatcher.java | 22 + .../hugegraph/pd/watch/PartitionEvent.java | 93 + .../apache/hugegraph/pd/watch/WatchType.java | 30 + .../hugegraph/pd/PartitionCacheTest.java | 102 + .../hugegraph/pd/StoreRegisterTest.java | 135 ++ .../pd/client/DiscoveryClientImplTest.java | 147 ++ .../pd/client/LicenseClientImplTest.java | 127 ++ .../hugegraph/pd/client/PDPulseTest.java | 109 + .../hugegraph/pd/client/PDWatchTest.java | 86 + .../pd/client/test/HgPDTestUtil.java | 92 + hugegraph-pd/hg-pd-clitools/pom.xml | 74 + .../apache/hugegraph/pd/clitools/Main.java | 85 + .../hugegraph/pd/clitools/MainTest.java | 80 + hugegraph-pd/hg-pd-common/pom.xml | 54 + .../hugegraph/pd/common/GraphCache.java | 62 + .../apache/hugegraph/pd/common/HgAssert.java | 117 ++ .../apache/hugegraph/pd/common/KVPair.java | 132 ++ .../hugegraph/pd/common/PDException.java | 47 + .../pd/common/PDRuntimeException.java | 49 + .../hugegraph/pd/common/PartitionCache.java | 460 +++++ .../hugegraph/pd/common/PartitionUtils.java | 47 + hugegraph-pd/hg-pd-core/pom.xml | 94 + .../apache/hugegraph/pd/ConfigService.java | 135 ++ .../org/apache/hugegraph/pd/IdService.java | 82 + .../org/apache/hugegraph/pd/KvService.java | 317 +++ .../org/apache/hugegraph/pd/LogService.java | 67 + .../pd/PartitionInstructionListener.java | 52 + .../apache/hugegraph/pd/PartitionService.java | 1563 ++++++++++++++ .../hugegraph/pd/PartitionStatusListener.java | 29 + .../apache/hugegraph/pd/RegistryService.java | 44 + .../pd/ShardGroupStatusListener.java | 26 + .../hugegraph/pd/StoreMonitorDataService.java | 266 +++ .../apache/hugegraph/pd/StoreNodeService.java | 1074 ++++++++++ .../hugegraph/pd/StoreStatusListener.java | 31 + .../hugegraph/pd/TaskScheduleService.java | 853 ++++++++ .../apache/hugegraph/pd/config/PDConfig.java | 277 +++ .../hugegraph/pd/meta/ConfigMetaStore.java | 73 + .../hugegraph/pd/meta/DiscoveryMetaStore.java | 105 + .../apache/hugegraph/pd/meta/IdMetaStore.java | 255 +++ .../org/apache/hugegraph/pd/meta/LogMeta.java | 48 + .../hugegraph/pd/meta/MetadataFactory.java | 87 + .../hugegraph/pd/meta/MetadataKeyHelper.java | 378 ++++ .../pd/meta/MetadataRocksDBStore.java | 184 ++ .../hugegraph/pd/meta/MetadataStoreBase.java | 124 ++ .../hugegraph/pd/meta/PartitionMeta.java | 295 +++ .../apache/hugegraph/pd/meta/QueueStore.java | 56 + .../hugegraph/pd/meta/StoreInfoMeta.java | 206 ++ .../hugegraph/pd/meta/TaskInfoMeta.java | 132 ++ .../pd/raft/FutureClosureAdapter.java | 48 + .../apache/hugegraph/pd/raft/KVOperation.java | 161 ++ .../hugegraph/pd/raft/KVStoreClosure.java | 33 + .../apache/hugegraph/pd/raft/RaftEngine.java | 377 ++++ .../hugegraph/pd/raft/RaftRpcClient.java | 87 + .../hugegraph/pd/raft/RaftRpcProcessor.java | 127 ++ .../hugegraph/pd/raft/RaftStateListener.java | 22 + .../hugegraph/pd/raft/RaftStateMachine.java | 330 +++ .../hugegraph/pd/raft/RaftTaskHandler.java | 27 + 
.../apache/hugegraph/pd/raft/ZipUtils.java | 93 + .../pd/store/BaseKVStoreClosure.java | 48 + .../apache/hugegraph/pd/store/HgKVStore.java | 58 + .../hugegraph/pd/store/HgKVStoreImpl.java | 343 ++++ .../org/apache/hugegraph/pd/store/KV.java | 45 + .../hugegraph/pd/store/RaftKVStore.java | 324 +++ .../hugegraph/pd/MonitorServiceTest.java | 112 + .../hugegraph/pd/PartitionServiceTest.java | 45 + .../hugegraph/pd/StoreNodeServiceTest.java | 485 +++++ .../org/apache/hugegraph/pd/UnitTestBase.java | 31 + .../pd/common/PartitionUtilsTest.java | 47 + .../hugegraph/pd/store/HgKVStoreImplTest.java | 105 + .../hg-pd-core/src/test/resources/log4j2.xml | 139 ++ hugegraph-pd/hg-pd-dist/pom.xml | 151 ++ .../assembly/descriptor/server-assembly.xml | 57 + .../assembly/static/bin/start-hugegraph-pd.sh | 123 ++ .../assembly/static/bin/stop-hugegraph-pd.sh | 49 + .../src/assembly/static/bin/util.sh | 372 ++++ .../src/assembly/static/conf/application.yml | 78 + .../static/conf/application.yml.template | 71 + .../assembly/static/conf/hugegraph.license | Bin 0 -> 856 bytes .../src/assembly/static/conf/log4j2.xml | 134 ++ .../assembly/static/conf/verify-license.json | 6 + hugegraph-pd/hg-pd-grpc/pom.xml | 138 ++ .../hg-pd-grpc/src/main/proto/discovery.proto | 54 + .../hg-pd-grpc/src/main/proto/kv.proto | 126 ++ .../hg-pd-grpc/src/main/proto/metaTask.proto | 48 + .../hg-pd-grpc/src/main/proto/metapb.proto | 377 ++++ .../hg-pd-grpc/src/main/proto/pd_common.proto | 36 + .../hg-pd-grpc/src/main/proto/pd_pulse.proto | 155 ++ .../hg-pd-grpc/src/main/proto/pd_watch.proto | 86 + .../hg-pd-grpc/src/main/proto/pdpb.proto | 590 ++++++ hugegraph-pd/hg-pd-service/pom.xml | 151 ++ .../hugegraph/pd/boot/HugePDServer.java | 43 + .../pd/license/LicenseVerifierService.java | 426 ++++ .../pd/license/LicenseVerifyManager.java | 77 + .../hugegraph/pd/metrics/MetricsConfig.java | 44 + .../hugegraph/pd/metrics/PDMetrics.java | 114 ++ .../apache/hugegraph/pd/model/DemoModel.java | 73 + .../hugegraph/pd/model/GraphRestRequest.java | 26 + .../pd/model/GraphSpaceRestRequest.java | 25 + .../hugegraph/pd/model/PeerRestRequest.java | 25 + .../hugegraph/pd/model/PromTargetsModel.java | 90 + .../pd/model/RegistryQueryRestRequest.java | 30 + .../pd/model/RegistryRestRequest.java | 33 + .../pd/model/RegistryRestResponse.java | 33 + .../hugegraph/pd/model/RestApiResponse.java | 53 + .../hugegraph/pd/model/StoreRestRequest.java | 25 + .../hugegraph/pd/model/TimeRangeRequest.java | 26 + .../pd/notice/NoticeBroadcaster.java | 173 ++ .../pd/pulse/AbstractObserverSubject.java | 223 ++ .../hugegraph/pd/pulse/PDPulseSubject.java | 444 ++++ .../pd/pulse/PartitionHeartbeatSubject.java | 60 + .../pd/pulse/PdInstructionSubject.java | 56 + .../hugegraph/pd/pulse/PulseListener.java | 39 + .../org/apache/hugegraph/pd/rest/API.java | 186 ++ .../apache/hugegraph/pd/rest/GraphAPI.java | 288 +++ .../hugegraph/pd/rest/GraphSpaceAPI.java | 99 + .../apache/hugegraph/pd/rest/IndexAPI.java | 255 +++ .../apache/hugegraph/pd/rest/MemberAPI.java | 239 +++ .../hugegraph/pd/rest/PartitionAPI.java | 475 +++++ .../hugegraph/pd/rest/PromTargetsAPI.java | 92 + .../apache/hugegraph/pd/rest/RegistryAPI.java | 202 ++ .../apache/hugegraph/pd/rest/ShardAPI.java | 119 ++ .../apache/hugegraph/pd/rest/StoreAPI.java | 355 ++++ .../org/apache/hugegraph/pd/rest/TaskAPI.java | 101 + .../org/apache/hugegraph/pd/rest/TestAPI.java | 163 ++ .../pd/service/DiscoveryService.java | 147 ++ .../pd/service/KvServiceGrpcImpl.java | 592 ++++++ .../hugegraph/pd/service/PDPulseService.java | 115 ++ 
.../hugegraph/pd/service/PDRestService.java | 272 +++ .../hugegraph/pd/service/PDService.java | 1796 +++++++++++++++++ .../hugegraph/pd/service/PDWatchService.java | 37 + .../pd/service/PromTargetsService.java | 257 +++ .../hugegraph/pd/service/ServiceGrpc.java | 94 + .../hugegraph/pd/service/UpgradeService.java | 110 + .../pd/upgrade/VersionScriptFactory.java | 57 + .../pd/upgrade/VersionUpgradeScript.java | 59 + .../upgrade/scripts/PartitionMetaUpgrade.java | 120 ++ .../pd/upgrade/scripts/TaskCleanUpgrade.java | 65 + .../apache/hugegraph/pd/util/DateUtil.java | 71 + .../hugegraph/pd/util/HgExecutorUtil.java | 180 ++ .../apache/hugegraph/pd/util/HgMapCache.java | 103 + .../org/apache/hugegraph/pd/util/IdUtil.java | 47 + .../pd/util/grpc/GRpcServerConfig.java | 44 + .../pd/util/grpc/StreamObserverUtil.java | 49 + .../pd/watch/AbstractWatchSubject.java | 164 ++ .../hugegraph/pd/watch/KvWatchSubject.java | 288 +++ .../hugegraph/pd/watch/NodeChangeSubject.java | 65 + .../hugegraph/pd/watch/PDWatchSubject.java | 216 ++ .../pd/watch/PartitionChangeSubject.java | 63 + .../pd/watch/ShardGroupChangeSubject.java | 55 + .../src/main/resources/application.yml | 80 + .../src/main/resources/banner.txt | 6 + .../src/main/resources/log4j2.xml | 139 ++ .../src/test/java/live/PDServer0.java | 48 + .../src/test/java/live/PDServer1.java | 47 + .../src/test/java/live/PDServer2.java | 47 + .../src/test/java/live/PDServer3.java | 47 + .../test/resources/application-server0.yml | 71 + .../test/resources/application-server1.yml | 71 + .../test/resources/application-server2.yml | 73 + .../test/resources/application-server3.yml | 73 + .../src/test/resources/banner.txt | 6 + .../src/test/resources/log4j2.xml | 139 ++ hugegraph-pd/hg-pd-test/pom.xml | 370 ++++ .../hugegraph/pd/client/BaseClientTest.java | 44 + .../pd/client/DiscoveryClientTest.java | 79 + .../hugegraph/pd/client/KvClientTest.java | 121 ++ .../pd/client/PDClientSuiteTest.java | 36 + .../hugegraph/pd/client/PDClientTest.java | 418 ++++ .../pd/clitools/BaseCliToolsTest.java | 34 + .../pd/clitools/CliToolsSuiteTest.java | 35 + .../hugegraph/pd/clitools/MainTest.java | 89 + .../hugegraph/pd/common/BaseCommonTest.java | 33 + .../hugegraph/pd/common/CommonSuiteTest.java | 43 + .../hugegraph/pd/common/HgAssertTest.java | 134 ++ .../hugegraph/pd/common/KVPairTest.java | 73 + .../pd/common/MetadataKeyHelperTest.java | 217 ++ .../pd/common/PartitionCacheTest.java | 392 ++++ .../pd/common/PartitionUtilsTest.java | 35 + .../hugegraph/pd/core/BaseCoreTest.java | 74 + .../hugegraph/pd/core/PDCoreSuiteTest.java | 37 + .../pd/core/StoreNodeServiceTest.java | 119 ++ .../pd/core/meta/MetadataKeyHelperTest.java | 34 + .../hugegraph/pd/grpc/BaseGrpcTest.java | 36 + .../hugegraph/pd/grpc/GrpcSuiteTest.java | 33 + .../hugegraph/pd/service/BaseServerTest.java | 57 + .../pd/service/ConfigServiceTest.java | 106 + .../hugegraph/pd/service/IdServiceTest.java | 109 + .../hugegraph/pd/service/KvServiceTest.java | 60 + .../hugegraph/pd/service/LogServiceTest.java | 54 + .../pd/service/PartitionServiceTest.java | 133 ++ .../hugegraph/pd/service/PdTestBase.java | 219 ++ .../hugegraph/pd/service/RestApiTest.java | 120 ++ .../hugegraph/pd/service/ServerSuiteTest.java | 42 + .../service/StoreMonitorDataServiceTest.java | 82 + .../pd/service/StoreNodeServiceNewTest.java | 64 + .../pd/service/StoreServiceTest.java | 833 ++++++++ .../pd/service/TaskScheduleServiceTest.java | 114 ++ hugegraph-pd/local-release.sh | 25 + hugegraph-pd/mvnw | 308 +++ hugegraph-pd/mvnw.cmd | 182 ++ 
hugegraph-pd/pom.xml | 318 +++ hugegraph-pd/settings.xml | 133 ++ hugegraph-pd/start_pd_server.sh | 55 + pom.xml | 2 +- 236 files changed, 36208 insertions(+), 1 deletion(-) create mode 100644 hugegraph-pd/.gitattributes create mode 100644 hugegraph-pd/.gitignore create mode 100644 hugegraph-pd/.mvn/wrapper/MavenWrapperDownloader.java create mode 100644 hugegraph-pd/.mvn/wrapper/maven-wrapper.jar create mode 100644 hugegraph-pd/.mvn/wrapper/maven-wrapper.properties create mode 100644 hugegraph-pd/build-pre.sh create mode 100644 hugegraph-pd/build.sh create mode 100644 hugegraph-pd/ci.yml create mode 100644 hugegraph-pd/conf/hugegraph.license create mode 100644 hugegraph-pd/conf/verify-license.json create mode 100644 hugegraph-pd/deploy-release.sh create mode 100644 hugegraph-pd/deploy-snapshot.sh create mode 100644 hugegraph-pd/hg-pd-client/pom.xml create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java create mode 100644 hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java create mode 100644 hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java create mode 100644 hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java create mode 100644 
hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java create mode 100644 hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java create mode 100644 hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java create mode 100644 hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java create mode 100644 hugegraph-pd/hg-pd-clitools/pom.xml create mode 100644 hugegraph-pd/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java create mode 100644 hugegraph-pd/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java create mode 100644 hugegraph-pd/hg-pd-common/pom.xml create mode 100644 hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java create mode 100644 hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java create mode 100644 hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java create mode 100644 hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java create mode 100644 hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java create mode 100644 hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java create mode 100644 hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java create mode 100644 hugegraph-pd/hg-pd-core/pom.xml create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java create mode 100644 
hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java create mode 100644 hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java create mode 100644 hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java create mode 100644 hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java create mode 100644 hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java create mode 100644 hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java create mode 100644 hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java create mode 100644 hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java create mode 100644 hugegraph-pd/hg-pd-core/src/test/resources/log4j2.xml create mode 100644 hugegraph-pd/hg-pd-dist/pom.xml create mode 100644 hugegraph-pd/hg-pd-dist/src/assembly/descriptor/server-assembly.xml create mode 100644 hugegraph-pd/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh create mode 100644 hugegraph-pd/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh create mode 100644 hugegraph-pd/hg-pd-dist/src/assembly/static/bin/util.sh create mode 100644 hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml create mode 100644 
hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml.template create mode 100644 hugegraph-pd/hg-pd-dist/src/assembly/static/conf/hugegraph.license create mode 100644 hugegraph-pd/hg-pd-dist/src/assembly/static/conf/log4j2.xml create mode 100644 hugegraph-pd/hg-pd-dist/src/assembly/static/conf/verify-license.json create mode 100644 hugegraph-pd/hg-pd-grpc/pom.xml create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto create mode 100644 hugegraph-pd/hg-pd-service/pom.xml create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java create mode 100644 
hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java create mode 100644 
hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java create mode 100644 hugegraph-pd/hg-pd-service/src/main/resources/application.yml create mode 100644 hugegraph-pd/hg-pd-service/src/main/resources/banner.txt create mode 100644 hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml create mode 100644 hugegraph-pd/hg-pd-service/src/test/java/live/PDServer0.java create mode 100644 hugegraph-pd/hg-pd-service/src/test/java/live/PDServer1.java create mode 100644 hugegraph-pd/hg-pd-service/src/test/java/live/PDServer2.java create mode 100644 hugegraph-pd/hg-pd-service/src/test/java/live/PDServer3.java create mode 100644 hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml create mode 100644 hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml create mode 100644 hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml create mode 100644 hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml create mode 100644 hugegraph-pd/hg-pd-service/src/test/resources/banner.txt create mode 100644 hugegraph-pd/hg-pd-service/src/test/resources/log4j2.xml create mode 100644 hugegraph-pd/hg-pd-test/pom.xml create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java create mode 100644 
hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java create mode 100755 hugegraph-pd/local-release.sh create mode 100644 hugegraph-pd/mvnw create mode 100644 hugegraph-pd/mvnw.cmd create mode 100644 hugegraph-pd/pom.xml create mode 100644 hugegraph-pd/settings.xml create mode 100644 hugegraph-pd/start_pd_server.sh diff --git a/hugegraph-pd/.gitattributes b/hugegraph-pd/.gitattributes new file mode 100644 index 0000000000..4fd3cf5c2a --- /dev/null +++ b/hugegraph-pd/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf diff --git a/hugegraph-pd/.gitignore b/hugegraph-pd/.gitignore new file mode 100644 index 0000000000..689a7a80b3 --- /dev/null +++ b/hugegraph-pd/.gitignore @@ -0,0 +1,13 @@ +/hg-pd-grpc/src/main/java/ +/.idea/ +/hg-pd-grpc/target/ +/dist/ +**/target/ +.DS_Store +**/tmp/ +*.log +*.iml +/hg-pd-common/target_B000000405016P_Oct-28-114458-2021_conflict_parent/ + +dist/ +.flattened-pom.xml diff --git a/hugegraph-pd/.mvn/wrapper/MavenWrapperDownloader.java b/hugegraph-pd/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100644 index 0000000000..574feef6be --- /dev/null +++ b/hugegraph-pd/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,121 @@ +/* + * Copyright 2007-present the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + private static final String WRAPPER_VERSION = "0.5.6"; + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. + */ + private static final String DEFAULT_DOWNLOAD_URL = + "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" + + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. + */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the + * wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. + File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if (mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if (mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... 
+ } + } + } + System.out.println("- Downloading from: " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if (!outputFile.getParentFile().exists()) { + if (!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output directory '" + + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { + String username = System.getenv("MVNW_USERNAME"); + char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); + Authenticator.setDefault(new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + return new PasswordAuthentication(username, password); + } + }); + } + URL website = new URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/hugegraph-pd/.mvn/wrapper/maven-wrapper.jar b/hugegraph-pd/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..2cc7d4a55c0cd0092912bf49ae38b3a9e3fd0054 GIT binary patch literal 50710 [50710 bytes of base85-encoded binary data for maven-wrapper.jar omitted]
zTh5A|i6=2BA(iMCnj_fob@*eA;V?oa4Z1kRBGaU07O70fb6-qmA$Hg$ps@^ka1=RO zTbE_2#)1bndC3VuK@e!Sftxq4=Uux}fDxXE#Q5_x=E1h>T5`DPHz zbH<_OjWx$wy7=%0!mo*qH*7N4tySm+R0~(rbus`7;+wGh;C0O%x~fEMkt!eV>U$`i z5>Q(o z=t$gPjgGh0&I7KY#k50V7DJRX<%^X z>6+ebc9efB3@eE2Tr){;?_w`vhgF>`-GDY(YkR{9RH(MiCnyRtd!LxXJ75z+?2 zGi@m^+2hKJ5sB1@Xi@s_@p_Kwbc<*LQ_`mr^Y%j}(sV_$`J(?_FWP)4NW*BIL~sR>t6 zM;qTJZ~GoY36&{h-Pf}L#y2UtR}>ZaI%A6VkU>vG4~}9^i$5WP2Tj?Cc}5oQxe2=q z8BeLa$hwCg_psjZyC2+?yX4*hJ58Wu^w9}}7X*+i5Rjqu5^@GzXiw#SUir1G1`jY% zOL=GE_ENYxhcyUrEt9XlMNP6kx6h&%6^u3@zB8KUCAa18T(R2J`%JjWZ z!{7cXaEW+Qu*iJPu+m>QqW}Lo$4Z+!I)0JNzZ&_M%=|B1yejFRM04bGAvu{=lNPd+ zJRI^DRQ(?FcVUD+bgEcAi@o(msqys9RTCG#)TjI!9~3-dc`>gW;HSJuQvH~d`MQs86R$|SKXHh zqS9Qy)u;T`>>a!$LuaE2keJV%;8g)tr&Nnc;EkvA-RanHXsy)D@XN0a>h}z2j81R; zsUNJf&g&rKpuD0WD@=dDrPHdBoK42WoBU|nMo17o(5^;M|dB4?|FsAGVrSyWcI`+FVw^vTVC`y}f(BwJl zrw3Sp151^9=}B})6@H*i4-dIN_o^br+BkcLa^H56|^2XsT0dESw2 zMX>(KqNl=x2K5=zIKg}2JpGAZu{I_IO}0$EQ5P{4zol**PCt3F4`GX}2@vr8#Y)~J zKb)gJeHcFnR@4SSh%b;c%J`l=W*40UPjF#q{<}ywv-=vHRFmDjv)NtmC zQx9qm)d%0zH&qG7AFa3VAU1S^(n8VFTC~Hb+HjYMjX8r#&_0MzlNR*mnLH5hi}`@{ zK$8qiDDvS_(L9_2vHgzEQ${DYSE;DqB!g*jhJghE&=LTnbgl&Xepo<*uRtV{2wDHN z)l;Kg$TA>Y|K8Lc&LjWGj<+bp4Hiye_@BfU(y#nF{fpR&|Ltbye?e^j0}8JC4#xi% zv29ZR%8%hk=3ZDvO-@1u8KmQ@6p%E|dlHuy#H1&MiC<*$YdLkHmR#F3ae;bKd;@*i z2_VfELG=B}JMLCO-6UQy^>RDE%K4b>c%9ki`f~Z2Qu8hO7C#t%Aeg8E%+}6P7Twtg z-)dj(w}_zFK&86KR@q9MHicUAucLVshUdmz_2@32(V`y3`&Kf8Q2I)+!n0mR=rrDU zXvv^$ho;yh*kNqJ#r1}b0|i|xRUF6;lhx$M*uG3SNLUTC@|htC z-=fsw^F%$qqz4%QdjBrS+ov}Qv!z00E+JWas>p?z@=t!WWU3K*?Z(0meTuTOC7OTx zU|kFLE0bLZ+WGcL$u4E}5dB0g`h|uwv3=H6f+{5z9oLv-=Q45+n~V4WwgO=CabjM% zBAN+RjM65(-}>Q2V#i1Na@a0`08g&y;W#@sBiX6Tpy8r}*+{RnyGUT`?XeHSqo#|J z^ww~c;ou|iyzpErDtlVU=`8N7JSu>4M z_pr9=tX0edVn9B}YFO2y(88j#S{w%E8vVOpAboK*27a7e4Ekjt0)hIX99*1oE;vex z7#%jhY=bPijA=Ce@9rRO(Vl_vnd00!^TAc<+wVvRM9{;hP*rqEL_(RzfK$er_^SN; z)1a8vo8~Dr5?;0X0J62Cusw$A*c^Sx1)dom`-)Pl7hsW4i(r*^Mw`z5K>!2ixB_mu z*Ddqjh}zceRFdmuX1akM1$3>G=#~|y?eYv(e-`Qy?bRHIq=fMaN~fB zUa6I8Rt=)jnplP>yuS+P&PxeWpJ#1$F`iqRl|jF$WL_aZFZl@kLo&d$VJtu&w?Q0O zzuXK>6gmygq(yXJy0C1SL}T8AplK|AGNUOhzlGeK_oo|haD@)5PxF}rV+5`-w{Aag zus45t=FU*{LguJ11Sr-28EZkq;!mJO7AQGih1L4rEyUmp>B!%X0YemsrV3QFvlgt* z5kwlPzaiJ+kZ^PMd-RRbl(Y?F*m`4*UIhIuf#8q>H_M=fM*L_Op-<_r zBZagV=4B|EW+KTja?srADTZXCd3Yv%^Chfpi)cg{ED${SI>InNpRj5!euKv?=Xn92 zsS&FH(*w`qLIy$doc>RE&A5R?u zzkl1sxX|{*fLpXvIW>9d<$ePROttn3oc6R!sN{&Y+>Jr@yeQN$sFR z;w6A<2-0%UA?c8Qf;sX7>>uKRBv3Ni)E9pI{uVzX|6Bb0U)`lhLE3hK58ivfRs1}d zNjlGK0hdq0qjV@q1qI%ZFMLgcpWSY~mB^LK)4GZ^h_@H+3?dAe_a~k*;9P_d7%NEFP6+ zgV(oGr*?W(ql?6SQ~`lUsjLb%MbfC4V$)1E0Y_b|OIYxz4?O|!kRb?BGrgiH5+(>s zoqM}v*;OBfg-D1l`M6T6{K`LG+0dJ1)!??G5g(2*vlNkm%Q(MPABT$r13q?|+kL4- zf)Mi5r$sn;u41aK(K#!m+goyd$c!KPl~-&-({j#D4^7hQkV3W|&>l_b!}!z?4($OA z5IrkfuT#F&S1(`?modY&I40%gtroig{YMvF{K{>5u^I51k8RriGd${z)=5k2tG zM|&Bp5kDTfb#vfuTTd?)a=>bX=lokw^y9+2LS?kwHQIWI~pYgy7 zb?A-RKVm_vM5!9?C%qYdfRAw& zAU7`up~%g=p@}pg#b7E)BFYx3g%(J36Nw(Dij!b>cMl@CSNbrW!DBDbTD4OXk!G4x zi}JBKc8HBYx$J~31PXH+4^x|UxK~(<@I;^3pWN$E=sYma@JP|8YL`L(zI6Y#c%Q{6 z*APf`DU$S4pr#_!60BH$FGViP14iJmbrzSrOkR;f3YZa{#E7Wpd@^4E-zH8EgPc-# zKWFPvh%WbqU_%ZEt`=Q?odKHc7@SUmY{GK`?40VuL~o)bS|is$Hn=<=KGHOsEC5tB zFb|q}gGlL97NUf$G$>^1b^3E18PZ~Pm9kX%*ftnolljiEt@2#F2R5ah$zbXd%V_Ev zyDd{1o_uuoBga$fB@Fw!V5F3jIr=a-ykqrK?WWZ#a(bglI_-8pq74RK*KfQ z0~Dzus7_l;pMJYf>Bk`)`S8gF!To-BdMnVw5M-pyu+aCiC5dwNH|6fgRsIKZcF&)g zr}1|?VOp}I3)IR@m1&HX1~#wsS!4iYqES zK}4J{Ei>;e3>LB#Oly>EZkW14^@YmpbgxCDi#0RgdM${&wxR+LiX}B+iRioOB0(pDKpVEI;ND?wNx>%e|m{RsqR_{(nmQ z3ZS}@t!p4a(BKx_-CYwrcyJ5u1TO9bcXti$8sy>xcLKqKCc#~UOZYD{llKTSFEjJ~ 
zyNWt>tLU}*>^`TvPxtP%F`ZJQw@W0^>x;!^@?k_)9#bF$j0)S3;mH-IR5y82l|%=F z2lR8zhP?XNP-ucZZ6A+o$xOyF!w;RaLHGh57GZ|TCXhJqY~GCh)aXEV$1O&$c}La1 zjuJxkY9SM4av^Hb;i7efiYaMwI%jGy`3NdY)+mcJhF(3XEiSlU3c|jMBi|;m-c?~T z+x0_@;SxcoY=(6xNgO$bBt~Pj8`-<1S|;Bsjrzw3@zSjt^JC3X3*$HI79i~!$RmTz zsblZsLYs7L$|=1CB$8qS!tXrWs!F@BVuh?kN(PvE5Av-*r^iYu+L^j^m9JG^#=m>@ z=1soa)H*w6KzoR$B8mBCXoU;f5^bVuwQ3~2LKg!yxomG1#XPmn(?YH@E~_ED+W6mxs%x{%Z<$pW`~ON1~2XjP5v(0{C{+6Dm$00tsd3w=f=ZENy zOgb-=f}|Hb*LQ$YdWg<(u7x3`PKF)B7ZfZ6;1FrNM63 z?O6tE%EiU@6%rVuwIQjvGtOofZBGZT1Sh(xLIYt9c4VI8`!=UJd2BfLjdRI#SbVAX ziT(f*RI^T!IL5Ac>ql7uduF#nuCRJ1)2bdvAyMxp-5^Ww5p#X{rb5)(X|fEhDHHW{ zw(Lfc$g;+Q`B0AiPGtmK%*aWfQQ$d!*U<|-@n2HZvCWSiw^I>#vh+LyC;aaVWGbmkENr z&kl*8o^_FW$T?rDYLO1Pyi%>@&kJKQoH2E0F`HjcN}Zlnx1ddoDA>G4Xu_jyp6vuT zPvC}pT&Owx+qB`zUeR|4G;OH(<<^_bzkjln0k40t`PQxc$7h(T8Ya~X+9gDc8Z9{Z z&y0RAU}#_kQGrM;__MK9vwIwK^aoqFhk~dK!ARf1zJqHMxF2?7-8|~yoO@_~Ed;_wvT%Vs{9RK$6uUQ|&@#6vyBsFK9eZW1Ft#D2)VpQRwpR(;x^ zdoTgMqfF9iBl%{`QDv7B0~8{8`8k`C4@cbZAXBu00v#kYl!#_Wug{)2PwD5cNp?K^ z9+|d-4z|gZ!L{57>!Ogfbzchm>J1)Y%?NThxIS8frAw@z>Zb9v%3_3~F@<=LG%r*U zaTov}{{^z~SeX!qgSYow`_5)ij*QtGp4lvF`aIGQ>@3ZTkDmsl#@^5*NGjOuu82}o zzLF~Q9SW+mP=>88%eSA1W4_W7-Q>rdq^?t=m6}^tDPaBRGFLg%ak93W!kOp#EO{6& zP%}Iff5HZQ9VW$~+9r=|Quj#z*=YwcnssS~9|ub2>v|u1JXP47vZ1&L1O%Z1DsOrDfSIMHU{VT>&>H=9}G3i@2rP+rx@eU@uE8rJNec zij~#FmuEBj03F1~ct@C@$>y)zB+tVyjV3*n`mtAhIM0$58vM9jOQC}JJOem|EpwqeMuYPxu3sv}oMS?S#o6GGK@8PN59)m&K4Dc&X% z(;XL_kKeYkafzS3Wn5DD>Yiw{LACy_#jY4op(>9q>>-*9@C0M+=b#bknAWZ37^(Ij zq>H%<@>o4a#6NydoF{_M4i4zB_KG)#PSye9bk0Ou8h%1Dtl7Q_y#7*n%g)?m>xF~( zjqvOwC;*qvN_3(*a+w2|ao0D?@okOvg8JskUw(l7n`0fncglavwKd?~l_ryKJ^Ky! zKCHkIC-o7%fFvPa$)YNh022lakMar^dgL=t#@XLyNHHw!b?%WlM)R@^!)I!smZL@k zBi=6wE5)2v&!UNV(&)oOYW(6Qa!nUjDKKBf-~Da=#^HE4(@mWk)LPvhyN3i4goB$3K8iV7uh zsv+a?#c4&NWeK(3AH;ETrMOIFgu{_@%XRwCZ;L=^8Ts)hix4Pf3yJRQ<8xb^CkdmC z?c_gB)XmRsk`9ch#tx4*hO=#qS7={~Vb4*tTf<5P%*-XMfUUYkI9T1cEF;ObfxxI-yNuA=I$dCtz3ey znVkctYD*`fUuZ(57+^B*R=Q}~{1z#2!ca?)+YsRQb+lt^LmEvZt_`=j^wqig+wz@n@ z`LIMQJT3bxMzuKg8EGBU+Q-6cs5(@5W?N>JpZL{$9VF)veF`L5%DSYTNQEypW%6$u zm_~}T{HeHj1bAlKl8ii92l9~$dm=UM21kLemA&b$;^!wB7#IKWGnF$TVq!!lBlG4 z{?Rjz?P(uvid+|i$VH?`-C&Gcb3{(~Vpg`w+O);Wk1|Mrjxrht0GfRUnZqz2MhrXa zqgVC9nemD5)H$to=~hp)c=l9?#~Z_7i~=U-`FZxb-|TR9@YCxx;Zjo-WpMNOn2)z) zFPGGVl%3N$f`gp$gPnWC+f4(rmts%fidpo^BJx72zAd7|*Xi{2VXmbOm)1`w^tm9% znM=0Fg4bDxH5PxPEm{P3#A(mxqlM7SIARP?|2&+c7qmU8kP&iApzL|F>Dz)Ixp_`O zP%xrP1M6@oYhgo$ZWwrAsYLa4 z|I;DAvJxno9HkQrhLPQk-8}=De{9U3U%)dJ$955?_AOms!9gia%)0E$Mp}$+0er@< zq7J&_SzvShM?e%V?_zUu{niL@gt5UFOjFJUJ}L?$f%eU%jUSoujr{^O=?=^{19`ON zlRIy8Uo_nqcPa6@yyz`CM?pMJ^^SN^Fqtt`GQ8Q#W4kE7`V9^LT}j#pMChl!j#g#J zr-=CCaV%xyFeQ9SK+mG(cTwW*)xa(eK;_Z(jy)woZp~> zA(4}-&VH+TEeLzPTqw&FOoK(ZjD~m{KW05fiGLe@E3Z2`rLukIDahE*`u!ubU)9`o zn^-lyht#E#-dt~S>}4y$-mSbR8{T@}22cn^refuQ08NjLOv?JiEWjyOnzk<^R5%gO zhUH_B{oz~u#IYwVnUg8?3P*#DqD8#X;%q%HY**=I>>-S|!X*-!x1{^l#OnR56O>iD zc;i;KS+t$koh)E3)w0OjWJl_aW2;xF=9D9Kr>)(5}4FqUbk# zI#$N8o0w;IChL49m9CJTzoC!|u{Ljd%ECgBOf$}&jA^$(V#P#~)`&g`H8E{uv52pp zwto`xUL-L&WTAVREEm$0g_gYPL(^vHq(*t1WCH_6alhkeW&GCZ3hL)|{O-jiFOBrF z!EW=Jej|dqQitT6!B-7&io2K)WIm~Q)v@yq%U|VpV+I?{y0@Yd%n8~-NuuM*pM~KA z85YB};IS~M(c<}4Hxx>qRK0cdl&e?t253N%vefkgds>Ubn8X}j6Vpgs>a#nFq$osY z1ZRwLqFv=+BTb=i%D2Wv>_yE0z}+niZ4?rE|*a3d7^kndWGwnFqt+iZ(7+aln<}jzbAQ(#Z2SS}3S$%Bd}^ zc9ghB%O)Z_mTZMRC&H#)I#fiLuIkGa^`4e~9oM5zKPx?zjkC&Xy0~r{;S?FS%c7w< zWbMpzc(xSw?9tGxG~_l}Acq}zjt5ClaB7-!vzqnlrX;}$#+PyQ9oU)_DfePh2E1<7 ztok6g6K^k^DuHR*iJ?jw?bs_whk|bx`dxu^nC6#e{1*m~z1eq7m}Cf$*^Eua(oi_I zAL+3opNhJteu&mWQ@kQWPucmiP)4|nFG`b2tpC;h{-PI@`+h?9v=9mn|0R-n8#t=+Z*FD(c5 
zjj79Jxkgck*DV=wpFgRZuwr%}KTm+dx?RT@aUHJdaX-ODh~gByS?WGx&czAkvkg;x zrf92l8$Or_zOwJVwh>5rB`Q5_5}ef6DjS*$x30nZbuO3dijS*wvNEqTY5p1_A0gWr znH<(Qvb!os14|R)n2Ost>jS2;d1zyLHu`Svm|&dZD+PpP{Bh>U&`Md;gRl64q;>{8MJJM$?UNUd`aC>BiLe>*{ zJY15->yW+<3rLgYeTruFDtk1ovU<$(_y7#HgUq>)r0{^}Xbth}V#6?%5jeFYt;SG^ z3qF)=uWRU;Jj)Q}cpY8-H+l_n$2$6{ZR?&*IGr{>ek!69ZH0ZoJ*Ji+ezzlJ^%qL3 zO5a`6gwFw(moEzqxh=yJ9M1FTn!eo&qD#y5AZXErHs%22?A+JmS&GIolml!)rZTnUDM3YgzYfT#;OXn)`PWv3Ta z!-i|-Wojv*k&bC}_JJDjiAK(Ba|YZgUI{f}TdEOFT2+}nPmttytw7j%@bQZDV1vvj z^rp{gRkCDmYJHGrE1~e~AE!-&6B6`7UxVQuvRrfdFkGX8H~SNP_X4EodVd;lXd^>eV1jN+Tt4}Rsn)R0LxBz0c=NXU|pUe!MQQFkGBWbR3&(jLm z%RSLc#p}5_dO{GD=DEFr=Fc% z85CBF>*t!6ugI?soX(*JNxBp+-DdZ4X0LldiK}+WWGvXV(C(Ht|!3$psR=&c*HIM=BmX;pRIpz@Ale{9dhGe(U2|Giv;# zOc|;?p67J=Q(kamB*aus=|XP|m{jN^6@V*Bpm?ye56Njh#vyJqE=DweC;?Rv7faX~ zde03n^I~0B2vUmr;w^X37tVxUK?4}ifsSH5_kpKZIzpYu0;Kv}SBGfI2AKNp+VN#z`nI{UNDRbo-wqa4NEls zICRJpu)??cj^*WcZ^MAv+;bDbh~gpN$1Cor<{Y2oyIDws^JsfW^5AL$azE(T0p&pP z1Mv~6Q44R&RHoH95&OuGx2srIr<@zYJTOMKiVs;Bx3py89I87LOb@%mr`0)#;7_~Z zzcZj8?w=)>%5@HoCHE_&hnu(n_yQ-L(~VjpjjkbT7e)Dk5??fApg(d>vwLRJ-x{um z*Nt?DqTSxh_MIyogY!vf1mU1`Gld-&L)*43f6dilz`Q@HEz;+>MDDYv9u!s;WXeao zUq=TaL$P*IFgJzrGc>j1dDOd zed+=ZBo?w4mr$2)Ya}?vedDopomhW1`#P<%YOJ_j=WwClX0xJH-f@s?^tmzs_j7t!k zK@j^zS0Q|mM4tVP5Ram$VbS6|YDY&y?Q1r1joe9dj08#CM{RSMTU}(RCh`hp_Rkl- zGd|Cv~G@F{DLhCizAm9AN!^{rNs8hu!G@8RpnGx7e`-+K$ffN<0qjR zGq^$dj_Tv!n*?zOSyk5skI7JVKJ)3jysnjIu-@VSzQiP8r6MzudCU=~?v-U8yzo^7 zGf~SUTvEp+S*!X9uX!sq=o}lH;r{pzk~M*VA(uyQ`3C8!{C;)&6)95fv(cK!%Cuz$ z_Zal57H6kPN>25KNiI6z6F)jzEkh#%OqU#-__Xzy)KyH};81#N6OfX$$IXWzOn`Q& z4f$Z1t>)8&8PcYfEwY5UadU1yg+U*(1m2ZlHoC-!2?gB!!fLhmTl))D@dhvkx#+Yj z1O=LV{(T%{^IeCuFK>%QR!VZ4GnO5tK8a+thWE zg4VytZrwcS?7^ zuZfhYnB8dwd%VLO?DK7pV5Wi<(`~DYqOXn8#jUIL^)12*Dbhk4GmL_E2`WX&iT16o zk(t|hok(Y|v-wzn?4x34T)|+SfZP>fiq!><*%vnxGN~ypST-FtC+@TPv*vYv@iU!_ z@2gf|PrgQ?Ktf*9^CnJ(x*CtZVB8!OBfg0%!wL;Z8(tYYre0vcnPGlyCc$V(Ipl*P z_(J!a=o@vp^%Efme!K74(Ke7A>Y}|sxV+JL^aYa{~m%5#$$+R1? 
zGaQhZTTX!#s#=Xtpegqero$RNt&`4xn3g$)=y*;=N=Qai)}~`xtxI_N*#MMCIq#HFifT zz(-*m;pVH&+4bixL&Bbg)W5FN^bH87pAHp)zPkWNMfTFqS=l~AC$3FX3kQUSh_C?-ZftyClgM)o_D7cX$RGlEYblux0jv5 zTr|i-I3@ZPCGheCl~BGhImF)K4!9@?pC(gi3ozX=a!|r1)LFxy_8c&wY0<^{2cm|P zv6Y`QktY*;I)IUd5y3ne1CqpVanlY45z8hf4&$EUBnucDj16pDa4&GI&TArYhf*xh zdj>*%APH8(h~c>o@l#%T>R$e>rwVx_WUB|~V`p^JHsg*y12lzj&zF}w6W09HwB2yb z%Q~`es&(;7#*DUC_w-Dmt7|$*?TA_m;zB+-u{2;Bg{O}nV7G_@7~<)Bv8fH^G$XG8$(&{A zwXJK5LRK%M34(t$&NI~MHT{UQ9qN-V_yn|%PqC81EIiSzmMM=2zb`mIwiP_b)x+2M z7Gd`83h79j#SItpQ}luuf2uOU`my_rY5T{6P#BNlb%h%<#MZb=m@y5aW;#o1^2Z)SWo+b`y0gV^iRcZtz5!-05vF z7wNo=hc6h4hc&s@uL^jqRvD6thVYtbErDK9k!;+a0xoE0WL7zLixjn5;$fXvT=O3I zT6jI&^A7k6R{&5#lVjz#8%_RiAa2{di{`kx79K+j72$H(!ass|B%@l%KeeKchYLe_ z>!(JC2fxsv>XVen+Y42GeYPxMWqm`6F$(E<6^s|g(slNk!lL*6v^W2>f6hh^mE$s= z3D$)}{V5(Qm&A6bp%2Q}*GZ5Qrf}n7*Hr51?bJOyA-?B4vg6y_EX<*-e20h{=0Mxs zbuQGZ$fLyO5v$nQ&^kuH+mNq9O#MWSfThtH|0q1i!NrWj^S}_P;Q1OkYLW6U^?_7G zx2wg?CULj7))QU(n{$0JE%1t2dWrMi2g-Os{v|8^wK{@qlj%+1b^?NI z$}l2tjp0g>K3O+p%yK<9!XqmQ?E9>z&(|^Pi~aSRwI5x$jaA62GFz9%fmO3t3a>cq zK8Xbv=5Ps~4mKN5+Eqw12(!PEyedFXv~VLxMB~HwT1Vfo51pQ#D8e$e4pFZ{&RC2P z5gTIzl{3!&(tor^BwZfR8j4k{7Rq#`riKXP2O-Bh66#WWK2w=z;iD9GLl+3 zpHIaI4#lQ&S-xBK8PiQ%dwOh?%BO~DCo06pN7<^dnZCN@NzY{_Z1>rrB0U|nC&+!2 z2y!oBcTd2;@lzyk(B=TkyZ)zy0deK05*Q0zk+o$@nun`VI1Er7pjq>8V zNmlW{p7S^Btgb(TA}jL(uR>`0w8gHP^T~Sh5Tkip^spk4SBAhC{TZU}_Z)UJw-}zm zPq{KBm!k)?P{`-(9?LFt&YN4s%SIZ-9lJ!Ws~B%exHOeVFk3~}HewnnH(d)qkLQ_d z6h>O)pEE{vbOVw}E+jdYC^wM+AAhaI(YAibUc@B#_mDss0Ji&BK{WG`4 zOk>vSNq(Bq2IB@s>>Rxm6Wv?h;ZXkpb1l8u|+_qXWdC*jjcPCixq;!%BVPSp#hP zqo`%cNf&YoQXHC$D=D45RiT|5ngPlh?0T~?lUf*O)){K@*Kbh?3RW1j9-T?%lDk@y z4+~?wKI%Y!-=O|_IuKz|=)F;V7ps=5@g)RrE;;tvM$gUhG>jHcw2Hr@fS+k^Zr~>G z^JvPrZc}_&d_kEsqAEMTMJw!!CBw)u&ZVzmq+ZworuaE&TT>$pYsd9|g9O^0orAe8 z221?Va!l1|Y5X1Y?{G7rt1sX#qFA^?RLG^VjoxPf63;AS=_mVDfGJKg73L zsGdnTUD40y(>S##2l|W2Cy!H(@@5KBa(#gs`vlz}Y~$ot5VsqPQ{{YtjYFvIumZzt zA{CcxZLJR|4#{j7k~Tu*jkwz8QA|5G1$Cl895R`Zyp;irp1{KN){kB30O8P1W5;@bG znvX74roeMmQlUi=v9Y%(wl$ZC#9tKNFpvi3!C}f1m6Ct|l2g%psc{TJp)@yu)*e2> z((p0Fg*8gJ!|3WZke9;Z{8}&NRkv7iP=#_y-F}x^y?2m%-D_aj^)f04%mneyjo_;) z6qc_Zu$q37d~X``*eP~Q>I2gg%rrV8v=kDfpp$=%Vj}hF)^dsSWygoN(A$g*E=Do6FX?&(@F#7pbiJ`;c0c@Ul zDqW_90Wm#5f2L<(Lf3)3TeXtI7nhYwRm(F;*r_G6K@OPW4H(Y3O5SjUzBC}u3d|eQ8*8d@?;zUPE+i#QNMn=r(ap?2SH@vo*m z3HJ%XuG_S6;QbWy-l%qU;8x;>z>4pMW7>R}J%QLf%@1BY(4f_1iixd-6GlO7Vp*yU zp{VU^3?s?90i=!#>H`lxT!q8rk>W_$2~kbpz7eV{3wR|8E=8**5?qn8#n`*(bt1xRQrdGxyx2y%B$qmw#>ZV$c7%cO#%JM1lY$Y0q?Yuo> ze9KdJoiM)RH*SB%^;TAdX-zEjA7@%y=!0=Zg%iWK7jVI9b&Dk}0$Af&08KHo+ zOwDhFvA(E|ER%a^cdh@^wLUlmIv6?_3=BvX8jKk92L=Y}7Jf5OGMfh` zBdR1wFCi-i5@`9km{isRb0O%TX+f~)KNaEz{rXQa89`YIF;EN&gN)cigu6mNh>?Cm zAO&Im2flv6D{jwm+y<%WsPe4!89n~KN|7}Cb{Z;XweER73r}Qp2 zz}WP4j}U0&(uD&9yGy6`!+_v-S(yG*iytsTR#x_Rc>=6u^vnRDnf1gP{#2>`ffrAC% zTZ5WQ@hAK;P;>kX{D)mIXe4%a5p=LO1xXH@8T?mz7Q@d)$3pL{{B!2{-v70L*o1AO+|n5beiw~ zk@(>m?T3{2k2c;NWc^`4@P&Z?BjxXJ@;x1qhn)9Mn*IFdt_J-dIqx5#d`NfyfX~m( zIS~5)MfZ2Uy?_4W`47i}u0ZgPh<{D|w_d#;D}Q&U$Q-G}xM1A@1f{#%A$jh6Qp&0hQ<0bPOM z-{1Wm&p%%#eb_?x7i;bol EfAhh=DF6Tf literal 0 HcmV?d00001 diff --git a/hugegraph-pd/.mvn/wrapper/maven-wrapper.properties b/hugegraph-pd/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000..9c2bd37721 --- /dev/null +++ b/hugegraph-pd/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,18 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. 
The ASF
+# licenses this file to You under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip
+wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
diff --git a/hugegraph-pd/README.md b/hugegraph-pd/README.md
index e69de29bb2..c01d5a26b2 100644
--- a/hugegraph-pd/README.md
+++ b/hugegraph-pd/README.md
@@ -0,0 +1,940 @@
+# PD Deployment Guide
+
+## PD configuration
+
+- The configuration file is application.yml
+
+````
+license:
+  # Directory of the config file used for license verification, including the subject, password, etc.
+  verify-path: 'conf/verify-license.json'
+  # Directory of the license file, generated by the hugegraph-signature project
+  license-path: 'conf/hugegraph.license'
+pd:
+  # Storage path
+  data-path: ./pd_data
+  # Patrol interval in seconds: periodically checks the partition count of every
+  # store and rebalances the partitions automatically
+  patrol-interval: 1800
+  # Whether to allow batch loading with a single replica
+  enable-batch-load: false
+store:
+  # Store offline timeout in seconds. After this time the store is considered
+  # permanently unavailable and its replicas are assigned to other machines
+  max-down-time: 172800
+partition:
+  # Default number of replicas per partition
+  default-shard-count: 3
+  # Default maximum number of shards per machine; the initial partition count is
+  # store-max-shard-count * store-number / default-shard-count
+  # (e.g. with 4 stores: 12 * 4 / 3 = 16 initial partitions)
+  store-max-shard-count: 12
+````
+
+## Store configuration
+
+- The configuration file is application.yml; set the address of the PD server
+
+````
+pdserver:
+  # PD service address; separate multiple PD addresses with commas
+  address: pdserver-ip:port
+````
+
+## HugeGraph configuration
+
+- The options are in HugeGraph's startup script start-hugegraph.sh
+
+````
+if [ -z "$META_SERVERS" ];then
+    META_SERVERS="pdserver-ip:port"
+fi
+if [ -z "$PD_PEERS" ];then
+    PD_PEERS="pdserver-ip:port"
+fi
+````
+
+## RESTFUL API
+
+- PD provides a set of RESTful APIs that return information about cluster partitions, graphs, storage nodes, and so on.
+
+### Cluster statistics
+
+#### Get cluster statistics
+
+###### Method & Url
+
+```
+GET http://localhost:8620/v1/cluster
+```
+
+###### Response Status
+
+```json
+200
+```
+
+###### Response Body
+
+```json
+{
+  "message": "OK",
+  "data": {
+    "state": "Cluster_OK",
+    "pdList": [
+      {
+        "raftUrl": "10.232.132.38:8610",
+        "grpcUrl": "10.232.132.38:8686",
+        "restUrl": "10.232.132.38:8620",
+        "state": "Up",
+        "dataPath": "./pd_data",
+        "role": "Leader",
+        "serviceName": "10.232.132.38:8686-PD",
+        "serviceVersion": "",
+        "startTimeStamp": 0
+      }
+    ],
+    "pdLeader": {
+      "raftUrl": "10.232.132.38:8610",
+      "grpcUrl": "10.232.132.38:8686",
+      "restUrl": "10.232.132.38:8620",
+      "state": "Up",
+      "dataPath": "./pd_data",
+      "role": "Leader",
+      "serviceName": "10.232.132.38:8686-PD",
+      "serviceVersion": "",
+      "startTimeStamp": 0
+    },
+    "memberSize": 1,
+    "stores": [
+      {
+        "storeId": 110645464809417136,
+        "address": "10.232.132.38:8500",
+        "raftAddress": "10.232.132.38:8510",
+        "version": "3.6.3",
+        "state": "Up"
+      }
+    ],
+    "storeSize": 1,
+    "onlineStoreSize": 1,
+    "offlineStoreSize": 0,
+    "graphSize": 3,
+    "partitionSize": 4,
+    "shardCount": 3,
+    "keyCount": 1707,
+    "dataSize": 19
+  },
+  "status": 0
+}
+```
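+
+A quick way to check cluster health from a shell is sketched below. It assumes the PD REST port 8620 used throughout these examples, and that `jq` (an external tool, not shipped with PD) is installed:
+
+```bash
+# Print the overall cluster state and the online/offline store counts
+curl -s http://localhost:8620/v1/cluster |
+    jq '{state: .data.state, online: .data.onlineStoreSize, offline: .data.offlineStoreSize}'
+```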
"Leader", + "serviceName": "10.232.132.38:8686-PD", + "serviceVersion": "", + "startTimeStamp": 0 + }, + "pdList": [ + { + "raftUrl": "10.232.132.38:8610", + "grpcUrl": "10.232.132.38:8686", + "restUrl": "10.232.132.38:8620", + "state": "Up", + "dataPath": "./pd_data", + "role": "Leader", + "serviceName": "10.232.132.38:8686-PD", + "serviceVersion": "", + "startTimeStamp": 0 + } + ], + "state": "Cluster_OK" + }, + "status": 0 +} +``` + +###存储节点相关 + +#### 获取集群所有的store的信息 + +###### Method & Url + +``` +GET http://localhost:8620/v1/stores +``` + +###### Response Status + +```json +200 +``` + +###### Request Body + +```json +{ + "message": "OK", + "data": { + "stores": [ + { + "storeId": 110645464809417136, + "address": "10.232.132.38:8500", + "raftAddress": "10.232.132.38:8510", + "version": "3.6.3", + "state": "Up", + "deployPath": "", + "startTimeStamp": 1658491024, + "lastHeatBeat": 1658491748560, + "capacity": 1968740712448, + "available": 1959665557504, + "partitionCount": 4, + "graphSize": 3, + "keyCount": 1128, + "leaderCount": 4, + "serviceName": "10.232.132.38:8500-store", + "serviceVersion": "3.6.3", + "partitions": [ + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/s", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 1, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 2, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 3, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/m", + "role": "Leader", + "workState": "PState_Normal" + } + ] + } + ] + }, + "status": 0 +} +``` + +#### 获取单个store的信息 + +###### Method & Url + +``` +GET http://localhost:8620/v1/store/{storeId} +``` + +###### Response Status + +```json +200 +``` + +###### Request Body + +```json +{ + "message": "OK", + "data": { + "storeId": 110645464809417136, + "address": "10.232.132.38:8500", + "raftAddress": "10.232.132.38:8510", + "version": "3.6.3", + "state": "Up", + "deployPath": "", + "startTimeStamp": 1658491024, + "lastHeatBeat": 1658491838632, + "capacity": 1968740712448, + "available": 1959665549312, + "partitionCount": 4, + "graphSize": 3, + "keyCount": 1128, + "leaderCount": 4, + "serviceName": "10.232.132.38:8500-store", + "serviceVersion": "3.6.3", + "partitions": [ + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/s", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 1, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 2, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 3, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/m", + "role": "Leader", + "workState": "PState_Normal" + } + ] + }, + "status": 0 +} +``` + +### 分区相关 + +#### 获取分区信息 + +###### Method & Url + +``` +GET http://localhost:8620/v1/highLevelPartitions +``` + +###### Response Status + +```json +200 +``` + +###### Response Body + +```json +{ + "message": "OK", + "data": { + "partitions": [ + 
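+
+For example, the state of a single store can be checked directly with the sample storeId from the response above (the id and the 8620 port are illustrative values, and `jq` is again an external tool):
+
+```bash
+# Substitute a real storeId taken from the /v1/stores response
+curl -s http://localhost:8620/v1/store/110645464809417136 | jq '.data.state'
+```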
+
+### Partitions
+
+#### Get partition info
+
+###### Method & Url
+
+```
+GET http://localhost:8620/v1/highLevelPartitions
+```
+
+###### Response Status
+
+```json
+200
+```
+
+###### Response Body
+
+```json
+{
+  "message": "OK",
+  "data": {
+    "partitions": [
+      {
+        "partitionId": 0,
+        "state": "PState_Normal",
+        "leaderAddress": "10.232.132.38:8500",
+        "keyCount": 0,
+        "dataSize": 0,
+        "shardState": "SState_Normal",
+        "graphs": [
+          {
+            "graphName": "DEFAULT/hugegraph/g",
+            "keyCount": 361,
+            "startKey": 0,
+            "endKey": 0,
+            "dataSize": 8,
+            "workState": "PState_Normal",
+            "partitionId": 0
+          },
+          {
+            "graphName": "DEFAULT/hugegraph/m",
+            "keyCount": 361,
+            "startKey": 0,
+            "endKey": 0,
+            "dataSize": 13,
+            "workState": "PState_Normal",
+            "partitionId": 0
+          },
+          {
+            "graphName": "DEFAULT/hugegraph/s",
+            "keyCount": 361,
+            "startKey": 0,
+            "endKey": 65535,
+            "dataSize": 6,
+            "workState": "PState_Normal",
+            "partitionId": 0
+          }
+        ],
+        "shards": [
+          {
+            "storeId": 110645464809417136,
+            "role": "Leader",
+            "state": "SState_Normal",
+            "progress": 0,
+            "partitionId": 0,
+            "address": "10.232.132.38:8500"
+          }
+        ]
+      },
+      {
+        "partitionId": 1,
+        "state": "PState_Normal",
+        "leaderAddress": "10.232.132.38:8500",
+        "keyCount": 0,
+        "dataSize": 0,
+        "shardState": "SState_Normal",
+        "graphs": [
+          {
+            "graphName": "DEFAULT/hugegraph/g",
+            "keyCount": 8,
+            "startKey": 16384,
+            "endKey": 32768,
+            "dataSize": 5,
+            "workState": "PState_Normal",
+            "partitionId": 1
+          }
+        ],
+        "shards": [
+          {
+            "storeId": 110645464809417136,
+            "role": "Leader",
+            "state": "SState_Normal",
+            "progress": 0,
+            "partitionId": 1,
+            "address": "10.232.132.38:8500"
+          }
+        ]
+      },
+      {
+        "partitionId": 2,
+        "state": "PState_Normal",
+        "leaderAddress": "10.232.132.38:8500",
+        "keyCount": 0,
+        "dataSize": 0,
+        "shardState": "SState_Normal",
+        "graphs": [
+          {
+            "graphName": "DEFAULT/hugegraph/g",
+            "keyCount": 18,
+            "startKey": 32768,
+            "endKey": 49152,
+            "dataSize": 8,
+            "workState": "PState_Normal",
+            "partitionId": 2
+          }
+        ],
+        "shards": [
+          {
+            "storeId": 110645464809417136,
+            "role": "Leader",
+            "state": "SState_Normal",
+            "progress": 0,
+            "partitionId": 2,
+            "address": "10.232.132.38:8500"
+          }
+        ]
+      },
+      {
+        "partitionId": 3,
+        "state": "PState_Normal",
+        "leaderAddress": "10.232.132.38:8500",
+        "keyCount": 0,
+        "dataSize": 0,
+        "shardState": "SState_Normal",
+        "graphs": [
+          {
+            "graphName": "DEFAULT/hugegraph/g",
+            "keyCount": 19,
+            "startKey": 49152,
+            "endKey": 65536,
+            "dataSize": 8,
+            "workState": "PState_Normal",
+            "partitionId": 3
+          }
+        ],
+        "shards": [
+          {
+            "storeId": 110645464809417136,
+            "role": "Leader",
+            "state": "SState_Normal",
+            "progress": 0,
+            "partitionId": 3,
+            "address": "10.232.132.38:8500"
+          }
+        ]
+      }
+    ]
+  },
+  "status": 0
+}
+```
+
+### Graphs
+
+#### Get info of all graphs
+
+###### Method & Url
+
+```
+GET http://localhost:8620/v1/graphs
+```
+
+###### Response Status
+
+```json
+200
+```
+
+###### Response Body
+
+```json
+{
+  "message": "OK",
+  "data": {
+    "graphs": [
+      {
+        "graphName": "DEFAULT/hugegraph/g",
+        "partitionCount": 4,
+        "state": "PState_Normal",
+        "partitions": [
+          {
+            "partitionId": 0,
+            "graphName": "DEFAULT/hugegraph/g",
+            "workState": "PState_Normal",
+            "startKey": 0,
+            "endKey": 16384,
+            "shards": [
+              {
+                "partitionId": 0,
+                "storeId": 110645464809417136,
+                "state": "SState_Normal",
+                "role": "Leader",
+                "progress": 0
+              }
+            ]
+          },
+          {
+            "partitionId": 1,
+            "graphName": "DEFAULT/hugegraph/g",
+            "workState": "PState_Normal",
+            "startKey": 16384,
+            "endKey": 32768,
+            "shards": [
+              {
+                "partitionId": 1,
+                "storeId": 110645464809417136,
+                "state": "SState_Normal",
+                "role": "Leader",
+                "progress": 0
+              }
+            ]
+          },
+          {
+            "partitionId": 2,
+            "graphName": "DEFAULT/hugegraph/g",
+            "workState": "PState_Normal",
+            "startKey": 32768,
+            "endKey": 49152,
+            "shards": [
+              {
+                "partitionId": 2,
+                "storeId": 110645464809417136,
+                "state": "SState_Normal",
+                "role": "Leader",
+                "progress": 0
+              }
+            ]
+          },
+          {
+            "partitionId": 3,
+            "graphName": "DEFAULT/hugegraph/g",
+            "workState": "PState_Normal",
+            "startKey": 49152,
+            "endKey": 65536,
+            "shards": [
+              {
+                "partitionId": 3,
+                "storeId": 110645464809417136,
+                "state": "SState_Normal",
+                "role": "Leader",
+                "progress": 0
+              }
+            ]
+          }
+        ],
+        "dataSize": 48,
+        "keyCount": 1128,
+        "nodeCount": 0,
+        "edgeCount": 0
+      },
+      {
+        "graphName": "DEFAULT/hugegraph/m",
+        "partitionCount": 1,
+        "state": "PState_Normal",
+        "partitions": [
+          {
+            "partitionId": 0,
+            "graphName": "DEFAULT/hugegraph/m",
+            "workState": "PState_Normal",
+            "startKey": 0,
+            "endKey": 65535,
+            "shards": [
+              {
+                "partitionId": 0,
+                "storeId": 110645464809417136,
+                "state": "SState_Normal",
+                "role": "Leader",
+                "progress": 0
+              }
+            ]
+          }
+        ],
+        "dataSize": 48,
+        "keyCount": 1128,
+        "nodeCount": 0,
+        "edgeCount": 0
+      },
+      {
+        "graphName": "DEFAULT/hugegraph/s",
+        "partitionCount": 1,
+        "state": "PState_Normal",
+        "partitions": [
+          {
+            "partitionId": 0,
+            "graphName": "DEFAULT/hugegraph/s",
+            "workState": "PState_Normal",
+            "startKey": 0,
+            "endKey": 65535,
+            "shards": [
+              {
+                "partitionId": 0,
+                "storeId": 110645464809417136,
+                "state": "SState_Normal",
+                "role": "Leader",
+                "progress": 0
+              }
+            ]
+          }
+        ],
+        "dataSize": 48,
+        "keyCount": 1128,
+        "nodeCount": 0,
+        "edgeCount": 0
+      }
+    ]
+  },
+  "status": 0
+}
+```
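+
+A small sketch for listing just the graph names and their partition counts (same host/port and `jq` assumptions as above):
+
+```bash
+curl -s http://localhost:8620/v1/graphs |
+    jq -r '.data.graphs[] | "\(.graphName)\tpartitions=\(.partitionCount)"'
+```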
"partitionId": 2, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + }, + { + "partitionId": 3, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 49152, + "endKey": 65536, + "shards": [ + { + "partitionId": 3, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + } + ], + "dataSize": 48, + "keyCount": 1128, + "nodeCount": 0, + "edgeCount": 0 + }, + { + "graphName": "DEFAULT/hugegraph/m", + "partitionCount": 1, + "state": "PState_Normal", + "partitions": [ + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/m", + "workState": "PState_Normal", + "startKey": 0, + "endKey": 65535, + "shards": [ + { + "partitionId": 0, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + } + ], + "dataSize": 48, + "keyCount": 1128, + "nodeCount": 0, + "edgeCount": 0 + }, + { + "graphName": "DEFAULT/hugegraph/s", + "partitionCount": 1, + "state": "PState_Normal", + "partitions": [ + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/s", + "workState": "PState_Normal", + "startKey": 0, + "endKey": 65535, + "shards": [ + { + "partitionId": 0, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + } + ], + "dataSize": 48, + "keyCount": 1128, + "nodeCount": 0, + "edgeCount": 0 + } + ] + }, + "status": 0 +} +``` + +#### 获取单个图信息 + +###### Method & Url + +``` +GET http://localhost:8620/v1/graph/{graphName} +``` + +###### Response Status + +```json +200 +``` + +###### Response Body + +```json +{ + "message": "OK", + "data": { + "graphName": "DEFAULT/hugegraph/g", + "partitionCount": 4, + "state": "PState_Normal", + "partitions": [ + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 0, + "endKey": 16384, + "shards": [ + { + "partitionId": 0, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + }, + { + "partitionId": 1, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 16384, + "endKey": 32768, + "shards": [ + { + "partitionId": 1, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + }, + { + "partitionId": 2, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 32768, + "endKey": 49152, + "shards": [ + { + "partitionId": 2, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + }, + { + "partitionId": 3, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 49152, + "endKey": 65536, + "shards": [ + { + "partitionId": 3, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + } + ], + "dataSize": 48, + "keyCount": 1128, + "nodeCount": 0, + "edgeCount": 0 + }, + "status": 0 +} +``` + +###获取shard的信息 + +#### 获取所有shard的信息 + +###### Method & Url + +``` +GET http://localhost:8620/v1/shards +``` + +###### Response Status + +```json +200 +``` + +###### Response Body + +```json +{ + "message": "OK", + "data": { + "shards": [ + { + "storeId": 110645464809417136, + "partitionId": 0, + "role": "Leader", + "state": "SState_Normal", + "graphName": "DEFAULT/hugegraph/g", + "progress": 0 + }, + { + "storeId": 110645464809417136, + "partitionId": 1, + "role": "Leader", + "state": "SState_Normal", + "graphName": 
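+
+For instance, counting the Leader shards per graph gives a rough view of leader distribution (host/port and `jq` are the same assumptions as in the earlier sketches):
+
+```bash
+# Count Leader shards per graph
+curl -s http://localhost:8620/v1/shards |
+    jq -r '.data.shards[] | select(.role == "Leader") | .graphName' | sort | uniq -c
+```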
"DEFAULT/hugegraph/g", + "progress": 0 + }, + { + "storeId": 110645464809417136, + "partitionId": 2, + "role": "Leader", + "state": "SState_Normal", + "graphName": "DEFAULT/hugegraph/g", + "progress": 0 + }, + { + "storeId": 110645464809417136, + "partitionId": 3, + "role": "Leader", + "state": "SState_Normal", + "graphName": "DEFAULT/hugegraph/g", + "progress": 0 + }, + { + "storeId": 110645464809417136, + "partitionId": 0, + "role": "Leader", + "state": "SState_Normal", + "graphName": "DEFAULT/hugegraph/m", + "progress": 0 + }, + { + "storeId": 110645464809417136, + "partitionId": 0, + "role": "Leader", + "state": "SState_Normal", + "graphName": "DEFAULT/hugegraph/s", + "progress": 0 + } + ] + }, + "status": 0 +} +``` + +###服务注册 + +#### 注册服务 + +###### Method & Url + +``` +POST http://127.0.0.1:8620/v1/registry +``` + +###### Request Body + +```json +200 +``` + +###### Response Status + +```json +{ + "appName":"aaaa", + "version":"version1", + "address":"address1", + "interval":"9223372036854775807", + "labels": { + "aaa": "aaaavalue" + } +} +``` + +appName:所属服务名 +version:所属服务版本号 +address:服务实例地址+端口 +interval:实例心跳间隔,字符串,最大9223372036854775807 +labels: 自定义标签,若服务名为'hg'即hugeserver时,需要提供key为cores的项,进行cpu核数的验证 + +###### Response Body + +```json +{ + "errorType": "OK", + "message": "", + "data": null +} +``` + +errorType:状态码 +message:状态码为错误时的具体出错信息 +data:无返回数据 + +#### 服务实例获取 + +###### Method & Url + +``` +POST http://127.0.0.1:8620/v1/registryInfo +``` + +###### Request Body + +```json +200 +``` + +###### Response Status + +```json +{ + "appName":"aaaa", + "version":"version1", + "labels": { + "aaa": "aaaavalue" + } +} +``` + +以下三项可全部为空,则获取所有服务节点的信息: +-- appName:过滤所属服务名的条件 +-- version:过滤所属服务版本号的条件,此项有值,则appName不能为空 +-- labels: 过滤自定义标签的条件 + +###### Response Body + +```json +{ + "errorType": "OK", + "message": null, + "data": [ + { + "id": null, + "appName": "aaaa", + "version": "version1", + "address": "address1", + "interval": "9223372036854775807", + "labels": { + "aaa": "aaaavalue" + } + } + ] +} +``` + +errorType:状态码 +message:状态码为错误时的具体出错信息 +data:获取的服务节点信息 \ No newline at end of file diff --git a/hugegraph-pd/build-pre.sh b/hugegraph-pd/build-pre.sh new file mode 100644 index 0000000000..f92d104eea --- /dev/null +++ b/hugegraph-pd/build-pre.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
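+
+Put together, a registration call looks like the sketch below; the service name, address, interval, and labels are illustrative values in the spirit of this section, not required ones:
+
+```bash
+curl -s -X POST http://127.0.0.1:8620/v1/registry \
+    -H "Content-Type: application/json" \
+    -d '{"appName": "demo", "version": "v1", "address": "127.0.0.1:9999", "interval": "5000", "labels": {"env": "test"}}'
+```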
+
+#### Get service instances
+
+###### Method & Url
+
+```
+POST http://127.0.0.1:8620/v1/registryInfo
+```
+
+###### Request Body
+
+```json
+{
+  "appName": "aaaa",
+  "version": "version1",
+  "labels": {
+    "aaa": "aaaavalue"
+  }
+}
+```
+
+All three fields may be empty; if so, information about all service nodes is returned:
+-- appName: filter by the service name
+-- version: filter by the service version; if set, appName must not be empty
+-- labels: filter by custom labels
+
+###### Response Status
+
+```json
+200
+```
+
+###### Response Body
+
+```json
+{
+  "errorType": "OK",
+  "message": null,
+  "data": [
+    {
+      "id": null,
+      "appName": "aaaa",
+      "version": "version1",
+      "address": "address1",
+      "interval": "9223372036854775807",
+      "labels": {
+        "aaa": "aaaavalue"
+      }
+    }
+  ]
+}
+```
+
+errorType: status code
+message: detailed error message when the status code indicates an error
+data: information about the matched service nodes
\ No newline at end of file
diff --git a/hugegraph-pd/build-pre.sh b/hugegraph-pd/build-pre.sh
new file mode 100644
index 0000000000..f92d104eea
--- /dev/null
+++ b/hugegraph-pd/build-pre.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to You under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+mkdir output
+touch output/1
+export JAVA_HOME=$ORACLEJDK_1_8_0_HOME
+
+readonly VER=3.6.3
+readonly REPO_URL=http://127.0.0.1:8081/artifactory/star-local
+
+$MAVEN_3_5_3_BIN/mvn -DremoveSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=true versions:set
+$MAVEN_3_5_3_BIN/mvn --settings ./settings.xml -Dmaven.test.skip=true -DaltDeploymentRepository=star-local::default::${REPO_URL} clean deploy
+$MAVEN_3_5_3_BIN/mvn versions:revert
+
+
+#------------------repo-----------------------
+readonly FILE_NAME=hugegraph-pd-3.6.3.tar.gz
+readonly REPO_URL_FILE=http://127.0.0.1:8081/artifactory/star-file
+
+localFilePath=dist/${FILE_NAME}
+targetFolder="${REPO_URL_FILE}/dist/$(date '+%Y-%m-%d')/"
+artifactoryUser="admin"
+artifactoryPassword="JFrog12345"
+
+md5Value="$(md5sum "$localFilePath")"
+md5Value="${md5Value:0:32}"
+sha1Value="$(sha1sum "$localFilePath")"
+sha1Value="${sha1Value:0:40}"
+sha256Value="$(sha256sum "$localFilePath")"
+sha256Value="${sha256Value:0:65}"
+
+#curl -X PUT -u admin:JFrog12345 -T ${localFilePath} "${REPO_URL_FILE}/dist/${data_folder}/"
+echo "INFO: Uploading $localFilePath to $targetFolder"
+curl -i -X PUT -u "$artifactoryUser:$artifactoryPassword" \
+    -H "X-Checksum-Md5: $md5Value" \
+    -H "X-Checksum-Sha1: $sha1Value" \
+    -H "X-Checksum-Sha256: $sha256Value" \
+    -T "$localFilePath" \
+    "$targetFolder"
diff --git a/hugegraph-pd/build.sh b/hugegraph-pd/build.sh
new file mode 100644
index 0000000000..f77358e1cc
--- /dev/null
+++ b/hugegraph-pd/build.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to You under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+export PATH=$MAVEN_3_5_3_BIN:$ORACLEJDK_11_0_7_BIN:$PATH
+export JAVA_HOME=$ORACLEJDK_11_0_7_HOME
+export MAVEN_HOME=$MAVEN_3_5_3_HOME
+
+# TODO: remove later
+readonly REPO_URL=http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local_Snapshots
+
+if [ ! -n "$1" ] ;then
+    GOAL=package
+else
+    GOAL=$1
+fi
+
+$MAVEN_3_5_3_BIN/mvn -Dmaven.test.skip=true -DaltDeploymentRepository=Baidu_Local_Snapshots::default::${REPO_URL} clean ${GOAL}
+echo "mv dist...."
+mv dist output
+ls output
+echo "mv dist done"
+echo "show output...."
+ls output
+echo "show output done"
diff --git a/hugegraph-pd/ci.yml b/hugegraph-pd/ci.yml
new file mode 100644
index 0000000000..0eb7515ae3
--- /dev/null
+++ b/hugegraph-pd/ci.yml
@@ -0,0 +1,43 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +Global: + tool: build_submitter + languages: + # Java Version: 1.7, 1.8, 9, 10 + - language: java + version: 11 + envs: + # Maven Version: 3.0.4, 3.1.1, 3.2.5, 3.3.9, 3.5.3 + - env: maven + version: 3.5.3 +Default: + profile: [ dev ] +Profiles: + - profile: + name: dev + tool: build_submitter + env: DECK_CENTOS7U5_K3 + command: sh build.sh + release: true + + - profile: + name: deploy + tool: build_submitter + env: DECK_CENTOS7U5_K3 + command: sh build.sh deploy + release: true diff --git a/hugegraph-pd/conf/hugegraph.license b/hugegraph-pd/conf/hugegraph.license new file mode 100644 index 0000000000000000000000000000000000000000..bca71af9a1966024e7a99880302f6b592a33ca84 GIT binary patch literal 856 zcmV-e1E>69hn&I!I)GnSv~^hE{iID$vF0rJw0JnV&Nu&pJ?*HLQ#!nms8=CeFwzT% z)5v&U=s7DO7$r#-RSw|2Vq*`|(2D5<5GKxmIX=E1#UCniwf>oeSOP{p(e{%an?_vT zYc)wIUQsJ<9Ai=YsETz;YFaYQ^&YAJD(|KO1~NKs#B+QiROyg@d5VjOt~PJcGl>9= zPZ!gB)$<%a;*Yw{lDP)9Cj#PNktQ)_SRi&!l=1Y})2$oO3ASGnC|m#T{b9DXGt@^9 zRQlb4GeipvU&m_F1_$wwu0?i5Cz0TTSl`#uc}6(t4&YjvWkFI)*jMO@Me_Le_=M(v zSrA?RjyET9V15A)-p(d1q)ry6yF$rH4#5n`?m|ws9TUw@;$3qiBbOlEoKdy_MT)~V z=N#&y{vn8U2Og2iJ{)W-ZO7$zLHEUMN-l$XRvs_I_2T?X5&UXMi&;q8i8d}FWtd=8y@PLaCFm+wpR#G)aC{`rVrs%8)@u!?}9E$ZnDYo!c5hiA?8Qm}y{K9F) zTlyJ+OL<98%B`lIL|}hc9JSDFm9?7x2CnOjGJE_7%Jp&Bif`p29?ai>C(n0UWicnv zrZH578>dDfFP_O?YlK@|!U^eZflg1)Hs(pq1{WqS2Cflfgsa$(yyyfhqxr2y)xO<- ztE~YE$2dMsybWD-Bi<89DZ@L;S$hw~0DG7-yoR%(%Gon5wDYD`dL4k_U6j*47Z4h4 zwCZyUzNtj|D0Iax|0F9J(~M5|FP&r1ysHQXgy8J7aEa}MA3?yWdO@W6NuOQq zxI&O;%`%{#7kN9?F(hYrclJ8cN^CxZ6s68>v%<^cjA#tlBAKkmyCm&l%`g32m?oed z6hL($sbOP?W%_S#UNEW$*u&~DQD5$(f*}}lGTzkLe#?(+LA~;aL=0#EKm?Ju+ zaOe2lRrWmfBd1Mk57Xciq<;l;cn=*wkW>MbOpHWHg~zj9arkkR#q-;Ch&7kk6E(mR i^Sj|__)(b4Oh-2cCDrT@A%)082}=;$lBJ*eQVTP`m7MAT literal 0 HcmV?d00001 diff --git a/hugegraph-pd/conf/verify-license.json b/hugegraph-pd/conf/verify-license.json new file mode 100644 index 0000000000..868ccbebbb --- /dev/null +++ b/hugegraph-pd/conf/verify-license.json @@ -0,0 +1,6 @@ +{ + "subject": "hugegraph-license", + "public_alias": "publiccert", + "store_ticket": "803b6cc3-d144-47e8-948f-ec8b39c8881e", + "publickey_path": "/public-certs.store" +} diff --git a/hugegraph-pd/deploy-release.sh b/hugegraph-pd/deploy-release.sh new file mode 100644 index 0000000000..118a214d7f --- /dev/null +++ b/hugegraph-pd/deploy-release.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+readonly VER=3.6.3
+readonly REPO_URL=http://127.0.0.1:8081/artifactory/star-local
+#mvn -DnewVersion=${VER}-SNAPSHOT -DprocessAllModules=true -DgenerateBackupPoms=false versions:set
+
+./mvnw -DremoveSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=true versions:set
+./mvnw --settings ./settings.xml -Dmaven.test.skip=true -DaltDeploymentRepository=star-local::default::${REPO_URL} clean deploy
+./mvnw versions:revert
diff --git a/hugegraph-pd/deploy-snapshot.sh b/hugegraph-pd/deploy-snapshot.sh
new file mode 100644
index 0000000000..118a214d7f
--- /dev/null
+++ b/hugegraph-pd/deploy-snapshot.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to You under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+readonly VER=3.6.3
+readonly REPO_URL=http://127.0.0.1:8081/artifactory/star-local
+#mvn -DnewVersion=${VER}-SNAPSHOT -DprocessAllModules=true -DgenerateBackupPoms=false versions:set
+
+./mvnw -DremoveSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=true versions:set
+./mvnw --settings ./settings.xml -Dmaven.test.skip=true -DaltDeploymentRepository=star-local::default::${REPO_URL} clean deploy
+./mvnw versions:revert
diff --git a/hugegraph-pd/hg-pd-client/pom.xml b/hugegraph-pd/hg-pd-client/pom.xml
new file mode 100644
index 0000000000..c2d67ff92d
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/pom.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with this
+  work for additional information regarding copyright ownership. The ASF
+  licenses this file to You under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations
+  under the License.
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph-pd</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>hg-pd-client</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <version>1.18.20</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-slf4j-impl</artifactId>
+            <version>2.17.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-grpc</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-common</artifactId>
+            <version>${revision}</version>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.13.2</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+            <version>2.8.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.yaml</groupId>
+            <artifactId>snakeyaml</artifactId>
+            <version>1.28</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java
new file mode 100644
index 0000000000..db48eeee37
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java
@@ -0,0 +1,265 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.LinkedList;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Predicate;
+import java.util.stream.Stream;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.PDGrpc.PDBlockingStub;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetMembersRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetMembersResponse;
+
+import io.grpc.Channel;
+import io.grpc.ClientCall;
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import io.grpc.MethodDescriptor;
+import io.grpc.StatusRuntimeException;
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+import io.grpc.stub.ClientCalls;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public abstract class AbstractClient implements Closeable {
+
+    private static final ConcurrentHashMap<String, ManagedChannel> chs = new ConcurrentHashMap<>();
+    public static Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError(
+            Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build();
+    protected final Pdpb.RequestHeader header;
+    protected final AbstractClientStubProxy stubProxy;
+    protected final PDConfig config;
+    protected ManagedChannel channel = null;
+    protected volatile ConcurrentMap<String, AbstractBlockingStub> stubs = null;
+
+    protected AbstractClient(PDConfig config) {
+        String[] hosts = config.getServerHost().split(",");
+        this.stubProxy = new AbstractClientStubProxy(hosts);
+        this.header = Pdpb.RequestHeader.getDefaultInstance();
+        this.config = config;
+    }
+
+    public static Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) {
+        Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(
+                Pdpb.Error.newBuilder().setTypeValue(errorCode).setMessage(errorMsg)).build();
+        return header;
+    }
+
+    protected static void handleErrors(Pdpb.ResponseHeader header) throws PDException {
+        if (header.hasError() && header.getError().getType() != Pdpb.ErrorType.OK) {
+            throw new PDException(header.getError().getTypeValue(),
+                                  String.format("PD request error, error code = %d, msg = %s",
+                                                header.getError().getTypeValue(),
+                                                header.getError().getMessage()));
+        }
+    }
+
+    protected AbstractBlockingStub getBlockingStub() throws PDException {
+        if (stubProxy.getBlockingStub() == null) {
+            synchronized (this) {
+                if (stubProxy.getBlockingStub() == null) {
+                    String host = resetStub();
+                    if (host.isEmpty()) {
+                        throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE,
+                                              "PD unreachable, pd.peers=" +
+                                              config.getServerHost());
+                    }
+                }
+            }
+        }
+        return (AbstractBlockingStub) stubProxy.getBlockingStub()
+                                               .withDeadlineAfter(config.getGrpcTimeOut(),
+                                                                  TimeUnit.MILLISECONDS);
+    }
+
+    protected AbstractStub getStub() throws PDException {
+        if (stubProxy.getStub() == null) {
+            synchronized (this) {
+                if (stubProxy.getStub() == null) {
+                    String host = resetStub();
+                    if (host.isEmpty()) {
+                        throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE,
+                                              "PD unreachable, pd.peers=" +
+                                              config.getServerHost());
+                    }
+                }
+            }
+        }
+        return stubProxy.getStub();
+    }
+
+    protected abstract AbstractStub createStub();
+
+    protected abstract AbstractBlockingStub createBlockingStub();
+
+    private String resetStub() {
+        String leaderHost = "";
+        for (int i = 0; i < stubProxy.getHostCount(); i++) {
+            String host = stubProxy.nextHost();
+            channel = ManagedChannelBuilder.forTarget(host).usePlaintext().build();
+            PDBlockingStub blockingStub = PDGrpc.newBlockingStub(channel)
+                                                .withDeadlineAfter(config.getGrpcTimeOut(),
+                                                                   TimeUnit.MILLISECONDS);
+            try {
+                GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder()
+                                                                  .setHeader(header).build();
+                GetMembersResponse members = blockingStub.getMembers(request);
+                Metapb.Member leader = members.getLeader();
+                leaderHost = leader.getGrpcUrl();
+                close();
+                channel = ManagedChannelBuilder.forTarget(leaderHost).usePlaintext().build();
+                stubProxy.setBlockingStub(createBlockingStub());
+                stubProxy.setStub(createStub());
+                log.info("PDClient connect to host = {} success", leaderHost);
+                break;
+            } catch (Exception e) {
+                log.error("PDClient connect to {} exception {}, {}", host, e.getMessage(),
+                          e.getCause() != null ? e.getCause().getMessage() : "");
+            }
+        }
+        return leaderHost;
+    }
+
+    protected <ReqT, RespT, StubT extends AbstractBlockingStub<StubT>> RespT blockingUnaryCall(
+            MethodDescriptor<ReqT, RespT> method, ReqT req) throws PDException {
+        return blockingUnaryCall(method, req, 5);
+    }
+
+    protected <ReqT, RespT, StubT extends AbstractBlockingStub<StubT>> RespT blockingUnaryCall(
+            MethodDescriptor<ReqT, RespT> method, ReqT req, int retry) throws PDException {
+        AbstractBlockingStub stub = getBlockingStub();
+        try {
+            RespT resp =
+                    ClientCalls.blockingUnaryCall(stub.getChannel(), method, stub.getCallOptions(),
+                                                  req);
+            return resp;
+        } catch (Exception e) {
+            log.error(method.getFullMethodName() + " exception, {}", e.getMessage());
+            if (e instanceof StatusRuntimeException) {
+                if (retry < stubProxy.getHostCount()) {
+                    // Network unreachable: close the previous connection and
+                    // retry with the next host
+                    synchronized (this) {
+                        stubProxy.setBlockingStub(null);
+                    }
+                    return blockingUnaryCall(method, req, ++retry);
+                }
+            }
+        }
+        return null;
+    }
+
+    // this.stubs = new ConcurrentHashMap(hosts.length);
+    private AbstractBlockingStub getConcurrentBlockingStub(String address) {
+        AbstractBlockingStub stub = stubs.get(address);
+        if (stub != null) {
+            return stub;
+        }
+        Channel ch = ManagedChannelBuilder.forTarget(address).usePlaintext().build();
+        PDBlockingStub blockingStub =
+                PDGrpc.newBlockingStub(ch).withDeadlineAfter(config.getGrpcTimeOut(),
+                                                             TimeUnit.MILLISECONDS);
+        stubs.put(address, blockingStub);
+        return blockingStub;
+
+    }
+
+    protected <ReqT, RespT> KVPair<Boolean, RespT> concurrentBlockingUnaryCall(
+            MethodDescriptor<ReqT, RespT> method, ReqT req, Predicate<RespT> predicate) {
+        LinkedList<String> hostList = this.stubProxy.getHostList();
+        if (this.stubs == null) {
+            synchronized (this) {
+                if (this.stubs == null) {
+                    this.stubs = new ConcurrentHashMap<>(hostList.size());
+                }
+            }
+        }
+        Stream<RespT> respTStream = hostList.parallelStream().map((address) -> {
+            AbstractBlockingStub stub = getConcurrentBlockingStub(address);
+            RespT resp = ClientCalls.blockingUnaryCall(stub.getChannel(),
+                                                       method, stub.getCallOptions(), req);
+            return resp;
+        });
+        KVPair<Boolean, RespT> pair;
+        AtomicReference<RespT> response = new AtomicReference<>();
+        boolean result = respTStream.anyMatch((r) -> {
+            response.set(r);
+            return predicate.test(r);
+        });
+        if (result) {
+            pair = new KVPair<>(true, null);
+        } else {
+            pair = new KVPair<>(false, response.get());
+        }
+        return pair;
+    }
+
+    protected <ReqT, RespT> void streamingCall(MethodDescriptor<ReqT, RespT> method, ReqT request,
+                                               StreamObserver<RespT> responseObserver,
+                                               int retry) throws PDException {
+        AbstractStub stub = getStub();
+        try {
+            ClientCall<ReqT, RespT> call = stub.getChannel().newCall(method, stub.getCallOptions());
+            ClientCalls.asyncServerStreamingCall(call, request, responseObserver);
+        } catch (Exception e) {
+            if (e instanceof StatusRuntimeException) {
+                if (retry < stubProxy.getHostCount()) {
+                    synchronized (this) {
+                        stubProxy.setStub(null);
+                    }
+                    streamingCall(method, request, responseObserver, ++retry);
+                    return;
+                }
+            }
+            log.error("rpc call with exception, {}", e.getMessage());
+        }
+    }
+
+    @Override
+    public void close() {
+        closeChannel(channel);
+        if (stubs != null) {
+            for (AbstractBlockingStub stub : stubs.values()) {
+                closeChannel((ManagedChannel) stub.getChannel());
+            }
+        }
+
+    }
+
+    private void closeChannel(ManagedChannel channel) {
+        try {
+            while (channel != null &&
+                   !channel.shutdownNow().awaitTermination(100, TimeUnit.MILLISECONDS)) {
+                continue;
+            }
+        } catch (Exception e) {
+            log.info("Close channel with error : ", e);
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java
new file mode 100644
index 0000000000..150633ba24
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.LinkedList;
+
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+
+public class AbstractClientStubProxy {
+
+    private final LinkedList<String> hostList = new LinkedList<>();
+    private AbstractBlockingStub blockingStub;
+    private AbstractStub stub;
+
+    public AbstractClientStubProxy(String[] hosts) {
+        for (String host : hosts) {
+            if (!host.isEmpty()) {
+                hostList.offer(host);
+            }
+        }
+    }
+
+    public LinkedList<String> getHostList() {
+        return hostList;
+    }
+
+    public String nextHost() {
+        String host = hostList.poll();
+        hostList.offer(host); // move it to the tail
+        return host;
+    }
+
+    public AbstractBlockingStub getBlockingStub() {
+        return this.blockingStub;
+    }
+
+    public void setBlockingStub(AbstractBlockingStub stub) {
+        this.blockingStub = stub;
+    }
+
+    public String getHost() {
+        return hostList.peek();
+    }
+
+    public int getHostCount() {
+        return hostList.size();
+    }
+
+    public AbstractStub getStub() {
+        return stub;
+    }
+
+    public void setStub(AbstractStub stub) {
+        this.stub = stub;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java
new file mode 100644
index 0000000000..c441fb8761
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.concurrent.ConcurrentHashMap;
+
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+
+public class Channels {
+
+    private static final ConcurrentHashMap<String, ManagedChannel> chs = new ConcurrentHashMap<>();
+
+    public static ManagedChannel getChannel(String target) {
+
+        ManagedChannel channel;
+        if ((channel = chs.get(target)) == null || channel.isShutdown() || channel.isTerminated()) {
+            synchronized (chs) {
+                if ((channel = chs.get(target)) == null || channel.isShutdown() ||
+                    channel.isTerminated()) {
+                    channel = ManagedChannelBuilder.forTarget(target).usePlaintext().build();
+                    chs.put(target, channel);
+                }
+            }
+        }
+
+        return channel;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java
new file mode 100644
index 0000000000..92d8cc95d3
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java
new file mode 100644
index 0000000000..92d8cc95d3
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hugegraph.pd.common.GraphCache;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionUtils;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Metapb.Partition;
+import org.apache.hugegraph.pd.grpc.Metapb.Shard;
+import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup;
+import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse;
+
+import com.google.common.collect.Range;
+import com.google.common.collect.RangeMap;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class ClientCache {
+
+    private final AtomicBoolean initialized = new AtomicBoolean(false);
+    private final org.apache.hugegraph.pd.client.PDClient client;
+    private volatile Map<Integer, KVPair<ShardGroup, Shard>> groups;
+    private volatile Map<Long, Metapb.Store> stores;
+    private volatile Map<String, GraphCache> caches = new ConcurrentHashMap<>();
+
+    public ClientCache(org.apache.hugegraph.pd.client.PDClient pdClient) {
+        groups = new ConcurrentHashMap<>();
+        stores = new ConcurrentHashMap<>();
+        client = pdClient;
+    }
+
+    private GraphCache getGraphCache(String graphName) {
+        GraphCache graph;
+        if ((graph = caches.get(graphName)) == null) {
+            synchronized (caches) {
+                if ((graph = caches.get(graphName)) == null) {
+                    graph = new GraphCache();
+                    caches.put(graphName, graph);
+                }
+            }
+        }
+        return graph;
+    }
+
+    public KVPair<Partition, Shard> getPartitionById(String graphName, int partId) {
+        try {
+            GraphCache graph = initGraph(graphName);
+            Partition partition = graph.getPartition(partId);
+            Shard shard = groups.get(partId).getValue();
+            if (partition == null || shard == null) {
+                return null;
+            }
+            return new KVPair<>(partition, shard);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private KVPair<Partition, Shard> getPair(int partId, GraphCache graph) {
+        Partition p = graph.getPartition(partId);
+        KVPair<ShardGroup, Shard> pair = groups.get(partId);
+        if (p != null && pair != null) {
+            Shard s = pair.getValue();
+            if (s == null) {
+                pair.setValue(getLeader(partId));
+                return new KVPair<>(p, pair.getValue());
+            } else {
+                return new KVPair<>(p, s);
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Return the partition info by the hashcode of the key
+     *
+     * @param graphName
+     * @param code
+     * @return
+     */
+    public KVPair<Partition, Shard> getPartitionByCode(String graphName, long code) {
+        try {
+            GraphCache graph = initGraph(graphName);
+            RangeMap<Long, Integer> range = graph.getRange();
+            Integer pId = range.get(code);
+            if (pId != null) {
+                return getPair(pId, graph);
+            }
+            return null;
+        } catch (PDException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private GraphCache initGraph(String graphName) throws PDException {
+        initCache();
+        GraphCache graph = getGraphCache(graphName);
+        if (!graph.getInitialized().get()) {
+            synchronized (graph) {
+                if (!graph.getInitialized().get()) {
+                    CachePartitionResponse pc = client.getPartitionCache(graphName);
+                    RangeMap<Long, Integer> range = graph.getRange();
+                    List<Partition> ps = pc.getPartitionsList();
+                    HashMap<Integer, Partition> gps = new HashMap<>(ps.size(), 1);
+                    for (Partition p : ps) {
+                        gps.put(p.getId(), p);
+                        range.put(Range.closedOpen(p.getStartKey(), p.getEndKey()), p.getId());
+                    }
+                    graph.setPartitions(gps);
+                    graph.getInitialized().set(true);
+                }
+            }
+        }
+        return graph;
+    }
+
+    private void initCache() throws PDException {
+        if (!initialized.get()) {
+            synchronized (this) {
+                if (!initialized.get()) {
+                    CacheResponse cache = client.getClientCache();
+                    List<ShardGroup> shardGroups = cache.getShardsList();
+                    for (ShardGroup s : shardGroups) {
+                        this.groups.put(s.getId(), new KVPair<>(s, getLeader(s.getId())));
+                    }
+                    List<Metapb.Store> stores = cache.getStoresList();
+                    for (Metapb.Store store : stores) {
+                        this.stores.put(store.getId(), store);
+                    }
+                    List<Metapb.Graph> graphs = cache.getGraphsList();
+                    for (Metapb.Graph g : graphs) {
+                        GraphCache c = new GraphCache(g);
+                        caches.put(g.getGraphName(), c);
+                    }
+                    initialized.set(true);
+                }
+            }
+        }
+    }
+
+    /**
+     * Return the partition that the key belongs to
+     *
+     * @param key
+     * @return
+     */
+    public KVPair<Partition, Shard> getPartitionByKey(String graphName, byte[] key) {
+        int code = PartitionUtils.calcHashcode(key);
+        return getPartitionByCode(graphName, code);
+    }
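The cache resolves a key in two steps: hash the key to a code, then map the code through the per-graph RangeMap to a partition id and its cached leader shard. A usage sketch from the caller's side; `cache` stands for a ClientCache instance and the graph name is hypothetical.

// Illustrative lookup flow only, not part of this class.
byte[] key = "v:1".getBytes(java.nio.charset.StandardCharsets.UTF_8);
int code = PartitionUtils.calcHashcode(key);
KVPair<Partition, Shard> ps = cache.getPartitionByCode("hugegraph", code);
if (ps != null) {
    long leaderStoreId = ps.getValue().getStoreId(); // route the request to this store
}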
+
+    public boolean update(String graphName, int partId, Partition partition) {
+        GraphCache graph = getGraphCache(graphName);
+        try {
+            Partition p = graph.getPartition(partId);
+            if (p != null && p.equals(partition)) {
+                return false;
+            }
+            RangeMap<Long, Integer> range = graph.getRange();
+            graph.addPartition(partId, partition);
+            if (p != null) {
+                // The old range [1-3) was covered by [2-3). When [1-3) shrinks to [1-2),
+                // the original [1-3) entry must not be removed.
+                // Remove the old range only when both its start and end keys still map to
+                // this partition (i.e. it has not been covered yet).
+                if (Objects.equals(partition.getId(), range.get(partition.getStartKey())) &&
+                    Objects.equals(partition.getId(), range.get(partition.getEndKey() - 1))) {
+                    range.remove(range.getEntry(partition.getStartKey()).getKey());
+                }
+            }
+            range.put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+        return true;
+    }
+
+    public void removePartition(String graphName, int partId) {
+        GraphCache graph = getGraphCache(graphName);
+        Partition p = graph.removePartition(partId);
+        if (p != null) {
+            RangeMap<Long, Integer> range = graph.getRange();
+            if (Objects.equals(p.getId(), range.get(p.getStartKey())) &&
+                Objects.equals(p.getId(), range.get(p.getEndKey() - 1))) {
+                range.remove(range.getEntry(p.getStartKey()).getKey());
+            }
+        }
+    }
+
+    /**
+     * remove all partitions
+     */
+    public void removePartitions() {
+        for (Entry<String, GraphCache> entry : caches.entrySet()) {
+            removePartitions(entry.getValue());
+        }
+    }
+
+    private void removePartitions(GraphCache graph) {
+        graph.getState().clear();
+        graph.getRange().clear();
+    }
+
+    /**
+     * remove partition cache of graphName
+     *
+     * @param graphName
+     */
+    public void removeAll(String graphName) {
+        GraphCache graph = caches.get(graphName);
+        if (graph != null) {
+            removePartitions(graph);
+        }
+    }
+
+    public boolean updateShardGroup(ShardGroup shardGroup) {
+        KVPair<ShardGroup, Shard> old = groups.get(shardGroup.getId());
+        Shard leader = getLeader(shardGroup);
+        if (old != null) {
+            old.setKey(shardGroup);
+            old.setValue(leader);
+            return false;
+        }
+        groups.put(shardGroup.getId(), new KVPair<>(shardGroup, leader));
+        return true;
+    }
+
+    public void deleteShardGroup(int shardGroupId) {
+        groups.remove(shardGroupId);
+    }
+
+    public ShardGroup getShardGroup(int groupId) {
+        KVPair<ShardGroup, Shard> pair = groups.get(groupId);
+        if (pair != null) {
+            return pair.getKey();
+        }
+        return null;
+    }
+
+    public boolean addStore(Long storeId, Metapb.Store store) {
+        Metapb.Store oldStore = stores.get(storeId);
+        if (oldStore != null && oldStore.equals(store)) {
+            return false;
+        }
+        stores.put(storeId, store);
+        return true;
+    }
+
+    public Metapb.Store getStoreById(Long storeId) {
+        return stores.get(storeId);
+    }
+
+    public void removeStore(Long storeId) {
+        stores.remove(storeId);
+    }
+
+    public void reset() {
+        groups = new ConcurrentHashMap<>();
+        stores = new ConcurrentHashMap<>();
+        caches = new ConcurrentHashMap<>();
+    }
+
+    public Shard getLeader(int partitionId) {
+        KVPair<ShardGroup, Shard> pair = groups.get(partitionId);
+        if (pair != null) {
+            if (pair.getValue() != null) {
+                return pair.getValue();
+            }
+            for (Shard shard : pair.getKey().getShardsList()) {
+                if (shard.getRole() == Metapb.ShardRole.Leader) {
+                    pair.setValue(shard);
+                    return shard;
+                }
+            }
+        }
+        return null;
+    }
+
+    public Shard getLeader(ShardGroup shardGroup) {
+        if (shardGroup != null) {
+            for (Shard shard : shardGroup.getShardsList()) {
+                if (shard.getRole() == Metapb.ShardRole.Leader) {
+                    return shard;
+                }
+            }
+        }
+        return null;
+    }
+
+    public void updateLeader(int partitionId, Shard leader) {
+        KVPair<ShardGroup, Shard> pair = groups.get(partitionId);
+        if (pair != null && leader != null) {
+            Shard l = getLeader(partitionId);
+            if (l == null || leader.getStoreId() != l.getStoreId()) {
+                ShardGroup shardGroup = pair.getKey();
+                ShardGroup.Builder builder = ShardGroup.newBuilder(shardGroup).clearShards();
+                for (var shard : shardGroup.getShardsList()) {
+                    builder.addShards(
+                            Shard.newBuilder()
+                                 .setStoreId(shard.getStoreId())
+                                 .setRole(shard.getStoreId() == leader.getStoreId() ?
                                          Metapb.ShardRole.Leader : Metapb.ShardRole.Follower)
+                                 .build()
+                    );
+                }
+                pair.setKey(builder.build());
+                pair.setValue(leader);
+            }
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java
new file mode 100644
index 0000000000..4222bfbe50
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+
+public interface Discoverable {
+
+    NodeInfos getNodeInfos(Query query);
+
+    void scheduleTask();
+
+    void cancelTask();
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java
new file mode 100644
index 0000000000..aa80ec606f
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.LinkedList;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo;
+
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public abstract class DiscoveryClient implements Closeable, Discoverable {
+
+    private final Timer timer = new Timer("serverHeartbeat", true);
+    private final AtomicBoolean requireResetStub = new AtomicBoolean(false);
+    protected int period; // heartbeat period in milliseconds
+    LinkedList<String> pdAddresses = new LinkedList<>();
+    ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+    private volatile int currentIndex; // index of the PD address currently in use
+    private int maxTime = 6;
+    private ManagedChannel channel = null;
+    private DiscoveryServiceGrpc.DiscoveryServiceBlockingStub registerStub;
+    private DiscoveryServiceGrpc.DiscoveryServiceBlockingStub blockingStub;
+
+    public DiscoveryClient(String centerAddress, int delay) {
+        String[] addresses = centerAddress.split(",");
+        for (int i = 0; i < addresses.length; i++) {
+            String singleAddress = addresses[i];
+            if (singleAddress == null || singleAddress.isEmpty()) {
+                continue;
+            }
+            pdAddresses.add(addresses[i]);
+        }
+        this.period = delay;
+        if (maxTime < addresses.length) {
+            maxTime = addresses.length;
+        }
+    }
+
+    private <V, R> R tryWithTimes(Function<V, R> function, V v) {
+        R r;
+        Exception ex = null;
+        for (int i = 0; i < maxTime; i++) {
+            try {
+                r = function.apply(v);
+                return r;
+            } catch (Exception e) {
+                requireResetStub.set(true);
+                resetStub();
+                ex = e;
+            }
+        }
+        if (ex != null) {
+            log.error("Try discovery method with error: {}", ex.getMessage());
+        }
+        return null;
+    }
+
+    /**
+     * Reset the stub, iterating over the PD address list
+     */
+    private void resetStub() {
+        String errLog = null;
+        for (int i = currentIndex + 1; i <= pdAddresses.size() + currentIndex; i++) {
+            currentIndex = i % pdAddresses.size();
+            String singleAddress = pdAddresses.get(currentIndex);
+            try {
+                if (requireResetStub.get()) {
+                    resetChannel(singleAddress);
+                }
+                errLog = null;
+                break;
+            } catch (Exception e) {
+                requireResetStub.set(true);
+                if (errLog == null) {
+                    errLog = e.getMessage();
+                }
+                continue;
+            }
+        }
+        if (errLog != null) {
+            log.error(errLog);
+        }
+    }
+
+    /**
+     * Reset the channel and stubs against the given PD address
+     *
+     * @param singleAddress
+     * @throws PDException
+     */
+    private void resetChannel(String singleAddress) throws PDException {
+        readWriteLock.writeLock().lock();
+        try {
+            if (requireResetStub.get()) {
+                while (channel != null && !channel.shutdownNow().awaitTermination(
+                        100, TimeUnit.MILLISECONDS)) {
+                    continue;
+                }
+                channel = ManagedChannelBuilder.forTarget(singleAddress).usePlaintext().build();
+                this.registerStub = DiscoveryServiceGrpc.newBlockingStub(channel);
+                this.blockingStub = DiscoveryServiceGrpc.newBlockingStub(channel);
+                requireResetStub.set(false);
+            }
+        } catch (Exception e) {
+            throw new PDException(-1, String.format("Reset channel with error : %s.",
+                                                    e.getMessage()));
+        } finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    /**
+     * Get the registered node info
+     *
+     * @param query
+     * @return
+     */
+    @Override
+    public NodeInfos getNodeInfos(Query query) {
+        return tryWithTimes((q) -> {
+            this.readWriteLock.readLock().lock();
+            NodeInfos nodes;
+            try {
+                nodes = this.blockingStub.getNodes(q);
+            } finally {
+                this.readWriteLock.readLock().unlock();
+            }
+            return nodes;
+        }, query);
+    }
+
+    /**
+     * Start the heartbeat task
+     */
+    @Override
+    public void scheduleTask() {
+        timer.schedule(new TimerTask() {
+            @Override
+            public void run() {
+                NodeInfo nodeInfo = getRegisterNode();
+                tryWithTimes((t) -> {
+                    RegisterInfo register;
+                    readWriteLock.readLock().lock();
+                    try {
+                        register = registerStub.register(t);
+                        log.debug("Discovery Client work done.");
+                        Consumer<RegisterInfo> consumer = getRegisterConsumer();
+                        if (consumer != null) {
+                            consumer.accept(register);
+                        }
+                    } finally {
+                        readWriteLock.readLock().unlock();
+                    }
+                    return register;
+                }, nodeInfo);
+            }
+        }, 0, period);
+    }
+
+    abstract NodeInfo getRegisterNode();
+
+    abstract Consumer<RegisterInfo> getRegisterConsumer();
+
+    @Override
+    public void cancelTask() {
+        this.timer.cancel();
+    }
+
+    @Override
+    public void close() {
+        this.timer.cancel();
+        readWriteLock.writeLock().lock();
+        try {
+            while (channel != null && !channel.shutdownNow().awaitTermination(
+                    100, TimeUnit.MILLISECONDS)) {
+                continue;
+            }
+        } catch (Exception e) {
+            log.info("Close channel with error:", e);
+        } finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+}
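DiscoveryClient leaves exactly two hooks to a concrete client: the NodeInfo to register and an optional callback for the server's reply; the timer, locking and host failover are handled here. A minimal subclass sketch with hypothetical values (address, period, ids):

class DemoDiscovery extends DiscoveryClient {
    DemoDiscovery() {
        super("127.0.0.1:8686", 10_000); // PD address list, heartbeat period in ms (assumed values)
    }

    @Override
    NodeInfo getRegisterNode() {
        return NodeInfo.newBuilder().setId("svc-1").setAppName("demo")
                       .setAddress("127.0.0.1:9999").setInterval(10_000).build();
    }

    @Override
    Consumer<RegisterInfo> getRegisterConsumer() {
        return info -> { /* inspect the RegisterInfo reply if needed */ };
    }
}
// new DemoDiscovery().scheduleTask(); // registers every `period` ms until cancelTask()/close()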
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
new file mode 100644
index 0000000000..77ec9a36b2
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo;
+import org.apache.hugegraph.pd.grpc.discovery.RegisterType;
+
+public class DiscoveryClientImpl extends DiscoveryClient {
+
+    private final String id;
+    private final RegisterType type; // heartbeat type, reserved
+    private final String version;
+    private final String appName;
+    private final int times; // max heartbeat expiry count, reserved
+    private final String address;
+    private final Map<String, String> labels;
+    private final Consumer<RegisterInfo> registerConsumer;
+
+    private DiscoveryClientImpl(Builder builder) {
+        super(builder.centerAddress, builder.delay);
+        period = builder.delay;
+        id = builder.id;
+        type = builder.type;
+        version = builder.version;
+        appName = builder.appName;
+        times = builder.times;
+        address = builder.address;
+        labels = builder.labels;
+        registerConsumer = builder.registerConsumer;
+    }
+
+    public static Builder newBuilder() {
+        return new Builder();
+    }
+
+    @Override
+    NodeInfo getRegisterNode() {
+        return NodeInfo.newBuilder().setAddress(this.address)
+                       .setVersion(this.version)
+                       .setAppName(this.appName).setInterval(this.period)
+                       .setId(this.id).putAllLabels(labels).build();
+    }
+
+    @Override
+    Consumer<RegisterInfo> getRegisterConsumer() {
+        return registerConsumer;
+    }
+
+    public static final class Builder {
+
+        private int delay;
+        private String centerAddress;
+        private String id;
+        private RegisterType type;
+        private String address;
+        private Map<String, String> labels;
+        private String version;
+        private String appName;
+        private int times;
+        private Consumer<RegisterInfo> registerConsumer;
+
+        private Builder() {
+        }
+
+        public Builder setDelay(int val) {
+            delay = val;
+            return this;
+        }
+
+        public Builder setCenterAddress(String val) {
+            centerAddress = val;
+            return this;
+        }
+
+        public Builder setId(String val) {
+            id = val;
+            return this;
+        }
+
+        public Builder setType(RegisterType val) {
+            type = val;
+            return this;
+        }
+
+        public Builder setAddress(String val) {
+            address = val;
+            return this;
+        }
+
+        public Builder setLabels(Map<String, String> val) {
+            labels = val;
+            return this;
+        }
+
+        public Builder setVersion(String val) {
+            version = val;
+            return this;
+        }
+
+        public Builder setAppName(String val) {
+            appName = val;
+            return this;
+        }
+
+        public Builder setTimes(int val) {
+            times = val;
+            return this;
+        }
+
+        public Builder setRegisterConsumer(Consumer<RegisterInfo> registerConsumer) {
+            this.registerConsumer = registerConsumer;
+            return this;
+        }
+
+        public DiscoveryClientImpl build() {
+            return new DiscoveryClientImpl(this);
+        }
+    }
+}
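A builder-based construction sketch for the concrete client above; every value is hypothetical, and setDelay is the heartbeat period in milliseconds passed through to DiscoveryClient.

DiscoveryClientImpl client = DiscoveryClientImpl.newBuilder()
        .setCenterAddress("127.0.0.1:8686,127.0.0.2:8686") // comma-separated PD list
        .setId("store-node-1")
        .setAppName("hugegraph-store")
        .setVersion("1.5.0")
        .setAddress("127.0.0.1:8500")
        .setDelay(10_000)
        .setLabels(java.util.Map.of("zone", "a"))
        .build();
client.scheduleTask(); // begin periodic registration
// ... later: client.cancelTask(); client.close();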
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java
new file mode 100644
index 0000000000..27975ca57c
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java
@@ -0,0 +1,352 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.kv.K;
+import org.apache.hugegraph.pd.grpc.kv.KResponse;
+import org.apache.hugegraph.pd.grpc.kv.Kv;
+import org.apache.hugegraph.pd.grpc.kv.KvResponse;
+import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc;
+import org.apache.hugegraph.pd.grpc.kv.LockRequest;
+import org.apache.hugegraph.pd.grpc.kv.LockResponse;
+import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse;
+import org.apache.hugegraph.pd.grpc.kv.TTLRequest;
+import org.apache.hugegraph.pd.grpc.kv.TTLResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchEvent;
+import org.apache.hugegraph.pd.grpc.kv.WatchKv;
+import org.apache.hugegraph.pd.grpc.kv.WatchRequest;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchType;
+
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class KvClient<T extends WatchResponse> extends AbstractClient implements Closeable {
+
+    private final AtomicLong clientId = new AtomicLong(0);
+    private final Semaphore semaphore = new Semaphore(1);
+    private final ConcurrentHashMap<Long, StreamObserver<WatchResponse>> observers =
+            new ConcurrentHashMap<>();
+
+    public KvClient(PDConfig pdConfig) {
+        super(pdConfig);
+    }
+
+    @Override
+    protected AbstractStub createStub() {
+        return KvServiceGrpc.newStub(channel);
+    }
+
+    @Override
+    protected AbstractBlockingStub createBlockingStub() {
+        return KvServiceGrpc.newBlockingStub(channel);
+    }
+
+    public KvResponse put(String key, String value) throws PDException {
+        Kv kv = Kv.newBuilder().setKey(key).setValue(value).build();
+        KvResponse response = blockingUnaryCall(KvServiceGrpc.getPutMethod(), kv);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public KResponse get(String key) throws PDException {
+        K k = K.newBuilder().setKey(key).build();
+        KResponse response = blockingUnaryCall(KvServiceGrpc.getGetMethod(), k);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public KvResponse delete(String key) throws PDException {
+        K k = K.newBuilder().setKey(key).build();
+        KvResponse response = blockingUnaryCall(KvServiceGrpc.getDeleteMethod(), k);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public KvResponse deletePrefix(String prefix) throws PDException {
+        K k = K.newBuilder().setKey(prefix).build();
+        KvResponse response = blockingUnaryCall(KvServiceGrpc.getDeletePrefixMethod(), k);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public ScanPrefixResponse scanPrefix(String prefix) throws PDException {
        K k = K.newBuilder().setKey(prefix).build();
+        ScanPrefixResponse response = blockingUnaryCall(KvServiceGrpc.getScanPrefixMethod(), k);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public TTLResponse keepTTLAlive(String key) throws PDException {
+        TTLRequest request = TTLRequest.newBuilder().setKey(key).build();
+        TTLResponse response = blockingUnaryCall(KvServiceGrpc.getKeepTTLAliveMethod(), request);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public TTLResponse putTTL(String key, String value, long ttl) throws PDException {
+        TTLRequest request =
+                TTLRequest.newBuilder().setKey(key).setValue(value).setTtl(ttl).build();
+        TTLResponse response = blockingUnaryCall(KvServiceGrpc.getPutTTLMethod(), request);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    private void onEvent(WatchResponse value, Consumer<T> consumer) {
+        log.info("receive message for {}, event count: {}", value, value.getEventsCount());
+        clientId.compareAndSet(0L, value.getClientId());
+        if (value.getEventsCount() != 0) {
+            consumer.accept((T) value);
+        }
+    }
+
+    private StreamObserver<WatchResponse> getObserver(String key, Consumer<T> consumer,
+                                                      BiConsumer<String, Consumer<T>> listenWrapper,
+                                                      long client) {
+        StreamObserver<WatchResponse> observer;
+        if ((observer = observers.get(client)) == null) {
+            synchronized (this) {
+                if ((observer = observers.get(client)) == null) {
+                    observer = getObserver(key, consumer, listenWrapper);
+                    observers.put(client, observer);
+                }
+            }
+        }
+        return observer;
+    }
+
+    private StreamObserver<WatchResponse> getObserver(String key, Consumer<T> consumer,
+                                                      BiConsumer<String, Consumer<T>> listenWrapper) {
+        return new StreamObserver<WatchResponse>() {
+            @Override
+            public void onNext(WatchResponse value) {
+                switch (value.getState()) {
+                    case Starting:
+                        boolean b = clientId.compareAndSet(0, value.getClientId());
+                        if (b) {
+                            observers.put(value.getClientId(), this);
+                            log.info("set watch client id to :{}", value.getClientId());
+                        }
+                        semaphore.release();
+                        break;
+                    case Started:
+                        onEvent(value, consumer);
+                        break;
+                    case Leader_Changed:
+                        listenWrapper.accept(key, consumer);
+                        break;
+                    case Alive:
+                        // only for check client is alive, do nothing
+                        break;
+                    default:
+                        break;
+                }
+            }
+
+            @Override
+            public void onError(Throwable t) {
+                listenWrapper.accept(key, consumer);
+            }
+
+            @Override
+            public void onCompleted() {
+                // nothing to do
+            }
+        };
+    }
+
+    public void listen(String key, Consumer<T> consumer) throws PDException {
+        long value = clientId.get();
+        StreamObserver<WatchResponse> observer = getObserver(key, consumer, listenWrapper, value);
+        acquire();
+        WatchRequest k = WatchRequest.newBuilder().setClientId(value).setKey(key).build();
+        streamingCall(KvServiceGrpc.getWatchMethod(), k, observer, 1);
+    }
+
+    public void listenPrefix(String prefix, Consumer<T> consumer) throws PDException {
+        long value = clientId.get();
+        StreamObserver<WatchResponse> observer =
+                getObserver(prefix, consumer, prefixListenWrapper, value);
+        acquire();
+        WatchRequest k =
+                WatchRequest.newBuilder().setClientId(clientId.get()).setKey(prefix).build();
+        streamingCall(KvServiceGrpc.getWatchPrefixMethod(), k, observer, 1);
+    }
+
+    private void acquire() {
+        if (clientId.get() == 0L) {
+            try {
+                semaphore.acquire();
+                if (clientId.get() != 0L) {
+                    semaphore.release();
+                }
+            } catch (InterruptedException e) {
+                log.error("get semaphore with error:", e);
+            }
+        }
+    }
+
+    public List<String> getWatchList(T response) {
+        List<String> values = new LinkedList<>();
+        List<WatchEvent> eventsList = response.getEventsList();
+        for (WatchEvent event : eventsList) {
+            if (event.getType() != WatchType.Put) {
+                return null;
+            }
+            String value = event.getCurrent().getValue();
+            values.add(value);
+        }
+        return values;
+    }
+
+    public Map<String, String> getWatchMap(T response) {
+        Map<String, String> values = new HashMap<>();
+        List<WatchEvent> eventsList = response.getEventsList();
+        for (WatchEvent event : eventsList) {
+            if (event.getType() != WatchType.Put) {
+                return null;
+            }
+            WatchKv current = event.getCurrent();
+            String key = current.getKey();
+            String value = current.getValue();
+            values.put(key, value);
+        }
+        return values;
+    }
+
+    public LockResponse lock(String key, long ttl) throws PDException {
+        acquire();
+        LockResponse response;
+        try {
+            LockRequest k =
+                    LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl)
+                               .build();
+            response = blockingUnaryCall(KvServiceGrpc.getLockMethod(), k);
+            handleErrors(response.getHeader());
+            if (clientId.compareAndSet(0L, response.getClientId())) {
+                semaphore.release();
+            }
+        } catch (Exception e) {
+            if (clientId.get() == 0L) {
+                semaphore.release();
+            }
+            throw e;
+        }
+        return response;
+    }
+
+    BiConsumer<String, Consumer<T>> listenWrapper = (key, consumer) -> {
+        try {
+            listen(key, consumer);
+        } catch (PDException e) {
+            try {
+                log.warn("start listen with warning:", e);
+                Thread.sleep(1000);
+            } catch (InterruptedException ex) {
+                Thread.currentThread().interrupt(); // restore the interrupt status
+            }
+        }
+    };
+
+    public LockResponse lockWithoutReentrant(String key, long ttl) throws PDException {
+        acquire();
+        LockResponse response;
+        try {
+            LockRequest k =
+                    LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl)
+                               .build();
+            response = blockingUnaryCall(KvServiceGrpc.getLockWithoutReentrantMethod(), k);
+            handleErrors(response.getHeader());
+            if (clientId.compareAndSet(0L, response.getClientId())) {
+                semaphore.release();
+            }
+        } catch (Exception e) {
+            if (clientId.get() == 0L) {
+                semaphore.release();
+            }
+            throw e;
+        }
+        return response;
+    }
+
+    public LockResponse isLocked(String key) throws PDException {
+        LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build();
+        LockResponse response = blockingUnaryCall(KvServiceGrpc.getIsLockedMethod(), k);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public LockResponse unlock(String key) throws PDException {
+        assert clientId.get() != 0;
+        LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build();
+        LockResponse response = blockingUnaryCall(KvServiceGrpc.getUnlockMethod(), k);
+        handleErrors(response.getHeader());
+        clientId.compareAndSet(0L, response.getClientId());
+        assert clientId.get() == response.getClientId();
+        return response;
+    }
+
+    public LockResponse keepAlive(String key) throws PDException {
+        assert clientId.get() != 0;
+        LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build();
+        LockResponse response = blockingUnaryCall(KvServiceGrpc.getKeepAliveMethod(), k);
+        handleErrors(response.getHeader());
+        clientId.compareAndSet(0L, response.getClientId());
+        assert clientId.get() == response.getClientId();
+        return response;
+    }
+
+    @Override
+    public void close() {
+        super.close();
+    }
+
+    BiConsumer<String, Consumer<T>> prefixListenWrapper = (key, consumer) -> {
+        try {
+            listenPrefix(key, consumer);
+        } catch (PDException e) {
+            try {
+                log.warn("start listenPrefix with warning:", e);
+                Thread.sleep(1000);
+            } catch (InterruptedException ex) {
+                Thread.currentThread().interrupt(); // restore the interrupt status
+            }
+        }
+    };
+}
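A usage sketch for the KV client above, covering plain KV access, TTL keys, watches and the PD-backed distributed lock. PDConfig.of and the response getters follow the client code elsewhere in this patch, but treat the exact names here (PDConfig.of, getSucceed) as assumptions; all values are hypothetical.

KvClient<WatchResponse> kv = new KvClient<>(PDConfig.of("127.0.0.1:8686"));
kv.put("conf/a", "1");
String v = kv.get("conf/a").getValue();              // "1"
kv.putTTL("lease/x", "owner-1", 30_000);             // expires unless keepTTLAlive() renews it
kv.listen("conf/a", res -> { /* react to watch events */ });
if (kv.lock("locks/job", 10_000).getSucceed()) {     // TTL-guarded distributed lock
    try { /* critical section */ } finally { kv.unlock("locks/job"); }
}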
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
new file mode 100644
index 0000000000..b86d6b3946
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class LicenseClient extends AbstractClient {
+
+    public LicenseClient(PDConfig config) {
+        super(config);
+    }
+
+    @Override
+    protected AbstractStub createStub() {
+        return PDGrpc.newStub(channel);
+    }
+
+    @Override
+    protected AbstractBlockingStub createBlockingStub() {
+        return PDGrpc.newBlockingStub(channel);
+    }
+
+    public Pdpb.PutLicenseResponse putLicense(byte[] content) {
+        Pdpb.PutLicenseRequest request = Pdpb.PutLicenseRequest.newBuilder()
+                .setContent(ByteString.copyFrom(content))
+                .build();
+        try {
+            KVPair<Boolean, Pdpb.PutLicenseResponse> pair = concurrentBlockingUnaryCall(
+                    PDGrpc.getPutLicenseMethod(), request,
+                    (rs) -> rs.getHeader().getError().getType().equals(Pdpb.ErrorType.OK));
+            if (pair.getKey()) {
+                Pdpb.PutLicenseResponse.Builder builder = Pdpb.PutLicenseResponse.newBuilder();
+                builder.setHeader(okHeader);
+                return builder.build();
+            } else {
+                return pair.getValue();
+            }
+        } catch (Exception e) {
+            log.error("put license with error:", e);
+            Pdpb.ResponseHeader rh =
+                    newErrorHeader(Pdpb.ErrorType.LICENSE_ERROR_VALUE, e.getMessage());
+            return Pdpb.PutLicenseResponse.newBuilder().setHeader(rh).build();
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
new file mode 100644
index 0000000000..ae57e622cb
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
@@ -0,0 +1,1347 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import static org.apache.hugegraph.pd.watch.NodeEvent.EventType.NODE_PD_LEADER_CHANGE;
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionUtils;
+import org.apache.hugegraph.pd.grpc.MetaTask;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionByCodeRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.watch.NodeEvent;
+import org.apache.hugegraph.pd.watch.PartitionEvent;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.ManagedChannel;
+import io.grpc.MethodDescriptor;
+import io.grpc.StatusRuntimeException;
+import io.grpc.stub.AbstractBlockingStub;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * PD client implementation
+ *
+ * @author yanjinbing
+ */
+@Slf4j
+public class PDClient {
+
+    private final PDConfig config;
+    private final Pdpb.RequestHeader header;
+    private final ClientCache cache;
+    private final StubProxy stubProxy;
+    private final List<PDEventListener> eventListeners;
+    private PDWatch.Watcher partitionWatcher;
+    private PDWatch.Watcher storeWatcher;
+    private PDWatch.Watcher graphWatcher;
+    private PDWatch.Watcher shardGroupWatcher;
+    private PDWatch pdWatch;
+
+    private PDClient(PDConfig config) {
+        this.config = config;
+        this.header = Pdpb.RequestHeader.getDefaultInstance();
+        this.stubProxy = new StubProxy(config.getServerHost().split(","));
+        this.eventListeners = new CopyOnWriteArrayList<>();
+        this.cache = new ClientCache(this);
+    }
+
+    /**
+     * Create a PDClient object and initialize the stub
+     *
+     * @param config
+     * @return
+     */
+    public static PDClient create(PDConfig config) {
+        return new PDClient(config);
+    }
+
+    private synchronized void newBlockingStub() throws PDException {
+        if (stubProxy.get() != null) {
+            return;
+        }
+
+        String host = newLeaderStub();
+        if (host.isEmpty()) {
+            throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE,
+                                  "PD unreachable, pd.peers=" + config.getServerHost());
+        }
+
+        log.info("PDClient enable cache, init PDWatch object");
+        connectPdWatch(host);
+    }
+
+    public void connectPdWatch(String leader) {
+        if (pdWatch != null && Objects.equals(pdWatch.getCurrentHost(), leader) &&
+            pdWatch.checkChannel()) {
+            return;
+        }
+
+        log.info("PDWatch client connect host:{}", leader);
+        pdWatch = new PDWatchImpl(leader);
+
+        partitionWatcher = pdWatch.watchPartition(new PDWatch.Listener<>() {
+            @Override
+            public void onNext(PartitionEvent response) {
+                // log.info("PDClient receive partition event {}-{} {}",
+                //          response.getGraph(), response.getPartitionId(), response.getChangeType());
+                invalidPartitionCache(response.getGraph(), response.getPartitionId());
+
+                if (response.getChangeType() == PartitionEvent.ChangeType.DEL) {
+                    cache.removeAll(response.getGraph());
+                }
+
+                eventListeners.forEach(listener -> {
+                    listener.onPartitionChanged(response);
+                });
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                log.error("watchPartition exception {}", throwable.getMessage());
+                closeStub(false);
+            }
+        });
+
+        storeWatcher = pdWatch.watchNode(new PDWatch.Listener<>() {
+            @Override
+            public void onNext(NodeEvent response) {
+                log.info("PDClient receive store event {} {}",
+                         response.getEventType(), Long.toHexString(response.getNodeId()));
+
+                if (response.getEventType() == NODE_PD_LEADER_CHANGE) {
+                    // pd raft change
+                    var leaderIp = response.getGraph();
+                    log.info("watchNode: pd leader changed to {}, current watch:{}",
+                             leaderIp, pdWatch.getCurrentHost());
+                    closeStub(!Objects.equals(pdWatch.getCurrentHost(), leaderIp));
+                    connectPdWatch(leaderIp);
+                }
+
+                invalidStoreCache(response.getNodeId());
+                eventListeners.forEach(listener -> {
+                    listener.onStoreChanged(response);
+                });
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                log.error("watchNode exception {}", throwable.getMessage());
+                closeStub(false);
+            }
+        });
+
+        graphWatcher = pdWatch.watchGraph(new PDWatch.Listener<>() {
+            @Override
+            public void onNext(WatchResponse response) {
+                eventListeners.forEach(listener -> {
+                    listener.onGraphChanged(response);
+                });
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                log.warn("graphWatcher exception {}", throwable.getMessage());
+            }
+        });
+
+        shardGroupWatcher = pdWatch.watchShardGroup(new PDWatch.Listener<>() {
+            @Override
+            public void onNext(WatchResponse response) {
+                var shardResponse = response.getShardGroupResponse();
+                // log.info("PDClient receive shard group event: raft {}-{}",
+                //          shardResponse.getShardGroupId(), shardResponse.getType());
+                if (config.isEnableCache()) {
+                    switch (shardResponse.getType()) {
+                        case WATCH_CHANGE_TYPE_DEL:
+                            cache.deleteShardGroup(shardResponse.getShardGroupId());
+                            break;
+                        case WATCH_CHANGE_TYPE_ALTER:
+                            cache.updateShardGroup(response.getShardGroupResponse().getShardGroup());
+                            break;
+                        default:
+                            break;
+                    }
+                }
+                eventListeners.forEach(listener -> listener.onShardGroupChanged(response));
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                log.warn("shardGroupWatcher exception {}", throwable.getMessage());
+            }
+        });
+    }
+
+    private synchronized void closeStub(boolean closeWatcher) {
+        // TODO: the ManagedChannel is not closed properly
+        stubProxy.set(null);
+        cache.reset();
+
+        if (closeWatcher) {
+            if (partitionWatcher != null) {
+                partitionWatcher.close();
+                partitionWatcher = null;
+            }
+            if (storeWatcher != null) {
+                storeWatcher.close();
+                storeWatcher = null;
+            }
+            if (graphWatcher != null) {
+                graphWatcher.close();
+                graphWatcher = null;
+            }
+            if (shardGroupWatcher != null) {
+                shardGroupWatcher.close();
+                shardGroupWatcher = null;
+            }
+            pdWatch = null;
+        }
+    }
+
+    private PDGrpc.PDBlockingStub getStub() throws PDException {
+        if (stubProxy.get() == null) {
+            newBlockingStub();
+        }
+        return stubProxy.get().withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS);
+    }
+
+    private PDGrpc.PDBlockingStub newStub() throws PDException {
+        if (stubProxy.get() == null) {
+            newBlockingStub();
+        }
+        return PDGrpc.newBlockingStub(stubProxy.get().getChannel())
+                     .withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS);
+    }
+
+    private String newLeaderStub() {
+        String leaderHost = "";
+        for (int i = 0; i < stubProxy.getHostCount(); i++) {
+            String host = stubProxy.nextHost();
+            ManagedChannel channel = Channels.getChannel(host);
+
+            PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(channel)
+                                               .withDeadlineAfter(config.getGrpcTimeOut(),
+                                                                  TimeUnit.MILLISECONDS);
+            try {
+                var leaderIp = getLeaderIp(stub);
+                if (!leaderIp.equalsIgnoreCase(host)) {
+                    leaderHost = leaderIp;
+                    stubProxy.set(PDGrpc.newBlockingStub(channel)
+                                        .withDeadlineAfter(config.getGrpcTimeOut(),
+                                                           TimeUnit.MILLISECONDS));
+                } else {
+                    stubProxy.set(stub);
+                    leaderHost = host;
+                }
+                stubProxy.setLeader(leaderIp);
+
+                log.info("PDClient connect to host = {} success", leaderHost);
+                break;
+            } catch (Exception e) {
+                log.error("PDClient connect to {} exception {}, {}", host, e.getMessage(),
+                          e.getCause() != null ? e.getCause().getMessage() : "");
+            }
+        }
+        return leaderHost;
+    }
+
+    public String getLeaderIp() {
+        return getLeaderIp(stubProxy.get());
+    }
+
+    private String getLeaderIp(PDGrpc.PDBlockingStub stub) {
+        if (stub == null) {
+            try {
+                getStub();
+                return stubProxy.getLeader();
+            } catch (PDException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder()
+                .setHeader(header)
+                .build();
+        Metapb.Member leader = stub.getMembers(request).getLeader();
+        return leader.getGrpcUrl();
+    }
+
+    /**
+     * Register a store and return the store id; the first registration returns a new id
+     *
+     * @param store
+     * @return
+     */
+    public long registerStore(Metapb.Store store) throws PDException {
+        Pdpb.RegisterStoreRequest request = Pdpb.RegisterStoreRequest.newBuilder()
+                .setHeader(header)
+                .setStore(store)
+                .build();
+        Pdpb.RegisterStoreResponse response =
+                blockingUnaryCall(PDGrpc.getRegisterStoreMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getStoreId();
+    }
+
+    /**
+     * Return the Store object by storeId
+     *
+     * @param storeId
+     * @return
+     * @throws PDException
+     */
+    public Metapb.Store getStore(long storeId) throws PDException {
+        Metapb.Store store = cache.getStoreById(storeId);
+        if (store == null) {
+            Pdpb.GetStoreRequest request = Pdpb.GetStoreRequest.newBuilder()
+                    .setHeader(header)
+                    .setStoreId(storeId)
+                    .build();
+            Pdpb.GetStoreResponse response = getStub().getStore(request);
+            handleResponseError(response.getHeader());
+            store = response.getStore();
+            if (config.isEnableCache()) {
+                cache.addStore(storeId, store);
+            }
+        }
+        return store;
+    }
+
+    /**
+     * Update store info, including online/offline state
+     *
+     * @param store
+     * @return
+     */
+    public Metapb.Store updateStore(Metapb.Store store) throws PDException {
+        Pdpb.SetStoreRequest request = Pdpb.SetStoreRequest.newBuilder()
+                .setHeader(header)
+                .setStore(store)
+                .build();
+        Pdpb.SetStoreResponse response = getStub().setStore(request);
+        handleResponseError(response.getHeader());
+        store = response.getStore();
+        if (config.isEnableCache()) {
+            cache.addStore(store.getId(), store);
+        }
+        return store;
+    }
+
+    /**
+     * Return the active stores of the graph
+     *
+     * @param graphName
+     * @return
+     */
+    public List<Metapb.Store> getActiveStores(String graphName) throws PDException {
+        List<Metapb.Store> stores = new ArrayList<>();
+        KVPair<Metapb.Partition, Metapb.Shard> ptShard = this.getPartitionByCode(graphName, 0);
+        while (ptShard != null) {
+            stores.add(this.getStore(ptShard.getValue().getStoreId()));
+            if (ptShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE) {
+                ptShard = this.getPartitionByCode(graphName, ptShard.getKey().getEndKey());
+            } else {
+                ptShard = null;
+            }
+        }
+        return stores;
+    }
+
+    public List<Metapb.Store> getActiveStores() throws PDException {
+        Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+                .setHeader(header)
+                .setGraphName("")
+                .setExcludeOfflineStores(true)
+                .build();
+        Pdpb.GetAllStoresResponse response = getStub().getAllStores(request);
+        handleResponseError(response.getHeader());
+        return response.getStoresList();
+    }
+
+    /**
+     * Return all stores of the graph, including offline ones
+     *
+     * @param graphName
+     * @return
+     */
+    public List<Metapb.Store> getAllStores(String graphName) throws PDException {
+        Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+                .setHeader(header)
+                .setGraphName(graphName)
+                .setExcludeOfflineStores(false)
+                .build();
+        Pdpb.GetAllStoresResponse response = getStub().getAllStores(request);
+        handleResponseError(response.getHeader());
+        return response.getStoresList();
+    }
+
+    /**
+     * Store heartbeat; called periodically to keep the store online
+     *
+     * @param stats
+     * @throws PDException
+     */
+    public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDException {
+        Pdpb.StoreHeartbeatRequest request = Pdpb.StoreHeartbeatRequest.newBuilder()
+                .setHeader(header)
+                .setStats(stats)
+                .build();
+        Pdpb.StoreHeartbeatResponse response = getStub().storeHeartbeat(request);
+        handleResponseError(response.getHeader());
+        return response.getClusterStats();
+    }
+
+    private KVPair<Metapb.Partition, Metapb.Shard> getKvPair(
+            String graphName, byte[] key,
+            KVPair<Metapb.Partition, Metapb.Shard> partShard) throws PDException {
+        if (partShard == null) {
+            GetPartitionRequest request = GetPartitionRequest.newBuilder()
+                    .setHeader(header)
+                    .setGraphName(graphName)
+                    .setKey(ByteString.copyFrom(key))
+                    .build();
+            GetPartitionResponse response =
+                    blockingUnaryCall(PDGrpc.getGetPartitionMethod(), request);
+            handleResponseError(response.getHeader());
+            partShard = new KVPair<>(response.getPartition(), response.getLeader());
+            cache.update(graphName, partShard.getKey().getId(), partShard.getKey());
+        }
+        return partShard;
+    }
+
+    /**
+     * Query the partition that the key belongs to
+     *
+     * @param graphName
+     * @param key
+     * @return
+     * @throws PDException
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartition(String graphName, byte[] key)
+            throws PDException {
+        // Check the cache first; only call PD on a cache miss
+        KVPair<Metapb.Partition, Metapb.Shard> partShard = cache.getPartitionByKey(graphName, key);
+        partShard = getKvPair(graphName, key, partShard);
+        return partShard;
+    }
+
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartition(String graphName, byte[] key,
+                                                               int code) throws PDException {
+        KVPair<Metapb.Partition, Metapb.Shard> partShard =
+                cache.getPartitionByCode(graphName, code);
+        partShard = getKvPair(graphName, key, partShard);
+        return partShard;
+    }
+
+    /**
+     * Query the partition by hashcode
+     *
+     * @param graphName
+     * @param hashCode
+     * @return
+     * @throws PDException
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartitionByCode(String graphName,
+                                                                     long hashCode)
+            throws PDException {
+        // Check the cache first; only call PD on a cache miss
+        KVPair<Metapb.Partition, Metapb.Shard> partShard =
+                cache.getPartitionByCode(graphName, hashCode);
+        if (partShard == null) {
+            GetPartitionByCodeRequest request = GetPartitionByCodeRequest.newBuilder()
+                    .setHeader(header)
+                    .setGraphName(graphName)
+                    .setCode(hashCode)
+                    .build();
+            GetPartitionResponse response =
+                    blockingUnaryCall(PDGrpc.getGetPartitionByCodeMethod(), request);
+            handleResponseError(response.getHeader());
+            partShard = new KVPair<>(response.getPartition(), response.getLeader());
+            cache.update(graphName, partShard.getKey().getId(), partShard.getKey());
+            cache.updateShardGroup(getShardGroup(partShard.getKey().getId()));
+        }
+
+        if (partShard.getValue() == null) {
+            ShardGroup shardGroup = getShardGroup(partShard.getKey().getId());
+            if (shardGroup != null) {
+                for (var shard : shardGroup.getShardsList()) {
+                    if (shard.getRole() == Metapb.ShardRole.Leader) {
+                        partShard.setValue(shard);
+                    }
+                }
+            } else {
+                log.error("getPartitionByCode: get shard group failed, {}",
+                          partShard.getKey().getId());
+            }
+        }
+        return partShard;
+    }
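The two lookups above implement cache-first routing: a hit is answered locally, a miss falls through to an RPC and back-fills both the partition and its shard group. A usage sketch from the caller's side; PDConfig.of follows the client code elsewhere in this patch but is an assumption here, and the graph name and key are hypothetical.

PDClient pd = PDClient.create(PDConfig.of("127.0.0.1:8686"));
byte[] key = "v:1".getBytes(java.nio.charset.StandardCharsets.UTF_8);
KVPair<Metapb.Partition, Metapb.Shard> route = pd.getPartition("hugegraph", key);
long storeId = route.getValue().getStoreId(); // leader store to send the request to
Metapb.Store store = pd.getStore(storeId);    // resolve its address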
+
+    /**
+     * Get the hashcode of the key
+     */
+    public int keyToCode(String graphName, byte[] key) {
+        return PartitionUtils.calcHashcode(key);
+    }
+
+    /**
+     * Return partition info by partition id (RPC request)
+     *
+     * @param graphName
+     * @param partId
+     * @return
+     * @throws PDException
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartitionById(String graphName, int partId)
+            throws PDException {
+        KVPair<Metapb.Partition, Metapb.Shard> partShard =
+                cache.getPartitionById(graphName, partId);
+        if (partShard == null) {
+            Pdpb.GetPartitionByIDRequest request = Pdpb.GetPartitionByIDRequest.newBuilder()
+                    .setHeader(header)
+                    .setGraphName(graphName)
+                    .setPartitionId(partId)
+                    .build();
+            GetPartitionResponse response =
+                    blockingUnaryCall(PDGrpc.getGetPartitionByIDMethod(), request);
+            handleResponseError(response.getHeader());
+            partShard = new KVPair<>(response.getPartition(), response.getLeader());
+            if (config.isEnableCache()) {
+                cache.update(graphName, partShard.getKey().getId(), partShard.getKey());
+                cache.updateShardGroup(getShardGroup(partShard.getKey().getId()));
+            }
+        }
+        if (partShard.getValue() == null) {
+            var shardGroup = getShardGroup(partShard.getKey().getId());
+            if (shardGroup != null) {
+                for (var shard : shardGroup.getShardsList()) {
+                    if (shard.getRole() == Metapb.ShardRole.Leader) {
+                        partShard.setValue(shard);
+                    }
+                }
+            } else {
+                log.error("getPartitionById: get shard group failed, {}",
+                          partShard.getKey().getId());
+            }
+        }
+        return partShard;
+    }
+
+    public ShardGroup getShardGroup(int partId) throws PDException {
+        ShardGroup group = cache.getShardGroup(partId);
+        if (group == null) {
+            Pdpb.GetShardGroupRequest request = Pdpb.GetShardGroupRequest.newBuilder()
+                    .setHeader(header)
+                    .setGroupId(partId)
+                    .build();
+            Pdpb.GetShardGroupResponse response =
+                    blockingUnaryCall(PDGrpc.getGetShardGroupMethod(), request);
+            handleResponseError(response.getHeader());
+            group = response.getShardGroup();
+            if (config.isEnableCache()) {
+                cache.updateShardGroup(group);
+            }
+        }
+        return group;
+    }
+
+    public void updateShardGroup(ShardGroup shardGroup) throws PDException {
+        Pdpb.UpdateShardGroupRequest request = Pdpb.UpdateShardGroupRequest.newBuilder()
+                .setHeader(header)
+                .setShardGroup(shardGroup)
+                .build();
+        Pdpb.UpdateShardGroupResponse response =
+                blockingUnaryCall(PDGrpc.getUpdateShardGroupMethod(), request);
+        handleResponseError(response.getHeader());
+
+        if (config.isEnableCache()) {
+            cache.updateShardGroup(shardGroup);
+        }
+    }
+
+    /**
+     * Return all partitions spanned by startKey and endKey
+     *
+     * @param graphName
+     * @param startKey
+     * @param endKey
+     * @return
+     * @throws PDException
+     */
+    public List<KVPair<Metapb.Partition, Metapb.Shard>> scanPartitions(String graphName,
+                                                                       byte[] startKey,
+                                                                       byte[] endKey)
+            throws PDException {
+        List<KVPair<Metapb.Partition, Metapb.Shard>> partitions = new ArrayList<>();
+        KVPair<Metapb.Partition, Metapb.Shard> startPartShard = getPartition(graphName, startKey);
+        KVPair<Metapb.Partition, Metapb.Shard> endPartShard = getPartition(graphName, endKey);
+        if (startPartShard == null || endPartShard == null) {
+            return null;
+        }
+
+        partitions.add(startPartShard);
+        while (startPartShard.getKey().getEndKey() < endPartShard.getKey().getEndKey()
+               && startPartShard.getKey().getEndKey() <
+                  PartitionUtils.MAX_VALUE /* exclude the last partition */) {
+            startPartShard = getPartitionByCode(graphName, startPartShard.getKey().getEndKey());
+            partitions.add(startPartShard);
+        }
+        return partitions;
+    }
+
+    /**
+     * Query partition info by condition
+     *
+     * @return
+     * @throws PDException
+     */
+    public List<Metapb.Partition> getPartitionsByStore(long storeId) throws PDException {
+        Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder()
+                .setStoreId(storeId)
+                .build();
+        Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder()
+                .setQuery(query)
+                .build();
+        Pdpb.QueryPartitionsResponse response =
+                blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getPartitionsList();
+    }
+
+    /**
+     * Find the given partitionId on the given store
+     *
+     * @return
+     * @throws PDException
+     */
+    public List<Metapb.Partition> queryPartitions(long storeId, int partitionId)
+            throws PDException {
+        Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder()
+                .setStoreId(storeId)
+                .setPartitionId(partitionId)
+                .build();
+        Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder()
+                .setQuery(query)
+                .build();
+        Pdpb.QueryPartitionsResponse response =
+                blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getPartitionsList();
+    }
+
+    public List<Metapb.Partition> getPartitions(long storeId, String graphName)
+            throws PDException {
+        Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder()
+                .setStoreId(storeId)
+                .setGraphName(graphName)
+                .build();
+        Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder()
+                .setQuery(query)
+                .build();
+        Pdpb.QueryPartitionsResponse response =
+                blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getPartitionsList();
+    }
+
+    public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException {
+        Pdpb.SetGraphRequest request = Pdpb.SetGraphRequest.newBuilder()
+                .setGraph(graph)
+                .build();
+        Pdpb.SetGraphResponse response = blockingUnaryCall(PDGrpc.getSetGraphMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getGraph();
+    }
+
+    public Metapb.Graph getGraph(String graphName) throws PDException {
+        GetGraphRequest request = GetGraphRequest.newBuilder()
+                .setGraphName(graphName)
+                .build();
+        Pdpb.GetGraphResponse response = blockingUnaryCall(PDGrpc.getGetGraphMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getGraph();
+    }
+
+    public Metapb.Graph getGraphWithOutException(String graphName) throws PDException {
+        GetGraphRequest request = GetGraphRequest.newBuilder()
+                .setGraphName(graphName)
+                .build();
+        Pdpb.GetGraphResponse response = blockingUnaryCall(PDGrpc.getGetGraphMethod(), request);
+        return response.getGraph();
+    }
+
+    public Metapb.Graph delGraph(String graphName) throws PDException {
+        Pdpb.DelGraphRequest request = Pdpb.DelGraphRequest.newBuilder()
+                .setGraphName(graphName)
+                .build();
+        Pdpb.DelGraphResponse response = blockingUnaryCall(PDGrpc.getDelGraphMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getGraph();
+    }
+
+    public List<Metapb.Partition> updatePartition(List<Metapb.Partition> partitions)
+            throws PDException {
+        Pdpb.UpdatePartitionRequest request = Pdpb.UpdatePartitionRequest.newBuilder()
+                .addAllPartition(partitions)
+                .build();
+        Pdpb.UpdatePartitionResponse response =
+                blockingUnaryCall(PDGrpc.getUpdatePartitionMethod(), request);
+        handleResponseError(response.getHeader());
+        invalidPartitionCache();
+        return response.getPartitionList();
+    }
+
+    public Metapb.Partition delPartition(String graphName, int partitionId) throws PDException {
+        Pdpb.DelPartitionRequest request = Pdpb.DelPartitionRequest.newBuilder()
+                .setGraphName(graphName)
+                .setPartitionId(partitionId)
+                .build();
+        Pdpb.DelPartitionResponse response =
+                blockingUnaryCall(PDGrpc.getDelPartitionMethod(), request);
+        handleResponseError(response.getHeader());
+        invalidPartitionCache(graphName, partitionId);
+        return response.getPartition();
+    }
+
+    /**
+     * Invalidate the partition cache entry
+     */
+    public void invalidPartitionCache(String graphName, int partitionId) {
+        // Check whether the cache entry exists
+        if (null != cache.getPartitionById(graphName, partitionId)) {
+            cache.removePartition(graphName, partitionId);
+        }
+    }
+
+    /**
+     * Invalidate the whole partition cache
+     */
+    public void invalidPartitionCache() {
+        cache.removePartitions();
+    }
+
+    /**
+     * Invalidate the store cache entry
+     */
+    public void invalidStoreCache(long storeId) {
+        cache.removeStore(storeId);
+    }
+
+    /**
+     * Called by HugeGraph server: the leader has changed, update the cache
+     */
+    public void updatePartitionLeader(String graphName, int partId, long leaderStoreId) {
+        KVPair<Metapb.Partition, Metapb.Shard> partShard = null;
+        try {
+            partShard = this.getPartitionById(graphName, partId);
+
+            if (partShard != null && partShard.getValue().getStoreId() != leaderStoreId) {
+                var shardGroup = this.getShardGroup(partId);
+                Metapb.Shard shard = null;
+                List<Metapb.Shard> shards = new ArrayList<>();
+
+                for (Metapb.Shard s : shardGroup.getShardsList()) {
+                    if (s.getStoreId() == leaderStoreId) {
+                        shard = s;
+                        shards.add(Metapb.Shard.newBuilder(s)
+                                               .setStoreId(s.getStoreId())
+                                               .setRole(Metapb.ShardRole.Leader).build());
+                    } else {
+                        shards.add(Metapb.Shard.newBuilder(s)
+                                               .setStoreId(s.getStoreId())
+                                               .setRole(Metapb.ShardRole.Follower).build());
+                    }
+                }
+
+                if (config.isEnableCache()) {
+                    if (shard == null) {
+                        // Leader not found among the partition's shards,
+                        // which means the partition has been migrated
+                        cache.removePartition(graphName, partId);
+                    }
+                }
+            }
+        } catch (PDException e) {
+            log.error("getPartitionException: {}", e.getMessage());
+        }
+    }
+
+    /**
+     * Called by hugegraph-store to update the cache
+     *
+     * @param partition
+     */
+    public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader) {
+        if (config.isEnableCache()) {
+            cache.update(partition.getGraphName(), partition.getId(), partition);
+            cache.updateLeader(partition.getId(), leader);
+        }
+    }
+
+    public Pdpb.GetIdResponse getIdByKey(String key, int delta) throws PDException {
+        Pdpb.GetIdRequest request = Pdpb.GetIdRequest.newBuilder()
+                .setHeader(header)
+                .setKey(key)
+                .setDelta(delta)
+                .build();
+        Pdpb.GetIdResponse response = blockingUnaryCall(PDGrpc.getGetIdMethod(), request);
+        handleResponseError(response.getHeader());
+        return response;
+    }
+
+    public Pdpb.ResetIdResponse resetIdByKey(String key) throws PDException {
+        Pdpb.ResetIdRequest request = Pdpb.ResetIdRequest.newBuilder()
+                .setHeader(header)
+                .setKey(key)
+                .build();
+        Pdpb.ResetIdResponse response = blockingUnaryCall(PDGrpc.getResetIdMethod(), request);
+        handleResponseError(response.getHeader());
+        return response;
+    }
+
+    public Metapb.Member getLeader() throws PDException {
+        Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder()
+                .setHeader(header)
+                .build();
+        Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getLeader();
+    }
+
+    public Pdpb.GetMembersResponse getMembers() throws PDException {
+        Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder()
+                .setHeader(header)
+                .build();
+        Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request);
+        handleResponseError(response.getHeader());
+        return response;
+    }
+
+    public Metapb.ClusterStats getClusterStats() throws PDException {
+        Pdpb.GetClusterStatsRequest request = Pdpb.GetClusterStatsRequest.newBuilder()
+                .setHeader(header)
+                .build();
+        Pdpb.GetClusterStatsResponse response =
+                blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getCluster();
+    }
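getIdByKey hands out blocks of monotonically increasing ids keyed by an arbitrary string, and resetIdByKey rewinds the counter. A short sketch; the key name and delta are hypothetical, and the getId accessor follows the generated Pdpb message but is an assumption here.

Pdpb.GetIdResponse resp = pd.getIdByKey("vertex-id", 100); // reserve a block of 100 ids
long first = resp.getId();                                 // block start; the next call continues after it
// pd.resetIdByKey("vertex-id");                           // rewind the counter when required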
+    public Metapb.ClusterStats getClusterStats() throws PDException {
+        Pdpb.GetClusterStatsRequest request = Pdpb.GetClusterStatsRequest.newBuilder()
+                .setHeader(header)
+                .build();
+        Pdpb.GetClusterStatsResponse response =
+                blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getCluster();
+    }
+
+    private <ReqT, RespT, StubT extends AbstractBlockingStub<StubT>> RespT
+    blockingUnaryCall(MethodDescriptor<ReqT, RespT> method, ReqT req) throws PDException {
+        return blockingUnaryCall(method, req, 1);
+    }
+
+    private <ReqT, RespT, StubT extends AbstractBlockingStub<StubT>> RespT
+    blockingUnaryCall(MethodDescriptor<ReqT, RespT> method, ReqT req, int retry) throws PDException {
+        AbstractBlockingStub stub = (AbstractBlockingStub) getStub();
+        try {
+            return io.grpc.stub.ClientCalls.blockingUnaryCall(stub.getChannel(), method,
+                                                              stub.getCallOptions(), req);
+        } catch (Exception e) {
+            log.error(method.getFullMethodName() + " exception, {}", e.getMessage());
+            if (e instanceof StatusRuntimeException) {
+                if (retry < stubProxy.getHostCount()) {
+                    // The host is unreachable: close the current connection
+                    // and retry against the next host
+                    closeStub(true);
+                    return blockingUnaryCall(method, req, ++retry);
+                }
+            }
+        }
+        return null;
+    }
+
+    private void handleResponseError(Pdpb.ResponseHeader header) throws PDException {
+        var errorType = header.getError().getType();
+        if (header.hasError() && errorType != Pdpb.ErrorType.OK) {
+            throw new PDException(header.getError().getTypeValue(),
+                                  String.format("PD request error, error code = %d, msg = %s",
+                                                header.getError().getTypeValue(),
+                                                header.getError().getMessage()));
+        }
+    }
+
+    public void addEventListener(PDEventListener listener) {
+        eventListeners.add(listener);
+    }
+
+    public PDWatch getWatchClient() {
+        return new PDWatchImpl(stubProxy.getHost());
+    }
+
+    /**
+     * Return the status info of the stores
+     */
+    public List<Metapb.Store> getStoreStatus(boolean offlineExcluded) throws PDException {
+        Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+                .setHeader(header)
+                .setExcludeOfflineStores(offlineExcluded)
+                .build();
+        Pdpb.GetAllStoresResponse response = getStub().getStoreStatus(request);
+        handleResponseError(response.getHeader());
+        return response.getStoresList();
+    }
+
+    public void setGraphSpace(String graphSpaceName, long storageLimit) throws PDException {
+        Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder()
+                .setName(graphSpaceName)
+                .setStorageLimit(storageLimit)
+                .setTimestamp(System.currentTimeMillis())
+                .build();
+        Pdpb.SetGraphSpaceRequest request = Pdpb.SetGraphSpaceRequest.newBuilder()
+                .setHeader(header)
+                .setGraphSpace(graphSpace)
+                .build();
+        Pdpb.SetGraphSpaceResponse response = getStub().setGraphSpace(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public List<Metapb.GraphSpace> getGraphSpace(String graphSpaceName) throws PDException {
+        Pdpb.GetGraphSpaceRequest.Builder builder = Pdpb.GetGraphSpaceRequest.newBuilder();
+        Pdpb.GetGraphSpaceRequest request;
+        builder.setHeader(header);
+        if (graphSpaceName != null && graphSpaceName.length() > 0) {
+            builder.setGraphSpaceName(graphSpaceName);
+        }
+        request = builder.build();
+        Pdpb.GetGraphSpaceResponse response = getStub().getGraphSpace(request);
+        List<Metapb.GraphSpace> graphSpaceList = response.getGraphSpaceList();
+        handleResponseError(response.getHeader());
+        return graphSpaceList;
+    }
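blockingUnaryCall() retries the next host from StubProxy after a StatusRuntimeException, at most once per configured host, while handleResponseError() converts a non-OK response header into a PDException, so a caller needs a single catch block either way. A sketch against a hypothetical three-node PD cluster:

```java
import java.util.List;

import org.apache.hugegraph.pd.client.PDClient;
import org.apache.hugegraph.pd.client.PDConfig;
import org.apache.hugegraph.pd.common.PDException;
import org.apache.hugegraph.pd.grpc.Metapb;

public class FailoverExample {

    public static void main(String[] args) {
        // Several candidate hosts: when one is unreachable, the client
        // rotates to the next one before giving up.
        PDClient client = PDClient.create(
                PDConfig.of("localhost:8686,localhost:8687,localhost:8688"));
        try {
            List<Metapb.Store> stores = client.getStoreStatus(true);  // exclude offline stores
            for (Metapb.Store store : stores) {
                System.out.println(store.getId() + " @ " + store.getAddress());
            }
        } catch (PDException e) {
            // handleResponseError() embeds the Pdpb error code in the message
            System.err.println("PD request failed: " + e.getMessage());
        }
    }
}
```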
+    public void setPDConfig(int partitionCount, String peerList, int shardCount,
+                            long version) throws PDException {
+        Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder()
+                .setPartitionCount(partitionCount)
+                .setPeersList(peerList)
+                .setShardCount(shardCount)
+                .setVersion(version)
+                .setTimestamp(System.currentTimeMillis())
+                .build();
+        Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder()
+                .setHeader(header)
+                .setPdConfig(pdConfig)
+                .build();
+        Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public Metapb.PDConfig getPDConfig() throws PDException {
+        Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder()
+                .setHeader(header)
+                .build();
+        Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request);
+        handleResponseError(response.getHeader());
+        return response.getPdConfig();
+    }
+
+    public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException {
+        Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder()
+                .setHeader(header)
+                .setPdConfig(pdConfig)
+                .build();
+        Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public Metapb.PDConfig getPDConfig(long version) throws PDException {
+        Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder()
+                .setHeader(header)
+                .setVersion(version)
+                .build();
+        Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request);
+        handleResponseError(response.getHeader());
+        return response.getPdConfig();
+    }
+
+    public void changePeerList(String peerList) throws PDException {
+        Pdpb.ChangePeerListRequest request = Pdpb.ChangePeerListRequest.newBuilder()
+                .setPeerList(peerList)
+                .setHeader(header).build();
+        Pdpb.getChangePeerListResponse response =
+                blockingUnaryCall(PDGrpc.getChangePeerListMethod(), request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Working mode
+     * Auto: split automatically until each store reaches the maximum partition count
+     *
+     * @throws PDException when the PD server returns an error
+     */
+    public void splitData() throws PDException {
+        Pdpb.SplitDataRequest request = Pdpb.SplitDataRequest.newBuilder()
+                .setHeader(header)
+                .setMode(Pdpb.OperationMode.Auto)
+                .build();
+        Pdpb.SplitDataResponse response = getStub().splitData(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Working mode
+     * Auto: split automatically until each store reaches the maximum partition count
+     * Expert: expert mode, the splitParams must be specified
+     *
+     * @param mode   operation mode
+     * @param params split parameters for expert mode
+     * @throws PDException when the PD server returns an error
+     */
+    public void splitData(Pdpb.OperationMode mode, List<Pdpb.SplitDataParam> params) throws PDException {
+        Pdpb.SplitDataRequest request = Pdpb.SplitDataRequest.newBuilder()
+                .setHeader(header)
+                .setMode(mode)
+                .addAllParam(params).build();
+        Pdpb.SplitDataResponse response = getStub().splitData(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public void splitGraphData(String graphName, int toCount) throws PDException {
+        Pdpb.SplitGraphDataRequest request = Pdpb.SplitGraphDataRequest.newBuilder()
+                .setHeader(header)
+                .setGraphName(graphName)
+                .setToCount(toCount)
+                .build();
+        Pdpb.SplitDataResponse response = getStub().splitGraphData(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Move partitions automatically until every store holds the same number of partitions
+     *
+     * @throws PDException when the PD server returns an error
+     */
+    public void balancePartition() throws PDException {
+        Pdpb.MovePartitionRequest request = Pdpb.MovePartitionRequest.newBuilder()
+                .setHeader(header)
+                .setMode(Pdpb.OperationMode.Auto)
+                .build();
+        Pdpb.MovePartitionResponse response = getStub().movePartition(request);
+        handleResponseError(response.getHeader());
+    }
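With the Auto modes, rebalancing needs no parameters; the Expert overloads exist for handing PD explicit split/transfer parameter lists. A sketch of the automatic path (destructive on a real cluster, so treat it as illustration only):

```java
import org.apache.hugegraph.pd.client.PDClient;
import org.apache.hugegraph.pd.client.PDConfig;
import org.apache.hugegraph.pd.common.PDException;

public class AutoRebalanceExample {

    public static void main(String[] args) throws PDException {
        PDClient client = PDClient.create(PDConfig.of("localhost:8686"));
        // Auto mode: split partitions until each store reaches the maximum count
        client.splitData();
        // Auto mode: move partitions until all stores hold the same number
        client.balancePartition();
    }
}
```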
+    /**
+     * Working mode
+     * Auto: move partitions automatically until every store holds the same number of partitions
+     * Expert: expert mode, the transferParams must be specified
+     *
+     * @param mode   operation mode
+     * @param params transfer parameters for expert mode
+     * @throws PDException when the PD server returns an error
+     */
+    public void movePartition(Pdpb.OperationMode mode, List<Pdpb.MovePartitionParam> params) throws PDException {
+        Pdpb.MovePartitionRequest request = Pdpb.MovePartitionRequest.newBuilder()
+                .setHeader(header)
+                .setMode(mode)
+                .addAllParam(params).build();
+        Pdpb.MovePartitionResponse response = getStub().movePartition(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public void reportTask(MetaTask.Task task) throws PDException {
+        Pdpb.ReportTaskRequest request = Pdpb.ReportTaskRequest.newBuilder()
+                .setHeader(header)
+                .setTask(task).build();
+        Pdpb.ReportTaskResponse response = blockingUnaryCall(PDGrpc.getReportTaskMethod(), request);
+        handleResponseError(response.getHeader());
+    }
+
+    public Metapb.PartitionStats getPartitionsStats(String graph, int partId) throws PDException {
+        Pdpb.GetPartitionStatsRequest request = Pdpb.GetPartitionStatsRequest.newBuilder()
+                .setHeader(header)
+                .setGraphName(graph)
+                .setPartitionId(partId)
+                .build();
+        Pdpb.GetPartitionStatsResponse response = getStub().getPartitionStats(request);
+        handleResponseError(response.getHeader());
+        return response.getPartitionStats();
+    }
+
+    /**
+     * Balance the number of leaders across stores
+     */
+    public void balanceLeaders() throws PDException {
+        Pdpb.BalanceLeadersRequest request = Pdpb.BalanceLeadersRequest.newBuilder()
+                .setHeader(header)
+                .build();
+        Pdpb.BalanceLeadersResponse response = getStub().balanceLeaders(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Remove the store from PD
+     */
+    public Metapb.Store delStore(long storeId) throws PDException {
+        Pdpb.DetStoreRequest request = Pdpb.DetStoreRequest.newBuilder()
+                .setHeader(header)
+                .setStoreId(storeId)
+                .build();
+        Pdpb.DetStoreResponse response = getStub().delStore(request);
+        handleResponseError(response.getHeader());
+        return response.getStore();
+    }
+
+    /**
+     * Compact the whole RocksDB instance
+     *
+     * @throws PDException when the PD server returns an error
+     */
+    public void dbCompaction() throws PDException {
+        Pdpb.DbCompactionRequest request = Pdpb.DbCompactionRequest
+                .newBuilder()
+                .setHeader(header)
+                .build();
+        Pdpb.DbCompactionResponse response = getStub().dbCompaction(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Compact the given RocksDB table
+     *
+     * @param tableName the table to compact
+     * @throws PDException when the PD server returns an error
+     */
+    public void dbCompaction(String tableName) throws PDException {
+        Pdpb.DbCompactionRequest request = Pdpb.DbCompactionRequest
+                .newBuilder()
+                .setHeader(header)
+                .setTableName(tableName)
+                .build();
+        Pdpb.DbCompactionResponse response = getStub().dbCompaction(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Merge partitions: shrink the current cluster to toCount partitions
+     *
+     * @param toCount target number of partitions
+     * @throws PDException when the PD server returns an error
+     */
+    public void combineCluster(int toCount) throws PDException {
+        Pdpb.CombineClusterRequest request = Pdpb.CombineClusterRequest
+                .newBuilder()
+                .setHeader(header)
+                .setToCount(toCount)
+                .build();
+        Pdpb.CombineClusterResponse response = getStub().combineCluster(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Shrink a single graph to toCount partitions
+     *
+     * @param graphName graph name
+     * @param toCount   target count
+     * @throws PDException when the PD server returns an error
+     */
+    public void combineGraph(String graphName, int toCount) throws PDException {
+        Pdpb.CombineGraphRequest request = Pdpb.CombineGraphRequest
+                .newBuilder()
+                .setHeader(header)
+                .setGraphName(graphName)
+                .setToCount(toCount)
+                .build();
+        Pdpb.CombineGraphResponse response = getStub().combineGraph(request);
+        handleResponseError(response.getHeader());
+    }
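A short maintenance routine built from the admin calls above (a sketch; the single-table name passed to dbCompaction(String) is hypothetical):

```java
import org.apache.hugegraph.pd.client.PDClient;
import org.apache.hugegraph.pd.client.PDConfig;
import org.apache.hugegraph.pd.common.PDException;

public class MaintenanceExample {

    public static void main(String[] args) throws PDException {
        PDClient client = PDClient.create(PDConfig.of("localhost:8686"));
        client.balanceLeaders();     // spread raft leaders evenly across stores
        client.dbCompaction();       // compact the whole RocksDB instance
        client.dbCompaction("g+v");  // hypothetical table name: compact a single table
    }
}
```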
+    public void deleteShardGroup(int groupId) throws PDException {
+        Pdpb.DeleteShardGroupRequest request = Pdpb.DeleteShardGroupRequest
+                .newBuilder()
+                .setHeader(header)
+                .setGroupId(groupId)
+                .build();
+        Pdpb.DeleteShardGroupResponse response =
+                blockingUnaryCall(PDGrpc.getDeleteShardGroupMethod(), request);
+
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Rebuild the shard list of a store
+     *
+     * @param groupId shard group id
+     * @param shards  shard list, the group is deleted when the list is empty
+     */
+    public void updateShardGroupOp(int groupId, List<Metapb.Shard> shards) throws PDException {
+        Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder()
+                .setHeader(header)
+                .setGroupId(groupId)
+                .addAllShards(shards)
+                .build();
+        Pdpb.ChangeShardResponse response = getStub().updateShardGroupOp(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Invoke the fireChangeShard command
+     *
+     * @param groupId shard group id
+     * @param shards  shard list
+     */
+    public void changeShard(int groupId, List<Metapb.Shard> shards) throws PDException {
+        Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder()
+                .setHeader(header)
+                .setGroupId(groupId)
+                .addAllShards(shards)
+                .build();
+        Pdpb.ChangeShardResponse response = getStub().changeShard(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public ClientCache getCache() {
+        return cache;
+    }
+
+    public CacheResponse getClientCache() throws PDException {
+        GetGraphRequest request = GetGraphRequest.newBuilder().setHeader(header).build();
+        CacheResponse cache = getStub().getCache(request);
+        handleResponseError(cache.getHeader());
+        return cache;
+    }
+
+    public CachePartitionResponse getPartitionCache(String graph) throws PDException {
+        GetGraphRequest request =
+                GetGraphRequest.newBuilder().setHeader(header).setGraphName(graph).build();
+        CachePartitionResponse ps = getStub().getPartitions(request);
+        handleResponseError(ps.getHeader());
+        return ps;
+    }
+
+    public void updatePdRaft(String raftConfig) throws PDException {
+        Pdpb.UpdatePdRaftRequest request = Pdpb.UpdatePdRaftRequest.newBuilder()
+                .setHeader(header)
+                .setConfig(raftConfig)
+                .build();
+        Pdpb.UpdatePdRaftResponse response = getStub().updatePdRaft(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public interface PDEventListener {
+
+        void onStoreChanged(NodeEvent event);
+
+        void onPartitionChanged(PartitionEvent event);
+
+        void onGraphChanged(WatchResponse event);
+
+        default void onShardGroupChanged(WatchResponse event) {
+        }
+    }
+
+    static class StubProxy {
+
+        private final LinkedList<String> hostList = new LinkedList<>();
+        private volatile PDGrpc.PDBlockingStub stub;
+        private String leader;
+
+        public StubProxy(String[] hosts) {
+            for (String host : hosts) {
+                if (!host.isEmpty()) {
+                    hostList.offer(host);
+                }
+            }
+        }
+
+        public String nextHost() {
+            String host = hostList.poll();
+            hostList.offer(host);  // move the host to the tail for round-robin rotation
+            return host;
+        }
+
+        public void set(PDGrpc.PDBlockingStub stub) {
+            this.stub = stub;
+        }
+
+        public PDGrpc.PDBlockingStub get() {
+            return this.stub;
+        }
+
+        public String getHost() {
+            return hostList.peek();
+        }
+
+        public int getHostCount() {
+            return hostList.size();
+        }
+
+        public String getLeader() {
+            return leader;
+        }
+
+        public void setLeader(String leader) {
+            this.leader = leader;
+        }
+    }
+}
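PDEventListener together with the watch client lets a server process react to topology changes without polling. A registration sketch (it assumes the client wires registered listeners to the PD watch stream once the cache is enabled, which this diff does not show):

```java
import org.apache.hugegraph.pd.client.PDClient;
import org.apache.hugegraph.pd.client.PDConfig;
import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
import org.apache.hugegraph.pd.watch.NodeEvent;
import org.apache.hugegraph.pd.watch.PartitionEvent;

public class TopologyListenerExample {

    public static void main(String[] args) throws Exception {
        PDConfig config = PDConfig.of("localhost:8686").setEnableCache(true);
        PDClient client = PDClient.create(config);
        client.addEventListener(new PDClient.PDEventListener() {

            @Override
            public void onStoreChanged(NodeEvent event) {
                System.out.println("store changed: " + event);
            }

            @Override
            public void onPartitionChanged(PartitionEvent event) {
                System.out.println("partition changed: " + event);
            }

            @Override
            public void onGraphChanged(WatchResponse event) {
                System.out.println("graph changed");
            }
        });
        Thread.sleep(60_000);  // keep the process alive so events can arrive
    }
}
```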
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java
new file mode 100644
index 0000000000..64558436ea
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+public final class PDConfig {
+
+    // TODO: multi-server
+    private String serverHost = "localhost:9000";
+    // Timeout of a gRPC call, in milliseconds
+    private long grpcTimeOut = 60000;
+
+    // Whether to receive asynchronous notifications from PD
+    private boolean enablePDNotify = false;
+
+    private boolean enableCache = false;
+
+    private PDConfig() {
+    }
+
+    public static PDConfig of() {
+        return new PDConfig();
+    }
+
+    public static PDConfig of(String serverHost) {
+        PDConfig config = new PDConfig();
+        config.serverHost = serverHost;
+        return config;
+    }
+
+    public static PDConfig of(String serverHost, long timeOut) {
+        PDConfig config = new PDConfig();
+        config.serverHost = serverHost;
+        config.grpcTimeOut = timeOut;
+        return config;
+    }
+
+    public String getServerHost() {
+        return serverHost;
+    }
+
+    public long getGrpcTimeOut() {
+        return grpcTimeOut;
+    }
+
+    @Deprecated
+    public PDConfig setEnablePDNotify(boolean enablePDNotify) {
+        this.enablePDNotify = enablePDNotify;
+
+        // TODO: temporary code, remove it after hugegraph is adapted
+        this.enableCache = enablePDNotify;
+        return this;
+    }
+
+    public boolean isEnableCache() {
+        return enableCache;
+    }
+
+    public PDConfig setEnableCache(boolean enableCache) {
+        this.enableCache = enableCache;
+        return this;
+    }
+
+    @Override
+    public String toString() {
+        return "PDConfig{" +
+               "serverHost='" + serverHost + '\'' +
+               '}';
+    }
+}
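PDConfig is a small configuration holder built through its of() factories; the setters return this, so options chain. A sketch of the common constructions:

```java
import org.apache.hugegraph.pd.client.PDConfig;

public class PDConfigExample {

    public static void main(String[] args) {
        // Single host with the default 60s gRPC timeout:
        PDConfig basic = PDConfig.of("localhost:8686");
        // Custom timeout (milliseconds) plus client-side caching:
        PDConfig tuned = PDConfig.of("localhost:8686", 30_000).setEnableCache(true);
        System.out.println(basic + " / " + tuned);
    }
}
```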
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java
new file mode 100644
index 0000000000..025d6f7ae8
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.pulse.PulseServerNotice;
+
+/**
+ * Bidirectional communication interface of pd-client and pd-server
+ */
+public interface PDPulse {
+
+    /*** inner static methods ***/
+    static <T> Listener<T> listener(Consumer<T> onNext) {
+        return listener(onNext, t -> {
+        }, () -> {
+        });
+    }
+
+    static <T> Listener<T> listener(Consumer<T> onNext, Consumer<Throwable> onError) {
+        return listener(onNext, onError, () -> {
+        });
+    }
+
+    static <T> Listener<T> listener(Consumer<T> onNext, Runnable onCompleted) {
+        return listener(onNext, t -> {
+        }, onCompleted);
+    }
+
+    static <T> Listener<T> listener(Consumer<T> onNext, Consumer<Throwable> onError,
+                                    Runnable onCompleted) {
+        return new Listener<>() {
+            @Override
+            public void onNext(T response) {
+                onNext.accept(response);
+            }
+
+            @Override
+            public void onNotice(PulseServerNotice<T> notice) {
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                onError.accept(throwable);
+            }
+
+            @Override
+            public void onCompleted() {
+                onCompleted.run();
+            }
+        };
+    }
+
+    /**
+     * @param listener the listener receiving partition heartbeat responses
+     * @return a notifier used to push heartbeat requests to the server
+     */
+    Notifier<PartitionHeartbeatRequest.Builder> connectPartition(Listener<PulseResponse> listener);
+
+    /**
+     * Switch to a new host. Check the channel/host; if the old connection
+     * needs to be closed, the notifier calls its close() method.
+     *
+     * @param host     new host
+     * @param notifier notifier
+     * @return true if a new stub was created, otherwise false
+     */
+    boolean resetStub(String host, Notifier notifier);
+
+    /**
+     * Interface of pulse.
+     */
+    interface Listener<T> {
+
+        /**
+         * Invoked on new events.
+         *
+         * @param response the response.
+         */
+        @Deprecated
+        default void onNext(T response) {
+        }
+
+        /**
+         * Invoked on new events.
+         *
+         * @param notice a wrapper of response
+         */
+        default void onNotice(PulseServerNotice<T> notice) {
+            notice.ack();
+        }
+
+        /**
+         * Invoked on errors.
+         *
+         * @param throwable the error.
+         */
+        void onError(Throwable throwable);
+
+        /**
+         * Invoked on completion.
+         */
+        void onCompleted();
+    }
+
+    /**
+     * Interface of a notifier that can send notices to the server.
+     */
+    interface Notifier<T> extends Closeable {
+
+        /**
+         * Closes this notifier and all its resources.
+         */
+        @Override
+        void close();
+
+        /**
+         * Send a notice to pd-server.
+         */
+        void notifyServer(T t);
+
+        /**
+         * Send an error report to pd-server.
+         *
+         * @param error the error message
+         */
+        void crash(String error);
+    }
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java
new file mode 100644
index 0000000000..2d1ccb743e
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.client; + +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import org.apache.hugegraph.pd.grpc.pulse.HgPdPulseGrpc; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseAckRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseCreateRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; +import org.apache.hugegraph.pd.pulse.PartitionNotice; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +import io.grpc.ManagedChannel; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public final class PDPulseImpl implements PDPulse { + + private static final ConcurrentHashMap chs = new ConcurrentHashMap<>(); + private final ExecutorService threadPool; + private HgPdPulseGrpc.HgPdPulseStub stub; + private String pdServerAddress; + + // TODO: support several servers. + public PDPulseImpl(String pdServerAddress) { + this.pdServerAddress = pdServerAddress; + this.stub = HgPdPulseGrpc.newStub(Channels.getChannel(pdServerAddress)); + var namedThreadFactory = + new ThreadFactoryBuilder().setNameFormat("ack-notice-pool-%d").build(); + threadPool = Executors.newSingleThreadExecutor(namedThreadFactory); + } + + + private String getCurrentHost() { + return this.pdServerAddress; + } + + private boolean checkChannel() { + return stub != null && !((ManagedChannel) stub.getChannel()).isShutdown(); + } + + /* TODO: handle this override problem */ + @Override + public Notifier connectPartition(Listener + listener) { + return new PartitionHeartbeat(listener); + } + + @Override + public boolean resetStub(String host, Notifier notifier) { + log.info("reset stub: current, {}, new: {}, channel state:{}", getCurrentHost(), host, + checkChannel()); + if (Objects.equals(host, getCurrentHost()) && checkChannel()) { + return false; + } + + if (notifier != null) { + notifier.close(); + } + + this.stub = HgPdPulseGrpc.newStub(Channels.getChannel(host)); + log.info("pd pulse connect to {}", host); + this.pdServerAddress = host; + return true; + } + + /*** PartitionHeartbeat's implement ***/ + private class PartitionHeartbeat extends + AbstractConnector { + private long observerId = -1; + + PartitionHeartbeat(Listener listener) { + super(listener, PulseType.PULSE_TYPE_PARTITION_HEARTBEAT); + } + + private void setObserverId(long observerId) { + if (this.observerId == -1) { + this.observerId = observerId; + } + } + + @Override + public void notifyServer(PartitionHeartbeatRequest.Builder requestBuilder) { + this.reqStream.onNext(PulseRequest.newBuilder() + .setNoticeRequest( + PulseNoticeRequest.newBuilder() + .setPartitionHeartbeatRequest( + requestBuilder.build() + ).build() + ).build() + ); + } + + @Override + public void onNext(PulseResponse pulseResponse) { + this.setObserverId(pulseResponse.getObserverId()); + long noticeId = pulseResponse.getNoticeId(); + this.listener.onNext(pulseResponse); + this.listener.onNotice(new PartitionNotice(noticeId, + e -> super.ackNotice(e, observerId), + pulseResponse)); + } + + } + + private abstract class AbstractConnector implements 
Notifier, + StreamObserver { + Listener listener; + StreamObserver reqStream; + PulseType pulseType; + PulseRequest.Builder reqBuilder = PulseRequest.newBuilder(); + PulseAckRequest.Builder ackBuilder = PulseAckRequest.newBuilder(); + + private AbstractConnector(Listener listener, PulseType pulseType) { + this.listener = listener; + this.pulseType = pulseType; + this.init(); + } + + void init() { + PulseCreateRequest.Builder builder = PulseCreateRequest.newBuilder() + .setPulseType(this.pulseType); + + this.reqStream = PDPulseImpl.this.stub.pulse(this); + this.reqStream.onNext(reqBuilder.clear().setCreateRequest(builder).build()); + } + + /*** notifier ***/ + @Override + public void close() { + this.reqStream.onCompleted(); + } + + @Override + public abstract void notifyServer(N t); + + @Override + public void crash(String error) { + this.reqStream.onError(new Throwable(error)); + } + + /*** listener ***/ + @Override + public abstract void onNext(PulseResponse pulseResponse); + + @Override + public void onError(Throwable throwable) { + this.listener.onError(throwable); + } + + @Override + public void onCompleted() { + this.listener.onCompleted(); + } + + protected void ackNotice(long noticeId, long observerId) { + threadPool.execute(() -> { + // log.info("send ack: {}, ts: {}", noticeId, System.currentTimeMillis()); + this.reqStream.onNext(reqBuilder.clear() + .setAckRequest( + this.ackBuilder.clear() + .setNoticeId(noticeId) + .setObserverId(observerId) + .build() + ).build() + ); + }); + } + } +} diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java new file mode 100644 index 0000000000..3da255a825 --- /dev/null +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.client; + +import java.io.Closeable; +import java.util.function.Consumer; + +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.watch.NodeEvent; +import org.apache.hugegraph.pd.watch.PartitionEvent; + +public interface PDWatch { + + /** + * Watch the events of all store-nodes registered in the remote PD-Server. + * + * @param listener + * @return + */ + //PDWatcher watchNode(Listener listener); + + /** + * Watch the events of the store-nodes assigned to a specified graph. 
+ * + * @param graph the graph name which you want to watch + * @param listener + * @return + */ + //PDWatcher watchNode(String graph, Listener listener); + + String getCurrentHost(); + + boolean checkChannel(); + + /*** inner static methods ***/ + static Listener listener(Consumer onNext) { + return listener(onNext, t -> { + }, () -> { + }); + } + + static Listener listener(Consumer onNext, Consumer onError) { + return listener(onNext, onError, () -> { + }); + } + + static Listener listener(Consumer onNext, Runnable onCompleted) { + return listener(onNext, t -> { + }, onCompleted); + } + + static Listener listener(Consumer onNext, Consumer onError, + Runnable onCompleted) { + return new Listener() { + @Override + public void onNext(T response) { + onNext.accept(response); + } + + @Override + public void onError(Throwable throwable) { + onError.accept(throwable); + } + + @Override + public void onCompleted() { + onCompleted.run(); + } + }; + } + + /** + * @param listener + * @return + */ + Watcher watchPartition(Listener listener); + + Watcher watchNode(Listener listener); + + Watcher watchGraph(Listener listener); + + Watcher watchShardGroup(Listener listener); + + + /** + * Interface of Watcher. + */ + interface Listener { + /** + * Invoked on new events. + * + * @param response the response. + */ + void onNext(T response); + + /** + * Invoked on errors. + * + * @param throwable the error. + */ + void onError(Throwable throwable); + + /** + * Invoked on completion. + */ + default void onCompleted() {}; + } + + interface Watcher extends Closeable { + /** + * closes this watcher and all its resources. + */ + @Override + void close(); + + /** + * Requests the latest revision processed and propagates it to listeners + */ + // TODO: what's it for? + //void requestProgress(); + } +} diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java new file mode 100644 index 0000000000..73796b53fb --- /dev/null +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java @@ -0,0 +1,202 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.client; + +import java.util.function.Supplier; + +import org.apache.hugegraph.pd.grpc.watch.HgPdWatchGrpc; +import org.apache.hugegraph.pd.grpc.watch.WatchCreateRequest; +import org.apache.hugegraph.pd.grpc.watch.WatchNodeResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchPartitionResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchRequest; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchType; +import org.apache.hugegraph.pd.watch.NodeEvent; +import org.apache.hugegraph.pd.watch.PartitionEvent; + +import io.grpc.ManagedChannel; +import io.grpc.stub.StreamObserver; + +final class PDWatchImpl implements PDWatch { + + private final HgPdWatchGrpc.HgPdWatchStub stub; + + private final String pdServerAddress; + + // TODO: support several servers. + PDWatchImpl(String pdServerAddress) { + this.pdServerAddress = pdServerAddress; + this.stub = HgPdWatchGrpc.newStub(Channels.getChannel(pdServerAddress)); + } + + @Override + public String getCurrentHost() { + return this.pdServerAddress; + } + + @Override + public boolean checkChannel() { + return stub != null && !((ManagedChannel) stub.getChannel()).isShutdown(); + } + + /** + * Get Partition change watcher. + * + * @param listener + * @return + */ + @Override + public Watcher watchPartition(Listener listener) { + return new PartitionWatcher(listener); + } + + /** + * Get Store-Node change watcher. + * + * @param listener + * @return + */ + @Override + public Watcher watchNode(Listener listener) { + return new NodeWatcher(listener); + } + + @Override + public Watcher watchGraph(Listener listener) { + return new GraphWatcher(listener); + } + + @Override + public Watcher watchShardGroup(Listener listener) { + return new ShardGroupWatcher(listener); + } + + private class GraphWatcher extends AbstractWatcher { + + private GraphWatcher(Listener listener) { + super(listener, + () -> WatchCreateRequest + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_GRAPH_CHANGE) + .build() + ); + } + + @Override + public void onNext(WatchResponse watchResponse) { + this.listener.onNext(watchResponse); + } + } + + private class ShardGroupWatcher extends AbstractWatcher { + + private ShardGroupWatcher(Listener listener) { + super(listener, + () -> WatchCreateRequest + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE) + .build() + ); + } + + @Override + public void onNext(WatchResponse watchResponse) { + this.listener.onNext(watchResponse); + } + } + + private class PartitionWatcher extends AbstractWatcher { + + private PartitionWatcher(Listener listener) { + super(listener, + () -> WatchCreateRequest + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_PARTITION_CHANGE) + .build() + ); + } + + @Override + public void onNext(WatchResponse watchResponse) { + WatchPartitionResponse res = watchResponse.getPartitionResponse(); + PartitionEvent event = new PartitionEvent(res.getGraph(), res.getPartitionId(), + PartitionEvent.ChangeType.grpcTypeOf( + res.getChangeType())); + this.listener.onNext(event); + } + } + + private class NodeWatcher extends AbstractWatcher { + private NodeWatcher(Listener listener) { + super(listener, + () -> WatchCreateRequest + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_STORE_NODE_CHANGE) + .build() + ); + } + + @Override + public void onNext(WatchResponse watchResponse) { + WatchNodeResponse res = watchResponse.getNodeResponse(); + NodeEvent event = new NodeEvent(res.getGraph(), res.getNodeId(), + 
NodeEvent.EventType.grpcTypeOf(res.getNodeEventType())); + this.listener.onNext(event); + } + } + + private abstract class AbstractWatcher implements Watcher, StreamObserver { + Listener listener; + StreamObserver reqStream; + Supplier requestSupplier; + + private AbstractWatcher(Listener listener, + Supplier requestSupplier) { + this.listener = listener; + this.requestSupplier = requestSupplier; + this.init(); + } + + void init() { + this.reqStream = PDWatchImpl.this.stub.watch(this); + this.reqStream.onNext(WatchRequest.newBuilder().setCreateRequest( + this.requestSupplier.get() + ).build()); + } + + @Override + public void close() { + this.reqStream.onCompleted(); + } + + @Override + public abstract void onNext(WatchResponse watchResponse); + + @Override + public void onError(Throwable throwable) { + + this.listener.onError(throwable); + } + + @Override + public void onCompleted() { + this.listener.onCompleted(); + } + } + +} diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java new file mode 100644 index 0000000000..b6e5555e6a --- /dev/null +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.pulse; + +import java.util.function.Consumer; + +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; + +public class PartitionNotice implements PulseServerNotice { + private final long noticeId; + private final Consumer ackConsumer; + private final PulseResponse content; + + public PartitionNotice(long noticeId, Consumer ackConsumer, PulseResponse content) { + this.noticeId = noticeId; + this.ackConsumer = ackConsumer; + this.content = content; + } + + @Override + public void ack() { + this.ackConsumer.accept(this.noticeId); + } + + @Override + public long getNoticeId() { + return this.noticeId; + } + + @Override + public PulseResponse getContent() { + return this.content; + } +} diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java new file mode 100644 index 0000000000..251bab07f9 --- /dev/null +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.pulse; + +public interface PulseServerNotice { + /** + * @throws RuntimeException when failed to send ack-message to pd-server + */ + void ack(); + + long getNoticeId(); + + /** + * Return a response object of gRPC stream. + * + * @return + */ + T getContent(); + +} diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java new file mode 100644 index 0000000000..893566b9f5 --- /dev/null +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.watch; + +import java.util.Objects; + +import org.apache.hugegraph.pd.grpc.watch.NodeEventType; + +public class NodeEvent { + private final String graph; + private final long nodeId; + private final EventType eventType; + + public NodeEvent(String graph, long nodeId, EventType eventType) { + this.graph = graph; + this.nodeId = nodeId; + this.eventType = eventType; + } + + public String getGraph() { + return graph; + } + + public long getNodeId() { + return nodeId; + } + + public EventType getEventType() { + return eventType; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + NodeEvent nodeEvent = (NodeEvent) o; + return nodeId == nodeEvent.nodeId && Objects.equals(graph, + nodeEvent.graph) && + eventType == nodeEvent.eventType; + } + + @Override + public int hashCode() { + return Objects.hash(graph, nodeId, eventType); + } + + @Override + public String toString() { + return "NodeEvent{" + + "graph='" + graph + '\'' + + ", nodeId=" + nodeId + + ", eventType=" + eventType + + '}'; + } + + public enum EventType { + UNKNOWN, + NODE_ONLINE, + NODE_OFFLINE, + NODE_RAFT_CHANGE, + NODE_PD_LEADER_CHANGE; + + public static EventType grpcTypeOf(NodeEventType grpcType) { + switch (grpcType) { + case NODE_EVENT_TYPE_NODE_ONLINE: + return NODE_ONLINE; + case NODE_EVENT_TYPE_NODE_OFFLINE: + return NODE_OFFLINE; + case NODE_EVENT_TYPE_NODE_RAFT_CHANGE: + return NODE_RAFT_CHANGE; + case NODE_EVENT_TYPE_PD_LEADER_CHANGE: + return NODE_PD_LEADER_CHANGE; + default: + return UNKNOWN; + } + + } + + } +} diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java new file mode 100644 index 0000000000..c4ff91b107 --- /dev/null +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.watch; + +public class PDWatcher { + +} diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java new file mode 100644 index 0000000000..76a4fdc7d9 --- /dev/null +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.watch; + +import java.util.Objects; + +import org.apache.hugegraph.pd.grpc.watch.WatchChangeType; + +public class PartitionEvent { + private final String graph; + private final int partitionId; + private final ChangeType changeType; + + public PartitionEvent(String graph, int partitionId, ChangeType changeType) { + this.graph = graph; + this.partitionId = partitionId; + this.changeType = changeType; + } + + public String getGraph() { + return this.graph; + } + + public int getPartitionId() { + return this.partitionId; + } + + public ChangeType getChangeType() { + return this.changeType; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PartitionEvent that = (PartitionEvent) o; + return partitionId == that.partitionId && Objects.equals(graph, that.graph) && + changeType == that.changeType; + } + + @Override + public int hashCode() { + return Objects.hash(graph, partitionId, changeType); + } + + @Override + public String toString() { + return "PartitionEvent{" + + "graph='" + graph + '\'' + + ", partitionId=" + partitionId + + ", changeType=" + changeType + + '}'; + } + + public enum ChangeType { + UNKNOWN, + ADD, + ALTER, + DEL; + + public static ChangeType grpcTypeOf(WatchChangeType grpcType) { + switch (grpcType) { + case WATCH_CHANGE_TYPE_ADD: + return ADD; + case WATCH_CHANGE_TYPE_ALTER: + return ALTER; + case WATCH_CHANGE_TYPE_DEL: + return DEL; + default: + return UNKNOWN; + } + } + } +} diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java new file mode 100644 index 0000000000..e14bfafdc1 --- /dev/null +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+enum WatchType {
+
+    PARTITION_CHANGE(10);
+
+    private final int value;
+
+    WatchType(int value) {
+        this.value = value;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java
new file mode 100644
index 0000000000..0b937f4ed2
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PartitionCache;
+import org.apache.hugegraph.pd.grpc.Metapb;
+
+import com.google.common.collect.Range;
+import com.google.common.collect.RangeMap;
+import com.google.common.collect.TreeRangeMap;
+
+public class PartitionCacheTest {
+
+    // @Test
+    public void test() {
+        PartitionCache cache = new PartitionCache();
+        for (int i = 0; i < 10; i++) {
+            KVPair<Metapb.Partition, Metapb.Shard> partShards =
+                    new KVPair<>(Metapb.Partition.newBuilder()
+                                                 .setStartKey(i * 10)
+                                                 .setEndKey((i + 1) * 10)
+                                                 .build(), null);
+            cache.updatePartition("aa", i, partShards.getKey());
+        }
+
+        for (int i = 0; i < 100; i++) {
+            KVPair<Metapb.Partition, Metapb.Shard> partShards = cache.getPartitionByCode("aa", i);
+            System.out.println(" " + i + " " + partShards.getKey().getStartKey());
+        }
+    }
+
+    // @Test
+    public void test1() {
+        Map<String, RangeMap<Long, Integer>> keyToPartIdCache = new HashMap<>();
+        // The key is composed of graphName + partition id
+        Map<String, Map<Integer, KVPair<Metapb.Partition, Metapb.Shard>>> partitionCache =
+                new HashMap<>();
+
+        // Cache all stores for full-database queries; needs optimization
+        Map<String, List<Metapb.Store>> allStoresCache = new HashMap<>();
+
+        keyToPartIdCache.put("a", TreeRangeMap.create());
+
+        keyToPartIdCache.get("a")
+                        .put(Range.closedOpen(1L, 2L), 1);
+
+        allStoresCache.put("a", new ArrayList<>());
+        allStoresCache.get("a").add(Metapb.Store.newBuilder().setId(34).build());
+
+        Map<String, RangeMap<Long, Integer>> keyToPartIdCache2 =
+                cloneKeyToPartIdCache(keyToPartIdCache);
+        System.out.println(keyToPartIdCache2.size());
+    }
+
+    public Map<String, RangeMap<Long, Integer>> cloneKeyToPartIdCache(
+            Map<String, RangeMap<Long, Integer>> cache) {
+        Map<String, RangeMap<Long, Integer>> cacheClone = new HashMap<>();
+        cache.forEach((k1, v1) -> {
+            cacheClone.put(k1, TreeRangeMap.create());
+            v1.asMapOfRanges().forEach((k2, v2) -> {
+                cacheClone.get(k1).put(k2, v2);
+            });
+        });
+        return cacheClone;
+    }
+
+    public Map<String, Map<Integer, KVPair<Metapb.Partition, Metapb.Shard>>>
+    clonePartitionCache(Map<String, Map<Integer, KVPair<Metapb.Partition, Metapb.Shard>>> cache) {
+        Map<String, Map<Integer, KVPair<Metapb.Partition, Metapb.Shard>>> cacheClone =
+                new HashMap<>();
+        cacheClone.putAll(cache);
+        return cacheClone;
+    }
+
+    public Map<String, List<Metapb.Store>>
+    cloneStoreCache(Map<String, List<Metapb.Store>> cache) {
+        Map<String, List<Metapb.Store>> cacheClone = new HashMap<>();
+        cacheClone.putAll(cache);
+        return cacheClone;
+    }
+}
diff --git
a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java new file mode 100644 index 0000000000..bc5e3879ac --- /dev/null +++ b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd; + +import java.nio.charset.StandardCharsets; +import java.util.List; + +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.client.PDPulse; +import org.apache.hugegraph.pd.client.PDPulseImpl; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.pulse.PulseServerNotice; +import org.junit.Assert; +import org.junit.BeforeClass; + +public class StoreRegisterTest { + private static PDClient pdClient; + private final String storeAddr = "localhost"; + private final String graphName = "default/hugegraph/g"; + private long storeId = 0; + + @BeforeClass + public static void beforeClass() throws Exception { + PDConfig config = PDConfig.of("localhost:8686"); + config.setEnableCache(true); + pdClient = PDClient.create(config); + } + + // @Test + public void testRegisterStore() throws PDException { + Metapb.Store store = Metapb.Store.newBuilder().setAddress(storeAddr).build(); + try { + storeId = pdClient.registerStore(store); + } catch (Exception e) { + e.printStackTrace(); + } + Assert.assertTrue("RegisterStore store_id = " + storeId, storeId != 0); + } + + // @Test + public void testGetStore() throws PDException { + testRegisterStore(); + Metapb.Store store = pdClient.getStore(storeId); + Assert.assertEquals(storeAddr, store.getAddress()); + System.out.println(store); + } + + // @Test + public void testGetActiveStores() throws PDException { + testRegisterStore(); + List stores = pdClient.getActiveStores(graphName); + stores.forEach((e) -> { + System.out.println("-------------------------------------"); + System.out.println(e); + }); + } + + + // @Test + public void testStoreHeartbeat() throws PDException { + testRegisterStore(); + Metapb.StoreStats stats = Metapb.StoreStats.newBuilder().setStoreId(storeId).build(); + pdClient.storeHeartbeat(stats); + List stores = pdClient.getActiveStores(graphName); + boolean exist = false; + for (Metapb.Store store : stores) { + if (store.getId() == storeId) { + exist = true; + break; + } + } + Assert.assertTrue(exist); + } + + + // @Test + public void 
testPartitionHeartbeat() throws InterruptedException, PDException { + testRegisterStore(); + PDPulse pdPulse = new PDPulseImpl(pdClient.getLeaderIp()); + + PDPulse.Notifier notifier = pdPulse.connectPartition( + new PDPulse.Listener() { + + @Override + public void onNext(PulseResponse response) { + + } + + @Override + public void onNotice(PulseServerNotice notice) { + + } + + @Override + public void onError(Throwable throwable) { + + } + + @Override + public void onCompleted() { + + } + }); + KVPair partShard = + pdClient.getPartition("test", "1".getBytes(StandardCharsets.UTF_8)); + notifier.notifyServer(PartitionHeartbeatRequest.newBuilder().setStates( + Metapb.PartitionStats.newBuilder().addGraphName("test") + .setId(partShard.getKey().getId()) + .setLeader(Metapb.Shard.newBuilder().setStoreId(1).build()))); + + + Thread.sleep(10000); + } + +} diff --git a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java new file mode 100644 index 0000000000..8952cc1162 --- /dev/null +++ b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.client; + +import java.util.HashMap; +import java.util.Map; +import java.util.Vector; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.junit.Assert; + +public class DiscoveryClientImplTest { + + private static final AtomicLong label = new AtomicLong(); + String address = "localhost:80"; + int delay = 1000; + int wait = delay * 3 + 500; + + // @Test + public void registerStore() throws InterruptedException { + + HashMap labels = new HashMap<>(); + + labels.put("metrics", "/actuator/prometheus"); + labels.put("target", "10.81.116.77:8520"); + labels.put("scheme", "http"); + labels.put("__relabeling", "http"); + labels.put("no_relabeling", "http"); + getClient("store", "address1", labels); + + labels.put("metrics", "/actuator/prometheus"); + labels.put("target", "10.81.116.78:8520"); + labels.put("scheme", "http"); + getClient("store", "address2", labels); + + labels.put("metrics", "/actuator/prometheus"); + labels.put("target", "10.81.116.79:8520"); + labels.put("scheme", "http"); + getClient("store", "address3", labels); + + labels.put("metrics", "/actuator/prometheus"); + labels.put("target", "10.81.116.78:8620"); + labels.put("scheme", "http"); + getClient("pd", "address1", labels); + + labels.put("metrics", "/graph/metrics"); + labels.put("target", "10.37.1.1:9200"); + labels.put("scheme", "https"); + getClient("hugegraph", "address1", labels); + } + + // @Test + public void testNodes() throws InterruptedException { + String appName = "hugegraph"; + register(appName, address); + } + + // @Test + public void testMultiNode() throws InterruptedException { + for (int i = 0; i < 2; i++) { + register("app" + i, address + i); + } + } + + // @Test + public void testParallelMultiNode() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(30); + Vector exceptions = new Vector<>(); + for (int i = 0; i < 30; i++) { + int finalI = i; + new Thread(() -> { + try { + for (int j = 0; j < 3; j++) { + register("app" + finalI, address + j); + } + } catch (Exception e) { + exceptions.add(e); + } finally { + latch.countDown(); + } + }).start(); + } + latch.await(); + Assert.assertEquals(0, exceptions.size()); + } + + private void register(String appName, String address) throws InterruptedException { + + HashMap labels = new HashMap<>(); + String labelValue = String.valueOf(label.incrementAndGet()); + labels.put("address", labelValue); + labels.put("address1", labelValue); + Query query = Query.newBuilder().setAppName( + appName).setVersion("0.13.0").putAllLabels(labels).build(); + DiscoveryClientImpl discoveryClient = getClient(appName, address, labels); + Thread.sleep(10000); + NodeInfos nodeInfos1 = discoveryClient.getNodeInfos(query); + Assert.assertEquals(1, nodeInfos1.getInfoCount()); + DiscoveryClientImpl discoveryClient1 = getClient(appName, address + 0, labels); + Thread.sleep(10000); + Assert.assertEquals(2, discoveryClient.getNodeInfos(query).getInfoCount()); + Query query1 = Query.newBuilder().setAppName( + appName).setVersion("0.12.0").putAllLabels(labels).build(); + Assert.assertEquals(0, discoveryClient.getNodeInfos(query1).getInfoCount()); + discoveryClient.cancelTask(); + discoveryClient1.cancelTask(); + Thread.sleep(wait); + NodeInfos nodeInfos = discoveryClient.getNodeInfos(query); + System.out.println(nodeInfos); + Assert.assertEquals(0, nodeInfos.getInfoCount()); 
+        discoveryClient.close();
+        discoveryClient1.close();
+    }
+
+    private DiscoveryClientImpl getClient(String appName, String address,
+                                          Map<String, String> labels) {
+        DiscoveryClientImpl discoveryClient = null;
+        try {
+            discoveryClient = DiscoveryClientImpl.newBuilder()
+                                                 .setCenterAddress("localhost:8687,localhost:8686,localhost:8688")
+                                                 .setAddress(address)
+                                                 .setAppName(appName)
+                                                 .setDelay(delay)
+                                                 .setVersion("0.13.0")
+                                                 .setId("0")
+                                                 .setLabels(labels)
+                                                 .build();
+            discoveryClient.scheduleTask();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+
+        return discoveryClient;
+    }
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java
new file mode 100644
index 0000000000..390821662e
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.io.File;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.kv.KResponse;
+import org.apache.hugegraph.pd.grpc.kv.KvResponse;
+import org.yaml.snakeyaml.Yaml;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class LicenseClientImplTest {
+
+    // @Test
+    public void putLicense() {
+        PDConfig pdConfig = PDConfig.of("localhost:8686,localhost:8687,localhost:8688");
+        //PDConfig pdConfig = PDConfig.of("localhost:8686");
+        pdConfig.setEnableCache(true);
+        try (LicenseClient c = new LicenseClient(pdConfig)) {
+            File file = new File("../conf/hugegraph.license");
+            byte[] bytes = FileUtils.readFileToByteArray(file);
+            Pdpb.PutLicenseResponse putLicenseResponse = c.putLicense(bytes);
+            Pdpb.Error error = putLicenseResponse.getHeader().getError();
+            log.info(error.getMessage());
+            assert error.getType().equals(Pdpb.ErrorType.OK);
+        } catch (Exception e) {
+            log.error("put license with error: ", e);
+        }
+    }
+
+    // @Test
+    public void getKv() {
+        PDConfig pdConfig = PDConfig.of("10.157.12.36:8686");
+        pdConfig.setEnableCache(true);
+        try (KvClient c = new KvClient(pdConfig)) {
+            KResponse kResponse = c.get("S:FS");
+            Pdpb.Error error = kResponse.getHeader().getError();
+            log.info(error.getMessage());
+            assert error.getType().equals(Pdpb.ErrorType.OK);
+            Properties ymlConfig = getYmlConfig(kResponse.getValue());
+            Object property = ymlConfig.get("rocksdb.write_buffer_size");
+            assert property.toString().equals("32000000");
+        } catch (Exception e) {
+            log.error("get kv with error: ", e);
+        }
+    }
+
+    // @Test
+    public void putKv() {
+        PDConfig pdConfig = PDConfig.of("127.0.0.1:8688");
+        pdConfig.setEnableCache(true);
+        try (KvClient c = new KvClient(pdConfig)) {
+            long l = System.currentTimeMillis();
+            KvResponse kvResponse = c.put("S:Timestamp", String.valueOf(l));
+            Pdpb.Error error = kvResponse.getHeader().getError();
+            log.info(error.getMessage());
+            assert error.getType().equals(Pdpb.ErrorType.OK);
+        } catch (Exception e) {
+            log.error("put kv with error: ", e);
+        }
+    }
+
+    // @Test
+    public void putKvLocal() {
+        PDConfig pdConfig = PDConfig.of("localhost:8686");
+        pdConfig.setEnableCache(true);
+        try (KvClient c = new KvClient(pdConfig)) {
+            long l = System.currentTimeMillis();
+            KvResponse kvResponse = c.put("S:Timestamp", String.valueOf(l));
+            Pdpb.Error error = kvResponse.getHeader().getError();
+            log.info(error.getMessage());
+            assert error.getType().equals(Pdpb.ErrorType.OK);
+        } catch (Exception e) {
+            log.error("put kv with error: ", e);
+        }
+    }
+
+    private Properties getYmlConfig(String yml) {
+        Yaml yaml = new Yaml();
+        Iterable<Object> load = yaml.loadAll(yml);
+        Iterator<Object> iterator = load.iterator();
+        Properties properties = new Properties();
+        while (iterator.hasNext()) {
+            Map<String, Object> next = (Map<String, Object>) iterator.next();
+            map2Properties(next, "", properties);
+        }
+        return properties;
+    }
+
+    private void map2Properties(Map<String, Object> map, String prefix, Properties properties) {
+        for (Map.Entry<String, Object> entry : map.entrySet()) {
+            String key = entry.getKey();
+            String newPrefix = prefix == null || prefix.length() == 0 ? key : prefix + "." + key;
+            Object value = entry.getValue();
+            if (!(value instanceof Map)) {
+                properties.put(newPrefix, value);
+            } else {
+                map2Properties((Map<String, Object>) value, newPrefix, properties);
+            }
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java
new file mode 100644
index 0000000000..dfdc63cf36
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hugegraph.pd.client.test.HgPDTestUtil;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest;
+import org.apache.hugegraph.pd.pulse.PulseServerNotice;
+import org.junit.BeforeClass;
+
+public class PDPulseTest {
+
+    private static PDClient pdClient;
+
+    private final long storeId = 0;
+    private final String storeAddress = "localhost";
+    private final String graphName = "graph1";
+
+    @BeforeClass
+    public static void beforeClass() throws Exception {
+        PDConfig pdConfig = PDConfig.of("localhost:8686");
+        pdConfig.setEnableCache(true);
+        pdClient = PDClient.create(pdConfig);
+        pdClient.getLeader();
+    }
+
+    // @Test
+    public void listen() {
+
+        PDPulse pulse = new PDPulseImpl(pdClient.getLeaderIp());
+        CountDownLatch latch = new CountDownLatch(60);
+
+        PDPulse.Notifier<PartitionHeartbeatRequest.Builder> notifier1 =
+                pulse.connectPartition(new PulseListener<>(latch, "listener1"));
+        PDPulse.Notifier<PartitionHeartbeatRequest.Builder> notifier2 =
+                pulse.connectPartition(new PulseListener<>(latch, "listener2"));
+        PDPulse.Notifier<PartitionHeartbeatRequest.Builder> notifier3 =
+                pulse.connectPartition(new PulseListener<>(latch, "listener3"));
+
+        try {
+            latch.await(120, TimeUnit.SECONDS);
+        } catch (InterruptedException e) {
+            e.printStackTrace();
+        }
+        PartitionHeartbeatRequest.Builder builder = PartitionHeartbeatRequest.newBuilder();
+
+        notifier1.notifyServer(builder);
+        notifier2.notifyServer(builder);
+        notifier3.notifyServer(builder);
+
+        notifier1.close();
+        notifier2.close();
+        notifier3.close();
+    }
+
+    private class PulseListener<T> implements PDPulse.Listener<T> {
+
+        private final String listenerName;
+        private final CountDownLatch latch;
+
+        private PulseListener(CountDownLatch latch, String listenerName) {
+            this.latch = latch;
+            this.listenerName = listenerName;
+        }
+
+        @Override
+        public void onNext(T response) {
+            // println(this.listenerName + " res: " + response);
+            // this.latch.countDown();
+        }
+
+        @Override
+        public void onNotice(PulseServerNotice<T> notice) {
+            HgPDTestUtil.println(this.listenerName + " ---> res: " + notice.getContent());
+            notice.ack();
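+            // acknowledge the notice to the server, then record progress on the latch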
+            this.latch.countDown();
+        }
+
+        @Override
+        public void onError(Throwable throwable) {
+            HgPDTestUtil.println(this.listenerName + " error: " + throwable.toString());
+        }
+
+        @Override
+        public void onCompleted() {
+            HgPDTestUtil.println(this.listenerName + " is completed");
+        }
+    }
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java
new file mode 100644
index 0000000000..675577596a
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hugegraph.pd.client.test.HgPDTestUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+@Deprecated
+public class PDWatchTest {
+
+    private static PDClient pdClient;
+
+    private final long storeId = 0;
+    private final String storeAddr = "localhost";
+    private final String graphName = "graph1";
+
+    @BeforeClass
+    public static void beforeClass() {
+        pdClient = PDClient.create(PDConfig.of("localhost:9000"));
+    }
+
+    @Test
+    public void watch() {
+        PDWatch watch = pdClient.getWatchClient();
+        CountDownLatch latch = new CountDownLatch(10);
+
+        PDWatch.Watcher watcher1 = watch.watchPartition(new WatchListener<>(latch, "watcher1"));
+        PDWatch.Watcher watcher2 = watch.watchPartition(new WatchListener<>(latch, "watcher2"));
+        PDWatch.Watcher watcher3 = watch.watchPartition(new WatchListener<>(latch, "watcher3"));
+
+        PDWatch.Watcher nodeWatcher1 = watch.watchNode(new WatchListener<>(latch, "nodeWatcher1"));
+
+        try {
+            latch.await(15, TimeUnit.SECONDS);
+        } catch (InterruptedException e) {
+            e.printStackTrace();
+        }
+        watcher1.close();
+        watcher2.close();
+        watcher3.close();
+        nodeWatcher1.close();
+    }
+
+    private class WatchListener<T> implements PDWatch.Listener<T> {
+
+        private final String watcherName;
+        private final CountDownLatch latch;
+
+        private WatchListener(CountDownLatch latch, String watcherName) {
+            this.latch = latch;
+            this.watcherName = watcherName;
+        }
+
+        @Override
+        public void onNext(T response) {
+            HgPDTestUtil.println(this.watcherName + " res: " + response);
+            this.latch.countDown();
+        }
+
+        @Override
+        public void onError(Throwable throwable) {
+            HgPDTestUtil.println(this.watcherName + " error: " + throwable.toString());
+        }
+
+        @Override
+        public void onCompleted() {
+            HgPDTestUtil.println(this.watcherName + " is completed");
+        }
+    }
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java
new file mode 100644
index 0000000000..99e5f83360
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client.test;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.Iterator;
+import java.util.List;
+
+public class HgPDTestUtil {
+
+    public static void println(Object str) {
+        System.out.println(str);
+    }
+
+    public static String toStr(byte[] b) {
+        if (b == null || b.length == 0) {
+            return "";
+        }
+        return new String(b, StandardCharsets.UTF_8);
+    }
+
+    public static byte[] toBytes(String str) {
+        if (str == null) {
+            return null;
+        }
+        return str.getBytes(StandardCharsets.UTF_8);
+    }
+
+    public static byte[] toBytes(long l) {
+        ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
+        buffer.putLong(l);
+        return buffer.array();
+    }
+
+    private static byte[] toBytes(final int i) {
+        ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES);
+        buffer.putInt(i);
+        return buffer.array();
+    }
+
+    public static long toLong(byte[] bytes) {
+        ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
+        buffer.put(bytes);
+        buffer.flip(); // need to flip before reading back
+        return buffer.getLong();
+    }
+
+    public static int toInt(byte[] bytes) {
+        ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES);
+        buffer.put(bytes);
+        buffer.flip(); // need to flip before reading back
+        return buffer.getInt();
+    }
+
+    public static String padLeftZeros(String str, int n) {
+        return String.format("%1$" + n + "s", str).replace(' ', '0');
+    }
+
+    public static String toSuffix(int num, int length) {
+        return "-" + padLeftZeros(String.valueOf(num), length);
+    }
+
+    public static int amountOf(List<?> list) {
+        if (list == null) {
+            return 0;
+        }
+        return list.size();
+    }
+
+    public static int amountOf(Iterator<?> iterator) {
+        if (iterator == null) {
+            return 0;
+        }
+        int count = 0;
+        while (iterator.hasNext()) {
+            iterator.next();
+            count++;
+        }
+        return count;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-clitools/pom.xml b/hugegraph-pd/hg-pd-clitools/pom.xml
new file mode 100644
index 0000000000..d408b45baf
--- /dev/null
+++ b/hugegraph-pd/hg-pd-clitools/pom.xml
@@ -0,0 +1,74 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with this
+  work for additional information regarding copyright ownership. The ASF
+  licenses this file to You under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations
+  under the License.
+  -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>hugegraph-pd</artifactId>
+        <groupId>org.apache.hugegraph</groupId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>hg-pd-clitools</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-client</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.13.2</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-assembly-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>single</goal>
+                        </goals>
+                    </execution>
+                </executions>
+                <configuration>
+                    <archive>
+                        <manifest>
+                            <mainClass>org.apache.hugegraph.pd.clitools.Main</mainClass>
+                        </manifest>
+                    </archive>
+                    <descriptorRefs>
+                        <descriptorRef>jar-with-dependencies</descriptorRef>
+                    </descriptorRefs>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/hugegraph-pd/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java b/hugegraph-pd/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java
new file mode 100644
index 0000000000..f1db9cc8de
--- /dev/null
+++ b/hugegraph-pd/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.clitools;
+
+import org.apache.hugegraph.pd.client.PDClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+
+public class Main {
+
+    public static void main(String[] args) throws PDException {
+        if (args.length < 3) {
+            String error = "usage: pd-address config key[=value]\n key list:" +
+                           "\n\tenableBatchLoad";
+            System.out.println(error);
+            System.exit(0);
+        }
+        String pd = args[0];
+        String cmd = args[1];
+        String param = args[2];
+        System.out.println(pd + " " + cmd + " " + param);
+        System.out.println("Result: \n");
+        switch (cmd) {
+            case "config":
+                doConfig(pd, param);
+                break;
+            case "change_raft":
+                doChangeRaft(pd, param);
+                break;
+        }
+    }
+
+    private static void doChangeRaft(String pd, String param) throws PDException {
+        PDClient pdClient = PDClient.create(PDConfig.of(pd));
+        pdClient.updatePdRaft(param);
+    }
+
+    public static void doConfig(String pd, String param) throws PDException {
+        PDClient pdClient = PDClient.create(PDConfig.of(pd));
+        String[] pair = param.split("=");
+        String key = pair[0].trim();
+        Object value = null;
+        if (pair.length > 1) {
+            value = pair[1].trim();
+        }
+        if (value == null) {
+            Metapb.PDConfig pdConfig = pdClient.getPDConfig();
+            switch (key) {
+                case "enableBatchLoad":
+                    // value = pdConfig.getEnableBatchLoad();
+                    break;
+                case "shardCount":
+                    value = pdConfig.getShardCount();
+                    break;
+            }
+            System.out.println("Get config " + key + "=" + value);
+        } else {
+            Metapb.PDConfig.Builder builder = Metapb.PDConfig.newBuilder();
+            switch (key) {
+                case "enableBatchLoad":
+                    // builder.setEnableBatchLoad(Boolean.valueOf((String) value));
+                    break;
+                case "shardCount":
+                    builder.setShardCount(Integer.valueOf((String) value));
+                    break;
+            }
+            pdClient.setPDConfig(builder.build());
+            System.out.println("Set config " + key + "=" + value);
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java b/hugegraph-pd/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java
new file mode 100644
index 0000000000..4fa7eaebb6
--- /dev/null
+++ b/hugegraph-pd/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.clitools;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hugegraph.pd.common.PDException;
+
+public class MainTest {
+
+    public static boolean test2sup(List<Integer> arrays, int tail, int res) {
+        System.out.printf("%d %d%n", tail, res);
+        if (tail == 0) {
+            System.out.printf("a = %d %d%n", tail, res);
+            return false;
+        } else if (tail == 1) {
+            System.out.printf("b = %d %d%n", arrays.get(0), res);
+            return (arrays.get(0) == res);
+        } else if (tail == 2) {
+            System.out.printf("c = %d %d %d%n", arrays.get(0), arrays.get(1), res);
+            return (arrays.get(0) + arrays.get(1) == Math.abs(res)) ||
+                   (Math.abs(arrays.get(0) - arrays.get(1)) == Math.abs(res));
+        } else {
+            return test2sup(arrays, tail - 1, res + arrays.get(tail - 1)) ||
+                   test2sup(arrays, tail - 1, res - arrays.get(tail - 1));
+        }
+    }
+
+    // @Test
+    public void getConfig() throws PDException {
+        Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad"});
+    }
+
+    // @Test
+    public void setBatchTrue() throws PDException {
+        Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad= true "});
+    }
+
+    // @Test
+    public void setBatchFalse() throws PDException {
+        Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad=false"});
+    }
+
+    // @Test
+    public void getConfig2() throws PDException {
+        Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount"});
+    }
+
+    // @Test
+    public void setShardCount1() throws PDException {
+        Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount=1"});
+    }
+
+    // @Test
+    public void setShardCount3() throws PDException {
+        Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount=3"});
+    }
+
+    // @Test
+    public void test2() {
+        Integer[] a = new Integer[]{1, 0, 3, 2};
+        List<Integer> aa = Arrays.asList(a);
+        System.out.print(test2sup(aa, aa.size(), 0) ? "TRUE" : "FALSE");
+    }
+}
diff --git a/hugegraph-pd/hg-pd-common/pom.xml b/hugegraph-pd/hg-pd-common/pom.xml
new file mode 100644
index 0000000000..1997660099
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/pom.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with this
+  work for additional information regarding copyright ownership. The ASF
+  licenses this file to You under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations
+  under the License.
+  -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph-pd</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>hg-pd-common</artifactId>
+
+    <properties>
+        <maven.compiler.source>11</maven.compiler.source>
+        <maven.compiler.target>11</maven.compiler.target>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-grpc</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <version>1.18.24</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-collections4</artifactId>
+            <version>4.4</version>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java
new file mode 100644
index 0000000000..46da3b75e6
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.hugegraph.pd.grpc.Metapb.Graph;
+import org.apache.hugegraph.pd.grpc.Metapb.Partition;
+
+import com.google.common.collect.RangeMap;
+import com.google.common.collect.TreeRangeMap;
+
+import lombok.Data;
+
+@Data
+public class GraphCache {
+
+    private Graph graph;
+    private AtomicBoolean initialized = new AtomicBoolean(false);
+    private AtomicBoolean writing = new AtomicBoolean(false);
+    private ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    private Map<Integer, AtomicBoolean> state = new ConcurrentHashMap<>();
+    private Map<Integer, Partition> partitions = new ConcurrentHashMap<>();
+    private RangeMap<Long, Integer> range = TreeRangeMap.create();
+
+    public GraphCache(Graph graph) {
+        this.graph = graph;
+    }
+
+    public GraphCache() {
+    }
+
+    public Partition getPartition(Integer id) {
+        return partitions.get(id);
+    }
+
+    public Partition addPartition(Integer id, Partition p) {
+        return partitions.put(id, p);
+    }
+
+    public Partition removePartition(Integer id) {
+        return partitions.remove(id);
+    }
+}
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java
new file mode 100644
index 0000000000..bb1fccd025
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import java.util.Collection;
+import java.util.Map;
+
+public final class HgAssert {
+
+    public static void isTrue(boolean expression, String message) {
+        if (message == null) {
+            throw new IllegalArgumentException("message is null");
+        }
+
+        if (!expression) {
+            throw new IllegalArgumentException(message);
+        }
+    }
+
+    public static void isFalse(boolean expression, String message) {
+        isTrue(!expression, message);
+    }
+
+    public static void isArgumentValid(byte[] bytes, String parameter) {
+        isFalse(isInvalid(bytes), "The argument is invalid: " + parameter);
+    }
+
+    public static void isArgumentValid(String str, String parameter) {
+        isFalse(isInvalid(str), "The argument is invalid: " + parameter);
+    }
+
+    public static void isArgumentNotNull(Object obj, String parameter) {
+        isTrue(obj != null, "The argument is null: " + parameter);
+    }
+
+    public static void istValid(byte[] bytes, String msg) {
+        isFalse(isInvalid(bytes), msg);
+    }
+
+    public static void isValid(String str, String msg) {
+        isFalse(isInvalid(str), msg);
+    }
+
+    public static void isNotNull(Object obj, String msg) {
+        isTrue(obj != null, msg);
+    }
+
+    public static boolean isContains(Object[] objs, Object obj) {
+        if (objs == null || objs.length == 0 || obj == null) {
+            return false;
+        }
+        for (Object item : objs) {
+            if (obj.equals(item)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public static boolean isInvalid(String... strs) {
+        if (strs == null || strs.length == 0) {
+            return true;
+        }
+        for (String item : strs) {
+            if (item == null || "".equals(item.trim())) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public static boolean isInvalid(byte[] bytes) {
+        return bytes == null || bytes.length == 0;
+    }
+
+    public static boolean isInvalid(Map<?, ?> map) {
+        return map == null || map.isEmpty();
+    }
+
+    public static boolean isInvalid(Collection<?> list) {
+        return list == null || list.isEmpty();
+    }
+
+    public static <T> boolean isContains(Collection<T> list, T item) {
+        if (list == null || item == null) {
+            return false;
+        }
+        return list.contains(item);
+    }
+
+    public static boolean isNull(Object... objs) {
+        if (objs == null) {
+            return true;
+        }
+        for (Object item : objs) {
+            if (item == null) {
+                return true;
+            }
+        }
+        return false;
+    }
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java
new file mode 100644
index 0000000000..b560911ea2
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+public class KVPair<K, V> implements Serializable {
+
+    /**
+     * Key of this Pair.
+     */
+    private K key;
+    /**
+     * Value of this Pair.
+     */
+    private V value;
+
+    /**
+     * Creates a new pair
+     *
+     * @param key   The key for this pair
+     * @param value The value to use for this pair
+     */
+    public KVPair(K key, V value) {
+        this.key = key;
+        this.value = value;
+    }
+
+    /**
+     * Gets the key for this pair.
+     *
+     * @return key for this pair
+     */
+    public K getKey() {
+        return key;
+    }
+
+    public void setKey(K key) {
+        this.key = key;
+    }
+
+    /**
+     * Gets the value for this pair.
+     *
+     * @return value for this pair
+     */
+    public V getValue() {
+        return value;
+    }
+
+    public void setValue(V value) {
+        this.value = value;
+    }
+
+    /**
+     * String representation of this Pair.
+     *
+     * The default name/value delimiter '=' is always used.
+     *
+     * @return String representation of this Pair
+     */
+    @Override
+    public String toString() {
+        return key + "=" + value;
+    }
+
+    /**
+     * Generate a hash code for this Pair.
+     *
+     * The hash code is calculated using both the name and
+     * the value of the Pair.
+     *
+     * @return hash code for this Pair
+     */
+    @Override
+    public int hashCode() {
+        // name's hashCode is multiplied by an arbitrary prime number (13)
+        // in order to make sure there is a difference in the hashCode between
+        // these two parameters:
+        //   name: a   value: aa
+        //   name: aa  value: a
+        return key.hashCode() * 13 + (value == null ? 0 : value.hashCode());
+    }
+
+    /**
+     * Test this Pair for equality with another Object.
+     *
+     * If the Object to be tested is not a Pair or is null,
+     * then this method returns false.
+     *
+     * Two Pairs are considered equal if and only if
+     * both the names and values are equal.
+     *
+     * @param o the Object to test for equality with this Pair
+     * @return true if the given Object is equal to this Pair else false
+     */
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o instanceof KVPair) {
+            KVPair<?, ?> pair = (KVPair<?, ?>) o;
+            if (!Objects.equals(key, pair.key)) {
+                return false;
+            }
+            return Objects.equals(value, pair.value);
+        }
+        return false;
+    }
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java
new file mode 100644
index 0000000000..3efc7d1385
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+public class PDException extends Exception {
+
+    private final int errorCode;
+
+    public PDException(int error) {
+        super(String.format("Error code = %d", error));
+        this.errorCode = error;
+    }
+
+    public PDException(int error, String msg) {
+        super(msg);
+        this.errorCode = error;
+    }
+
+    public PDException(int error, Throwable e) {
+        super(e);
+        this.errorCode = error;
+    }
+
+    public PDException(int error, String msg, Throwable e) {
+        super(msg, e);
+        this.errorCode = error;
+    }
+
+    public int getErrorCode() {
+        return errorCode;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java
new file mode 100644
index 0000000000..c7efb84031
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+public class PDRuntimeException extends RuntimeException {
+
+    // public static final int LICENSE_ERROR = -11;
+
+    private int errorCode = 0;
+
+    public PDRuntimeException(int error) {
+        super(String.format("Error code = %d", error));
+        this.errorCode = error;
+    }
+
+    public PDRuntimeException(int error, String msg) {
+        super(msg);
+        this.errorCode = error;
+    }
+
+    public PDRuntimeException(int error, Throwable e) {
+        super(e);
+        this.errorCode = error;
+    }
+
+    public PDRuntimeException(int error, String msg, Throwable e) {
+        super(msg, e);
+        this.errorCode = error;
+    }
+
+    public int getErrorCode() {
+        return errorCode;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java
new file mode 100644
index 0000000000..7c9199dd0d
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java
@@ -0,0 +1,460 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.hugegraph.pd.grpc.Metapb;
+
+import com.google.common.collect.Range;
+import com.google.common.collect.RangeMap;
+import com.google.common.collect.TreeRangeMap;
+
+/**
+ * The copy-on-write approach was abandoned:
+ * 1. when the number of graphs times partitions is extremely large,
+ *    performance degrades severely, so it cannot be used
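+ * Instead, each graph is guarded by a per-graph AtomicBoolean spin flag:
+ * writers set it via lockGraph() and readers spin-wait in waitGraphLock().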
+ */
+public class PartitionCache {
+
+    // read-write lock object
+    private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+    Lock writeLock = readWriteLock.writeLock();
+    // one cache per graph
+    private volatile Map<String, RangeMap<Long, Integer>> keyToPartIdCache;
+    // the key is composed of graphName + partitionId
+    private volatile Map<String, Map<Integer, Metapb.Partition>> partitionCache;
+
+    private volatile Map<Integer, Metapb.ShardGroup> shardGroupCache;
+    private volatile Map<Long, Metapb.Store> storeCache;
+    private volatile Map<String, Metapb.Graph> graphCache;
+
+    private final Map<String, AtomicBoolean> locks = new HashMap<>();
+
+    public PartitionCache() {
+        keyToPartIdCache = new HashMap<>();
+        partitionCache = new HashMap<>();
+        shardGroupCache = new ConcurrentHashMap<>();
+        storeCache = new ConcurrentHashMap<>();
+        graphCache = new ConcurrentHashMap<>();
+    }
+
+    private AtomicBoolean getOrCreateGraphLock(String graphName) {
+        var lock = this.locks.get(graphName);
+        if (lock == null) {
+            try {
+                writeLock.lock();
+                if ((lock = this.locks.get(graphName)) == null) {
+                    lock = new AtomicBoolean();
+                    locks.put(graphName, lock);
+                }
+            } finally {
+                writeLock.unlock();
+            }
+        }
+        return lock;
+    }
+
+    public void waitGraphLock(String graphName) {
+        var lock = getOrCreateGraphLock(graphName);
+        while (lock.get()) {
+            Thread.onSpinWait();
+        }
+    }
+
+    public void lockGraph(String graphName) {
+        var lock = getOrCreateGraphLock(graphName);
+        // spin until this thread flips the flag from false to true
+        while (!lock.compareAndSet(false, true)) {
+            Thread.onSpinWait();
+        }
+    }
+
+    public void unlockGraph(String graphName) {
+        var lock = getOrCreateGraphLock(graphName);
+        lock.set(false);
+    }
+
+    /**
+     * Return the partition info by partitionId.
+     *
+     * @param graphName
+     * @param partId
+     * @return pair of (partition, leader shard), or null if absent
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartitionById(String graphName, int partId) {
+        waitGraphLock(graphName);
+        var graphs = partitionCache.get(graphName);
+        if (graphs != null) {
+            var partition = graphs.get(partId);
+            if (partition != null) {
+                return new KVPair<>(partition, getLeaderShard(partId));
+            }
+        }
+
+        return null;
+    }
+
+    /**
+     * Return the partition that contains the given key.
+     *
+     * @param key
+     * @return
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartitionByKey(String graphName, byte[] key) {
+        int code = PartitionUtils.calcHashcode(key);
+        return getPartitionByCode(graphName, code);
+    }
+
+    /**
+     * Return the partition info by the hashcode of the key.
+     *
+     * @param graphName
+     * @param code
+     * @return
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartitionByCode(String graphName, long code) {
+        waitGraphLock(graphName);
+        RangeMap<Long, Integer> rangeMap = keyToPartIdCache.get(graphName);
+        if (rangeMap != null) {
+            Integer partId = rangeMap.get(code);
+            if (partId != null) {
+                return getPartitionById(graphName, partId);
+            }
+        }
+        return null;
+    }
+
+    public List<Metapb.Partition> getPartitions(String graphName) {
+        waitGraphLock(graphName);
+
+        List<Metapb.Partition> partitions = new ArrayList<>();
+        if (!partitionCache.containsKey(graphName)) {
+            return partitions;
+        }
+        partitionCache.get(graphName).forEach((k, v) -> {
+            partitions.add(v);
+        });
+
+        return partitions;
+    }
+
+    public boolean addPartition(String graphName, int partId, Metapb.Partition partition) {
+        waitGraphLock(graphName);
+        Metapb.Partition old = null;
+
+        if (partitionCache.containsKey(graphName)) {
+            old = partitionCache.get(graphName).get(partId);
+        }
+
+        if (old != null && old.equals(partition)) {
+            return false;
+        }
+        try {
+            lockGraph(graphName);
+
+            partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition);
+
+            if (old != null) {
+                // e.g. old [1-3) was covered by [2-3); when [1-3) shrinks to [1-2),
+                // the original [1-3) must not be removed. Only remove the old range
+                // when both its start and end still map to this partition
+                // (i.e. it has not been covered yet).
+                var graphRange = keyToPartIdCache.get(graphName);
+                if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) &&
+                    Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) {
+                    graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey());
+                }
+            }
+
+            keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create())
+                            .put(Range.closedOpen(partition.getStartKey(),
+                                                  partition.getEndKey()), partId);
+        } finally {
+            unlockGraph(graphName);
+        }
+        return true;
+    }
+
+    public void updatePartition(String graphName, int partId, Metapb.Partition partition) {
+        try {
+            lockGraph(graphName);
+            Metapb.Partition old = null;
+            var graphs = partitionCache.get(graphName);
+            if (graphs != null) {
+                old = graphs.get(partId);
+            }
+
+            if (old != null) {
+                var graphRange = keyToPartIdCache.get(graphName);
+                if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) &&
+                    Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) {
+                    graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey());
+                }
+            }
+
+            partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition);
+            keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create())
+                            .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()),
+                                 partId);
+        } finally {
+            unlockGraph(graphName);
+        }
+    }
+
+    public boolean updatePartition(Metapb.Partition partition) {
+
+        var graphName = partition.getGraphName();
+        var partitionId = partition.getId();
+
+        var old = getPartitionById(graphName, partitionId);
+        if (old != null && Objects.equals(partition, old.getKey())) {
+            return false;
+        }
+
+        updatePartition(graphName, partitionId, partition);
+        return true;
+    }
+
+    public void removePartition(String graphName, int partId) {
+        try {
+            lockGraph(graphName);
+            var partition = partitionCache.get(graphName).remove(partId);
+            if (partition != null) {
+                var graphRange = keyToPartIdCache.get(graphName);
+
+                if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) &&
+                    Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) {
+                    graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey());
+                }
+            }
+        } finally {
+            unlockGraph(graphName);
+        }
+    }
+
+    /**
+     * remove partition id of graph name
+     *
+     * @param graphName
+     * @param id
+     */
+    public void remove(String graphName, int id) {
+        removePartition(graphName, id);
+    }
+
+    /**
+     * remove all partitions
+     */
+    public void removePartitions() {
+        writeLock.lock();
+        try {
+            partitionCache = new HashMap<>();
+            keyToPartIdCache = new HashMap<>();
+            locks.clear();
+        } finally {
+            writeLock.unlock();
+        }
+    }
+
+    /**
+     * remove partition cache of graphName
+     *
+     * @param graphName
+     */
+    public void removeAll(String graphName) {
+        try {
+            lockGraph(graphName);
+            partitionCache.remove(graphName);
+            keyToPartIdCache.remove(graphName);
+            locks.remove(graphName);
+        } finally {
+            unlockGraph(graphName);
+        }
+    }
+
+    private String makePartitionKey(String graphName, int partId) {
+        return graphName + "/" + partId;
+    }
+
+    public boolean updateShardGroup(Metapb.ShardGroup shardGroup) {
+        Metapb.ShardGroup oldShardGroup = shardGroupCache.get(shardGroup.getId());
+        if (oldShardGroup != null && oldShardGroup.equals(shardGroup)) {
+            return false;
+        }
+        shardGroupCache.put(shardGroup.getId(), shardGroup);
+        return true;
+    }
+
+    public void deleteShardGroup(int shardGroupId) {
+        shardGroupCache.remove(shardGroupId);
+    }
+
+    public Metapb.ShardGroup getShardGroup(int groupId) {
+        return shardGroupCache.get(groupId);
+    }
+
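+    /**
+     * Cache the store if it is absent or has changed.
+     *
+     * @return true if the cached entry was updated
+     */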
+    public boolean addStore(Long storeId, Metapb.Store store) {
+        Metapb.Store oldStore = storeCache.get(storeId);
+        if (oldStore != null && oldStore.equals(store)) {
+            return false;
+        }
+        storeCache.put(storeId, store);
+        return true;
+    }
+
+    public Metapb.Store getStoreById(Long storeId) {
+        return storeCache.get(storeId);
+    }
+
+    public void removeStore(Long storeId) {
+        storeCache.remove(storeId);
+    }
+
+    public boolean hasGraph(String graphName) {
+        return !getPartitions(graphName).isEmpty();
+    }
+
+    public void updateGraph(Metapb.Graph graph) {
+        if (Objects.equals(graph, getGraph(graph.getGraphName()))) {
+            return;
+        }
+        graphCache.put(graph.getGraphName(), graph);
+    }
+
+    public Metapb.Graph getGraph(String graphName) {
+        return graphCache.get(graphName);
+    }
+
+    public List<Metapb.Graph> getGraphs() {
+        List<Metapb.Graph> graphs = new ArrayList<>();
+        graphCache.forEach((k, v) -> {
+            graphs.add(v);
+        });
+        return graphs;
+    }
+
+    public void reset() {
+        writeLock.lock();
+        try {
+            partitionCache = new HashMap<>();
+            keyToPartIdCache = new HashMap<>();
+            shardGroupCache = new ConcurrentHashMap<>();
+            storeCache = new ConcurrentHashMap<>();
+            graphCache = new ConcurrentHashMap<>();
+            locks.clear();
+        } finally {
+            writeLock.unlock();
+        }
+    }
+
+    public void clear() {
+        reset();
+    }
+
+    public String debugCacheByGraphName(String graphName) {
+        StringBuilder builder = new StringBuilder();
+        builder.append("Graph:").append(graphName).append(", cache info: range info: {");
+        var rangeMap = keyToPartIdCache.get(graphName);
+        builder.append(rangeMap == null ? "" : rangeMap).append("}");
+
+        if (rangeMap != null) {
+            builder.append(", partition info : {");
+            rangeMap.asMapOfRanges().forEach((k, v) -> {
+                var partition = partitionCache.get(graphName).get(v);
+                builder.append("[part_id:").append(v);
+                if (partition != null) {
+                    builder.append(", start_key:").append(partition.getStartKey())
+                           .append(", end_key:").append(partition.getEndKey())
+                           .append(", state:").append(partition.getState().name());
+                }
+                builder.append("], ");
+            });
+            builder.append("}");
+        }
+
+        builder.append(", graph info:{");
+        var graph = graphCache.get(graphName);
+        if (graph != null) {
+            builder.append("partition_count:").append(graph.getPartitionCount())
+                   .append(", state:").append(graph.getState().name());
+        }
+        builder.append("}");
+        return builder.toString();
+    }
+
+    public Metapb.Shard getLeaderShard(int partitionId) {
+        var shardGroup = shardGroupCache.get(partitionId);
+        if (shardGroup != null) {
+            for (Metapb.Shard shard : shardGroup.getShardsList()) {
+                if (shard.getRole() == Metapb.ShardRole.Leader) {
+                    return shard;
+                }
+            }
+        }
+
+        return null;
+    }
+
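+    /**
+     * Rebuild the shard group of the partition so that {@code leader} becomes
+     * the Leader shard and every other shard becomes a Follower.
+     */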
+    public void updateShardGroupLeader(int partitionId, Metapb.Shard leader) {
+        if (shardGroupCache.containsKey(partitionId) && leader != null) {
+            if (!Objects.equals(getLeaderShard(partitionId), leader)) {
+                var shardGroup = shardGroupCache.get(partitionId);
+                var builder = Metapb.ShardGroup.newBuilder(shardGroup).clearShards();
+                for (var shard : shardGroup.getShardsList()) {
+                    builder.addShards(
+                            Metapb.Shard.newBuilder()
+                                        .setStoreId(shard.getStoreId())
+                                        .setRole(shard.getStoreId() == leader.getStoreId() ?
+                                                 Metapb.ShardRole.Leader :
+                                                 Metapb.ShardRole.Follower)
+                                        .build()
+                    );
+                }
+                shardGroupCache.put(partitionId, builder.build());
+            }
+        }
+    }
+
+    public String debugShardGroup() {
+        StringBuilder builder = new StringBuilder();
+        builder.append("shard group cache:{");
+        shardGroupCache.forEach((partitionId, shardGroup) -> {
+            builder.append(partitionId).append("::{")
+                   .append("version:").append(shardGroup.getVersion())
+                   .append(", conf_version:").append(shardGroup.getConfVer())
+                   .append(", state:").append(shardGroup.getState().name())
+                   .append(", shards:[");
+
+            for (var shard : shardGroup.getShardsList()) {
+                builder.append("{store_id:").append(shard.getStoreId())
+                       .append(", role:").append(shard.getRole().name())
+                       .append("},");
+            }
+            builder.append("]}, ");
+        });
+        builder.append("}");
+        return builder.toString();
+    }
+}
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java
new file mode 100644
index 0000000000..869a686933
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+public class PartitionUtils {
+
+    public static final int MAX_VALUE = 0xffff;
+
+    /**
+     * Calculate the hashcode of the key.
+     *
+     * @param key the raw key bytes
+     * @return hashcode in the range [0, MAX_VALUE)
+     */
+    public static int calcHashcode(byte[] key) {
+        // FNV-1a style hash with extra avalanche mixing, clamped to 16 bits
+        final int p = 16777619;
+        int hash = (int) 2166136261L;
+        for (byte element : key) {
+            hash = (hash ^ element) * p;
+        }
+        hash += hash << 13;
+        hash ^= hash >> 7;
+        hash += hash << 3;
+        hash ^= hash >> 17;
+        hash += hash << 5;
+        hash = hash & PartitionUtils.MAX_VALUE;
+        if (hash == PartitionUtils.MAX_VALUE) {
+            hash = PartitionUtils.MAX_VALUE - 1;
+        }
+        return hash;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/pom.xml b/hugegraph-pd/hg-pd-core/pom.xml
new file mode 100644
index 0000000000..374d9a93c9
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/pom.xml
@@ -0,0 +1,94 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with this
+  work for additional information regarding copyright ownership. The ASF
+  licenses this file to You under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations
+  under the License.
+  -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph-pd</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+
+    <artifactId>hg-pd-core</artifactId>
+
+    <properties>
+        <!-- original property tag was lost in conversion; its value was 0.5.10 -->
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.alipay.sofa</groupId>
+            <artifactId>jraft-core</artifactId>
+            <version>1.3.13</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.rocksdb</groupId>
+                    <artifactId>rocksdbjni</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.rocksdb</groupId>
+            <artifactId>rocksdbjni</artifactId>
+            <version>6.29.5</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-grpc</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-context</artifactId>
+            <version>5.3.20</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-common</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot</artifactId>
+            <version>2.5.14</version>
+        </dependency>
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <version>1.18.24</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+            <version>3.12.0</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+            <version>2.8.9</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-store-grpc</artifactId>
+            <version>${revision}</version>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java
new file mode 100644
index 0000000000..197e155106
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import java.util.List;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.meta.ConfigMetaStore;
+import org.apache.hugegraph.pd.meta.MetadataFactory;
+import org.apache.hugegraph.pd.raft.RaftStateListener;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class ConfigService implements RaftStateListener {
+
+    private final ConfigMetaStore meta;
+    private PDConfig pdConfig;
+
+    public ConfigService(PDConfig config) {
+        this.pdConfig = config;
+        config.setConfigService(this);
+        meta = MetadataFactory.newConfigMeta(config);
+    }
+
+    public Metapb.PDConfig getPDConfig(long version) throws PDException {
+        return this.meta.getPdConfig(version);
+    }
+
+    public Metapb.PDConfig getPDConfig() throws PDException {
+        return this.meta.getPdConfig(0);
+    }
+
+    public Metapb.PDConfig setPDConfig(Metapb.PDConfig mConfig) throws PDException {
+        Metapb.PDConfig oldCfg = getPDConfig();
+        Metapb.PDConfig.Builder builder = oldCfg.toBuilder().mergeFrom(mConfig)
+                                                .setVersion(oldCfg.getVersion() + 1)
+                                                .setTimestamp(System.currentTimeMillis());
+        mConfig = this.meta.setPdConfig(builder.build());
+        log.info("PDConfig has been modified, new PDConfig is {}", mConfig);
+        updatePDConfig(mConfig);
+        return mConfig;
+    }
+
+    public List<Metapb.GraphSpace> getGraphSpace(String graphSpaceName) throws PDException {
+        return this.meta.getGraphSpace(graphSpaceName);
+    }
+
+    public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDException {
+        return this.meta.setGraphSpace(graphSpace.toBuilder()
+                                                 .setTimestamp(System.currentTimeMillis())
+                                                 .build());
+    }
+
+    /**
+     * Read the configuration from the store and overwrite the global PDConfig object.
+     *
+     * @return the refreshed PDConfig
+     */
+    public PDConfig loadConfig() {
+        try {
+            Metapb.PDConfig mConfig = this.meta.getPdConfig(0);
+            if (mConfig == null) {
+                // first start-up: persist the initial configuration
+                mConfig = Metapb.PDConfig.newBuilder()
+                                         .setPartitionCount(pdConfig.getInitialPartitionCount())
+                                         .setShardCount(pdConfig.getPartition().getShardCount())
+                                         .setVersion(1)
+                                         .setTimestamp(System.currentTimeMillis())
+                                         .setMaxShardsPerStore(
+                                                 pdConfig.getPartition().getMaxShardsPerStore())
+                                         .build();
+                this.meta.setPdConfig(mConfig);
+            }
+            pdConfig = updatePDConfig(mConfig);
+        } catch (Exception e) {
+            log.error("ConfigService loadConfig exception: ", e);
+        }
+        return pdConfig;
+    }
+
+    public synchronized PDConfig updatePDConfig(Metapb.PDConfig mConfig) {
+        log.info("update pd config: mConfig:{}", mConfig);
+        pdConfig.getPartition().setShardCount(mConfig.getShardCount());
+        pdConfig.getPartition().setTotalCount(mConfig.getPartitionCount());
+        pdConfig.getPartition().setMaxShardsPerStore(mConfig.getMaxShardsPerStore());
+        return pdConfig;
+    }
+
+    public synchronized PDConfig setPartitionCount(int count) {
+        Metapb.PDConfig mConfig = null;
+        try {
+            mConfig = getPDConfig();
+            mConfig = mConfig.toBuilder().setPartitionCount(count).build();
+            setPDConfig(mConfig);
+        } catch (PDException e) {
+            log.error("ConfigService exception: ", e);
+        }
+        return pdConfig;
+    }
+
+    /**
+     * The partition count from the meta store. Since it may be affected by
+     * partition splits and merges, the original (initial) partition count is
+     * not recommended for use.
+     *
+     * @return partition count of cluster
+     * @throws PDException when io error
+     */
+    public int getPartitionCount() throws PDException {
+        return getPDConfig().getPartitionCount();
+    }
+
+    @Override
+    public void onRaftLeaderChanged() {
+        loadConfig();
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java
new file mode 100644
index 0000000000..2e4a63603c
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.meta.IdMetaStore;
+import org.apache.hugegraph.pd.meta.MetadataFactory;
+
+public class IdService {
+
+    private final IdMetaStore meta;
+    private PDConfig pdConfig;
+
+    public IdService(PDConfig config) {
+        this.pdConfig = config;
+        meta = MetadataFactory.newHugeServerMeta(config);
+    }
+
+    public PDConfig getPdConfig() {
+        return pdConfig;
+    }
+
+    public void setPdConfig(PDConfig pdConfig) {
+        this.pdConfig = pdConfig;
+    }
+
+    public long getId(String key, int delta) throws PDException {
+        return meta.getId(key, delta);
+    }
+
+    public void resetId(String key) throws PDException {
+        meta.resetId(key);
+    }
+
+    /**
+     * Get a cyclic, non-repeating auto-increment id; after reaching the upper
+     * limit it restarts from 0, automatically skipping cids that are in use.
+     *
+     * @param key
+     * @param max
+     * @return
+     * @throws PDException
+     */
+    public long getCId(String key, long max) throws PDException {
+        return meta.getCId(key, max);
+    }
+
+    public long getCId(String key, String name, long max) throws PDException {
+        return meta.getCId(key, name, max);
+    }
+
+    /**
+     * Delete a cyclic auto-increment id.
+     *
+     * @param key
+     * @param cid
+     * @return
+     * @throws PDException
+     */
+    public long delCId(String key, long cid) throws PDException {
+        return meta.delCId(key, cid);
+    }
+
+    public long delCIdDelay(String key, String name, long cid) throws PDException {
+        return meta.delCIdDelay(key, name, cid);
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java
new file mode 100644
index 0000000000..5c8ef3c8db
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java
new file mode 100644
index 0000000000..5c8ef3c8db
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import java.nio.charset.Charset;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.kv.Kv;
+import org.apache.hugegraph.pd.grpc.kv.V;
+import org.apache.hugegraph.pd.meta.MetadataKeyHelper;
+import org.apache.hugegraph.pd.meta.MetadataRocksDBStore;
+import org.apache.hugegraph.pd.store.KV;
+import org.springframework.stereotype.Service;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@Service
+public class KvService {
+
+    public static final char KV_DELIMITER = '@';
+    // TODO: after the main prefix, add the class name to separate the key spaces
+    private static final String TTL_PREFIX = "T";
+    private static final String KV_PREFIX = "K";
+    private static final String LOCK_PREFIX = "L";
+    private static final String KV_PREFIX_DELIMITER = KV_PREFIX + KV_DELIMITER;
+    private static final byte[] EMPTY_VALUE = new byte[0];
+    private final MetadataRocksDBStore meta;
+    private PDConfig pdConfig;
+
+    public KvService(PDConfig config) {
+        this.pdConfig = config;
+        meta = new MetadataRocksDBStore(config);
+    }
+
+    public static String getKey(Object... keys) {
+        StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper();
+        builder.append(KV_PREFIX).append(KV_DELIMITER);
+        for (Object key : keys) {
+            builder.append(key == null ? "" : key).append(KV_DELIMITER);
+        }
+        return builder.substring(0, builder.length() - 1);
+    }
+
+    public static byte[] getKeyBytes(Object... keys) {
+        String key = getKey(keys);
+        return key.getBytes(Charset.defaultCharset());
+    }
+
+    public static String getKeyWithoutPrefix(Object... keys) {
+        StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper();
+        for (Object key : keys) {
+            builder.append(key == null ? "" : key).append(KV_DELIMITER);
+        }
+        return builder.substring(0, builder.length() - 1);
+    }
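+    // Key layout sketch (illustrative values): getKey("job", 7) -> "K@job@7",
+    // getKeyWithoutPrefix("L", "job", 7) -> "L@job@7", and the TTL index key is
+    // getTTLStoreKey(key, ts) -> bytes of "T@<ts>@<key>".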
"" : key).append(KV_DELIMITER); + } + return builder.substring(0, builder.length() - 1); + } + + public static String getDelimiter() { + return String.valueOf(KV_DELIMITER); + } + + public PDConfig getPdConfig() { + return pdConfig; + } + + public void setPdConfig(PDConfig pdConfig) { + this.pdConfig = pdConfig; + } + + public void put(String key, String value) throws PDException { + V storeValue = V.newBuilder().setValue(value).setTtl(0).build(); + meta.put(getStoreKey(key), storeValue.toByteArray()); + // log.warn("add key with key-{}:value-{}", key, value); + } + + public void put(String key, String value, long ttl) throws PDException { + long curTime = System.currentTimeMillis(); + curTime += ttl; + V storeValue = V.newBuilder().setValue(value).setSt(ttl).setTtl(curTime).build(); + meta.put(getStoreKey(key), storeValue.toByteArray()); + meta.put(getTTLStoreKey(key, curTime), EMPTY_VALUE); + // log.warn("add key with key-{}:value-{}:ttl-{}", key, value, ttl); + } + + public String get(String key) throws PDException { + byte[] storeKey = getStoreKey(key); + return get(storeKey); + } + + public String get(byte[] keyBytes) throws PDException { + byte[] bytes = meta.getOne(keyBytes); + String v = getValue(keyBytes, bytes); + return v; + } + + private String getValue(byte[] keyBytes, byte[] valueBytes) throws PDException { + if (valueBytes == null || valueBytes.length == 0) { + return ""; + } + try { + V v = V.parseFrom(valueBytes); + if (v.getTtl() == 0 || v.getTtl() >= System.currentTimeMillis()) { + return v.getValue(); + } else { + meta.remove(keyBytes); + meta.remove(getTTLStoreKey(new String(keyBytes), v.getTtl())); + } + } catch (Exception e) { + log.error("parse value with error:{}", e.getMessage()); + throw new PDException(-1, e.getMessage()); + } + return null; + } + + public boolean keepAlive(String key) throws PDException { + byte[] bytes = meta.getOne(getStoreKey(key)); + try { + if (bytes == null || bytes.length == 0) { + return false; + } + V v = V.parseFrom(bytes); + if (v != null) { + long ttl = v.getTtl(); + long st = v.getSt(); + meta.remove(getTTLStoreKey(key, ttl)); + put(key, v.getValue(), st); + return true; + } else { + return false; + } + } catch (InvalidProtocolBufferException e) { + throw new PDException(-1, e.getMessage()); + } + } + + public Kv delete(String key) throws PDException { + byte[] storeKey = getStoreKey(key); + String value = this.get(storeKey); + meta.remove(storeKey); + Kv.Builder builder = Kv.newBuilder().setKey(key); + if (value != null) { + builder.setValue(value); + } + Kv kv = builder.build(); + // log.warn("delete kv with key :{}", key); + return kv; + } + + public List deleteWithPrefix(String key) throws PDException { + byte[] storeKey = getStoreKey(key); + //TODO to many rows for scan + List kvList = meta.scanPrefix(storeKey); + LinkedList kvs = new LinkedList<>(); + for (KV kv : kvList) { + String kvKey = new String(kv.getKey()).replaceFirst(KV_PREFIX_DELIMITER, ""); + String kvValue = getValue(kv.getKey(), kv.getValue()); + if (kvValue != null) { + kvs.add(Kv.newBuilder().setKey(kvKey).setValue(kvValue).build()); + } + } + meta.removeByPrefix(storeKey); + // log.warn("delete kv with key prefix :{}", key); + return kvs; + } + + /** + * scan result ranged from key start and key end + * + * @param keyStart + * @param keyEnd + * @return Records + * @throws PDException + */ + public Map scanRange(String keyStart, String keyEnd) throws PDException { + List list = meta.scanRange(getStoreKey(keyStart), getStoreKey(keyEnd)); + Map map = new 
+
+    public Kv delete(String key) throws PDException {
+        byte[] storeKey = getStoreKey(key);
+        String value = this.get(storeKey);
+        meta.remove(storeKey);
+        Kv.Builder builder = Kv.newBuilder().setKey(key);
+        if (value != null) {
+            builder.setValue(value);
+        }
+        Kv kv = builder.build();
+        // log.warn("delete kv with key :{}", key);
+        return kv;
+    }
+
+    public List<Kv> deleteWithPrefix(String key) throws PDException {
+        byte[] storeKey = getStoreKey(key);
+        // TODO: too many rows for a full prefix scan
+        List<KV> kvList = meta.scanPrefix(storeKey);
+        LinkedList<Kv> kvs = new LinkedList<>();
+        for (KV kv : kvList) {
+            String kvKey = new String(kv.getKey()).replaceFirst(KV_PREFIX_DELIMITER, "");
+            String kvValue = getValue(kv.getKey(), kv.getValue());
+            if (kvValue != null) {
+                kvs.add(Kv.newBuilder().setKey(kvKey).setValue(kvValue).build());
+            }
+        }
+        meta.removeByPrefix(storeKey);
+        // log.warn("delete kv with key prefix :{}", key);
+        return kvs;
+    }
+
+    /**
+     * Scan the records whose keys range from keyStart to keyEnd
+     *
+     * @param keyStart start of the key range
+     * @param keyEnd   end of the key range
+     * @return the matched records
+     * @throws PDException when the underlying store fails
+     */
+    public Map<String, String> scanRange(String keyStart, String keyEnd) throws PDException {
+        List<KV> list = meta.scanRange(getStoreKey(keyStart), getStoreKey(keyEnd));
+        Map<String, String> map = new HashMap<>();
+        for (KV kv : list) {
+            String kvKey = new String(kv.getKey()).replaceFirst(KV_PREFIX_DELIMITER, "");
+            String kvValue = getValue(kv.getKey(), kv.getValue());
+            if (kvValue != null) {
+                map.put(kvKey, kvValue);
+            }
+        }
+        return map;
+    }
+
+    public Map<String, String> scanWithPrefix(String key) throws PDException {
+        List<KV> kvList = meta.scanPrefix(getStoreKey(key));
+        HashMap<String, String> map = new HashMap<>();
+        for (KV kv : kvList) {
+            String kvKey = new String(kv.getKey()).replaceFirst(KV_PREFIX_DELIMITER, "");
+            String kvValue = getValue(kv.getKey(), kv.getValue());
+            if (kvValue != null) {
+                map.put(kvKey, kvValue);
+            }
+        }
+        return map;
+    }
+
+    public boolean locked(String key) throws PDException {
+        String lockKey = KvService.getKeyWithoutPrefix(KvService.LOCK_PREFIX, key);
+        Map<String, String> allLock = scanWithPrefix(lockKey);
+        return allLock != null && allLock.size() != 0;
+    }
+
+    private boolean owned(String key, long clientId) throws PDException {
+        String lockKey = KvService.getKeyWithoutPrefix(KvService.LOCK_PREFIX, key);
+        Map<String, String> allLock = scanWithPrefix(lockKey);
+        if (allLock.size() == 0) {
+            return true;
+        }
+        for (Map.Entry<String, String> entry : allLock.entrySet()) {
+            String entryKey = entry.getKey();
+            String[] split = entryKey.split(String.valueOf(KV_DELIMITER));
+            if (Long.valueOf(split[split.length - 1]).equals(clientId)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public boolean lock(String key, long ttl, long clientId) throws PDException {
+        // TODO: improve the lock implementation
+        synchronized (KvService.class) {
+            if (!owned(key, clientId)) {
+                return false;
+            }
+            put(getLockKey(key, clientId), " ", ttl);
+            return true;
+        }
+    }
+
+    public boolean lockWithoutReentrant(String key, long ttl,
+                                        long clientId) throws PDException {
+        synchronized (KvService.class) {
+            if (locked(key)) {
+                return false;
+            }
+            put(getLockKey(key, clientId), " ", ttl);
+            return true;
+        }
+    }
+
+    public boolean unlock(String key, long clientId) throws PDException {
+        synchronized (KvService.class) {
+            if (!owned(key, clientId)) {
+                return false;
+            }
+            delete(getLockKey(key, clientId));
+            return true;
+        }
+    }
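+    // Lock sketch (illustrative): lock("task", 3000, 42) writes "L@task@42"
+    // with a 3 s TTL and is reentrant for client 42, since owned() matches the
+    // trailing clientId; lockWithoutReentrant() refuses while any holder exists.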
+
+    public boolean keepAlive(String key, long clientId) throws PDException {
+        String lockKey = getLockKey(key, clientId);
+        return keepAlive(lockKey);
+    }
+
+    public String getLockKey(String key, long clientId) {
+        return getKeyWithoutPrefix(LOCK_PREFIX, key, clientId);
+    }
+
+    public byte[] getStoreKey(String key) {
+        return getKeyBytes(key);
+    }
+
+    public byte[] getTTLStoreKey(String key, long time) {
+        return getKeyBytes(TTL_PREFIX, time, key);
+    }
+
+    public void clearTTLData() {
+        try {
+            byte[] ttlStartKey = getTTLStoreKey("", 0);
+            byte[] ttlEndKey = getTTLStoreKey("", System.currentTimeMillis());
+            List<KV> kvList = meta.scanRange(ttlStartKey, ttlEndKey);
+            for (KV kv : kvList) {
+                String key = new String(kv.getKey());
+                // The TTL index key is "T@<timestamp>@<original key>"
+                int index = key.indexOf(KV_DELIMITER, 2);
+                String delKey = key.substring(index + 1);
+                delete(delKey);
+                meta.remove(kv.getKey());
+            }
+        } catch (Exception e) {
+            log.error("clear ttl data with error:", e);
+        }
+    }
+
+    public MetadataRocksDBStore getMeta() {
+        return meta;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java
new file mode 100644
index 0000000000..664b6b8dec
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import java.util.List;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.meta.LogMeta;
+import org.apache.hugegraph.pd.meta.MetadataFactory;
+import org.springframework.stereotype.Service;
+
+import com.google.protobuf.Any;
+import com.google.protobuf.GeneratedMessageV3;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@Service
+public class LogService {
+
+    public static final String GRPC = "GRPC";
+    public static final String REST = "REST";
+    public static final String TASK = "TASK";
+    public static final String NODE_CHANGE = "NODE_CHANGE";
+    public static final String PARTITION_CHANGE = "PARTITION_CHANGE";
+    private final LogMeta logMeta;
+
+    public LogService(PDConfig pdConfig) {
+        logMeta = MetadataFactory.newLogMeta(pdConfig);
+    }
+
+    public List<Metapb.LogRecord> getLog(String action, Long start, Long end) throws PDException {
+        return logMeta.getLog(action, start, end);
+    }
+
+    public void insertLog(String action, String message, GeneratedMessageV3 target) {
+        try {
+            Metapb.LogRecord logRecord = Metapb.LogRecord.newBuilder()
+                                                         .setAction(action)
+                                                         .setMessage(message)
+                                                         .setTimestamp(System.currentTimeMillis())
+                                                         .setObject(Any.pack(target))
+                                                         .build();
+            logMeta.insertLog(logRecord);
+        } catch (PDException e) {
+            log.debug("Insert log with error:", e);
+        }
+    }
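+    // Usage sketch (illustrative): record a REST-triggered change next to its
+    // protobuf payload, then read back a time window for auditing:
+    //   logService.insertLog(LogService.REST, "update graph", graph);
+    //   List<Metapb.LogRecord> logs = logService.getLog(LogService.REST, start, end);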
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java
new file mode 100644
index 0000000000..4b7184f4b9
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.pulse.ChangeShard;
+import org.apache.hugegraph.pd.grpc.pulse.CleanPartition;
+import org.apache.hugegraph.pd.grpc.pulse.DbCompaction;
+import org.apache.hugegraph.pd.grpc.pulse.MovePartition;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange;
+import org.apache.hugegraph.pd.grpc.pulse.SplitPartition;
+import org.apache.hugegraph.pd.grpc.pulse.TransferLeader;
+
+/**
+ * Listener of partition instructions sent to the stores
+ */
+public interface PartitionInstructionListener {
+
+    void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException;
+
+    void transferLeader(Metapb.Partition partition,
+                        TransferLeader transferLeader) throws PDException;
+
+    void splitPartition(Metapb.Partition partition,
+                        SplitPartition splitPartition) throws PDException;
+
+    void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException;
+
+    void movePartition(Metapb.Partition partition,
+                       MovePartition movePartition) throws PDException;
+
+    void cleanPartition(Metapb.Partition partition,
+                        CleanPartition cleanPartition) throws PDException;
+
+    void changePartitionKeyRange(Metapb.Partition partition,
+                                 PartitionKeyRange partitionKeyRange) throws PDException;
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java
new file mode 100644
index 0000000000..95c8f88321
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java
@@ -0,0 +1,1563 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionUtils;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.MetaTask;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.pulse.ChangeShard;
+import org.apache.hugegraph.pd.grpc.pulse.CleanPartition;
+import org.apache.hugegraph.pd.grpc.pulse.CleanType;
+import org.apache.hugegraph.pd.grpc.pulse.ConfChangeType;
+import org.apache.hugegraph.pd.grpc.pulse.DbCompaction;
+import org.apache.hugegraph.pd.grpc.pulse.MovePartition;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange;
+import org.apache.hugegraph.pd.grpc.pulse.SplitPartition;
+import org.apache.hugegraph.pd.grpc.pulse.TransferLeader;
+import org.apache.hugegraph.pd.meta.MetadataFactory;
+import org.apache.hugegraph.pd.meta.PartitionMeta;
+import org.apache.hugegraph.pd.meta.TaskInfoMeta;
+import org.apache.hugegraph.pd.raft.RaftStateListener;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Partition management
+ */
+@Slf4j
+public class PartitionService implements RaftStateListener {
+
+    private final long Partition_Version_Skip = 0x0F;
+    private final StoreNodeService storeService;
+    private final PartitionMeta partitionMeta;
+    private final PDConfig pdConfig;
+    // Listeners of partition instructions
+    private final List<PartitionInstructionListener> instructionListeners;
+    // Listeners of partition status
+    private final List<PartitionStatusListener> statusListeners;
+
+    public PartitionService(PDConfig config, StoreNodeService storeService) {
+        this.pdConfig = config;
+        this.storeService = storeService;
+        partitionMeta = MetadataFactory.newPartitionMeta(config);
+        instructionListeners = Collections.synchronizedList(new ArrayList<>());
+        statusListeners = Collections.synchronizedList(new ArrayList<>());
+    }
+
+    public void init() throws PDException {
+        partitionMeta.init();
+        storeService.addStatusListener(new StoreStatusListener() {
+            @Override
+            public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old,
+                                             Metapb.StoreState status) {
+                if (status == Metapb.StoreState.Tombstone) {
+                    // The store is taken down: notify all its partitions to migrate data
+                    storeOffline(store);
+                }
+            }
+
+            @Override
+            public void onGraphChange(Metapb.Graph graph,
+                                      Metapb.GraphState stateOld,
+                                      Metapb.GraphState stateNew) {
+            }
+
+            @Override
+            public void onStoreRaftChanged(Metapb.Store store) {
+            }
+        });
+    }
+
+    /**
+     * Return the partition that the key belongs to
+     *
+     * @param graphName the graph name
+     * @param key       the raw key
+     * @return the partition and its leader shard
+     */
+    public Metapb.PartitionShard getPartitionShard(String graphName,
+                                                   byte[] key) throws PDException {
+        long code = PartitionUtils.calcHashcode(key);
+        return getPartitionByCode(graphName, code);
+    }
+
+    /**
+     * Return the partition that owns the given hash code
+     */
+    public Metapb.PartitionShard getPartitionByCode(String graphName,
+                                                    long code) throws PDException {
+        if (code < 0 || code >= PartitionUtils.MAX_VALUE) {
+            throw new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "code error");
+        }
+        // Look up the partition id by code; create a new partition if none exists
+        Metapb.Partition partition = partitionMeta.getPartitionByCode(graphName, code);
+        if (partition == null) {
+            synchronized (this) {
+                // Re-read inside the lock so concurrent callers create it only once
+                partition = partitionMeta.getPartitionByCode(graphName, code);
+                if (partition == null) {
+                    partition = newPartition(graphName, code);
+                }
+            }
+        }
+
+        Metapb.PartitionShard partShard =
+                Metapb.PartitionShard.newBuilder()
+                                     .setPartition(partition)
+                                     .setLeader(storeService.getLeader(partition, 0))
+                                     .build();
+        log.debug("{} Partition get code = {}, partition id = {}, start = {}, end = {}, " +
+                  "leader = {}", graphName, code, partition.getId(), partition.getStartKey(),
+                  partition.getEndKey(), partShard.getLeader());
+        return partShard;
+    }
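+    // Mapping sketch (values assumed for illustration): if PartitionUtils.MAX_VALUE
+    // were 0xFFFF and the graph had 4 partitions, partitionSize would round up to
+    // 0x4000, so a key hashing to 0x7A10 falls into partition 1, range [0x4000, 0x8000).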
+
+    /**
+     * Return the partition info by id
+     */
+    public Metapb.PartitionShard getPartitionShardById(String graphName,
+                                                       int partId) throws PDException {
+        Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partId);
+        if (partition == null) {
+            return null;
+        }
+        return Metapb.PartitionShard.newBuilder()
+                                    .setPartition(partition)
+                                    // Should be the real leader; take the first one for now
+                                    .setLeader(storeService.getLeader(partition, 0))
+                                    .build();
+    }
+
+    public Metapb.Partition getPartitionById(String graphName, int partId) throws PDException {
+        return partitionMeta.getPartitionById(graphName, partId);
+    }
+
+    public List<Metapb.Partition> getPartitionById(int partId) throws PDException {
+        return partitionMeta.getPartitionById(partId);
+    }
+
+    /**
+     * Get all partitions
+     */
+    public List<Metapb.Partition> getPartitions() {
+        return partitionMeta.getPartitions();
+    }
+
+    public List<Metapb.Partition> getPartitions(String graphName) {
+        if (StringUtils.isAllEmpty(graphName)) {
+            return partitionMeta.getPartitions();
+        }
+        return partitionMeta.getPartitions(graphName);
+    }
+
+    /**
+     * Find all partitions that live on the given store
+     */
+    public List<Metapb.Partition> getPartitionByStore(Metapb.Store store) throws PDException {
+        List<Metapb.Partition> partitions = new ArrayList<>();
+        getGraphs().forEach(graph -> {
+            getPartitions(graph.getGraphName()).forEach(partition -> {
+                try {
+                    storeService.getShardGroup(partition.getId()).getShardsList()
+                                .forEach(shard -> {
+                                    if (shard.getStoreId() == store.getId()) {
+                                        partitions.add(partition);
+                                    }
+                                });
+                } catch (PDException e) {
+                    throw new RuntimeException(e);
+                }
+            });
+        });
+        return partitions;
+    }
+
+    /**
+     * Create a new partition covering the given hash code
+     */
+    private Metapb.Partition newPartition(String graphName, long code) throws PDException {
+        Metapb.Graph graph = partitionMeta.getAndCreateGraph(graphName);
+        int partitionSize = PartitionUtils.MAX_VALUE / graph.getPartitionCount();
+        if (PartitionUtils.MAX_VALUE % graph.getPartitionCount() != 0) {
+            // The hash space does not divide evenly; round the partition size up
+            partitionSize++;
+        }
+
+        int partitionId = (int) (code / partitionSize);
+        long startKey = (long) partitionSize * partitionId;
+        long endKey = (long) partitionSize * (partitionId + 1);
+
+        // Check the local meta first
+        Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partitionId);
+        if (partition == null) {
+            // Allocate the stores
+            storeService.allocShards(null, partitionId);
+            partition = Metapb.Partition.newBuilder()
+                                        .setId(partitionId)
+                                        .setVersion(0)
+                                        .setState(Metapb.PartitionState.PState_Normal)
+                                        .setStartKey(startKey)
+                                        .setEndKey(endKey)
+                                        .setGraphName(graphName)
+                                        .build();
+            log.info("Create newPartition {}", partition);
+        }
+
+        partitionMeta.updatePartition(partition);
+        return partition;
+    }
+
+    /**
+     * Compute the partition a key belongs to, using simple hash mapping
+     */
+    protected int getPartitionId(String graphName, byte[] key) throws PDException {
+        int code = PartitionUtils.calcHashcode(key);
+        Metapb.Partition partition = partitionMeta.getPartitionByCode(graphName, code);
+        return partition != null ? partition.getId() : -1;
+    }
+
+    /**
+     * Get all partitions that the key range spans.
+     * For now this is computed via hash codes; the proper approach queries by key.
+     */
+    public List<Metapb.PartitionShard> scanPartitions(String graphName, byte[] startKey,
+                                                      byte[] endKey) throws PDException {
+        int startPartId = getPartitionId(graphName, startKey);
+        int endPartId = getPartitionId(graphName, endKey);
+
+        List<Metapb.PartitionShard> partShards = new ArrayList<>();
+        for (int id = startPartId; id <= endPartId; id++) {
+            Metapb.Partition partition = partitionMeta.getPartitionById(graphName, id);
+            partShards.add(
+                    Metapb.PartitionShard.newBuilder()
+                                         .setPartition(partition)
+                                         // Should be the real leader; take the first one for now
+                                         .setLeader(storeService.getLeader(partition, 0))
+                                         .build()
+            );
+        }
+        return partShards;
+    }
+
+    public synchronized long updatePartition(List<Metapb.Partition> partitions)
+            throws PDException {
+        for (Metapb.Partition pt : partitions) {
+            Metapb.Partition oldPt = getPartitionById(pt.getGraphName(), pt.getId());
+            partitionMeta.updatePartition(pt);
+            onPartitionChanged(oldPt, pt);
+        }
+        return partitions.size();
+    }
+
+    /**
+     * Update the state of a partition and of its graph
+     */
+    public synchronized void updatePartitionState(String graph, int partId,
+                                                  Metapb.PartitionState state)
+            throws PDException {
+        Metapb.Partition partition = getPartitionById(graph, partId);
+        if (partition.getState() != state) {
+            Metapb.Partition newPartition =
+                    partitionMeta.updatePartition(partition.toBuilder().setState(state).build());
+            onPartitionChanged(partition, newPartition);
+        }
+    }
+
+    public synchronized void updateGraphState(String graphName,
+                                              Metapb.PartitionState state) throws PDException {
+        Metapb.Graph graph = getGraph(graphName);
+        if (graph != null) {
+            partitionMeta.updateGraph(graph.toBuilder().setState(state).build());
+        }
+    }
+
+    public synchronized long removePartition(String graphName, int partId) throws PDException {
+        log.info("Partition {}-{} removePartition", graphName, partId);
+        Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partId);
+        var ret = partitionMeta.removePartition(graphName, partId);
+        partitionMeta.reload();
+        onPartitionRemoved(partition);
+
+        // Some source partitions may be offline; refresh graph and cluster state after removal
+        try {
+            Metapb.PartitionState state = Metapb.PartitionState.PState_Normal;
+            for (Metapb.Partition pt : partitionMeta.getPartitions(partition.getGraphName())) {
+                if (pt.getState().getNumber() > state.getNumber()) {
+                    state = pt.getState();
+                }
+            }
+            updateGraphState(partition.getGraphName(), state);
+
+            state = Metapb.PartitionState.PState_Normal;
+            for (Metapb.ShardGroup group : storeService.getShardGroups()) {
+                if (group.getState().getNumber() > state.getNumber()) {
+                    state = group.getState();
+                }
+            }
+            storeService.updateClusterStatus(state);
+        } catch (PDException e) {
+            log.error("removePartition", e);
+        }
+
+        return ret;
+    }
+
+    public Metapb.PartitionStats getPartitionStats(String graphName,
+                                                   int partitionId) throws PDException {
+        return partitionMeta.getPartitionStats(graphName, partitionId);
+    }
+
+    /**
+     * Get the partition stats of a graph
+     */
+    public List<Metapb.PartitionStats> getPartitionStatus(String graphName) throws PDException {
+        return partitionMeta.getPartitionStats(graphName);
+    }
+
+    /**
+     * Return the info of all graphs
+     */
+    public List<Metapb.Graph> getGraphs() throws PDException {
+        return partitionMeta.getGraphs();
+    }
+
+    public Metapb.Graph getGraph(String graphName) throws PDException {
+        return partitionMeta.getGraph(graphName);
+    }
+
+    /**
+     * Delete a graph and all of its partitions
+     */
+    public Metapb.Graph 
delGraph(String graphName) throws PDException { + log.info("delGraph {}", graphName); + Metapb.Graph graph = getGraph(graphName); + getPartitions(graphName).forEach(partition -> { + onPartitionRemoved(partition); + }); + partitionMeta.removeAllPartitions(graphName); + partitionMeta.removeGraph(graphName); + return graph; + } + + /** + * 修改图信息,需要通知到store + */ + public synchronized Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { + Metapb.Graph lastGraph = partitionMeta.getAndCreateGraph(graph.getGraphName()); + log.info("updateGraph graph: {}, last: {}", graph, lastGraph); + + int partCount = + (graph.getGraphName().endsWith("/s") || graph.getGraphName().endsWith("/m")) ? + 1 : pdConfig.getPartition().getTotalCount(); + + // set the partition count to specified if legal. + if (graph.getPartitionCount() <= partCount && graph.getPartitionCount() > 0) { + partCount = graph.getPartitionCount(); + } + + if (partCount == 0) { + throw new PDException(10010, "update graph error, partition count = 0"); + } + + graph = lastGraph.toBuilder() + .mergeFrom(graph) + .setPartitionCount(partCount) + .build(); + partitionMeta.updateGraph(graph); + + // 分区数发生改变 + if (lastGraph.getPartitionCount() != graph.getPartitionCount()) { + log.info("updateGraph graph: {}, partition count changed from {} to {}", + graph.getGraphName(), lastGraph.getPartitionCount(), + graph.getPartitionCount()); + // TODO 修改图的分区数,需要进行数据迁移。 + } + return graph; + } + + // partitionId -> (storeId -> shard committedIndex) + public Map> getCommittedIndexStats() throws PDException { + Map> map = new HashMap<>(); + for (Metapb.Store store : storeService.getActiveStores()) { + for (Metapb.RaftStats raftStats : store.getStats().getRaftStatsList()) { + int partitionID = raftStats.getPartitionId(); + if (!map.containsKey(partitionID)) { + map.put(partitionID, new HashMap<>()); + } + Map storeMap = map.get(partitionID); + if (!storeMap.containsKey(store.getId())) { + storeMap.put(store.getId(), raftStats.getCommittedIndex()); + } + } + } + return map; + } + + /** + * 存储被下线,迁移分区数据 + * + * @param store + */ + public void storeOffline(Metapb.Store store) { + try { + log.info("storeOffline store id: {}, address: {}, state: {}", + store.getId(), store.getAddress(), store.getState()); + List partitions = getPartitionByStore(store); + var partIds = new HashSet(); + for (Metapb.Partition p : partitions) { + if (partIds.contains(p.getId())) { + continue; + } + shardOffline(p, store.getId()); + partIds.add(p.getId()); + } + } catch (PDException e) { + log.error("storeOffline exception: ", e); + } + } + + /** + * 存储被下线,迁移分区数据 + */ + public synchronized void shardOffline(Metapb.Partition partition, long storeId) { + try { + log.info("shardOffline Partition {} - {} shardOffline store : {}", + partition.getGraphName(), partition.getId(), storeId); + // partition = getPartitionById(partition.getGraphName(), partition.getId()); + // Metapb.Partition.Builder builder = Metapb.Partition.newBuilder(partition); + // builder.clearShards(); + // partition.getShardsList().forEach(shard -> { + // if (shard.getStoreId() != storeId) + // builder.addShards(shard); + // }); + // partition = builder.build(); + Metapb.Graph graph = getGraph(partition.getGraphName()); + reallocPartitionShards(graph, partition); + + } catch (PDException e) { + log.error("storeOffline exception: ", e); + } + } + + private boolean isShardListEquals(List list1, List list2) { + if (list1 == list2) { + return true; + } else if (list1 != null && list2 != null) { + + var s1 = 
list1.stream().map(Metapb.Shard::getStoreId).sorted(Long::compare) + .collect(Collectors.toList()); + var s2 = list2.stream().map(Metapb.Shard::getStoreId).sorted(Long::compare) + .collect(Collectors.toList()); + + if (s1.size() == s2.size()) { + for (int i = 0; i < s1.size(); i++) { + if (s1.get(i) != s2.get(i)) { + return false; + } + } + return true; + } + } + + return false; + } + + /** + * 重新分配shard + * + * @param graph + * @param partition + * @throws PDException + */ + public void reallocPartitionShards(Metapb.Graph graph, Metapb.Partition partition) throws + PDException { + if (partition == null) { + return; + } + List originalShards = storeService.getShardList(partition.getId()); + + var shardGroup = storeService.getShardGroup(partition.getId()); + + List shards = storeService.reallocShards(shardGroup); + + if (isShardListEquals(originalShards, shards)) { + log.info("reallocPartitionShards:{} vs {}", shardGroup, shards); + // partition = Metapb.Partition.newBuilder(partition) + // .clearShards().addAllShards(shards) + // .build(); + // partitionMeta.updatePartition(partition); + fireChangeShard(partition, shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); + } + } + + public synchronized void reallocPartitionShards(String graphName, int partitionId) throws + PDException { + reallocPartitionShards(partitionMeta.getGraph(graphName), + partitionMeta.getPartitionById(graphName, partitionId)); + } + + /** + * 迁移分区副本 + */ + public synchronized void movePartitionsShard(Integer partitionId, long fromStore, + long toStore) { + try { + log.info("movePartitionsShard partitionId {} from store {} to store {}", partitionId, + fromStore, toStore); + for (Metapb.Graph graph : getGraphs()) { + Metapb.Partition partition = + this.getPartitionById(graph.getGraphName(), partitionId); + if (partition == null) { + continue; + } + + var shardGroup = storeService.getShardGroup(partitionId); + List shards = new ArrayList<>(); + shardGroup.getShardsList().forEach(shard -> { + if (shard.getStoreId() != fromStore) { + shards.add(shard); + } + }); + + shards.add(Metapb.Shard.newBuilder().setStoreId(toStore) + .setRole(Metapb.ShardRole.Follower).build()); + + // storeService.updateShardGroup(partitionId, shards, -1, -1); + // storeService.onShardGroupStatusChanged(shardGroup, newShardGroup); + fireChangeShard(partition, shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); + // shard group和 graph无关,迁移一个就够了 + break; + } + } catch (PDException e) { + log.error("Partition {} movePartitionsShard exception {}", partitionId, e); + } + } + + /** + * 把集群中所有的分区,拆成split + * + * @param splits 拆分分区 + */ + public synchronized void splitPartition(List> splits) throws + PDException { + var tasks = new HashMap>>(); + + for (var pair : splits) { + for (var partition : getPartitionById(pair.getKey())) { + if (!tasks.containsKey(partition.getGraphName())) { + tasks.put(partition.getGraphName(), new ArrayList<>()); + } + tasks.get(partition.getGraphName()).add(pair); + } + } + + for (var entry : tasks.entrySet()) { + splitPartition(getGraph(entry.getKey()), entry.getValue()); + } + } + + /** + * 分区分裂, 把一个图拆分到N 个 + * + * @param graph graph + * @param toCount target count + * @throws PDException + */ + + public synchronized void splitPartition(Metapb.Graph graph, int toCount) throws PDException { + + var partitionCount = getPartitions(graph.getGraphName()).size(); + var maxShardsPerStore = pdConfig.getPartition().getMaxShardsPerStore(); + var shardCount = pdConfig.getPartition().getShardCount(); + + if (shardCount * toCount > 
storeService.getActiveStores().size() * maxShardsPerStore) { + throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "can't satisfy target shard group count, reached the upper " + + "limit of the cluster"); + } + + if (toCount % partitionCount != 0 || toCount <= partitionCount) { + throw new PDException(Pdpb.ErrorType.Invalid_Split_Partition_Count_VALUE, + "invalid split partition count, make sure to count is N time of" + + " current partition count"); + } + + // 由于是整数倍数,扩充因子为 toCount / current count + var splitCount = toCount / partitionCount; + var list = new ArrayList>(); + for (int i = 0; i < partitionCount; i++) { + list.add(new KVPair<>(i, splitCount)); + } + + splitPartition(graph, list); + } + + private synchronized void splitPartition(Metapb.Graph graph, + List> splits) + throws PDException { + var taskInfoMeta = storeService.getTaskInfoMeta(); + if (taskInfoMeta.scanSplitTask(graph.getGraphName()).size() > 0) { + return; + } + + splits.sort(Comparator.comparing(KVPair::getKey)); + log.info("split partition, graph: {}, splits:{}", graph, splits); + + // 从最后一个partition下标开始 + var i = getPartitions(graph.getGraphName()).size(); + + for (var pair : splits) { + Metapb.Partition partition = + partitionMeta.getPartitionById(graph.getGraphName(), pair.getKey()); + if (partition != null) { + var splitCount = pair.getValue(); + long splitLen = (partition.getEndKey() - partition.getStartKey()) / splitCount; + + List newPartitions = new ArrayList<>(); + // 第一个分区也就是原分区 + newPartitions.add(partition.toBuilder() + .setStartKey(partition.getStartKey()) + .setEndKey(partition.getStartKey() + splitLen) + .setId(partition.getId()) + .setState(Metapb.PartitionState.PState_Offline) + .build()); + + int idx = 0; + + for (; idx < splitCount - 2; idx++) { + newPartitions.add(partition.toBuilder() + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(newPartitions.get(idx).getEndKey() + + splitLen) + .setId(i) + .setState(Metapb.PartitionState.PState_Offline) + .build()); + i += 1; + } + + newPartitions.add(partition.toBuilder() + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(partition.getEndKey()) + .setId(i) + .setState(Metapb.PartitionState.PState_Offline) + .build()); + i += 1; + + // try to save new partitions, and repair shard group + for (int j = 0; j < newPartitions.size(); j++) { + var newPartition = newPartitions.get(j); + + if (j != 0) { + partitionMeta.updatePartition(newPartition); + } + // 创建shard group,如果为空,则按照partition的shard group为蓝本,去创建,保证在一个机器上 + // 如果存在,则由于各个图的分区数量不一样,需要store端复制到其他机器上 + var shardGroup = storeService.getShardGroup(newPartition.getId()); + if (shardGroup == null) { + shardGroup = storeService.getShardGroup(partition.getId()).toBuilder() + .setId(newPartition.getId()) + .build(); + storeService.getStoreInfoMeta().updateShardGroup(shardGroup); + updateShardGroupCache(shardGroup); + } + + // 做shard list的检查 + if (shardGroup.getShardsCount() != pdConfig.getPartition().getShardCount()) { + storeService.reallocShards(shardGroup); + } + } + + SplitPartition splitPartition = SplitPartition.newBuilder() + .addAllNewPartition(newPartitions) + .build(); + + fireSplitPartition(partition, splitPartition); + // 修改Partition状态为下线,任务完成后恢复为上线 + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Offline); + + // 记录事务 + var task = MetaTask.Task.newBuilder().setPartition(partition) + .setSplitPartition(splitPartition) + .build(); + taskInfoMeta.addSplitTask(pair.getKey(), task.getPartition(), + 
task.getSplitPartition()); + } + } + } + + /** + * 转移leader到其他shard上. + * 转移一个partition即可 + */ + public void transferLeader(Integer partId, Metapb.Shard shard) { + try { + var partitions = getPartitionById(partId); + if (partitions.size() > 0) { + fireTransferLeader(partitions.get(0), + TransferLeader.newBuilder().setShard(shard).build()); + } +// for (Metapb.Graph graph : getGraphs()) { +// Metapb.Partition partition = this.getPartitionById(graph.getGraphName(), partId); +// if (partition != null) { +// fireTransferLeader(partition, TransferLeader.newBuilder().setShard(shard) +// .build()); +// } +// } + } catch (PDException e) { + log.error("Partition {} transferLeader exception {}", partId, e); + } + } + + /** + * 分区合并,将整个集群的分区数,合并到toCount个 + * + * @param toCount 目标分区数 + * @throws PDException when query errors + */ + public void combinePartition(int toCount) throws PDException { + + int shardsTotalCount = getShardGroupCount(); + for (var graph : getGraphs()) { + // 对所有大于toCount分区的图,都进行缩容 + if (graph.getPartitionCount() > toCount) { + combineGraphPartition(graph, toCount, shardsTotalCount); + } + } + } + + /** + * 针对单个图,进行分区合并 + * + * @param graphName the name of the graph + * @param toCount the target partition count + * @throws PDException when query errors + */ + + public void combineGraphPartition(String graphName, int toCount) throws PDException { + combineGraphPartition(getGraph(graphName), toCount, getShardGroupCount()); + } + + /** + * 单图合并的内部实现 + * + * @param graph the name of the graph + * @param toCount the target partition count + * @param shardCount the shard count of the clusters + * @throws PDException when query errors + */ + private synchronized void combineGraphPartition(Metapb.Graph graph, int toCount, int shardCount) + throws PDException { + if (graph == null) { + throw new PDException(1, + "Graph not exists, try to use full graph name, like " + + "/DEFAULT/GRAPH_NAME/g"); + } + + log.info("Combine graph {} partition, from {}, to {}, with shard count:{}", + graph.getGraphName(), graph.getPartitionCount(), toCount, shardCount); + + if (!checkTargetCount(graph.getPartitionCount(), toCount, shardCount)) { + log.error("Combine partition, illegal toCount:{}, graph:{}", toCount, + graph.getGraphName()); + throw new PDException(2, + "illegal partition toCount, should between 1 ~ shard group " + + "count and " + + " can be dived by shard group count"); + } + + var taskInfoMeta = storeService.getTaskInfoMeta(); + if (taskInfoMeta.scanMoveTask(graph.getGraphName()).size() > 0) { + throw new PDException(3, "Graph Combine process exists"); + } + + // 按照 key start 排序,合并后的key range 是连续的 + var partitions = getPartitions(graph.getGraphName()).stream() + .sorted(Comparator.comparing( + Metapb.Partition::getStartKey)) + .collect(Collectors.toList()); + + // 分区编号不一定是连续的 + var sortPartitions = getPartitions(graph.getGraphName()) + .stream() + .sorted(Comparator.comparing(Metapb.Partition::getId)) + .collect(Collectors.toList()); + + var groupSize = partitions.size() / toCount; // merge group size + // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 共12个分区, 合并成4个 + // 方案:0,1,2 => 0, 3,4,5 -> 1, 6,7,8 ->2, 9,10,11 -> 3 + // 保证分区的连续性. + for (int i = 0; i < toCount; i++) { + var startKey = partitions.get(i * groupSize).getStartKey(); + var endKey = partitions.get(i * groupSize + groupSize - 1).getEndKey(); + // compose the key range + // the start key and end key should be changed if combine success. 
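+            // Worked example (illustrative): 12 partitions combined into 4 give
+            // groupSize = 3; iteration i = 1 merges partitions 3, 4, 5 into target
+            // 1, whose range becomes [startKey(3), endKey(5)).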
+
+            var targetPartition = Metapb.Partition.newBuilder(sortPartitions.get(i))
+                                                  .setStartKey(startKey)
+                                                  .setEndKey(endKey)
+                                                  .build();
+
+            for (int j = 0; j < groupSize; j++) {
+                var partition = partitions.get(i * groupSize + j);
+                // Skip the partition that is the merge target itself
+                if (i == partition.getId()) {
+                    continue;
+                }
+
+                log.info("combine partition of graph :{}, from part id {} to {}",
+                         partition.getGraphName(), partition.getId(), targetPartition.getId());
+                MovePartition movePartition = MovePartition.newBuilder()
+                                                           .setTargetPartition(targetPartition)
+                                                           .setKeyStart(partition.getStartKey())
+                                                           .setKeyEnd(partition.getEndKey())
+                                                           .build();
+                taskInfoMeta.addMovePartitionTask(partition, movePartition);
+                // Take the source offline
+                updatePartitionState(partition.getGraphName(), partition.getId(),
+                                     Metapb.PartitionState.PState_Offline);
+                fireMovePartition(partition, movePartition);
+            }
+            // Take the target offline
+            updatePartitionState(targetPartition.getGraphName(), targetPartition.getId(),
+                                 Metapb.PartitionState.PState_Offline);
+        }
+
+        storeService.updateClusterStatus(Metapb.ClusterState.Cluster_Offline);
+    }
+
+    /**
+     * Get the total count of raft groups via storeService
+     *
+     * @return the count of raft groups
+     */
+    private int getShardGroupCount() {
+        try {
+            return Optional.ofNullable(storeService.getShardGroups()).orElseGet(ArrayList::new)
+                           .size();
+        } catch (PDException e) {
+            log.error("get shard group failed", e);
+        }
+        return 0;
+    }
+
+    /**
+     * Check whether the graph partitions can be combined from fromCount into toCount
+     *
+     * @param fromCount the current partition count
+     * @param toCount   the target partition count
+     * @return true when available, false otherwise
+     */
+    private boolean checkTargetCount(int fromCount, int toCount, int shardCount) {
+        // Must lie strictly between 1 and fromCount, divide it evenly,
+        // and stay below the shard group count
+        return toCount >= 1 && toCount < fromCount && fromCount % toCount == 0 &&
+               toCount < shardCount;
+    }
+
+    /**
+     * Handle a partition heartbeat and record the leader info.
+     * Check term and version to decide whether the message is the latest one.
+     */
+    public void partitionHeartbeat(Metapb.PartitionStats stats) throws PDException {
+        Metapb.ShardGroup shardGroup = storeService.getShardGroup(stats.getId());
+        // Shard group version changes: the shard group is controlled by PD and may
+        // be briefly inconsistent after a split etc., with PD as the source of
+        // truth, while the stores control the shard leader
+        if (shardGroup != null &&
+            (shardGroup.getVersion() < stats.getLeaderTerm() ||
+             shardGroup.getConfVer() < stats.getConfVer())) {
+            storeService.updateShardGroup(stats.getId(), stats.getShardList(),
+                                          stats.getLeaderTerm(), stats.getConfVer());
+        }
+
+        List<Metapb.Partition> partitions = getPartitionById(stats.getId());
+        for (Metapb.Partition partition : partitions) {
+            // partitionMeta.getAndCreateGraph(partition.getGraphName());
+            checkShardState(partition, stats);
+        }
+        // Record the statistics
+        partitionMeta.updatePartitionStats(
+                stats.toBuilder().setTimestamp(System.currentTimeMillis()).build());
+    }
+
+    /**
+     * Check the shard states; offline shards affect the partition state
+     */
+    private void checkShardState(Metapb.Partition partition, Metapb.PartitionStats stats) {
+        try {
+            int offCount = 0;
+            for (Metapb.ShardStats shard : stats.getShardStatsList()) {
+                if (shard.getState() == Metapb.ShardState.SState_Offline) {
+                    offCount++;
+                }
+            }
+            if (partition.getState() != Metapb.PartitionState.PState_Offline) {
+                if (offCount == 0) {
+                    updatePartitionState(partition.getGraphName(), partition.getId(),
+                                         Metapb.PartitionState.PState_Normal);
+                } else if (offCount * 2 < stats.getShardStatsCount()) {
+                    updatePartitionState(partition.getGraphName(), partition.getId(),
+                                         Metapb.PartitionState.PState_Warn);
+                } else {
+                    updatePartitionState(partition.getGraphName(), partition.getId(),
+                                         Metapb.PartitionState.PState_Warn);
+                }
+            }
+        } catch (Exception e) {
+            log.error("Partition {}-{} checkShardState exception {}",
+                      partition.getGraphName(), partition.getId(), e);
+        }
+    }
+
+    public void addInstructionListener(PartitionInstructionListener event) {
+        instructionListeners.add(event);
+    }
+
+    public void addStatusListener(PartitionStatusListener listener) {
+        statusListeners.add(listener);
+    }
+
+    /**
+     * Fire a change-shard instruction
+     *
+     * @param changeType the type of the configuration change
+     */
+    protected void fireChangeShard(Metapb.Partition partition, List<Metapb.Shard> shards,
+                                   ConfChangeType changeType) {
+        log.info("fireChangeShard partition: {}-{}, changeType:{} {}", partition.getGraphName(),
+                 partition.getId(), changeType, shards);
+        instructionListeners.forEach(cmd -> {
+            try {
+                cmd.changeShard(partition, ChangeShard.newBuilder()
+                                                      .addAllShard(shards)
+                                                      .setChangeType(changeType)
+                                                      .build());
+            } catch (Exception e) {
+                log.error("fireChangeShard", e);
+            }
+        });
+    }
+
+    public void changeShard(int groupId, List<Metapb.Shard> shards) throws PDException {
+        var partitions = getPartitionById(groupId);
+        if (partitions.size() == 0) {
+            return;
+        }
+        fireChangeShard(partitions.get(0), shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST);
+    }
+
+    /**
+     * Fire a partition-split instruction
+     */
+    protected void fireSplitPartition(Metapb.Partition partition, SplitPartition splitPartition) {
+        log.info("fireSplitPartition partition: {}-{}, split :{}",
+                 partition.getGraphName(), partition.getId(), splitPartition);
+        instructionListeners.forEach(cmd -> {
+            try {
+                cmd.splitPartition(partition, splitPartition);
+            } catch (Exception e) {
+                log.error("fireSplitPartition", e);
+            }
+        });
+    }
+
+    /**
+     * Fire a leader-transfer instruction
+     */
+    protected void fireTransferLeader(Metapb.Partition partition, TransferLeader transferLeader) {
+        log.info("fireTransferLeader partition: {}-{}, leader :{}",
+                 partition.getGraphName(), partition.getId(), transferLeader);
+        instructionListeners.forEach(cmd -> {
+            try {
+                cmd.transferLeader(partition, transferLeader);
+            } catch (Exception e) {
+                log.error("fireTransferLeader", e);
+            }
+        });
+    }
+
+    /**
+     * Fire a move-data instruction
+     *
+     * @param partition     the source partition
+     * @param movePartition the target partition, including the key range
+     */
+    protected void fireMovePartition(Metapb.Partition partition, MovePartition movePartition) {
+        log.info("fireMovePartition partition: {} -> {}", partition, movePartition);
+        instructionListeners.forEach(cmd -> {
+            try {
+                cmd.movePartition(partition, movePartition);
+            } catch (Exception e) {
+                log.error("fireMovePartition", e);
+            }
+        });
+    }
+
+    protected void fireCleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) {
+        log.info("fireCleanPartition partition: {} -> just keep : {}->{}",
+                 partition.getId(), cleanPartition.getKeyStart(), cleanPartition.getKeyEnd());
+        instructionListeners.forEach(cmd -> {
+            try {
+                cmd.cleanPartition(partition, cleanPartition);
+            } catch (Exception e) {
+                log.error("fireCleanPartition", e);
+            }
+        });
+    }
+
+    protected void fireChangePartitionKeyRange(Metapb.Partition partition,
+                                               PartitionKeyRange partitionKeyRange) {
+        log.info("fireChangePartitionKeyRange partition: {}-{} -> key range {}",
+                 partition.getGraphName(), partition.getId(), partitionKeyRange);
+        instructionListeners.forEach(cmd -> {
+            try {
+                cmd.changePartitionKeyRange(partition, partitionKeyRange);
+            } catch (Exception e) {
+                log.error("fireChangePartitionKeyRange", e);
+            }
+        });
+    }
+
+    /**
+     * Handle the report of a graph move (combine) task
+     */
+    public synchronized void handleMoveTask(MetaTask.Task task) throws PDException {
+        var taskInfoMeta = storeService.getTaskInfoMeta();
+        var partition = task.getPartition();
+        var 
movePartition = task.getMovePartition(); + + MetaTask.Task pdMetaTask = taskInfoMeta.getMovePartitionTask(partition.getGraphName(), + movePartition.getTargetPartition() + .getId(), + partition.getId()); + + log.info("report move task, graph:{}, pid : {}->{}, state: {}", + task.getPartition().getGraphName(), + task.getPartition().getId(), task.getMovePartition().getTargetPartition().getId(), + task.getState()); + + // 已经被处理(前面有failed) + if (pdMetaTask != null) { + var newTask = pdMetaTask.toBuilder().setState(task.getState()).build(); + taskInfoMeta.updateMovePartitionTask(newTask); + + List subTasks = taskInfoMeta.scanMoveTask(partition.getGraphName()); + + var finished = subTasks.stream().allMatch(t -> + t.getState() == + MetaTask.TaskState.Task_Success || + t.getState() == + MetaTask.TaskState.Task_Failure); + + if (finished) { + var allSuccess = subTasks.stream().allMatch( + t -> t.getState() == MetaTask.TaskState.Task_Success); + if (allSuccess) { + log.info("graph:{} combine task all success!", partition.getGraphName()); + handleMoveTaskAllSuccess(subTasks, partition.getGraphName(), taskInfoMeta); + } else { + log.info("graph:{} combine task failed!", partition.getGraphName()); + handleMoveTaskIfFailed(partition.getGraphName(), taskInfoMeta); + } + } + } + } + + /** + * 当所有的迁移子任务成功: + * 1. 发送清理source分区指令 + * 2. 设置target上线, 更新key range, 更新 graph partition count + * 3. 删除move task,任务结束 + * + * @param subTasks all move sub tasks + * @param graphName graph name + * @param taskInfoMeta task info meta + * @throws PDException returns if write db failed + */ + private void handleMoveTaskAllSuccess(List subTasks, String graphName, + TaskInfoMeta taskInfoMeta) throws PDException { + + var targetPartitionIds = new HashSet(); + var targetPartitions = new ArrayList(); + var deleteFlags = + subTasks.stream().map(task -> task.getMovePartition().getTargetPartition().getId()) + .collect(Collectors.toSet()); + + for (MetaTask.Task subTask : subTasks) { + var source = subTask.getPartition(); + var targetPartition = subTask.getMovePartition().getTargetPartition(); + // 是否处理过 + if (!targetPartitionIds.contains(targetPartition.getId())) { + // 更新range + var old = getPartitionById(targetPartition.getGraphName(), targetPartition.getId()); + var newPartition = Metapb.Partition.newBuilder(old) + .setStartKey(targetPartition.getStartKey()) + .setEndKey(targetPartition.getEndKey()) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + // 在 key range之前更新,避免store没有分区的问题, 需要到pd查询 + updatePartition(List.of(newPartition)); + targetPartitions.add(newPartition); + + // 发送key range 变更消息 + PartitionKeyRange partitionKeyRange = PartitionKeyRange.newBuilder() + .setPartitionId(old.getId()) + .setKeyStart( + targetPartition.getStartKey()) + .setKeyEnd( + targetPartition.getEndKey()) + .build(); + // 通知store + fireChangePartitionKeyRange( + old.toBuilder().setState(Metapb.PartitionState.PState_Normal).build(), + partitionKeyRange); + + // 将 target 设置为上线. 
source 理论上可能被删掉,所以不处理 + updatePartitionState(newPartition.getGraphName(), newPartition.getId(), + Metapb.PartitionState.PState_Normal); + + targetPartitionIds.add(targetPartition.getId()); + } + + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart(source.getStartKey()) + .setKeyEnd(source.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_EXCLUDE_RANGE) + // target 的 partition只需要清理数据,不需要删除分区 + .setDeletePartition(!deleteFlags.contains( + source.getId())) + .build(); + + log.info("pd clean data: {}-{}, key range:{}-{}, type:{}, delete partition:{}", + source.getGraphName(), + source.getId(), + cleanPartition.getKeyStart(), + cleanPartition.getKeyEnd(), + CleanType.CLEAN_TYPE_EXCLUDE_RANGE, + cleanPartition.getDeletePartition()); + + // 清理掉被移动分区的数据 + fireCleanPartition(source, cleanPartition); + } + + // 更新key range, 本地更新,client更新 + // updatePartition(targetPartitions); + + // 更新target 分区状态,source 可能被删掉,所以不处理 + targetPartitions.forEach(p -> { + try { + updatePartitionState(p.getGraphName(), p.getId(), + Metapb.PartitionState.PState_Normal); + } catch (PDException e) { + throw new RuntimeException(e); + } + }); + + partitionMeta.reload(); + + // 更新graph partition count + var graph = getGraph(graphName).toBuilder() + .setPartitionCount(targetPartitionIds.size()) + .build(); + updateGraph(graph); + + // 事务完成 + taskInfoMeta.removeMoveTaskPrefix(graphName); + } + + /** + * 如果缩容任务有失败的,回滚合并操作 + * 1. 清理原来的target 分区,将迁移过来的数据再删掉 + * 2. 将source/target 分区设置为上线 + * 3. 删除task,任务结束 + * + * @param graphName graph name + * @param taskInfoMeta task info meta + * @throws PDException return if write to db failed + */ + private void handleMoveTaskIfFailed(String graphName, TaskInfoMeta taskInfoMeta) throws + PDException { + // 发送清理target分区的任务, 回滚target分区 + var targetPartitionIds = new HashSet(); + for (var metaTask : taskInfoMeta.scanMoveTask(graphName)) { + + var source = metaTask.getPartition(); + // 设置 source 为上线 + updatePartitionState(source.getGraphName(), source.getId(), + Metapb.PartitionState.PState_Normal); + var movedPartition = metaTask.getMovePartition().getTargetPartition(); + + if (targetPartitionIds.contains(movedPartition.getId())) { + continue; + } + + var targetPartition = getPartitionById(graphName, movedPartition.getId()); + + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart( + targetPartition.getStartKey()) + .setKeyEnd(targetPartition.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_KEEP_RANGE) + .setDeletePartition(false) + .build(); + fireCleanPartition(targetPartition, cleanPartition); + targetPartitionIds.add(targetPartition.getId()); + + // 设置target 上线 + updatePartitionState(targetPartition.getGraphName(), targetPartition.getId(), + Metapb.PartitionState.PState_Normal); + } + // 清理掉任务列表 + taskInfoMeta.removeMoveTaskPrefix(graphName); + } + + /** + * 处理clean task + * + * @param task clean task + */ + public void handleCleanPartitionTask(MetaTask.Task task) { + log.info("clean task {} -{}, key range:{}~{}, report: {}", + task.getPartition().getGraphName(), + task.getPartition().getId(), + task.getCleanPartition().getKeyStart(), + task.getCleanPartition().getKeyEnd(), + task.getState() + ); + + // 如果失败重试? 
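+        // Sketch (an assumption, not implemented upstream): a retry hook could
+        // simply re-fire the instruction when the store reports failure, e.g.
+        //   if (task.getState() == MetaTask.TaskState.Task_Failure) {
+        //       fireCleanPartition(task.getPartition(), task.getCleanPartition());
+        //   }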
+ } + + public synchronized void handleSplitTask(MetaTask.Task task) throws PDException { + + var taskInfoMeta = storeService.getTaskInfoMeta(); + var partition = task.getPartition(); + + MetaTask.Task pdMetaTask = + taskInfoMeta.getSplitTask(partition.getGraphName(), partition.getId()); + + log.info("report split task, graph:{}, pid : {}, state: {}", + task.getPartition().getGraphName(), + task.getPartition().getId(), task.getState()); + + if (pdMetaTask != null) { + var newTask = pdMetaTask.toBuilder().setState(task.getState()).build(); + taskInfoMeta.updateSplitTask(newTask); + + List subTasks = taskInfoMeta.scanSplitTask(partition.getGraphName()); + + var finished = subTasks.stream().allMatch(t -> + t.getState() == + MetaTask.TaskState.Task_Success || + t.getState() == + MetaTask.TaskState.Task_Failure); + + if (finished) { + var allSuccess = subTasks.stream().allMatch( + t -> t.getState() == MetaTask.TaskState.Task_Success); + if (allSuccess) { + log.info("graph:{} split task all success!", partition.getGraphName()); + handleSplitTaskAllSuccess(subTasks, partition.getGraphName(), taskInfoMeta); + } else { + handleSplitTaskIfFailed(subTasks, partition.getGraphName(), taskInfoMeta); + } + } + } + } + + private void handleSplitTaskAllSuccess(List subTasks, String graphName, + TaskInfoMeta taskInfoMeta) + throws PDException { + + int addedPartitions = 0; + var partitions = new ArrayList(); + for (MetaTask.Task subTask : subTasks) { + var source = subTask.getPartition(); + var newPartition = subTask.getSplitPartition().getNewPartitionList().get(0); + + // 发送key range 变更消息 + PartitionKeyRange partitionKeyRange = PartitionKeyRange.newBuilder() + .setPartitionId(source.getId()) + .setKeyStart( + newPartition.getStartKey()) + .setKeyEnd( + newPartition.getEndKey()) + .build(); + // 通知store + fireChangePartitionKeyRange(source, partitionKeyRange); + // 将 target 设置为上线. 
source 理论上可能被删掉,所以不处理 + + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart(newPartition.getStartKey()) + .setKeyEnd(newPartition.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_KEEP_RANGE) + // target 的 partition只需要清理数据,不需要删除分区 + .setDeletePartition(false) + .build(); + + log.info("pd clean data: {}-{}, key range:{}-{}, type:{}, delete partition:{}", + source.getGraphName(), + source.getId(), + cleanPartition.getKeyStart(), + cleanPartition.getKeyEnd(), + CleanType.CLEAN_TYPE_EXCLUDE_RANGE, + cleanPartition.getDeletePartition()); + + fireCleanPartition(source, cleanPartition); + + // 更新partition state + for (var sp : subTask.getSplitPartition().getNewPartitionList()) { + partitions.add( + sp.toBuilder().setState(Metapb.PartitionState.PState_Normal).build()); + } + + addedPartitions += subTask.getSplitPartition().getNewPartitionCount() - 1; + } + + updatePartition(partitions); + partitionMeta.reload(); + + var graph = getGraph(graphName); + + // set partition count + if (pdConfig.getConfigService().getPartitionCount() != + storeService.getShardGroups().size()) { + pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size()); + log.info("set the partition count of config server to {}", + storeService.getShardGroups().size()); + } + + // 更新graph partition count + var newGraph = graph.toBuilder() + .setPartitionCount(graph.getPartitionCount() + addedPartitions) + .build(); + updateGraph(newGraph); + + // 事务完成 + taskInfoMeta.removeSplitTaskPrefix(graphName); + } + + private void handleSplitTaskIfFailed(List subTasks, String graphName, + TaskInfoMeta taskInfoMeta) + throws PDException { + for (var metaTask : subTasks) { + var splitPartitions = metaTask.getSplitPartition().getNewPartitionList(); + for (int i = 1; i < splitPartitions.size(); i++) { + var split = splitPartitions.get(i); + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart(split.getStartKey()) + .setKeyEnd(split.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_EXCLUDE_RANGE) + .setDeletePartition(true) + .build(); + + fireCleanPartition(split, cleanPartition); + } + + // set partition state normal + var partition = metaTask.getPartition(); + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Normal); + } + // 清理掉任务列表 + taskInfoMeta.removeSplitTaskPrefix(graphName); + } + + /** + * 接收到Leader改变的消息 + * 更新图状态,触发分区变更 + */ + protected void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { + log.info("onPartitionChanged partition: {}", partition); + if (old != null && old.getState() != partition.getState()) { + // 状态改变,重置图的状态 + Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; + for (Metapb.Partition pt : partitionMeta.getPartitions(partition.getGraphName())) { + if (pt.getState().getNumber() > state.getNumber()) { + state = pt.getState(); + } + } + try { + updateGraphState(partition.getGraphName(), state); + } catch (PDException e) { + log.error("onPartitionChanged", e); + } + + } + + statusListeners.forEach(e -> { + e.onPartitionChanged(old, partition); + }); + } + + protected void onPartitionRemoved(Metapb.Partition partition) { + log.info("onPartitionRemoved partition: {}", partition); + statusListeners.forEach(e -> { + e.onPartitionRemoved(partition); + }); + } + + /** + * PD的leader发生改变,需要重新加载数据 + */ + @Override + public void onRaftLeaderChanged() { + log.info("Partition service reload cache from rocksdb, due to leader change"); + try { + partitionMeta.reload(); + } catch 
+
+    /**
+     * A partition's state changed; propagate it to the graph and the cluster.
+     *
+     * @param graph
+     * @param partId
+     * @param state
+     */
+    public void onPartitionStateChanged(String graph, int partId,
+                                        Metapb.PartitionState state) throws PDException {
+        updatePartitionState(graph, partId, state);
+    }
+
+    /**
+     * A shard's state changed; propagate it to the partition, the graph and the cluster.
+     *
+     * @param graph
+     * @param partId
+     * @param state
+     */
+    public void onShardStateChanged(String graph, int partId, Metapb.PartitionState state) {
+
+    }
+
+    /**
+     * Send a rocksdb compaction message.
+     *
+     * @param partId
+     * @param tableName
+     */
+    public void fireDbCompaction(int partId, String tableName) {
+
+        try {
+            for (Metapb.Graph graph : getGraphs()) {
+                Metapb.Partition partition =
+                        partitionMeta.getPartitionById(graph.getGraphName(), partId);
+
+                DbCompaction dbCompaction = DbCompaction.newBuilder()
+                                                        .setTableName(tableName)
+                                                        .build();
+                instructionListeners.forEach(cmd -> {
+                    try {
+                        cmd.dbCompaction(partition, dbCompaction);
+                    } catch (Exception e) {
+                        log.error("fireDbCompaction", e);
+                    }
+                });
+            }
+        } catch (PDException e) {
+            log.error("fireDbCompaction", e);
+        }
+
+    }
+
+    public void updateShardGroupCache(Metapb.ShardGroup group) {
+        partitionMeta.getPartitionCache().updateShardGroup(group);
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java
new file mode 100644
index 0000000000..78ad1b6bf2
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import org.apache.hugegraph.pd.grpc.Metapb;
+
+/**
+ * Listener for partition status changes.
+ */
+public interface PartitionStatusListener {
+    void onPartitionChanged(Metapb.Partition partition, Metapb.Partition newPartition);
+
+    void onPartitionRemoved(Metapb.Partition partition);
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java
new file mode 100644
index 0000000000..4a0f3fef66
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+import org.apache.hugegraph.pd.meta.DiscoveryMetaStore;
+import org.apache.hugegraph.pd.meta.MetadataFactory;
+
+public class RegistryService {
+    private final PDConfig pdConfig;
+    private final DiscoveryMetaStore meta;
+
+    public RegistryService(PDConfig config) {
+        this.pdConfig = config;
+        meta = MetadataFactory.newDiscoveryMeta(config);
+    }
+
+    public void register(NodeInfo nodeInfo, int outTimes) throws PDException {
+        meta.register(nodeInfo, outTimes);
+    }
+
+    public NodeInfos getNodes(Query query) {
+        return meta.getNodes(query);
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java
new file mode 100644
index 0000000000..5f206291f3
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import org.apache.hugegraph.pd.grpc.Metapb;
+
+public interface ShardGroupStatusListener {
+    void onShardListChanged(Metapb.ShardGroup shardGroup, Metapb.ShardGroup newShardGroup);
+
+    void onShardListOp(Metapb.ShardGroup shardGroup);
+}
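RegistryService above is a thin facade over the discovery metadata store. A hypothetical caller might look like the sketch below; only register(...) and getNodes(...) are taken from the diff, while the wrapper name and the reading of outTimes (renewals before the entry expires) are assumptions.

    // Hypothetical usage sketch of RegistryService (NodeInfo/Query construction is
    // omitted, since their gRPC builder fields are not shown in this diff).
    static NodeInfos registerAndDiscover(RegistryService registry, NodeInfo self,
                                         Query query) throws PDException {
        registry.register(self, 3); // outTimes: assumed to mean expiry after N missed renewals
        return registry.getNodes(query);
    }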
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java
new file mode 100644
index 0000000000..e6a5ec1d7b
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.meta.MetadataKeyHelper;
+import org.springframework.stereotype.Service;
+
+import lombok.extern.slf4j.Slf4j;
+
+
+@Slf4j
+@Service
+public class StoreMonitorDataService {
+    private static final String MONITOR_DATA_PREFIX = "SMD";
+    private final PDConfig pdConfig;
+    private final KvService kvService;
+    /**
+     * The last timestamp of the store monitor data,
+     * used to determine the gap between store heartbeats.
+     */
+    private final Map<Long, Long> lastStoreStateTimestamp;
+
+
+    public StoreMonitorDataService(PDConfig pdConfig) {
+        this.pdConfig = pdConfig;
+        this.kvService = new KvService(pdConfig);
+        this.lastStoreStateTimestamp = new HashMap<>();
+    }
+
+    /**
+     * Save the store stats.
+     *
+     * @param storeStats
+     */
+    public void saveMonitorData(Metapb.StoreStats storeStats) throws PDException {
+        long storeId = storeStats.getStoreId();
+        /*
+         * load the latest store timestamp on startup or after a leader switch
+         */
+        if (!lastStoreStateTimestamp.containsKey(storeId)) {
+            long lastTimestamp = getLatestStoreMonitorDataTimeStamp(storeId);
+            log.debug("store id: {}, last timestamp: {}", storeId, lastTimestamp);
+            lastStoreStateTimestamp.put(storeId, lastTimestamp);
+        }
+
+        long current = System.currentTimeMillis() / 1000;
+        long interval = this.pdConfig.getStore().getMonitorInterval();
+
+        // the interval has elapsed
+        if (current - lastStoreStateTimestamp.getOrDefault(storeId, 0L) >= interval) {
+            saveMonitorDataToDb(storeStats, current);
+            log.debug("store id: {}, system info:{}", storeId,
+                      debugMonitorInfo(storeStats.getSystemMetricsList()));
+            lastStoreStateTimestamp.put(storeId, current);
+        }
+    }
+
+    /**
+     * Save a snapshot of the store status.
+     *
+     * @param storeStats store status
+     * @param ts timestamp in seconds
+     * @throws PDException
+     */
+    private void saveMonitorDataToDb(Metapb.StoreStats storeStats, long ts) throws PDException {
+        String key = getMonitorDataKey(storeStats.getStoreId(), ts);
+        log.debug("store id: {}, save monitor data, ts:{}, key: {}", storeStats.getStoreId(),
+                  ts, key);
+        kvService.put(key, extractMetricsFromStoreStatus(storeStats));
+    }
+
+    public String debugMonitorInfo(List<Metapb.RecordPair> systemInfo) {
+        StringBuilder sb = new StringBuilder();
+        sb.append("[");
+        for (Metapb.RecordPair pair : systemInfo) {
+            sb.append(pair.getKey());
+            sb.append(":");
+            sb.append(pair.getValue());
+            sb.append(",");
+        }
+        sb.append("]");
+        return sb.toString();
+    }
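+
+    /*
+     * Key layout produced by getMonitorDataKey(storeId, ts) further below:
+     *
+     *   "SMD" <delimiter> storeId <delimiter> ts        (ts in seconds)
+     *
+     * so fetching a time range is a plain lexicographic scan, e.g. (illustrative ids):
+     *
+     *   String delim = String.valueOf(MetadataKeyHelper.getDelimiter());
+     *   String key = "SMD" + delim + 1001L + delim + 1691366400L;
+     *   // kvService.scanRange(getMonitorDataKey(id, start), getMonitorDataKey(id, end))
+     */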
+
+    /**
+     * Get the historical monitor data of a store within the range (start, end).
+     *
+     * @param storeId store id
+     * @param start range start (seconds)
+     * @param end range end (seconds)
+     * @return monitor data keyed by storage key
+     */
+    public Map<String, String> getStoreMonitorData(long storeId, long start, long end)
+            throws PDException {
+        log.debug("get monitor data, store id:{}, start:{}, end:{}",
+                  storeId,
+                  getMonitorDataKey(storeId, start),
+                  getMonitorDataKey(storeId, end));
+        return kvService.scanRange(getMonitorDataKey(storeId, start),
+                                   getMonitorDataKey(storeId, end));
+    }
+
+    /**
+     * For the API service.
+     *
+     * @param storeId
+     * @return
+     * @throws PDException
+     */
+    public List<Map<String, Long>> getStoreMonitorData(long storeId) throws PDException {
+        List<Map<String, Long>> result = new LinkedList<>();
+        long current = System.currentTimeMillis() / 1000;
+        long start = current - this.pdConfig.getStore().getRetentionPeriod();
+
+        try {
+            for (Map.Entry<String, String> entry : getStoreMonitorData(storeId, start,
+                                                                       current).entrySet()) {
+                String[] arr =
+                        entry.getKey().split(String.valueOf(MetadataKeyHelper.getDelimiter()));
+                Map<String, Long> map = new HashMap<>();
+                long timestamp = Long.parseLong(arr[arr.length - 1]);
+                map.put("ts", timestamp);
+                for (String pair : entry.getValue().split(",")) {
+                    String[] p = pair.split(":");
+                    if (p.length == 2) {
+                        map.put(p[0], Long.parseLong(p[1]));
+                    }
+                }
+                result.add(map);
+            }
+            result.sort((o1, o2) -> o1.get("ts").compareTo(o2.get("ts")));
+        } catch (PDException e) {
+            log.error(e.getMessage());
+        }
+        return result;
+    }
+
+    /**
+     * For the API service: export as text.
+     *
+     * @param storeId
+     * @return
+     * @throws PDException
+     */
+    public String getStoreMonitorDataText(long storeId) throws PDException {
+
+        List<Map<String, Long>> result = getStoreMonitorData(storeId);
+        StringBuilder sb = new StringBuilder();
+        if (result.size() > 0) {
+            DateTimeFormatter dtf = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
+            Map<String, Long> lastRow = result.get(result.size() - 1);
+            List<String> columns = new ArrayList<>();
+            // construct the columns: ts + the sorted metric keys
+            columns.add("ts");
+            columns.addAll(lastRow.keySet().stream()
+                                  .filter(x -> !"ts".equals(x))
+                                  .sorted()
+                                  .collect(Collectors.toList()));
+            sb.append(String.join(",", columns).replace("\"", "")).append("\r\n");
+            for (Map<String, Long> row : result) {
+                for (String key : columns) {
+                    // each row: ts, then the remaining columns
+                    if ("ts".equals(key)) {
+                        // format the timestamp
+                        sb.append(dtf.format(
+                                LocalDateTime.ofInstant(Instant.ofEpochSecond(row.get(key)),
+                                                        ZoneId.systemDefault())));
+                        continue;
+                    } else {
+                        sb.append(",").append(row.getOrDefault(key, 0L));
+                    }
+                }
+                sb.append("\r\n");
+            }
+        }
+        return sb.toString();
+    }
+
+    /**
+     * Remove the monitor data of the store recorded before {@code till} (exclusive).
+     *
+     * @param storeId store id
+     * @param till expire time
+     * @return affected rows
+     */
+    public int removeExpiredMonitorData(long storeId, long till) throws PDException {
+        String keyStart = getMonitorDataKey(storeId, 1);
+        String keyEnd = getMonitorDataKey(storeId, till);
+        int records = 0;
+        for (String key : kvService.scanRange(keyStart, keyEnd).keySet()) {
+            kvService.delete(key);
+            log.debug("remove monitor data, key: {}", key);
+            records += 1;
+        }
+        return records;
+    }
+
+    /**
+     * Get the latest timestamp of the store monitor data.
+     *
+     * @param storeId
+     * @return timestamp (in seconds)
+     */
+    public long getLatestStoreMonitorDataTimeStamp(long storeId) {
+        long maxId = 0L;
+        long current = System.currentTimeMillis() / 1000;
+        long start = current - this.pdConfig.getStore().getMonitorInterval();
+        String keyStart = getMonitorDataKey(storeId, start);
+        String keyEnd = getMonitorDataKey(storeId, current);
+        try {
+            for (String key : kvService.scanRange(keyStart, keyEnd).keySet()) {
+                String[] arr = key.split(String.valueOf(MetadataKeyHelper.getDelimiter()));
+                maxId = Math.max(maxId, Long.parseLong(arr[arr.length - 1]));
+            }
+        } catch (PDException e) {
+            // no monitor data yet: fall through and return 0
+        }
+        return maxId;
+    }
+
+    private String getMonitorDataKey(long storeId, long ts) {
+        String builder = MONITOR_DATA_PREFIX +
+                         MetadataKeyHelper.getDelimiter() +
+                         storeId +
+                         MetadataKeyHelper.getDelimiter() +
+                         ts;
+        return builder;
+    }
+
+    private String extractMetricsFromStoreStatus(Metapb.StoreStats storeStats) {
+        List<String> list = new ArrayList<>();
+        for (Metapb.RecordPair pair : storeStats.getSystemMetricsList()) {
+            list.add("\"" + pair.getKey() + "\":" + pair.getValue());
+        }
+        return String.join(",", list);
+    }
+}
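A small usage sketch for the cleanup entry point above; the retention value and the wiring are assumptions (the scheduler that actually drives this appears later in TaskScheduleService):

    // Illustrative retention sweep over one store's monitor data.
    long nowSec = System.currentTimeMillis() / 1000;
    long retentionSec = 7 * 24 * 3600L;  // hypothetical retention period, in seconds
    // int removed = storeMonitorDataService.removeExpiredMonitorData(storeId, nowSec - retentionSec);
    // keys in [getMonitorDataKey(storeId, 1), getMonitorDataKey(storeId, nowSec - retentionSec)) are deleted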
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java
new file mode 100644
index 0000000000..eff6c4d0f7
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java
@@ -0,0 +1,1074 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Metapb.GraphMode;
+import org.apache.hugegraph.pd.grpc.Metapb.GraphModeReason;
+import org.apache.hugegraph.pd.grpc.Metapb.GraphState;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse;
+import org.apache.hugegraph.pd.grpc.pulse.ConfChangeType;
+import org.apache.hugegraph.pd.meta.MetadataFactory;
+import org.apache.hugegraph.pd.meta.MetadataKeyHelper;
+import org.apache.hugegraph.pd.meta.StoreInfoMeta;
+import org.apache.hugegraph.pd.meta.TaskInfoMeta;
+
+import com.google.gson.Gson;
+
+import lombok.extern.slf4j.Slf4j;
+
+
+/**
+ * HgStore registration and keep-alive management.
+ */
+@Slf4j
+public class StoreNodeService {
+
+    private static final Long STORE_HEART_BEAT_INTERVAL = 30000L;
+    private static final String graphSpaceConfPrefix = "HUGEGRAPH/hg/GRAPHSPACE/CONF/";
+    // store status listeners
+    private final List<StoreStatusListener> statusListeners;
+    private final List<ShardGroupStatusListener> shardGroupStatusListeners;
+    private final StoreInfoMeta storeInfoMeta;
+    private final TaskInfoMeta taskInfoMeta;
+    private final Random random = new Random(System.currentTimeMillis());
+    private final KvService kvService;
+    private final ConfigService configService;
+    private final PDConfig pdConfig;
+    private PartitionService partitionService;
+    private final Runnable quotaChecker = () -> {
+        try {
+            getQuota();
+        } catch (Exception e) {
+            log.error(
+                    "obtaining and sending graph space quota information with error: ",
+                    e);
+        }
+    };
+    private Metapb.ClusterStats clusterStats;
+
+    public StoreNodeService(PDConfig config) {
+        this.pdConfig = config;
+        storeInfoMeta = MetadataFactory.newStoreInfoMeta(pdConfig);
+        taskInfoMeta = MetadataFactory.newTaskInfoMeta(pdConfig);
+        shardGroupStatusListeners = Collections.synchronizedList(new ArrayList<>());
+        statusListeners = Collections.synchronizedList(new ArrayList<>());
+        clusterStats = Metapb.ClusterStats.newBuilder()
+                                          .setState(Metapb.ClusterState.Cluster_Not_Ready)
+                                          .setTimestamp(System.currentTimeMillis())
+                                          .build();
+        kvService = new KvService(pdConfig);
+        configService = new ConfigService(pdConfig);
+    }
+
+    public void init(PartitionService partitionService) {
+        this.partitionService = partitionService;
+        partitionService.addStatusListener(new PartitionStatusListener() {
+            @Override
+            public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) {
+                if (old != null && old.getState() != partition.getState()) {
+                    // the state changed, recompute the cluster state
+                    try {
+                        List<Metapb.Partition> partitions =
+                                partitionService.getPartitionById(partition.getId());
+                        Metapb.PartitionState state = Metapb.PartitionState.PState_Normal;
+                        for (Metapb.Partition pt : partitions) {
+                            if (pt.getState().getNumber() > state.getNumber()) {
+                                state = pt.getState();
+                            }
+                        }
+                        updateShardGroupState(partition.getId(), state);
+
+                        for (Metapb.ShardGroup group : getShardGroups()) {
+                            if (group.getState().getNumber() > state.getNumber()) {
+                                state = group.getState();
+                            }
+                        }
+                        updateClusterStatus(state);
+                    } catch (PDException e) {
+                        log.error("onPartitionChanged exception: ", e);
+                    }
+                }
+            }
+
+            @Override
+            public void onPartitionRemoved(Metapb.Partition partition) {
+
+            }
+        });
+    }
+
+    /**
+     * Whether the cluster is ready.
+     *
+     * @return
+     */
+    public boolean isOK() {
+        return this.clusterStats.getState().getNumber() <
+               Metapb.ClusterState.Cluster_Offline.getNumber();
+    }
+
+    /**
+     * Register a store and record its IP address; the first registration
+     * has to generate a new store id.
+     *
+     * @param store
+     */
+    public Metapb.Store register(Metapb.Store store) throws PDException {
+        if (store.getId() == 0) {
+            // initial registration: generate a new, unique id
+            store = newStoreNode(store);
+        }
+
+        if (!storeInfoMeta.storeExists(store.getId())) {
+            log.error("Store id {} does not belong to this PD, address = {}", store.getId(),
+                      store.getAddress());
+            // the store id does not exist, throw an exception
+            throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE,
+                                  String.format("Store id %d does not exist.", store.getId()));
+        }
+
+        // refuse the registration if the store state is Tombstone
+        Metapb.Store lastStore = storeInfoMeta.getStore(store.getId());
+        if (lastStore.getState() == Metapb.StoreState.Tombstone) {
+            log.error("Store id {} has been removed, please reinitialize, address = {}",
+                      store.getId(), store.getAddress());
+            // the store has been removed, throw an exception
+            throw new PDException(Pdpb.ErrorType.STORE_HAS_BEEN_REMOVED_VALUE,
+                                  String.format("Store id %d has been removed. %s", store.getId(),
+                                                store.getAddress()));
+        }
+
+        // Offline or Up, or contained in the initial store list: bring it up automatically
+        Metapb.StoreState storeState = lastStore.getState();
+        if (storeState == Metapb.StoreState.Offline || storeState == Metapb.StoreState.Up
+            || inInitialStoreList(store)) {
+            storeState = Metapb.StoreState.Up;
+        } else {
+            storeState = Metapb.StoreState.Pending;
+        }
+
+        store = Metapb.Store.newBuilder(lastStore)
+                            .setAddress(store.getAddress())
+                            .setRaftAddress(store.getRaftAddress())
+                            .setDataVersion(store.getDataVersion())
+                            .setDeployPath(store.getDeployPath())
+                            .setVersion(store.getVersion())
+                            .setDataPath(store.getDataPath())
+                            .setState(storeState).setCores(store.getCores())
+                            .clearLabels().addAllLabels(store.getLabelsList())
+                            .setLastHeartbeat(System.currentTimeMillis()).build();
+
+        long current = System.currentTimeMillis();
+        boolean raftChanged = false;
+        // the raft address of an online store has changed
+        if (!Objects.equals(lastStore.getRaftAddress(), store.getRaftAddress()) &&
+            storeState == Metapb.StoreState.Up) {
+            // the heartbeat gap is too short while raft changed: treat it as a duplicate store
+            if (current - lastStore.getLastHeartbeat() < STORE_HEART_BEAT_INTERVAL * 0.8) {
+                throw new PDException(Pdpb.ErrorType.STORE_PROHIBIT_DUPLICATE_VALUE,
+                                      String.format("Store id %d may be duplicate. addr: %s",
+                                                    store.getId(), store.getAddress()));
+            } else if (current - lastStore.getLastHeartbeat() > STORE_HEART_BEAT_INTERVAL * 1.2) {
+                // treat it as a genuine address change
+                raftChanged = true;
+            } else {
+                // wait for the next registration
+                return Metapb.Store.newBuilder(store).setId(0L).build();
+            }
+        }
+
+        // persist the store info
+        storeInfoMeta.updateStore(store);
+        if (storeState == Metapb.StoreState.Up) {
+            // refresh the store's keep-alive state
+            storeInfoMeta.keepStoreAlive(store);
+            onStoreStatusChanged(store, Metapb.StoreState.Offline, Metapb.StoreState.Up);
+            checkStoreStatus();
+        }
+
+        // send the change only after the store info has been saved
+        if (raftChanged) {
+            onStoreRaftAddressChanged(store);
+        }
+
+        log.info("Store register, id = {} {}", store.getId(), store);
+        return store;
+    }
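+
+    /*
+     * Worked numbers for the raft-address branch in register(...) above, with
+     * STORE_HEART_BEAT_INTERVAL = 30_000 ms:
+     *
+     *   gap = now - lastStore.getLastHeartbeat()
+     *   gap < 24_000 ms (0.8x)  -> likely a duplicate store, rejected with
+     *                              STORE_PROHIBIT_DUPLICATE
+     *   gap > 36_000 ms (1.2x)  -> accepted as a genuine raft address change
+     *   otherwise               -> ambiguous; id = 0 is returned so the store retries
+     */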
+
+    private boolean inInitialStoreList(Metapb.Store store) {
+        return this.pdConfig.getInitialStoreMap().containsKey(store.getAddress());
+    }
+
+    /**
+     * Create a new store object.
+     *
+     * @param store
+     * @return
+     * @throws PDException
+     */
+    private synchronized Metapb.Store newStoreNode(Metapb.Store store) throws PDException {
+        long id = random.nextLong() & Long.MAX_VALUE;
+        while (id == 0 || storeInfoMeta.storeExists(id)) {
+            id = random.nextLong() & Long.MAX_VALUE;
+        }
+        store = Metapb.Store.newBuilder(store)
+                            .setId(id)
+                            .setState(Metapb.StoreState.Pending)
+                            .setStartTimestamp(System.currentTimeMillis()).build();
+        storeInfoMeta.updateStore(store);
+        return store;
+    }
+
+    /**
+     * Return the store info by store id.
+     *
+     * @param id
+     * @return
+     * @throws PDException
+     */
+    public Metapb.Store getStore(long id) throws PDException {
+        Metapb.Store store = storeInfoMeta.getStore(id);
+        if (store == null) {
+            throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE,
+                                  String.format("Store id %x does not exist.", id));
+        }
+        return store;
+    }
+
+    /**
+     * Update the store info, detect store state changes and notify HugeStore.
+     */
+    public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDException {
+        log.info("updateStore storeId: {}, address: {}, state: {}", store.getId(),
+                 store.getAddress(), store.getState());
+        Metapb.Store lastStore = storeInfoMeta.getStore(store.getId());
+        if (lastStore == null) {
+            return null;
+        }
+        Metapb.Store.Builder builder =
+                Metapb.Store.newBuilder(lastStore).clearLabels().clearStats();
+        store = builder.mergeFrom(store).build();
+        if (store.getState() == Metapb.StoreState.Tombstone) {
+            List<Metapb.Store> activeStores = getStores();
+            if (lastStore.getState() == Metapb.StoreState.Up
+                && activeStores.size() - 1 < pdConfig.getMinStoreCount()) {
+                throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE,
+                                      "The number of active stores is less than " +
+                                      pdConfig.getMinStoreCount());
+            }
+        }
+
+        storeInfoMeta.updateStore(store);
+        if (store.getState() != Metapb.StoreState.Unknown &&
+            store.getState() != lastStore.getState()) {
+            // the intent is to take the store offline
+            if (store.getState() == Metapb.StoreState.Exiting) {
+                if (lastStore.getState() == Metapb.StoreState.Exiting) {
+                    // already exiting: nothing further to do
+                    return lastStore;
+                }
+
+                List<Metapb.Store> activeStores = this.getActiveStores();
+                Map<Long, Metapb.Store> storeMap = new HashMap<>();
+                activeStores.forEach(s -> {
+                    storeMap.put(s.getId(), s);
+                });
+                // if the store is already offline, remove it from the active set right away;
+                // if it is still online, keep it there until its state becomes Tombstone
+                if (!storeMap.containsKey(store.getId())) {
+                    log.info("updateStore removeActiveStores store {}", store.getId());
+                    storeInfoMeta.removeActiveStore(store);
+                }
+                storeTurnoff(store);
+            } else if (store.getState() == Metapb.StoreState.Offline) {
+                // monitoring detected the store went offline: remove it from the active set
+                storeInfoMeta.removeActiveStore(store);
+            } else if (store.getState() == Metapb.StoreState.Tombstone) {
+                // the state changed: the store was shut down, so update the
+                // shard groups and migrate the replicas
+                log.info("updateStore removeActiveStores store {}", store.getId());
+                storeInfoMeta.removeActiveStore(store);
+                // the store goes out of service
+                storeTurnoff(store);
+            } else if (store.getState() == Metapb.StoreState.Up) {
+                storeInfoMeta.keepStoreAlive(store);
+                checkStoreStatus();
+            }
+            onStoreStatusChanged(lastStore, lastStore.getState(), store.getState());
+        }
+        return store;
+    }
+
+    /**
+     * The store was shut down: reassign the shards of its shard groups.
+     *
+     * @param store
+     * @throws PDException
+     */
+    public synchronized void storeTurnoff(Metapb.Store store) throws PDException {
+        // iterate over the shard groups and reassign their shards
+        for (Metapb.ShardGroup group : getShardGroupsByStore(store.getId())) {
+            Metapb.ShardGroup.Builder builder = Metapb.ShardGroup.newBuilder(group);
+            builder.clearShards();
+            group.getShardsList().forEach(shard -> {
+                if (shard.getStoreId() != store.getId()) {
+                    builder.addShards(shard);
+                }
+            });
+            reallocShards(builder.build());
+        }
+    }
+
+    /**
+     * Return the stores of a graph; if graphName is empty, return all stores.
+     *
+     * @throws PDException
+     */
+    public List<Metapb.Store> getStores() throws PDException {
+        return storeInfoMeta.getStores(null);
+    }
+
+    public List<Metapb.Store> getStores(String graphName) throws PDException {
+        return storeInfoMeta.getStores(graphName);
+    }
+
+    public List<Metapb.Store> getStoreStatus(boolean isActive) throws PDException {
+        return storeInfoMeta.getStoreStatus(isActive);
+    }
+
+    public List<Metapb.ShardGroup> getShardGroups() throws PDException {
+        return storeInfoMeta.getShardGroups();
+    }
+
+    public Metapb.ShardGroup getShardGroup(int groupId) throws PDException {
+        return storeInfoMeta.getShardGroup(groupId);
+    }
+
+    public List<Metapb.Shard> getShardList(int groupId) throws PDException {
+        var shardGroup = getShardGroup(groupId);
+        if (shardGroup != null) {
+            return shardGroup.getShardsList();
+        }
+        return new ArrayList<>();
+    }
+
+    public List<Metapb.ShardGroup> getShardGroupsByStore(long storeId) throws PDException {
+        List<Metapb.ShardGroup> shardGroups = new ArrayList<>();
+        storeInfoMeta.getShardGroups().forEach(shardGroup -> {
+            shardGroup.getShardsList().forEach(shard -> {
+                if (shard.getStoreId() == storeId) {
+                    shardGroups.add(shardGroup);
+                }
+            });
+        });
+        return shardGroups;
+    }
+
+    /**
+     * Return the active stores.
+     *
+     * @param graphName
+     * @return
+     * @throws PDException
+     */
+    public List<Metapb.Store> getActiveStores(String graphName) throws PDException {
+        return storeInfoMeta.getActiveStores(graphName);
+    }
+
+    public List<Metapb.Store> getActiveStores() throws PDException {
+        return storeInfoMeta.getActiveStores();
+    }
+
+    public List<Metapb.Store> getTombStores() throws PDException {
+        List<Metapb.Store> stores = new ArrayList<>();
+        for (Metapb.Store store : this.getStores()) {
+            if (store.getState() == Metapb.StoreState.Tombstone) {
+                stores.add(store);
+            }
+        }
+        return stores;
+    }
+
+    public long removeStore(Long storeId) throws PDException {
+        return storeInfoMeta.removeStore(storeId);
+    }
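+
+    /*
+     * Distilled placement rule used by allocShards(...) below (counts illustrative):
+     *
+     *   int n = stores.size();
+     *   for (int groupId = 0; groupId < partitionCount; groupId++) {
+     *       int storeIdx = groupId % n;              // first replica placed by modulo
+     *       // replica i -> stores[(storeIdx + i) % n]; replica 0 becomes the Leader
+     *   }
+     */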
+
+    /**
+     * Assign stores to a partition; the graph's configuration decides how many
+     * peers to allocate. All shards are allocated at once and the ShardGroup
+     * objects are persisted (the store set does not change; this runs only once).
+     */
+    public synchronized List<Metapb.Shard> allocShards(Metapb.Graph graph, int partId)
+            throws PDException {
+        // multiple graphs share the raft groups, so shard allocation depends only
+        // on the partition id.
+        // A graph may choose its partition count based on data size, but the total
+        // must not exceed the number of raft groups.
+        if (storeInfoMeta.getShardGroup(partId) == null) {
+            // fetch the active stores
+            // and derive the store from the partition id
+            List<Metapb.Store> stores = storeInfoMeta.getActiveStores();
+
+            if (stores.size() == 0) {
+                throw new PDException(Pdpb.ErrorType.NO_ACTIVE_STORE_VALUE,
+                                      "There is no online store");
+            }
+
+            if (stores.size() < pdConfig.getMinStoreCount()) {
+                throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE,
+                                      "The number of active stores is less than " +
+                                      pdConfig.getMinStoreCount());
+            }
+
+            int shardCount = pdConfig.getPartition().getShardCount();
+            shardCount = Math.min(shardCount, stores.size());
+            // two shards cannot elect a leader,
+            // and the count must not be 0
+            if (shardCount == 2 || shardCount < 1) {
+                shardCount = 1;
+            }
+
+            // create all ShardGroups in one pass, so the initial group ids are
+            // ordered and easy to read
+            for (int groupId = 0; groupId < pdConfig.getConfigService().getPartitionCount();
+                 groupId++) {
+                int storeIdx = groupId % stores.size(); // placement rule, simplified to modulo
+                List<Metapb.Shard> shards = new ArrayList<>();
+                for (int i = 0; i < shardCount; i++) {
+                    Metapb.Shard shard =
+                            Metapb.Shard.newBuilder().setStoreId(stores.get(storeIdx).getId())
+                                        .setRole(i == 0 ? Metapb.ShardRole.Leader :
+                                                 Metapb.ShardRole.Follower)
+                                        .build();
+                    shards.add(shard);
+                    storeIdx = (storeIdx + 1) >= stores.size() ? 0 : ++storeIdx; // pick sequentially
+                }
+
+                Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder()
+                                                           .setId(groupId)
+                                                           .setState(
+                                                                   Metapb.PartitionState.PState_Normal)
+                                                           .addAllShards(shards).build();
+
+                // new group
+                storeInfoMeta.updateShardGroup(group);
+                partitionService.updateShardGroupCache(group);
+                onShardGroupStatusChanged(group, group);
+                log.info("alloc shard group: id {}", groupId);
+            }
+        }
+        return storeInfoMeta.getShardGroup(partId).getShardsList();
+    }
+
+    /**
+     * Reassign shards according to the graph's shard_count and send the
+     * change-shard instruction.
+     */
+    public synchronized List<Metapb.Shard> reallocShards(Metapb.ShardGroup shardGroup)
+            throws PDException {
+        List<Metapb.Store> stores = storeInfoMeta.getActiveStores();
+
+        if (stores.size() == 0) {
+            throw new PDException(Pdpb.ErrorType.NO_ACTIVE_STORE_VALUE,
+                                  "There is no online store");
+        }
+
+        if (stores.size() < pdConfig.getMinStoreCount()) {
+            throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE,
+                                  "The number of active stores is less than " +
+                                  pdConfig.getMinStoreCount());
+        }
+
+        int shardCount = pdConfig.getPartition().getShardCount();
+        shardCount = Math.min(shardCount, stores.size());
+        if (shardCount == 2 || shardCount < 1) {
+            // two shards cannot elect a leader,
+            // and the count must not be 0
+            shardCount = 1;
+        }
+
+        List<Metapb.Shard> shards = new ArrayList<>();
+        shards.addAll(shardGroup.getShardsList());
+
+        if (shardCount > shards.size()) {
+            // shards need to be added
+            log.info("reallocShards ShardGroup {}, add shards from {} to {}",
+                     shardGroup.getId(), shards.size(), shardCount);
+            int storeIdx = shardGroup.getId() % stores.size(); // placement rule, simplified to modulo
+            for (int addCount = shardCount - shards.size(); addCount > 0; ) {
+                // skip stores that already hold a shard
+                if (!isStoreInShards(shards, stores.get(storeIdx).getId())) {
+                    Metapb.Shard shard = Metapb.Shard.newBuilder()
+                                                     .setStoreId(stores.get(storeIdx).getId())
+                                                     .build();
+                    shards.add(shard);
+                    addCount--;
+                }
+                storeIdx = (storeIdx + 1) >= stores.size() ?
+                           0 : ++storeIdx; // pick sequentially
+            }
+        } else if (shardCount < shards.size()) {
+            // shards need to be removed
+            log.info("reallocShards ShardGroup {}, remove shards from {} to {}",
+                     shardGroup.getId(), shards.size(), shardCount);
+
+            int subCount = shards.size() - shardCount;
+            Iterator<Metapb.Shard> iterator = shards.iterator();
+            while (iterator.hasNext() && subCount > 0) {
+                if (iterator.next().getRole() != Metapb.ShardRole.Leader) {
+                    iterator.remove();
+                    subCount--;
+                }
+            }
+        } else {
+            return shards;
+        }
+
+        Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder(shardGroup)
+                                                   .clearShards()
+                                                   .addAllShards(shards).build();
+        storeInfoMeta.updateShardGroup(group);
+        partitionService.updateShardGroupCache(group);
+        // change shard group
+        onShardGroupStatusChanged(shardGroup, group);
+
+        var partitions = partitionService.getPartitionById(shardGroup.getId());
+        if (partitions.size() > 0) {
+            // send one message only; changing shards is independent of the partition/graph
+            partitionService.fireChangeShard(partitions.get(0), shards,
+                                             ConfChangeType.CONF_CHANGE_TYPE_ADJUST);
+        }
+
+        log.info("reallocShards ShardGroup {}, shards: {}", group.getId(), group.getShardsList());
+        return shards;
+    }
+
+    /**
+     * Allocate group shards according to the partition counts.
+     *
+     * @param groups list of (partition id, count)
+     * @return total groups
+     */
+    public synchronized int splitShardGroups(List<KVPair<Integer, Integer>> groups)
+            throws PDException {
+        int sum = groups.stream().map(pair -> pair.getValue()).reduce(0, Integer::sum);
+        // the shard groups would become too large
+        if (sum > getActiveStores().size() * pdConfig.getPartition().getMaxShardsPerStore()) {
+            throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE,
+                                  "can't satisfy target shard group count");
+        }
+
+        partitionService.splitPartition(groups);
+
+        return sum;
+    }
+
+    /**
+     * Check whether the store already holds one of the given shards.
+     */
+    private boolean isStoreInShards(List<Metapb.Shard> shards, long storeId) {
+        AtomicBoolean exist = new AtomicBoolean(false);
+        shards.forEach(s -> {
+            if (s.getStoreId() == storeId) {
+                exist.set(true);
+            }
+        });
+        return exist.get();
+    }
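+
+    /*
+     * Worked numbers for the capacity guard in splitShardGroups(...) above:
+     *
+     *   requested = 4 + 4 + 4 = 12     // hypothetical (partition id, count) pairs
+     *   activeStores = 3, maxShardsPerStore = 4
+     *   12 > 3 * 4 is false            -> the split is accepted; a total of 13
+     *                                     would be rejected with
+     *                                     Too_Many_Partitions_Per_Store
+     */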
+
+    /**
+     * Update the shard group and the cache, and send the shard-group change message.
+     *
+     * @param groupId shard group id
+     * @param shards shard list
+     * @param version term version, ignored if less than 0
+     * @param confVersion conf version, ignored if less than 0
+     * @return
+     */
+    public synchronized Metapb.ShardGroup updateShardGroup(int groupId, List<Metapb.Shard> shards,
+                                                           long version, long confVersion)
+            throws PDException {
+        Metapb.ShardGroup group = this.storeInfoMeta.getShardGroup(groupId);
+
+        if (group == null) {
+            return null;
+        }
+
+        var builder = Metapb.ShardGroup.newBuilder(group);
+        if (version >= 0) {
+            builder.setVersion(version);
+        }
+
+        if (confVersion >= 0) {
+            builder.setConfVer(confVersion);
+        }
+
+        var newGroup = builder.clearShards().addAllShards(shards).build();
+
+        storeInfoMeta.updateShardGroup(newGroup);
+        partitionService.updateShardGroupCache(newGroup);
+        onShardGroupStatusChanged(group, newGroup);
+        log.info("Raft {} updateShardGroup {}", groupId, newGroup);
+        return group;
+    }
+
+    /**
+     * Tell the stores to rebuild the shard group.
+     *
+     * @param groupId raft group id
+     * @param shards shard list: if empty, the corresponding partition engine is deleted
+     */
+    public void shardGroupOp(int groupId, List<Metapb.Shard> shards) throws PDException {
+
+        var shardGroup = getShardGroup(groupId);
+
+        if (shardGroup == null) {
+            return;
+        }
+
+        var newGroup = shardGroup.toBuilder().clearShards().addAllShards(shards).build();
+        if (shards.size() == 0) {
+            var partitions = partitionService.getPartitionById(groupId);
+            for (var partition : partitions) {
+                partitionService.removePartition(partition.getGraphName(), groupId);
+            }
+            deleteShardGroup(groupId);
+        }
+
+        onShardGroupOp(newGroup);
+    }
+
+    /**
+     * Delete the shard group.
+     *
+     * @param groupId shard group id
+     */
+    public synchronized void deleteShardGroup(int groupId) throws PDException {
+        Metapb.ShardGroup group = this.storeInfoMeta.getShardGroup(groupId);
+        if (group != null) {
+            storeInfoMeta.deleteShardGroup(groupId);
+        }
+
+        onShardGroupStatusChanged(group, null);
+
+        // fix the partition count recorded on the config server
+        // (it may have shrunk after a partition merge)
+        var shardGroups = getShardGroups();
+        if (shardGroups != null) {
+            var count1 = pdConfig.getConfigService().getPDConfig().getPartitionCount();
+            var maxGroupId =
+                    getShardGroups().stream().map(Metapb.ShardGroup::getId).max(Integer::compareTo);
+            if (maxGroupId.get() < count1) {
+                pdConfig.getConfigService().setPartitionCount(maxGroupId.get() + 1);
+            }
+        }
+    }
+
+    public synchronized void updateShardGroupState(int groupId, Metapb.PartitionState state)
+            throws PDException {
+        Metapb.ShardGroup shardGroup = storeInfoMeta.getShardGroup(groupId)
+                                                    .toBuilder()
+                                                    .setState(state).build();
+        storeInfoMeta.updateShardGroup(shardGroup);
+        partitionService.updateShardGroupCache(shardGroup);
+    }
+
+    /**
+     * Handle a store heartbeat.
+     *
+     * @param storeStats
+     * @throws PDException
+     */
+    public Metapb.ClusterStats heartBeat(Metapb.StoreStats storeStats) throws PDException {
+        this.storeInfoMeta.updateStoreStats(storeStats);
+        Metapb.Store lastStore = this.getStore(storeStats.getStoreId());
+        if (lastStore == null) {
+            // the store does not exist
+            throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE,
+                                  String.format("Store id %d does not exist.",
+                                                storeStats.getStoreId()));
+        }
+        if (lastStore.getState() == Metapb.StoreState.Tombstone) {
+            throw new PDException(Pdpb.ErrorType.STORE_HAS_BEEN_REMOVED_VALUE,
+                                  String.format(
+                                          "Store id %d is useless since its state is Tombstone",
+                                          storeStats.getStoreId()));
+        }
+        Metapb.Store nowStore;
+        // the store is being decommissioned
+        if (lastStore.getState() == Metapb.StoreState.Exiting) {
+            List<Metapb.Store> activeStores = this.getActiveStores();
+            Map<Long, Metapb.Store> storeMap = new HashMap<>();
+            activeStores.forEach(store -> {
+                storeMap.put(store.getId(), store);
+            });
+            // an exiting store with 0 partitions has finished migrating and may go
+            // offline; a non-zero count means migration is still running, so wait
+            if (storeStats.getPartitionCount() > 0 &&
+                storeMap.containsKey(storeStats.getStoreId())) {
+                nowStore = Metapb.Store.newBuilder(lastStore)
+                                       .setStats(storeStats)
+                                       .setLastHeartbeat(System.currentTimeMillis())
+                                       .setState(Metapb.StoreState.Exiting).build();
+                this.storeInfoMeta.updateStore(nowStore);
+                return this.clusterStats;
+            } else {
+                nowStore = Metapb.Store.newBuilder(lastStore)
+                                       .setStats(storeStats)
+                                       .setLastHeartbeat(System.currentTimeMillis())
+                                       .setState(Metapb.StoreState.Tombstone).build();
+                this.storeInfoMeta.updateStore(nowStore);
+                storeInfoMeta.removeActiveStore(nowStore);
+                return this.clusterStats;
+            }
+        }
+
+        if (lastStore.getState() == Metapb.StoreState.Pending) {
+            nowStore = Metapb.Store.newBuilder(lastStore)
+                                   .setStats(storeStats)
+                                   .setLastHeartbeat(System.currentTimeMillis())
+                                   .setState(Metapb.StoreState.Pending).build();
+            this.storeInfoMeta.updateStore(nowStore);
+            return this.clusterStats;
+        } else {
+            if (lastStore.getState() == Metapb.StoreState.Offline) {
+                this.updateStore(
+                        Metapb.Store.newBuilder(lastStore).setState(Metapb.StoreState.Up).build());
+            }
+            nowStore = Metapb.Store.newBuilder(lastStore)
+                                   .setState(Metapb.StoreState.Up)
+                                   .setStats(storeStats)
+                                   .setLastHeartbeat(System.currentTimeMillis()).build();
+            this.storeInfoMeta.updateStore(nowStore);
+            this.storeInfoMeta.keepStoreAlive(nowStore);
+            this.checkStoreStatus();
+            return this.clusterStats;
+        }
+    }
+
+    public synchronized Metapb.ClusterStats updateClusterStatus(Metapb.ClusterState state) {
+        this.clusterStats = clusterStats.toBuilder().setState(state).build();
+        return this.clusterStats;
+    }
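+
+    /*
+     * The Exiting branch in heartBeat(...) above reduces to this decision
+     * (sketch; activeStoreIds stands in for the storeMap built there):
+     *
+     *   boolean migrationDone = storeStats.getPartitionCount() == 0
+     *                           || !activeStoreIds.contains(storeStats.getStoreId());
+     *   // next state: migrationDone ? Tombstone (and leave the active set) : Exiting
+     */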
+
+    public Metapb.ClusterStats updateClusterStatus(Metapb.PartitionState state) {
+        Metapb.ClusterState cstate = Metapb.ClusterState.Cluster_OK;
+        switch (state) {
+            case PState_Normal:
+                cstate = Metapb.ClusterState.Cluster_OK;
+                break;
+            case PState_Warn:
+                cstate = Metapb.ClusterState.Cluster_Warn;
+                break;
+            case PState_Fault:
+                cstate = Metapb.ClusterState.Cluster_Fault;
+                break;
+            case PState_Offline:
+                cstate = Metapb.ClusterState.Cluster_Offline;
+                break;
+        }
+        return updateClusterStatus(cstate);
+    }
+
+    public Metapb.ClusterStats getClusterStats() {
+        return this.clusterStats;
+    }
+
+    /**
+     * Check the cluster health:
+     * whether the number of active machines exceeds the minimum threshold,
+     * and whether more than half of each partition's shards are online.
+     */
+    public synchronized void checkStoreStatus() {
+        Metapb.ClusterStats.Builder builder = Metapb.ClusterStats.newBuilder()
+                                                                 .setState(
+                                                                         Metapb.ClusterState.Cluster_OK);
+        try {
+            List<Metapb.Store> activeStores = this.getActiveStores();
+            if (activeStores.size() < pdConfig.getMinStoreCount()) {
+                builder.setState(Metapb.ClusterState.Cluster_Not_Ready);
+                builder.setMessage("The number of active stores is " + activeStores.size() +
+                                   ", less than pd.initial-store-count:" +
+                                   pdConfig.getMinStoreCount());
+            }
+            Map<Long, Metapb.Store> storeMap = new HashMap<>();
+            activeStores.forEach(store -> {
+                storeMap.put(store.getId(), store);
+            });
+
+            if (builder.getState() == Metapb.ClusterState.Cluster_OK) {
+                // check that more than half of each partition's shards are online
+                for (Metapb.ShardGroup group : this.getShardGroups()) {
+                    int count = 0;
+                    for (Metapb.Shard shard : group.getShardsList()) {
+                        count += storeMap.containsKey(shard.getStoreId()) ? 1 : 0;
+                    }
+                    if (count * 2 < group.getShardsList().size()) {
+                        builder.setState(Metapb.ClusterState.Cluster_Not_Ready);
+                        builder.setMessage(
+                                "Less than half of active shard, partitionId is " + group.getId());
+                        break;
+                    }
+                }
+            }
+
+        } catch (PDException e) {
+            log.error("StoreNodeService updateClusterStatus exception: ", e);
+        }
+        this.clusterStats = builder.setTimestamp(System.currentTimeMillis()).build();
+        if (this.clusterStats.getState() != Metapb.ClusterState.Cluster_OK) {
+            log.error("The cluster is not ready, {}", this.clusterStats);
+        }
+    }
+
+    public void addStatusListener(StoreStatusListener listener) {
+        statusListeners.add(listener);
+    }
+
+    protected void onStoreRaftAddressChanged(Metapb.Store store) {
+        log.info("onStoreRaftAddressChanged storeId = {}, new raft addr: {}", store.getId(),
+                 store.getRaftAddress());
+        statusListeners.forEach(e -> {
+            e.onStoreRaftChanged(store);
+        });
+    }
+
+    public void addShardGroupStatusListener(ShardGroupStatusListener listener) {
+        shardGroupStatusListeners.add(listener);
+    }
+
+    protected void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old,
+                                        Metapb.StoreState stats) {
+        log.info("onStoreStatusChanged storeId = {} from {} to {}", store.getId(), old, stats);
+        statusListeners.forEach(e -> {
+            e.onStoreStatusChanged(store, old, stats);
+        });
+    }
+
+    protected void onShardGroupStatusChanged(Metapb.ShardGroup group, Metapb.ShardGroup newGroup) {
+        log.info("onShardGroupStatusChanged, groupId: {}, from {} to {}", group.getId(), group,
+                 newGroup);
+        shardGroupStatusListeners.forEach(e -> e.onShardListChanged(group, newGroup));
+    }
+
+    protected void onShardGroupOp(Metapb.ShardGroup shardGroup) {
+        log.info("onShardGroupOp, group id: {}, shard group:{}", shardGroup.getId(), shardGroup);
+        shardGroupStatusListeners.forEach(e -> e.onShardListOp(shardGroup));
+    }
+
+    /**
+     * Check whether the current store may go offline:
+     * not if the number of active machines would drop below the minimum
+     * threshold, and not if any partition would lose its online-shard majority.
+     */
+    public boolean checkStoreCanOffline(Metapb.Store currentStore) {
+        try {
+            long currentStoreId = currentStore.getId();
+            List<Metapb.Store> activeStores = this.getActiveStores();
+            Map<Long, Metapb.Store> storeMap = new HashMap<>();
+            activeStores.forEach(store -> {
+                if (store.getId() != currentStoreId) {
+                    storeMap.put(store.getId(), store);
+                }
+            });
+
+            if (storeMap.size() < pdConfig.getMinStoreCount()) {
+                return false;
+            }
+
+            // check that more than half of each partition's shards stay online
+            for (Metapb.ShardGroup group : this.getShardGroups()) {
+                int count = 0;
+                for (Metapb.Shard shard : group.getShardsList()) {
+                    long storeId = shard.getStoreId();
+                    count += storeMap.containsKey(storeId) ? 1 : 0;
+                }
+                if (count * 2 < group.getShardsList().size()) {
+                    return false;
+                }
+            }
+        } catch (PDException e) {
+            log.error("StoreNodeService checkStoreCanOffline exception: ", e);
+            return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * Run a rocksdb compaction on the stores.
+     *
+     * @param groupId
+     * @param tableName
+     */
+    public synchronized void shardGroupsDbCompaction(int groupId, String tableName)
+            throws PDException {
+
+        // tell all the stores to compact rocksdb
+        partitionService.fireDbCompaction(groupId, tableName);
+        // TODO: how should exceptions be handled here?
+    }
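+
+    /*
+     * Units in getQuota() below: storage_limit is read from the graph-space config
+     * and multiplied by 1024L * 1024L before the comparison, i.e. (worked numbers):
+     *
+     *   storage_limit = 10          -> limitByLong = 10_485_760 bytes
+     *   used (sum of the leaders' GraphStats approximate sizes) = 12_000_000 bytes
+     *   12_000_000 > 10_485_760     -> the graph space flips to GraphMode.ReadOnly
+     */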
+
+    public Map<String, Boolean> getQuota() throws PDException {
+        List<Metapb.Graph> graphs = partitionService.getGraphs();
+        String delimiter = String.valueOf(MetadataKeyHelper.DELIMITER);
+        HashMap<String, Long> storages = new HashMap<>();
+        for (Metapb.Graph g : graphs) {
+            String graphName = g.getGraphName();
+            String[] splits = graphName.split(delimiter);
+            if (!graphName.endsWith("/g") || splits.length < 2) {
+                continue;
+            }
+            String graphSpace = splits[0];
+            storages.putIfAbsent(graphSpace, 0L);
+            List<Metapb.Store> stores = getStores(graphName);
+            long dataSize = 0;
+            for (Metapb.Store store : stores) {
+                List<Metapb.GraphStats> gss = store.getStats()
+                                                   .getGraphStatsList();
+                for (Metapb.GraphStats gs : gss) {
+                    boolean nameEqual = graphName.equals(gs.getGraphName());
+                    boolean roleEqual = Metapb.ShardRole.Leader.equals(gs.getRole());
+                    if (nameEqual && roleEqual) {
+                        dataSize += gs.getApproximateSize();
+                    }
+                }
+            }
+            Long size = storages.get(graphSpace);
+            size += dataSize;
+            storages.put(graphSpace, size);
+
+        }
+        Metapb.GraphSpace.Builder spaceBuilder = Metapb.GraphSpace.newBuilder();
+        HashMap<String, Boolean> limits = new HashMap<>();
+        for (Map.Entry<String, Long> item : storages.entrySet()) {
+            String spaceName = item.getKey();
+            String value = kvService.get(graphSpaceConfPrefix + spaceName);
+            if (!StringUtils.isEmpty(value)) {
+                HashMap config = new Gson().fromJson(value, HashMap.class);
+                Long size = item.getValue();
+                int limit = ((Double) config.get("storage_limit")).intValue();
+                long limitByLong = limit * 1024L * 1024L;
+                try {
+                    spaceBuilder.setName(spaceName).setStorageLimit(limitByLong).setUsedSize(size);
+                    Metapb.GraphSpace graphSpace = spaceBuilder.build();
+                    configService.setGraphSpace(graphSpace);
+                } catch (Exception e) {
+                    log.error("update graph space with error:", e);
+                }
+                // compare in bytes: the configured limit * 1024L * 1024L
+                if (size > limitByLong) {
+                    limits.put(spaceName, true);
+                    continue;
+                }
+            }
+            limits.put(spaceName, false);
+
+        }
+        GraphState.Builder stateBuilder = GraphState.newBuilder()
+                                                    .setMode(GraphMode.ReadOnly)
+                                                    .setReason(GraphModeReason.Quota);
+        for (Metapb.Graph g : graphs) {
+            String graphName = g.getGraphName();
+            String[] splits = graphName.split(delimiter);
+            if (!graphName.endsWith("/g") || splits.length < 2) {
+                continue;
+            }
+            String graphSpace = splits[0];
+            Metapb.GraphState gsOld = g.getGraphState();
+            GraphMode gmOld = gsOld != null ? gsOld.getMode() : GraphMode.ReadWrite;
+            GraphMode gmNew = limits.get(graphSpace) ? GraphMode.ReadOnly : GraphMode.ReadWrite;
+            if (gmOld == null || gmOld.getNumber() != gmNew.getNumber()) {
+                stateBuilder.setMode(gmNew);
+                if (gmNew.getNumber() == GraphMode.ReadOnly.getNumber()) {
+                    stateBuilder.setReason(GraphModeReason.Quota);
+                }
+                GraphState gsNew = stateBuilder.build();
+                Metapb.Graph newGraph = g.toBuilder().setGraphState(gsNew)
+                                         .build();
+                partitionService.updateGraph(newGraph);
+                statusListeners.forEach(listener -> {
+                    listener.onGraphChange(newGraph, gsOld, gsNew);
+                });
+            }
+        }
+
+        return limits;
+    }
+
+    public Runnable getQuotaChecker() {
+        return quotaChecker;
+    }
+
+    public TaskInfoMeta getTaskInfoMeta() {
+        return taskInfoMeta;
+    }
+
+    public StoreInfoMeta getStoreInfoMeta() {
+        return storeInfoMeta;
+    }
+
+    /**
+     * Get the leader of a partition.
+     *
+     * @param partition
+     * @param initIdx
+     * @return
+     */
+    public Metapb.Shard getLeader(Metapb.Partition partition, int initIdx) {
+        Metapb.Shard leader = null;
+        try {
+            var shardGroup = this.getShardGroup(partition.getId());
+            for (Metapb.Shard shard : shardGroup.getShardsList()) {
+                if (shard.getRole() == Metapb.ShardRole.Leader) {
+                    leader = shard;
+                }
+            }
+        } catch (Exception e) {
+            log.error("get leader error: group id:{}, error: {}",
+                      partition.getId(), e.getMessage());
+        }
+        return leader;
+    }
+
+    public CacheResponse getCache() throws PDException {
+
+        List<Metapb.Store> stores = getStores();
+        List<Metapb.ShardGroup> groups = getShardGroups();
+        List<Metapb.Graph> graphs = partitionService.getGraphs();
+        CacheResponse cache = CacheResponse.newBuilder().addAllGraphs(graphs)
+                                           .addAllShards(groups)
+                                           .addAllStores(stores)
+                                           .build();
+        return cache;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java
new file mode 100644
index 0000000000..68042ac083
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import org.apache.hugegraph.pd.grpc.Metapb;
+
+public interface StoreStatusListener {
+
+    void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old,
+                              Metapb.StoreState status);
+
+    void onGraphChange(Metapb.Graph graph, Metapb.GraphState stateOld,
+                       Metapb.GraphState stateNew);
+
+    void onStoreRaftChanged(Metapb.Store store);
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java
new file mode 100644
index 0000000000..5fd1005b1c
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java
@@ -0,0 +1,853 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.PriorityQueue;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.MetaTask;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.meta.TaskInfoMeta;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+
+import lombok.extern.slf4j.Slf4j;
+
+
+/**
+ * Task scheduling service: periodically checks the state of stores, resources
+ * and partitions, migrates data in time and handles faulty nodes.
+ * 1. monitor whether stores go offline
+ * 2. monitor whether partition replicas are correct
+ * 3. monitor whether the partitions' working mode is correct
+ * 4. monitor whether partitions need to split, and whether a split has finished
+ */
+@Slf4j
+public class TaskScheduleService {
+    private static final String BALANCE_SHARD_KEY = "BALANCE_SHARD_KEY";
+    // dynamic rebalancing may start only 30 minutes after a store goes offline
+    private final long TurnOffAndBalanceInterval = 30 * 60 * 1000;
+    private final long BalanceLeaderInterval = 30 * 1000; // interval between leader-balance rounds
+    private final PDConfig pdConfig;
+    private final long clusterStartTime;
+    private final StoreNodeService storeService;
+    private final PartitionService partitionService;
+    private final ScheduledExecutorService executor;
+    private final TaskInfoMeta taskInfoMeta;
+    private final StoreMonitorDataService storeMonitorDataService;
+    private final KvService kvService;
+    private final LogService logService;
+    // sort by value first, then by key
+    private final Comparator<KVPair<Long, Integer>> kvPairComparatorAsc = (o1, o2) -> {
+        if (o1.getValue().equals(o2.getValue())) {
+            return o1.getKey().compareTo(o2.getKey());
+        }
+        return
+               o1.getValue().compareTo(o2.getValue());
+    };
+    // sort by value first (descending), then by key
+    private final Comparator<KVPair<Long, Integer>> kvPairComparatorDesc = (o1, o2) -> {
+        if (o1.getValue().equals(o2.getValue())) {
+            return o2.getKey().compareTo(o1.getKey());
+        }
+        return o2.getValue().compareTo(o1.getValue());
+    };
+    private long lastStoreTurnoffTime = 0;
+    private long lastBalanceLeaderTime = 0;
+
+
+    public TaskScheduleService(PDConfig config, StoreNodeService storeService,
+                               PartitionService partitionService) {
+        this.pdConfig = config;
+        this.storeService = storeService;
+        this.partitionService = partitionService;
+        this.taskInfoMeta = new TaskInfoMeta(config);
+        this.logService = new LogService(pdConfig);
+        this.storeMonitorDataService = new StoreMonitorDataService(pdConfig);
+        this.clusterStartTime = System.currentTimeMillis();
+        this.kvService = new KvService(pdConfig);
+        this.executor = new ScheduledThreadPoolExecutor(16);
+    }
+
+    public void init() {
+        executor.scheduleWithFixedDelay(() -> {
+            try {
+                patrolStores();
+            } catch (Throwable e) {
+                log.error("patrolStores exception: ", e);
+            }
+
+        }, 60, 60, TimeUnit.SECONDS);
+        executor.scheduleWithFixedDelay(() -> {
+            try {
+                patrolPartitions();
+                balancePartitionLeader(false);
+                balancePartitionShard();
+            } catch (Throwable e) {
+                log.error("patrolPartitions exception: ", e);
+            }
+        }, pdConfig.getPatrolInterval(), pdConfig.getPatrolInterval(), TimeUnit.SECONDS);
+        executor.scheduleWithFixedDelay(() -> {
+            if (isLeader()) {
+                kvService.clearTTLData();
+            }
+        }, 1000, 1000, TimeUnit.MILLISECONDS);
+        executor.scheduleWithFixedDelay(
+                () -> {
+                    if (isLeader()) {
+                        storeService.getQuotaChecker().run();
+                    }
+                }, 2, 30,
+                TimeUnit.SECONDS);
+        // clean expired monitor data every 10 minutes, with a 3-minute delay
+        if (isLeader() && this.pdConfig.getStore().isMonitorDataEnabled()) {
+            executor.scheduleAtFixedRate(() -> {
+                Long expTill = System.currentTimeMillis() / 1000 -
+                               this.pdConfig.getStore().getRetentionPeriod();
+                log.debug("monitor data keys before " + expTill + " will be deleted");
+                int records = 0;
+                try {
+                    for (Metapb.Store store : storeService.getStores()) {
+                        int cnt =
+                                this.storeMonitorDataService.removeExpiredMonitorData(store.getId(),
+                                                                                      expTill);
+                        log.debug("store id :{}, records:{}", store.getId(), cnt);
+                        records += cnt;
+                    }
+                } catch (PDException e) {
+                    throw new RuntimeException(e);
+                }
+                log.debug(String.format("%d records have been deleted", records));
+            }, 180, 600, TimeUnit.SECONDS);
+        }
+
+        storeService.addStatusListener(new StoreStatusListener() {
+            @Override
+            public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old,
+                                             Metapb.StoreState status) {
+                if (status == Metapb.StoreState.Tombstone) {
+                    lastStoreTurnoffTime = System.currentTimeMillis();
+                }
+
+                if (status == Metapb.StoreState.Up) {
+                    executor.schedule(() -> {
+                        try {
+                            // after a store comes back up, balance leaders after a delay
+                            balancePartitionLeader(false);
+                        } catch (PDException e) {
+                            log.error("balancePartitionLeader exception: ", e);
+                        }
+                    }, BalanceLeaderInterval, TimeUnit.MILLISECONDS);
+
+                }
+            }
+
+            @Override
+            public void onGraphChange(Metapb.Graph graph,
+                                      Metapb.GraphState stateOld,
+                                      Metapb.GraphState stateNew) {
+
+            }
+
+            @Override
+            public void onStoreRaftChanged(Metapb.Store store) {
+
+            }
+        });
+    }
+
+    public void shutDown() {
+        executor.shutdownNow();
+    }
+
+    private boolean isLeader() {
+        return RaftEngine.getInstance().isLeader();
+    }
+
+    /**
+     * Patrol all stores, checking whether they are online and have enough
+     * storage space.
+     */
+    public List<Metapb.Store> patrolStores() throws PDException {
+        if (!isLeader()) {
+            return null;
+        }
+
+        List<Metapb.Store> changedStores = new ArrayList<>();
+        // check store liveness
+        List<Metapb.Store> stores = storeService.getStores("");
+        Map<Long, Metapb.Store> activeStores = storeService.getActiveStores("")
+                                                           .stream().collect(
+                        Collectors.toMap(Metapb.Store::getId, t -> t));
+        for (Metapb.Store store : stores) {
+            Metapb.Store changeStore = null;
+            if ((store.getState() == Metapb.StoreState.Up
+                 || store.getState() == Metapb.StoreState.Unknown)
+                && !activeStores.containsKey(store.getId())) {
+                // not online: mark the store Offline
+                changeStore = Metapb.Store.newBuilder(store)
+                                          .setState(Metapb.StoreState.Offline)
+                                          .build();
+
+            } else if ((store.getState() == Metapb.StoreState.Exiting &&
+                        !activeStores.containsKey(store.getId())) ||
+                       (store.getState() == Metapb.StoreState.Offline &&
+                        (System.currentTimeMillis() - store.getLastHeartbeat() >
+                         pdConfig.getStore().getMaxDownTime() * 1000) &&
+                        (System.currentTimeMillis() - clusterStartTime >
+                         pdConfig.getStore().getMaxDownTime() * 1000))) {
+                // manually decommissioned, or offline past the allowed down time:
+                // mark the store Tombstone, guarded by checkStoreCanOffline
+                if (storeService.checkStoreCanOffline(store)) {
+                    changeStore = Metapb.Store.newBuilder(store)
+                                              .setState(Metapb.StoreState.Tombstone).build();
+                    this.logService.insertLog(LogService.NODE_CHANGE,
+                                              LogService.TASK, changeStore);
+                    log.info("patrolStores store {} Tombstone", changeStore.getId());
+                }
+            }
+            if (changeStore != null) {
+                storeService.updateStore(changeStore);
+                changedStores.add(changeStore);
+            }
+        }
+        return changedStores;
+    }
+
+
+    /**
+     * Patrol all partitions and verify that the replica counts are correct.
+     */
+    public List<Metapb.Partition> patrolPartitions() throws PDException {
+        if (!isLeader()) {
+            return null;
+        }
+
+        // replica count mismatch: reassign the replicas
+        for (Metapb.ShardGroup group : storeService.getShardGroups()) {
+            if (group.getShardsCount() != pdConfig.getPartition().getShardCount()) {
+                storeService.reallocShards(group);
+                // keep the following balance-partition-shard pass from running immediately
+                kvService.put(BALANCE_SHARD_KEY, "DOING", 180 * 1000);
+            }
+        }
+        // check whether the shards are online
+        Map<Long, Metapb.Store> tombStores = storeService.getTombStores().stream().collect(
+                Collectors.toMap(Metapb.Store::getId, t -> t));
+
+        var partIds = new HashSet<Integer>();
+
+        for (var pair : tombStores.entrySet()) {
+            for (var partition : partitionService.getPartitionByStore(pair.getValue())) {
+                if (partIds.contains(partition.getId())) {
+                    continue;
+                }
+                partIds.add(partition.getId());
+
+                storeService.storeTurnoff(pair.getValue());
+                partitionService.shardOffline(partition, pair.getValue().getId());
+            }
+
+        }
+
+        return null;
+    }
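+
+    /*
+     * Shard-distribution arithmetic used by balancePartitionShard() below
+     * (numbers illustrative):
+     *
+     *   totalShards  = partitionCount * shardCount = 12 * 3 = 36
+     *   activeStores = 5
+     *   averageCount = 36 / 5 = 7, remainder = 36 % 5 = 1
+     *   -> walking the stores sorted by shard count (desc), the first `remainder`
+     *      stores keep averageCount + 1 = 8 shards, the others 7, minimizing moves.
+     */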
+ /** + * Balance the number of partitions across stores. + * Dynamic balancing may only run half an hour after a machine turns UP. + */ + public synchronized Map<Integer, KVPair<Long, Long>> balancePartitionShard() throws + PDException { + log.info("balancePartitions starting, isLeader:{}", isLeader()); + + if (!isLeader()) { + return null; + } + + if (System.currentTimeMillis() - lastStoreTurnoffTime < TurnOffAndBalanceInterval) { + return null; // dynamic balancing may only run half an hour after a store goes down + } + + + int activeStores = storeService.getActiveStores().size(); + if (activeStores == 0) { + log.warn("balancePartitionShard non active stores, skip to balancePartitionShard"); + return null; + } + + // Avoid frequent calls: when the replica count changes, the shard list has to be + // adjusted and the partitions rebalanced at the same time, which would send + // duplicate commands with unpredictable results -- in the worst case partitions + // get deleted. + if (Objects.equals(kvService.get(BALANCE_SHARD_KEY), "DOING")) { + return null; + } + + int totalShards = pdConfig.getConfigService().getPartitionCount() * + pdConfig.getPartition().getShardCount(); + int averageCount = totalShards / activeStores; + int remainder = totalShards % activeStores; + + // count the partitions on each store: StoreId -> PartitionID, ShardRole + Map<Long, Map<Integer, Metapb.ShardRole>> partitionMap = new HashMap<>(); + storeService.getActiveStores().forEach(store -> { + partitionMap.put(store.getId(), new HashMap<>()); + }); + + // a learner shard means a migration is already in progress; do not submit duplicate tasks + AtomicReference<Boolean> isLearner = new AtomicReference<>(false); + partitionService.getPartitions().forEach(partition -> { + + try { + storeService.getShardList(partition.getId()).forEach(shard -> { + Long storeId = shard.getStoreId(); + // check whether the shard is a learner or in an abnormal state + if (shard.getRole() == Metapb.ShardRole.Learner + || partition.getState() != Metapb.PartitionState.PState_Normal) { + isLearner.set(true); + } + if (partitionMap.containsKey(storeId)) { + partitionMap.get(storeId).put(partition.getId(), shard.getRole()); + } + }); + } catch (PDException e) { + log.error("get partition {} shard list error:{}.", partition.getId(), + e.getMessage()); + } + }); + + if (isLearner.get()) { + log.warn("balancePartitionShard is doing, skip this balancePartitionShard task"); + return null; + }
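+ /* + * movedPartitions below maps partitionId -> (sourceStoreId, targetStoreId); the + * target is initialised to 0L and filled in later from the max-heap of under-loaded + * stores, so a pair whose value is still 0 after planning means no destination could + * be found for that partition. + */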
+ // sort the stores by shard count, from high to low + List<KVPair<Long, Integer>> sortedList = new ArrayList<>(); + partitionMap.forEach((storeId, shards) -> { + sortedList.add(new KVPair<>(storeId, shards.size())); + }); + // list sorted in descending order + sortedList.sort(((o1, o2) -> o2.getValue().compareTo(o1.getValue()))); + // max heap + PriorityQueue<KVPair<Long, Integer>> maxHeap = new PriorityQueue<>(sortedList.size(), + (o1, o2) -> o2.getValue() + .compareTo( + o1.getValue())); + + // committedIndex of each replica + Map<Integer, Map<Long, Long>> committedIndexMap = partitionService.getCommittedIndexStats(); + // partition ID --> source StoreID, target StoreID + Map<Integer, KVPair<Long, Long>> movedPartitions = new HashMap<>(); + // Remove the excess shards: iterate the stores from most shards to fewest, and give + // the remainder to the stores with more shards first, to reduce the chance of migration. + for (int index = 0; index < sortedList.size(); index++) { + long storeId = sortedList.get(index).getKey(); + if (!partitionMap.containsKey(storeId)) { + log.error("cannot found storeId {} in partitionMap", storeId); + return null; + } + Map<Integer, Metapb.ShardRole> shards = partitionMap.get(storeId); + int targetCount = index < remainder ? averageCount + 1 : averageCount; + // remove the excess shards and record the source StoreID, preferring shards + // that are not the leader and whose partition has not been planned yet + if (shards.size() > targetCount) { + int movedCount = shards.size() - targetCount; + log.info( + "balancePartitionShard storeId {}, shardsSize {}, targetCount {}, " + + "moveCount {}", + storeId, shards.size(), targetCount, movedCount); + for (Iterator<Integer> iterator = shards.keySet().iterator(); + movedCount > 0 && iterator.hasNext(); ) { + Integer id = iterator.next(); + + if (!movedPartitions.containsKey(id)) { + log.info("store {}, shard of partition {} can be moved", storeId, id); + movedPartitions.put(id, new KVPair<>(storeId, 0L)); + movedCount--; + } + } + } else if (shards.size() < targetCount) { + int addCount = targetCount - shards.size(); + log.info( + "balancePartitionShard storeId {}, shardsSize {}, targetCount {}, " + + "addCount {}", + storeId, shards.size(), targetCount, addCount); + maxHeap.add(new KVPair<>(storeId, addCount)); + } + } + + if (movedPartitions.size() == 0) { + log.warn( + "movedPartitions is empty, totalShards:{} averageCount:{} remainder:{} " + + "sortedList:{}", + totalShards, averageCount, remainder, sortedList); + } + Iterator<Map.Entry<Integer, KVPair<Long, Long>>> moveIterator = + movedPartitions.entrySet().iterator(); + + while (moveIterator.hasNext()) { + if (maxHeap.size() == 0) { + break; + } + Map.Entry<Integer, KVPair<Long, Long>> moveEntry = moveIterator.next(); + int partitionId = moveEntry.getKey(); + long sourceStoreId = moveEntry.getValue().getKey(); + + List<KVPair<Long, Integer>> tmpList = new ArrayList<>(maxHeap.size()); + while (maxHeap.size() > 0) { + KVPair<Long, Integer> pair = maxHeap.poll(); + long destStoreId = pair.getKey(); + boolean destContains = false; + if (partitionMap.containsKey(destStoreId)) { + destContains = partitionMap.get(destStoreId).containsKey(partitionId); + } + // if the destination store already holds this partition, try the next store + if (!destContains) { + moveEntry.getValue().setValue(pair.getKey()); + log.info( + "balancePartitionShard will move partition {} from store {} to store " + + "{}", + moveEntry.getKey(), + moveEntry.getValue().getKey(), + moveEntry.getValue().getValue()); + if (pair.getValue() > 1) { + pair.setValue(pair.getValue() - 1); + tmpList.add(pair); + } + break; + } + tmpList.add(pair); + } + maxHeap.addAll(tmpList); + } + + kvService.put(BALANCE_SHARD_KEY, "DOING", 180 * 1000); + + // start the migration + movedPartitions.forEach((partId, storePair) -> { + // both the source and the target storeID must be non-zero + if (storePair.getKey() > 0 && storePair.getValue() > 0) { + partitionService.movePartitionsShard(partId, storePair.getKey(), + storePair.getValue()); + } else { + log.warn("balancePartitionShard key or value is zero, partId:{} storePair:{}", + partId, storePair); + } + }); + return movedPartitions; + } + + /** + * Balance the number of partition leaders across stores. + */ + public synchronized Map<Integer, Long> balancePartitionLeader(boolean immediately) throws + PDException { + Map<Integer, Long> results = new HashMap<>(); + + if (!isLeader()) { + return results; + } + + if (!immediately && + System.currentTimeMillis() - lastBalanceLeaderTime < BalanceLeaderInterval) { + return results; + } + lastBalanceLeaderTime = System.currentTimeMillis(); + + List<Metapb.ShardGroup> shardGroups = storeService.getShardGroups(); + + // bail out while a split or scale-in task is running + var taskMeta = storeService.getTaskInfoMeta(); + if (taskMeta.hasSplitTaskDoing() || taskMeta.hasMoveTaskDoing()) { + throw new PDException(1001, "split or combine task is processing, please try later!"); + } + + // bail out while data migration is in progress + if (Objects.equals(kvService.get(BALANCE_SHARD_KEY), "DOING")) { + throw new PDException(1001, "balance shard is processing, please try later!"); + } + + if (shardGroups.size() == 0) { + return results; + }
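+ /* + * Leader-target arithmetic, with made-up numbers: 12 shard groups with shard-count 3 + * give 36 shard placements; a store holding 9 of them gets a target of 9 / 3 = 3 + * leaders (at least 1), and the last store in the sorted list absorbs whatever is + * left so the targets sum to the number of shard groups. + */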
+ Map<Long, Integer> storeShardCount = new HashMap<>(); + + shardGroups.forEach(group -> { + group.getShardsList().forEach(shard -> { + storeShardCount.put(shard.getStoreId(), + storeShardCount.getOrDefault(shard.getStoreId(), 0) + 1); + }); + }); + + log.info("balancePartitionLeader, shard group size: {}, by store: {}", shardGroups.size(), + storeShardCount); + + // stable ordering by target count, then by store id + PriorityQueue<KVPair<Long, Integer>> targetCount = + new PriorityQueue<>(kvPairComparatorDesc); + + var sortedGroups = storeShardCount.entrySet().stream() + .map(entry -> new KVPair<>(entry.getKey(), + entry.getValue())) + .sorted(kvPairComparatorAsc) + .collect(Collectors.toList()); + int sum = 0; + + for (int i = 0; i < sortedGroups.size() - 1; i++) { + // at least one + int v = Math.max( + sortedGroups.get(i).getValue() / pdConfig.getPartition().getShardCount(), 1); + targetCount.add(new KVPair<>(sortedGroups.get(i).getKey(), v)); + sum += v; + } + // the last store takes the remainder, so the total stays correct when the + // division is not exact + targetCount.add(new KVPair<>(sortedGroups.get(sortedGroups.size() - 1).getKey(), + shardGroups.size() - sum)); + log.info("target count: {}", targetCount); + + for (var group : shardGroups) { + var map = group.getShardsList().stream() + .collect(Collectors.toMap(Metapb.Shard::getStoreId, shard -> shard)); + var tmpList = new ArrayList<KVPair<Long, Integer>>(); + // With many stores the group may not contain the polled store id: park the + // non-matching stores in a temporary list until a suitable store is found. + while (!targetCount.isEmpty()) { + var pair = targetCount.poll(); + var storeId = pair.getKey(); + if (map.containsKey(storeId)) { + if (map.get(storeId).getRole() != Metapb.ShardRole.Leader) { + log.info("shard group {}, store id:{}, set to leader", group.getId(), + storeId); + partitionService.transferLeader(group.getId(), map.get(storeId)); + results.put(group.getId(), storeId); + } else { + log.info("shard group {}, store id :{}, is leader, no need change", + group.getId(), storeId); + } + + if (pair.getValue() > 1) { + // count - 1 + pair.setValue(pair.getValue() - 1); + tmpList.add(pair); + } + // found one; this group is done + break; + } else { + tmpList.add(pair); + } + } + targetCount.addAll(tmpList); + } + + return results; + } + + + private long getMaxIndexGap(Map<Integer, Map<Long, Long>> committedIndexMap, int partitionId) { + long maxGap = Long.MAX_VALUE; + if (committedIndexMap == null || !committedIndexMap.containsKey(partitionId)) { + return maxGap; + } + Map<Long, Long> shardMap = committedIndexMap.get(partitionId); + if (shardMap == null || shardMap.size() == 0) { + return maxGap; + } + List<Long> sortedList = new ArrayList<>(); + shardMap.forEach((storeId, committedIndex) -> { + sortedList.add(committedIndex); + }); + // list sorted in descending order + sortedList.sort(Comparator.reverseOrder()); + maxGap = sortedList.get(0) - sortedList.get(sortedList.size() - 1); + return maxGap; + } + + + /** + * Split partitions, either automatically or manually. + */ + public List<Metapb.Partition> splitPartition( + Pdpb.OperationMode mode, List<Pdpb.SplitDataParam> params) throws PDException { + + if (mode == Pdpb.OperationMode.Auto) { + return autoSplitPartition(); + } + + var list = params.stream() + .map(param -> new KVPair<>(param.getPartitionId(), param.getCount())) + .collect(Collectors.toList()); + + storeService.splitShardGroups(list); + return null; + }
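+ /* + * splitCount arithmetic for autoSplitPartition() below, with made-up numbers: + * max-shards-per-store 24, 3 active stores, 12 shard groups and shard-count 3 give + * 24 * 3 / (12 * 3) = 2, i.e. every partition may split in two before the per-store + * shard limit is reached. + */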
+ /** + * Automatically split partitions until each store reaches its maximum partition count. + * Precondition: after the split, every machine holds fewer partitions than + * partition.max-partitions-per-store. + */ + public List<Metapb.Partition> autoSplitPartition() throws PDException { + if (!isLeader()) { + return null; + } + + if (Metapb.ClusterState.Cluster_OK != storeService.getClusterStats().getState()) { + if (Metapb.ClusterState.Cluster_Offline == storeService.getClusterStats().getState()) { + throw new PDException(Pdpb.ErrorType.Split_Partition_Doing_VALUE, + "The data is splitting"); + } else { + throw new PDException(Pdpb.ErrorType.Cluster_State_Forbid_Splitting_VALUE, + "The current state of the cluster prohibits splitting data"); + } + } + + //For TEST + // pdConfig.getPartition().setMaxShardsPerStore(pdConfig.getPartition() + // .getMaxShardsPerStore()*2); + + // compute the maximum split count the cluster can support + int splitCount = pdConfig.getPartition().getMaxShardsPerStore() * + storeService.getActiveStores().size() / + (storeService.getShardGroups().size() * + pdConfig.getPartition().getShardCount()); + + if (splitCount < 2) { + throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "Too many partitions per store, partition.store-max-shard-count" + + " = " + + pdConfig.getPartition().getMaxShardsPerStore()); + } + + // every store is below its maximum partition count, so the split can proceed + log.info("Start to split partitions..., split count = {}", splitCount); + + // set the cluster state to offline + storeService.updateClusterStatus(Metapb.ClusterState.Cluster_Offline); + // update the default partition count + // pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size() * + // splitCount); + + var list = storeService.getShardGroups().stream() + .map(shardGroup -> new KVPair<>(shardGroup.getId(), splitCount)) + .collect(Collectors.toList()); + storeService.splitShardGroups(list); + + return null; + } + + + /** + * A store reports task state; when a partition's state changes, recompute the state + * of its ShardGroup, its graph and the whole cluster. + */ + public void reportTask(MetaTask.Task task) { + try { + switch (task.getType()) { + case Split_Partition: + partitionService.handleSplitTask(task); + break; + case Move_Partition: + partitionService.handleMoveTask(task); + break; + case Clean_Partition: + partitionService.handleCleanPartitionTask(task); + break; + default: + break; + } + } catch (Exception e) { + log.error("Report task exception {}, {}", e, task); + } + } + + /** + * Run a RocksDB compaction on every shard group. + */ + public Boolean dbCompaction(String tableName) throws PDException { + if (!isLeader()) { + return false; + } + + for (Metapb.ShardGroup shardGroup : storeService.getShardGroups()) { + storeService.shardGroupsDbCompaction(shardGroup.getId(), tableName); + } + + return true; + }
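+ /* + * Typical call sequence for the two methods below (hypothetical caller, for + * illustration only): + * Map<String, Object> res = canAllPartitionsMovedOut(store); + * if (Boolean.TRUE.equals(res.get("flag"))) { + * movePartitions((Map<Integer, KVPair<Long, Long>>) res.get("movedPartitions")); + * } + */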
+ /** + * Decide whether all partitions of a store can be moved out, and return the verdict + * together with a migration plan. + */ + public Map<String, Object> canAllPartitionsMovedOut(Metapb.Store sourceStore) throws + PDException { + if (!isLeader()) { + return null; + } + // analyse whether the partitions on one store can be moved out completely + Map<String, Object> resultMap = new HashMap<>(); + // holds the partitions on the source store: StoreId -> PartitionID, ShardRole + Map<Long, Map<Integer, Metapb.ShardRole>> sourcePartitionMap = new HashMap<>(); + sourcePartitionMap.put(sourceStore.getId(), new HashMap<>()); + // holds the partitions on the other active stores: StoreId -> PartitionID, ShardRole + Map<Long, Map<Integer, Metapb.ShardRole>> otherPartitionMap = new HashMap<>(); + Map<Long, Long> availableDiskSpace = new HashMap<>(); // disk space left on each store + Map<Integer, Long> partitionDataSize = new HashMap<>(); // data size of each partition to move + + storeService.getActiveStores().forEach(store -> { + if (store.getId() != sourceStore.getId()) { + otherPartitionMap.put(store.getId(), new HashMap<>()); + // record the other stores' remaining disk space, in bytes + availableDiskSpace.put(store.getId(), store.getStats().getAvailable()); + } else { + resultMap.put("current_store_is_online", true); + } + }); + // total up the data size of the partitions to move (from storeStats, in KB) + for (Metapb.GraphStats graphStats : sourceStore.getStats().getGraphStatsList()) { + partitionDataSize.put(graphStats.getPartitionId(), + partitionDataSize.getOrDefault(graphStats.getPartitionId(), 0L) + + graphStats.getApproximateSize()); + } + // populate sourcePartitionMap and otherPartitionMap + partitionService.getPartitions().forEach(partition -> { + try { + storeService.getShardList(partition.getId()).forEach(shard -> { + long storeId = shard.getStoreId(); + if (storeId == sourceStore.getId()) { + sourcePartitionMap.get(storeId).put(partition.getId(), shard.getRole()); + } else { + if (otherPartitionMap.containsKey(storeId)) { + otherPartitionMap.get(storeId).put(partition.getId(), shard.getRole()); + } + } + + }); + } catch (PDException e) { + throw new RuntimeException(e); + } + }); + // collect the partitions to remove, i.e. every partition on the source store + Map<Integer, KVPair<Long, Long>> movedPartitions = new HashMap<>(); + for (Map.Entry<Integer, Metapb.ShardRole> entry : sourcePartitionMap.get( + sourceStore.getId()).entrySet()) { + movedPartitions.put(entry.getKey(), new KVPair<>(sourceStore.getId(), 0L)); + } + // count the other stores' partitions in a min-heap, so the stores with fewer + // partitions are always considered first + PriorityQueue<KVPair<Long, Integer>> minHeap = new PriorityQueue<>(otherPartitionMap.size(), + (o1, o2) -> o1.getValue() + .compareTo( + o2.getValue())); + otherPartitionMap.forEach((storeId, shards) -> { + minHeap.add(new KVPair<>(storeId, shards.size())); + }); + // iterate the partitions to move, preferring the stores with fewer partitions + Iterator<Map.Entry<Integer, KVPair<Long, Long>>> moveIterator = + movedPartitions.entrySet().iterator(); + while (moveIterator.hasNext()) { + Map.Entry<Integer, KVPair<Long, Long>> moveEntry = moveIterator.next(); + int partitionId = moveEntry.getKey(); + List<KVPair<Long, Integer>> tmpList = new ArrayList<>(); // entries popped off the heap + while (minHeap.size() > 0) { + KVPair<Long, Integer> pair = minHeap.poll(); // pop the head + long storeId = pair.getKey(); + int partitionCount = pair.getValue(); + Map<Integer, Metapb.ShardRole> shards = otherPartitionMap.get(storeId); + final int unitRate = 1024; // conversion rate between the two storage units (KB vs bytes) + if ((!shards.containsKey(partitionId)) && ( + availableDiskSpace.getOrDefault(storeId, 0L) / unitRate >= + partitionDataSize.getOrDefault(partitionId, 0L))) { + // if the target store does not hold this partition yet and its free + // space can accommodate it, migrate the partition there + moveEntry.getValue().setValue(storeId); // set the target store of the move + log.info("plan to move partition {} to store {}, " + + "available disk space {}, current partitionSize:{}", + partitionId, + storeId, + availableDiskSpace.getOrDefault(storeId, 0L) / unitRate, + partitionDataSize.getOrDefault(partitionId, 0L) + ); + // update the store's expected free space + availableDiskSpace.put(storeId, availableDiskSpace.getOrDefault(storeId, 0L) + - partitionDataSize.getOrDefault(partitionId, + 0L) * + unitRate); + // update the store's partition count in the bookkeeping + partitionCount += 1; + pair.setValue(partitionCount); + tmpList.add(pair); + break; + } else { + tmpList.add(pair); + } + } + minHeap.addAll(tmpList); + } + // check whether any partition is still without a target store + List<Integer> remainPartitions = new ArrayList<>(); + movedPartitions.forEach((partId, storePair) -> { + if (storePair.getValue() == 0L) { + remainPartitions.add(partId); + } + }); + if (remainPartitions.size() > 0) { + resultMap.put("flag", false); + resultMap.put("movedPartitions", null); + } else { + resultMap.put("flag", true); + resultMap.put("movedPartitions", movedPartitions); + } + return resultMap; + + } + + public Map<Integer, KVPair<Long, Long>> movePartitions( + Map<Integer, KVPair<Long, Long>> movedPartitions) { + if (!isLeader()) { + return null; + } + // start the migration + log.info("begin move partitions:"); + movedPartitions.forEach((partId, storePair) -> { + // both the source and the target storeID must be non-zero + if (storePair.getKey() > 0 && storePair.getValue() > 0) { + partitionService.movePartitionsShard(partId, storePair.getKey(), + storePair.getValue()); + } + }); + return movedPartitions; + } + + +}
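As a rough lifecycle sketch (the surrounding wiring is assumed; only the constructor and the two lifecycle methods come from the class above):

    TaskScheduleService scheduleService =
            new TaskScheduleService(pdConfig, storeService, partitionService);
    scheduleService.init();      // registers the patrol/balance/TTL-cleanup jobs
    // ... later, on shutdown:
    scheduleService.shutDown();  // shutdownNow() on the internal ScheduledThreadPoolExecutor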
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java new file mode 100644 index 0000000000..84e8581673 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java @@ -0,0 +1,277 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.config; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.Configuration; +import org.springframework.stereotype.Component; + +import lombok.Data; + + +/** + * PD configuration + */ +@Data +@Component +public class PDConfig { + + @Value("${pd.cluster_id:1}") + private long clusterId; // cluster ID + + @Value("${pd.patrol-interval:300}") + private long patrolInterval = 300; // patrol task interval + @Value("${pd.data-path}") + private String dataPath; + @Value("${pd.initial-store-count:3}") + private int minStoreCount; + + // initial store list; the stores in this list are activated automatically + @Value("${pd.initial-store-list: ''}") + private String initialStoreList; + @Value("${grpc.host}") + private String host; + + @Value("${license.verify-path}") + private String verifyPath; + @Value("${license.license-path}") + private String licensePath; + @Autowired + private ThreadPoolGrpc threadPoolGrpc; + @Autowired + private Raft raft; + @Autowired + private Store store; + @Autowired + private Partition partition; + @Autowired + private Discovery discovery; + private Map<String, String> initialStoreMap = null; + private ConfigService configService; + private IdService idService; + + public Map<String, String> getInitialStoreMap() { + if (initialStoreMap == null) { + initialStoreMap = new HashMap<>(); + Arrays.asList(initialStoreList.split(",")).forEach(s -> { + initialStoreMap.put(s, s); + }); + } + return initialStoreMap; + } + + /** + * Initial partition count = + * store count * max shards per store / shards per partition + */ + public int getInitialPartitionCount() { + return getInitialStoreMap().size() * partition.getMaxShardsPerStore() + / partition.getShardCount(); + } + + public ConfigService getConfigService() { + return configService; + } + + public void setConfigService(ConfigService configService) { + this.configService = configService; + } + + public IdService getIdService() { + return idService; + } + + public void setIdService(IdService idService) { + this.idService = idService; + } + + @Data + @Configuration + public class ThreadPoolGrpc { + @Value("${thread.pool.grpc.core:600}") + private int core; + @Value("${thread.pool.grpc.max:1000}") + private int max; + @Value("${thread.pool.grpc.queue:" + Integer.MAX_VALUE + "}") + private int queue; + }
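+ /* + * A matching application.yml fragment would look roughly like this (values and + * nesting are illustrative only; the property names come from the @Value + * annotations in this file): + * pd: + * patrol-interval: 300 + * data-path: ./pd_data + * initial-store-count: 3 + * initial-store-list: 127.0.0.1:8500 + * raft: + * address: 127.0.0.1:8610 + * peers-list: 127.0.0.1:8610 + * grpc: + * host: 127.0.0.1 + */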
+ @Data + @Configuration + public class Raft { + @Value("${raft.enable:true }") + private boolean enable; + @Value("${raft.address}") + private String address; + @Value("${pd.data-path}") + private String dataPath; + @Value("${raft.peers-list}") + private String peersList; + @Value("${raft.snapshotInterval: 300}") + private int snapshotInterval; + @Value("${raft.rpc-timeout:10000}") + private int rpcTimeout; + @Value("${grpc.host}") + private String host; + @Value("${server.port}") + private int port; + + @Value("${pd.cluster_id:1}") + private long clusterId; // cluster ID + @Value("${grpc.port}") + private int grpcPort; + + public String getGrpcAddress() { + return host + ":" + grpcPort; + } + } + + @Data + @Configuration + public class Store { + // store heartbeat timeout + @Value("${store.keepAlive-timeout:300}") + private long keepAliveTimeout = 300; + @Value("${store.max-down-time:1800}") + private long maxDownTime = 1800; + + @Value("${store.monitor_data_enabled:true}") + private boolean monitorDataEnabled = true; + + @Value("${store.monitor_data_interval: 1 minute}") + private String monitorDataInterval = "1 minute"; + + @Value("${store.monitor_data_retention: 1 day}") + private String monitorDataRetention = "1 day"; + + /** + * interval -> seconds; the minimum value is 1 second. + * + * @return the seconds of the interval + */ + public Long getMonitorInterval() { + return parseTimeExpression(this.monitorDataInterval); + } + + /** + * Monitor data saved in RocksDB is deleted once it falls out of this period. + * + * @return the period the monitor data should be kept for + */ + public Long getRetentionPeriod() { + return parseTimeExpression(this.monitorDataRetention); + }
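+ /* + * Examples for parseTimeExpression below: "1 day" -> 86400, "2 hour" -> 7200, + * "minute" -> 60 (the number part defaults to 1); anything that does not match + * the pattern, e.g. "5 sec", falls back to 1. + */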
+ /** + * Parse a time expression; supported pattern: + * [1-9][ ](second | minute | hour | day | month | year). + * The unit must not be null; the number part defaults to 1. + * + * @param exp the time expression + * @return the seconds value of the expression; 1 is returned for an illegal expression + */ + private Long parseTimeExpression(String exp) { + if (exp != null) { + Pattern pattern = Pattern.compile( + "(?<n>(\\d+)*)(\\s)*(?<unit>(second|minute|hour|day|month|year)$)"); + Matcher matcher = pattern.matcher(exp.trim()); + if (matcher.find()) { + String n = matcher.group("n"); + String unit = matcher.group("unit"); + + if (null == n || n.length() == 0) { + n = "1"; + } + + Long interval; + switch (unit) { + case "minute": + interval = 60L; + break; + case "hour": + interval = 3600L; + break; + case "day": + interval = 86400L; + break; + case "month": + interval = 86400L * 30; + break; + case "year": + interval = 86400L * 365; + break; + case "second": + default: + interval = 1L; + } + // avoid n == '0' + return Math.max(1L, interval * Integer.parseInt(n)); + } + } + return 1L; + } + + } + + @Data + @Configuration + public class Partition { + private int totalCount = 0; + + // maximum number of shards per store + @Value("${partition.store-max-shard-count:24}") + private int maxShardsPerStore = 24; + + // default number of shards per partition + @Value("${partition.default-shard-count:3}") + private int shardCount = 3; + + public int getTotalCount() { + if (totalCount == 0) { + totalCount = getInitialPartitionCount(); + } + return totalCount; + } + + public void setTotalCount(int totalCount) { + this.totalCount = totalCount; + } + } + + @Data + @Configuration + public class Discovery { + // after a client registers, its registration is removed once it misses + // this many heartbeats + @Value("${discovery.heartbeat-try-count:3}") + private int heartbeatOutTimes = 3; + } + +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java new file mode 100644 index 0000000000..c1e92ada4c --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.hugegraph.pd.meta; + +import java.util.List; +import java.util.Optional; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; + +public class ConfigMetaStore extends MetadataRocksDBStore { + + + private final long clusterId; + + public ConfigMetaStore(PDConfig pdConfig) { + super(pdConfig); + this.clusterId = pdConfig.getClusterId(); + } + + /** + * 更新图空间存储状态信息 + * + * @param + */ + public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDException { + byte[] graphSpaceKey = MetadataKeyHelper.getGraphSpaceKey(graphSpace.getName()); + graphSpace = graphSpace.toBuilder().setTimestamp(System.currentTimeMillis()).build(); + put(graphSpaceKey, graphSpace.toByteArray()); + return graphSpace; + } + + public List getGraphSpace(String graphSpace) throws PDException { + byte[] graphSpaceKey = MetadataKeyHelper.getGraphSpaceKey(graphSpace); + return scanPrefix(Metapb.GraphSpace.parser(), graphSpaceKey); + } + + public Metapb.PDConfig setPdConfig(Metapb.PDConfig pdConfig) throws PDException { + byte[] graphSpaceKey = + MetadataKeyHelper.getPdConfigKey(String.valueOf(pdConfig.getVersion())); + Metapb.PDConfig config = Metapb.PDConfig.newBuilder( + pdConfig).setTimestamp(System.currentTimeMillis()).build(); + put(graphSpaceKey, config.toByteArray()); + return config; + } + + public Metapb.PDConfig getPdConfig(long version) throws PDException { + byte[] graphSpaceKey = MetadataKeyHelper.getPdConfigKey(version <= 0 ? null : + String.valueOf(version)); + Optional max = scanPrefix( + Metapb.PDConfig.parser(), graphSpaceKey).stream().max( + (o1, o2) -> (o1.getVersion() > o2.getVersion()) ? 1 : -1); + return max.isPresent() ? max.get() : null; + } + + +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java new file mode 100644 index 0000000000..c95185363d --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.meta; + +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class DiscoveryMetaStore extends MetadataRocksDBStore { + + /** + * appName --> address --> registryInfo + */ + private static final String PREFIX = "REGIS-"; + private static final String SPLITTER = "-"; + + public DiscoveryMetaStore(PDConfig pdConfig) { + super(pdConfig); + } + + public void register(NodeInfo nodeInfo, int outTimes) throws PDException { + putWithTTL(toKey(nodeInfo.getAppName(), nodeInfo.getVersion(), nodeInfo.getAddress()), + nodeInfo.toByteArray(), (nodeInfo.getInterval() / 1000) * outTimes); + } + + byte[] toKey(String appName, String version, String address) { + StringBuilder builder = getPrefixBuilder(appName, version); + builder.append(SPLITTER); + builder.append(address); + return builder.toString().getBytes(); + } + + private StringBuilder getPrefixBuilder(String appName, String version) { + StringBuilder builder = new StringBuilder(); + builder.append(PREFIX); + if (!StringUtils.isEmpty(appName)) { + builder.append(appName); + builder.append(SPLITTER); + } + if (!StringUtils.isEmpty(version)) { + builder.append(version); + } + return builder; + } + + public NodeInfos getNodes(Query query) { + List nodeInfos = null; + try { + StringBuilder builder = getPrefixBuilder(query.getAppName(), + query.getVersion()); + nodeInfos = getInstanceListWithTTL( + NodeInfo.parser(), + builder.toString().getBytes()); + builder.setLength(0); + } catch (PDException e) { + log.error("An error occurred getting data from the store,{}", e); + } + if (query.getLabelsMap() != null && !query.getLabelsMap().isEmpty()) { + List result = new LinkedList(); + for (NodeInfo node : nodeInfos) { + if (labelMatch(node, query)) { + result.add(node); + } + } + return NodeInfos.newBuilder().addAllInfo(result).build(); + } + return NodeInfos.newBuilder().addAllInfo(nodeInfos).build(); + + } + + private boolean labelMatch(NodeInfo node, Query query) { + Map labelsMap = node.getLabelsMap(); + for (Map.Entry entry : query.getLabelsMap().entrySet()) { + if (!entry.getValue().equals(labelsMap.get(entry.getKey()))) { + return false; + } + } + return true; + } +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java new file mode 100644 index 0000000000..a2d36a0028 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java @@ -0,0 +1,255 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.meta; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.store.KV; + +import com.caucho.hessian.io.Hessian2Input; +import com.caucho.hessian.io.Hessian2Output; + +import lombok.extern.slf4j.Slf4j; + +/** + * Implementation of auto-increment ids + */ +@Slf4j +public class IdMetaStore extends MetadataRocksDBStore { + + + private static final String ID_PREFIX = "@ID@"; + private static final String CID_PREFIX = "@CID@"; + private static final String CID_SLOT_PREFIX = "@CID_SLOT@"; + private static final String CID_DEL_SLOT_PREFIX = "@CID_DEL_SLOT@"; + private static final String SEPARATOR = "@"; + private static final ConcurrentHashMap<String, Object> SEQUENCES = new ConcurrentHashMap<>(); + public static long CID_DEL_TIMEOUT = 24 * 3600 * 1000; + private final long clusterId; + + public IdMetaStore(PDConfig pdConfig) { + super(pdConfig); + this.clusterId = pdConfig.getClusterId(); + } + + public static long bytesToLong(byte[] b) { + ByteBuffer buf = ByteBuffer.wrap(b); + return buf.getLong(); + } + + public static byte[] longToBytes(long l) { + ByteBuffer buf = ByteBuffer.wrap(new byte[Long.SIZE]); + buf.putLong(l); + buf.flip(); + return buf.array(); + }
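+ /* + * getId below returns the current value and reserves [current, current + delta): + * e.g. with an empty store, getId("graph_id", 100) returns 0 and a second call + * returns 100 (the key name is made up for illustration). + */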
+ /** + * Fetch an auto-increment id + * + * @param key the id key + * @param delta how many ids to reserve + * @return the id value before the increment + * @throws PDException on storage errors + */ + public long getId(String key, int delta) throws PDException { + Object probableLock = getLock(key); + byte[] keyBs = (ID_PREFIX + key).getBytes(Charset.defaultCharset()); + synchronized (probableLock) { + byte[] bs = getOne(keyBs); + long current = bs != null ? bytesToLong(bs) : 0L; + long next = current + delta; + put(keyBs, longToBytes(next)); + return current; + } + } + + private Object getLock(String key) { + Object probableLock = new Object(); + Object currentLock = SEQUENCES.putIfAbsent(key, probableLock); + if (currentLock != null) { + probableLock = currentLock; + } + return probableLock; + } + + public void resetId(String key) throws PDException { + Object probableLock = new Object(); + Object currentLock = SEQUENCES.putIfAbsent(key, probableLock); + if (currentLock != null) { + probableLock = currentLock; + } + byte[] keyBs = (ID_PREFIX + key).getBytes(Charset.defaultCharset()); + synchronized (probableLock) { + removeByPrefix(keyBs); + } + } + + /** + * Within 24 hours of deleting the cid identified by name, re-requesting a cid for + * the same name returns the same value. This is designed to prevent cache + * inconsistency from corrupting data. + * + * @param key the cid key + * @param name the cid label + * @param max the cid upper bound + * @return the cid value + * @throws PDException on storage errors + */ + public long getCId(String key, String name, long max) throws PDException { + // Check for expired cids; graphs are deleted rarely, so the performance impact + // here is small. + byte[] delKeyPrefix = (CID_DEL_SLOT_PREFIX + + key + SEPARATOR).getBytes(Charset.defaultCharset()); + synchronized (this) { + scanPrefix(delKeyPrefix).forEach(kv -> { + long[] value = (long[]) deserialize(kv.getValue()); + if (value.length >= 2) { + if (System.currentTimeMillis() - value[1] > CID_DEL_TIMEOUT) { + try { + delCId(key, value[0]); + remove(kv.getKey()); + } catch (Exception e) { + log.error("Exception ", e); + } + } + } + }); + + // recover the key from the delayed-deletion queue + byte[] cidDelayKey = getCIDDelayKey(key, name); + byte[] value = getOne(cidDelayKey); + if (value != null) { + // remove it from the delayed-deletion queue + remove(cidDelayKey); + return ((long[]) deserialize(value))[0]; + } else { + return getCId(key, max); + } + } + } + + /** + * Add the cid to the deletion queue, to be deleted later + */ + public long delCIdDelay(String key, String name, long cid) throws PDException { + byte[] delKey = getCIDDelayKey(key, name); + put(delKey, serialize(new long[]{cid, System.currentTimeMillis()})); + return cid; + }
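+ /* + * Cyclic-id bookkeeping: every allocated cid also writes a slot key + * (CID_SLOT_PREFIX + key + cid), so the linear scans in getCId below can skip over + * ids that are still in use and wrap around to 0 once the upper bound is hit. + */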
+ /** + * Fetch a cyclic, non-repeating auto-increment id; counting restarts from 0 once + * the upper bound is reached. + * + * @param key the cid key + * @param max the id upper bound; when it is reached, counting restarts from 0 + * @return the cid value, or -1 if no free slot is left + * @throws PDException on storage errors + */ + public long getCId(String key, long max) throws PDException { + Object probableLock = getLock(key); + byte[] keyBs = (CID_PREFIX + key).getBytes(Charset.defaultCharset()); + synchronized (probableLock) { + byte[] bs = getOne(keyBs); + long current = bs != null ? bytesToLong(bs) : 0L; + long last = current == 0 ? max - 1 : current - 1; + { // look for an unused cid + List<KV> kvs = scanRange(genCIDSlotKey(key, current), genCIDSlotKey(key, max)); + for (KV kv : kvs) { + if (current == bytesToLong(kv.getValue())) { + current++; + } else { + break; + } + } + } + if (current == max) { + current = 0; + List<KV> kvs = scanRange(genCIDSlotKey(key, current), genCIDSlotKey(key, last)); + for (KV kv : kvs) { + if (current == bytesToLong(kv.getValue())) { + current++; + } else { + break; + } + } + } + if (current == last) { + return -1; + } + put(genCIDSlotKey(key, current), longToBytes(current)); + put(keyBs, longToBytes(current + 1)); + return current; + } + } + + private byte[] genCIDSlotKey(String key, long value) { + byte[] keySlot = (CID_SLOT_PREFIX + key + SEPARATOR).getBytes(Charset.defaultCharset()); + ByteBuffer buf = ByteBuffer.allocate(keySlot.length + Long.SIZE); + buf.put(keySlot); + buf.put(longToBytes(value)); + return buf.array(); + } + + private byte[] getCIDDelayKey(String key, String name) { + byte[] bsKey = (CID_DEL_SLOT_PREFIX + + key + SEPARATOR + + name).getBytes(Charset.defaultCharset()); + return bsKey; + } + + /** + * Delete a cyclic id and release its value + * + * @param key the cid key + * @param cid the cid value to release + * @return the removal result from the underlying store + * @throws PDException on storage errors + */ + public long delCId(String key, long cid) throws PDException { + return remove(genCIDSlotKey(key, cid)); + } + + private byte[] serialize(Object obj) { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + Hessian2Output output = new Hessian2Output(bos); + output.writeObject(obj); + output.flush(); + return bos.toByteArray(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private Object deserialize(byte[] bytes) { + try (ByteArrayInputStream bis = new ByteArrayInputStream(bytes)) { + Hessian2Input input = new Hessian2Input(bis); + Object obj = input.readObject(); + input.close(); + return obj; + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java new file mode 100644 index 0000000000..ab3660034d --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.hugegraph.pd.meta; + +import java.util.List; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; + +public class LogMeta extends MetadataRocksDBStore { + + private final PDConfig pdConfig; + + public LogMeta(PDConfig pdConfig) { + super(pdConfig); + this.pdConfig = pdConfig; + } + + public void insertLog(Metapb.LogRecord record) throws PDException { + byte[] storeLogKey = MetadataKeyHelper.getLogKey(record); + put(storeLogKey, record.toByteArray()); + + } + + public List getLog(String action, Long start, Long end) throws PDException { + byte[] keyStart = MetadataKeyHelper.getLogKeyPrefix(action, start); + byte[] keyEnd = MetadataKeyHelper.getLogKeyPrefix(action, end); + List stores = this.scanRange(Metapb.LogRecord.parser(), + keyStart, keyEnd); + return stores; + } +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java new file mode 100644 index 0000000000..b8e824c3c5 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.store.HgKVStore; +import org.apache.hugegraph.pd.store.HgKVStoreImpl; +import org.apache.hugegraph.pd.store.RaftKVStore; + +/** + * 存储工厂类,创建相关对象的存储类 + */ +public class MetadataFactory { + + private static HgKVStore store = null; + + public static HgKVStore getStore(PDConfig pdConfig) { + if (store == null) { + synchronized (MetadataFactory.class) { + if (store == null) { + HgKVStore proto = new HgKVStoreImpl(); + //proto.init(pdConfig); + store = pdConfig.getRaft().isEnable() ? 
+ new RaftKVStore(RaftEngine.getInstance(), proto) : + proto; + store.init(pdConfig); + } + } + } + return store; + } + + public static void closeStore() { + if (store != null) { + store.close(); + } + } + + public static StoreInfoMeta newStoreInfoMeta(PDConfig pdConfig) { + return new StoreInfoMeta(pdConfig); + } + + public static PartitionMeta newPartitionMeta(PDConfig pdConfig) { + return new PartitionMeta(pdConfig); + } + + public static IdMetaStore newHugeServerMeta(PDConfig pdConfig) { + return new IdMetaStore(pdConfig); + } + + public static DiscoveryMetaStore newDiscoveryMeta(PDConfig pdConfig) { + return new DiscoveryMetaStore(pdConfig); + } + + public static ConfigMetaStore newConfigMeta(PDConfig pdConfig) { + return new ConfigMetaStore(pdConfig); + } + + public static TaskInfoMeta newTaskInfoMeta(PDConfig pdConfig) { + return new TaskInfoMeta(pdConfig); + } + + + public static QueueStore newQueueStore(PDConfig pdConfig) { + return new QueueStore(pdConfig); + } + + public static LogMeta newLogMeta(PDConfig pdConfig) { + return new LogMeta(pdConfig); + } +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java new file mode 100644 index 0000000000..a8866a2217 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java @@ -0,0 +1,378 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.meta; + +import java.nio.charset.Charset; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.grpc.Metapb; + +public class MetadataKeyHelper { + + public static final char DELIMITER = '/'; + + private static final String STORE = "STORE"; + private static final String ACTIVESTORE = "ACTIVESTORE"; + private static final String STORESTATUS = "STORESTATUS"; + private static final String PARTITION = "PARTITION"; + private static final String PARTITION_V36 = "PARTITION_V36"; + private static final String SHARDGROUP = "SHARDGROUP"; + + private static final String PARTITION_STATUS = "PARTITION_STATUS"; + private static final String GRAPH = "GRAPH"; + private static final String GRAPHMETA = "GRAPHMETA"; + private static final String GRAPH_SPACE = "GRAPH_SPACE"; + private static final String PD_CONFIG = "PD_CONFIG"; + private static final String TASK_SPLIT = "TASK_SPLIT"; + private static final String TASK_MOVE = "TASK_MOVE"; + private static final String LOG_RECORD = "LOG_RECORD"; + + private static final String QUEUE = "QUEUE"; + + public static byte[] getStoreInfoKey(final long storeId) { + //STORE/{storeId} + String key = StringBuilderHelper.get() + .append(STORE).append(DELIMITER) + .append(storeId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getActiveStoreKey(final long storeId) { + //ACTIVESTORE/{storeId} + String key = StringBuilderHelper.get() + .append(ACTIVESTORE).append(DELIMITER) + .append(storeId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getActiveStorePrefix() { + //ACTIVESTORE + String key = StringBuilderHelper.get() + .append(ACTIVESTORE).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getStorePrefix() { + //STORE + String key = StringBuilderHelper.get() + .append(STORE).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getStoreStatusKey(final long storeId) { + //STORESTATUS/{storeId} + String key = StringBuilderHelper.get() + .append(STORESTATUS).append(DELIMITER) + .append(storeId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getShardGroupKey(final long groupId) { + //SHARDGROUP/{storeId} + String key = StringBuilderHelper.get() + .append(SHARDGROUP).append(DELIMITER) + .append(groupId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getShardGroupPrefix() { + //SHARDGROUP + String key = StringBuilderHelper.get() + .append(SHARDGROUP).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getPartitionKey(final String graphName, final int partId) { + //GRAPH/{graphName}/Partition/{partId} + String key = StringBuilderHelper.get() + .append(GRAPH).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(PARTITION).append(DELIMITER) + .append(partId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getPartitionV36Key(final String graphName, final int partId) { + // GRAPH/{graphName}/PartitionV36/{partId} + String key = StringBuilderHelper.get() + .append(GRAPH).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(PARTITION_V36).append(DELIMITER) + .append(partId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getPartitionPrefix(final String 
graphName) { + //GRAPH/{graph}/Partition + String key = StringBuilderHelper.get() + .append(GRAPH).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(PARTITION).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getShardKey(final long storeId, final int partId) { + //SHARD/{graphName}/{type} + String key = StringBuilderHelper.get() + .append(SHARDGROUP).append(DELIMITER) + .append(storeId).append(DELIMITER) + .append(partId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getShardPrefix(final long storeId) { + //SHARD/{graphName}/{type} + String key = StringBuilderHelper.get() + .append(SHARDGROUP).append(DELIMITER) + .append(storeId).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getGraphKey(final String graphName) { + //GRAPHMETA/{graphName} + String key = StringBuilderHelper.get() + .append(GRAPHMETA).append(DELIMITER) + .append(graphName).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getGraphPrefix() { + //GRAPHMETA/{ + String key = StringBuilderHelper.get() + .append(GRAPHMETA).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getPartitionStatusKey(String graphName, int id) { + //PARTITION_STATUS/{ + String key = StringBuilderHelper.get() + .append(PARTITION_STATUS) + .append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(id).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getPartitionStatusPrefixKey(String graphName) { + //PARTITION_STATUS/{ + StringBuilder builder = StringBuilderHelper.get().append(PARTITION_STATUS) + .append(DELIMITER); + if (!StringUtils.isEmpty(graphName)) { + builder.append(graphName).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getGraphSpaceKey(String graphSpace) { + //GRAPH_SPACE/{ + StringBuilder builder = StringBuilderHelper.get().append( + GRAPH_SPACE).append(DELIMITER); + if (!StringUtils.isEmpty(graphSpace)) { + builder.append(graphSpace).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getPdConfigKey(String configKey) { + //PD_CONFIG/{ + StringBuilder builder = StringBuilderHelper.get().append( + PD_CONFIG).append(DELIMITER); + if (!StringUtils.isEmpty(configKey)) { + builder.append(configKey).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getQueueItemPrefix() { + //QUEUE + String key = StringBuilderHelper.get() + .append(QUEUE).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getQueueItemKey(String itemId) { + //QUEUE + StringBuilder builder = StringBuilderHelper.get() + .append(QUEUE).append(DELIMITER); + if (!StringUtils.isEmpty(itemId)) { + builder.append(itemId).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getSplitTaskKey(String graphName, int groupId) { + // TASK_SPLIT/{GraphName}/{partitionID} + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_SPLIT).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(groupId); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] 
getSplitTaskPrefix(String graphName) { + // TASK_SPLIT/{GraphName}/ + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_SPLIT).append(DELIMITER) + .append(graphName); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getAllSplitTaskPrefix() { + // TASK_SPLIT/{GraphName}/ + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_SPLIT).append(DELIMITER); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getMoveTaskKey(String graphName, int targetGroupId, int groupId) { + // TASK_MOVE/{GraphName}/to PartitionID/{source partitionID} + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_MOVE).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(targetGroupId).append(DELIMITER) + .append(groupId); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getMoveTaskPrefix(String graphName) { + // TASK_MOVE/{graphName}/toPartitionId/ + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_MOVE).append(DELIMITER) + .append(graphName); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getAllMoveTaskPrefix() { + // TASK_MOVE/{graphName}/toPartitionId/ + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_MOVE).append(DELIMITER); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getLogKey(Metapb.LogRecord record) { + //LOG_RECORD/{action}/{time}/ + StringBuilder builder = StringBuilderHelper.get() + .append(LOG_RECORD) + .append(DELIMITER) + .append(record.getAction()) + .append(DELIMITER) + .append(record.getTimestamp()); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getLogKeyPrefix(String action, long time) { + //LOG_DATA_SPLIT/{time}/{GraphName} + StringBuilder builder = StringBuilderHelper.get() + .append(LOG_RECORD) + .append(DELIMITER) + .append(action) + .append(DELIMITER) + .append(time); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getKVPrefix(String prefix, String key) { + //K@/{key} + StringBuilder builder = StringBuilderHelper.get() + .append(prefix).append(DELIMITER); + if (!StringUtils.isEmpty(key)) { + builder.append(key).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getKVTTLPrefix(String ttlPrefix, String prefix, String key) { + StringBuilder builder = StringBuilderHelper.get().append(ttlPrefix) + .append(prefix).append(DELIMITER); + if (!StringUtils.isEmpty(key)) { + builder.append(key).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static String getKVWatchKeyPrefix(String key, String watchDelimiter, long clientId) { + StringBuilder builder = StringBuilderHelper.get(); + builder.append(watchDelimiter).append(DELIMITER); + builder.append(key == null ? "" : key).append(DELIMITER); + builder.append(clientId); + return builder.toString(); + } + + public static String getKVWatchKeyPrefix(String key, String watchDelimiter) { + StringBuilder builder = StringBuilderHelper.get(); + builder.append(watchDelimiter).append(DELIMITER); + builder.append(key == null ? 
"" : key).append(DELIMITER); + return builder.toString(); + } + + public static char getDelimiter() { + return DELIMITER; + } + + public static StringBuilder getStringBuilderHelper() { + return StringBuilderHelper.get(); + } + + static class StringBuilderHelper { + private static final int DISCARD_LIMIT = 1024 << 3; // 8k + + private static final ThreadLocal holderThreadLocal = ThreadLocal + .withInitial(StringBuilderHolder::new); + + public static StringBuilder get() { + final StringBuilderHolder holder = holderThreadLocal.get(); + return holder.getStringBuilder(); + } + + public static void truncate() { + final StringBuilderHolder holder = holderThreadLocal.get(); + holder.truncate(); + } + + private static class StringBuilderHolder { + + private final StringBuilder buf = new StringBuilder(); + + private StringBuilder getStringBuilder() { + truncate(); + return buf; + } + + private void truncate() { + buf.setLength(0); + } + } + } + +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java new file mode 100644 index 0000000000..05aa938f07 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.meta; + +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.lang3.ArrayUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.store.HgKVStore; +import org.apache.hugegraph.pd.store.KV; + +import com.google.protobuf.Parser; + +public class MetadataRocksDBStore extends MetadataStoreBase { + + HgKVStore store; + + PDConfig pdConfig; + + public MetadataRocksDBStore(PDConfig pdConfig) { + store = MetadataFactory.getStore(pdConfig); + this.pdConfig = pdConfig; + } + + public HgKVStore getStore() { + if (store == null) { + store = MetadataFactory.getStore(pdConfig); + } + return store; + } + + @Override + public byte[] getOne(byte[] key) throws PDException { + try { + byte[] bytes = store.get(key); + return bytes; + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + } + + @Override + public E getOne(Parser parser, byte[] key) throws PDException { + try { + byte[] bytes = store.get(key); + if (ArrayUtils.isEmpty(bytes)) { + return null; + } + return parser.parseFrom(bytes); + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + } + + @Override + public void put(byte[] key, byte[] value) throws PDException { + try { + getStore().put(key, value); + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException { + this.store.putWithTTL(key, value, ttl); + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws + PDException { + this.store.putWithTTL(key, value, ttl, timeUnit); + } + + @Override + public byte[] getWithTTL(byte[] key) throws PDException { + return this.store.getWithTTL(key); + } + + @Override + public List getListWithTTL(byte[] key) throws PDException { + return this.store.getListWithTTL(key); + } + + @Override + public void removeWithTTL(byte[] key) throws PDException { + this.store.removeWithTTL(key); + } + + @Override + public List scanPrefix(byte[] prefix) throws PDException { + //TODO 使用rocksdb 前缀查询 + try { + return this.store.scanPrefix(prefix); + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + } + + @Override + public List scanRange(byte[] start, byte[] end) throws PDException { + return this.store.scanRange(start, end); + } + + @Override + public List scanRange(Parser parser, byte[] start, byte[] end) throws PDException { + List stores = new LinkedList<>(); + try { + List kvs = this.scanRange(start, end); + for (KV keyValue : kvs) { + stores.add(parser.parseFrom(keyValue.getValue())); + } + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + return stores; + } + + @Override + public List scanPrefix(Parser parser, byte[] prefix) throws PDException { + List stores = new LinkedList<>(); + try { + List kvs = this.scanPrefix(prefix); + for (KV keyValue : kvs) { + stores.add(parser.parseFrom(keyValue.getValue())); + } + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + return stores; + } + + @Override + public boolean containsKey(byte[] key) throws PDException { + return !ArrayUtils.isEmpty(store.get(key)); + } + + @Override + public 
+
+    @Override
+    public long remove(byte[] key) throws PDException {
+        try {
+            return this.store.remove(key);
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e);
+        }
+    }
+
+    @Override
+    public long removeByPrefix(byte[] prefix) throws PDException {
+        try {
+            return this.store.removeByPrefix(prefix);
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e);
+        }
+    }
+
+    @Override
+    public void clearAllCache() throws PDException {
+        this.store.clear();
+    }
+
+    @Override
+    public void close() {
+
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java
new file mode 100644
index 0000000000..297384c146
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.meta;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.store.KV;
+
+import com.google.protobuf.Parser;
+
+public abstract class MetadataStoreBase {
+
+    // public long timeout = 3; // request timeout, 3 seconds by default
+
+    public abstract byte[] getOne(byte[] key) throws PDException;
+
+    public abstract <E> E getOne(Parser<E> parser, byte[] key) throws PDException;
+
+    public abstract void put(byte[] key, byte[] value) throws PDException;
+
+    /**
+     * Put a value with an expiration time (TTL).
+     */
+    public abstract void putWithTTL(byte[] key,
+                                    byte[] value,
+                                    long ttl) throws PDException;
+
+    public abstract void putWithTTL(byte[] key,
+                                    byte[] value,
+                                    long ttl, TimeUnit timeUnit) throws PDException;
+
+    public abstract byte[] getWithTTL(byte[] key) throws PDException;
+
+    public abstract List<byte[]> getListWithTTL(byte[] key) throws PDException;
+
+    public abstract void removeWithTTL(byte[] key) throws PDException;
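+
+    // NOTE (editor's illustrative sketch, not part of the original patch):
+    // the TTL variants mirror put/get but let an entry expire on its own.
+    // PD uses this for liveness records such as the active-store heartbeat
+    // (see StoreInfoMeta.keepStoreAlive); a sketch, names hypothetical:
+    //
+    //     store.putWithTTL(activeKey, value, keepAliveTimeout);            // default time unit
+    //     store.putWithTTL(activeKey, value, 30_000, TimeUnit.MILLISECONDS);
+    //     byte[] alive = store.getWithTTL(activeKey);  // no value once expired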
+
+    /**
+     * Scan by key prefix.
+     *
+     * @param prefix key prefix to match
+     * @return all matched key-value pairs
+     * @throws PDException on read error
+     */
+    public abstract List<KV> scanPrefix(byte[] prefix) throws PDException;
+
+    /**
+     * Scan by key prefix and parse each value with the given parser.
+     *
+     * @param prefix key prefix to match
+     * @return all matched, parsed records
+     * @throws PDException on read error
+     */
+    public abstract <E> List<E> scanPrefix(Parser<E> parser, byte[] prefix) throws PDException;
+
+    public abstract List<KV> scanRange(byte[] start, byte[] end) throws PDException;
+
+    public abstract <E> List<E> scanRange(Parser<E> parser, byte[] start, byte[] end)
+            throws PDException;
+
+    /**
+     * Check whether a key exists.
+     *
+     * @param key the key to check
+     * @return true if the key exists
+     * @throws PDException on read error
+     */
+    public abstract boolean containsKey(byte[] key) throws PDException;
+
+    public abstract long remove(byte[] key) throws PDException;
+
+    public abstract long removeByPrefix(byte[] prefix) throws PDException;
+
+    public abstract void clearAllCache() throws PDException;
+
+    public abstract void close() throws IOException;
+
+    public <T> T getInstanceWithTTL(Parser<T> parser, byte[] key) throws PDException {
+        try {
+            byte[] withTTL = this.getWithTTL(key);
+            return parser.parseFrom(withTTL);
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e);
+        }
+    }
+
+    public <T> List<T> getInstanceListWithTTL(Parser<T> parser, byte[] key)
+            throws PDException {
+        try {
+            List<byte[]> withTTL = this.getListWithTTL(key);
+            LinkedList<T> ts = new LinkedList<>();
+            for (int i = 0; i < withTTL.size(); i++) {
+                ts.add(parser.parseFrom(withTTL.get(i)));
+            }
+            return ts;
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e);
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java
new file mode 100644
index 0000000000..38cce28b05
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java
@@ -0,0 +1,295 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.meta;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionCache;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Partition metadata management.
+ */
+@Slf4j
+public class PartitionMeta extends MetadataRocksDBStore {
+    static String CID_GRAPH_ID_KEY = "GraphID";
+    static int CID_GRAPH_ID_MAX = 0xFFFE;
+    private final PDConfig pdConfig;
+    private final PartitionCache cache;
+
+    public PartitionMeta(PDConfig pdConfig) {
+        super(pdConfig);
+        this.pdConfig = pdConfig;
+        //this.timeout = pdConfig.getEtcd().getTimeout();
+        this.cache = new PartitionCache();
+    }
+
+    /**
+     * Initialize: load all shard groups and partitions.
+     */
+    public void init() throws PDException {
+        loadShardGroups();
+        loadGraphs();
+    }
+
+    public void reload() throws PDException {
+        cache.clear();
+        loadShardGroups();
+        loadGraphs();
+    }
+
+    private void loadGraphs() throws PDException {
+        byte[] key = MetadataKeyHelper.getGraphPrefix();
+        List<Metapb.Graph> graphs = scanPrefix(Metapb.Graph.parser(), key);
+        for (Metapb.Graph graph : graphs) {
+            cache.updateGraph(graph);
+            loadPartitions(graph);
+        }
+    }
+
+    /**
+     * Partitions and shard groups are stored separately, so both must be
+     * loaded during init.
+     *
+     * @throws PDException on read error
+     */
+    private void loadShardGroups() throws PDException {
+        byte[] shardGroupPrefix = MetadataKeyHelper.getShardGroupPrefix();
+        for (var shardGroup : scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix)) {
+            cache.updateShardGroup(shardGroup);
+        }
+    }
+
+    private void loadPartitions(Metapb.Graph graph) throws PDException {
+        byte[] prefix = MetadataKeyHelper.getPartitionPrefix(graph.getGraphName());
+        List<Metapb.Partition> partitions = scanPrefix(Metapb.Partition.parser(), prefix);
+        partitions.forEach(p -> {
+            cache.updatePartition(p);
+        });
+    }
+
+    /**
+     * Look up a partition by id, first in the cache, then in the database.
+     *
+     * @param graphName graph name
+     * @param partId    partition id
+     * @return the partition, or null if it does not exist
+     * @throws PDException on read error
+     */
+    public Metapb.Partition getPartitionById(String graphName, int partId) throws PDException {
+        var pair = cache.getPartitionById(graphName, partId);
+        Metapb.Partition partition;
+        if (pair == null) {
+            byte[] key = MetadataKeyHelper.getPartitionKey(graphName, partId);
+            partition = getOne(Metapb.Partition.parser(), key);
+            if (partition != null) {
+                cache.updatePartition(partition);
+            }
+        } else {
+            partition = pair.getKey();
+        }
+        return partition;
+    }
+
+    public List<Metapb.Partition> getPartitionById(int partId) throws PDException {
+        List<Metapb.Partition> partitions = new ArrayList<>();
+        cache.getGraphs().forEach(graph -> {
+            cache.getPartitions(graph.getGraphName()).forEach(partition -> {
+                if (partition.getId() == partId) {
+                    partitions.add(partition);
+                }
+            });
+        });
+        return partitions;
+    }
+
+    /**
+     * Look up a partition by hash code.
+     */
+    public Metapb.Partition getPartitionByCode(String graphName, long code) throws PDException {
+        var pair = cache.getPartitionByCode(graphName, code);
+        if (pair != null) {
+            return pair.getKey();
+        }
+        return null;
+    }
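+
+    // NOTE (editor's illustrative sketch, not part of the original patch):
+    // a typical read goes through the PartitionCache first and falls back
+    // to RocksDB, e.g. (names hypothetical):
+    //
+    //     PartitionMeta meta = new PartitionMeta(pdConfig);
+    //     meta.init();                                  // warm the cache
+    //     Metapb.Partition p = meta.getPartitionById("graph1", 0);
+    //     Metapb.Partition byCode = meta.getPartitionByCode("graph1", hashCode);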
+
+    public Metapb.Graph getAndCreateGraph(String graphName) throws PDException {
+        return getAndCreateGraph(graphName, pdConfig.getPartition().getTotalCount());
+    }
+
+    public Metapb.Graph getAndCreateGraph(String graphName, int partitionCount) throws PDException {
+
+        if (partitionCount > pdConfig.getPartition().getTotalCount()) {
+            partitionCount = pdConfig.getPartition().getTotalCount();
+        }
+
+        // Management graphs have only one partition
+        if (graphName.endsWith("/s") || graphName.endsWith("/m")) {
+            partitionCount = 1;
+        }
+
+        Metapb.Graph graph = cache.getGraph(graphName);
+        if (graph == null) {
+            // Save the graph info
+            graph = Metapb.Graph.newBuilder()
+                                .setGraphName(graphName)
+                                .setPartitionCount(partitionCount)
+                                .setState(Metapb.PartitionState.PState_Normal)
+                                .build();
+            updateGraph(graph);
+        }
+        return graph;
+    }
+
+    /**
+     * Persist partition info.
+     *
+     * @param partition the partition to save
+     * @return the saved partition
+     * @throws PDException on write error
+     */
+    public Metapb.Partition updatePartition(Metapb.Partition partition) throws PDException {
+        if (!cache.hasGraph(partition.getGraphName())) {
+            getAndCreateGraph(partition.getGraphName());
+        }
+        byte[] key = MetadataKeyHelper.getPartitionKey(partition.getGraphName(), partition.getId());
+        put(key, partition.toByteString().toByteArray());
+        cache.updatePartition(partition);
+        return partition;
+    }
+
+    /**
+     * Check whether the graph exists in the database and create it if not.
+     * Update the partition's version, conf version and shard list.
+     *
+     * @param partition the partition carrying the new shard list
+     * @return the input partition
+     * @throws PDException on write error
+     */
+    public Metapb.Partition updateShardList(Metapb.Partition partition) throws PDException {
+        if (!cache.hasGraph(partition.getGraphName())) {
+            getAndCreateGraph(partition.getGraphName());
+        }
+
+        Metapb.Partition pt = getPartitionById(partition.getGraphName(), partition.getId());
+        // pt = pt.toBuilder().setVersion(partition.getVersion())
+        //        .setConfVer(partition.getConfVer())
+        //        .clearShards()
+        //        .addAllShards(partition.getShardsList()).build();
+
+        byte[] key = MetadataKeyHelper.getPartitionKey(pt.getGraphName(), pt.getId());
+        put(key, pt.toByteString().toByteArray());
+        cache.updatePartition(pt);
+        return partition;
+    }
+
+    /**
+     * Remove all partitions of a graph.
+     */
+    public long removeAllPartitions(String graphName) throws PDException {
+        cache.removeAll(graphName);
+        byte[] prefix = MetadataKeyHelper.getPartitionPrefix(graphName);
+        return removeByPrefix(prefix);
+    }
+
+    public long removePartition(String graphName, int id) throws PDException {
+        cache.remove(graphName, id);
+        byte[] key = MetadataKeyHelper.getPartitionKey(graphName, id);
+        return remove(key);
+    }
+
+    public void updatePartitionStats(Metapb.PartitionStats stats) throws PDException {
+        for (String graphName : stats.getGraphNameList()) {
+            byte[] prefix = MetadataKeyHelper.getPartitionStatusKey(graphName, stats.getId());
+            put(prefix, stats.toByteArray());
+        }
+    }
+
+    /**
+     * Get the status of one partition.
+     */
+    public Metapb.PartitionStats getPartitionStats(String graphName, int id) throws PDException {
+        byte[] prefix = MetadataKeyHelper.getPartitionStatusKey(graphName, id);
+        return getOne(Metapb.PartitionStats.parser(), prefix);
+    }
+
+    /**
+     * Get the status of all partitions of a graph.
+     */
+    public List<Metapb.PartitionStats> getPartitionStats(String graphName) throws PDException {
+        byte[] prefix = MetadataKeyHelper.getPartitionStatusPrefixKey(graphName);
+        return scanPrefix(Metapb.PartitionStats.parser(), prefix);
+    }
+
+    /**
+     * Update graph info.
+     *
+     * @param graph the graph to save
+     * @return the saved graph
+     */
+    public Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException {
+        log.info("updateGraph {}", graph);
+        byte[] key = MetadataKeyHelper.getGraphKey(graph.getGraphName());
+        // Save the graph info
+        put(key, graph.toByteString().toByteArray());
+        cache.updateGraph(graph);
+        return graph;
+    }
+
+    public List<Metapb.Partition> getPartitions() {
+        List<Metapb.Partition> partitions = new ArrayList<>();
+        List<Metapb.Graph> graphs = cache.getGraphs();
+        graphs.forEach(e -> {
+            partitions.addAll(cache.getPartitions(e.getGraphName()));
+        });
+        return partitions;
+    }
+
+    public List<Metapb.Partition> getPartitions(String graphName) {
+        return cache.getPartitions(graphName);
+    }
+
+    public List<Metapb.Graph> getGraphs() throws PDException {
+        byte[] key = MetadataKeyHelper.getGraphPrefix();
+        return scanPrefix(Metapb.Graph.parser(), key);
+    }
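+
+    // NOTE (editor's illustrative sketch, not part of the original patch):
+    // writes go through updatePartition/updateGraph so that RocksDB and the
+    // PartitionCache stay in sync; a minimal write path might look like
+    // (values hypothetical):
+    //
+    //     Metapb.Graph g = meta.getAndCreateGraph("graph1");
+    //     Metapb.Partition p = Metapb.Partition.newBuilder()
+    //                                          .setGraphName(g.getGraphName())
+    //                                          .setId(0)
+    //                                          .build();
+    //     meta.updatePartition(p);   // persists, then caches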
+ + public Metapb.Graph getGraph(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getGraphKey(graphName); + return getOne(Metapb.Graph.parser(), key); + } + + /** + * 删除图,并删除图id + */ + public long removeGraph(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getGraphKey(graphName); + long l = remove(key); + return l; + } + + public PartitionCache getPartitionCache() { + return cache; + } +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java new file mode 100644 index 0000000000..ce850e5b90 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.meta; + +import java.util.List; + +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.store.RaftKVStore; + +public class QueueStore extends MetadataRocksDBStore { + QueueStore(PDConfig pdConfig) { + super(pdConfig); + } + + public void addItem(Metapb.QueueItem queueItem) throws PDException { + HgAssert.isArgumentNotNull(queueItem, "queueItem"); + byte[] key = MetadataKeyHelper.getQueueItemKey(queueItem.getItemId()); + put(key, queueItem.toByteString().toByteArray()); + } + + public void removeItem(String itemId) throws PDException { + if (RaftEngine.getInstance().isLeader()) { + remove(MetadataKeyHelper.getQueueItemKey(itemId)); + } else { + var store = getStore(); + // todo: delete record via client + if (store instanceof RaftKVStore) { + ((RaftKVStore) store).doRemove(MetadataKeyHelper.getQueueItemKey(itemId)); + } + } + } + + public List getQueue() throws PDException { + byte[] prefix = MetadataKeyHelper.getQueueItemPrefix(); + return scanPrefix(Metapb.QueueItem.parser(), prefix); + } +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java new file mode 100644 index 0000000000..dfe709dfd6 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java @@ -0,0 +1,206 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.meta; + +import java.util.LinkedList; +import java.util.List; +import java.util.ListIterator; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; + +import lombok.extern.slf4j.Slf4j; + +/** + * Store信息存储 + */ +@Slf4j +public class StoreInfoMeta extends MetadataRocksDBStore { + private final PDConfig pdConfig; + + public StoreInfoMeta(PDConfig pdConfig) { + super(pdConfig); + this.pdConfig = pdConfig; + // this.timeout = pdConfig.getDiscovery().getHeartbeatOutTimes(); + } + + public static boolean shardGroupEquals(List g1, List g2) { + ListIterator e1 = g1.listIterator(); + ListIterator e2 = g2.listIterator(); + while (e1.hasNext() && e2.hasNext()) { + Metapb.Shard o1 = e1.next(); + Metapb.Shard o2 = e2.next(); + if (!(o1 == null ? o2 == null : o1.getStoreId() == o2.getStoreId())) { + return false; + } + } + return !(e1.hasNext() || e2.hasNext()); + } + + /** + * 更新Store信息 + * + * @param store + * @throws PDException + */ + public void updateStore(Metapb.Store store) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(store.getId()); + put(storeInfoKey, store.toByteArray()); + } + + /** + * 更新Store的存活状态 + * + * @param store + */ + public void keepStoreAlive(Metapb.Store store) throws PDException { + byte[] activeStoreKey = MetadataKeyHelper.getActiveStoreKey(store.getId()); + putWithTTL(activeStoreKey, store.toByteArray(), pdConfig.getStore().getKeepAliveTimeout()); + } + + public void removeActiveStore(Metapb.Store store) throws PDException { + byte[] activeStoreKey = MetadataKeyHelper.getActiveStoreKey(store.getId()); + removeWithTTL(activeStoreKey); + } + + public Metapb.Store getStore(Long storeId) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(storeId); + Metapb.Store store = getOne(Metapb.Store.parser(), storeInfoKey); + return store; + } + + /** + * 获取所有的store + * + * @param graphName + * @return + * @throws PDException + */ + public List getStores(String graphName) throws PDException { + byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); + return scanPrefix(Metapb.Store.parser(), storePrefix); + } + + /** + * 获取活跃的Store + * + * @param graphName + * @return + * @throws PDException + */ + public List getActiveStores(String graphName) throws PDException { + byte[] activePrefix = MetadataKeyHelper.getActiveStorePrefix(); + List listWithTTL = getInstanceListWithTTL(Metapb.Store.parser(), + activePrefix); + return listWithTTL; + } + + public List getActiveStores() throws PDException { + byte[] activePrefix = MetadataKeyHelper.getActiveStorePrefix(); + List listWithTTL = getInstanceListWithTTL(Metapb.Store.parser(), + activePrefix); + return listWithTTL; + } + + /** + * 检查storeid是否存在 + * + * @param storeId + * @return + */ + public boolean storeExists(Long storeId) throws PDException { + byte[] storeInfoKey = 
MetadataKeyHelper.getStoreInfoKey(storeId); + return containsKey(storeInfoKey); + } + + /** + * 更新存储状态信息 + * + * @param storeStats + */ + public Metapb.StoreStats updateStoreStats(Metapb.StoreStats storeStats) throws PDException { + byte[] storeStatusKey = MetadataKeyHelper.getStoreStatusKey(storeStats.getStoreId()); + + put(storeStatusKey, storeStats.toByteArray()); + return storeStats; + } + + public long removeStore(long storeId) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(storeId); + return remove(storeInfoKey); + } + + public long removeAll() throws PDException { + byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); + return this.removeByPrefix(storePrefix); + } + + public void updateShardGroup(Metapb.ShardGroup group) throws PDException { + byte[] shardGroupKey = MetadataKeyHelper.getShardGroupKey(group.getId()); + put(shardGroupKey, group.toByteArray()); + } + + public void deleteShardGroup(int groupId) throws PDException { + byte[] shardGroupKey = MetadataKeyHelper.getShardGroupKey(groupId); + remove(shardGroupKey); + } + + public Metapb.ShardGroup getShardGroup(int groupId) throws PDException { + byte[] shardGroupKey = MetadataKeyHelper.getShardGroupKey(groupId); + return getOne(Metapb.ShardGroup.parser(), shardGroupKey); + } + + public int getShardGroupCount() throws PDException { + byte[] shardGroupPrefix = MetadataKeyHelper.getShardGroupPrefix(); + return scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix).size(); + } + + public List getShardGroups() throws PDException { + byte[] shardGroupPrefix = MetadataKeyHelper.getShardGroupPrefix(); + return scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix); + } + + public Metapb.StoreStats getStoreStats(long storeId) throws PDException { + byte[] storeStatusKey = MetadataKeyHelper.getStoreStatusKey(storeId); + Metapb.StoreStats stats = getOne(Metapb.StoreStats.parser(), + storeStatusKey); + return stats; + } + + /** + * @return store及状态信息 + * @throws PDException + */ + public List getStoreStatus(boolean isActive) throws PDException { + byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); + List stores = isActive ? getActiveStores() : + scanPrefix(Metapb.Store.parser(), storePrefix); + LinkedList list = new LinkedList<>(); + for (int i = 0; i < stores.size(); i++) { + Metapb.Store store = stores.get(i); + Metapb.StoreStats stats = getStoreStats(store.getId()); + if (stats != null) { + store = Metapb.Store.newBuilder(store).setStats(getStoreStats(store.getId())) + .build(); + } + list.add(store); + } + return list; + } +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java new file mode 100644 index 0000000000..cdbc022dd3 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.meta; + +import java.util.List; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; + +/** + * 任务管理 + */ +public class TaskInfoMeta extends MetadataRocksDBStore { + public TaskInfoMeta(PDConfig pdConfig) { + super(pdConfig); + } + + /** + * 添加分区分裂任务 + */ + public void addSplitTask(int groupID, Metapb.Partition partition, SplitPartition splitPartition) + throws PDException { + byte[] key = MetadataKeyHelper.getSplitTaskKey(partition.getGraphName(), groupID); + MetaTask.Task task = MetaTask.Task.newBuilder() + .setType(MetaTask.TaskType.Split_Partition) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setSplitPartition(splitPartition) + .build(); + put(key, task.toByteString().toByteArray()); + } + + public void updateSplitTask(MetaTask.Task task) throws PDException { + var partition = task.getPartition(); + byte[] key = MetadataKeyHelper.getSplitTaskKey(partition.getGraphName(), partition.getId()); + put(key, task.toByteString().toByteArray()); + } + + public MetaTask.Task getSplitTask(String graphName, int groupID) throws PDException { + byte[] key = MetadataKeyHelper.getSplitTaskKey(graphName, groupID); + return getOne(MetaTask.Task.parser(), key); + } + + public List scanSplitTask(String graphName) throws PDException { + byte[] prefix = MetadataKeyHelper.getSplitTaskPrefix(graphName); + return scanPrefix(MetaTask.Task.parser(), prefix); + } + + public void removeSplitTaskPrefix(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getSplitTaskPrefix(graphName); + removeByPrefix(key); + } + + public boolean hasSplitTaskDoing() throws PDException { + byte[] key = MetadataKeyHelper.getAllSplitTaskPrefix(); + return scanPrefix(key).size() > 0; + } + + public void addMovePartitionTask(Metapb.Partition partition, MovePartition movePartition) + throws PDException { + byte[] key = MetadataKeyHelper.getMoveTaskKey(partition.getGraphName(), + movePartition.getTargetPartition().getId(), + partition.getId()); + + MetaTask.Task task = MetaTask.Task.newBuilder() + .setType(MetaTask.TaskType.Move_Partition) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setMovePartition(movePartition) + .build(); + put(key, task.toByteArray()); + } + + public void updateMovePartitionTask(MetaTask.Task task) + throws PDException { + + byte[] key = MetadataKeyHelper.getMoveTaskKey(task.getPartition().getGraphName(), + task.getMovePartition().getTargetPartition() + .getId(), + task.getPartition().getId()); + put(key, task.toByteArray()); + } + + public MetaTask.Task getMovePartitionTask(String graphName, int targetId, int partId) throws + PDException { + byte[] key = MetadataKeyHelper.getMoveTaskKey(graphName, targetId, partId); + return 
getOne(MetaTask.Task.parser(), key); + } + + public List scanMoveTask(String graphName) throws PDException { + byte[] prefix = MetadataKeyHelper.getMoveTaskPrefix(graphName); + return scanPrefix(MetaTask.Task.parser(), prefix); + } + + /** + * 按照prefix删除迁移任务,一次分组的 + * + * @param graphName 图名称 + * @throws PDException io error + */ + public void removeMoveTaskPrefix(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getMoveTaskPrefix(graphName); + removeByPrefix(key); + } + + public boolean hasMoveTaskDoing() throws PDException { + byte[] key = MetadataKeyHelper.getAllMoveTaskPrefix(); + return scanPrefix(key).size() > 0; + } + +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java new file mode 100644 index 0000000000..e7b985842e --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.raft; + +import java.util.concurrent.CompletableFuture; + +import com.alipay.sofa.jraft.Closure; +import com.alipay.sofa.jraft.Status; + +public class FutureClosureAdapter implements Closure { + public final CompletableFuture future = new CompletableFuture<>(); + private T resp; + + public T getResponse() { + return this.resp; + } + + public void setResponse(T resp) { + this.resp = resp; + future.complete(resp); + run(Status.OK()); + } + + public void failure(Throwable t) { + future.completeExceptionally(t); + run(new Status(-1, t.getMessage())); + } + + @Override + public void run(Status status) { + + } +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java new file mode 100644 index 0000000000..4af18f50a2 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java @@ -0,0 +1,161 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.raft; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +import com.alipay.sofa.jraft.util.BytesUtil; +import com.alipay.sofa.jraft.util.Requires; +import com.caucho.hessian.io.Hessian2Input; +import com.caucho.hessian.io.Hessian2Output; + +import lombok.Data; + +@Data +public class KVOperation { + + /** + * Put operation + */ + public static final byte PUT = 0x01; + /** + * Get operation + */ + public static final byte GET = 0x02; + public static final byte DEL = 0x03; + public static final byte REMOVE_BY_PREFIX = 0x04; + public static final byte REMOVE = 0x05; + public static final byte PUT_WITH_TTL = 0x06; + public static final byte CLEAR = 0x07; + public static final byte PUT_WITH_TTL_UNIT = 0x08; + public static final byte REMOVE_WITH_TTL = 0x09; + /** + * Snapshot operation + */ + public static final byte SAVE_SNAPSHOT = 0x10; + public static final byte LOAD_SNAPSHOT = 0x11; + + private byte[] key; + private byte[] value; + private Object attach; // 原始对象,用于本机处理,减少一次反序列化操作 + private Object arg; + private byte op; + + public KVOperation() { + + } + + public KVOperation(byte[] key, byte[] value, Object attach, byte op) { + this.key = key; + this.value = value; + this.attach = attach; + this.op = op; + } + + public KVOperation(byte[] key, byte[] value, Object attach, byte op, Object arg) { + this.key = key; + this.value = value; + this.attach = attach; + this.op = op; + this.arg = arg; + } + + public static KVOperation fromByteArray(byte[] value) throws IOException { + + try (ByteArrayInputStream bis = new ByteArrayInputStream(value, 1, value.length - 1)) { + Hessian2Input input = new Hessian2Input(bis); + KVOperation op = new KVOperation(); + op.op = value[0]; + op.key = input.readBytes(); + op.value = input.readBytes(); + op.arg = input.readObject(); + input.close(); + return op; + } + } + + public static KVOperation createPut(final byte[] key, final byte[] value) { + Requires.requireNonNull(key, "key"); + Requires.requireNonNull(value, "value"); + return new KVOperation(key, value, null, PUT); + } + + public static KVOperation createGet(final byte[] key) { + Requires.requireNonNull(key, "key"); + return new KVOperation(key, BytesUtil.EMPTY_BYTES, null, GET); + } + + public static KVOperation createPutWithTTL(byte[] key, byte[] value, long ttl) { + Requires.requireNonNull(key, "key"); + Requires.requireNonNull(value, "value"); + return new KVOperation(key, value, value, PUT_WITH_TTL, + ttl); + } + + public static KVOperation createPutWithTTL(byte[] key, byte[] value, long ttl, + TimeUnit timeUnit) { + Requires.requireNonNull(key, "key"); + Requires.requireNonNull(value, "value"); + return new KVOperation(key, value, value, PUT_WITH_TTL_UNIT, + new Object[]{ttl, timeUnit}); + } + + public static KVOperation createRemoveWithTTL(byte[] key) { + Requires.requireNonNull(key, "key"); + return new KVOperation(key, key, null, REMOVE_WITH_TTL); + } + + public static KVOperation createRemoveByPrefix(byte[] key) { + 
Requires.requireNonNull(key, "key"); + return new KVOperation(key, key, null, REMOVE_BY_PREFIX); + } + + public static KVOperation createRemove(byte[] key) { + Requires.requireNonNull(key, "key"); + return new KVOperation(key, key, null, REMOVE); + } + + public static KVOperation createClear() { + return new KVOperation(null, null, null, CLEAR); + } + + public static KVOperation createSaveSnapshot(String snapshotPath) { + Requires.requireNonNull(snapshotPath, "snapshotPath"); + return new KVOperation(null, null, snapshotPath, SAVE_SNAPSHOT); + } + + public static KVOperation createLoadSnapshot(String snapshotPath) { + Requires.requireNonNull(snapshotPath, "snapshotPath"); + return new KVOperation(null, null, snapshotPath, LOAD_SNAPSHOT); + } + + public byte[] toByteArray() throws IOException { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + bos.write(op); + Hessian2Output output = new Hessian2Output(bos); + output.writeObject(key); + output.writeObject(value); + output.writeObject(arg); + output.flush(); + return bos.toByteArray(); + } + } +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java new file mode 100644 index 0000000000..db13f0ceb1 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.raft; + +import org.apache.hugegraph.pd.grpc.Pdpb; + +import com.alipay.sofa.jraft.Closure; + +public interface KVStoreClosure extends Closure { + + Pdpb.Error getError(); + + void setError(final Pdpb.Error error); + + Object getData(); + + void setData(final Object data); +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java new file mode 100644 index 0000000000..910240cbd7 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java @@ -0,0 +1,377 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.raft; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; + +import com.alipay.sofa.jraft.JRaftUtils; +import com.alipay.sofa.jraft.Node; +import com.alipay.sofa.jraft.RaftGroupService; +import com.alipay.sofa.jraft.ReplicatorGroup; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import com.alipay.sofa.jraft.core.Replicator; +import com.alipay.sofa.jraft.entity.PeerId; +import com.alipay.sofa.jraft.entity.Task; +import com.alipay.sofa.jraft.error.RaftError; +import com.alipay.sofa.jraft.option.NodeOptions; +import com.alipay.sofa.jraft.option.RaftOptions; +import com.alipay.sofa.jraft.option.RpcOptions; +import com.alipay.sofa.jraft.rpc.RaftRpcServerFactory; +import com.alipay.sofa.jraft.rpc.RpcServer; +import com.alipay.sofa.jraft.util.Endpoint; +import com.alipay.sofa.jraft.util.ThreadId; +import com.alipay.sofa.jraft.util.internal.ThrowUtil; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class RaftEngine { + private static final RaftEngine INSTANCE = new RaftEngine(); + private final RaftStateMachine stateMachine; + private PDConfig.Raft config; + private RaftGroupService raftGroupService; + private RpcServer rpcServer; + private Node raftNode; + private RaftRpcClient raftRpcClient; + + public RaftEngine() { + this.stateMachine = new RaftStateMachine(); + } + + public static RaftEngine getInstance() { + return INSTANCE; + } + + public boolean init(PDConfig.Raft config) { + if (this.raftNode != null) { + return false; + } + this.config = config; + + raftRpcClient = new RaftRpcClient(); + raftRpcClient.init(new RpcOptions()); + + String groupId = "pd_raft"; + String raftPath = config.getDataPath() + "/" + groupId; + new File(raftPath).mkdirs(); + + new File(config.getDataPath()).mkdirs(); + Configuration initConf = new Configuration(); + initConf.parse(config.getPeersList()); + if (config.isEnable() && config.getPeersList().length() < 3) { + log.error("The RaftEngine parameter is incorrect." 
+ + " When RAFT is enabled, the number of peers " + "cannot be less than 3"); + } + // 设置 Node 参数,包括日志存储路径和状态机实例 + NodeOptions nodeOptions = new NodeOptions(); + nodeOptions.setFsm(stateMachine); + nodeOptions.setEnableMetrics(true); + // 日志路径 + nodeOptions.setLogUri(raftPath + "/log"); + // raft 元数据路径 + nodeOptions.setRaftMetaUri(raftPath + "/meta"); + // 快照路径 + nodeOptions.setSnapshotUri(raftPath + "/snapshot"); + // 初始集群 + nodeOptions.setInitialConf(initConf); + // 快照时间间隔 + nodeOptions.setSnapshotIntervalSecs(config.getSnapshotInterval()); + + nodeOptions.setRpcConnectTimeoutMs(config.getRpcTimeout()); + nodeOptions.setRpcDefaultTimeout(config.getRpcTimeout()); + nodeOptions.setRpcInstallSnapshotTimeout(config.getRpcTimeout()); + // 设置 raft 配置 + RaftOptions raftOptions = nodeOptions.getRaftOptions(); + + nodeOptions.setEnableMetrics(true); + + final PeerId serverId = JRaftUtils.getPeerId(config.getAddress()); + + rpcServer = createRaftRpcServer(config.getAddress()); + // 构建 raft 组并启动 raft + this.raftGroupService = + new RaftGroupService(groupId, serverId, nodeOptions, rpcServer, true); + this.raftNode = raftGroupService.start(false); + log.info("RaftEngine start successfully: id = {}, peers list = {}", groupId, + nodeOptions.getInitialConf().getPeers()); + return this.raftNode != null; + } + + /** + * 创建 raft rpc server,用于 pd 之间通讯 + */ + private RpcServer createRaftRpcServer(String raftAddr) { + Endpoint endpoint = JRaftUtils.getEndPoint(raftAddr); + RpcServer rpcServer = RaftRpcServerFactory.createRaftRpcServer(endpoint); + RaftRpcProcessor.registerProcessor(rpcServer, this); + rpcServer.init(null); + return rpcServer; + } + + public void shutDown() { + if (this.raftGroupService != null) { + this.raftGroupService.shutdown(); + try { + this.raftGroupService.join(); + } catch (final InterruptedException e) { + this.raftNode = null; + ThrowUtil.throwException(e); + } + this.raftGroupService = null; + } + if (this.rpcServer != null) { + this.rpcServer.shutdown(); + this.rpcServer = null; + } + if (this.raftNode != null) { + this.raftNode.shutdown(); + } + this.raftNode = null; + } + + public boolean isLeader() { + return this.raftNode.isLeader(true); + } + + /** + * 添加 Raft 任务,grpc 通过该接口给 raft 发送数据 + */ + public void addTask(Task task) { + if (!isLeader()) { + KVStoreClosure closure = (KVStoreClosure) task.getDone(); + closure.setError(Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.NOT_LEADER).build()); + closure.run(new Status(RaftError.EPERM, "Not leader")); + return; + } + this.raftNode.apply(task); + } + + public void addStateListener(RaftStateListener listener) { + this.stateMachine.addStateListener(listener); + } + + public void addTaskHandler(RaftTaskHandler handler) { + this.stateMachine.addTaskHandler(handler); + } + + public PDConfig.Raft getConfig() { + return this.config; + } + + public PeerId getLeader() { + return raftNode.getLeaderId(); + } + + /** + * 向 leader 发消息,获取 grpc 地址; + */ + public String getLeaderGrpcAddress() throws ExecutionException, InterruptedException { + if (isLeader()) { + return config.getGrpcAddress(); + } + + if (raftNode.getLeaderId() == null) { + waitingForLeader(10000); + } + + return raftRpcClient.getGrpcAddress(raftNode.getLeaderId().getEndpoint().toString()).get() + .getGrpcAddress(); + } + + public Metapb.Member getLocalMember() { + Metapb.Member.Builder builder = Metapb.Member.newBuilder(); + builder.setClusterId(config.getClusterId()); + builder.setRaftUrl(config.getAddress()); + builder.setDataPath(config.getDataPath()); + 
builder.setGrpcUrl(config.getGrpcAddress()); + builder.setState(Metapb.StoreState.Up); + return builder.build(); + } + + public List getMembers() { + List members = new ArrayList<>(); + + List peers = raftNode.listPeers(); + peers.addAll(raftNode.listLearners()); + var learners = new HashSet<>(raftNode.listLearners()); + + for (PeerId peerId : peers) { + Metapb.Member.Builder builder = Metapb.Member.newBuilder(); + builder.setClusterId(config.getClusterId()); + CompletableFuture future = + raftRpcClient.getGrpcAddress(peerId.getEndpoint().toString()); + + Metapb.ShardRole role = Metapb.ShardRole.Follower; + if (peerEquals(peerId, raftNode.getLeaderId())) { + role = Metapb.ShardRole.Leader; + } else if (learners.contains(peerId)) { + role = Metapb.ShardRole.Learner; + var state = getReplicatorState(peerId); + if (state != null) { + builder.setReplicatorState(state.name()); + } + } + + builder.setRole(role); + + try { + if (future.isCompletedExceptionally()) { + log.error("failed to getGrpcAddress of {}", peerId.getEndpoint().toString()); + builder.setState(Metapb.StoreState.Offline); + builder.setRaftUrl(peerId.getEndpoint().toString()); + members.add(builder.build()); + } else { + RaftRpcProcessor.GetMemberResponse response = future.get(); + builder.setState(Metapb.StoreState.Up); + builder.setRaftUrl(response.getRaftAddress()); + builder.setDataPath(response.getDatePath()); + builder.setGrpcUrl(response.getGrpcAddress()); + builder.setRestUrl(response.getRestAddress()); + members.add(builder.build()); + } + } catch (Exception e) { + log.error("failed to getGrpcAddress of {}.", peerId.getEndpoint().toString(), e); + builder.setState(Metapb.StoreState.Offline); + builder.setRaftUrl(peerId.getEndpoint().toString()); + members.add(builder.build()); + } + + } + return members; + } + + public Status changePeerList(String peerList) { + AtomicReference result = new AtomicReference<>(); + try { + String[] peers = peerList.split(",", -1); + if ((peers.length & 1) != 1) { + throw new PDException(-1, "the number of peer list must be odd."); + } + Configuration newPeers = new Configuration(); + newPeers.parse(peerList); + CountDownLatch latch = new CountDownLatch(1); + this.raftNode.changePeers(newPeers, status -> { + result.set(status); + latch.countDown(); + }); + latch.await(); + } catch (Exception e) { + log.error("failed to changePeerList to {}", peerList, e); + result.set(new Status(-1, e.getMessage())); + } + return result.get(); + } + + public PeerId waitingForLeader(long timeOut) { + PeerId leader = getLeader(); + if (leader != null) { + return leader; + } + + synchronized (this) { + leader = getLeader(); + long start = System.currentTimeMillis(); + while ((System.currentTimeMillis() - start < timeOut) && (leader == null)) { + try { + this.wait(1000); + } catch (InterruptedException e) { + log.error("Raft wait for leader exception", e); + } + leader = getLeader(); + } + return leader; + } + + } + + public Node getRaftNode() { + return raftNode; + } + + private boolean peerEquals(PeerId p1, PeerId p2) { + if (p1 == null && p2 == null) { + return true; + } + if (p1 == null || p2 == null) { + return false; + } + return Objects.equals(p1.getIp(), p2.getIp()) && Objects.equals(p1.getPort(), p2.getPort()); + } + + private Replicator.State getReplicatorState(PeerId peerId) { + var replicateGroup = getReplicatorGroup(); + if (replicateGroup == null) { + return null; + } + + ThreadId threadId = replicateGroup.getReplicator(peerId); + if (threadId == null) { + return null; + } else { + Replicator r = 
(Replicator) threadId.lock();
+            if (r == null) {
+                return Replicator.State.Probe;
+            }
+            Replicator.State result = getState(r);
+            threadId.unlock();
+            return result;
+        }
+    }
+
+    private ReplicatorGroup getReplicatorGroup() {
+        var clz = this.raftNode.getClass();
+        try {
+            var f = clz.getDeclaredField("replicatorGroup");
+            f.setAccessible(true);
+            var group = (ReplicatorGroup) f.get(this.raftNode);
+            f.setAccessible(false);
+            return group;
+        } catch (NoSuchFieldException | IllegalAccessException e) {
+            log.info("getReplicatorGroup: error {}", e.getMessage());
+            return null;
+        }
+    }
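+
+    // NOTE (editor's comment, not part of the original patch): JRaft does not
+    // expose NodeImpl#replicatorGroup or Replicator#state publicly, so the
+    // two helpers here (getReplicatorGroup above, getState below) read those
+    // private fields reflectively; both fail soft and return null if the
+    // field layout changes in a future JRaft version.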
+
+    private Replicator.State getState(Replicator r) {
+        var clz = r.getClass();
+        try {
+            var f = clz.getDeclaredField("state");
+            f.setAccessible(true);
+            var state = (Replicator.State) f.get(r);
+            f.setAccessible(false);
+            return state;
+        } catch (NoSuchFieldException | IllegalAccessException e) {
+            log.info("getState: error {}", e.getMessage());
+            return null;
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java
new file mode 100644
index 0000000000..2dfe3b3069
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.raft;
+
+import java.util.concurrent.CompletableFuture;
+
+import com.alipay.sofa.jraft.JRaftUtils;
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.option.RpcOptions;
+import com.alipay.sofa.jraft.rpc.InvokeCallback;
+import com.alipay.sofa.jraft.rpc.InvokeContext;
+import com.alipay.sofa.jraft.rpc.RaftRpcFactory;
+import com.alipay.sofa.jraft.rpc.RpcClient;
+import com.alipay.sofa.jraft.util.Endpoint;
+import com.alipay.sofa.jraft.util.RpcFactoryHelper;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class RaftRpcClient {
+    protected volatile RpcClient rpcClient;
+    private RpcOptions rpcOptions;
+
+    public synchronized boolean init(final RpcOptions rpcOptions) {
+        this.rpcOptions = rpcOptions;
+        final RaftRpcFactory factory = RpcFactoryHelper.rpcFactory();
+        this.rpcClient =
+                factory.createRpcClient(factory.defaultJRaftClientConfigHelper(this.rpcOptions));
+        return this.rpcClient.init(null);
+    }
+
+    /**
+     * Request the member info (gRPC address) of a peer.
+     */
+    public CompletableFuture<RaftRpcProcessor.GetMemberResponse>
+    getGrpcAddress(final String address) {
+        RaftRpcProcessor.GetMemberRequest request = new RaftRpcProcessor.GetMemberRequest();
+        FutureClosureAdapter<RaftRpcProcessor.GetMemberResponse> response =
+                new FutureClosureAdapter<>();
+        internalCallAsyncWithRpc(JRaftUtils.getEndPoint(address), request, response);
+        return response.future;
+    }
+
+    private <V extends RaftRpcProcessor.BaseResponse>
+    void internalCallAsyncWithRpc(final Endpoint endpoint,
+                                  final RaftRpcProcessor.BaseRequest request,
+                                  final FutureClosureAdapter<V> closure) {
+        final InvokeContext invokeCtx = null;
+        final InvokeCallback invokeCallback = new InvokeCallback() {
+
+            @Override
+            public void complete(final Object result, final Throwable err) {
+                if (err == null) {
+                    final RaftRpcProcessor.BaseResponse response =
+                            (RaftRpcProcessor.BaseResponse) result;
+                    closure.setResponse((V) response);
+                } else {
+                    closure.failure(err);
+                    closure.run(new Status(-1, err.getMessage()));
+                }
+            }
+        };
+
+        try {
+            this.rpcClient.invokeAsync(endpoint, request, invokeCtx, invokeCallback,
+                                       this.rpcOptions.getRpcDefaultTimeout());
+        } catch (final Throwable t) {
+            log.error("failed to call rpc to {}. {}", endpoint, t.getMessage());
+            closure.failure(t);
+            closure.run(new Status(-1, t.getMessage()));
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java
new file mode 100644
index 0000000000..984e59c60a
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.apache.hugegraph.pd.raft; + +import java.io.Serializable; + +import com.alipay.sofa.jraft.rpc.RpcContext; +import com.alipay.sofa.jraft.rpc.RpcProcessor; +import com.alipay.sofa.jraft.rpc.RpcServer; + +import lombok.Data; + +public class RaftRpcProcessor implements RpcProcessor { + + + private final Class requestClass; + private final RaftEngine raftEngine; + + public RaftRpcProcessor(Class requestClass, RaftEngine raftEngine) { + this.requestClass = requestClass; + this.raftEngine = raftEngine; + } + + public static void registerProcessor(final RpcServer rpcServer, RaftEngine raftEngine) { + rpcServer.registerProcessor(new RaftRpcProcessor<>(GetMemberRequest.class, raftEngine)); + } + + @Override + public void handleRequest(RpcContext rpcCtx, T request) { + if (request.magic() == BaseRequest.GET_GRPC_ADDRESS) { + rpcCtx.sendResponse(getGrpcAddress()); + } + } + + @Override + public String interest() { + return this.requestClass.getName(); + } + + private GetMemberResponse getGrpcAddress() { + GetMemberResponse rep = new GetMemberResponse(); + rep.setGrpcAddress(raftEngine.getConfig().getGrpcAddress()); + rep.setClusterId(raftEngine.getConfig().getClusterId()); + rep.setDatePath(raftEngine.getConfig().getDataPath()); + rep.setRaftAddress(raftEngine.getConfig().getAddress()); + rep.setRestAddress( + raftEngine.getConfig().getHost() + ":" + raftEngine.getConfig().getPort()); + rep.setStatus(Status.OK); + return rep; + } + + public enum Status implements Serializable { + UNKNOWN(-1, "unknown"), + OK(0, "ok"), + COMPLETE(0, "Transmission completed"), + INCOMPLETE(1, "Incomplete transmission"), + NO_PARTITION(10, "Partition not found"), + IO_ERROR(11, "io error"), + EXCEPTION(12, "exception"), + ABORT(100, "Transmission aborted"); + + private final int code; + private String msg; + + Status(int code, String msg) { + this.code = code; + this.msg = msg; + } + + public int getCode() { + return this.code; + } + + public Status setMsg(String msg) { + this.msg = msg; + return this; + } + + public boolean isOK() { + return this.code == 0; + } + } + + public abstract static class BaseRequest implements Serializable { + public static final byte GET_GRPC_ADDRESS = 0x01; + + public abstract byte magic(); + } + + @Data + public abstract static class BaseResponse implements Serializable { + private Status status; + + } + + @Data + public static class GetMemberRequest extends BaseRequest { + @Override + public byte magic() { + return GET_GRPC_ADDRESS; + } + } + + @Data + public static class GetMemberResponse extends BaseResponse { + private long clusterId; + private String raftAddress; + private String grpcAddress; + private String datePath; + private String restAddress; + } +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java new file mode 100644 index 0000000000..3c4de74e2c --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.raft; + +public interface RaftStateListener { + void onRaftLeaderChanged(); +} diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java new file mode 100644 index 0000000000..6b3815d6cc --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java @@ -0,0 +1,330 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.raft; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; +import java.util.zip.Checksum; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.springframework.util.CollectionUtils; + +import com.alipay.sofa.jraft.Closure; +import com.alipay.sofa.jraft.Iterator; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import com.alipay.sofa.jraft.core.StateMachineAdapter; +import com.alipay.sofa.jraft.entity.LeaderChangeContext; +import com.alipay.sofa.jraft.entity.LocalFileMetaOutter; +import com.alipay.sofa.jraft.error.RaftError; +import com.alipay.sofa.jraft.error.RaftException; +import com.alipay.sofa.jraft.storage.snapshot.SnapshotReader; +import com.alipay.sofa.jraft.storage.snapshot.SnapshotWriter; +import com.alipay.sofa.jraft.util.CRC64; +import com.alipay.sofa.jraft.util.Utils; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class RaftStateMachine extends StateMachineAdapter { + private static final String SNAPSHOT_DIR_NAME = "snapshot"; + private static final String SNAPSHOT_ARCHIVE_NAME = "snapshot.zip"; + private final AtomicLong leaderTerm = new AtomicLong(-1); + private final List taskHandlers; + private final List stateListeners; + + public RaftStateMachine() { + this.taskHandlers = new CopyOnWriteArrayList<>(); + this.stateListeners = new CopyOnWriteArrayList<>(); + } + + public void addTaskHandler(RaftTaskHandler handler) { + taskHandlers.add(handler); + } + + public void addStateListener(RaftStateListener listener) { + 
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java
new file mode 100644
index 0000000000..6b3815d6cc
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java
@@ -0,0 +1,330 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.raft;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.zip.Checksum;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.springframework.util.CollectionUtils;
+
+import com.alipay.sofa.jraft.Closure;
+import com.alipay.sofa.jraft.Iterator;
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.conf.Configuration;
+import com.alipay.sofa.jraft.core.StateMachineAdapter;
+import com.alipay.sofa.jraft.entity.LeaderChangeContext;
+import com.alipay.sofa.jraft.entity.LocalFileMetaOutter;
+import com.alipay.sofa.jraft.error.RaftError;
+import com.alipay.sofa.jraft.error.RaftException;
+import com.alipay.sofa.jraft.storage.snapshot.SnapshotReader;
+import com.alipay.sofa.jraft.storage.snapshot.SnapshotWriter;
+import com.alipay.sofa.jraft.util.CRC64;
+import com.alipay.sofa.jraft.util.Utils;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class RaftStateMachine extends StateMachineAdapter {
+    private static final String SNAPSHOT_DIR_NAME = "snapshot";
+    private static final String SNAPSHOT_ARCHIVE_NAME = "snapshot.zip";
+    private final AtomicLong leaderTerm = new AtomicLong(-1);
+    private final List<RaftTaskHandler> taskHandlers;
+    private final List<RaftStateListener> stateListeners;
+
+    public RaftStateMachine() {
+        this.taskHandlers = new CopyOnWriteArrayList<>();
+        this.stateListeners = new CopyOnWriteArrayList<>();
+    }
+
+    public void addTaskHandler(RaftTaskHandler handler) {
+        taskHandlers.add(handler);
+    }
+
+    public void addStateListener(RaftStateListener listener) {
+        stateListeners.add(listener);
+    }
+
+    public boolean isLeader() {
+        return this.leaderTerm.get() > 0;
+    }
+
+    @Override
+    public void onApply(Iterator iter) {
+        while (iter.hasNext()) {
+            final RaftClosureAdapter done = (RaftClosureAdapter) iter.done();
+            try {
+                KVOperation kvOp;
+                if (done != null) {
+                    kvOp = done.op;
+                } else {
+                    kvOp = KVOperation.fromByteArray(iter.getData().array());
+                }
+                for (RaftTaskHandler taskHandler : taskHandlers) {
+                    taskHandler.invoke(kvOp, done);
+                }
+                if (done != null) {
+                    done.run(Status.OK());
+                }
+            } catch (Throwable t) {
+                log.error("StateMachine meets critical error", t);
+                if (done != null) {
+                    done.run(new Status(RaftError.EINTERNAL, t.getMessage()));
+                }
+            }
+            iter.next();
+        }
+    }
+
+    @Override
+    public void onError(final RaftException e) {
+        log.error("Raft StateMachine on error", e);
+    }
+
+    @Override
+    public void onShutdown() {
+        super.onShutdown();
+    }
+
+    @Override
+    public void onLeaderStart(final long term) {
+        this.leaderTerm.set(term);
+        super.onLeaderStart(term);
+
+        log.info("Raft becomes leader");
+        Utils.runInThread(() -> {
+            if (!CollectionUtils.isEmpty(stateListeners)) {
+                stateListeners.forEach(listener -> {
+                    listener.onRaftLeaderChanged();
+                });
+            }
+        });
+    }
+
+    @Override
+    public void onLeaderStop(final Status status) {
+        this.leaderTerm.set(-1);
+        super.onLeaderStop(status);
+        log.info("Raft lost leader");
+    }
+
+    @Override
+    public void onStartFollowing(final LeaderChangeContext ctx) {
+        super.onStartFollowing(ctx);
+        Utils.runInThread(() -> {
+            if (!CollectionUtils.isEmpty(stateListeners)) {
+                stateListeners.forEach(listener -> {
+                    listener.onRaftLeaderChanged();
+                });
+            }
+        });
+    }
+
+    @Override
+    public void onStopFollowing(final LeaderChangeContext ctx) {
+        super.onStopFollowing(ctx);
+    }
+
+    @Override
+    public void onConfigurationCommitted(final Configuration conf) {
+        log.info("Raft onConfigurationCommitted {}", conf);
+    }
+
+    @Override
+    public void onSnapshotSave(final SnapshotWriter writer, final Closure done) {
+        String snapshotDir = writer.getPath() + File.separator + SNAPSHOT_DIR_NAME;
+        try {
+            FileUtils.deleteDirectory(new File(snapshotDir));
+            FileUtils.forceMkdir(new File(snapshotDir));
+        } catch (IOException e) {
+            log.error("Failed to create snapshot directory {}", snapshotDir);
+            done.run(new Status(RaftError.EIO, e.toString()));
+            return;
+        }
+
+        CountDownLatch latch = new CountDownLatch(taskHandlers.size());
+        for (RaftTaskHandler taskHandler : taskHandlers) {
+            Utils.runInThread(() -> {
+                try {
+                    KVOperation op = KVOperation.createSaveSnapshot(snapshotDir);
+                    taskHandler.invoke(op, null);
+                    log.info("Raft onSnapshotSave success");
+                } catch (PDException e) {
+                    log.error("Raft onSnapshotSave failed. {}", e.toString());
+                    done.run(new Status(RaftError.EIO, e.toString()));
+                } finally {
+                    // count down in finally, otherwise a failed handler
+                    // would leave latch.await() blocked forever
+                    latch.countDown();
+                }
+            });
+        }
+        try {
+            latch.await();
+        } catch (InterruptedException e) {
+            log.error("Raft onSnapshotSave failed. {}", e.toString());
+            done.run(new Status(RaftError.EIO, e.toString()));
+            return;
+        }
+
+        // compress
+        try {
+            compressSnapshot(writer);
+            FileUtils.deleteDirectory(new File(snapshotDir));
+        } catch (Exception e) {
+            log.error("Failed to compress snapshot directory {}, {}", snapshotDir, e.toString());
+            done.run(new Status(RaftError.EIO, e.toString()));
+            return;
+        }
+        done.run(Status.OK());
+    }
+
+    @Override
+    public boolean onSnapshotLoad(final SnapshotReader reader) {
+        if (isLeader()) {
+            log.warn("Leader is not supposed to load snapshot");
+            return false;
+        }
+        String snapshotDir = reader.getPath() + File.separator + SNAPSHOT_DIR_NAME;
+        String snapshotArchive = reader.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME;
+        // decompress the snapshot archive
+        try {
+            decompressSnapshot(reader);
+        } catch (PDException e) {
+            log.error("Failed to decompress snapshot archive {}, {}", snapshotArchive,
+                      e.toString());
+            return false;
+        }
+
+        CountDownLatch latch = new CountDownLatch(taskHandlers.size());
+        for (RaftTaskHandler taskHandler : taskHandlers) {
+            try {
+                KVOperation op = KVOperation.createLoadSnapshot(snapshotDir);
+                taskHandler.invoke(op, null);
+                log.info("Raft onSnapshotLoad success");
+                latch.countDown();
+            } catch (PDException e) {
+                log.error("Raft onSnapshotLoad failed. {}", e.toString());
+                return false;
+            }
+        }
+        try {
+            latch.await();
+        } catch (InterruptedException e) {
+            log.error("Raft onSnapshotLoad failed. {}", e.toString());
+            return false;
+        }
+
+        try {
+            // TODO: remove file from meta
+            // SnapshotReader does not expose an API to remove files from the snapshot
+            FileUtils.deleteDirectory(new File(snapshotDir));
+            File file = new File(snapshotArchive);
+            if (file.exists()) {
+                FileUtils.forceDelete(file);
+            }
+        } catch (IOException e) {
+            log.error("Failed to delete snapshot directory {} and file {}", snapshotDir,
+                      snapshotArchive);
+            return false;
+        }
+
+        return true;
+    }
+
+    private void compressSnapshot(final SnapshotWriter writer) throws PDException {
+        final Checksum checksum = new CRC64();
+        final String snapshotArchive = writer.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME;
+        try {
+            ZipUtils.compress(writer.getPath(), SNAPSHOT_DIR_NAME, snapshotArchive, checksum);
+            LocalFileMetaOutter.LocalFileMeta.Builder metaBuild =
+                    LocalFileMetaOutter.LocalFileMeta.newBuilder();
+            metaBuild.setChecksum(Long.toHexString(checksum.getValue()));
+            if (!writer.addFile(SNAPSHOT_ARCHIVE_NAME, metaBuild.build())) {
+                throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE,
+                                      "failed to add file to LocalFileMeta");
+            }
+        } catch (IOException e) {
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, e);
+        }
+    }
+
+    private void decompressSnapshot(final SnapshotReader reader) throws PDException {
+        final LocalFileMetaOutter.LocalFileMeta meta =
+                (LocalFileMetaOutter.LocalFileMeta) reader.getFileMeta(SNAPSHOT_ARCHIVE_NAME);
+        final Checksum checksum = new CRC64();
+        final String snapshotArchive = reader.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME;
+        try {
+            ZipUtils.decompress(snapshotArchive, reader.getPath(), checksum);
+            if (meta.hasChecksum()) {
+                if (!meta.getChecksum().equals(Long.toHexString(checksum.getValue()))) {
+                    throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE,
+                                          "Snapshot checksum failed");
+                }
+            }
+        } catch (IOException e) {
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e);
+        }
+    }
+
+    public static class RaftClosureAdapter implements KVStoreClosure {
+        private final KVOperation op;
+        private final KVStoreClosure closure;
+
+        public RaftClosureAdapter(KVOperation op, KVStoreClosure closure) {
+            this.op = op;
+            this.closure = closure;
+        }
+
+        public KVStoreClosure getClosure() {
+            return closure;
+        }
+
+        @Override
+        public void run(Status status) {
+            closure.run(status);
+        }
+
+        @Override
+        public Pdpb.Error getError() {
+            return null;
+        }
+
+        @Override
+        public void setError(Pdpb.Error error) {
+        }
+
+        @Override
+        public Object getData() {
+            return null;
+        }
+
+        @Override
+        public void setData(Object data) {
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java
new file mode 100644
index 0000000000..310cf0fbae
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.raft;
+
+import org.apache.hugegraph.pd.common.PDException;
+
+/**
+ * Handles the data replicated by raft.
+ */
+public interface RaftTaskHandler {
+    boolean invoke(final KVOperation op, KVStoreClosure response) throws PDException;
+}
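For illustration, a sketch of a handler that dispatches on the operation code, mirroring the style RaftKVStore (later in this patch) uses in its own invoke(); the handler class itself is hypothetical:

    package org.apache.hugegraph.pd.raft;

    import org.apache.hugegraph.pd.common.PDException;

    import lombok.extern.slf4j.Slf4j;

    // Hypothetical handler: applies each replicated KVOperation locally.
    @Slf4j
    public class PutOnlyTaskHandler implements RaftTaskHandler {

        @Override
        public boolean invoke(final KVOperation op, KVStoreClosure response) throws PDException {
            switch (op.getOp()) {
                case KVOperation.PUT:
                    // apply the replicated write to local storage here
                    log.info("apply PUT of {} bytes", op.getValue().length);
                    break;
                default:
                    log.warn("unhandled op {}", op.getOp());
            }
            return true;
        }
    }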
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java
new file mode 100644
index 0000000000..8bb417db48
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.raft;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.zip.CheckedInputStream;
+import java.util.zip.CheckedOutputStream;
+import java.util.zip.Checksum;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipInputStream;
+import java.util.zip.ZipOutputStream;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.output.NullOutputStream;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public final class ZipUtils {
+
+    public static void compress(final String rootDir, final String sourceDir,
+                                final String outputFile, final Checksum checksum) throws
+                                                                                  IOException {
+        try (final FileOutputStream fos = new FileOutputStream(outputFile);
+             final CheckedOutputStream cos = new CheckedOutputStream(fos, checksum);
+             final ZipOutputStream zos = new ZipOutputStream(new BufferedOutputStream(cos))) {
+            ZipUtils.compressDirectoryToZipFile(rootDir, sourceDir, zos);
+            zos.flush();
+            fos.getFD().sync();
+        }
+    }
+
+    private static void compressDirectoryToZipFile(final String rootDir, final String sourceDir,
+                                                   final ZipOutputStream zos) throws IOException {
+        final String dir = Paths.get(rootDir, sourceDir).toString();
+        final File[] files = new File(dir).listFiles();
+        if (files == null) {
+            // dir is not a directory or an I/O error occurred; nothing to compress
+            return;
+        }
+        for (final File file : files) {
+            final String child = Paths.get(sourceDir, file.getName()).toString();
+            if (file.isDirectory()) {
+                compressDirectoryToZipFile(rootDir, child, zos);
+            } else {
+                zos.putNextEntry(new ZipEntry(child));
+                try (final FileInputStream fis = new FileInputStream(file);
+                     final BufferedInputStream bis = new BufferedInputStream(fis)) {
+                    IOUtils.copy(bis, zos);
+                }
+            }
+        }
+    }
+
+    public static void decompress(final String sourceFile, final String outputDir,
+                                  final Checksum checksum) throws IOException {
+        try (final FileInputStream fis = new FileInputStream(sourceFile);
+             final CheckedInputStream cis = new CheckedInputStream(fis, checksum);
+             final ZipInputStream zis = new ZipInputStream(new BufferedInputStream(cis))) {
+            ZipEntry entry;
+            while ((entry = zis.getNextEntry()) != null) {
+                final String fileName = entry.getName();
+                final File entryFile = new File(Paths.get(outputDir, fileName).toString());
+                FileUtils.forceMkdir(entryFile.getParentFile());
+                try (final FileOutputStream fos = new FileOutputStream(entryFile);
+                     final BufferedOutputStream bos = new BufferedOutputStream(fos)) {
+                    IOUtils.copy(zis, bos);
+                    bos.flush();
+                    fos.getFD().sync();
+                }
+            }
+            // drain the remaining stream so the checksum covers the entire archive
+            IOUtils.copy(cis, NullOutputStream.NULL_OUTPUT_STREAM);
+        }
+    }
+}
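For illustration, the compress/decompress round trip these helpers support, matching how RaftStateMachine uses them above; the paths are placeholders and the wrapper class is hypothetical:

    package org.apache.hugegraph.pd.raft;

    import java.util.zip.Checksum;

    import com.alipay.sofa.jraft.util.CRC64;

    public class ZipUtilsRoundTrip {
        public static void main(String[] args) throws Exception {
            // Pack the "snapshot" folder under /tmp/raft into snapshot.zip ...
            Checksum outSum = new CRC64();
            ZipUtils.compress("/tmp/raft", "snapshot", "/tmp/raft/snapshot.zip", outSum);

            // ... then unpack it elsewhere and compare the two CRC64 values,
            // the same integrity check done for raft snapshots.
            Checksum inSum = new CRC64();
            ZipUtils.decompress("/tmp/raft/snapshot.zip", "/tmp/restore", inSum);
            System.out.println(outSum.getValue() == inSum.getValue());
        }
    }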
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java
new file mode 100644
index 0000000000..14c5c3a13e
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.store;
+
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.raft.KVStoreClosure;
+
+public abstract class BaseKVStoreClosure implements KVStoreClosure {
+    private Pdpb.Error error;
+    private Object data;
+
+    @Override
+    public Pdpb.Error getError() {
+        return error;
+    }
+
+    @Override
+    public void setError(Pdpb.Error error) {
+        this.error = error;
+    }
+
+    @Override
+    public Object getData() {
+        return data;
+    }
+
+    @Override
+    public void setData(Object data) {
+        this.data = data;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java
new file mode 100644
index 0000000000..d8cf954f08
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.store;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+
+public interface HgKVStore {
+    void init(PDConfig config);
+
+    void put(byte[] key, byte[] value) throws PDException;
+
+    byte[] get(byte[] key) throws PDException;
+
+    List<KV> scanPrefix(byte[] prefix);
+
+    long remove(byte[] bytes) throws PDException;
+
+    long removeByPrefix(byte[] bytes) throws PDException;
+
+    void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException;
+
+    void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException;
+
+    byte[] getWithTTL(byte[] key) throws PDException;
+
+    void removeWithTTL(byte[] key) throws PDException;
+
+    List<byte[]> getListWithTTL(byte[] key) throws PDException;
+
+    void clear() throws PDException;
+
+    void saveSnapshot(String snapshotPath) throws PDException;
+
+    void loadSnapshot(String snapshotPath) throws PDException;
+
+    List<KV> scanRange(byte[] start, byte[] end);
+
+    void close();
+}
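For illustration, the TTL contract seen through this interface; a sketch assuming HgKVStoreImpl (next file) as the implementation, where TTL entries live in an in-memory expiring cache rather than in RocksDB:

    import java.util.concurrent.TimeUnit;

    import org.apache.hugegraph.pd.store.HgKVStore;
    import org.apache.hugegraph.pd.store.HgKVStoreImpl;

    public class TtlSketch {
        public static void main(String[] args) throws Exception {
            HgKVStore store = new HgKVStoreImpl();
            // store.init(pdConfig) is assumed to have been called with a valid PDConfig

            byte[] key = "lease/node-1".getBytes();
            store.putWithTTL(key, "alive".getBytes(), 5, TimeUnit.SECONDS);

            System.out.println(store.getWithTTL(key) != null);  // true within the TTL

            Thread.sleep(6_000);
            System.out.println(store.getWithTTL(key) == null);  // true after expiry
        }
    }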
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java
new file mode 100644
index 0000000000..8ef79bbb7d
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java
@@ -0,0 +1,343 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.store;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+
+import org.rocksdb.Checkpoint;
+import org.rocksdb.Options;
+import org.rocksdb.ReadOptions;
+import org.rocksdb.RocksDB;
+import org.rocksdb.RocksDBException;
+import org.rocksdb.RocksIterator;
+import org.rocksdb.Slice;
+
+import com.alipay.sofa.jraft.util.Utils;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.primitives.Bytes;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class HgKVStoreImpl implements HgKVStore {
+    private static final ConcurrentHashMap<String, ConcurrentMap<String, Object>> CACHE =
+            new ConcurrentHashMap<>();
+    private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+    private RocksDB db;
+    private String dbPath;
+    private Options dbOptions;
+
+    @Override
+    public void init(PDConfig config) {
+        dbOptions = new Options().setCreateIfMissing(true);
+
+        final Lock writeLock = this.readWriteLock.writeLock();
+        writeLock.lock();
+        try {
+            this.dbPath = config.getDataPath() + "/rocksdb/";
+            File file = new File(this.dbPath);
+            if (!file.exists()) {
+                try {
+                    FileUtils.forceMkdir(file);
+                } catch (IOException e) {
+                    log.warn("Failed to create data file", e);
+                }
+            }
+            openRocksDB(dbPath);
+        } catch (PDException e) {
+            log.error("Failed to open data file", e);
+        } finally {
+            writeLock.unlock();
+        }
+    }
+
+    @Override
+    public void put(byte[] key, byte[] value) throws PDException {
+        final Lock readLock = this.readWriteLock.readLock();
+        readLock.lock();
+        try {
+            db.put(key, value);
+        } catch (RocksDBException e) {
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e);
+        } finally {
+            readLock.unlock();
+        }
+    }
+
+    @Override
+    public byte[] get(byte[] key) throws PDException {
+        final Lock readLock = this.readWriteLock.readLock();
+        readLock.lock();
+        try {
+            return db.get(key);
+        } catch (RocksDBException e) {
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e);
+        } finally {
+            readLock.unlock();
+        }
+    }
+
+    @Override
+    public List<KV> scanPrefix(byte[] prefix) {
+        final Lock readLock = this.readWriteLock.readLock();
+        readLock.lock();
+        try (ReadOptions options = new ReadOptions()
+                .setIterateLowerBound(new Slice(prefix))) {
+            List<KV> kvs = new ArrayList<>();
+            RocksIterator iterator = db.newIterator(options);
+            iterator.seekToFirst();
+            while (iterator.isValid() && 0 == Bytes.indexOf(iterator.key(), prefix)) {
+                kvs.add(new KV(iterator.key(), iterator.value()));
+                iterator.next();
+            }
+            return kvs;
+        } finally {
+            readLock.unlock();
+        }
+    }
+
+    @Override
+    public long remove(byte[] key) throws PDException {
+        final Lock readLock = this.readWriteLock.readLock();
+        readLock.lock();
+        try {
+            db.delete(key);
+        } catch (RocksDBException e) {
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_DEL_ERROR_VALUE, e);
+        } finally {
+            readLock.unlock();
+        }
+        return 0;
+    }
+
+    @Override
+    public long removeByPrefix(byte[] prefix) throws PDException {
+        final Lock readLock = this.readWriteLock.readLock();
+        readLock.lock();
+        try (ReadOptions options = new ReadOptions()
+                .setIterateLowerBound(new Slice(prefix))) {
+            RocksIterator iterator = db.newIterator(options);
+            iterator.seekToFirst();
+
+            while (iterator.isValid()) {
+                if (0 == Bytes.indexOf(iterator.key(), prefix)) {
+                    db.delete(iterator.key());
+                } else {
+                    break;
+                }
+                iterator.next();
+            }
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e);
+        } finally {
+            readLock.unlock();
+        }
+        return 0;
+    }
+
+    @Override
+    public void clear() throws PDException {
+        CACHE.clear();
+    }
+
+    @Override
+    public List<byte[]> getListWithTTL(byte[] key) throws PDException {
+        String storeKey = new String(key, Charset.defaultCharset());
+        List<byte[]> ts = new LinkedList<>();
+        CACHE.keySet().forEach((cacheKey) -> {
+            if (cacheKey.startsWith(storeKey)) {
+                ConcurrentMap<String, Object> map;
+                if ((map = CACHE.get(cacheKey)) == null) {
+                    return;
+                }
+                map.values().forEach((element) -> {
+                    ts.add((byte[]) element);
+                });
+            }
+        });
+        return ts;
+    }
+
+    @Override
+    public byte[] getWithTTL(byte[] key) throws PDException {
+        ConcurrentMap<String, Object> map;
+        String storeKey = new String(key, Charset.defaultCharset());
+        if ((map = CACHE.get(storeKey)) == null) {
+            return null;
+        }
+        Object value = map.get(storeKey);
+        return value == null ? null : (byte[]) value;
+    }
+
+    @Override
+    public void removeWithTTL(byte[] key) throws PDException {
+        ConcurrentMap<String, Object> map;
+        String storeKey = new String(key, Charset.defaultCharset());
+        if ((map = CACHE.get(storeKey)) == null) {
+            return;
+        }
+        map.remove(storeKey);
+    }
+
+    @Override
+    public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException {
+        this.putWithTTL(key, value, ttl, TimeUnit.SECONDS);
+    }
+
+    @Override
+    public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws
+                                                                                  PDException {
+        try {
+            // each key gets its own expiring cache whose entries vanish after the TTL
+            ConcurrentMap<String, Object> spaceNode = CacheBuilder.newBuilder()
+                                                                  .initialCapacity(200)
+                                                                  .expireAfterWrite(ttl, timeUnit)
+                                                                  .<String, Object>build()
+                                                                  .asMap();
+            String storeKey = new String(key, Charset.defaultCharset());
+            ConcurrentMap<String, Object> space = CACHE.putIfAbsent(storeKey, spaceNode);
+            if (space == null) {
+                space = spaceNode;
+            }
+            space.put(storeKey, value);
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e);
+        }
+    }
+
+    @Override
+    public void saveSnapshot(String snapshotPath) throws PDException {
+        log.info("begin save snapshot at {}", snapshotPath);
+        final Lock writeLock = this.readWriteLock.writeLock();
+        writeLock.lock();
+        try (final Checkpoint checkpoint = Checkpoint.create(this.db)) {
+            final String tempPath = Paths.get(snapshotPath) + "_temp";
+            final File tempFile = new File(tempPath);
+            FileUtils.deleteDirectory(tempFile);
+            checkpoint.createCheckpoint(tempPath);
+            final File snapshotFile = new File(snapshotPath);
+            FileUtils.deleteDirectory(snapshotFile);
+            if (!Utils.atomicMoveFile(tempFile, snapshotFile, true)) {
+                log.error("Fail to rename {} to {}", tempPath, snapshotPath);
+                throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE,
+                                      String.format("Fail to rename %s to %s", tempPath,
+                                                    snapshotPath));
+            }
+        } catch (final PDException e) {
+            throw e;
+        } catch (final Exception e) {
+            log.error("Fail to write snapshot at path: {}", snapshotPath, e);
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, e);
+        } finally {
+            writeLock.unlock();
+        }
+        log.info("saved snapshot into {}", snapshotPath);
+    }
+
+    @Override
+    public void loadSnapshot(String snapshotPath) throws PDException {
+        log.info("begin load snapshot from {}", snapshotPath);
+        final Lock writeLock = this.readWriteLock.writeLock();
+        writeLock.lock();
+        try {
+            final File snapshotFile = new File(snapshotPath);
+            if (!snapshotFile.exists()) {
+                log.error("Snapshot file {} does not exist.", snapshotPath);
+                return;
+            }
+            // close DB
+            closeRocksDB();
+            // replace rocksdb data with snapshot data
+            final File dbFile = new File(this.dbPath);
+            FileUtils.deleteDirectory(dbFile);
+            if (!Utils.atomicMoveFile(snapshotFile, dbFile, true)) {
+                log.error("Fail to rename {} to {}", snapshotPath, this.dbPath);
+                throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE,
+                                      String.format("Fail to rename %s to %s", snapshotPath,
+                                                    this.dbPath));
+            }
+            // reopen the db
+            openRocksDB(this.dbPath);
+        } catch (final PDException e) {
+            throw e;
+        } catch (final Exception e) {
+            log.error("Failed to load snapshot from {}", snapshotPath, e);
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e);
+        } finally {
+            writeLock.unlock();
+        }
+        log.info("loaded snapshot from {}", snapshotPath);
+    }
+
+    @Override
+    public List<KV> scanRange(byte[] start, byte[] end) {
+        final Lock readLock = this.readWriteLock.readLock();
+        readLock.lock();
+        try (ReadOptions options = new ReadOptions()
+                .setIterateLowerBound(new Slice(start))
+                .setIterateUpperBound(new Slice(end))) {
+            List<KV> kvs = new ArrayList<>();
+            RocksIterator iterator = db.newIterator(options);
+            iterator.seekToFirst();
+            while (iterator.isValid()) {
+                kvs.add(new KV(iterator.key(), iterator.value()));
+                iterator.next();
+            }
+            return kvs;
+        } finally {
+            readLock.unlock();
+        }
+    }
+
+    @Override
+    public void close() {
+        closeRocksDB();
+    }
+
+    private void closeRocksDB() {
+        if (this.db != null) {
+            this.db.close();
+            this.db = null;
+        }
+    }
+
+    private void openRocksDB(String dbPath) throws PDException {
+        try {
+            this.db = RocksDB.open(dbOptions, dbPath);
+        } catch (RocksDBException e) {
+            log.error("Failed to open RocksDB from {}", dbPath, e);
+            throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e);
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java
new file mode 100644
index 0000000000..fec074579d
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.store;
+
+public class KV {
+    private byte[] key;
+    private byte[] value;
+
+    public KV(byte[] key, byte[] value) {
+        this.key = key;
+        this.value = value;
+    }
+
+    public byte[] getKey() {
+        return key;
+    }
+
+    public void setKey(byte[] key) {
+        this.key = key;
+    }
+
+    public byte[] getValue() {
+        return value;
+    }
+
+    public void setValue(byte[] value) {
+        this.value = value;
+    }
+}
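For illustration, how scan results come back as KV pairs; a small sketch assuming an initialized HgKVStore, with a hypothetical helper class:

    import java.util.List;

    import org.apache.hugegraph.pd.store.HgKVStore;
    import org.apache.hugegraph.pd.store.KV;

    public class PrefixDump {
        // Prints every key/value stored under the given prefix.
        static void dumpPrefix(HgKVStore store, byte[] prefix) {
            List<KV> kvs = store.scanPrefix(prefix);
            for (KV kv : kvs) {
                System.out.println(new String(kv.getKey()) + " = " + new String(kv.getValue()));
            }
        }
    }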
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java
new file mode 100644
index 0000000000..ca86a5aacf
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java
@@ -0,0 +1,324 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.store;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.raft.KVOperation;
+import org.apache.hugegraph.pd.raft.KVStoreClosure;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.raft.RaftStateMachine;
+import org.apache.hugegraph.pd.raft.RaftTaskHandler;
+
+import com.alipay.sofa.jraft.Status;
+import com.alipay.sofa.jraft.entity.Task;
+import com.alipay.sofa.jraft.error.RaftError;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class RaftKVStore implements HgKVStore, RaftTaskHandler {
+
+    private final RaftEngine engine;
+    private final HgKVStore store;
+
+    public RaftKVStore(RaftEngine engine, HgKVStore store) {
+        this.engine = engine;
+        this.store = store;
+    }
+
+    @Override
+    public void init(PDConfig config) {
+        this.store.init(config);
+        this.engine.addTaskHandler(this);
+    }
+
+    private BaseKVStoreClosure createClosure() {
+        return new BaseKVStoreClosure() {
+            @Override
+            public void run(Status status) {
+                if (!status.isOk()) {
+                    log.error("An exception occurred while performing the RAFT, {}",
+                              status.getErrorMsg());
+                } else {
+                    log.info("RAFT done!");
+                }
+            }
+        };
+    }
+
+    @Override
+    public void put(byte[] key, byte[] value) throws PDException {
+        KVOperation operation = KVOperation.createPut(key, value);
+        try {
+            applyOperation(operation).get();
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage());
+        }
+    }
+
+    /**
+     * Reads do not need to go through raft; read the local store directly.
+     */
+    @Override
+    public byte[] get(byte[] key) throws PDException {
+        return store.get(key);
+    }
+
+    @Override
+    public List<KV> scanPrefix(byte[] prefix) {
+        return store.scanPrefix(prefix);
+    }
+
+    @Override
+    public long remove(byte[] bytes) throws PDException {
+        try {
+            applyOperation(KVOperation.createRemove(bytes)).get();
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage());
+        }
+        return 0;
+    }
+
+    @Override
+    public long removeByPrefix(byte[] bytes) throws PDException {
+        try {
+            applyOperation(KVOperation.createRemoveByPrefix(bytes)).get();
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage());
+        }
+        return 0;
+    }
+
+    @Override
+    public void clear() throws PDException {
+        try {
+            applyOperation(KVOperation.createClear()).get();
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage());
+        }
+    }
+
+    @Override
+    public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException {
+        try {
+            applyOperation(KVOperation.createPutWithTTL(key, value, ttl)).get();
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage());
+        }
+    }
+
+    @Override
+    public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws
+                                                                                  PDException {
+        try {
+            applyOperation(KVOperation.createPutWithTTL(key, value, ttl, timeUnit)).get();
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage());
+        }
+    }
+
+    @Override
+    public List<byte[]> getListWithTTL(byte[] key) throws PDException {
+        return store.getListWithTTL(key);
+    }
+
+    @Override
+    public byte[] getWithTTL(byte[] key) throws PDException {
+        return store.getWithTTL(key);
+    }
+
+    @Override
+    public void removeWithTTL(byte[] key) throws PDException {
+        try {
+            applyOperation(KVOperation.createRemoveWithTTL(key)).get();
+        } catch (Exception e) {
+            throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage());
+        }
+    }
+
+    @Override
+    public void saveSnapshot(String snapshotPath) throws PDException {
+        store.saveSnapshot(snapshotPath);
+    }
+
+    @Override
+    public void loadSnapshot(String snapshotPath) throws PDException {
+        store.loadSnapshot(snapshotPath);
+    }
+
+    @Override
+    public List<KV> scanRange(byte[] start, byte[] end) {
+        return store.scanRange(start, end);
+    }
+
+    @Override
+    public void close() {
+        store.close();
+    }
+
+    /**
+     * The actual operations that are replicated through raft.
+     */
+    private void doPut(byte[] key, byte[] value) throws PDException {
+        store.put(key, value);
+    }
+
+    public long doRemove(byte[] bytes) throws PDException {
+        return this.store.remove(bytes);
+    }
+
+    public long doRemoveByPrefix(byte[] bytes) throws PDException {
+        return this.store.removeByPrefix(bytes);
+    }
+
+    public void doRemoveWithTTL(byte[] key) throws PDException {
+        this.store.removeWithTTL(key);
+    }
+
+    public void doClear() throws PDException {
+        this.store.clear();
+    }
+
+    public void doPutWithTTL(byte[] key, byte[] value, long ttl) throws PDException {
+        this.store.putWithTTL(key, value, ttl);
+    }
+
+    public void doPutWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws
+                                                                                    PDException {
+        this.store.putWithTTL(key, value, ttl, timeUnit);
+    }
+
+    public void doSaveSnapshot(String snapshotPath) throws PDException {
+        this.store.saveSnapshot(snapshotPath);
+    }
+
+    public void doLoadSnapshot(String snapshotPath) throws PDException {
+        this.store.loadSnapshot(snapshotPath);
+    }
+
+    private <T> CompletableFuture<T> applyOperation(final KVOperation op) throws PDException {
+        CompletableFuture<T> future = new CompletableFuture<>();
+        try {
+            final Task task = new Task();
+            task.setData(ByteBuffer.wrap(op.toByteArray()));
+            task.setDone(new RaftStateMachine.RaftClosureAdapter(op, new KVStoreClosure() {
+                Object data;
+                Pdpb.Error error;
+
+                @Override
+                public Pdpb.Error getError() {
+                    return error;
+                }
+
+                @Override
+                public void setError(Pdpb.Error error) {
+                    this.error = error;
+                }
+
+                @Override
+                public Object getData() {
+                    return data;
+                }
+
+                @Override
+                public void setData(Object data) {
+                    this.data = data;
+                }
+
+                @Override
+                public void run(Status status) {
+                    if (status.isOk()) {
+                        future.complete((T) data);
+                    } else {
+                        RaftError raftError = status.getRaftError();
+                        Pdpb.ErrorType type;
+                        if (RaftError.EPERM.equals(raftError)) {
+                            type = Pdpb.ErrorType.NOT_LEADER;
+                        } else {
+                            type = Pdpb.ErrorType.UNKNOWN;
+                        }
+                        error = Pdpb.Error.newBuilder().setType(type)
+                                          .setMessage(status.getErrorMsg())
+                                          .build();
+                        future.completeExceptionally(
+                                new PDException(error.getTypeValue()));
+                    }
+                }
+            }));
+            this.engine.addTask(task);
+            return future;
+        } catch (Exception e) {
+            future.completeExceptionally(e);
+            return future;
+        }
+    }
+
+    private boolean isLeader() {
+        return this.engine.isLeader();
+    }
+
+    @Override
+    public boolean invoke(KVOperation op, KVStoreClosure response) throws PDException {
+        switch (op.getOp()) {
+            case KVOperation.GET:
+                break;
+            case KVOperation.PUT:
+                doPut(op.getKey(), op.getValue());
+                break;
+            case KVOperation.REMOVE:
+                doRemove(op.getKey());
+                break;
+            case KVOperation.PUT_WITH_TTL:
+                doPutWithTTL(op.getKey(), op.getValue(), (long) op.getArg());
+                break;
+            case KVOperation.PUT_WITH_TTL_UNIT:
+                Object[] arg = (Object[]) op.getArg();
+                doPutWithTTL(op.getKey(), op.getValue(), (long) arg[0], (TimeUnit) arg[1]);
+                break;
+            case KVOperation.REMOVE_BY_PREFIX:
+                doRemoveByPrefix(op.getKey());
+                break;
+            case KVOperation.REMOVE_WITH_TTL:
+                doRemoveWithTTL(op.getKey());
+                break;
+            case KVOperation.CLEAR:
+                doClear();
+                break;
+            case KVOperation.SAVE_SNAPSHOT:
+                doSaveSnapshot((String) op.getAttach());
+                break;
+            case KVOperation.LOAD_SNAPSHOT:
+                doLoadSnapshot((String) op.getAttach());
+                break;
+            default:
+                log.error("Unsupported op {}", op.getOp());
+        }
+        return false;
+    }
+}
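For illustration, how the pieces fit together: RaftKVStore wraps a local HgKVStore, replicates mutations through RaftEngine, and re-applies them on every peer via invoke(). A wiring sketch, assuming the RaftEngine and PDConfig are created and initialized elsewhere:

    import org.apache.hugegraph.pd.config.PDConfig;
    import org.apache.hugegraph.pd.raft.RaftEngine;
    import org.apache.hugegraph.pd.store.HgKVStore;
    import org.apache.hugegraph.pd.store.HgKVStoreImpl;
    import org.apache.hugegraph.pd.store.RaftKVStore;

    public class RaftStoreWiring {
        static HgKVStore wire(RaftEngine engine, PDConfig config) throws Exception {
            HgKVStore store = new RaftKVStore(engine, new HgKVStoreImpl());
            store.init(config);  // also registers the store as a RaftTaskHandler

            // put() now round-trips through raft: serialized as a KVOperation,
            // replicated by jraft, then applied locally in invoke()
            store.put("k".getBytes(), "v".getBytes());
            return store;
        }
    }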
diff --git a/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java
new file mode 100644
index 0000000000..2b8d708df2
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import java.util.concurrent.ExecutionException;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+
+// import org.junit.Test;
+
+public class MonitorServiceTest {
+    static PDConfig pdConfig;
+
+    @BeforeClass
+    public static void init() throws ExecutionException, InterruptedException {
+        pdConfig = new PDConfig() {{
+            this.setClusterId(100);
+            this.setPatrolInterval(1);
+        }};
+
+        //pdConfig.setEtcd(new PDConfig().new Etcd() {{
+        //    this.setAddress("http://localhost:2379");
+        //
+        //}});
+        pdConfig.setStore(new PDConfig().new Store() {{
+            this.setMaxDownTime(1);
+            this.setKeepAliveTimeout(5);
+        }});
+
+        pdConfig.setPartition(new PDConfig().new Partition() {{
+            this.setShardCount(3);
+            this.setTotalCount(10);
+        }});
+
+        clearClusterData();
+    }
+
+    public static void clearClusterData() throws ExecutionException, InterruptedException {
+        //Client client = Client.builder().endpoints(pdConfig.getEtcd().getAddress()).build();
+        //KV kvClient = client.getKVClient();
+        //
+        //ByteSequence key = ByteSequence.from("HUGEGRAPH/" + pdConfig.getClusterId(), Charset
+        //        .forName("utf-8"));
+        //CompletableFuture rsp = kvClient.delete(key, DeleteOption.newBuilder()
+        //                                                         .isPrefix(true).build());
+        //System.out.println("deleted count : " + rsp.get().getDeleted());
+        //kvClient.close();
+        //client.close();
+    }
+
+    // @Test
+    public void testPatrolStores() throws PDException, InterruptedException {
+        StoreNodeService storeService = new StoreNodeService(pdConfig);
+        PartitionService partitionService = new PartitionService(pdConfig, storeService);
+        TaskScheduleService monitorService =
+                new TaskScheduleService(pdConfig, storeService, partitionService);
+        storeService.init(partitionService);
+        partitionService.init();
+        monitorService.init();
+
+        int count = 6;
+        Metapb.Store[] stores = new Metapb.Store[count];
+        for (int i = 0; i < count; i++) {
+            Metapb.Store store = Metapb.Store.newBuilder()
+                                             .setId(0)
+                                             .setAddress(String.valueOf(i))
+                                             .setDeployPath("/data")
+                                             .addLabels(Metapb.StoreLabel.newBuilder()
+                                                                         .setKey("namespace")
+                                                                         .setValue("default")
+                                                                         .build())
+                                             .build();
+            stores[i] = storeService.register(store);
+            System.out.println("registered new store, id = " +
+                               Long.toHexString(stores[i].getId()));
+        }
+        Metapb.Graph graph = Metapb.Graph.newBuilder()
+                                         .setGraphName("defaultGH")
+                                         .setPartitionCount(10)
+                                         .build();
+        partitionService.updateGraph(graph);
+        Thread.sleep(10000);
+        count = 0;
+        count += storeService.getStores("").stream()
+                             .filter(store -> store.getState() == Metapb.StoreState.Tombstone)
+                             .count();
+
+        Assert.assertEquals(6, count);
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java
new file mode 100644
index 0000000000..5a501f6bbd
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.junit.Test;
+
+public class PartitionServiceTest {
+    @Test
+    public void testPartitionHeartbeat() {
+        List<Metapb.Shard> shardList = new ArrayList<>();
+        shardList.add(Metapb.Shard.newBuilder().setStoreId(1).build());
+        shardList.add(Metapb.Shard.newBuilder().setStoreId(2).build());
+        shardList.add(Metapb.Shard.newBuilder().setStoreId(3).build());
+        Metapb.PartitionStats stats = Metapb.PartitionStats.newBuilder()
+                                                           .addAllShard(shardList).build();
+        List<Metapb.Shard> shardList2 = new ArrayList<>(stats.getShardList());
+        Collections.shuffle(shardList2);
+        shardList2.forEach(shard -> {
+            System.out.println(shard.getStoreId());
+        });
+    }
+}
diff --git a/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java
new file mode 100644
index 0000000000..2e6e06fc9c
--- /dev/null
+++ b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java
@@ -0,0 +1,485 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.pulse.ChangeShard;
+import org.apache.hugegraph.pd.grpc.pulse.CleanPartition;
+import org.apache.hugegraph.pd.grpc.pulse.DbCompaction;
+import org.apache.hugegraph.pd.grpc.pulse.MovePartition;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange;
+import org.apache.hugegraph.pd.grpc.pulse.SplitPartition;
+import org.apache.hugegraph.pd.grpc.pulse.TransferLeader;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+
+public class StoreNodeServiceTest {
+    static PDConfig pdConfig;
+
+    @BeforeClass
+    public static void init() throws Exception {
+        String path = "tmp/unitTest";
+        deleteDirectory(new File(path));
+        pdConfig = new PDConfig() {{
+            this.setClusterId(100);
+            this.setInitialStoreList(
+                    "127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503,127.0.0.1:8504," +
+                    "127.0.0.1:8505");
+        }};
+
+        pdConfig.setStore(new PDConfig().new Store() {{
+            this.setMaxDownTime(3600);
+            this.setKeepAliveTimeout(3600);
+        }});
+
+        pdConfig.setPartition(new PDConfig().new Partition() {{
+            this.setShardCount(3);
+            this.setMaxShardsPerStore(3);
+        }});
+        pdConfig.setRaft(new PDConfig().new Raft() {{
+            this.setEnable(false);
+        }});
+        pdConfig.setDiscovery(new PDConfig().new Discovery());
+        pdConfig.setDataPath(path);
+        ConfigService configService = new ConfigService(pdConfig);
+        pdConfig = configService.loadConfig();
+    }
+
+    public static byte[] intToByteArray(int i) {
+        byte[] result = new byte[4];
+        result[0] = (byte) ((i >> 24) & 0xFF);
+        result[1] = (byte) ((i >> 16) & 0xFF);
+        result[2] = (byte) ((i >> 8) & 0xFF);
+        result[3] = (byte) (i & 0xFF);
+        return result;
+    }
+
+    public static void deleteDirectory(File dir) {
+        try {
+            FileUtils.deleteDirectory(dir);
+        } catch (IOException e) {
+            System.out.printf("Failed to delete directory %s, %s%n", dir, e.getMessage());
+        }
+    }
+
+    // @Test
+    public void testStoreNodeService() throws PDException {
+        Assert.assertEquals(pdConfig.getPartition().getTotalCount(),
+                            (long) pdConfig.getInitialStoreMap().size() *
+                            pdConfig.getPartition().getMaxShardsPerStore()
+                            / pdConfig.getPartition().getShardCount());
+        StoreNodeService storeService = new StoreNodeService(pdConfig);
+        int count = 6;
+        Metapb.Store[] stores = new Metapb.Store[count];
+        for (int i = 0; i < count; i++) {
+            Metapb.Store store = Metapb.Store.newBuilder()
+                                             .setId(0)
+                                             .setAddress("127.0.0.1:850" + i)
+                                             .setDeployPath("/data")
+                                             .addLabels(Metapb.StoreLabel.newBuilder()
+                                                                         .setKey("namespace")
+                                                                         .setValue("default")
+                                                                         .build())
+                                             .build();
+            stores[i] = storeService.register(store);
+            System.out.println("registered new store, id = " + stores[i].getId());
+        }
+        Assert.assertEquals(count, storeService.getStores("").size());
+
+        for (Metapb.Store store : stores) {
+            Metapb.StoreStats stats = Metapb.StoreStats.newBuilder()
+                                                       .setStoreId(store.getId())
+                                                       .build();
+            storeService.heartBeat(stats);
+        }
+
+        Assert.assertEquals(6, storeService.getActiveStores("").size());
+
+        Metapb.Graph graph = Metapb.Graph.newBuilder()
+                                         .setGraphName("defaultGH")
+                                         .setPartitionCount(10)
+                                         .build();
+        // allocate shards
+        List<Metapb.Shard> shards = storeService.allocShards(graph, 1);
+
+        Assert.assertEquals(3, shards.size());
+
+        Assert.assertEquals(pdConfig.getPartition().getTotalCount(),
+                            storeService.getShardGroups().size());
+        // set the leader
+        Metapb.Shard leader = Metapb.Shard.newBuilder(shards.get(0))
+                                          .setRole(Metapb.ShardRole.Leader).build();
+        shards = new ArrayList<>(shards);
+        shards.set(0, leader);
+        // increase shards
+        pdConfig.getPartition().setShardCount(5);
+
+        Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder()
+                                                        .setId(1)
+                                                        .addAllShards(shards).build();
+        shards = storeService.reallocShards(shardGroup);
+
+        Assert.assertEquals(5, shards.size());
+        // reduce shards
+        pdConfig.getPartition().setShardCount(3);
+        shards = storeService.reallocShards(shardGroup);
+        Assert.assertEquals(3, shards.size());
+        // the result contains the leader; the leader must not be removed
+        Assert.assertTrue(shards.contains(leader));
+
+        // reduce shards further
+        pdConfig.getPartition().setShardCount(1);
+        graph = Metapb.Graph.newBuilder(graph).build();
+        shards = storeService.reallocShards(shardGroup);
+        Assert.assertEquals(1, shards.size());
+        // the result contains the leader; the leader must not be removed
+        Assert.assertTrue(shards.contains(leader));
+
+        for (Metapb.Store store : stores) {
+            storeService.removeStore(store.getId());
+        }
+        Assert.assertEquals(0, storeService.getStores("").size());
+    }
+
+    // @Test
+    public void testSplitPartition() throws PDException {
+        StoreNodeService storeService = new StoreNodeService(pdConfig);
+        PartitionService partitionService = new PartitionService(pdConfig, storeService);
+
+        storeService.init(partitionService);
+        partitionService.addInstructionListener(new PartitionInstructionListener() {
+
+            @Override
+            public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws
+                                                                                         PDException {
+
+            }
+
+            @Override
+            public void transferLeader(Metapb.Partition partition,
+                                       TransferLeader transferLeader) throws PDException {
+
+            }
+
+            @Override
+            public void splitPartition(Metapb.Partition partition,
+                                       SplitPartition splitPartition) throws PDException {
+                splitPartition.getNewPartitionList().forEach(p -> {
+                    System.out.println("SplitPartition " + p.getId() + " " + p.getStartKey() + "," +
+                                       p.getEndKey());
+                });
+            }
+
+            @Override
+            public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws
+                                                                                            PDException {
+
+            }
+
+            @Override
+            public void movePartition(Metapb.Partition partition,
+                                      MovePartition movePartition) throws PDException {
+
+            }
+
+            @Override
+            public void cleanPartition(Metapb.Partition partition,
+                                       CleanPartition cleanPartition) throws PDException {
+
+            }
+
+            @Override
+            public void changePartitionKeyRange(Metapb.Partition partition,
+                                                PartitionKeyRange partitionKeyRange) throws
+                                                                                     PDException {
+
+            }
+        });
+        int count = 6;
+        Metapb.Store[] stores = new Metapb.Store[count];
+        for (int i = 0; i < count; i++) {
+            Metapb.Store store = Metapb.Store.newBuilder()
+                                             .setId(0)
+                                             .setAddress("127.0.0.1:850" + i)
+                                             .setDeployPath("/data")
+                                             .addLabels(Metapb.StoreLabel.newBuilder()
+                                                                         .setKey("namespace")
+                                                                         .setValue("default")
+                                                                         .build())
+                                             .build();
+            stores[i] = storeService.register(store);
+            System.out.println("registered new store, id = " +
+                               Long.toHexString(stores[i].getId()));
+        }
+        Assert.assertEquals(count, storeService.getStores().size());
+
+        Metapb.Graph graph = Metapb.Graph.newBuilder()
+                                         .setGraphName("defaultGH")
+                                         .build();
+        Metapb.PartitionShard ptShard =
+                partitionService.getPartitionByCode(graph.getGraphName(), 0);
+        System.out.println(ptShard.getPartition().getId());
+        {
+            Metapb.Partition pt = ptShard.getPartition();
+            System.out.println(pt.getId() + " " + pt.getStartKey() + "," + pt.getEndKey());
+        }
+
+        Assert.assertEquals(6, storeService.getShardGroups().size());
+        // storeService.splitShardGroups(ptShard.getPartition().getId(), 4);
+        Assert.assertEquals(9, storeService.getShardGroups().size());
+        storeService.getShardGroups().forEach(shardGroup -> {
+            System.out.println("shardGroup id = " + shardGroup.getId());
+        });
+    }
+
+    // @Test
+    public void testPartitionService() throws PDException, ExecutionException,
+                                              InterruptedException {
+        StoreNodeService storeService = new StoreNodeService(pdConfig);
+        int count = 6;
+        Metapb.Store[] stores = new Metapb.Store[count];
+        for (int i = 0; i < count; i++) {
+            Metapb.Store store = Metapb.Store.newBuilder()
+                                             .setId(0)
+                                             .setAddress(String.valueOf(i))
+                                             .setDeployPath("/data")
+                                             .addLabels(Metapb.StoreLabel.newBuilder()
+                                                                         .setKey("namespace")
+                                                                         .setValue("default")
+                                                                         .build())
+                                             .build();
+            stores[i] = storeService.register(store);
+            System.out.println("registered new store, id = " +
+                               Long.toHexString(stores[i].getId()));
+        }
+        Assert.assertEquals(count, storeService.getStores("").size());
+
+        PartitionService partitionService = new PartitionService(pdConfig, storeService);
+
+        Metapb.Graph graph = Metapb.Graph.newBuilder()
+                                         .setGraphName("defaultGH")
+                                         .setPartitionCount(10)
+                                         .build();
+        // request partitions
+        Metapb.PartitionShard[] partitions = new Metapb.PartitionShard[10];
+        for (int i = 0; i < partitions.length; i++) {
+            partitions[i] =
+                    partitionService.getPartitionShard(graph.getGraphName(), intToByteArray(i));
+            Assert.assertEquals(3, storeService.getShardGroup(i).getShardsCount());
+        }
+        System.out.println(
+                "partition count: " + partitionService.getPartitions(graph.getGraphName()).size());
+
+        int[] caseNo = {0}; // 1: test adding shards, 2: test store going offline
+
+        Metapb.Shard leader = null;
+        int[] finalCaseNo = caseNo;
+
+        partitionService.addInstructionListener(new PartitionInstructionListener() {
+
+            @Override
+            public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws
+                                                                                         PDException {
+                switch (finalCaseNo[0]) {
+                    case 2:
+                        Assert.assertEquals(5, storeService.getShardGroup(partition.getId())
+                                                           .getShardsCount());
+                        break;
+                    case 3:
+                        storeService.getShardGroup(partition.getId()).getShardsList()
+                                    .forEach(shard -> {
+                                        Assert.assertNotEquals(shard.getStoreId(),
+                                                               stores[0].getId());
+                                    });
+                        break;
+                }
+            }
+
+            @Override
+            public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) {
+
+            }
+
+            @Override
+            public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) {
+            }
+
+            @Override
+            public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws
+                                                                                            PDException {
+
+            }
+
+            @Override
+            public void movePartition(Metapb.Partition partition,
+                                      MovePartition movePartition) throws PDException {
+
+            }
+
+            @Override
+            public void cleanPartition(Metapb.Partition partition,
+                                       CleanPartition cleanPartition) throws PDException {
+
+            }
+
+            @Override
+            public void changePartitionKeyRange(Metapb.Partition partition,
+                                                PartitionKeyRange partitionKeyRange)
+                    throws PDException {
+
+            }
+        });
+        Metapb.Partition partition = partitions[0].getPartition();
+        leader = Metapb.Shard.newBuilder(
+                storeService.getShardGroup(partition.getId()).getShardsList().get(0)).build();
+        Metapb.Shard finalLeader = leader;
+        partitionService.addStatusListener(new PartitionStatusListener() {
+            @Override
+            public void onPartitionChanged(Metapb.Partition partition,
+                                           Metapb.Partition newPartition) {
+
+            }
+
+            @Override
+            public void onPartitionRemoved(Metapb.Partition partition) {
+
+            }
+        });
+        // test updating the graph
+        caseNo[0] = 1;
+        partitionService.updateGraph(graph);
+        for (int i = 0; i < partitions.length; i++) {
+            partitions[i] =
+                    partitionService.getPartitionShard(graph.getGraphName(), intToByteArray(i));
+            Assert.assertEquals(3, storeService.getShardGroup(i).getShardsCount());
+        }
+
+        graph = Metapb.Graph.newBuilder(graph)
+                            .setGraphName("defaultGH")
+                            .setPartitionCount(10)
+                            .build();
+        caseNo[0] = 2;
+        partitionService.updateGraph(graph);
+
+        // test store going offline
+        caseNo[0] = 3;
+        partitionService.storeOffline(stores[0]);
+
+        Metapb.PartitionStats stats = Metapb.PartitionStats.newBuilder()
+                                                           .addGraphName(partition.getGraphName())
+                                                           .setId(partition.getId())
+                                                           .setLeader(
+                                                                   Metapb.Shard.newBuilder(leader)
+                                                                               .setRole(
+                                                                                       Metapb.ShardRole.Leader))
+                                                           .build();
+        // test leader drift
+        caseNo[0] = 4;
+        partitionService.partitionHeartbeat(stats);
+        AtomicReference<Metapb.Shard> shard = new AtomicReference<>();
+        Metapb.PartitionShard ss =
+                partitionService.getPartitionShardById(partition.getGraphName(), partition.getId());
+        storeService.getShardList(partition.getId()).forEach(s -> {
+            if (s.getRole() == Metapb.ShardRole.Leader) {
+                Assert.assertNull(shard.get());
+                shard.set(s);
+            }
+        });
+
+        Assert.assertEquals(leader.getStoreId(), shard.get().getStoreId());
+    }
+
+    // @Test
+    public void testMergeGraphParams() throws PDException {
+        StoreNodeService storeService = new StoreNodeService(pdConfig);
+        PartitionService partitionService = new PartitionService(pdConfig, storeService);
+
+        Metapb.Graph dfGraph = Metapb.Graph.newBuilder()
+                                           .setPartitionCount(
+                                                   pdConfig.getPartition().getTotalCount())
+                                           .build();
+
+        Metapb.Graph graph1 = Metapb.Graph.newBuilder()
+                                          .setGraphName("test")
+                                          .setPartitionCount(20)
+                                          .build();
+
+        Metapb.Graph graph2 = Metapb.Graph.newBuilder()
+                                          .setGraphName("test")
+                                          .setPartitionCount(7).build();
+        Metapb.Graph graph3 = Metapb.Graph.newBuilder()
+                                          .setGraphName("test")
+                                          .build();
+        Metapb.Graph graph4 = Metapb.Graph.newBuilder()
+                                          .setGraphName("test")
+                                          .build();
+
+        Metapb.Graph graph = Metapb.Graph.newBuilder(dfGraph).mergeFrom(graph2).build();
+        Assert.assertEquals(graph2.getGraphName(), graph.getGraphName());
+        Assert.assertEquals(graph2.getPartitionCount(), graph.getPartitionCount());
+
+        graph = Metapb.Graph.newBuilder(dfGraph).mergeFrom(graph3).build();
+        Assert.assertEquals(graph3.getGraphName(), graph.getGraphName());
+        Assert.assertEquals(dfGraph.getPartitionCount(), graph.getPartitionCount());
+
+        graph = Metapb.Graph.newBuilder(dfGraph).mergeFrom(graph4).build();
+        Assert.assertEquals(graph4.getGraphName(), graph.getGraphName());
+        Assert.assertEquals(dfGraph.getPartitionCount(), graph.getPartitionCount());
+    }
+
+    // @Test
+    public void test() {
+        int[] n = new int[3];
+
+        if (++n[2] > 1) {
+            System.out.println(n[2]);
+        }
+        if (++n[2] > 1) {
+            System.out.println(n[2]);
+        }
+        if (++n[2] > 1) {
+            System.out.println(n[2]);
+        }
+    }
+}
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd; + +import java.io.File; + +public class UnitTestBase { + public static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : dir.listFiles()) { + deleteDir(file); + } + } + return dir.delete(); + } +} diff --git a/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java new file mode 100644 index 0000000000..7a13791bd7 --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.common; + +// import org.junit.Test; + +import java.nio.charset.StandardCharsets; + +import org.apache.hugegraph.pd.common.PartitionUtils; + +public class PartitionUtilsTest { + + // @Test + public void testHashCode() { + int partCount = 10; + int partSize = PartitionUtils.MAX_VALUE / partCount + 1; + int[] counter = new int[partCount]; + for (int i = 0; i < 10000; i++) { + String s = String.format("BATCH-GET-UNIT-%02d", i); + int c = PartitionUtils.calcHashcode(s.getBytes(StandardCharsets.UTF_8)); + + counter[c / partSize]++; + + } + + for (int i = 0; i < counter.length; i++) { + System.out.println(i + " " + counter[i]); + } + } + + +} diff --git a/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java new file mode 100644 index 0000000000..5b27ff757a --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.store; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Paths; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.junit.Assert; +import org.junit.BeforeClass; + +public class HgKVStoreImplTest { + static final String testPath = "tmp/test"; + static PDConfig pdConfig; + + @BeforeClass + public static void init() throws IOException { + File testFile = new File(testPath); + if (testFile.exists()) { + FileUtils.deleteDirectory(testFile); + } + FileUtils.forceMkdir(testFile); + pdConfig = new PDConfig() {{ + setDataPath(testPath); + }}; + } + + // @Test + public void Test() throws PDException { + HgKVStore kvStore = new HgKVStoreImpl(); + kvStore.init(pdConfig); + + { + byte[] key = "hello".getBytes(); + byte[] value = "pd".getBytes(); + kvStore.put(key, value); + } + for (int i = 0; i < 100; i++) { + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + + Assert.assertEquals(100, kvStore.scanPrefix("k".getBytes()).size()); + + kvStore.removeByPrefix("k".getBytes()); + Assert.assertEquals(0, kvStore.scanPrefix("k".getBytes()).size()); + } + + // @Test + public void TestSnapshot() throws PDException { + HgKVStore kvStore = new HgKVStoreImpl(); + kvStore.init(pdConfig); + + // put 100 data + for (int i = 0; i < 100; i++) { + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + Assert.assertEquals(100, kvStore.scanPrefix("k".getBytes()).size()); + + // save snapshot + String snapshotPath = Paths.get(testPath, "snapshot").toString(); + kvStore.saveSnapshot(snapshotPath); + + // put another 100 data + for (int i = 100; i < 200; i++) { + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + Assert.assertEquals(200, kvStore.scanPrefix("k".getBytes()).size()); + + // load snapshot + kvStore.loadSnapshot(snapshotPath); + Assert.assertEquals(100, kvStore.scanPrefix("k".getBytes()).size()); + + // put another 100 data + for (int i = 100; i < 200; i++) { + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + Assert.assertEquals(200, kvStore.scanPrefix("k".getBytes()).size()); + } +} diff --git a/hugegraph-pd/hg-pd-core/src/test/resources/log4j2.xml b/hugegraph-pd/hg-pd-core/src/test/resources/log4j2.xml new file mode 100644 index 0000000000..a26fe62d9d --- /dev/null +++ b/hugegraph-pd/hg-pd-core/src/test/resources/log4j2.xml @@ -0,0 +1,139 @@ + + + + + + + + logs + hugegraph-pd + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hugegraph-pd/hg-pd-dist/pom.xml b/hugegraph-pd/hg-pd-dist/pom.xml new file mode 100644 index 0000000000..e2d1da0a74 --- /dev/null +++ 
b/hugegraph-pd/hg-pd-dist/pom.xml @@ -0,0 +1,151 @@ + + + + + + hugegraph-pd + org.apache.hugegraph + ${revision} + ../pom.xml + + 4.0.0 + + hg-pd-dist + + + ${project.parent.basedir}/dist + bash + ${project.basedir}/src/assembly + ${assembly.dir}/descriptor + ${assembly.dir}/static + hugegraph-pd-${project.parent.version} + hugegraph-pd + + + + + org.apache.hugegraph + hugegraph-pd + ${revision} + + + + + + maven-assembly-plugin + 2.4 + + + assembly-hugegraph-pd + package + + single + + + false + false + ${dist.dir} + + + ${assembly.descriptor.dir}/server-assembly.xml + + + ${final.name} + + + + + + maven-clean-plugin + + + + ${dist.dir} + + + + + + initialize + + clean + + + + + + maven-antrun-plugin + 1.3 + + + package + + run + + + + + + + + + tar zcvf \ + ${dist.dir}/${final.name}.tar.gz \ + ${final.name} || exit 1 + rm -f ${dist.dir}/dist.sh + echo + echo "HugeGraph dist tar.gz available at: + ${dist.dir}/${final.name}.tar.gz" + echo + + + + + + + + + + + + + + + + + ant-contrib + ant-contrib + 1.0b3 + + + ant + ant + + + + + + + + + + diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/descriptor/server-assembly.xml b/hugegraph-pd/hg-pd-dist/src/assembly/descriptor/server-assembly.xml new file mode 100644 index 0000000000..7aed34c2d2 --- /dev/null +++ b/hugegraph-pd/hg-pd-dist/src/assembly/descriptor/server-assembly.xml @@ -0,0 +1,57 @@ + + + + distribution + false + + + dir + + + + + ${assembly.static.dir}/bin + bin + + * + + 755 + + + ${assembly.static.dir}/conf + conf + + * + + + + + + + + /lib + false + runtime + false + + org.apache.hugegraph:${executable.jar.name}:jar:* + + + + + \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh b/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh new file mode 100644 index 0000000000..df44dd1078 --- /dev/null +++ b/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh @@ -0,0 +1,123 @@ +#!/bin/bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +if [ -z "$GC_OPTION" ];then + GC_OPTION="" +fi +if [ -z "$USER_OPTION" ];then + USER_OPTION="" +fi + +while getopts "g:j:v" arg; do + case ${arg} in + g) GC_OPTION="$OPTARG" ;; + j) USER_OPTION="$OPTARG" ;; + ?) echo "USAGE: $0 [-g g1] [-j xxx] [-v]" && exit 1 ;; + esac +done + +function abs_path() { + SOURCE="${BASH_SOURCE[0]}" + while [ -h "$SOURCE" ]; do + DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + SOURCE="$(readlink "$SOURCE")" + [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" + done + echo "$( cd -P "$( dirname "$SOURCE" )" && pwd )" +} + +BIN=$(abs_path) +TOP="$(cd "$BIN"/../ && pwd)" +CONF="$TOP/conf" +LIB="$TOP/lib" +LOGS="$TOP/logs" +OUTPUT=${LOGS}/hugegraph-pd-stdout.log +PID_FILE="$BIN/pid" + +. 
"$BIN"/util.sh + +mkdir -p ${LOGS} + +# The maximum and minium heap memory that service can use +MAX_MEM=$((32 * 1024)) +MIN_MEM=$((1 * 512)) +EXPECT_JDK_VERSION=11 + +# Change to $BIN's parent +cd "${TOP}" || exit + +# Find Java +if [ "$JAVA_HOME" = "" ]; then + JAVA="java" +else + JAVA="$JAVA_HOME/bin/java" +fi + +# check jdk version +JAVA_VERSION=$($JAVA -version 2>&1 | awk 'NR==1{gsub(/"/,""); print $3}' | awk -F'_' '{print $1}') +if [[ $? -ne 0 || $JAVA_VERSION < $EXPECT_JDK_VERSION ]]; then + echo "Please make sure that the JDK is installed and the version >= $EXPECT_JDK_VERSION" >> ${OUTPUT} + exit 1 +fi + +# Set Java options +if [ "$JAVA_OPTIONS" = "" ]; then + XMX=$(calc_xmx $MIN_MEM $MAX_MEM) + if [ $? -ne 0 ]; then + echo "Failed to start HugeGraphPDServer, requires at least ${MIN_MEM}m free memory" \ + >> ${OUTPUT} + exit 1 + fi + JAVA_OPTIONS="-Xms${MIN_MEM}m -Xmx${XMX}m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${LOGS} ${USER_OPTION}" + + # Rolling out detailed GC logs + #JAVA_OPTIONS="${JAVA_OPTIONS} -XX:+UseGCLogFileRotation -XX:GCLogFileSize=10M -XX:NumberOfGCLogFiles=3 \ + # -Xloggc:./logs/gc.log -XX:+PrintHeapAtGC -XX:+PrintGCDetails -XX:+PrintGCDateStamps" +fi + +# Using G1GC as the default garbage collector (Recommended for large memory machines) +case "$GC_OPTION" in + g1) + echo "Using G1GC as the default garbage collector" + JAVA_OPTIONS="${JAVA_OPTIONS} -XX:+UseG1GC -XX:+ParallelRefProcEnabled \ + -XX:InitiatingHeapOccupancyPercent=50 -XX:G1RSetUpdatingPauseTimePercent=5" + ;; + "") ;; + *) + echo "Unrecognized gc option: '$GC_OPTION', only support 'g1' now" >> ${OUTPUT} + exit 1 +esac + +#if [ "${JMX_EXPORT_PORT}" != "" ] && [ ${JMX_EXPORT_PORT} -ne 0 ] ; then +# JAVA_OPTIONS="${JAVA_OPTIONS} -javaagent:${LIB}/jmx_prometheus_javaagent-0.16.1.jar=${JMX_EXPORT_PORT}:${CONF}/jmx_exporter.yml" +#fi +if [ $(ps -ef|grep -v grep| grep java|grep -cE ${CONF}) -ne 0 ]; then + echo "HugeGraphPDServer is already running..." + exit 0 +fi +echo "Starting HugeGraphPDServer..." + +# Turn on security check +exec ${JAVA} ${JAVA_OPTIONS} -jar -Dspring.config.location=${CONF}/application.yml \ + ${LIB}/hugegraph-pd-*.jar >> ${OUTPUT} 2>&1 & + +PID="$!" +# Write pid to file +echo "$PID" > "$PID_FILE" +echo "[+pid] $PID" diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh b/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh new file mode 100644 index 0000000000..55499ee59c --- /dev/null +++ b/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh b/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh
new file mode 100644
index 0000000000..55499ee59c
--- /dev/null
+++ b/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to You under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+abs_path() {
+    SOURCE="${BASH_SOURCE[0]}"
+    while [ -h "$SOURCE" ]; do
+        DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+        SOURCE="$(readlink "$SOURCE")"
+        [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
+    done
+    echo "$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+}
+
+BIN=$(abs_path)
+TOP="$(cd "$BIN"/../ && pwd)"
+
+. "$BIN"/util.sh
+
+PID_FILE=$BIN/pid
+SERVER_SHUTDOWN_TIMEOUT_S=30
+
+if [ ! -f ${PID_FILE} ]; then
+    echo "The pid file $PID_FILE doesn't exist"
+    exit 0
+fi
+
+PID=`cat $PID_FILE`
+kill_process_and_wait "HugeGraphPDServer" "$PID" "$SERVER_SHUTDOWN_TIMEOUT_S"
+
+if [ $? -eq 0 ]; then
+    rm "$PID_FILE"
+fi
+
diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/util.sh b/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/util.sh
new file mode 100644
index 0000000000..d32871f13f
--- /dev/null
+++ b/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/util.sh
@@ -0,0 +1,372 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to You under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+function command_available() {
+    local cmd=$1
+    # Test the exit status of `command -v` directly; wrapping the redirected
+    # output in [ $(...) ] always evaluated to false, so the old version
+    # reported every command as available
+    if command -v $cmd >/dev/null 2>&1; then
+        return 0
+    else
+        return 1
+    fi
+}
+
+# read a property from a .properties file
+function read_property() {
+    # file path
+    file_name=$1
+    # replace "." to "\."
+    property_name=`echo $2 | sed 's/\./\\\./g'`
+    cat $file_name | sed -n -e "s/^[ ]*//g;/^#/d;s/^$property_name=//p" | tail -1
+}
+
+function write_property() {
+    local file=$1
+    local key=$2
+    local value=$3
+
+    local os=`uname`
+    case $os in
+        # Note: in mac os should use sed -i '' "xxx" to replace string,
+        # otherwise prompt 'command c expects \ followed by text'.
+        # See http://www.cnblogs.com/greedy-day/p/5952899.html
+        Darwin) sed -i '' "s!$key=.*!$key=$value!g" "$file" ;;
+        *) sed -i "s!$key=.*!$key=$value!g" "$file" ;;
+    esac
+}
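+# Illustrative usage of the two property helpers above (the file name and the
+# key are hypothetical, not part of this repository):
+#   port=$(read_property conf/pd.properties "grpc.port")
+#   write_property conf/pd.properties "grpc.port" 8686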
+function parse_yaml() {
+    local file=$1
+    local version=$2
+    local module=$3
+
+    cat $file | tr -d '\n {}'| awk -F',+|:' '''{
+        pre="";
+        for(i=1; i<=NF; ) {
+            if(match($i, /version/)) {
+                pre=$i;
+                i+=1
+            } else {
+                result[pre"-"$i] = $(i+1);
+                i+=2
+            }
+        }
+    } END {for(e in result) {print e": "result[e]}}''' \
+    | grep "$version-$module" | awk -F':' '{print $2}' | tr -d ' ' && echo
+}
+
+function process_num() {
+    num=`ps -ef | grep $1 | grep -v grep | wc -l`
+    return $num
+}
+
+function process_id() {
+    pid=`ps -ef | grep $1 | grep -v grep | awk '{print $2}'`
+    return $pid
+}
+
+# check whether the port of the rest server is occupied
+function check_port() {
+    local port=`echo $1 | awk -F':' '{print $3}'`
+    if ! command_available "lsof"; then
+        echo "Required lsof but it is unavailable"
+        exit 1
+    fi
+    lsof -i :$port >/dev/null
+    if [ $? -eq 0 ]; then
+        echo "The port $port has already been used"
+        exit 1
+    fi
+}
+
+function crontab_append() {
+    local job="$1"
+    crontab -l | grep -F "$job" >/dev/null 2>&1
+    if [ $? -eq 0 ]; then
+        return 1
+    fi
+    (crontab -l ; echo "$job") | crontab -
+}
+
+function crontab_remove() {
+    local job="$1"
+    # check exist before remove
+    crontab -l | grep -F "$job" >/dev/null 2>&1
+    if [ $? -eq 1 ]; then
+        return 0
+    fi
+
+    crontab -l | grep -Fv "$job" | crontab -
+
+    # Check exist after remove
+    crontab -l | grep -F "$job" >/dev/null 2>&1
+    if [ $? -eq 0 ]; then
+        return 1
+    else
+        return 0
+    fi
+}
+
+# wait_for_startup pid server_name server_url timeout_s
+function wait_for_startup() {
+    local pid="$1"
+    local server_name="$2"
+    local server_url="$3"
+    local timeout_s="$4"
+
+    local now_s=`date '+%s'`
+    local stop_s=$(( $now_s + $timeout_s ))
+
+    local status
+
+    echo -n "Connecting to $server_name ($server_url)"
+    while [ $now_s -le $stop_s ]; do
+        echo -n .
+        process_status "$server_name" "$pid" >/dev/null
+        if [ $? -eq 1 ]; then
+            echo "Starting $server_name failed"
+            return 1
+        fi
+
+        status=`curl -o /dev/null -s -k -w %{http_code} $server_url`
+        if [[ $status -eq 200 || $status -eq 401 ]]; then
+            echo "OK"
+            echo "Started [pid $pid]"
+            return 0
+        fi
+        sleep 2
+        now_s=`date '+%s'`
+    done
+
+    echo "The operation timed out when attempting to connect to $server_url" >&2
+    return 1
+}
+
+function free_memory() {
+    local free=""
+    local os=`uname`
+    if [ "$os" == "Linux" ]; then
+        local mem_free=`cat /proc/meminfo | grep -w "MemFree" | awk '{print $2}'`
+        local mem_buffer=`cat /proc/meminfo | grep -w "Buffers" | awk '{print $2}'`
+        local mem_cached=`cat /proc/meminfo | grep -w "Cached" | awk '{print $2}'`
+        if [[ "$mem_free" == "" || "$mem_buffer" == "" || "$mem_cached" == "" ]]; then
+            echo "Failed to get free memory"
+            exit 1
+        fi
+        free=`expr $mem_free + $mem_buffer + $mem_cached`
+        free=`expr $free / 1024`
+    elif [ "$os" == "Darwin" ]; then
+        local pages_free=`vm_stat | awk '/Pages free/{print $0}' | awk -F'[:.]+' '{print $2}' | tr -d " "`
+        local pages_inactive=`vm_stat | awk '/Pages inactive/{print $0}' | awk -F'[:.]+' '{print $2}' | tr -d " "`
+        local pages_available=`expr $pages_free + $pages_inactive`
+        free=`expr $pages_available \* 4096 / 1024 / 1024`
+    else
+        echo "Unsupported operating system $os"
+        exit 1
+    fi
+    echo $free
+}
+
+function calc_xmx() {
+    local min_mem=$1
+    local max_mem=$2
+    # Get machine available memory
+    local free=`free_memory`
+    local half_free=$((free / 2))
+
+    local xmx=$min_mem
+    if [[ "$free" -lt "$min_mem" ]]; then
+        exit 1
+    elif [[ "$half_free" -ge "$max_mem" ]]; then
+        xmx=$max_mem
+    elif [[ "$half_free" -lt "$min_mem" ]]; then
+        xmx=$min_mem
+    else
+        xmx=$half_free
+    fi
+    echo $xmx
+}
+
+function remove_with_prompt() {
+    local path=$1
+    local tips=""
+
+    if [ -d "$path" ]; then
+        tips="Remove directory '$path' and all sub files [y/n]?"
+    elif [ -f "$path" ]; then
+        tips="Remove file '$path' [y/n]?"
+    else
+        return 0
+    fi
+
+    read -p "$tips " yn
+    case $yn in
+        [Yy]* ) rm -rf "$path";;
+        * ) ;;
+    esac
+}
+
+function ensure_path_writable() {
+    local path=$1
+    # Ensure the input path exists
+    if [ ! -d "${path}" ]; then
+        mkdir -p ${path}
+    fi
+    # Check for write permission
+    if [ ! 
-w "${path}" ]; then + echo "No write permission on directory ${path}" + exit 1 + fi +} + +function get_ip() { + local os=`uname` + local loopback="127.0.0.1" + local ip="" + case $os in + Linux) + if command_available "ifconfig"; then + ip=`ifconfig | grep 'inet addr:' | grep -v "$loopback" | cut -d: -f2 | awk '{ print $1}'` + elif command_available "ip"; then + ip=`ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | awk -F"/" '{print $1}'` + else + ip=$loopback + fi + ;; + FreeBSD|OpenBSD|Darwin) + if command_available "ifconfig"; then + ip=`ifconfig | grep -E 'inet.[0-9]' | grep -v "$loopback" | awk '{ print $2}'` + else + ip=$loopback + fi + ;; + SunOS) + if command_available "ifconfig"; then + ip=`ifconfig -a | grep inet | grep -v "$loopback" | awk '{ print $2} '` + else + ip=$loopback + fi + ;; + *) ip=$loopback;; + esac + echo $ip +} + +function download() { + local path=$1 + local link_url=$2 + + if command_available "wget"; then + wget --help | grep -q '\--show-progress' && progress_opt="-q --show-progress" || progress_opt="" + wget ${link_url} -P ${path} $progress_opt + elif command_available "curl"; then + curl ${link_url} -o ${path}/${link_url} + else + echo "Required wget or curl but they are unavailable" + exit 1 + fi +} + +function ensure_package_exist() { + local path=$1 + local dir=$2 + local tar=$3 + local link=$4 + + if [ ! -d ${path}/${dir} ]; then + if [ ! -f ${path}/${tar} ]; then + echo "Downloading the compressed package '${tar}'" + download ${path} ${link} + if [ $? -ne 0 ]; then + echo "Failed to download, please ensure the network is available and link is valid" + exit 1 + fi + echo "[OK] Finished download" + fi + echo "Unzip the compressed package '$tar'" + tar -zxvf ${path}/${tar} -C ${path} >/dev/null 2>&1 + if [ $? -ne 0 ]; then + echo "Failed to unzip, please check the compressed package" + exit 1 + fi + echo "[OK] Finished unzip" + fi +} + +########################################################################### + +function wait_for_shutdown() { + local process_name="$1" + local pid="$2" + local timeout_s="$3" + + local now_s=`date '+%s'` + local stop_s=$(( $now_s + $timeout_s )) + + echo -n "Killing $process_name(pid $pid)" >&2 + while [ $now_s -le $stop_s ]; do + echo -n . + process_status "$process_name" "$pid" >/dev/null + if [ $? -eq 1 ]; then + echo "OK" + return 0 + fi + sleep 2 + now_s=`date '+%s'` + done + echo "$process_name shutdown timeout(exceeded $timeout_s seconds)" >&2 + return 1 +} + +function process_status() { + local process_name="$1" + local pid="$2" + + ps -p "$pid" + if [ $? 
-eq 0 ]; then
+        echo "$process_name is running with pid $pid"
+        return 0
+    else
+        echo "The process $process_name does not exist"
+        return 1
+    fi
+}
+
+function kill_process() {
+    local process_name="$1"
+    local pid="$2"
+
+    if [ -z "$pid" ]; then
+        echo "The process $process_name does not exist"
+        return 0
+    fi
+
+    case "`uname`" in
+        CYGWIN*) taskkill /F /PID "$pid" ;;
+        *) kill "$pid" ;;
+    esac
+}
+
+function kill_process_and_wait() {
+    local process_name="$1"
+    local pid="$2"
+    local timeout_s="$3"
+
+    kill_process "$process_name" "$pid"
+    wait_for_shutdown "$process_name" "$pid" "$timeout_s"
+}
diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml
new file mode 100644
index 0000000000..7859670420
--- /dev/null
+++ b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml
@@ -0,0 +1,78 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+  application:
+    name: hugegraph-pd
+
+management:
+  metrics:
+    export:
+      prometheus:
+        enabled: true
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+
+logging:
+  config: 'file:./conf/log4j2.xml'
+license:
+  verify-path: ./conf/verify-license.json
+  license-path: ./conf/hugegraph.license
+grpc:
+  port: 8686
+  # gRPC service address; change it to the actual local IPv4 address when deploying
+  host: 127.0.0.1
+
+server:
+  # Port of the REST service
+  port: 8620
+
+pd:
+  # Storage path
+  data-path: ./pd_data
+  # Interval (in seconds) of the auto-scaling patrol, which periodically checks
+  # the partition count on every store and rebalances partitions automatically
+  patrol-interval: 1800
+  # Initial store list (grpc IP:grpc port); stores in this list are activated automatically
+  # NOTE: set to one addr when in stand-alone mode
+  initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
+raft:
+  # Local raft service address
+  address: 127.0.0.1:8610
+  # PD cluster service addresses; NOTE: set to one addr when in stand-alone mode
+  peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
+
+store:
+  # Store offline threshold in seconds; beyond this time the store is considered
+  # permanently unavailable and its replicas are reassigned to other machines
+  max-down-time: 172800
+  # Whether to persist store monitoring data
+  monitor_data_enabled: true
+  # Interval of monitoring data: minute (default), hour, second
+  # default: 1 min * 1 day = 1440
+  monitor_data_interval: 1 minute
+  # Retention of monitoring data: 1 day; units: day, month, year
+  monitor_data_retention: 1 day
+  # NOTE: set to 1 when in stand-alone mode
+  # initial-store-count: 1
+
+partition:
+  # Default number of replicas (shards) per partition; set to 1 when in stand-alone mode
+  default-shard-count: 3
+  # Default maximum shards per machine; initial partition count =
+  # store-max-shard-count * store-number / default-shard-count
+  # (e.g. 12 * 3 stores / 3 replicas = 12 initial partitions)
+  store-max-shard-count: 12
+
diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml.template b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml.template
new file mode 100644
index 0000000000..43f52df609
--- /dev/null
+++ b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml.template
@@ -0,0 +1,71 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +spring: + application: + name: hugegraph-pd +logging: + config: file:./conf/log4j2.xml + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +grpc: + port: $GRPC_PORT$ + # grpc的服务地址, + #注意:部署时需要改为本地实际IPv4地址。 + host: $GRPC_HOST$ + netty-server: + max-inbound-message-size: 100MB + +server: + port : $SERVER_PORT$ + +pd: + # 集群ID,区分不同的PD集群 + + patrol-interval: 2147483647 + data-path: $PD_DATA_PATH$ + +raft: + address: $RAFT_ADDRESS$ + # raft集群 + peers-list: $RAFT_PEERS_LIST$ + # 快照生成时间间隔,单位秒 + snapshotInterval: 300 + metrics: true +store: + # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒 + keepAlive-timeout: 60 + # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒 + max-down-time: 1800 +partition: + # 默认分区总数 + default-total-count: 30 + # 默认每个分区副本数 + default-shard-count: 3 + +discovery: + #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 + heartbeat-try-count: 3 diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/hugegraph.license b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/hugegraph.license new file mode 100644 index 0000000000000000000000000000000000000000..bca71af9a1966024e7a99880302f6b592a33ca84 GIT binary patch literal 856 zcmV-e1E>69hn&I!I)GnSv~^hE{iID$vF0rJw0JnV&Nu&pJ?*HLQ#!nms8=CeFwzT% z)5v&U=s7DO7$r#-RSw|2Vq*`|(2D5<5GKxmIX=E1#UCniwf>oeSOP{p(e{%an?_vT zYc)wIUQsJ<9Ai=YsETz;YFaYQ^&YAJD(|KO1~NKs#B+QiROyg@d5VjOt~PJcGl>9= zPZ!gB)$<%a;*Yw{lDP)9Cj#PNktQ)_SRi&!l=1Y})2$oO3ASGnC|m#T{b9DXGt@^9 zRQlb4GeipvU&m_F1_$wwu0?i5Cz0TTSl`#uc}6(t4&YjvWkFI)*jMO@Me_Le_=M(v zSrA?RjyET9V15A)-p(d1q)ry6yF$rH4#5n`?m|ws9TUw@;$3qiBbOlEoKdy_MT)~V z=N#&y{vn8U2Og2iJ{)W-ZO7$zLHEUMN-l$XRvs_I_2T?X5&UXMi&;q8i8d}FWtd=8y@PLaCFm+wpR#G)aC{`rVrs%8)@u!?}9E$ZnDYo!c5hiA?8Qm}y{K9F) zTlyJ+OL<98%B`lIL|}hc9JSDFm9?7x2CnOjGJE_7%Jp&Bif`p29?ai>C(n0UWicnv zrZH578>dDfFP_O?YlK@|!U^eZflg1)Hs(pq1{WqS2Cflfgsa$(yyyfhqxr2y)xO<- ztE~YE$2dMsybWD-Bi<89DZ@L;S$hw~0DG7-yoR%(%Gon5wDYD`dL4k_U6j*47Z4h4 zwCZyUzNtj|D0Iax|0F9J(~M5|FP&r1ysHQXgy8J7aEa}MA3?yWdO@W6NuOQq zxI&O;%`%{#7kN9?F(hYrclJ8cN^CxZ6s68>v%<^cjA#tlBAKkmyCm&l%`g32m?oed z6hL($sbOP?W%_S#UNEW$*u&~DQD5$(f*}}lGTzkLe#?(+LA~;aL=0#EKm?Ju+ zaOe2lRrWmfBd1Mk57Xciq<;l;cn=*wkW>MbOpHWHg~zj9arkkR#q-;Ch&7kk6E(mR i^Sj|__)(b4Oh-2cCDrT@A%)082}=;$lBJ*eQVTP`m7MAT literal 0 HcmV?d00001 diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/log4j2.xml b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/log4j2.xml new file mode 100644 index 0000000000..96fae99a31 --- /dev/null +++ b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/log4j2.xml @@ -0,0 +1,134 @@ + + + + + + + + logs + hugegraph-pd + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/verify-license.json 
b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/verify-license.json new file mode 100644 index 0000000000..868ccbebbb --- /dev/null +++ b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/verify-license.json @@ -0,0 +1,6 @@ +{ + "subject": "hugegraph-license", + "public_alias": "publiccert", + "store_ticket": "803b6cc3-d144-47e8-948f-ec8b39c8881e", + "publickey_path": "/public-certs.store" +} diff --git a/hugegraph-pd/hg-pd-grpc/pom.xml b/hugegraph-pd/hg-pd-grpc/pom.xml new file mode 100644 index 0000000000..538647a7a3 --- /dev/null +++ b/hugegraph-pd/hg-pd-grpc/pom.xml @@ -0,0 +1,138 @@ + + + + + + 4.0.0 + + + org.apache.hugegraph + hugegraph-pd + ${revision} + ../pom.xml + + hg-pd-grpc + + + + 1.6.0 + 1.39.0 + 3.17.2 + 0.6.1 + + + + + io.grpc + grpc-netty-shaded + ${grpc.version} + + + io.grpc + grpc-protobuf + ${grpc.version} + + + io.grpc + grpc-stub + ${grpc.version} + + + javax.annotation + javax.annotation-api + 1.3.2 + + + + + ${basedir}/src/main/java + + + src/main/resources + + + src/main/proto + + + + + kr.motd.maven + os-maven-plugin + ${os.plugin.version} + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + ${protobuf.plugin.version} + true + + + com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier} + + grpc-java + + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + + + ${project.basedir}/src/main/proto + + + ${project.basedir}/src/main/java + + false + + + + + + generate-sources + + + compile + + compile-custom + + + + + + maven-clean-plugin + + + + src/main/java + + + + + + + initialize + + clean + + + + + + + diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto new file mode 100644 index 0000000000..f3f0e2a9e8 --- /dev/null +++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; +package discovery; +import "pdpb.proto"; + +option java_package = "org.apache.hugegraph.pd.grpc.discovery"; +option java_multiple_files = true; + + +service DiscoveryService { + rpc register(NodeInfo) returns (RegisterInfo); + rpc getNodes(Query) returns (NodeInfos); + // rpc getNodesByLabel(Conditions) returns (NodeInfos); +} + +/* requests */ +message NodeInfo { + string id = 1; + string appName = 2; + string version = 3; + string address = 4; + int64 interval = 5; + map labels = 6; +} +message Query { + string appName = 1; + string version = 2; + map labels = 3; +} +message LeaseInfo { + int64 registrationTs = 1; + int64 lastHeartbeatTs = 2; + int64 serverUpTs = 3; +} +message RegisterInfo { + NodeInfo nodeInfo = 1; + LeaseInfo leaseInfo = 2 ; + RegisterType type = 3 ; + pdpb.ResponseHeader header = 4; +} +enum RegisterType { + Register = 0; + Heartbeat = 1; + Dislodge = 2; +} +//message Condition{ +// string label = 1; +//} +//message Conditions{ +// string label = 1; +// string value = 2; +//} +message NodeInfos{ + repeated NodeInfo info = 1; +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto new file mode 100644 index 0000000000..5f88b2ee15 --- /dev/null +++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto @@ -0,0 +1,126 @@ +syntax = "proto3"; +package kv; +import "pdpb.proto"; +import "metapb.proto"; + +option java_package = "org.apache.hugegraph.pd.grpc.kv"; +option java_multiple_files = true; + + +service KvService { + rpc put(Kv) returns (KvResponse); + rpc get(K) returns (KResponse); + rpc delete(K) returns (KvResponse); + rpc 
deletePrefix(K) returns (KvResponse);
+    rpc scanPrefix(K) returns (ScanPrefixResponse);
+    rpc watch(WatchRequest) returns (stream WatchResponse);
+    rpc watchPrefix(WatchRequest) returns (stream WatchResponse);
+    rpc lock(LockRequest) returns (LockResponse);
+    rpc lockWithoutReentrant(LockRequest) returns (LockResponse);
+    rpc unlock(LockRequest) returns (LockResponse);
+    rpc keepAlive(LockRequest) returns (LockResponse);
+    rpc isLocked(LockRequest) returns (LockResponse);
+    rpc putTTL(TTLRequest) returns (TTLResponse);
+    rpc keepTTLAlive(TTLRequest) returns (TTLResponse);
+}
+
+/* requests */
+message Kv {
+    pdpb.RequestHeader header = 1;
+    string key = 2;
+    string value = 3;
+}
+message KvResponse {
+    pdpb.ResponseHeader header = 1;
+}
+
+message K{
+    pdpb.RequestHeader header = 1;
+    string key = 2;
+}
+
+message KResponse{
+    pdpb.ResponseHeader header = 1;
+    string value = 2;
+}
+
+message ScanPrefixResponse {
+    pdpb.ResponseHeader header = 1;
+    map<string, string> kvs = 2;
+}
+
+message LockRequest{
+    pdpb.RequestHeader header = 1;
+    string key = 2;
+    int64 ttl = 3;
+    int64 clientId = 4;
+}
+message LockResponse{
+    pdpb.ResponseHeader header = 1;
+    string key = 2;
+    int64 ttl = 3;
+    int64 clientId = 4;
+    bool succeed = 5;
+}
+
+message LockAliveResponse{
+    pdpb.ResponseHeader header = 1;
+    int64 clientId = 2;
+}
+
+
+message WatchKv {
+    string key = 1;
+    string value = 2;
+}
+
+enum WatchType {
+    Put = 0;
+    Delete = 1;
+    Unrecognized = 2;
+}
+
+message WatchEvent {
+    WatchKv current = 1;
+    WatchKv prev = 2;
+    WatchType type = 3;
+}
+
+message WatchResponse {
+    pdpb.ResponseHeader header = 1;
+    repeated WatchEvent events = 2;
+    int64 clientId = 3;
+    WatchState state = 4;
+}
+
+enum WatchState {
+    Starting = 0;
+    Started = 1;
+    Leader_Changed = 2;
+    Alive = 3;
+}
+
+message WatchRequest {
+    pdpb.RequestHeader header = 1;
+    WatchState state = 2;
+    string key = 3;
+    int64 clientId = 4;
+}
+
+message V{
+    string value = 1;
+    int64 ttl = 2;
+    int64 st = 3;
+}
+
+message TTLRequest{
+    pdpb.RequestHeader header = 1;
+    string key = 2;
+    string value = 3;
+    int64 ttl = 4;
+}
+
+message TTLResponse{
+    pdpb.ResponseHeader header = 1;
+    bool succeed = 2;
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto
new file mode 100644
index 0000000000..ff6fc679d1
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto
@@ -0,0 +1,48 @@
+syntax = "proto3";
+package metaTask;
+import "metapb.proto";
+import "pd_pulse.proto";
+option java_package = "org.apache.hugegraph.pd.grpc";
+
+enum TaskType {
+    Unknown = 0;
+    Split_Partition = 1;
+    Change_Shard = 2;
+    Move_Partition = 3;
+    Clean_Partition = 4;
+    Change_KeyRange = 5;
+}
+
+// A single task record
+message Task {
+    uint64 id = 1;
+    TaskType type = 2;
+    TaskState state = 3;
+    int64 start_timestamp = 4;
+    metapb.Partition partition = 5;
+    string message = 6;
+    // Task state reported by each shard
+    repeated ShardTaskState shardState = 7;
+    ChangeShard changeShard = 9;
+    SplitPartition splitPartition = 10;
+    MovePartition movePartition = 11;
+    CleanPartition cleanPartition = 12;
+    PartitionKeyRange partitionKeyRange = 13;
+}
+
+enum TaskState{
+    Task_Unknown = 0;
+    Task_Ready = 1;   // ready to run
+    Task_Doing = 2;   // in progress
+    Task_Done = 3;    // finished
+    Task_Exit = 4;    // exited
+    Task_Stop = 10;
+    Task_Success = 11;
+    Task_Failure = 12;
+}
+
+message ShardTaskState{
+    uint64 store_id = 1;
+    TaskState state = 2;
+}
+
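Stepping back to the lock RPCs in kv.proto above, a minimal client-side sketch may make the intended call sequence concrete. It assumes the KvServiceGrpc stub and message classes that protoc-gen-grpc-java generates from that file, plus a reachable PD gRPC endpoint; all names and values are illustrative, not part of this patch:

    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;
    import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc;
    import org.apache.hugegraph.pd.grpc.kv.LockRequest;

    public class PdLockSketch {
        public static void main(String[] args) {
            ManagedChannel channel = ManagedChannelBuilder
                    .forAddress("127.0.0.1", 8686).usePlaintext().build();
            KvServiceGrpc.KvServiceBlockingStub kv = KvServiceGrpc.newBlockingStub(channel);
            // Try to take a 30-second lock on a key; clientId identifies the holder
            LockRequest req = LockRequest.newBuilder()
                                         .setKey("tasks/rebalance")
                                         .setTtl(30)
                                         .setClientId(1L)
                                         .build();
            if (kv.lock(req).getSucceed()) {
                kv.keepAlive(req);  // renew before the TTL expires
                kv.unlock(req);     // release when done
            }
            channel.shutdown();
        }
    }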
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto
new file mode 100644
index 0000000000..d156a00fb9
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto
@@ -0,0 +1,377 @@
+syntax = "proto3";
+package metapb;
+option java_package = "org.apache.hugegraph.pd.grpc";
+import "google/protobuf/any.proto";
+
+enum ClusterState{
+    // Cluster is healthy
+    Cluster_OK = 0;
+    // Cluster warning: some nodes are faulty; reads and writes are unaffected in the short term
+    Cluster_Warn = 2;
+    // Cluster offline: readable but not writable
+    Cluster_Offline = 10;
+    // Cluster fault: neither readable nor writable; repair the faulty nodes as soon as possible
+    Cluster_Fault = 11;
+    Cluster_Not_Ready = -1;
+}
+// Cluster status
+message ClusterStats{
+    ClusterState state = 1;
+    string message = 2;
+    uint64 timestamp = 16;
+}
+
+enum StoreState {
+    Unknown = 0;
+    // Not yet activated
+    Pending = 4;
+    // Online
+    Up = 1;
+    // Offline
+    Offline = 2;
+    // Going offline
+    Exiting = 5;
+    // Taken offline
+    Tombstone = 3;
+}
+
+// Store label for Storage grouping.
+message StoreLabel {
+    string key = 1;
+    string value = 2;
+}
+
+message Store {
+    uint64 id = 1;
+    // Address to handle client requests
+    string address = 2;
+    string raft_address = 3;
+    repeated StoreLabel labels = 4;
+    // Store software version
+    string version = 5;
+    StoreState state = 6;
+    // The start timestamp of the current store
+    int64 start_timestamp = 7;
+    string deploy_path = 8;
+    // The last heartbeat timestamp of the store.
+    int64 last_heartbeat = 9;
+    StoreStats stats = 10;
+    // Data format version
+    int32 data_version = 11;
+    int32 cores = 12;
+    string data_path = 13;
+}
+
+enum ShardRole {
+    None = 0;
+    Leader = 1;
+    Follower = 2;
+    // Learner/None -> Learner
+    Learner = 3;
+}
+
+message Shard {
+    uint64 store_id = 2;
+    ShardRole role = 3;
+}
+
+message ShardGroup{
+    uint32 id = 1;
+    uint64 version = 2;
+    uint64 conf_ver = 3;
+    repeated Shard shards = 6;
+    PartitionState state = 10;
+    string message = 11;
+}
+
+message Graph {
+    string graph_name = 2;
+    // Partition count; 0 means invalid; must not exceed the total number of raft groups
+    int32 partition_count = 3;
+    // Current working state
+    PartitionState state = 10;
+    string message = 11;
+    GraphState graph_state = 12;
+}
+// Partition working state
+enum PartitionState{
+    PState_None = 0;
+    //
+    PState_Normal = 1;
+    // Partition warning: some nodes are faulty; reads and writes are unaffected in the short term
+    PState_Warn = 2;
+    // Partition offline: readable but not writable
+    PState_Offline = 10;
+    // Partition fault: neither readable nor writable; repair the faulty nodes as soon as possible
+    PState_Fault = 11;
+}
+
+message PartitionV36 {
+    uint32 id = 1;
+    string graph_name = 3;
+    // Partition range [start_key, end_key).
+    uint64 start_key = 4;
+    uint64 end_key = 5;
+    repeated Shard shards = 6;
+    // Leader term; incremented on each leader switch
+    uint64 version = 7;
+    // Shard list version; incremented on each change
+    uint64 conf_ver = 8;
+    // Current working state
+    PartitionState state = 10;
+    string message = 11;
+}
+
+message Partition {
+    uint32 id = 1;
+    string graph_name = 3;
+    // Partition range [start_key, end_key).
+    uint64 start_key = 4;
+    uint64 end_key = 5;
+    // The Partition object no longer stores the shard list (query the corresponding
+    // shard group instead); version and conf version no longer carry real meaning
+    // repeated Shard shards = 6;
+    // Incremented every time the key range changes
+    uint64 version = 7;
+    // Shard list version; incremented on each change
+    // uint64 conf_ver = 8;
+    // Current working state
+    PartitionState state = 10;
+    string message = 11;
+}
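+// Worked example of the range semantics above (illustrative, derived from the
+// PartitionUtilsTest added earlier in this patch): keys are first hashed with
+// PartitionUtils.calcHashcode(key), and with partition_count = 10 each
+// partition covers a slice of size partSize = PartitionUtils.MAX_VALUE / 10 + 1
+// of the hash space, i.e. partition i holds [i * partSize, (i + 1) * partSize).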
+
+message PartitionShard {
+    metapb.Partition partition = 1;
+    metapb.Shard leader = 2;
+    // Offline shards
+    repeated metapb.Shard offline_shards = 3;
+}
+// Records the storage location of a partition
+message PartitionStore {
+    uint32 partition_id = 1;
+    string graph_name = 3;
+    // Storage location
+    string store_location = 4;
+}
+
+message PartitionRaft {
+    uint32 partition_id = 1;
+    string graph_name = 3;
+    // Storage location
+    string raft_location = 4;
+}
+
+message ShardStats{
+    uint64 store_id = 2;
+    ShardRole role = 3;
+    ShardState state = 4;
+    // Progress of snapshot installation
+    uint32 progress = 5;
+}
+message PartitionStats{
+    uint32 id = 1;
+    // Term of the raft group.
+    uint64 leader_term = 2;
+    repeated string graph_name = 3;
+    metapb.Shard leader = 4;
+    // Offline shards
+    repeated metapb.Shard shard = 5;
+    repeated metapb.Shard learner = 6;
+    uint64 conf_ver = 7;
+    // Partition state
+    PartitionState state = 8;
+    repeated ShardStats shardStats = 9;
+    // Approximate partition size
+    uint64 approximate_size = 10;
+    // Approximate number of keys in the partition
+    uint64 approximate_keys = 13;
+    // heartbeat timestamp
+    int64 timestamp = 16;
+}
+
+message GraphStats{
+    // Graph name
+    string graph_name = 1;
+    // Approximate partition size
+    uint64 approximate_size = 2;
+    // Approximate number of keys in the partition
+    uint64 approximate_keys = 3;
+    // // committed index
+    // uint64 committed_index = 4;
+    uint32 partition_id = 5;
+    ShardRole role = 6;
+    // Current working state
+    PartitionState work_state = 8;
+}
+
+message RaftStats {
+    // partition id
+    uint32 partition_id = 1;
+    // committed index
+    uint64 committed_index = 2;
+}
+
+message TimeInterval {
+    // The unix timestamp in seconds of the start of this period.
+    uint64 start_timestamp = 1;
+    // The unix timestamp in seconds of the end of this period.
+    uint64 end_timestamp = 2;
+}
+
+message RecordPair {
+    string key = 1;
+    uint64 value = 2;
+}
+
+
+message QueryStats {
+    uint64 GC = 1;
+    uint64 Get = 2;
+    uint64 Scan = 3;
+    uint64 Coprocessor = 4;
+    uint64 Delete = 5;
+    uint64 DeleteRange = 6;
+    uint64 Put = 7;
+}
+
+enum ShardState{
+    SState_None = 0;
+    // Normal
+    SState_Normal = 1;
+    // Installing a snapshot
+    SState_Snapshot = 2;
+    // Offline
+    SState_Offline = 10;
+}
+
+
+message StoreStats {
+    uint64 store_id = 1;
+    // Capacity for the store.
+    uint64 capacity = 2;
+    // Available size for the store.
+    uint64 available = 3;
+    // Total partition count in this store.
+    uint32 partition_count = 4;
+    // Current sending snapshot count.
+    uint32 sending_snap_count = 5;
+    // Current receiving snapshot count.
+    uint32 receiving_snap_count = 6;
+    // When the store is started (unix timestamp in seconds).
+    uint32 start_time = 7;
+    // How many partitions are applying snapshots.
+    uint32 applying_snap_count = 8;
+    // If the store is busy
+    bool is_busy = 9;
+    // Actually used space by db
+    uint64 used_size = 10;
+    // Bytes written for the store during this period.
+    uint64 bytes_written = 11;
+    // Keys written for the store during this period.
+    uint64 keys_written = 12;
+    // Bytes read for the store during this period.
+    uint64 bytes_read = 13;
+    // Keys read for the store during this period.
+    uint64 keys_read = 14;
+    // Actually reported time interval
+    TimeInterval interval = 15;
+    // Threads' CPU usages in the store
+    repeated RecordPair cpu_usages = 16;
+    // Threads' read disk I/O rates in the store
+    repeated RecordPair read_io_rates = 17;
+    // Threads' write disk I/O rates in the store
+    repeated RecordPair write_io_rates = 18;
+    // Operations' latencies in the store
+    repeated RecordPair op_latencies = 19;
+    // Store query stats
+    QueryStats query_stats = 21;
+    // graph stats
+    repeated GraphStats graph_stats = 22;
+    // raft stats
+    repeated RaftStats raft_stats = 23;
+    int32 cores = 24;
+    // system metrics
+    repeated RecordPair system_metrics = 25;
+}
+
+// Partition query conditions
+message PartitionQuery{
+    optional uint64 store_id = 1; // 0 means the query does not filter by store_id
+    optional string graph_name = 2;
+    optional uint32 partition_id = 4;
+}
+
+// PD node info
+message Member {
+    uint64 cluster_id = 1;
+    string raft_url = 3;
+    string grpc_url = 4;
+    string rest_url = 5;
+    string data_path = 6;
+    StoreState state = 7;
+    ShardRole role = 8;
+    string replicator_state = 9;
+}
+
+// Graph space configuration
+message GraphSpace{
+    string name = 1;
+    // Maximum storage usage
+    uint64 storage_limit = 2;
+    // Used space
+    uint64 used_size = 3;
+    // Modification time
+    uint64 timestamp = 10;
+}
+
+// PD configuration
+message PDConfig{
+    uint64 version = 1;
+    // Partition count; computed dynamically from the store count at
+    // initialization, modified after splits
+    int32 partition_count = 2;
+    // Number of replicas per partition
+    int32 shard_count = 3;
+    // PD cluster list
+    string peers_list = 4;
+    // Minimum number of stores in the cluster
+    int32 min_store_count = 6;
+    // Maximum shards per store
+    int32 max_Shards_Per_Store = 7;
+    // Modification time
+    uint64 timestamp = 10;
+}
+
+
+
+// Message persistence
+message QueueItem{
+    string item_id = 1;
+    string item_class = 2;
+    bytes item_content = 3;
+    int64 timestamp = 10;
+}
+
+message LogRecord{
+    string action = 1;
+    int64 timestamp = 2;
+    map<string, string> labels = 3;
+    google.protobuf.Any object = 4;
+    string message = 5;
+}
+
+message GraphState{
+    GraphMode mode = 1;
+    GraphModeReason reason = 2;
+}
+
+enum GraphMode{
+    ReadWrite = 0;
+    ReadOnly = 1;
+    WriteOnly = 2;
+}
+
+enum GraphModeReason{
+    Empty = 0;      // empty
+    Initiative = 1; // state was set manually
+    Quota = 2;      // quota limit reached
+
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto
new file mode 100644
index 0000000000..9c2dfe6174
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto
@@ -0,0 +1,36 @@
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "org.apache.hugegraph.pd.grpc.common";
+option java_outer_classname = "HgPdCommonProto";
+
+message RequestHeader {
+    // Cluster ID.
+    uint64 cluster_id = 1;
+    // Sender ID.
+    uint64 sender_id = 2;
+}
+
+message ResponseHeader {
+    // cluster_id is the ID of the cluster which sent the response.
+    uint64 cluster_id = 1;
+    Error error = 2;
+}
+
+enum ErrorType {
+    OK = 0;
+    UNKNOWN = 1;
+    STORE_NON_EXIST = 101;
+    STORE_TOMBSTONE = 103;
+    ALREADY_BOOTSTRAPPED = 4;
+    INCOMPATIBLE_VERSION = 5;
+    PARTITION_NOT_FOUND = 6;
+
+    ETCD_READ_ERROR = 1000;
+    ETCD_WRITE_ERROR = 1001;
+}
+
+message Error {
+    ErrorType type = 1;
+    string message = 2;
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto
new file mode 100644
index 0000000000..f49c3aeaad
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto
@@ -0,0 +1,155 @@
+syntax = "proto3";
+
+import "metapb.proto";
+import "pd_common.proto";
+
+option java_multiple_files = true;
+option java_package = "org.apache.hugegraph.pd.grpc.pulse";
+option java_outer_classname = "HgPdPulseProto";
+
+service HgPdPulse {
+    rpc Pulse(stream PulseRequest) returns (stream PulseResponse);
+}
+
+/* requests */
+message PulseRequest {
+    PulseCreateRequest create_request = 1;
+    PulseCancelRequest cancel_request = 2;
+    PulseNoticeRequest notice_request = 3;
+    PulseAckRequest ack_request = 4;
+}
+
+message PulseCreateRequest {
+    PulseType pulse_type = 1;
+}
+
+message PulseCancelRequest {
+    int64 observer_id = 1;
+}
+
+message PulseNoticeRequest {
+    int64 observer_id = 1;
+    oneof request_union {
+        PartitionHeartbeatRequest partition_heartbeat_request = 10;
+    }
+}
+
+message PulseAckRequest {
+    int64 observer_id = 1;
+    int64 notice_id = 2;
+}
+
+// Partition heartbeat: the leader sends a heartbeat when events such as peer
+// addition/removal or leader change happen on a partition. In turn, PD sends
+// shard additions/removals for the partition to the leader via the Response.
+message PartitionHeartbeatRequest {
+    RequestHeader header = 1;
+    // Leader Peer sending the heartbeat
+    metapb.PartitionStats states = 4;
+}
+
+/* responses */
+message PulseResponse {
+    PulseType pulse_type = 1;
+    int64 observer_id = 2;
+    int32 status = 3; //0=ok,1=fail
+    int64 notice_id=4;
+    oneof response_union {
+        PartitionHeartbeatResponse partition_heartbeat_response = 10;
+        PdInstructionResponse instruction_response = 11;
+    }
+}
+
+message PartitionHeartbeatResponse {
+    ResponseHeader header = 1;
+    uint64 id = 3;
+    metapb.Partition partition = 2;
+    ChangeShard change_shard = 4;
+
+    TransferLeader transfer_leader = 5;
+    // Split into multiple partitions: the first entry in SplitPartition is the
+    // original partition, the entries from the second onward are new partitions
+    SplitPartition split_partition = 6;
+    // Table for the rocksdb compaction; null means all tables
+    DbCompaction db_compaction = 7;
+    // Migrate the partition's data to the target
+    MovePartition move_partition = 8;
+    // Clean up the graph data of the partition
+    CleanPartition clean_partition = 9;
+    // Partition key range change
+    PartitionKeyRange key_range = 10;
+}
+
+/* Data model */
+message ChangeShard {
+    repeated metapb.Shard shard = 1;
+    ConfChangeType change_type = 2;
+}
+
+message TransferLeader {
+    metapb.Shard shard = 1;
+}
+
+message SplitPartition {
+    repeated metapb.Partition new_partition = 1;
+}
+
+message DbCompaction {
+    string table_name = 3;
+}
+
+message MovePartition{
+    // The key range of the target partition is the new range after migration
+    metapb.Partition target_partition = 1;
+    // All data between the partition's key start and key end
+    // will be migrated to the target partition
+    uint64 key_start = 2;
+    uint64 key_end = 3;
+}
+
+message CleanPartition {
+    uint64 key_start = 1;
+    uint64 key_end = 2;
+    CleanType clean_type = 3;
+    bool delete_partition = 4; // whether to delete the partition
+}
+
+message PartitionKeyRange{
+    uint32 partition_id = 1;
+    uint64 key_start = 2;
+    uint64 key_end = 3;
+}
+
+message PdInstructionResponse {
+    PdInstructionType instruction_type = 1;
+    string leader_ip = 2;
+}
+
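+// Illustrative exchange, inferred from the messages above: the store-side
+// leader opens the Pulse stream and sends a PulseCreateRequest with
+// PULSE_TYPE_PARTITION_HEARTBEAT; it then reports PartitionStats through
+// PulseNoticeRequest. PD answers with PartitionHeartbeatResponse instructions
+// (change shard, split, move, clean, compaction, key range change), each
+// carrying a notice_id that the receiver confirms via PulseAckRequest.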
+/* enums */
+enum PulseType {
+    PULSE_TYPE_UNKNOWN = 0;
+    PULSE_TYPE_PARTITION_HEARTBEAT = 1;
+    PULSE_TYPE_PD_INSTRUCTION = 2;
+}
+
+enum PulseChangeType {
+    PULSE_CHANGE_TYPE_UNKNOWN = 0;
+    PULSE_CHANGE_TYPE_ADD = 1;
+    PULSE_CHANGE_TYPE_ALTER = 2;
+    PULSE_CHANGE_TYPE_DEL = 3;
+}
+
+enum ConfChangeType {
+    CONF_CHANGE_TYPE_UNKNOWN = 0;
+    CONF_CHANGE_TYPE_ADD_NODE = 1;
+    CONF_CHANGE_TYPE_REMOVE_NODE = 2;
+    CONF_CHANGE_TYPE_ADD_LEARNER_NODE = 3;
+    // Adjust shards; the leader adds/removes them dynamically per the new configuration
+    CONF_CHANGE_TYPE_ADJUST = 4;
+}
+
+enum CleanType {
+    CLEAN_TYPE_KEEP_RANGE = 0;    // keep only this range
+    CLEAN_TYPE_EXCLUDE_RANGE = 1; // delete this range
+}
+
+enum PdInstructionType {
+    CHANGE_TO_FOLLOWER = 0;
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto
new file mode 100644
index 0000000000..7470057a85
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto
@@ -0,0 +1,86 @@
+syntax = "proto3";
+
+import "metapb.proto";
+
+option java_multiple_files = true;
+option java_package = "org.apache.hugegraph.pd.grpc.watch";
+option java_outer_classname = "HgPdWatchProto";
+
+service HgPdWatch {
+    rpc Watch(stream WatchRequest) returns (stream WatchResponse);
+}
+
+message WatchRequest {
+    WatchCreateRequest create_request = 1;
+    WatchCancelRequest cancel_request = 2;
+}
+
+message WatchCreateRequest {
+    WatchType watch_type = 1;
+}
+
+message WatchCancelRequest {
+    int64 watcher_id = 1;
+}
+
+message WatchResponse {
+    WatchType watch_type = 1;
+    int64 watcher_id = 2;
+    int32 status = 3; //0=ok,1=fail
+    int64 notice_id = 4;
+    string msg = 5;
+    oneof response_union {
+        WatchPartitionResponse partition_response = 10;
+        WatchNodeResponse node_response = 11;
+        WatchGraphResponse graph_response = 12;
+        WatchShardGroupResponse shard_group_response = 13;
+    }
+}
+
+message WatchPartitionResponse {
+    string graph = 1;
+    int32 partition_id = 2;
+    WatchChangeType change_type = 3;
+}
+
+message WatchNodeResponse {
+    string graph = 1;
+    uint64 node_id = 2;
+    NodeEventType node_event_type = 3;
+}
+
+message WatchGraphResponse {
+    metapb.Graph graph = 1;
+    WatchType type = 2;
+}
+
+message WatchShardGroupResponse {
+    metapb.ShardGroup shard_group = 1;
+    WatchChangeType type = 2;
+    int32 shard_group_id = 3;
+}
+
+enum WatchType {
+    WATCH_TYPE_UNKNOWN = 0;
+    WATCH_TYPE_PARTITION_CHANGE = 1;
+    WATCH_TYPE_STORE_NODE_CHANGE = 2;
+    WATCH_TYPE_GRAPH_CHANGE = 3;
+    WATCH_TYPE_SHARD_GROUP_CHANGE = 4;
+}
+
+enum WatchChangeType {
+    WATCH_CHANGE_TYPE_UNKNOWN = 0;
+    WATCH_CHANGE_TYPE_ADD = 1;
+    WATCH_CHANGE_TYPE_ALTER = 2;
+    WATCH_CHANGE_TYPE_DEL = 3;
+    WATCH_CHANGE_TYPE_SPECIAL1 = 4;
+}
+
+enum NodeEventType {
+    NODE_EVENT_TYPE_UNKNOWN = 0;
+    NODE_EVENT_TYPE_NODE_ONLINE = 1;
+    NODE_EVENT_TYPE_NODE_OFFLINE = 2;
+    NODE_EVENT_TYPE_NODE_RAFT_CHANGE = 3;
+    // PD leader change
+    NODE_EVENT_TYPE_PD_LEADER_CHANGE = 4;
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto
new file mode 100644
index 0000000000..cba5a49a4b
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto
@@ -0,0 +1,590 @@
+syntax = "proto3";
+package pdpb;
+
+import "metapb.proto";
+import "metaTask.proto";
+
+option java_package = "org.apache.hugegraph.pd.grpc";
+
+service PD {
+    // Register a store; the first registration generates a new store_id,
+    // which is the unique identifier of the store
+    rpc RegisterStore(RegisterStoreRequest) returns (RegisterStoreResponse) {}
+    rpc GetStore(GetStoreRequest) returns (GetStoreResponse) {}
+    // Modify store state and other info.
+    rpc SetStore(SetStoreRequest) returns (SetStoreResponse) {}
+    // Delete the specified store
+    rpc DelStore(DetStoreRequest) returns (DetStoreResponse) {}
+    rpc GetAllStores(GetAllStoresRequest) returns (GetAllStoresResponse) {}
+    rpc StoreHeartbeat(StoreHeartbeatRequest) returns (StoreHeartbeatResponse) {}
+
+    // Find the partition that a key belongs to
+    rpc GetPartition(GetPartitionRequest) returns (GetPartitionResponse) {}
+
+    // Find the partition by hash code
+    rpc GetPartitionByCode(GetPartitionByCodeRequest) returns (GetPartitionResponse) {}
+    // Return the partition by partition ID
+    rpc GetPartitionByID(GetPartitionByIDRequest) returns (GetPartitionResponse) {}
+    rpc ScanPartitions(ScanPartitionsRequest) returns (ScanPartitionsResponse) {}
+    // Update partition info, mainly the partition key range; call this interface
+    // with caution, otherwise data loss may occur
+    rpc UpdatePartition(UpdatePartitionRequest) returns (UpdatePartitionResponse) {}
+    // Delete the specified partition of a graph
+    rpc DelPartition(DelPartitionRequest) returns (DelPartitionResponse) {}
+    // Query partition info by conditions such as store and graph
+    rpc QueryPartitions(QueryPartitionsRequest) returns (QueryPartitionsResponse){}
+    // Read graph info
+    rpc GetGraph(GetGraphRequest) returns (GetGraphResponse){}
+    // Modify graph info
+    rpc SetGraph(SetGraphRequest) returns (SetGraphResponse){}
+    rpc DelGraph(DelGraphRequest) returns (DelGraphResponse){}
+    // Globally unique auto-increment ID
+    rpc GetId(GetIdRequest) returns (GetIdResponse){}
+    rpc ResetId(ResetIdRequest) returns (ResetIdResponse){}
+    // Members of the PD cluster
+    rpc GetMembers(GetMembersRequest) returns (GetMembersResponse) {}
+    rpc GetStoreStatus(GetAllStoresRequest) returns (GetAllStoresResponse) {}
+    rpc GetPDConfig(GetPDConfigRequest) returns (GetPDConfigResponse){}
+    rpc SetPDConfig(SetPDConfigRequest) returns (SetPDConfigResponse){}
+    rpc GetGraphSpace(GetGraphSpaceRequest) returns (GetGraphSpaceResponse){}
+    rpc SetGraphSpace(SetGraphSpaceRequest) returns (SetGraphSpaceResponse){}
+    // Get cluster health status
+    rpc GetClusterStats(GetClusterStatsRequest) returns (GetClusterStatsResponse){}
+    // Replace the PD cluster nodes
+    rpc ChangePeerList(ChangePeerListRequest) returns (getChangePeerListResponse) {}
+    // Data split
+    rpc SplitData(SplitDataRequest) returns (SplitDataResponse){}
+
+    rpc SplitGraphData(SplitGraphDataRequest) returns (SplitDataResponse) {}
+    // Data migration
+    rpc MovePartition(MovePartitionRequest) returns (MovePartitionResponse){}
+    // Report the execution results of tasks such as partition splits
+    rpc ReportTask(ReportTaskRequest) returns (ReportTaskResponse){}
+
+    rpc GetPartitionStats(GetPartitionStatsRequest) returns (GetPartitionStatsResponse){}
+    // Balance the number of partition leaders across stores
+    rpc BalanceLeaders(BalanceLeadersRequest) returns (BalanceLeadersResponse){}
+
+    // Replace the license file
+    rpc PutLicense(PutLicenseRequest) returns (PutLicenseResponse){}
+
+    // Notify rocksdb to run compaction
+    rpc DbCompaction(DbCompactionRequest) returns (DbCompactionResponse){}
+
+    // Merge partitions (shrink the cluster)
+    rpc CombineCluster(CombineClusterRequest) returns (CombineClusterResponse){}
+    // Shrink a single graph
+    rpc CombineGraph(CombineGraphRequest) returns (CombineGraphResponse) {}
+
+    // shard group
+    rpc GetShardGroup(GetShardGroupRequest) returns (GetShardGroupResponse){}
+    rpc UpdateShardGroup(UpdateShardGroupRequest) returns (UpdateShardGroupResponse){}
+    // Delete the shard group
+    rpc DeleteShardGroup(DeleteShardGroupRequest) returns (DeleteShardGroupResponse) {}
+    // Maintenance operations on shard groups
+    rpc UpdateShardGroupOp(ChangeShardRequest) returns (ChangeShardResponse){}
+    // change shard
+    rpc ChangeShard(ChangeShardRequest) returns (ChangeShardResponse) {}
+    // Update the PD raft group
+    rpc updatePdRaft(UpdatePdRaftRequest) returns (UpdatePdRaftResponse) {}
+
+    rpc getCache(GetGraphRequest) returns (CacheResponse) {}
+    rpc getPartitions(GetGraphRequest) returns (CachePartitionResponse) {}
+}
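+// Request routing, as suggested by the RPCs above: a client may resolve a key
+// either from raw key bytes (GetPartition) or from a precomputed hash code
+// (GetPartitionByCode); both return the partition plus its current leader
+// shard, which is where reads and writes should be directed.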
+
+message RequestHeader {
+    // Cluster ID.
+    uint64 cluster_id = 1;
+    // Sender ID.
+    uint64 sender_id = 2;
+}
+
+message ResponseHeader {
+    // cluster_id is the ID of the cluster which sent the response.
+    uint64 cluster_id = 1;
+    Error error = 2;
+}
+
+enum ErrorType {
+    OK = 0;
+    UNKNOWN = 1;
+
+    NOT_LEADER = 100;
+    STORE_ID_NOT_EXIST = 101;
+    NO_ACTIVE_STORE = 102;
+    NOT_FOUND = 103;
+    PD_UNREACHABLE = 104;
+    LESS_ACTIVE_STORE = 105;
+    STORE_HAS_BEEN_REMOVED = 106;
+    STORE_PROHIBIT_DELETION = 111;
+    SET_CONFIG_SHARD_COUNT_ERROR = 112;
+    UPDATE_STORE_STATE_ERROR = 113;
+    STORE_PROHIBIT_DUPLICATE = 114;
+    ROCKSDB_READ_ERROR = 1002;
+    ROCKSDB_WRITE_ERROR = 1003;
+    ROCKSDB_DEL_ERROR = 1004;
+    ROCKSDB_SAVE_SNAPSHOT_ERROR = 1005;
+    ROCKSDB_LOAD_SNAPSHOT_ERROR = 1006;
+
+    // Splitting is forbidden in the current cluster state
+    Cluster_State_Forbid_Splitting = 1007;
+    // A split is already in progress
+    Split_Partition_Doing = 1008;
+    // The partition count on the store exceeds the upper limit
+    Too_Many_Partitions_Per_Store = 1009;
+    // License error
+    LICENSE_ERROR = 107;
+    // License verification error
+    LICENSE_VERIFY_ERROR = 108;
+
+    // Store decommission (tombstone) in progress
+    Store_Tombstone_Doing = 1010;
+
+    // Invalid number of split partitions
+    Invalid_Split_Partition_Count = 1011;
+}
+
+message Error {
+    ErrorType type = 1;
+    string message = 2;
+}
+message GetStoreRequest {
+    RequestHeader header = 1;
+    uint64 store_id = 2;
+}
+
+message GetStoreResponse {
+    ResponseHeader header = 1;
+
+    metapb.Store store = 2;
+    metapb.StoreStats stats = 3;
+}
+
+message DetStoreRequest {
+    RequestHeader header = 1;
+    uint64 store_id = 2;
+}
+
+message DetStoreResponse {
+    ResponseHeader header = 1;
+    metapb.Store store = 2;
+}
+
+message RegisterStoreRequest {
+    RequestHeader header = 1;
+    metapb.Store store = 2;
+}
+
+
+message RegisterStoreResponse {
+    ResponseHeader header = 1;
+    // The first registration returns a new store_id
+    uint64 store_id = 2;
+}
+
+message SetStoreRequest {
+    RequestHeader header = 1;
+    metapb.Store store = 2;
+}
+
+message SetStoreResponse {
+    ResponseHeader header = 1;
+    // Returns the modified store
+    metapb.Store store = 2;
+}
+
+
+// Returns all stores hosting graph_name; if graph_name is empty,
+// returns all stores in the system
+message GetAllStoresRequest {
+    RequestHeader header = 1;
+    string graph_name = 2;
+    // Whether to exclude offline stores from the result
+    bool exclude_offline_stores = 3;
+}
+
+message GetAllStoresResponse {
+    ResponseHeader header = 1;
+
+    repeated metapb.Store stores = 2;
+}
+
+
+message StoreHeartbeatRequest {
+    RequestHeader header = 1;
+
+    metapb.StoreStats stats = 2;
+}
+
+message StoreHeartbeatResponse {
+    ResponseHeader header = 1;
+    string cluster_version = 3;
+    metapb.ClusterStats clusterStats = 4;
+}
+
+message GetPartitionRequest {
+    RequestHeader header = 1;
+    string graph_name = 2;
+    bytes key = 3;
+}
+
+
+message GetPartitionByCodeRequest {
+    RequestHeader header = 1;
+    string graph_name = 2;
+    uint64 code = 3;
+}
+
+
+message GetPartitionResponse {
+    ResponseHeader header = 1;
+    metapb.Partition partition = 2;
+    metapb.Shard leader = 3;
+    // Offline shards
+    repeated metapb.Shard offline_shards = 4;
+}
+
+message GetPartitionByIDRequest {
+    RequestHeader header = 1;
+    string graph_name = 2;
+    uint32 partition_id = 3;
+}
+
+message DelPartitionRequest {
+    RequestHeader header = 1;
+    string graph_name = 2;
+    uint32 partition_id = 3;
+}
+message DelPartitionResponse {
+    ResponseHeader header = 1;
+    metapb.Partition partition = 2;
+}
+
+message UpdatePartitionRequest{
+    RequestHeader header = 1;
+    repeated metapb.Partition partition = 2;
+}
+
+message UpdatePartitionResponse{
+    ResponseHeader header = 1;
+    repeated metapb.Partition partition = 2;
+}
+// Use GetPartitionResponse as the response of GetPartitionByIDRequest.
GetPartitionByIDRequest. + +message ScanPartitionsRequest { + RequestHeader header = 1; + string graph_name = 2; + bytes start_key = 3; + bytes end_key = 4; // end_key is +inf when it is empty. +} + + + +message ScanPartitionsResponse { + ResponseHeader header = 1; + repeated metapb.PartitionShard partitions = 4; +} + + + +message QueryPartitionsRequest{ + RequestHeader header = 1; + metapb.PartitionQuery query = 2; +} + +message QueryPartitionsResponse { + ResponseHeader header = 1; + repeated metapb.Partition partitions = 4; +} + + + +message GetGraphRequest{ + RequestHeader header = 1; + string graph_name = 2; +} + +message GetGraphResponse{ + ResponseHeader header = 1; + metapb.Graph graph = 2; +} + +message SetGraphRequest{ + RequestHeader header = 1; + metapb.Graph graph = 2; +} + +message SetGraphResponse{ + ResponseHeader header = 1; + metapb.Graph graph = 2; +} + +message DelGraphRequest{ + RequestHeader header = 1; + string graph_name = 2; +} + +message DelGraphResponse{ + ResponseHeader header = 1; + metapb.Graph graph = 2; +} + +message GetIdRequest{ + RequestHeader header = 1; + string key = 2; + int32 delta = 3; +} + +message GetIdResponse{ + ResponseHeader header = 1; + int64 id = 2; + int32 delta = 3; +} + +message ResetIdRequest{ + RequestHeader header = 1; + string key = 2; +} + +message ResetIdResponse{ + ResponseHeader header = 1; + int32 result = 2; +} + +message GetMembersRequest{ + RequestHeader header = 1; +} + +message GetMembersResponse{ + ResponseHeader header = 1; + repeated metapb.Member members = 2; + metapb.Member leader = 3; +} + +message GetPDConfigRequest{ + RequestHeader header = 1; + uint64 version = 2 ; +} + +message GetPDConfigResponse{ + ResponseHeader header = 1; + metapb.PDConfig pd_config = 2; +} + +message SetPDConfigRequest{ + RequestHeader header = 1; + metapb.PDConfig pd_config = 2; +} + +message SetPDConfigResponse{ + ResponseHeader header = 1; +} + + +message GetGraphSpaceRequest{ + RequestHeader header = 1; + string graph_Space_Name = 2; +} + +message GetGraphSpaceResponse{ + ResponseHeader header = 1; + repeated metapb.GraphSpace graph_space = 2; +} + +message SetGraphSpaceRequest{ + RequestHeader header = 1; + metapb.GraphSpace graph_space = 2; +} + +message SetGraphSpaceResponse{ + ResponseHeader header = 1; +} + +message GetClusterStatsRequest{ + RequestHeader header = 1; +} + +message GetClusterStatsResponse{ + ResponseHeader header = 1; + metapb.ClusterStats cluster = 2; +} +message ChangePeerListRequest{ + RequestHeader header = 1; + string peer_List = 2; +} +message getChangePeerListResponse{ + ResponseHeader header = 1; +} + +enum OperationMode { + Auto = 0; + Expert = 1; +} + +message SplitDataParam{ + // ID of the source partition to be split + uint32 partition_id = 1; + // Target number of partitions + uint32 count = 2; +} + +message SplitDataRequest{ + RequestHeader header = 1; + // Working mode + // Auto: automatic split until the partition count on each store reaches the maximum + // Expert: expert mode, splitParams must be specified + OperationMode mode = 2; + repeated SplitDataParam param = 3; +} + +message SplitGraphDataRequest{ + RequestHeader header = 1; + string graph_name = 2; + uint32 to_count = 3; +} + +message SplitDataResponse{ + ResponseHeader header = 1; +} + +message MovePartitionParam{ + uint32 partition_id = 1; + uint64 src_store_id = 2; + uint64 dst_store_id = 3; +} + +message MovePartitionRequest{ + RequestHeader header = 1; + // Working mode + // Auto: automatic migration until every store holds the same number of partitions + // Expert: expert mode, transferParams must be specified + OperationMode mode = 2; + repeated MovePartitionParam param = 3; +} + +message MovePartitionResponse{ + ResponseHeader header = 1; +} + 
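+// Usage sketch (editorial note, derived from the comments above, not part of the +// original patch): an Expert-mode split of partition 1 into 4 partitions would be +// sent as SplitDataRequest{mode: Expert, param: [{partition_id: 1, count: 4}]}, +// while Auto mode lets PD derive the SplitDataParam list itself; MovePartitionRequest +// follows the same pattern with MovePartitionParam{partition_id, src_store_id, +// dst_store_id} entries. + 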
+message ReportTaskRequest{ + RequestHeader header = 1; + metaTask.Task task = 2; +} + +message ReportTaskResponse{ + ResponseHeader header = 1; +} + +message GetPartitionStatsRequest{ + RequestHeader header = 1; + uint32 partition_id = 2; + // If empty, the stats of the same partition ID of all graphs are returned + string graph_name = 4; +} + +message GetPartitionStatsResponse{ + ResponseHeader header = 1; + metapb.PartitionStats partition_stats = 2; +} + +message BalanceLeadersRequest{ + RequestHeader header = 1; +} + +message BalanceLeadersResponse{ + ResponseHeader header = 1; +} + +message PutLicenseRequest{ + RequestHeader header = 1; + bytes content = 2; +} + +message PutLicenseResponse{ + ResponseHeader header = 1; +} + +message DbCompactionRequest{ + RequestHeader header = 1; + string tableName = 2; +} + +message DbCompactionResponse{ + ResponseHeader header = 1; +} + +message CombineClusterRequest { + RequestHeader header = 1; + uint32 toCount = 2; +} + +message CombineClusterResponse { + ResponseHeader header = 1; +} + +message CombineGraphRequest { + RequestHeader header = 1; + string graphName = 2; + uint32 toCount = 3; +} + +message CombineGraphResponse { + ResponseHeader header = 1; +} + +message DeleteShardGroupRequest { + RequestHeader header = 1; + uint32 groupId = 2; +} + +message DeleteShardGroupResponse { + ResponseHeader header = 1; +} + +message GetShardGroupRequest{ + RequestHeader header = 1; + uint32 group_id = 2 ; +} + +message GetShardGroupResponse{ + ResponseHeader header = 1; + metapb.ShardGroup shardGroup = 2; +} + +message UpdateShardGroupRequest{ + RequestHeader header = 1; + metapb.ShardGroup shardGroup = 2; +} + +message UpdateShardGroupResponse{ + ResponseHeader header = 1; +} + +message ChangeShardRequest{ + RequestHeader header = 1; + uint32 groupId = 2; + repeated metapb.Shard shards = 3; +} + +message ChangeShardResponse { + ResponseHeader header = 1; +} + +message UpdatePdRaftRequest{ + RequestHeader header = 1; + string config = 3; +} + +message UpdatePdRaftResponse{ + ResponseHeader header = 1; + string message = 2; +} +message CacheResponse { + ResponseHeader header = 1; + // Cached stores, shard groups and graphs + repeated metapb.Store stores = 2; + repeated metapb.ShardGroup shards = 3; + repeated metapb.Graph graphs = 4; +} +message CachePartitionResponse { + ResponseHeader header = 1; + repeated metapb.Partition partitions = 2; +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-service/pom.xml b/hugegraph-pd/hg-pd-service/pom.xml new file mode 100644 index 0000000000..e7f51323c2 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/pom.xml @@ -0,0 +1,151 @@ + + + + + 4.0.0 + + + org.apache.hugegraph + hugegraph-pd + ${revision} + ../pom.xml + + + hg-pd-service + + + 0.5.10 + + + + org.apache.hugegraph + hg-pd-grpc + + + io.grpc + * + + + + + + org.apache.hugegraph + hg-pd-core + ${revision} + + + + io.github.lognet + grpc-spring-boot-starter + 4.5.5 + + + org.springframework.boot + spring-boot-starter-logging + + + + + org.springframework.boot + spring-boot-starter-web + 2.5.14 + + + + + org.springframework.boot + spring-boot-starter-tomcat + + + org.springframework.boot + spring-boot-starter-logging + + + + + + org.springframework.boot + spring-boot-starter-actuator + 2.5.14 + + + io.micrometer + micrometer-registry-prometheus + 1.7.12 + + + org.springframework.boot + spring-boot-starter-jetty + 2.5.14 + + + org.springframework.boot + spring-boot-starter-log4j2 + 2.5.14 + + + com.lmax + disruptor + 3.4.1 + + + org.projectlombok + lombok + 1.18.24 + + + + com.google.protobuf + protobuf-java-util + 3.17.2 + + + 
org.apache.hugegraph + hugegraph-common + 1.0.1 + + + commons-io + commons-io + 2.7 + compile + + + + + + + org.springframework.boot + spring-boot-maven-plugin + 2.5.0 + + + + spring-boot + + org.apache.hugegraph.pd.boot.HugePDServer + + + + + + + + diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java new file mode 100644 index 0000000000..452f95a467 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.boot; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.context.annotation.ComponentScan; + +import com.alipay.remoting.util.StringUtils; + +/** + * PD service bootstrap class + */ +@ComponentScan(basePackages = {"org.apache.hugegraph.pd"}) +@SpringBootApplication +public class HugePDServer { + public static void main(String[] args) { + String logPath = System.getProperty("logging.path"); + if (StringUtils.isBlank(logPath)) { + System.setProperty("logging.path", "logs"); + System.setProperty("com.alipay.remoting.client.log.level", "error"); + } + + SpringApplication.run(HugePDServer.class); + System.out.println("Hugegraph-pd started."); + } +} + diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java new file mode 100644 index 0000000000..344a71cd48 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java @@ -0,0 +1,426 @@ +// TODO: uncomment later +///* +// * Licensed to the Apache Software Foundation (ASF) under one or more +// * contributor license agreements. See the NOTICE file distributed with this +// * work for additional information regarding copyright ownership. The ASF +// * licenses this file to You under the Apache License, Version 2.0 (the +// * "License"); you may not use this file except in compliance with the License. +// * You may obtain a copy of the License at +// * +// * http://www.apache.org/licenses/LICENSE-2.0 +// * +// * Unless required by applicable law or agreed to in writing, software +// * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// * License for the specific language governing permissions and limitations +// * under the License. 
+// */ +// +//package org.apache.hugegraph.pd.license; +// +//import java.io.File; +//import java.io.IOException; +//import java.net.InetAddress; +//import java.net.UnknownHostException; +//import java.nio.charset.Charset; +//import java.text.SimpleDateFormat; +//import java.time.Duration; +//import java.time.Instant; +//import java.util.Date; +//import java.util.HashMap; +//import java.util.List; +//import java.util.concurrent.CountDownLatch; +//import java.util.prefs.Preferences; +// +//import org.apache.commons.lang3.StringUtils; +//import org.apache.hugegraph.license.ExtraParam; +//import org.apache.hugegraph.license.LicenseVerifyParam; +//import org.apache.hugegraph.license.MachineInfo; +//import org.apache.hugegraph.pd.KvService; +//import org.apache.hugegraph.pd.common.PDRuntimeException; +//import org.apache.hugegraph.pd.config.PDConfig; +//import org.apache.hugegraph.pd.grpc.Pdpb; +//import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc; +//import org.apache.hugegraph.pd.grpc.kv.TTLRequest; +//import org.apache.hugegraph.pd.grpc.kv.TTLResponse; +//import org.apache.hugegraph.pd.raft.RaftEngine; +//import org.springframework.stereotype.Service; +//import org.springframework.util.Base64Utils; +// +//import com.fasterxml.jackson.databind.ObjectMapper; +//import com.google.gson.Gson; +//import com.google.gson.internal.LinkedTreeMap; +// +//import de.schlichtherle.license.CipherParam; +//import de.schlichtherle.license.DefaultCipherParam; +//import de.schlichtherle.license.DefaultKeyStoreParam; +//import de.schlichtherle.license.DefaultLicenseParam; +//import de.schlichtherle.license.KeyStoreParam; +//import de.schlichtherle.license.LicenseContent; +//import de.schlichtherle.license.LicenseParam; +//import io.grpc.CallOptions; +//import io.grpc.ManagedChannel; +//import io.grpc.ManagedChannelBuilder; +//import io.grpc.MethodDescriptor; +//import io.grpc.stub.AbstractBlockingStub; +//import io.grpc.stub.StreamObserver; +//import lombok.extern.slf4j.Slf4j; +// +//@Service +//@Slf4j +//public class LicenseVerifierService { +// +// private static final Duration CHECK_INTERVAL = Duration.ofMinutes(10); +// private static final String contentKey = "contentKey"; +// private static final Gson mapper = new Gson(); +// private static LicenseContent content; +// private static KvService kvService; +// private static volatile boolean installed = false; +// private final MachineInfo machineInfo; +// private final PDConfig pdConfig; +// private final Instant lastCheckTime = Instant.now(); +// SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); +// // private final LicenseVerifyParam verifyParam; +// private LicenseVerifyManager manager; +// private ManagedChannel channel; +// +// // public static LicenseVerifierService instance() { +// // if (INSTANCE == null) { +// // synchronized (LicenseVerifierService.class) { +// // if (INSTANCE == null) { +// // INSTANCE = new LicenseVerifierService(); +// // } +// // } +// // } +// // return INSTANCE; +// // } +// +// // public void verifyIfNeeded() { +// // Instant now = Instant.now(); +// // Duration interval = Duration.between(this.lastCheckTime, now); +// // if (!interval.minus(CHECK_INTERVAL).isNegative()) { +// // this.verify(); +// // this.lastCheckTime = now; +// // } +// // } +// +// public LicenseVerifierService(PDConfig pdConfig) { +// this.pdConfig = pdConfig; +// machineInfo = new MachineInfo(); +// kvService = new KvService(pdConfig); +// // verifyParam = initLicense(pdConfig); +// } +// +// private static LicenseVerifyParam 
buildVerifyParam(String path) { +// // NOTE: can't use JsonUtil because it binds TinkerPop's Jackson +// try { +// ObjectMapper mapper = new ObjectMapper(); +// File licenseParamFile = new File(path); +// if (!licenseParamFile.exists()) { +// log.warn("failed to get file:{}", path); +// return null; +// } +// return mapper.readValue(licenseParamFile, LicenseVerifyParam.class); +// } catch (IOException e) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// String.format("Failed to read json stream to %s", +// LicenseVerifyParam.class)); +// } +// } +// +// public LicenseVerifyParam init() { +// LicenseVerifyParam verifyParam = null; +// if (!installed) { +// synchronized (LicenseVerifierService.class) { +// if (!installed) { +// verifyParam = buildVerifyParam(pdConfig.getVerifyPath()); +// log.info("get license param: {}", pdConfig.getVerifyPath()); +// if (verifyParam != null) { +// LicenseParam licenseParam = this.initLicenseParam(verifyParam); +// this.manager = new LicenseVerifyManager(licenseParam); +// // this.install("d01e1814cd9edb01a05671bebf3919cc"); +// try { +// // this.verifyPublicCert(md5); +// File licenseFile = new File(pdConfig.getLicensePath()); +// if (!licenseFile.exists()) { +// log.warn("invalid parameter:license-path"); +// return null; +// } else { +// log.info("get license file....{}", licenseFile.getAbsolutePath()); +// } +// this.manager.uninstall(); +// content = this.manager.install(licenseFile); +// ExtraParam param = LicenseVerifyManager.getExtraParams(content); +// content.setExtra(param); +// this.checkIpAndMac(param); +// // Get the validity period, set the expiration time, notify the leader, and save the content... +// Date notAfter = content.getNotAfter(); +// long ttl = notAfter.getTime() - System.currentTimeMillis(); +// final TTLResponse[] info = {null}; +// if (!isLeader()) { +// while (RaftEngine.getInstance().getLeader() == null) { +// this.wait(200); +// } +// if (RaftEngine.getInstance().getLeader() != null) { +// CountDownLatch latch = new CountDownLatch(1); +// TTLRequest request = +// TTLRequest.newBuilder().setKey(contentKey).setValue( +// mapper.toJson(content, +// LicenseContent.class)) +// .setTtl(ttl).build(); +// StreamObserver<TTLResponse> observer = +// new StreamObserver<TTLResponse>() { +// @Override +// public void onNext(TTLResponse value) { +// info[0] = value; +// latch.countDown(); +// } +// +// @Override +// public void onError(Throwable t) { +// latch.countDown(); +// } +// +// @Override +// public void onCompleted() { +// latch.countDown(); +// } +// }; +// redirectToLeader(KvServiceGrpc.getPutTTLMethod(), request, +// observer); +// latch.await(); +// Pdpb.Error error = info[0].getHeader().getError(); +// if (!error.getType().equals(Pdpb.ErrorType.OK)) { +// throw new Exception(error.getMessage()); +// } +// } else { +// log.warn("wait for leader to put the license content......"); +// } +// +// } else { +// kvService.put(contentKey, +// mapper.toJson(content, LicenseContent.class), ttl); +// } +// installed = true; +// log.info("The license is successfully installed, valid for {} - {}", +// content.getNotBefore(), notAfter); +// } catch (Exception e) { +// log.error("Failed to install license", e); +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_ERROR_VALUE, +// "Failed to install license, ", e); +// } +// } +// } +// } +// } +// return verifyParam; +// } +// +// public synchronized void install(String md5) { +// +// } +// +// public HashMap getContext() throws Exception { +// try { +// String value = kvService.get(contentKey); +// if (StringUtils.isEmpty(value)) { 
+// throw new Exception("cannot find license content from storage"); +// } +// LicenseContent content = mapper.fromJson(value, LicenseContent.class); +// Date notAfter = content.getNotAfter(); +// Date notBefore = content.getNotBefore(); +// Date issued = content.getIssued(); +// // long currentTimeMillis = System.currentTimeMillis(); +// // long diff = notAfter - currentTimeMillis; +// // boolean expired = diff <= 0; +// HashMap result = mapper.fromJson(value, HashMap.class); +// result.put("current", formatter.format(new Date())); +// result.put("notAfter", formatter.format(notAfter)); +// result.put("issued", formatter.format(issued)); +// result.put("notBefore", formatter.format(notBefore)); +// return result; +// } catch (Exception e) { +// throw new Exception("cannot find license content from storage:" + e.getMessage()); +// } +// } +// +// public LicenseContent verify(int cores, int nodeCount) { +// try { +// String value = kvService.get(contentKey); +// if (StringUtils.isEmpty(value)) { +// throw new Exception("cannot find license content from storage"); +// } +// LicenseContent content = mapper.fromJson(value, LicenseContent.class); +// LinkedTreeMap param = (LinkedTreeMap) content.getExtra(); +// if (param != null) { +// int licCpus = ((Double) param.get("cpus")).intValue(); +// int licNodes = ((Double) param.get("nodes")).intValue(); +// if (licCpus != -1) { +// // licCpus == -1 means the number of CPU cores is unlimited +// if (cores <= 0 || cores > licCpus) { +// String msg = +// String.format("Invalid CPU cores: %s, licensed: %s", cores, licCpus); +// throw new PDRuntimeException( +// Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, msg); +// } +// } +// +// if (licNodes != -1) { +// // licNodes == -1 means the number of service nodes is unlimited +// if (nodeCount > licNodes) { +// String msg = +// String.format("Invalid node count: %s, licensed: %s", nodeCount, licNodes); +// throw new PDRuntimeException( +// Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, msg); +// } +// } +// } +// return content; +// } catch (Exception e) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// "License verification failed: " + e.getMessage()); +// } +// } +// +// public boolean isLeader() { +// return RaftEngine.getInstance().isLeader(); +// } +// +// // private void verifyPublicCert(String expectMD5) { +// // String path = this.verifyParam.publicKeyPath(); +// // try (InputStream is = LicenseVerifierService.class.getResourceAsStream(path)) { +// // String actualMD5 = DigestUtils.md5Hex(is); +// // if (!actualMD5.equals(expectMD5)) { +// // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Invalid public +// // cert"); +// // } +// // } catch (IOException e) { +// // log.error("Failed to read public cert", e); +// // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Failed to read +// // public cert", e); +// // } +// // } +// +// private <ReqT, RespT, StubT extends AbstractBlockingStub<StubT>> void redirectToLeader( +// MethodDescriptor<ReqT, RespT> method, ReqT req, +// io.grpc.stub.StreamObserver<RespT> observer) { +// try { +// if (channel == null) { +// synchronized (this) { +// if (channel == null) { +// channel = ManagedChannelBuilder +// .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()) +// .usePlaintext() +// .build(); +// } +// } +// log.info("Grpc get leader address {}", +// RaftEngine.getInstance().getLeaderGrpcAddress()); +// } +// +// io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), +// req, +// observer); +// } catch (Exception e) { +// e.printStackTrace(); +// } +// +// } +// +// private LicenseParam initLicenseParam(LicenseVerifyParam param) { +// Preferences 
preferences = Preferences.userNodeForPackage(LicenseVerifierService.class); +// CipherParam cipherParam = new DefaultCipherParam(param.storePassword()); +// KeyStoreParam keyStoreParam = new DefaultKeyStoreParam(LicenseVerifierService.class, +// param.publicKeyPath(), +// param.publicAlias(), +// param.storePassword(), null); +// return new DefaultLicenseParam(param.subject(), preferences, keyStoreParam, cipherParam); +// } +// +// public String getIpAndMac() { +// List<String> actualIps = this.machineInfo.getIpAddress(); +// String host = pdConfig.getHost(); +// String licenseHost = host; +// if (!actualIps.contains(host)) { +// licenseHost = actualIps.get(0); +// } +// try { +// String mac = this.machineInfo.getMacByInetAddress(InetAddress.getByName(licenseHost)); +// HashMap<String, String> ipAndMac = new HashMap<>(); +// ipAndMac.put("ip", licenseHost); +// ipAndMac.put("mac", mac); +// String json = new Gson().toJson(ipAndMac); +// String encode = Base64Utils.encodeToString(json.getBytes(Charset.defaultCharset())); +// return encode; +// } catch (Exception e) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_ERROR_VALUE, +// String.format("Failed to get ip and mac for %s", +// e.getMessage())); +// } +// } +// +// private void checkIpAndMac(ExtraParam param) { +// String expectIp = param.ip(); +// boolean matched = false; +// List<String> actualIps = null; +// if (StringUtils.isEmpty(expectIp)) { +// matched = true; +// } else { +// actualIps = this.machineInfo.getIpAddress(); +// for (String actualIp : actualIps) { +// if (actualIp.equalsIgnoreCase(expectIp)) { +// matched = true; +// break; +// } +// } +// } +// if (!matched) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, String.format( +// "The server's ip '%s' doesn't match the authorized '%s'", actualIps, +// expectIp)); +// } +// String expectMac = param.mac(); +// if (StringUtils.isEmpty(expectMac)) { +// return; +// } +// // The mac must not be empty here +// if (!StringUtils.isEmpty(expectIp)) { +// String actualMac; +// try { +// actualMac = this.machineInfo.getMacByInetAddress(InetAddress.getByName(expectIp)); +// } catch (UnknownHostException e) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// String.format("Failed to get mac address for ip '%s'", +// expectIp)); +// } +// String expectFormatMac = expectMac.replaceAll(":", "-"); +// String actualFormatMac = actualMac.replaceAll(":", "-"); +// if (!actualFormatMac.equalsIgnoreCase(expectFormatMac)) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// String.format( +// "The server's mac '%s' doesn't match the " + +// "authorized '%s'", +// actualMac, expectMac)); +// } +// } else { +// String expectFormatMac = expectMac.replaceAll(":", "-"); +// List<String> actualMacs = this.machineInfo.getMacAddress(); +// matched = false; +// for (String actualMac : actualMacs) { +// String actualFormatMac = actualMac.replaceAll(":", "-"); +// if (actualFormatMac.equalsIgnoreCase(expectFormatMac)) { +// matched = true; +// break; +// } +// } +// if (!matched) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// String.format( +// "The server's macs %s don't match the " + +// "authorized '%s'", +// actualMacs, expectMac)); +// } +// } +// } +//} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java new file mode 100644 index 
0000000000..d584c45400 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java @@ -0,0 +1,77 @@ +// TODO: uncomment later +///* +// * Licensed to the Apache Software Foundation (ASF) under one or more +// * contributor license agreements. See the NOTICE file distributed with this +// * work for additional information regarding copyright ownership. The ASF +// * licenses this file to You under the Apache License, Version 2.0 (the +// * "License"); you may not use this file except in compliance with the License. +// * You may obtain a copy of the License at +// * +// * http://www.apache.org/licenses/LICENSE-2.0 +// * +// * Unless required by applicable law or agreed to in writing, software +// * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// * License for the specific language governing permissions and limitations +// * under the License. +// */ +// +//package org.apache.hugegraph.pd.license; +// +//import java.io.IOException; +//import java.util.List; +// +//import org.apache.hugegraph.license.CommonLicenseManager; +//import org.apache.hugegraph.license.ExtraParam; +//import org.apache.hugegraph.pd.common.PDRuntimeException; +//import org.apache.hugegraph.pd.grpc.Pdpb; +// +//import com.fasterxml.jackson.core.type.TypeReference; +//import com.fasterxml.jackson.databind.ObjectMapper; +// +//import de.schlichtherle.license.LicenseContent; +//import de.schlichtherle.license.LicenseContentException; +//import de.schlichtherle.license.LicenseParam; +//import lombok.extern.slf4j.Slf4j; +// +//@Slf4j +//public class LicenseVerifyManager extends CommonLicenseManager { +// +// private static final ObjectMapper MAPPER = new ObjectMapper(); +// private static final int NO_LIMIT = -1; +// +// public LicenseVerifyManager(LicenseParam param) { +// super(param); +// } +// +// public static ExtraParam getExtraParams(LicenseContent content) { +// List<ExtraParam> params; +// try { +// TypeReference<List<ExtraParam>> type; +// type = new TypeReference<List<ExtraParam>>() { +// }; +// params = MAPPER.readValue((String) content.getExtra(), type); +// if (params != null && params.size() > 0) { +// return params.get(0); +// } +// } catch (IOException e) { +// log.error("Failed to read extra params", e); +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// "Failed to read extra params", e); +// } +// return null; +// } +// +// @Override +// protected synchronized void validate(LicenseContent content) throws LicenseContentException { +// // Call super.validate() first to verify the common license parameters +// try { +// super.validate(content); +// } catch (LicenseContentException e) { +// // log.error("Failed to verify license", e); +// throw e; +// } +// // Verify the customized license parameters. +// getExtraParams(content); +// } +//} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java new file mode 100644 index 0000000000..d051bfbdba --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.metrics; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import io.micrometer.core.instrument.MeterRegistry; + +@Configuration +public class MetricsConfig { + @Autowired + private PDMetrics metrics; + + @Bean + public MeterRegistryCustomizer<MeterRegistry> metricsCommonTags() { + return (registry) -> registry.config().commonTags("hg", "pd"); + } + + @Bean + public MeterRegistryCustomizer<MeterRegistry> registerMeters() { + return (registry) -> { + metrics.init(registry); + }; + } + +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java new file mode 100644 index 0000000000..bb230e8d09 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.metrics; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.service.PDService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import io.micrometer.core.instrument.Gauge; +import io.micrometer.core.instrument.MeterRegistry; +import lombok.extern.slf4j.Slf4j; + +@Component +@Slf4j +public final class PDMetrics { + public static final String PREFIX = "hg"; + private static final AtomicLong GRAPHS = new AtomicLong(0); + private MeterRegistry registry; + + @Autowired + private PDService pdService; + + public synchronized void init(MeterRegistry meterRegistry) { + + if (registry == null) { + registry = meterRegistry; + registerMeters(); + } + + } + + private void registerMeters() { + Gauge.builder(PREFIX + ".up", () -> 1).register(registry); + + Gauge.builder(PREFIX + ".graphs", () -> updateGraphs()) + .description("Number of graphs registered in PD") + .register(registry); + + Gauge.builder(PREFIX + ".stores", () -> updateStores()) + .description("Number of stores registered in PD") + .register(registry); + + } + + private long updateGraphs() { + long buf = getGraphs(); + + if (buf != GRAPHS.get()) { + GRAPHS.set(buf); + registerGraphMetrics(); + } + return buf; + } + + private long updateStores() { + return getStores(); + } + + private long getGraphs() { + return getGraphMetas().size(); + } + + private long getStores() { + try { + return this.pdService.getStoreNodeService().getStores(null).size(); + } catch (PDException e) { + log.error(e.getMessage(), e); + } + return 0; + } + + private List<Metapb.Graph> getGraphMetas() { + try { + return this.pdService.getPartitionService().getGraphs(); + } catch (PDException e) { + log.error(e.getMessage(), e); + } + return Collections.emptyList(); + } + + private void registerGraphMetrics() { + this.getGraphMetas().forEach(meta -> { + Gauge.builder(PREFIX + ".partitions", this.pdService.getPartitionService(), + e -> e.getPartitions(meta.getGraphName()).size()) + .description("Number of partitions assigned to a graph") + .tag("graph", meta.getGraphName()) + .register(this.registry); + + }); + } + +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java new file mode 100644 index 0000000000..72d9599c86 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.model; + +import java.util.Objects; + +public class DemoModel { + private int status; + private String text; + + public DemoModel(int status, String text) { + this.status = status; + this.text = text; + } + + public int getStatus() { + return status; + } + + public DemoModel setStatus(int status) { + this.status = status; + return this; + } + + public String getText() { + return text; + } + + public DemoModel setText(String text) { + this.text = text; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DemoModel that = (DemoModel) o; + return status == that.status && Objects.equals(text, that.text); + } + + @Override + public int hashCode() { + return Objects.hash(status, text); + } + + @Override + public String toString() { + return "DemoModel{" + + "status=" + status + + ", text='" + text + '\'' + + '}'; + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java new file mode 100644 index 0000000000..933f0f6d66 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class GraphRestRequest { + private int partitionCount; + private int shardCount; +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java new file mode 100644 index 0000000000..86ba328699 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class GraphSpaceRestRequest { + private Long storageLimit; +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java new file mode 100644 index 0000000000..d0f5ce73b3 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class PeerRestRequest { + private String peerList; +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java new file mode 100644 index 0000000000..57e15f74a2 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.model; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +public class PromTargetsModel { + private static final String LABEL_METRICS_PATH = "__metrics_path__"; + private static final String LABEL_SCHEME = "__scheme__"; + private static final String LABEL_JOB_NAME = "job"; + private static final String LABEL_CLUSTER = "cluster"; + private final Map<String, String> labels = new HashMap<>(); + private Set<String> targets = new HashSet<>(); + + private PromTargetsModel() { + } + + public static PromTargetsModel of() { + return new PromTargetsModel(); + } + + public Set<String> getTargets() { + return targets; + } + + public PromTargetsModel setTargets(Set<String> targets) { + if (targets != null) { + this.targets = targets; + } + return this; + } + + public Map<String, String> getLabels() { + return labels; + } + + public PromTargetsModel addTarget(String target) { + if (target == null) { + return this; + } + this.targets.add(target); + return this; + } + + public PromTargetsModel setMetricsPath(String path) { + return this.addLabel(LABEL_METRICS_PATH, path); + } + + public PromTargetsModel setScheme(String scheme) { + return this.addLabel(LABEL_SCHEME, scheme); + } + + public PromTargetsModel setClusterId(String clusterId) { + return this.addLabel(LABEL_CLUSTER, clusterId); + } + + public PromTargetsModel addLabel(String label, String value) { + if (label == null || value == null) { + return this; + } + this.labels.put(label, value); + return this; + } + + @Override + public String toString() { + return "PromTargetsModel{" + + "targets=" + targets + + ", labels=" + labels + + '}'; + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java new file mode 100644 index 0000000000..5833d99ca2 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.model; + +import java.util.HashMap; + +import lombok.Data; + +@Data +public class RegistryQueryRestRequest { + + String appName; + String version; + HashMap<String, String> labels; +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java new file mode 100644 index 0000000000..666a8661c3 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.model; + +import java.util.HashMap; + +import lombok.Data; + +@Data +public class RegistryRestRequest { + + String id; + String appName; + String version; + String address; + String interval; + HashMap<String, String> labels; +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java new file mode 100644 index 0000000000..93ab584e22 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.model; + +import java.io.Serializable; + +import org.apache.hugegraph.pd.grpc.Pdpb; + +import lombok.Data; + +@Data +public class RegistryRestResponse { + + Pdpb.ErrorType errorType; + String message; + Serializable data; + +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java new file mode 100644 index 0000000000..54f6c60bd5 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.model; + +import java.util.HashMap; + +import org.apache.hugegraph.pd.grpc.Pdpb; + +import lombok.Data; + +@Data +public class RestApiResponse { + String message; + Object data; + int status; + + public RestApiResponse(Object data, Pdpb.ErrorType status, String message) { + if (data == null) { + data = new HashMap(); + } + this.data = data; + this.status = status.getNumber(); + this.message = message; + } + + public RestApiResponse() { + + } + + public RestApiResponse(Object data, int status, String message) { + if (data == null) { + data = new HashMap(); + } + this.data = data; + this.status = status; + this.message = message; + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java new file mode 100644 index 0000000000..bcf2b1288f --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class StoreRestRequest { + String storeState; +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java new file mode 100644 index 0000000000..cc25cedd36 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class TimeRangeRequest { + String startTime; + String endTime; +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java new file mode 100644 index 0000000000..ec3976b8bf --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java @@ -0,0 +1,173 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.notice; + +import java.util.function.Function; +import java.util.function.Supplier; + +import org.apache.hugegraph.pd.common.HgAssert; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class NoticeBroadcaster { + private final Supplier<Long> noticeSupplier; + private long noticeId; + private String durableId; + private Supplier<String> durableSupplier; + private Function<String, Boolean> removeFunction; + private int state; // 0=ready; 1=notified; 2=done ack; -1=error + private int counter; + private long timestamp; + + private NoticeBroadcaster(Supplier<Long> noticeSupplier) { + this.noticeSupplier = noticeSupplier; + this.timestamp = System.currentTimeMillis(); + } + + public static NoticeBroadcaster of(Supplier<Long> noticeSupplier) { + HgAssert.isArgumentNotNull(noticeSupplier, "noticeSupplier"); + return new NoticeBroadcaster(noticeSupplier); + } + + public NoticeBroadcaster setDurableSupplier(Supplier<String> durableSupplier) { + this.durableSupplier = durableSupplier; + return this; + } + + public NoticeBroadcaster setRemoveFunction(Function<String, Boolean> removeFunction) { + this.removeFunction = removeFunction; + return this; + } + + public NoticeBroadcaster notifying() { + + if (this.state >= 2) { + log.warn("Aborted notifying since the ack has already been done. notice: {}", this); + return this; + } + + this.counter++; + + if (this.durableId == null && this.durableSupplier != null) { + try { + this.durableId = this.durableSupplier.get(); + } catch (Throwable t) { + log.error("Failed to invoke durableSupplier, caused by:", t); + } + } + + try { + this.noticeId = this.noticeSupplier.get(); + state = 1; + } catch (Throwable t) { + state = -1; + log.error("Failed to invoke noticeSupplier: " + + this.noticeSupplier + ", caused by:", t); + } + + return this; + } + + public boolean checkAck(long ackNoticeId) { + boolean flag = false; + + if (this.noticeId == ackNoticeId) { + flag = true; + this.state = 2; + } + + if (flag) { + this.doRemoveDurable(); + } + + return flag; + } + + public boolean doRemoveDurable() { + log.info("Removing NoticeBroadcaster is starting, noticeId:{}, durableId: {}" + , this.noticeId, this.durableId); + boolean flag = false; + + if (this.removeFunction == null) { + log.warn("The remove-function hasn't been set."); + return false; + } + + if (this.durableId == null) { + log.warn("The durableId hasn't been set."); + return false; + } + + try { + if (!(flag = this.removeFunction.apply(this.durableId))) { + log.error("Removing NoticeBroadcaster was not complete, noticeId: {}, durableId: {}" + , this.noticeId, this.durableId); + } + } catch (Throwable t) { + log.error("Failed to remove NoticeBroadcaster, noticeId: " + + this.noticeId + ", durableId: " + this.durableId + ". 
Caused by:", t); + } + + return flag; + } + + public long getNoticeId() { + return noticeId; + } + + public int getState() { + return state; + } + + public int getCounter() { + return counter; + } + + public String getDurableId() { + return durableId; + } + + public void setDurableId(String durableId) { + + if (HgAssert.isInvalid(durableId)) { + log.warn("Set an invalid durable-id to NoticeBroadcaster."); + } + + this.durableId = durableId; + } + + public long getTimestamp() { + return timestamp; + } + + public void setTimestamp(long timestamp) { + this.timestamp = timestamp; + } + + @Override + public String toString() { + return "NoticeBroadcaster{" + + "noticeId=" + noticeId + + ", durableId='" + durableId + '\'' + + ", state=" + state + + ", counter=" + counter + + ", timestamp=" + timestamp + + '}'; + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java new file mode 100644 index 0000000000..64472f5625 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java @@ -0,0 +1,223 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseType;
+import org.apache.hugegraph.pd.util.IdUtil;
+
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@ThreadSafe
+@Slf4j
+abstract class AbstractObserverSubject {
+
+    /* send notice to client */
+    private final Map<Long, StreamObserver<PulseResponse>> observerHolder = new HashMap<>(1024);
+    /* notice from client */
+    private final Map<Long, PulseListener> listenerHolder = new HashMap<>(1024);
+
+    private final byte[] lock = new byte[0];
+    private final PulseResponse.Builder builder = PulseResponse.newBuilder();
+    private final PulseType pulseType;
+
+    protected AbstractObserverSubject(PulseType pulseType) {
+        this.pulseType = pulseType;
+    }
+
+    /**
+     * Add an observer from a remote client.
+     *
+     * @param observerId       the unique id of the observer
+     * @param responseObserver the gRPC stream used to push notices
+     */
+    void addObserver(Long observerId, StreamObserver<PulseResponse> responseObserver) {
+        synchronized (this.observerHolder) {
+
+            if (this.observerHolder.containsKey(observerId)) {
+                responseObserver.onError(
+                        new Exception(
+                                "The observer-id[" + observerId + "] of " + this.pulseType.name() +
+                                " subject already exists."));
+                return;
+            }
+
+            log.info("Adding a " + this.pulseType + "'s observer, observer-id is [" + observerId +
+                     "].");
+            this.observerHolder.put(observerId, responseObserver);
+        }
+    }
+
+    /**
+     * Remove an observer by id.
+     *
+     * @param observerId       the unique id of the observer
+     * @param responseObserver the gRPC stream to be completed
+     */
+    void removeObserver(Long observerId, StreamObserver<PulseResponse> responseObserver) {
+        synchronized (this.observerHolder) {
+            log.info("Removing a " + this.pulseType + "'s observer, observer-id is [" +
+                     observerId + "].");
+            this.observerHolder.remove(observerId);
+        }
+
+        responseObserver.onCompleted();
+    }
+
+    abstract String toNoticeString(PulseResponse res);
+
+    /**
+     * @param c a consumer that fills the response builder
+     * @return notice ID, or -1 if the notice was abandoned
+     */
+    protected long notifyClient(Consumer<PulseResponse.Builder> c) {
+        synchronized (lock) {
+
+            if (c == null) {
+                log.error(this.pulseType.name() +
+                          "'s notice was abandoned, caused by: notifyObserver(null)");
+                return -1;
+            }
+
+            try {
+                c.accept(this.builder.clear());
+            } catch (Throwable t) {
+                log.error(this.pulseType.name() + "'s notice was abandoned, caused by:", t);
+                return -1;
+            }
+
+            long noticeId = IdUtil.createMillisId();
+
+            Iterator<Map.Entry<Long, StreamObserver<PulseResponse>>> iter =
+                    observerHolder.entrySet().iterator();
+
+            // long start = System.currentTimeMillis();
+            while (iter.hasNext()) {
+                Map.Entry<Long, StreamObserver<PulseResponse>> entry = iter.next();
+                Long observerId = entry.getKey();
+                PulseResponse res =
+                        this.builder.setObserverId(observerId).setNoticeId(noticeId).build();
+
+                try {
+                    entry.getValue().onNext(res);
+                } catch (Throwable e) {
+                    log.error("Failed to send " + this.pulseType.name() + "'s notice[" +
+                              toNoticeString(res) +
+                              "] to observer[" + observerId + "].", e);
+
+                    // TODO: ? try multi-times?
+                    // iter.remove();
+                    log.error("Removed a " + this.pulseType.name() + "'s observer[" +
+                              entry.getKey() + "], because sending failed once.", e);
+                }
+
+            }
+
+            // log.info("notice client: notice id: {}, ts: {}, cost: {}", noticeId,
+            //          System.currentTimeMillis(),
+            //          (System.currentTimeMillis() - start) / 1000);
+            return noticeId;
+        }
+    }
+
+    abstract long notifyClient(com.google.protobuf.GeneratedMessageV3 response);
+
+    protected void notifyError(int code, String message) {
+        synchronized (lock) {
+            Iterator<Map.Entry<Long, StreamObserver<PulseResponse>>> iter =
+                    observerHolder.entrySet().iterator();
+            while (iter.hasNext()) {
+                Map.Entry<Long, StreamObserver<PulseResponse>> entry = iter.next();
+                Long observerId = entry.getKey();
+                PulseResponse res = this.builder.setObserverId(observerId).build();
+                try {
+                    entry.getValue().onError(
+                            Status.fromCodeValue(code).withDescription(message)
+                                  .asRuntimeException());
+                } catch (Throwable e) {
+                    log.warn("Failed to send {}'s notice[{}] to observer[{}], error: {}",
+                             this.pulseType.name(), toNoticeString(res), observerId,
+                             e.getMessage());
+                }
+            }
+        }
+    }
+
+    /**
+     * Add a listener from the local server.
+     *
+     * @param listenerId the unique id of the listener
+     * @param listener   the callback invoked on client notices
+     */
+    void addListener(Long listenerId, PulseListener listener) {
+        synchronized (this.listenerHolder) {
+
+            if (this.listenerHolder.containsKey(listenerId)) {
+                listener.onError(
+                        new Exception(
+                                "The listener-id[" + listenerId + "] of " + this.pulseType.name() +
+                                " subject already exists."));
+                return;
+            }
+
+            log.info("Adding a " + this.pulseType + "'s listener, listener-id is [" + listenerId +
+                     "].");
+            this.listenerHolder.put(listenerId, listener);
+        }
+    }
+
+    /**
+     * Remove a listener by id.
+     *
+     * @param listenerId the unique id of the listener
+     * @param listener   the callback to be completed
+     */
+    void removeListener(Long listenerId, PulseListener listener) {
+        synchronized (this.listenerHolder) {
+            log.info("Removing a " + this.pulseType + "'s listener, listener-id is [" +
+                     listenerId + "].");
+            this.listenerHolder.remove(listenerId);
+        }
+
+        listener.onCompleted();
+    }
+
+    abstract Function<PulseNoticeRequest, ?> getNoticeHandler();
+
+    void handleClientNotice(PulseNoticeRequest noticeRequest) throws Exception {
+
+        Iterator<Map.Entry<Long, PulseListener>> iter = listenerHolder.entrySet().iterator();
+
+        while (iter.hasNext()) {
+            Map.Entry<Long, PulseListener> entry = iter.next();
+            Long listenerId = entry.getKey();
+            entry.getValue().onNext(getNoticeHandler().apply(noticeRequest));
+        }
+    }
+}
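+
+/*
+ * Sketch of a minimal subclass (illustrative; the concrete subjects added in
+ * this patch, e.g. PartitionHeartbeatSubject, follow the same shape):
+ *
+ *   class MySubject extends AbstractObserverSubject {
+ *       MySubject() { super(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT); }
+ *       @Override String toNoticeString(PulseResponse res) { return res.toString(); }
+ *       @Override Function<PulseNoticeRequest, ?> getNoticeHandler() {
+ *           return PulseNoticeRequest::getPartitionHeartbeatRequest;
+ *       }
+ *       @Override long notifyClient(com.google.protobuf.GeneratedMessageV3 response) {
+ *           return super.notifyClient(b -> b.setPartitionHeartbeatResponse(
+ *                   (PartitionHeartbeatResponse) response));
+ *       }
+ *   }
+ */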
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java
new file mode 100644
index 0000000000..ca5cfe7096
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java
@@ -0,0 +1,444 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull;
+import static org.apache.hugegraph.pd.grpc.Pdpb.ErrorType.NOT_LEADER;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.common.HgAssert;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PdInstructionResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PdInstructionType;
+import org.apache.hugegraph.pd.grpc.pulse.PulseCreateRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseType;
+import org.apache.hugegraph.pd.notice.NoticeBroadcaster;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.util.IdUtil;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.Parser;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@ThreadSafe
+public class PDPulseSubject {
+
+    private static final long NOTICE_EXPIRATION_TIME = 30 * 60 * 1000;
+    private static final int RETRYING_PERIOD_SECONDS = 60;
+    private static final Map<String, AbstractObserverSubject> subjectHolder =
+            new ConcurrentHashMap<>();
+    private static final ConcurrentLinkedQueue<NoticeBroadcaster> broadcasterQueue =
+            new ConcurrentLinkedQueue<>();
+    private static final ScheduledExecutorService scheduledExecutor =
+            Executors.newScheduledThreadPool(1);
+
+    private static Supplier<List<Metapb.QueueItem>> queueRetrieveFunction =
+            () -> Collections.emptyList();
+    private static Function<Metapb.QueueItem, Boolean> queueDurableFunction = (e) -> true;
+    private static Function<String, Boolean> queueRemoveFunction = (e) -> true;
+
+    static {
+        subjectHolder.put(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name(),
+                          new PartitionHeartbeatSubject());
+        subjectHolder.put(PulseType.PULSE_TYPE_PD_INSTRUCTION.name(), new PdInstructionSubject());
+        // add some other type here...
+        // ...
+    }
+
+    // Schedule tasks
+    static {
+        scheduledExecutor.scheduleAtFixedRate(PDPulseSubject::doSchedule, 0,
+                                              RETRYING_PERIOD_SECONDS, TimeUnit.SECONDS);
+    }
+
+    private static void doSchedule() {
+        appendQueue();
+        expireQueue();
+        // retry
+        broadcasterQueue.forEach(NoticeBroadcaster::notifying);
+    }
+
+    private static void appendQueue() {
+        broadcasterQueue.addAll(
+                getQueueItems()
+                        .parallelStream()
+                        .filter(e -> broadcasterQueue
+                                .stream()
+                                .noneMatch(b -> e.getItemId().equals(b.getDurableId()))
+                        ).map(PDPulseSubject::createBroadcaster)
+                        .filter(e -> e != null)
+                        .peek(e -> log.info("Appending notice: {}", e))
+                        .collect(Collectors.toList())
+        );
+    }
+
+    private static void expireQueue() {
+        broadcasterQueue.removeIf(e -> {
+            if (System.currentTimeMillis() - e.getTimestamp() >= NOTICE_EXPIRATION_TIME) {
+                log.info("Notice was expired, trying to remove, notice: {}", e);
+                return e.doRemoveDurable();
+            } else {
+                return false;
+            }
+        });
+    }
+
+    private static List<Metapb.QueueItem> getQueueItems() {
+        try {
+            return queueRetrieveFunction.get();
+        } catch (Throwable t) {
+            log.error("Failed to retrieve queue from queueRetrieveFunction, caused by:", t);
+        }
+
+        return Collections.emptyList();
+    }
+
+    public static void setQueueRetrieveFunction(
+            Supplier<List<Metapb.QueueItem>> queueRetrieveFunction) {
+        HgAssert.isArgumentNotNull(queueRetrieveFunction, "queueRetrieveFunction");
+        PDPulseSubject.queueRetrieveFunction = queueRetrieveFunction;
+    }
+
+    public static void setQueueDurableFunction(
+            Function<Metapb.QueueItem, Boolean> queueDurableFunction) {
+        HgAssert.isArgumentNotNull(queueDurableFunction, "queueDurableFunction");
+        PDPulseSubject.queueDurableFunction = queueDurableFunction;
+    }
+
+    public static void setQueueRemoveFunction(Function<String, Boolean> queueRemoveFunction) {
+        HgAssert.isArgumentNotNull(queueRemoveFunction, "queueRemoveFunction");
+        PDPulseSubject.queueRemoveFunction = queueRemoveFunction;
+    }
+
+    /**
+     * Add a responseObserver of a client.
+     *
+     * @param responseObserver the gRPC stream used to push responses
+     * @return a request observer that handles the client's pulse requests
+     */
+    public static StreamObserver<PulseRequest> addObserver(
+            StreamObserver<PulseResponse> responseObserver) {
+        isArgumentNotNull(responseObserver, "responseObserver");
+        return new PDPulseStreamObserver(responseObserver);
+    }
+
+    /**
+     * Send a notice to the pd-client.
+     *
+     * @param responseBuilder the builder of the response to broadcast
+     */
+    public static void notifyClient(PartitionHeartbeatResponse.Builder responseBuilder) {
+        HgAssert.isArgumentNotNull(responseBuilder, "responseBuilder");
+        notifyClient(responseBuilder.build());
+    }
+
+    private static void notifyClient(PartitionHeartbeatResponse response) {
+        doBroadcast(createBroadcaster(response));
+    }
+
+    public static void notifyClient(PdInstructionResponse response) {
+        doBroadcast(createBroadcaster(response));
+    }
+
+    private static void doBroadcast(NoticeBroadcaster broadcaster) {
+        broadcasterQueue.add(broadcaster.notifying());
+    }
+
+    private static AbstractObserverSubject getSubject(PulseType pulseType) {
+        return subjectHolder.get(pulseType.name());
+    }
+
+    private static NoticeBroadcaster createBroadcaster(Metapb.QueueItem item) {
+        PartitionHeartbeatResponse notice = toNotice(item);
+        if (notice == null) {
+            return null;
+        }
+        NoticeBroadcaster res = createBroadcaster(notice);
+        res.setDurableId(item.getItemId());
+        res.setTimestamp(item.getTimestamp());
+        return res;
+    }
+
+    private static NoticeBroadcaster createBroadcaster(PartitionHeartbeatResponse notice) {
+        return NoticeBroadcaster.of(getNoticeSupplier(notice))
+                                .setDurableSupplier(getDurableSupplier(notice))
+                                .setRemoveFunction(getRemoveFunction());
+    }
+    private static NoticeBroadcaster createBroadcaster(PdInstructionResponse notice) {
+        return NoticeBroadcaster.of(getNoticeSupplier(notice))
+                                .setDurableSupplier(getDurableSupplier(notice))
+                                .setRemoveFunction(getRemoveFunction());
+    }
+
+    // public static Supplier<Long> getNoticeSupplier(PartitionHeartbeatResponse notice) {
+    //     TODO: PartitionHeartbeatSubject.class -> T
+    //     return () -> getSubject(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT,
+    //                             PartitionHeartbeatSubject.class)
+    //             .notifyClient(notice);
+    // }
+
+    public static <T extends com.google.protobuf.GeneratedMessageV3> Supplier<Long>
+    getNoticeSupplier(T notice) {
+        PulseType type;
+        if (notice instanceof PdInstructionResponse) {
+            type = PulseType.PULSE_TYPE_PD_INSTRUCTION;
+        } else if (notice instanceof PartitionHeartbeatResponse) {
+            type = PulseType.PULSE_TYPE_PARTITION_HEARTBEAT;
+        } else {
+            throw new IllegalArgumentException("Unknown pulse type " +
+                                               notice.getClass().getName());
+        }
+        return () -> getSubject(type).notifyClient(notice);
+    }
+
+    private static Supplier<String> getDurableSupplier(
+            com.google.protobuf.GeneratedMessageV3 notice) {
+        return () -> {
+            Metapb.QueueItem queueItem = toQueueItem(notice);
+            String res = null;
+
+            try {
+                if (queueDurableFunction.apply(queueItem)) {
+                    res = queueItem.getItemId();
+                } else {
+                    log.error("Failed to persist the queue-item that contained " +
+                              "PartitionHeartbeatResponse: {}", notice);
+                }
+            } catch (Throwable t) {
+                log.error("Failed to invoke queueDurableFunction, caused by:", t);
+            }
+
+            return res;
+        };
+    }
+
+    private static Function<String, Boolean> getRemoveFunction() {
+        return s -> {
+            boolean flag = false;
+
+            try {
+                flag = queueRemoveFunction.apply(s);
+            } catch (Throwable t) {
+                log.error("Failed to invoke queueRemoveFunction, caused by:", t);
+            }
+
+            return flag;
+        };
+    }
+
+    private static Metapb.QueueItem toQueueItem(com.google.protobuf.GeneratedMessageV3 notice) {
+        return Metapb.QueueItem.newBuilder()
+                               .setItemId(IdUtil.createMillisStr())
+                               .setItemClass(notice.getClass().getTypeName())
+                               .setItemContent(notice.toByteString())
+                               .setTimestamp(System.currentTimeMillis())
+                               .build();
+    }
+
+    private static PartitionHeartbeatResponse toNotice(Metapb.QueueItem item) {
+        Parser<PartitionHeartbeatResponse> parser = PartitionHeartbeatResponse.parser();
+        PartitionHeartbeatResponse buf = null;
+        try {
+            buf = parser.parseFrom(item.getItemContent());
+        } catch (InvalidProtocolBufferException t) {
+            log.error("Failed to parse queue-item to PartitionHeartbeatResponse, caused by:", t);
+        }
+        return buf;
+    }
+
+    public static void notifyError(int code, String message) {
+        subjectHolder.forEach((k, v) -> {
+            v.notifyError(code, message);
+        });
+    }
+
+    /**
+     * Add a notice listener; the notice comes from the pd-client.
+     *
+     * @param listener the callback invoked on partition-heartbeat notices
+     */
+    public static void listenPartitionHeartbeat(
+            PulseListener<PartitionHeartbeatRequest> listener) {
+        subjectHolder.get(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name())
+                     .addListener(createListenerId(), listener);
+    }
+
+    private static Long createListenerId() {
+        // TODO: Maybe some other way...
+        return createObserverId();
+    }
+
+    private static Long createObserverId() {
+        return IdUtil.createMillisId();
+    }
+
+    /* inner classes below */
+
+    private static class PDPulseStreamObserver implements StreamObserver<PulseRequest> {
+
+        private final StreamObserver<PulseResponse> responseObserver;
+        private AbstractObserverSubject subject;
+        private Long observerId;
+
+        PDPulseStreamObserver(StreamObserver<PulseResponse> responseObserver) {
+            this.responseObserver = responseObserver;
+        }
+
+        private void cancelObserver() {
+
+            if (this.subject == null) {
+                this.responseObserver.onError(
+                        new Exception("Invoke cancel-observer before create-observer."));
+                return;
+            }
+
+            this.subject.removeObserver(this.observerId, this.responseObserver);
+        }
+
+        private void addObserver(PulseCreateRequest request) {
+            if (this.subject != null) {
+                return;
+            }
+
+            PulseType pulseType = getPulseType(request);
+            if (pulseType == null) {
+                return;
+            }
+
+            this.subject = getSubject(pulseType);
+            this.observerId = createObserverId();
+
+            this.subject.addObserver(this.observerId, this.responseObserver);
+        }
+
+        private void ackNotice(long noticeId, long observerId) {
+            // log.info("ack noticeId, noticeId: {}, observerId: {}, ts: {}",
+            //          noticeId, observerId, System.currentTimeMillis());
+            broadcasterQueue.removeIf(e -> e.checkAck(noticeId));
+        }
+
+        private PulseType getPulseType(PulseCreateRequest request) {
+            PulseType pulseType = request.getPulseType();
+
+            if (pulseType.equals(PulseType.PULSE_TYPE_UNKNOWN)) {
+                this.responseObserver.onError(new Exception("unknown pulse type."));
+                return null;
+            }
+
+            return pulseType;
+        }
+
+        private AbstractObserverSubject getSubject(PulseType pulseType) {
+            AbstractObserverSubject subject = subjectHolder.get(pulseType.name());
+
+            if (subject == null) {
+                responseObserver.onError(
+                        new Exception("Unsupported pulse-type: " + pulseType.name()));
+                return null;
+            }
+
+            return subject;
+        }
+
+        private void handleNotice(PulseNoticeRequest noticeRequest) {
+            try {
+                subject.handleClientNotice(noticeRequest);
+            } catch (Exception e) {
+                if (e instanceof PDException) {
+                    var pde = (PDException) e;
+                    if (pde.getErrorCode() == NOT_LEADER.getNumber()) {
+                        try {
+                            log.info("Sending the change-leader command to the client, " +
                                     "due to NOT_LEADER (ERROR-100)", pde);
+                            notifyClient(PdInstructionResponse.newBuilder()
+                                                 .setInstructionType(
+                                                         PdInstructionType.CHANGE_TO_FOLLOWER)
+                                                 .setLeaderIp(RaftEngine.getInstance()
+                                                                        .getLeaderGrpcAddress())
+                                                 .build());
+                        } catch (ExecutionException | InterruptedException ex) {
+                            log.error("Failed to send the notice to the observer,", ex);
+                        }
+                    }
+                } else {
+                    log.error("handleNotice error", e);
+                }
+            }
+        }
+
+        @Override
+        public void onNext(PulseRequest pulseRequest) {
+
+            if (pulseRequest.hasCreateRequest()) {
+                this.addObserver(pulseRequest.getCreateRequest());
+                return;
+            }
+
+            if (pulseRequest.hasCancelRequest()) {
+                this.cancelObserver();
+                return;
+            }
+
+            if (pulseRequest.hasNoticeRequest()) {
+                this.handleNotice(pulseRequest.getNoticeRequest());
+            }
+
+            if (pulseRequest.hasAckRequest()) {
+                this.ackNotice(pulseRequest.getAckRequest().getNoticeId(),
+                               pulseRequest.getAckRequest().getObserverId());
+            }
+        }
+
+        @Override
+        public void onError(Throwable throwable) {
+            this.cancelObserver();
+        }
+
+        @Override
+        public void onCompleted() {
+            this.cancelObserver();
+        }
+    }
+}
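+
+/*
+ * Wiring sketch (illustrative only): a server wires the durable-queue callbacks
+ * before broadcasting; the metaStore calls below are hypothetical stand-ins.
+ *
+ *   PDPulseSubject.setQueueRetrieveFunction(() -> metaStore.loadQueueItems());
+ *   PDPulseSubject.setQueueDurableFunction(item -> metaStore.save(item));
+ *   PDPulseSubject.setQueueRemoveFunction(itemId -> metaStore.remove(itemId));
+ *
+ *   // broadcast; un-acked notices are retried every RETRYING_PERIOD_SECONDS
+ *   PDPulseSubject.notifyClient(PartitionHeartbeatResponse.newBuilder());
+ *
+ *   // receive notices coming back from the pd-client
+ *   PDPulseSubject.listenPartitionHeartbeat(new PulseListener<PartitionHeartbeatRequest>() {
+ *       public void onNext(PartitionHeartbeatRequest notice) { ... }
+ *       public void onError(Throwable t) { ... }
+ *       public void onCompleted() { ... }
+ *   });
+ */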
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java
new file mode 100644
index 0000000000..a42ffce1e5
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+import java.util.function.Function;
+
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseType;
+
+import com.google.protobuf.GeneratedMessageV3;
+
+public class PartitionHeartbeatSubject extends AbstractObserverSubject {
+
+    PartitionHeartbeatSubject() {
+        super(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT);
+    }
+
+    @Override
+    String toNoticeString(PulseResponse res) {
+        return res.getPartitionHeartbeatResponse().toString();
+    }
+
+    @Override
+    Function<PulseNoticeRequest, PartitionHeartbeatRequest> getNoticeHandler() {
+        return PulseNoticeRequest::getPartitionHeartbeatRequest;
+    }
+
+    void notifyClient(PartitionHeartbeatResponse.Builder responseBuilder) {
+        super.notifyClient(b -> {
+            b.setPartitionHeartbeatResponse(responseBuilder);
+        });
+    }
+
+    @Override
+    long notifyClient(GeneratedMessageV3 response) {
+        return super.notifyClient(b -> {
+            b.setPartitionHeartbeatResponse((PartitionHeartbeatResponse) response);
+        });
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java
new file mode 100644
index 0000000000..b10b60f60b
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+import java.util.function.Function;
+
+import org.apache.hugegraph.pd.grpc.pulse.PdInstructionResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseType;
+
+import com.google.protobuf.GeneratedMessageV3;
+
+public class PdInstructionSubject extends AbstractObserverSubject {
+
+    protected PdInstructionSubject() {
+        super(PulseType.PULSE_TYPE_PD_INSTRUCTION);
+    }
+
+    @Override
+    String toNoticeString(PulseResponse res) {
+        return res.getInstructionResponse().toString();
+    }
+
+    /**
+     * Instructions that PD simply sends via pulse; no corresponding notice
+     * is expected from the client.
+     *
+     * @return null
+     */
+    @Override
+    Function<PulseNoticeRequest, ?> getNoticeHandler() {
+        return pulseNoticeRequest -> null;
+    }
+
+    @Override
+    long notifyClient(GeneratedMessageV3 response) {
+        return super.notifyClient(b -> {
+            b.setInstructionResponse((PdInstructionResponse) response);
+        });
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java
new file mode 100644
index 0000000000..09a7d3d05d
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+public interface PulseListener<T> {
+
+    /**
+     * Invoked on a new notice.
+     *
+     * @param notice the notice
+     */
+    void onNext(T notice) throws Exception;
+
+    /**
+     * Invoked on errors.
+     *
+     * @param throwable the error
+     */
+    void onError(Throwable throwable);
+
+    /**
+     * Invoked on completion.
+     */
+    void onCompleted();
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java
new file mode 100644
index 0000000000..a4b83333ed
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hugegraph.pd.common.PDException;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.MessageOrBuilder;
+import com.google.protobuf.util.JsonFormat;
+
+public class API {
+
+    public static final String VERSION = "3.6.3";
+    public static final String PD = "PD";
+    public static final String STORE = "STORE";
+    public static final String STATUS_KEY = "status";
+    public static final String ERROR_KEY = "error";
+    public static final String QUOTATION = "\"";
+    public static final String COMMA = ",";
+    public static final String COLON = ": ";
+
+    public String toJSON(List<? extends MessageOrBuilder> values, String key) {
+
+        StringBuilder builder = new StringBuilder();
+        builder.append("{")
+               .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0")
+               .append(COMMA)
+               .append(QUOTATION).append(key).append(QUOTATION).append(COLON)
+               .append("[ ");
+
+        if (values != null) {
+            values.forEach(s -> {
+                try {
+                    builder.append(JsonFormat.printer().print(s));
+                } catch (InvalidProtocolBufferException e) {
+                    e.printStackTrace();
+                }
+                builder.append(",");
+            });
+            builder.deleteCharAt(builder.length() - 1);
+        }
+        builder.append("]}");
+        return builder.toString();
+    }
+
+    public String toJSON(MessageOrBuilder value, String key) {
+        StringBuilder builder = new StringBuilder();
+        builder.append("{")
+               .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0")
+               .append(COMMA)
+               .append(QUOTATION).append(key).append(QUOTATION).append(COLON);
+        try {
+            if (value != null) {
+                builder.append(JsonFormat.printer().print(value));
+            } else {
+                builder.append("{}");
+            }
+            builder.append("}");
+            return builder.toString();
+        } catch (InvalidProtocolBufferException e) {
+            e.printStackTrace();
+            return toJSON(e);
+        }
+    }
+
+    public String toJSON(Map<String, List<? extends MessageOrBuilder>> values) {
+        StringBuilder builder = new StringBuilder();
+        builder.append("{ ");
+        for (Map.Entry<String, List<? extends MessageOrBuilder>> entry : values.entrySet()) {
+            String entryKey = entry.getKey();
+            List<? extends MessageOrBuilder> entryValue = entry.getValue();
+            builder.append(QUOTATION).append(entryKey).append(QUOTATION).append(COLON).append("[");
+            if ((entryValue != null) && !(entryValue.isEmpty())) {
+                entryValue.forEach(s -> {
+                    try {
+                        if (s == null) {
+                            builder.append("null");
+                        } else {
+                            builder.append(JsonFormat.printer().print(s));
+                        }
+                    } catch (InvalidProtocolBufferException e) {
+                        e.printStackTrace();
+                    }
+                    builder.append(",");
+                });
+                builder.deleteCharAt(builder.length() - 1); // remove the trailing comma
+            }
+            builder.append("]").append(COMMA);
+        }
+        builder.deleteCharAt(builder.length() - 1);
+        builder.append("}");
+        return builder.toString();
+    }
+
+    public String toJSON(PDException exception) {
+        return "{" +
+               QUOTATION + STATUS_KEY + QUOTATION + COLON +
+               exception.getErrorCode() + COMMA +
+               QUOTATION + ERROR_KEY + QUOTATION + COLON +
+               QUOTATION + exception.getMessage() + QUOTATION +
+               "}";
+    }
+    public String toJSON(Exception exception) {
+        return "{" +
+               QUOTATION + STATUS_KEY + QUOTATION + COLON + "-1" +
+               COMMA +
+               QUOTATION + ERROR_KEY + QUOTATION + COLON +
+               QUOTATION + exception.getMessage() + QUOTATION +
+               "}";
+    }
+
+    /**
+     * Serialize an arbitrary object to JSON via Jackson.
+     *
+     * @param object the object to serialize
+     * @return the JSON string, or the error message on failure
+     * @author tianxiaohui
+     */
+    public String toJSON(Object object) {
+        ObjectMapper mapper = new ObjectMapper();
+        try {
+            return mapper.writeValueAsString(object);
+        } catch (JsonProcessingException e) {
+            e.printStackTrace();
+            return e.getMessage();
+        }
+    }
+
+    public Map<String, Object> okMap(String k, Object v) {
+        Map<String, Object> map = new HashMap<>();
+        map.put(STATUS_KEY, 0);
+        map.put(k, v);
+        return map;
+    }
+
+    public String toJSON(List<? extends MessageOrBuilder> values,
+                         JsonFormat.TypeRegistry registry) {
+
+        StringBuilder builder = new StringBuilder();
+        builder.append("{")
+               .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0")
+               .append(COMMA)
+               .append(QUOTATION).append("log").append(QUOTATION).append(COLON)
+               .append("[ ");
+        JsonFormat.Printer printer = JsonFormat.printer().usingTypeRegistry(registry);
+        if (values != null) {
+            values.forEach(s -> {
+                try {
+                    builder.append(printer.print(s));
+                } catch (InvalidProtocolBufferException e) {
+                    e.printStackTrace();
+                }
+                builder.append(",");
+            });
+            builder.deleteCharAt(builder.length() - 1);
+        }
+        builder.append("]}");
+        return builder.toString();
+    }
+}
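+
+/*
+ * Response-shape sketch (illustrative): toJSON(value, "graph") wraps a protobuf
+ * message into the envelope used by the REST endpoints below, e.g.
+ *
+ *   {"status": 0, "graph": { ...JsonFormat-printed message... }}
+ *
+ * while toJSON(PDException) produces {"status": <errorCode>, "error": "<message>"}.
+ */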
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java
new file mode 100644
index 0000000000..78d24be5cd
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java
@@ -0,0 +1,288 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.model.GraphRestRequest;
+import org.apache.hugegraph.pd.model.RestApiResponse;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.apache.hugegraph.pd.service.PDService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class GraphAPI extends API {
+
+    @Autowired
+    PDRestService pdRestService;
+    @Autowired
+    PDService pdService;
+
+    @GetMapping(value = "/graph/partitionSizeRange", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getPartitionSizeRange() {
+        try {
+            int minPartitionSize = 1;
+            int maxPartitionSize = pdService.getStoreNodeService().getShardGroups().size();
+            Map<String, Object> dataMap = new HashMap<>();
+            dataMap.put("minPartitionSize", minPartitionSize);
+            dataMap.put("maxPartitionSize", maxPartitionSize);
+            return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            log.error("PDException:", e);
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+    }
+
+    @GetMapping(value = "/graphs", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getGraphs() {
+        RestApiResponse response = new RestApiResponse();
+        try {
+            List<Metapb.Graph> graphs = pdRestService.getGraphs();
+            List<GraphStatistics> resultGraphs = new ArrayList<>();
+            for (Metapb.Graph graph : graphs) {
+                if ((graph.getGraphName() != null) && (graph.getGraphName().endsWith("/g"))) {
+                    resultGraphs.add(new GraphStatistics(graph));
+                }
+            }
+            HashMap<String, Object> dataMap = new HashMap<>();
+            dataMap.put("graphs", resultGraphs);
+            response.setData(dataMap);
+            response.setStatus(Pdpb.ErrorType.OK.getNumber());
+            response.setMessage(Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            log.error("PDException: ", e);
+            response.setData(new HashMap<>());
+            response.setStatus(e.getErrorCode());
+            response.setMessage(e.getMessage());
+        }
+        return response;
+    }
+
+    @PostMapping(value = "/graph/**", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String setGraph(@RequestBody GraphRestRequest body, HttpServletRequest request) {
+        try {
+            String requestURL = request.getRequestURL().toString();
+            final String prefix = "/graph/";
+            final int limit = 2;
+            String graphName = requestURL.split(prefix, limit)[1];
+            graphName = URLDecoder.decode(graphName, StandardCharsets.UTF_8);
+            Metapb.Graph curGraph = pdRestService.getGraph(graphName);
+            Metapb.Graph.Builder builder = Metapb.Graph.newBuilder(
+                    curGraph == null ? Metapb.Graph.getDefaultInstance() : curGraph);
+            builder.setGraphName(graphName);
+            if (body.getPartitionCount() > 0) {
+                builder.setPartitionCount(body.getPartitionCount());
+            }
+
+            Metapb.Graph newGraph = pdRestService.updateGraph(builder.build());
+            return toJSON(newGraph, "graph");
+        } catch (PDException exception) {
+            return toJSON(exception);
+        } catch (Exception e) {
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "/graph/**", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getGraph(HttpServletRequest request) throws
+                                                                UnsupportedEncodingException {
+        RestApiResponse response = new RestApiResponse();
+        GraphStatistics statistics = null;
+        String requestURL = request.getRequestURL().toString();
+        final String prefix = "/graph/";
+        final int limit = 2;
+        String graphName = requestURL.split(prefix, limit)[1];
+        graphName = URLDecoder.decode(graphName, StandardCharsets.UTF_8);
+        try {
+            Metapb.Graph graph = pdRestService.getGraph(graphName);
+            if (graph != null) {
+                statistics = new GraphStatistics(graph);
+                response.setData(statistics);
+            } else {
+                response.setData(new HashMap<>()); // the graph does not exist
+            }
+            response.setStatus(Pdpb.ErrorType.OK.getNumber());
+            response.setMessage(Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            log.error(e.getMessage());
+            response.setData(new HashMap<>());
+            response.setStatus(Pdpb.ErrorType.UNKNOWN.getNumber());
+            response.setMessage(e.getMessage());
+        }
+        return response;
+    }
+
+    @Data
+    class Shard {
+        long partitionId;
+        long storeId;
+        String state;
+        String role;
+        int progress;
+
+        public Shard(Metapb.ShardStats shardStats, long partitionId) {
+            this.role = String.valueOf(shardStats.getRole());
+            this.storeId = shardStats.getStoreId();
+            this.state = String.valueOf(shardStats.getState());
+            this.partitionId = partitionId;
+            this.progress = shardStats.getProgress();
+        }
+
+        public Shard(Metapb.Shard shard, long partitionId) {
+            this.role = String.valueOf(shard.getRole());
+            this.storeId = shard.getStoreId();
+            this.state = Metapb.ShardState.SState_Normal.name(); // shard state defaults to Normal
+            this.progress = 0;
+            this.partitionId = partitionId;
+        }
+    }
+
+    @Data
+    class Partition {
+        int partitionId;
+        String graphName;
+        String workState;
+        long startKey;
+        long endKey;
+        List<Shard> shards;
+        long dataSize;
+
+        public Partition(Metapb.Partition pt, Metapb.PartitionStats partitionStats) {
+            if (pt != null) {
+                partitionId = pt.getId();
+                startKey = pt.getStartKey();
+                endKey = pt.getEndKey();
+                workState = String.valueOf(pt.getState());
+                graphName = pt.getGraphName();
+                final int postfixLength = 2;
+                graphName = graphName.substring(0, graphName.length() - postfixLength);
+                if (partitionStats != null) {
+                    List<Metapb.ShardStats> shardStatsList = partitionStats.getShardStatsList();
+                    List<Shard> shardsList = new ArrayList<>();
+                    for (Metapb.ShardStats shardStats : shardStatsList) {
+                        Shard shard = new Shard(shardStats, partitionId);
+                        shardsList.add(shard);
+                    }
+                    this.shards = shardsList;
+                } else {
+                    List<Shard> shardsList = new ArrayList<>();
+                    try {
+                        var shardGroup = pdService.getStoreNodeService().getShardGroup(pt.getId());
+                        if (shardGroup != null) {
+                            for (Metapb.Shard shard1 : shardGroup.getShardsList()) {
+                                shardsList.add(new Shard(shard1, partitionId));
+                            }
+                        } else {
+                            log.error("GraphAPI.Partition(), get shard group: {} returns null",
+                                      pt.getId());
+                        }
+                    } catch (PDException e) {
+                        log.error("Partition init failed, error: {}", e.getMessage());
+                    }
+                    this.shards = shardsList;
+                }
+            }
+        }
+    }
+
+    @Data
+    class GraphStatistics {
+        // graph statistics
+        String graphName;
+        long partitionCount;
+        String state;
+        List<Partition> partitions;
+        long dataSize;
+        // TODO
+        int nodeCount;
+        int edgeCount;
+        long keyCount;
+
+        public GraphStatistics(Metapb.Graph graph) throws PDException {
+            if (graph == null) {
+                return;
+            }
+            Map<Integer, Long> partition2DataSize = new HashMap<>();
+            graphName = graph.getGraphName();
+            partitionCount = graph.getPartitionCount();
+            state = String.valueOf(graph.getState());
+            // data size and key count
+            List<Metapb.Store> stores = pdRestService.getStores(graphName);
+            for (Metapb.Store store : stores) {
+                List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
+                for (Metapb.GraphStats graphStats : graphStatsList) {
+                    if ((graphName.equals(graphStats.getGraphName()))
+                        && (Metapb.ShardRole.Leader.equals(graphStats.getRole()))) {
+                        keyCount += graphStats.getApproximateKeys();
+                        dataSize += graphStats.getApproximateSize();
+                        partition2DataSize.put(graphStats.getPartitionId(),
+                                               graphStats.getApproximateSize());
+                    }
+                }
+            }
+            List<Partition> resultPartitionList = new ArrayList<>();
+            List<Metapb.Partition> tmpPartitions = pdRestService.getPartitions(graphName);
+            if ((tmpPartitions != null) && (!tmpPartitions.isEmpty())) {
+                // the partition info to return
+                for (Metapb.Partition partition : tmpPartitions) {
+                    Metapb.PartitionStats partitionStats = pdRestService
+                            .getPartitionStats(graphName, partition.getId());
+                    Partition pt = new Partition(partition, partitionStats);
+                    pt.dataSize = partition2DataSize.getOrDefault(partition.getId(), 0L);
+                    resultPartitionList.add(pt);
+                }
+            }
+            partitions = resultPartitionList;
+            // hide the /g /m /s postfix of the graph name
+            final int postfixLength = 2;
+            graphName = graphName.substring(0, graphName.length() - postfixLength);
+        }
+    }
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java
new file mode 100644
index 0000000000..1bb4f183a5
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.net.URLDecoder;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.model.GraphSpaceRestRequest;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class GraphSpaceAPI extends API {
+
+    @Autowired
+    PDRestService pdRestService;
+
+    @GetMapping(value = "/graph-spaces", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getGraphSpaces() {
+        try {
+            List<Metapb.GraphSpace> graphSpaces = pdRestService.getGraphSpaces();
+            return toJSON(graphSpaces, "graph-spaces");
+        } catch (PDException e) {
+            log.error("getGraphSpaces error", e);
+            return toJSON(e);
+        }
+    }
+
+    @PostMapping(value = "/graph-spaces/**", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String setGraphSpace(@RequestBody GraphSpaceRestRequest body,
+                                HttpServletRequest request) {
+        try {
+            String requestURL = request.getRequestURL().toString();
+            String graphSpaceName = requestURL.split("/graph-spaces/", 2)[1];
+            graphSpaceName = URLDecoder.decode(graphSpaceName, StandardCharsets.UTF_8);
+            Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder()
+                                                            .setName(graphSpaceName)
+                                                            .setStorageLimit(
+                                                                    body.getStorageLimit())
+                                                            .build();
+            Metapb.GraphSpace newGraphSpace = pdRestService.setGraphSpace(graphSpace);
+            return toJSON(newGraphSpace, "graph-spaces");
+        } catch (PDException exception) {
+            return toJSON(exception);
+        } catch (Exception e) {
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "/graph-spaces/**", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getGraphSpace(HttpServletRequest request) {
+        try {
+            String requestURL = request.getRequestURL().toString();
+            String graphSpaceName = requestURL.split("/graph-spaces/", 2)[1];
+            graphSpaceName = URLDecoder.decode(graphSpaceName, StandardCharsets.UTF_8);
+            Metapb.GraphSpace graphSpace = pdRestService.getGraphSpace(graphSpaceName);
+            return toJSON(graphSpace, "graph-spaces");
+        } catch (PDException exception) {
+            return toJSON(exception);
+        } catch (Exception e) {
+            return toJSON(e);
+        }
+    }
+}
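+
+/*
+ * Request-mapping sketch (illustrative): the trailing "/**" pattern keeps
+ * slashes in the resource name, so the name is cut out of the request URL
+ * and then URL-decoded, e.g.
+ *
+ *   POST /v1/graph-spaces/my%2Fspace
+ *     -> split("/graph-spaces/", 2)[1] = "my%2Fspace"
+ *     -> URLDecoder.decode(..., UTF_8) = "my/space"
+ */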
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java
new file mode 100644
index 0000000000..0d0cebb3ec
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.model.RestApiResponse;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.apache.hugegraph.pd.service.PDService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/")
+public class IndexAPI extends API {
+
+    @Autowired
+    PDService pdService;
+    @Autowired
+    PDRestService pdRestService;
+
+    @GetMapping(value = "/", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public BriefStatistics index() throws PDException, ExecutionException, InterruptedException {
+        BriefStatistics statistics = new BriefStatistics();
+        statistics.leader = RaftEngine.getInstance().getLeaderGrpcAddress();
+        statistics.state = pdService.getStoreNodeService().getClusterStats().getState()
+                                    .toString();
+        statistics.storeSize = pdService.getStoreNodeService().getActiveStores().size();
+        statistics.graphSize = pdService.getPartitionService().getGraphs().size();
+        statistics.partitionSize = pdService.getStoreNodeService().getShardGroups().size();
+        return statistics;
+    }
+
+    @GetMapping(value = "/v1/cluster", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse cluster() throws InterruptedException, ExecutionException {
+        Statistics statistics = new Statistics();
+        try {
+            statistics.state =
+                    String.valueOf(pdService.getStoreNodeService().getClusterStats().getState());
+            String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress();
+            CallStreamObserverWrap<Pdpb.GetMembersResponse> response =
+                    new CallStreamObserverWrap<>();
+            pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response);
+            List<Member> pdList = new ArrayList<>();
+            for (Metapb.Member member : response.get().get(0).getMembersList()) {
+                Member member1 = new Member(member);
+                if ((leaderGrpcAddress != null) &&
+                    (leaderGrpcAddress.equals(member.getGrpcUrl()))) {
+                    member1.role = "Leader";
+                    statistics.pdLeader = member1;
+                } else {
+                    member1.role = "Follower";
+                }
+                pdList.add(member1);
+            }
+            statistics.pdList = pdList;
+            statistics.memberSize = pdList.size();
+            List<Store> stores = new ArrayList<>();
+            for (Metapb.Store store : pdService.getStoreNodeService().getStores()) {
+                stores.add(new Store(store));
+            }
+            statistics.stores = stores;
+            statistics.storeSize = statistics.stores.size();
+            statistics.onlineStoreSize = pdService.getStoreNodeService().getActiveStores().size();
+            statistics.offlineStoreSize = statistics.storeSize - statistics.onlineStoreSize;
+            List<Metapb.Graph> graphs = pdRestService.getGraphs();
+            // number of graphs; only /g graphs are counted
+            statistics.graphSize = graphs.stream()
+                                         .filter((g) -> (g.getGraphName() != null) &&
+                                                        (g.getGraphName().endsWith("/g")))
+                                         .count();
+            statistics.partitionSize = pdService.getStoreNodeService().getShardGroups().size();
+            statistics.shardCount = pdService.getConfigService().getPDConfig().getShardCount();
+            for (Metapb.Store store : pdService.getStoreNodeService().getStores()) {
+                List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
+                for (Metapb.GraphStats graphStats : graphStatsList) {
+                    statistics.keyCount += graphStats.getApproximateKeys();
+                    statistics.dataSize += graphStats.getApproximateSize();
+                }
+            }
+            // data state: derived from the graph states; a larger enum value means a more
+            // severe problem; defaults to the normal state
+            Metapb.PartitionState dataState = Metapb.PartitionState.PState_Normal;
+            for (Metapb.Graph graph : pdRestService.getGraphs()) {
+                if (graph.getState() == Metapb.PartitionState.UNRECOGNIZED) {
+                    continue; // skip UNRECOGNIZED, otherwise getNumber() would throw
+                }
+                if ((graph.getState() != null) &&
+                    (graph.getState().getNumber() > dataState.getNumber())) {
+                    dataState = graph.getState();
+                }
+            }
+            statistics.dataState = dataState.name();
+            return new RestApiResponse(statistics, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            log.error("PD Exception: ", e);
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+    }
+
+    @Data
+    class BriefStatistics {
+        String state;
+        String leader;
+        int memberSize;
+        int storeSize;
+        int graphSize;
+        int partitionSize;
+    }
+
+    @Data
+    class Store {
+        long storeId;
+        String address;
+        String raftAddress;
+        String version;
+        String state;
+        long startTimeStamp;
+
+        public Store(Metapb.Store store) {
+            if (store != null) {
+                storeId = store.getId();
+                address = store.getAddress();
+                raftAddress = store.getRaftAddress();
+                version = store.getVersion();
+                state = String.valueOf(store.getState());
+                startTimeStamp = store.getStartTimestamp();
+            }
+        }
+    }
+
+    @Data
+    class Member {
+        String raftUrl;
+        String grpcUrl;
+        String restUrl;
+        String state;
+        String dataPath;
+        String role;
+        String serviceName;    // service name, a custom property
+        String serviceVersion; // statically defined
+        long startTimeStamp;   // process start time
+
+        public Member(Metapb.Member member) {
+            if (member != null) {
+                raftUrl = member.getRaftUrl();
+                grpcUrl = member.getGrpcUrl();
+                restUrl = member.getRestUrl();
+                state = String.valueOf(member.getState());
+                dataPath = member.getDataPath();
+                serviceName = grpcUrl + "-PD";
+                serviceVersion = VERSION;
+                startTimeStamp = ManagementFactory.getRuntimeMXBean().getStartTime();
+            }
+        }
+
+        public Member() {
+        }
+    }
+
+    @Data
+    class Statistics {
+        /**
+         * cluster state
+         */
+        String state;
+        /**
+         * data state
+         */
+        String dataState;
+        /**
+         * members of the PD cluster
+         */
+        List<Member> pdList;
+        /**
+         * leader of the PD cluster
+         */
+        Member pdLeader;
+        /**
+         * size of the PD cluster
+         */
+        int memberSize;
+        /**
+         * store list
+         */
+        List<Store> stores;
+        /**
+         * number of stores
+         */
+        int storeSize;
+        /**
+         * number of online stores
+         */
+        int onlineStoreSize;
+        /**
+         * number of offline stores
+         */
+        int offlineStoreSize;
+        /**
+         * number of graphs
+         */
+        long graphSize;
+        /**
+         * number of partitions
+         */
+        int partitionSize;
+        /**
+         * number of shards per partition
+         */
+        int shardCount;
+        /**
+         * number of keys
+         */
+        long keyCount;
+        /**
+         * data size
+         */
+        long dataSize;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java
new file mode 100644
index 0000000000..16f560f92b
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.model.PeerRestRequest;
+import org.apache.hugegraph.pd.model.RestApiResponse;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.service.PDService;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import io.grpc.stub.CallStreamObserver;
+import io.grpc.stub.StreamObserver;
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class MemberAPI extends API {
+
+    // TODO
+    @Autowired
+    PDService pdService;
+
+    @GetMapping(value = "/members", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getMembers() throws InterruptedException, ExecutionException {
+        String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress();
+        CallStreamObserverWrap<Pdpb.GetMembersResponse> response = new CallStreamObserverWrap<>();
+        pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response);
+        List<Member> members = new ArrayList<>();
+        Member leader = null;
+        Map<String, Integer> stateCountMap = new HashMap<>();
+        for (Metapb.Member member : response.get().get(0).getMembersList()) {
+            String stateKey = member.getState().name();
+            stateCountMap.put(stateKey, stateCountMap.getOrDefault(stateKey, 0) + 1);
+            Member member1 = new Member(member);
+            if ((leaderGrpcAddress != null) && (leaderGrpcAddress.equals(member.getGrpcUrl()))) {
+                leader = member1;
+            }
+            member1.role = member.getRole().name();
+            members.add(member1);
+        }
+        String state = pdService.getStoreNodeService().getClusterStats().getState().toString();
+        HashMap<String, Object> resultMap = new HashMap<>();
+        resultMap.put("state", state);
+        resultMap.put("pdList", members);
+        resultMap.put("pdLeader", leader);
+        resultMap.put("numOfService", members.size());
+        resultMap.put("numOfNormalService",
+                      stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0));
+        resultMap.put("stateCountMap", stateCountMap);
+        return new RestApiResponse(resultMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+    }
+
+    @PostMapping(value = "/members/change", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String changePeerList(@RequestBody PeerRestRequest body, HttpServletRequest request) {
+        try {
+            Pdpb.ChangePeerListRequest rpcRequest =
+                    Pdpb.ChangePeerListRequest.newBuilder()
+                                              .setPeerList(body.getPeerList()).build();
+            CountDownLatch latch = new CountDownLatch(1);
+            final Pdpb.ResponseHeader[] responseHeader = {null};
+            StreamObserver<Pdpb.getChangePeerListResponse> observer =
+                    new StreamObserver<Pdpb.getChangePeerListResponse>() {
+                        @Override
+                        public void onNext(Pdpb.getChangePeerListResponse value) {
+                            responseHeader[0] = value.getHeader();
+                        }
+
+                        @Override
+                        public void onError(Throwable t) {
+                            responseHeader[0] = Pdpb.ResponseHeader.newBuilder().setError(
+                                    Pdpb.Error.newBuilder().setType(
+                                            Pdpb.ErrorType.UNKNOWN).setMessage(
+                                            t.getMessage()).build()).build();
+                            latch.countDown();
+                        }
+
+                        @Override
+                        public void onCompleted() {
+                            latch.countDown();
+                        }
+                    };
+            pdService.changePeerList(rpcRequest, observer);
+            latch.await();
+            return toJSON(responseHeader[0], "changeResult");
+        } catch (Exception e) {
+            return toJSON(e);
+        }
+    }
+
+    public static class CallStreamObserverWrap<V> extends CallStreamObserver<V>
+            implements Future<List<V>> {
+
+        CompletableFuture<List<V>> future = new CompletableFuture<>();
+        List<V> values = new ArrayList<>();
+
+        @Override
+        public boolean isReady() {
+            return false;
+        }
+
+        @Override
+        public void setOnReadyHandler(Runnable runnable) {
+            // no-op
+        }
+
+        @Override
+        public void disableAutoInboundFlowControl() {
+            // no-op
+        }
+
+        @Override
+        public void request(int i) {
+            // no-op
+        }
+
+        @Override
+        public void setMessageCompression(boolean b) {
+            // no-op
+        }
+
+        @Override
+        public void onNext(V v) {
+            values.add(v);
+        }
+
+        @Override
+        public void onError(Throwable throwable) {
+            future.completeExceptionally(throwable);
+        }
+
+        @Override
+        public void onCompleted() {
+            future.complete(values);
+        }
+
+        @Override
+        public boolean cancel(boolean mayInterruptIfRunning) {
+            return future.cancel(mayInterruptIfRunning);
+        }
+
+        @Override
+        public boolean isCancelled() {
+            return future.isCancelled();
+        }
+
+        @Override
+        public boolean isDone() {
+            return future.isDone();
+        }
+
+        @Override
+        public List<V> get() throws InterruptedException, ExecutionException {
+            return future.get();
+        }
+
+        @Override
+        public List<V> get(long timeout, TimeUnit unit) throws InterruptedException,
+                                                               ExecutionException,
+                                                               TimeoutException {
+            return future.get(timeout, unit);
+        }
+    }
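+
+    /*
+     * Usage sketch (illustrative): the wrapper adapts a streaming gRPC callback
+     * into a Future, so a blocking REST handler can wait for the reply:
+     *
+     *   CallStreamObserverWrap<Pdpb.GetMembersResponse> response =
+     *           new CallStreamObserverWrap<>();
+     *   pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response);
+     *   Pdpb.GetMembersResponse first = response.get().get(0); // blocks until onCompleted()
+     */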
+                dataPath = member.getDataPath();
+                serviceName = grpcUrl + "-PD";
+                serviceVersion = VERSION;
+                startTimeStamp = ManagementFactory.getRuntimeMXBean().getStartTime();
+                replicateState = member.getReplicatorState();
+            }
+        }
+
+        public Member() {
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java
new file mode 100644
index 0000000000..5bc5cdfae0
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java
@@ -0,0 +1,475 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.rest;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import org.apache.commons.lang.time.DateFormatUtils;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.model.RestApiResponse;
+import org.apache.hugegraph.pd.model.TimeRangeRequest;
+import org.apache.hugegraph.pd.service.PDRestService;
+import org.apache.hugegraph.pd.util.DateUtil;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.PostMapping;
+import org.springframework.web.bind.annotation.RequestBody;
+import org.springframework.web.bind.annotation.RequestMapping;
+import org.springframework.web.bind.annotation.ResponseBody;
+import org.springframework.web.bind.annotation.RestController;
+
+import com.google.protobuf.util.JsonFormat;
+
+import lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class PartitionAPI extends API {
+    public static final String DEFAULT_DATETIME_FORMAT = "yyyy-MM-dd HH:mm:ss";
+    @Autowired
+    PDRestService pdRestService;
+
+    @GetMapping(value = "/highLevelPartitions", produces = MediaType.APPLICATION_JSON_VALUE)
+    public RestApiResponse getHighLevelPartitions() {
+        // info of the graphs under each partition
+        Map<Integer, Map<String, GraphStats>> partitions2GraphsMap = new HashMap<>();
+        Map<Integer, HighLevelPartition> resultPartitionsMap = new HashMap<>();
+        // keyCount of each partition, taken from the leader only
+        Map<Integer, Long> partition2KeyCount = new HashMap<>();
+        // dataSize of each partition, taken from the leader only
+        Map<Integer, Long> partition2DataSize = new HashMap<>();
+        List<Metapb.Store> stores;
+        Map<Long, Metapb.Store> storesMap = new HashMap<>();
+        try {
+            stores = pdRestService.getStores("");
+        } catch (PDException e) {
+            log.error("getStores error", e);
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+        for (Metapb.Store store : stores) {
+            storesMap.put(store.getId(), store);
+            List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
+            for (Metapb.GraphStats graphStats : graphStatsList) {
+                // collect the graph info saved on the partition (leader replicas only)
+                if (Metapb.ShardRole.Leader != graphStats.getRole()) {
+                    continue;
+                }
+                // compute the partition keyCount (across all graphs)
+                partition2KeyCount.put(graphStats.getPartitionId(),
+                                       partition2KeyCount.getOrDefault(
+                                               graphStats.getPartitionId(),
+                                               graphStats.getApproximateKeys()));
+                // compute the partition dataSize by accumulating the graph sizes
+                partition2DataSize.put(graphStats.getPartitionId(),
+                                       partition2DataSize.getOrDefault(
+                                               graphStats.getPartitionId(), 0L)
+                                       + graphStats.getApproximateSize());
+                // build the graph info under the partition
+                if (partitions2GraphsMap.get(graphStats.getPartitionId()) == null) {
+                    partitions2GraphsMap.put(graphStats.getPartitionId(),
+                                             new HashMap<String, GraphStats>());
+                }
+                Map<String, GraphStats> partitionGraphsMap =
+                        partitions2GraphsMap.get(graphStats.getPartitionId());
+                partitionGraphsMap.put(graphStats.getGraphName(), new GraphStats(graphStats));
+            }
+        }
+        // assemble all the partition info to be returned
+        List<Metapb.Partition> partitionList = pdRestService.getPartitions("");
+        for (Metapb.Partition partition : partitionList) {
+            // fill in startKey/endKey of the graph info inside the partition
+            if (partitions2GraphsMap.get(partition.getId()) != null) {
+                GraphStats graphStats =
+                        partitions2GraphsMap.get(partition.getId()).get(partition.getGraphName());
+                if (graphStats != null) {
+                    graphStats.startKey = partition.getStartKey();
+                    graphStats.endKey = partition.getEndKey();
+                }
+            }
+            // build the overall partition info (across all graphs)
+            if ((resultPartitionsMap.get(partition.getId()) == null)
+                && (!partition.getGraphName().endsWith("/s"))
+            ) {
+                Metapb.PartitionStats partitionStats;
+                try {
+                    partitionStats = pdRestService.getPartitionStats(partition.getGraphName(),
+                                                                     partition.getId());
+                } catch (PDException e) {
+                    log.error("getPartitionStats error", e);
+                    partitionStats = null;
+                }
+                // initialize the partition info
+                HighLevelPartition resultPartition =
+                        new HighLevelPartition(partition, partitionStats);
+                resultPartition.keyCount =
+                        partition2KeyCount.getOrDefault(resultPartition.partitionId, 0L);
+                resultPartition.dataSize =
+                        partition2DataSize.getOrDefault(resultPartition.partitionId, 0L);
+                for (ShardStats shard : resultPartition.shards) {
+                    // set the address and partition id of each shard (replica)
+                    shard.address = storesMap.get(shard.storeId).getAddress();
+                    shard.partitionId = partition.getId();
+                }
+                if ((partitionStats != null) && (partitionStats.getLeader() != null)) {
+                    long storeId = partitionStats.getLeader().getStoreId(); // store id of the leader
+                    resultPartition.leaderAddress =
+                            storesMap.get(storeId).getAddress(); // address of the leader
+                }
+                resultPartitionsMap.put(partition.getId(), resultPartition);
+            }
+        }
+        // build the graph list of each partition to return; keep only "/g" graphs, sorted by name
+        for (Map.Entry<Integer, HighLevelPartition> entry : resultPartitionsMap.entrySet()) {
+            Integer partitionId = entry.getKey();
+            HighLevelPartition currentPartition = resultPartitionsMap.get(partitionId);
+            Map<String, GraphStats> graphsMap = partitions2GraphsMap
+                    .getOrDefault(partitionId, new HashMap<>()); // avoid a NullPointerException below
+            ArrayList<GraphStats> graphsList = new ArrayList<>();
+            for (Map.Entry<String, GraphStats> entry1 : graphsMap.entrySet()) {
+                if (!entry1.getKey().endsWith("/g")) {
+                    continue; // keep only the "/g" graphs
+                }
+                String graphName = entry1.getKey();
+                GraphStats tmpGraph = graphsMap.get(graphName);
+                final int postfixLength = 2;
+                tmpGraph.graphName = tmpGraph.graphName.substring(0, tmpGraph.graphName.length() -
+                                                                     postfixLength);
+                graphsList.add(tmpGraph);
+            }
+            graphsList.sort((o1, o2) -> o1.graphName.compareTo(o2.graphName));
+            currentPartition.graphs = graphsList;
+        }
+        List<HighLevelPartition> resultPartitionList = new ArrayList<>();
+        if
(!resultPartitionsMap.isEmpty()) { + ArrayList partitionids = new ArrayList(resultPartitionsMap.keySet()); + partitionids.sort((o1, o2) -> o1.intValue() - o2.intValue()); + for (Integer partitionId : partitionids) { + resultPartitionList.add(resultPartitionsMap.get(partitionId)); + } + } + HashMap dataMap = new HashMap<>(); + dataMap.put("partitions", resultPartitionList); + return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + } + + @GetMapping(value = "/partitions", produces = MediaType.APPLICATION_JSON_VALUE) + public RestApiResponse getPartitions() { + try { + List partitions = new ArrayList<>();//需返回的分区对象 + List partitionList = pdRestService.getPartitions(""); + List stores = pdRestService.getStoreStats(false); + //分区的raftNode的状态 + HashMap> raftMap = new HashMap<>(); + + HashMap> shardIndexMap = new HashMap<>(); + String delimiter = "@"; + for (int i = 0; i < stores.size(); i++) { + Metapb.Store store = stores.get(i); + Metapb.StoreStats storeStats = store.getStats(); + HashMap storeRaftStats = new HashMap<>(); + List raftStatsList = storeStats.getRaftStatsList(); + for (int j = 0; j < raftStatsList.size(); j++) { + Metapb.RaftStats raftStats = raftStatsList.get(j); + storeRaftStats.put(raftStats.getPartitionId(), raftStats); + } + + HashMap partitionShardStats = new HashMap<>(); + List graphStatsList = storeStats.getGraphStatsList(); + StringBuilder builder = new StringBuilder(); + for (int j = 0; j < graphStatsList.size(); j++) { + Metapb.GraphStats graphStats = graphStatsList.get(j); + String graphName = graphStats.getGraphName(); + String partitionId = Integer.toString(graphStats.getPartitionId()); + builder.append(graphName).append(delimiter).append(partitionId); + partitionShardStats.put(builder.toString(), graphStats); + builder.setLength(0); + } + raftMap.put(store.getId(), storeRaftStats); + shardIndexMap.put(store.getId(), partitionShardStats); + } + + for (Metapb.Partition pt : partitionList) { + Partition partition = new Partition(pt); + String graphName = partition.getGraphName(); + partition.getShards().sort(Comparator.comparing(Shard::getStoreId)); + Metapb.PartitionStats partitionStats = + pdRestService.getPartitionStats(graphName, pt.getId()); + Map shardStats = new HashMap<>(); + if (partitionStats != null) { + String dateTime = DateFormatUtils.format( + partitionStats.getTimestamp(), DEFAULT_DATETIME_FORMAT); + partition.setTimestamp(dateTime); + shardStats = getShardStats(partitionStats); + } + + for (Metapb.Shard shard : pdRestService.getShardList(pt.getId())) { + Map finalShardStats = shardStats; + partition.getShards().add(new Shard() {{ + storeId = Long.toString(shard.getStoreId()); + role = shard.getRole(); + address = pdRestService.getStore( + shard.getStoreId()).getAddress(); + if (finalShardStats.containsKey(shard.getStoreId())) { + state = finalShardStats.get(shard.getStoreId()).getState().toString(); + progress = finalShardStats.get(shard.getStoreId()).getProgress(); + role = finalShardStats.get(shard.getStoreId()).getRole(); + } + + + HashMap storeRaftStats = + raftMap.get(shard.getStoreId()); + if (storeRaftStats != null) { + Metapb.RaftStats raftStats = storeRaftStats.get(partition.getId()); + if (raftStats != null) { + committedIndex = Long.toString(raftStats.getCommittedIndex()); + } + } + }}); + } + + partition.setPartitionStats(partitionStats); + + partitions.add(partition); + } + partitions.sort( + Comparator.comparing(Partition::getGraphName).thenComparing(Partition::getId)); + HashMap dataMap = new HashMap<>(); + 
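+            // the response mirrors getHighLevelPartitions above: the sorted
+            // partition list is returned under the "partitions" key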
dataMap.put("partitions", partitions); + return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + } catch (PDException e) { + log.error("query metric data error", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + @GetMapping(value = "/partitionsAndStats", produces = MediaType.APPLICATION_JSON_VALUE) + public String getPartitionsAndStats() { + //for debug use, return partition && partitionStats + try { + Map> graph2Partitions = new HashMap<>(); + Map> graph2PartitionStats = new HashMap<>(); + for (Metapb.Graph graph : pdRestService.getGraphs()) { + List partitionList = new ArrayList<>(); + List partitionStatsList = new ArrayList<>(); + for (Metapb.Partition partition : pdRestService.getPartitions( + graph.getGraphName())) { + Metapb.PartitionStats partitionStats = pdRestService + .getPartitionStats(graph.getGraphName(), partition.getId()); + partitionList.add(partition); + partitionStatsList.add(partitionStats); + } + graph2Partitions.put(graph.getGraphName(), partitionList); + graph2PartitionStats.put(graph.getGraphName(), partitionStatsList); + } + String builder = "{\"partitions\":" + toJSON(graph2Partitions) + + ",\"partitionStats\":" + toJSON(graph2PartitionStats) + "}"; + return builder; + } catch (PDException e) { + log.error("PD exception:" + e); + return toJSON(e); + } + } + + private Map getShardStats(Metapb.PartitionStats partitionStats) { + Map stats = new HashMap<>(); + if (partitionStats.getShardStatsList() != null) { + partitionStats.getShardStatsList().forEach(shardStats -> { + stats.put(shardStats.getStoreId(), shardStats); + }); + } + return stats; + } + + @PostMapping(value = "/partitions/log", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getPartitionLog(@RequestBody TimeRangeRequest request) { + try { + Date dateStart = DateUtil.getDate(request.getStartTime()); + Date dateEnd = DateUtil.getDate(request.getEndTime()); + List changedRecords = + pdRestService.getPartitionLog(dateStart.getTime(), + dateEnd.getTime()); + if (changedRecords != null) { + JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry + .newBuilder().add(Pdpb.SplitDataRequest.getDescriptor()).build(); + return toJSON(changedRecords, registry); + } else { + return toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "error")); + } + } catch (PDException e) { + return toJSON(e); + } + } + + @GetMapping(value = "/", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public Statistics getStatistics() throws PDException, ExecutionException, InterruptedException { + + Statistics statistics = new Statistics(); + int partitionId = -1; + return statistics; + } + + @Data + class Shard { + String address; + String storeId; + Metapb.ShardRole role; + String state; + int progress; + String committedIndex; + long partitionId; + + } + + @Data + class Partition { + int id; + long version; + String graphName; + long startKey; + long endKey; + + Metapb.PartitionState workState; + List shards; + String timestamp; + + + Partition(Metapb.Partition pt) { + id = pt.getId(); + version = pt.getVersion(); + graphName = pt.getGraphName(); + startKey = pt.getStartKey(); + endKey = pt.getEndKey(); + workState = pt.getState(); + shards = new ArrayList<>(); + } + + public void setPartitionStats(Metapb.PartitionStats stats) { + + } + } + + @Data + class Statistics { + + + } + + @Data + class HighLevelPartition { + int partitionId; + String state; + String leaderAddress; + long 
keyCount; + long dataSize; + String shardState; + int progress; + long raftTerm; //任期 + List graphs; + List shards; + String failureCause = ""; + + HighLevelPartition(Metapb.Partition partition, Metapb.PartitionStats partitionStats) { + partitionId = partition.getId(); + state = String.valueOf(partition.getState()); + if (partitionStats != null) { + raftTerm = partitionStats.getLeaderTerm(); + } + Metapb.ShardState tmpShardState = Metapb.ShardState.SState_Normal; + if (partitionStats != null) { + shards = new ArrayList<>(); + for (Metapb.ShardStats shardStats : partitionStats.getShardStatsList()) { + if ((shardStats.getState() != Metapb.ShardState.UNRECOGNIZED) + && (shardStats.getState().getNumber() > tmpShardState.getNumber())) { + tmpShardState = shardStats.getState(); + progress = shardStats.getProgress(); + } + shards.add(new ShardStats(shardStats)); + } + } else { + shards = new ArrayList<>(); + try { + for (Metapb.Shard shard : pdRestService.getShardList(partition.getId())) { + shards.add(new ShardStats(shard)); + } + } catch (PDException e) { + log.error("get shard list failed, {}", e.getMessage()); + } + } + // 综合所有副本的状态,给shardState赋值 + shardState = tmpShardState.name(); + } + } + + @Data + class GraphStats { + String graphName; + long keyCount; + long startKey; + long endKey; + long dataSize; + String workState; + long partitionId; + + GraphStats(Metapb.GraphStats graphStats) { + graphName = graphStats.getGraphName(); + keyCount = graphStats.getApproximateKeys(); + workState = graphStats.getWorkState().toString(); + dataSize = graphStats.getApproximateSize(); + partitionId = graphStats.getPartitionId(); + } + } + + @Data + class ShardStats { + long storeId; + String role; + String state; + int progress; + //额外属性 + long partitionId; + String address; + + ShardStats(Metapb.ShardStats shardStats) { + storeId = shardStats.getStoreId(); + role = String.valueOf(shardStats.getRole()); + state = shardStats.getState().toString(); + progress = shardStats.getProgress(); + } + + ShardStats(Metapb.Shard shard) { + //当没有shardStats的初始化方法 + storeId = shard.getStoreId(); + role = String.valueOf(shard.getRole()); + state = Metapb.ShardState.SState_Normal.name(); + progress = 0; + } + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java new file mode 100644 index 0000000000..f2432b093f --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.rest; + +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +import org.apache.hugegraph.pd.model.PromTargetsModel; +import org.apache.hugegraph.pd.service.PromTargetsService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +import lombok.extern.slf4j.Slf4j; + +/** + * TODO: ensure if we need this class & method (delete) + */ +@RestController +@Slf4j +@RequestMapping("/v1/prom") +public class PromTargetsAPI { + + @Autowired + private PromTargetsService service; + + @GetMapping(value = "/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) + public ResponseEntity> getPromTargets(@PathVariable(value = "appName", + required = true) + String appName) { + return ResponseEntity.of(Optional.ofNullable(this.service.getTargets(appName))); + } + + @GetMapping(value = "/targets-all", produces = MediaType.APPLICATION_JSON_VALUE) + public ResponseEntity> getPromAllTargets() { + return ResponseEntity.of(Optional.ofNullable(this.service.getAllTargets())); + } + + @GetMapping(value = "/demo/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) + public List getDemoTargets(@PathVariable(value = "appName", + required = true) String targetType) { + // TODO: ensure the IP addr is correct & useful + PromTargetsModel model = null; + switch (targetType) { + case "node": + model = PromTargetsModel.of() + .addTarget("10.14.139.26:8100") + .addTarget("10.14.139.27:8100") + .addTarget("10.14.139.28:8100") + .setMetricsPath("/metrics") + .setScheme("http"); + break; + case "store": + model = PromTargetsModel.of() + .addTarget("172.20.94.98:8521") + .addTarget("172.20.94.98:8522") + .addTarget("172.20.94.98:8523") + .setMetricsPath("/actuator/prometheus") + .setScheme("http"); + break; + case "pd": + model = PromTargetsModel.of() + .addTarget("172.20.94.98:8620") + .setMetricsPath("/actuator/prometheus"); + + break; + default: + + } + return Collections.singletonList(model); + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java new file mode 100644 index 0000000000..2aedd5f305 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java @@ -0,0 +1,202 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.rest; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; + +import javax.servlet.http.HttpServletRequest; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.GetMembersResponse; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.model.RegistryQueryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestResponse; +import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class RegistryAPI extends API { + + @Autowired + PDRestService pdRestService; + @Autowired + PDService pdService; + + @PostMapping(value = "/registry", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse register(@RequestBody RegistryRestRequest body, + HttpServletRequest request) { + RegistryRestResponse registryResponse = null; + try { + long interval = Long.valueOf(body.getInterval()).longValue(); + NodeInfo info = NodeInfo.newBuilder().setAppName(body.getAppName()) + .setVersion(body.getVersion()) + .setAddress(body.getAddress()).putAllLabels(body.getLabels()) + .setInterval(interval).build(); + registryResponse = pdRestService.register(info); + } catch (PDException e) { + registryResponse = new RegistryRestResponse(); + registryResponse.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + registryResponse.setMessage(e.getMessage()); + } catch (PDRuntimeException e) { + registryResponse = new RegistryRestResponse(); + registryResponse.setErrorType(Pdpb.ErrorType.LICENSE_VERIFY_ERROR); + registryResponse.setMessage(e.getMessage()); + } + return registryResponse; + } + + @PostMapping(value = "/registryInfo", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse getInfo(@RequestBody RegistryQueryRestRequest body, + HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { + boolean labelNotEmpty = body.getLabels() != null && !body.getLabels().isEmpty(); + Query query = Query.newBuilder() + .setAppName(StringUtils.isEmpty(body.getAppName()) ? "" : + body.getAppName()) + .putAllLabels(labelNotEmpty ? body.getLabels() : new HashMap<>()) + .setVersion(StringUtils.isEmpty(body.getVersion()) ? 
"" : + body.getVersion()) + .build(); + ArrayList registryResponse = pdRestService.getNodeInfo(query); + response.setErrorType(Pdpb.ErrorType.OK); + response.setData(registryResponse); + } catch (Exception e) { + log.warn(e.getMessage()); + response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + @GetMapping(value = "/allInfo", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse allInfo(HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { + //1.normal registry + Query query = + Query.newBuilder().setAppName("").putAllLabels(new HashMap<>()).setVersion("") + .build(); + ArrayList registryResponse = pdRestService.getNodeInfo(query); + //2.pd member + LinkedList pdMembers = getMembers(); + //3.store member + List stores = pdRestService.getStores(""); + LinkedList storeMembers = new LinkedList<>(); + for (Metapb.Store store : stores) { + RegistryRestRequest restRequest = new RegistryRestRequest(); + restRequest.setAddress(store.getAddress()); + restRequest.setVersion(store.getVersion()); + restRequest.setAppName(STORE); + restRequest.setId(String.valueOf(store.getId())); + storeMembers.add(restRequest); + } + response.setErrorType(Pdpb.ErrorType.OK); + HashMap result = new HashMap<>(); + result.put("other", registryResponse); + result.put(PD, pdMembers); + result.put(STORE, storeMembers); + response.setData(result); + } catch (Exception e) { + log.warn(e.getMessage()); + response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + private LinkedList getMembers() throws Exception { + CallStreamObserverWrap response = new CallStreamObserverWrap<>(); + pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); + LinkedList members = new LinkedList<>(); + List membersList = response.get().get(0).getMembersList(); + for (Metapb.Member member : membersList) { + RegistryRestRequest restRequest = new RegistryRestRequest(); + restRequest.setAddress(member.getRestUrl()); + restRequest.setVersion(VERSION); + restRequest.setAppName(PD); + members.add(restRequest); + } + return members; + } + + @GetMapping(value = "/license", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse getLicenseInfo(HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { + response.setErrorType(Pdpb.ErrorType.OK); + // TODO: uncomment later + //LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); + //response.setData(licenseVerifierService.getContext()); + } catch (Exception e) { + log.warn(e.getMessage()); + response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + @GetMapping(value = "/license/machineInfo", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse getLicenseMachineInfo(HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { + response.setErrorType(Pdpb.ErrorType.OK); + // TODO: uncomment later + //LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); + //response.setData(licenseVerifierService.getIpAndMac()); + } catch (Exception e) { + log.warn(e.getMessage()); 
+ response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java new file mode 100644 index 0000000000..53249b9436 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.rest; + + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class ShardAPI extends API { + @Autowired + PDRestService pdRestService; + @Autowired + PDService pdService; + + @GetMapping(value = "/shards", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getShards() { + + //对shards信息的统计 + try { + List resultShardList = new ArrayList<>(); + List graphs = pdRestService.getGraphs(); + for (Metapb.Graph graph : graphs) { + String graphName = graph.getGraphName(); + List partitions = pdRestService.getPartitions(graphName); + for (Metapb.Partition pt : partitions) { + Metapb.PartitionStats partitionStats = + pdRestService.getPartitionStats(graphName, pt.getId()); + if (partitionStats != null) { + List shardStatsList = partitionStats.getShardStatsList(); + for (Metapb.ShardStats shardStats : shardStatsList) { + Shard resultShard = new Shard(); + resultShard.storeId = shardStats.getStoreId(); + resultShard.partitionId = pt.getId(); + resultShard.role = String.valueOf(shardStats.getRole()); + resultShard.state = String.valueOf(shardStats.getState()); + resultShard.graphName = graphName; + resultShard.progress = shardStats.getProgress(); + resultShardList.add(resultShard); + } + } else { + List shardList = new ArrayList<>(); + var shardGroup = pdService.getStoreNodeService().getShardGroup(pt.getId()); + if (shardGroup != null) { + shardList = shardGroup.getShardsList(); + } 
else { + log.error( + "ShardAPI.getShards(), get shards of group id: {} returns " + + "null.", + pt.getId()); + } + + for (Metapb.Shard shard : shardList) { + Shard resultShard = new Shard(); + resultShard.storeId = shard.getStoreId(); + resultShard.partitionId = pt.getId(); + resultShard.role = String.valueOf(shard.getRole()); + resultShard.state = String.valueOf(Metapb.ShardState.SState_Normal); + resultShard.graphName = graphName; + resultShard.progress = 0; + resultShardList.add(resultShard); + } + } + } + } + HashMap dataMap = new HashMap<>(); + dataMap.put("shards", resultShardList); + return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + } catch (PDException e) { + log.error("PDException: ", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + @Data + class Shard { + long storeId; + long partitionId; + String role; + String state; + String graphName; + int progress; + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java new file mode 100644 index 0000000000..f288ef877f --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java @@ -0,0 +1,355 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.rest; + +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.model.StoreRestRequest; +import org.apache.hugegraph.pd.model.TimeRangeRequest; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.util.DateUtil; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.DeleteMapping; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import com.google.protobuf.util.JsonFormat; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class StoreAPI extends API { + + @Autowired + PDRestService pdRestService; + + @GetMapping(value = "/stores", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getStores() { + List storeStatsList = new ArrayList<>(); + try { + HashMap dataMap = new HashMap<>(); + Map stateCountMap = new HashMap<>(); + for (Metapb.Store store : pdRestService.getStores("")) { + String stateKey = store.getState().name(); + stateCountMap.put(stateKey, stateCountMap.getOrDefault(stateKey, 0) + 1); + storeStatsList.add(new StoreStatistics(store)); + } + storeStatsList.sort((o1, o2) -> o1.address.compareTo(o2.address)); + dataMap.put("stores", storeStatsList); + dataMap.put("numOfService", storeStatsList.size()); + dataMap.put("numOfNormalService", + stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0)); + dataMap.put("stateCountMap", stateCountMap); + return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + } catch (PDException e) { + log.error("PDException", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + // 仅支持通过该接口修改 storeState + @PostMapping(value = "/store/{storeId}", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String setStore(@PathVariable long storeId, @RequestBody StoreRestRequest request) { + try { + Metapb.Store lastStore = pdRestService.getStore(storeId); + if (lastStore != null) { + Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore); + Metapb.StoreState storeState = Metapb.StoreState.valueOf(request.getStoreState()); + builder.setState(storeState); + Metapb.Store newStore = pdRestService.updateStore(builder.build()); + return toJSON(newStore, "store"); + } else { + return toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "error")); + } + } catch (PDException e) { + return toJSON(e); + } + } + + @GetMapping(value = "/shardGroups", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getShardGroups() { + try { + return toJSON(pdRestService.getShardGroups(), "shardGroups"); + } catch (PDException e) { + return toJSON(e); + } + } 
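+
+    // Usage sketch (illustrative; host and port depend on the deployment):
+    //   GET /v1/shardGroups  -> JSON with the "shardGroups" array built by toJSON() above
+    //   GET /v1/shardLeaders -> raft address mapped to the shard groups it leads, see below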
+
+    /**
+     * Return the leader partitions on each store.
+     *
+     * @return a map from raft address to the ids of the shard groups it leads
+     */
+    @GetMapping(value = "/shardLeaders")
+    public Map<String, List<Integer>> shardLeaders() throws PDException {
+        Map<String, List<Integer>> leaders = new HashMap<>();
+        List<Metapb.ShardGroup> groups = pdRestService.getShardGroups();
+        groups.forEach(group -> {
+            group.getShardsList().forEach(shard -> {
+                if (shard.getRole() == Metapb.ShardRole.Leader) {
+                    try {
+                        String ip = pdRestService.getStore(shard.getStoreId()).getRaftAddress();
+                        if (!leaders.containsKey(ip)) {
+                            leaders.put(ip, new ArrayList<>());
+                        }
+                        leaders.get(ip).add(group.getId());
+                    } catch (PDException e) {
+                        throw new RuntimeException(e);
+                    }
+                }
+            });
+        });
+        return leaders;
+    }
+
+    @GetMapping(value = "/balanceLeaders")
+    public Map balanceLeaders() throws PDException {
+        return pdRestService.balancePartitionLeader();
+    }
+
+    @DeleteMapping(value = "/store/{storeId}")
+    public String removeStore(@PathVariable(value = "storeId") Long storeId) {
+        try {
+            pdRestService.removeStore(storeId);
+        } catch (PDException e) {
+            return e.getMessage();
+        }
+        return "OK";
+    }
+
+    @PostMapping(value = "/store/log", consumes = MediaType.APPLICATION_JSON_VALUE,
+                 produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getStoreLog(@RequestBody TimeRangeRequest request) {
+        try {
+            Date dateStart = DateUtil.getDate(request.getStartTime());
+            Date dateEnd = DateUtil.getDate(request.getEndTime());
+            List<Metapb.Store> changedStore =
+                    pdRestService.getStoreStatusLog(dateStart.getTime(), dateEnd.getTime());
+            if (changedStore != null) {
+                JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry
+                        .newBuilder().add(Metapb.Store.getDescriptor()).build();
+                return toJSON(changedStore, registry);
+            } else {
+                return toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "error"));
+            }
+        } catch (PDException e) {
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "store/{storeId}", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getStore(@PathVariable long storeId) {
+        // fetch the statistics of the store
+        Metapb.Store store = null;
+        try {
+            store = pdRestService.getStore(storeId);
+        } catch (PDException e) {
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+        if (store != null) {
+            StoreStatistics resultStoreStats = new StoreStatistics(store);
+            return new RestApiResponse(resultStoreStats, Pdpb.ErrorType.OK,
+                                       Pdpb.ErrorType.OK.name());
+        } else {
+            return new RestApiResponse(null, Pdpb.ErrorType.STORE_ID_NOT_EXIST,
+                                       Pdpb.ErrorType.STORE_ID_NOT_EXIST.name());
+        }
+    }
+
+    @GetMapping(value = "storesAndStats", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getStoresAndStats() {
+        // for debug use
+        try {
+            List<Metapb.Store> stores = pdRestService.getStores("");
+            return toJSON(stores, "stores");
+        } catch (PDException e) {
+            log.error("PD exception:" + e);
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "store_monitor/json/{storeId}", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getStoreMonitorData(@PathVariable long storeId) {
+        try {
+            List<Map<String, Long>> result = pdRestService.getMonitorData(storeId);
+            return new RestApiResponse(result, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+    }
+
+    @GetMapping(value = "store_monitor/{storeId}")
+    @ResponseBody
+    public String getStoreMonitorDataText(@PathVariable long storeId) {
+        try {
+            return pdRestService.getMonitorDataText(storeId);
+        } catch (PDException e) {
+            return "error:" + e.getErrorCode() + e.getMessage();
+        }
+    }
+
+    @Data
+    class Partition {
+        // partition info
+        int partitionId;
+        String graphName;
+        String role; // shard role
+        String workState;
+        long dataSize; // storage space used
+
+        Partition() {
+        }
+
+        Partition(Metapb.GraphStats graphStats) {
+            partitionId = graphStats.getPartitionId();
+            graphName = graphStats.getGraphName();
+            final int postfixLength = 2;
+            graphName = graphName.substring(0, graphName.length() - postfixLength);
+            role = String.valueOf(graphStats.getRole());
+            workState = String.valueOf(graphStats.getWorkState());
+            dataSize = graphStats.getApproximateSize();
+        }
+    }
+
+    @Data
+    class StoreStatistics {
+        // statistics of a store
+        long storeId;
+        String address;
+        String raftAddress;
+        String version;
+        String state;
+        String deployPath;
+        String dataPath; // data storage path
+        long startTimeStamp;
+        long registedTimeStamp; // registration time; for now the first heartbeat time is used
+        long lastHeartBeat; // time of the last heartbeat
+        long capacity;
+        long available;
+        int partitionCount;
+        int graphSize;
+        long keyCount;
+        long leaderCount; // number of partitions whose shard role is 'Leader'
+        String serviceName;
+        String serviceVersion;
+        long serviceCreatedTimeStamp; // service creation time
+        List<Partition> partitions;
+
+        StoreStatistics(Metapb.Store store) {
+            if (store != null) {
+                storeId = store.getId();
+                address = store.getAddress();
+                raftAddress = store.getRaftAddress();
+                state = String.valueOf(store.getState());
+                version = store.getVersion();
+                deployPath = store.getDeployPath();
+                final String prefix = "file:";
+                if ((deployPath != null) && (deployPath.startsWith(prefix))) {
+                    // strip the prefix
+                    deployPath = deployPath.substring(prefix.length());
+                }
+                if ((deployPath != null) && (deployPath.contains(".jar"))) {
+                    // strip everything after the jar name
+                    deployPath = deployPath.substring(0, deployPath.indexOf(".jar") + 4);
+                }
+                dataPath = store.getDataPath();
+                startTimeStamp = store.getStartTimestamp();
+                try {
+                    serviceCreatedTimeStamp = pdRestService.getStore(store.getId())
+                                                           .getStats()
+                                                           .getStartTime(); // instance start time
+                    final int base = 1000;
+                    serviceCreatedTimeStamp *= base; // convert to milliseconds
+                } catch (PDException e) {
+                    e.printStackTrace();
+                    serviceCreatedTimeStamp = store.getStartTimestamp();
+                }
+                registedTimeStamp = store.getStartTimestamp(); // registration time
+                lastHeartBeat = store.getLastHeartbeat();
+                capacity = store.getStats().getCapacity();
+                available = store.getStats().getAvailable();
+                partitionCount = store.getStats().getPartitionCount();
+                serviceName = address + "-store";
+                serviceVersion = store.getVersion();
+                List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
+                List<Partition> partitionStatsList = new ArrayList<>(); // holds the partition info
+                HashSet<String> graphNameSet = new HashSet<>(); // used to count the graphs
+                HashSet<Integer> leaderPartitionIds = new HashSet<>(); // counts the leader partitions
+                // build the partition info (graph info stored on this store)
+                Map<Integer, Long> partition2KeyCount = new HashMap<>();
+                for (Metapb.GraphStats graphStats : graphStatsList) {
+                    String graphName = graphStats.getGraphName();
+                    // keep only the part of the graph name before /g, /m or /s
+                    final int postfixLength = 2;
+                    graphNameSet.add(graphName.substring(0, graphName.length() - postfixLength));
+                    if ((graphStats.getGraphName() != null) &&
+                        (graphStats.getGraphName().endsWith("/g"))) {
+                        Partition pt = new Partition(graphStats);
+                        partitionStatsList.add(pt);
+                    }
+                    // accumulate the keyCount of each partition
+                    partition2KeyCount.put(graphStats.getPartitionId(),
+                                           graphStats.getApproximateKeys());
+                    if (graphStats.getRole() == Metapb.ShardRole.Leader) {
+                        leaderPartitionIds.add(graphStats.getPartitionId());
+                    }
+                }
+                for (Map.Entry<Integer, Long> entry : partition2KeyCount.entrySet()) {
keyCount += entry.getValue(); + } + partitions = partitionStatsList; + graphSize = graphNameSet.size(); + leaderCount = leaderPartitionIds.size(); + } + + } + } + +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java new file mode 100644 index 0000000000..d1419d2ddb --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.rest; + +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.service.PDRestService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1/task") +public class TaskAPI extends API { + @Autowired + PDRestService pdRestService; + + @GetMapping(value = "/patrolStores", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String patrolStores() { + try { + List stores = pdRestService.patrolStores(); + return toJSON(stores, "stores"); + } catch (PDException e) { + e.printStackTrace(); + return toJSON(e); + } + } + + @GetMapping(value = "/patrolPartitions", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String patrolPartitions() { + try { + List partitions = pdRestService.patrolPartitions(); + return toJSON(partitions, "partitions"); + } catch (PDException e) { + e.printStackTrace(); + return toJSON(e); + } + } + + @GetMapping(value = "/balancePartitions", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public Map> balancePartitions() { + try { + Map> partitions = pdRestService.balancePartitions(); + return partitions; + } catch (PDException e) { + e.printStackTrace(); + return null; + } + } + + @GetMapping(value = "/splitPartitions", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String splitPartitions() { + try { + List partitions = pdRestService.splitPartitions(); + return toJSON(partitions, "partitions"); + } catch (PDException e) { + e.printStackTrace(); + return toJSON(e); + } + } + + @GetMapping(value = "/balanceLeaders") + public Map balanceLeaders() throws PDException { + return pdRestService.balancePartitionLeader(); + } 
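+
+    // Usage sketch (illustrative; the REST port depends on the deployment):
+    //   GET /v1/task/balanceLeaders -> the result map of balancePartitionLeader() as JSON
+    //   GET /v1/task/compact        -> "compact ok" after triggering dbCompaction(), see below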
+ + @GetMapping(value = "/compact") + public String dbCompaction() throws PDException { + pdRestService.dbCompaction(); + return "compact ok"; + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java new file mode 100644 index 0000000000..1ab6326112 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.rest; + +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hugegraph.pd.RegistryService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.QueueStore; +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.watch.PDWatchSubject; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PutMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Parser; + +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/test") +public class TestAPI { + + @Autowired + private PDConfig pdConfig; + + @GetMapping(value = "/discovery/{appName}", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String discovery(@PathVariable(value = "appName", required = true) String appName) { + RegistryService register = new RegistryService(pdConfig); + // Query query=Query.newBuilder().setAppName("hugegraph").build(); + AtomicLong label = new AtomicLong(); + HashMap labels = new HashMap<>(); + String labelValue = String.valueOf(label.incrementAndGet()); + //labels.put("address",labelValue); + Query query = Query.newBuilder().build(); + // Query query = Query.newBuilder().setAppName("hugegraph").set.build(); + + return register.getNodes(query).toString(); + } + + @GetMapping(value = "/pulse", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public 
String notifyClient() { + PDPulseSubject.notifyClient( + PartitionHeartbeatResponse.newBuilder() + .setPartition(Metapb.Partition.newBuilder() + .setId(8) + .setGraphName("graphName8")) + + .setChangeShard( + ChangeShard.newBuilder() + .setChangeTypeValue(8) + .addShard(Metapb.Shard.newBuilder() + .setRoleValue(8) + .setStoreId(8) + ) + ) + + ); + return "partition"; + } + + @GetMapping(value = "/partition", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String noticePartition() { + PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.ALTER, "graph-test", 99); + return "partition"; + } + + @PutMapping(value = "/queue", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String testPutQueue() { + this.putQueue(); + return "queue"; + } + + public void putQueue() { + PartitionHeartbeatResponse response = PartitionHeartbeatResponse.newBuilder() + .setPartition( + Metapb.Partition.newBuilder() + .setId(9) + .setGraphName( + "graphName")) + .setChangeShard( + ChangeShard.newBuilder() + .setChangeTypeValue( + 9) + .addShard( + Metapb.Shard.newBuilder() + .setRoleValue( + 9) + .setStoreId( + 9) + ) + ).build(); + + Metapb.QueueItem.Builder builder = Metapb.QueueItem.newBuilder() + .setItemId("item-id") + .setItemClass("item-class") + .setItemContent(response.toByteString()); + + + QueueStore store = MetadataFactory.newQueueStore(pdConfig); + + try { + store.addItem(builder.setItemId("item-id-1").build()); + store.addItem(builder.setItemId("item-id-2").build()); + store.addItem(builder.setItemId("item-id-3").build()); + } catch (PDException e) { + e.printStackTrace(); + } + List queue = null; + try { + queue = store.getQueue(); + } catch (PDException e) { + e.printStackTrace(); + } + Parser parser = PartitionHeartbeatResponse.parser(); + + queue.stream().forEach(e -> { + PartitionHeartbeatResponse buf = null; + try { + buf = parser.parseFrom(e.getItemContent()); + } catch (InvalidProtocolBufferException ex) { + ex.printStackTrace(); + } + PDPulseSubject.notifyClient(PartitionHeartbeatResponse.newBuilder(buf)); + }); + + + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java new file mode 100644 index 0000000000..e1f6fcef3f --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import javax.annotation.PostConstruct; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.RegistryService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftStateListener; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import io.grpc.ManagedChannel; +import lombok.extern.slf4j.Slf4j; + +// TODO: uncomment later +@Slf4j +@GRpcService +public class DiscoveryService extends DiscoveryServiceGrpc.DiscoveryServiceImplBase implements + ServiceGrpc, + RaftStateListener { + + static final AtomicLong id = new AtomicLong(); + private static final String CORES = "cores"; + RegistryService register = null; + //LicenseVerifierService licenseVerifierService; + @Autowired + private PDConfig pdConfig; + private ManagedChannel channel; + + @PostConstruct + public void init() throws PDException { + log.info("PDService init………… {}", pdConfig); + RaftEngine.getInstance().init(pdConfig.getRaft()); + RaftEngine.getInstance().addStateListener(this); + register = new RegistryService(pdConfig); + //licenseVerifierService = new LicenseVerifierService(pdConfig); + } + + private Pdpb.ResponseHeader newErrorHeader(PDException e) { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage())) + .build(); + return header; + } + + @Override + public void register(NodeInfo request, io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(null, DiscoveryServiceGrpc.getRegisterMethod(), request, observer); + return; + } + int outTimes = pdConfig.getDiscovery().getHeartbeatOutTimes(); + RegisterInfo registerInfo; + try { + if (request.getAppName().equals("hg")) { + Query queryRequest = Query.newBuilder().setAppName(request.getAppName()) + .setVersion(request.getVersion()).build(); + NodeInfos nodes = register.getNodes(queryRequest); + String address = request.getAddress(); + int nodeCount = nodes.getInfoCount() + 1; + for (NodeInfo node : nodes.getInfoList()) { + if (node.getAddress().equals(address)) { + nodeCount = nodes.getInfoCount(); + break; + } + } + Map labelsMap = request.getLabelsMap(); + String coreCount = labelsMap.get(CORES); + if (StringUtils.isEmpty(coreCount)) { + throw new PDException(-1, "core count can not be null"); + } + int core = Integer.parseInt(coreCount); + //licenseVerifierService.verify(core, nodeCount); + } + register.register(request, outTimes); + String valueId = request.getId(); + registerInfo = RegisterInfo.newBuilder().setNodeInfo(NodeInfo.newBuilder().setId( + "0".equals(valueId) ? 
+ String.valueOf(id.incrementAndGet()) : valueId).build()) + .build(); + + } catch (PDException e) { + registerInfo = RegisterInfo.newBuilder().setHeader(newErrorHeader(e)).build(); + log.debug("registerStore exception: ", e); + } catch (PDRuntimeException ex) { + Pdpb.Error error = Pdpb.Error.newBuilder().setTypeValue(ex.getErrorCode()) + .setMessage(ex.getMessage()).build(); + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build(); + registerInfo = RegisterInfo.newBuilder().setHeader(header).build(); + log.debug("registerStore exception: ", ex); + } catch (Exception e) { + Pdpb.Error error = + Pdpb.Error.newBuilder().setTypeValue(Pdpb.ErrorType.UNKNOWN.getNumber()) + .setMessage(e.getMessage()).build(); + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build(); + registerInfo = RegisterInfo.newBuilder().setHeader(header).build(); + } + observer.onNext(registerInfo); + observer.onCompleted(); + } + + @Override + public void getNodes(Query request, io.grpc.stub.StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(null, DiscoveryServiceGrpc.getGetNodesMethod(), request, + responseObserver); + return; + } + responseObserver.onNext(register.getNodes(request)); + responseObserver.onCompleted(); + } + + @Override + public boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java new file mode 100644 index 0000000000..7898248d2d --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java @@ -0,0 +1,592 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import javax.annotation.PostConstruct; + +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.kv.K; +import org.apache.hugegraph.pd.grpc.kv.KResponse; +import org.apache.hugegraph.pd.grpc.kv.Kv; +import org.apache.hugegraph.pd.grpc.kv.KvResponse; +import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc; +import org.apache.hugegraph.pd.grpc.kv.LockRequest; +import org.apache.hugegraph.pd.grpc.kv.LockResponse; +import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse; +import org.apache.hugegraph.pd.grpc.kv.TTLRequest; +import org.apache.hugegraph.pd.grpc.kv.TTLResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchKv; +import org.apache.hugegraph.pd.grpc.kv.WatchRequest; +import org.apache.hugegraph.pd.grpc.kv.WatchResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchState; +import org.apache.hugegraph.pd.grpc.kv.WatchType; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftStateListener; +import org.apache.hugegraph.pd.watch.KvWatchSubject; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import io.grpc.ManagedChannel; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * kv 存储的核心实现类 + */ +@Slf4j +@GRpcService +public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implements RaftStateListener, + ServiceGrpc { + + private final ManagedChannel channel = null; + KvService kvService; + AtomicLong count = new AtomicLong(); + String msg = "node is not leader,it is necessary to redirect to the leader on the client"; + @Autowired + private PDConfig pdConfig; + private KvWatchSubject subjects; + private ScheduledExecutorService executor; + + @PostConstruct + public void init() { + RaftEngine.getInstance().init(pdConfig.getRaft()); + RaftEngine.getInstance().addStateListener(this); + kvService = new KvService(pdConfig); + subjects = new KvWatchSubject(pdConfig); + executor = Executors.newScheduledThreadPool(1); + executor.scheduleWithFixedDelay(() -> { + if (isLeader()) { + subjects.keepClientAlive(); + } + }, 0, KvWatchSubject.WATCH_TTL / 2, TimeUnit.MILLISECONDS); + } + + /** + * 普通的 put + * + * @param request + * @param responseObserver + */ + @Override + public void put(Kv request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutMethod(), request, responseObserver); + return; + } + KvResponse response; + KvResponse.Builder builder = KvResponse.newBuilder(); + try { + String key = request.getKey(); + String value = request.getValue(); + this.kvService.put(key, value); + WatchKv watchKV = getWatchKv(key, value); + subjects.notifyAllObserver(key, WatchType.Put, new WatchKv[]{watchKV}); + response = builder.setHeader(getResponseHeader()).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 普通的 get + * + * @param 
request + * @param responseObserver + */ + @Override + public void get(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getGetMethod(), request, responseObserver); + return; + } + KResponse response; + KResponse.Builder builder = KResponse.newBuilder(); + try { + String value = this.kvService.get(request.getKey()); + builder.setHeader(getResponseHeader()); + if (value != null) { + builder.setValue(value); + } + response = builder.build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getGetMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 普通的 delete + * + * @param request + * @param responseObserver + */ + @Override + public void delete(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeleteMethod(), request, responseObserver); + return; + } + KvResponse response; + KvResponse.Builder builder = KvResponse.newBuilder(); + try { + String key = request.getKey(); + Kv deleted = this.kvService.delete(key); + if (deleted.getValue() != null) { + WatchKv watchKV = getWatchKv(deleted.getKey(), deleted.getValue()); + subjects.notifyAllObserver(key, WatchType.Delete, new WatchKv[]{watchKV}); + } + response = builder.setHeader(getResponseHeader()).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeleteMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 按前缀删除 + * + * @param request + * @param responseObserver + */ + @Override + public void deletePrefix(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request, + responseObserver); + return; + } + KvResponse response; + KvResponse.Builder builder = KvResponse.newBuilder(); + try { + String key = request.getKey(); + List kvs = this.kvService.deleteWithPrefix(key); + WatchKv[] watchKvs = new WatchKv[kvs.size()]; + int i = 0; + for (Kv kv : kvs) { + WatchKv watchKV = getWatchKv(kv.getKey(), kv.getValue()); + watchKvs[i++] = watchKV; + } + subjects.notifyAllObserver(key, WatchType.Delete, watchKvs); + response = builder.setHeader(getResponseHeader()).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 按前缀查询 + * + * @param request + * @param responseObserver + */ + @Override + public void scanPrefix(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request, + responseObserver); + return; + } + ScanPrefixResponse response; + ScanPrefixResponse.Builder builder = ScanPrefixResponse.newBuilder(); + try { + Map kvs = this.kvService.scanWithPrefix(request.getKey()); + response = builder.setHeader(getResponseHeader()).putAllKvs(kvs).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request, + responseObserver); + return; + } + response = 
builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 获取随机非 0 字符串做 Id + * + * @return + */ + private long getRandomLong() { + + long result; + Random random = new Random(); + while ((result = random.nextLong()) == 0) { + continue; + } + return result; + } + + /** + * 普通的 watch + * + * @param request + * @param responseObserver + */ + @Override + public void watch(WatchRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + responseObserver.onError(new PDException(-1, msg)); + return; + } + try { + clientWatch(request, responseObserver, false); + } catch (PDException e) { + if (!isLeader()) { + try { + responseObserver.onError(new PDException(-1, msg)); + } catch (IllegalStateException ie) { + + } catch (Exception e1) { + log.error("redirect with error: ", e1); + } + } + } + } + + /** + * 普通的前缀 watch + * + * @param request + * @param responseObserver + */ + @Override + public void watchPrefix(WatchRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + responseObserver.onError(new PDException(-1, msg)); + return; + } + try { + clientWatch(request, responseObserver, true); + } catch (PDException e) { + if (!isLeader()) { + try { + responseObserver.onError(new PDException(-1, msg)); + } catch (IllegalStateException ie) { + + } catch (Exception e1) { + log.error("redirect with error: ", e1); + } + } + } + } + + /** + * 上面两个方法的通用方式 + * + * @param request + * @param responseObserver + * @param isPrefix + * @throws PDException + */ + private void clientWatch(WatchRequest request, StreamObserver responseObserver, + boolean isPrefix) throws PDException { + try { + String key = request.getKey(); + long clientId = request.getClientId(); + WatchResponse.Builder builder = WatchResponse.newBuilder(); + WatchResponse response; + if (request.getState().equals(WatchState.Starting) && clientId == 0) { + clientId = getRandomLong(); + response = builder.setClientId(clientId).setState(WatchState.Starting).build(); + } else { + response = builder.setState(WatchState.Started).build(); + } + String delimiter = + isPrefix ? 
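+            // The delimiter tells KvWatchSubject whether this observer watches a key
+            // prefix or an exact key, so events are delivered to the right group.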
KvWatchSubject.PREFIX_DELIMITER : KvWatchSubject.KEY_DELIMITER; + subjects.addObserver(key, clientId, responseObserver, delimiter); + synchronized (responseObserver) { + responseObserver.onNext(response); + } + } catch (PDException e) { + if (!isLeader()) { + throw new PDException(-1, msg); + } + throw new PDException(e.getErrorCode(), e); + } + + } + + + /** + * 加锁 + * + * @param request + * @param responseObserver + */ + @Override + public void lock(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockMethod(), request, responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if (clientId == 0) { + clientId = getRandomLong(); + } + boolean locked = this.kvService.lock(request.getKey(), request.getTtl(), clientId); + response = + builder.setHeader(getResponseHeader()).setSucceed(locked).setClientId(clientId) + .build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockMethod(), request, responseObserver); + return; + } + log.error("lock with error :", e); + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void lockWithoutReentrant(LockRequest request, + StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockWithoutReentrantMethod(), request, + responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if (clientId == 0) { + clientId = getRandomLong(); + } + boolean locked = this.kvService.lockWithoutReentrant(request.getKey(), request.getTtl(), + clientId); + response = + builder.setHeader(getResponseHeader()).setSucceed(locked).setClientId(clientId) + .build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockWithoutReentrantMethod(), request, + responseObserver); + return; + } + log.error("lock with error :", e); + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void isLocked(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getIsLockedMethod(), request, responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + boolean locked = this.kvService.locked(request.getKey()); + response = builder.setHeader(getResponseHeader()).setSucceed(locked).build(); + } catch (PDException e) { + log.error("lock with error :", e); + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getIsLockedMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 解锁 + * + * @param request + * @param responseObserver + */ + @Override + public void unlock(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getUnlockMethod(), request, responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if 
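+            // clientId 0 can never own a lock: lock() swaps 0 for a random non-zero
+            // id (see getRandomLong), so an unlock with 0 is always a caller error.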
(clientId == 0) { + throw new PDException(-1, "incorrect clientId: 0"); + } + boolean unlocked = this.kvService.unlock(request.getKey(), clientId); + response = builder.setHeader(getResponseHeader()).setSucceed(unlocked) + .setClientId(clientId).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getUnlockMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + + /** + * 锁续活 + * + * @param request + * @param responseObserver + */ + @Override + public void keepAlive(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request, + responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if (clientId == 0) { + throw new PDException(-1, "incorrect clientId: 0"); + } + boolean alive = this.kvService.keepAlive(request.getKey(), clientId); + response = + builder.setHeader(getResponseHeader()).setSucceed(alive).setClientId(clientId) + .build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 带超时时间的 put + * + * @param request + * @param responseObserver + */ + @Override + public void putTTL(TTLRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutTTLMethod(), request, responseObserver); + return; + } + TTLResponse response; + TTLResponse.Builder builder = TTLResponse.newBuilder(); + try { + this.kvService.put(request.getKey(), request.getValue(), request.getTtl()); + response = builder.setHeader(getResponseHeader()).setSucceed(true).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutTTLMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 续活带有超时时间的 key + * + * @param request + * @param responseObserver + */ + @Override + public void keepTTLAlive(TTLRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request, + responseObserver); + return; + } + TTLResponse response; + TTLResponse.Builder builder = TTLResponse.newBuilder(); + try { + this.kvService.keepAlive(request.getKey()); + response = builder.setHeader(getResponseHeader()).setSucceed(true).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + private WatchKv getWatchKv(String key, String value) { + WatchKv kv = WatchKv.newBuilder().setKey(key).setValue(value).build(); + return kv; + } + + @Override + public void onRaftLeaderChanged() { + subjects.notifyClientChangeLeader(); + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java 
b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java new file mode 100644 index 0000000000..2f21cfbacd --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.service; + +import java.util.Collections; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.HgPdPulseGrpc; +import org.apache.hugegraph.pd.grpc.pulse.PulseRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.QueueStore; +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@GRpcService +public class PDPulseService extends HgPdPulseGrpc.HgPdPulseImplBase { + + private static final Supplier> QUEUE_RETRIEVE_FUNCTION = + () -> Collections.emptyList(); + private static final Function QUEUE_ITEM_BOOLEAN_FUNCTION = (e) -> true; + private static final Function QUEUE_REMOVE_FUNCTION = (e) -> true; + @Autowired + private PDConfig pdConfig; + private QueueStore queueStore = null; + + public PDPulseService() { + PDPulseSubject.setQueueRetrieveFunction(() -> getQueue()); + PDPulseSubject.setQueueDurableFunction(getQueueDurableFunction()); + PDPulseSubject.setQueueRemoveFunction(getQueueRemoveFunction()); + } + + @Override + public StreamObserver pulse(StreamObserver responseObserver) { + return PDPulseSubject.addObserver(responseObserver); + } + + private Function getQueueRemoveFunction() { + return itemId -> { + try { + this.getQueueStore().removeItem(itemId); + return true; + } catch (Throwable t) { + log.error("Failed to remove item from store, item-id: " + itemId + ", cause by:", + t); + } + return false; + }; + } + + private Function getQueueDurableFunction() { + return item -> { + try { + this.getQueueStore().addItem(item); + return true; + } catch (Throwable t) { + log.error("Failed to add item to store, item: " + item.toString() + ", cause by:", + t); + } + return false; + }; + } + + private boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + + private List getQueue() { + + if (!isLeader()) { + return Collections.emptyList(); + } + + try { + return this.getQueueStore().getQueue(); + } catch (Throwable t) { + log.error("Failed to retrieve queue 
from QueueStore, cause by:", t); + } + + log.warn("Returned empty queue list."); + return Collections.emptyList(); + } + + private QueueStore getQueueStore() { + if (this.queueStore == null) { + this.queueStore = MetadataFactory.newQueueStore(pdConfig); + } + return this.queueStore; + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java new file mode 100644 index 0000000000..a7402886ff --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java @@ -0,0 +1,272 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.service; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; +import org.apache.hugegraph.pd.model.RegistryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestResponse; +import org.springframework.beans.factory.InitializingBean; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@Service +public class PDRestService implements InitializingBean { + private static final String EMPTY_STRING = ""; + @Autowired + PDService pdService; + @Autowired + DiscoveryService discoveryService; + private StoreNodeService storeNodeService; + private PartitionService partitionService; + private TaskScheduleService monitorService; + private ConfigService configService; + private LogService logService; + private StoreMonitorDataService storeMonitorDataService; + + /** + * 初始化 + * + * @throws Exception + */ + @Override + public void afterPropertiesSet() throws Exception { + storeNodeService = pdService.getStoreNodeService(); + partitionService = pdService.getPartitionService(); + monitorService = 
pdService.getTaskService(); + configService = pdService.getConfigService(); + logService = pdService.getLogService(); + storeMonitorDataService = pdService.getStoreMonitorDataService(); + HgAssert.isNotNull(storeNodeService, "storeNodeService does not initialize"); + HgAssert.isNotNull(partitionService, "partitionService does not initialize"); + } + + + public List getStores(String graphName) throws PDException { + return storeNodeService.getStores(graphName); + } + + public Metapb.Store getStore(long storeId) throws PDException { + return storeNodeService.getStore(storeId); + } + + public List getShardGroups() throws PDException { + return storeNodeService.getShardGroups(); + } + + public Metapb.Store updateStore(Metapb.Store store) throws PDException { + logService.insertLog(LogService.NODE_CHANGE, LogService.REST, store); + return storeNodeService.updateStore(store); + } + + public boolean removeStore(Long storeId) throws PDException { + if (storeId == null) { + return false; + } + return 0 != storeNodeService.removeStore(storeId); + } + + public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDException { + return configService.setGraphSpace(graphSpace); + } + + public List getGraphSpaces() throws PDException { + return configService.getGraphSpace(EMPTY_STRING); + } + + public Metapb.GraphSpace getGraphSpace(String graphSpaceName) throws PDException { + return configService.getGraphSpace(graphSpaceName).get(0); + } + + public List getGraphs() throws PDException { + return partitionService.getGraphs(); + } + + public Metapb.Graph getGraph(String graphName) throws PDException { + return partitionService.getGraph(graphName); + } + + public Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { + return partitionService.updateGraph(graph); + } + + public List getPartitions(String graphName) { + return partitionService.getPartitions(graphName); + } + + public List patrolStores() throws PDException { + return monitorService.patrolStores(); + } + + public List patrolPartitions() throws PDException { + return monitorService.patrolPartitions(); + } + + public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws + PDException { + return partitionService.getPartitionStats(graphName, partitionId); + } + + public List getPartitionStatus(String graphName) throws PDException { + return partitionService.getPartitionStatus(graphName); + } + + public Map> balancePartitions() throws PDException { + return monitorService.balancePartitionShard(); + } + + public List splitPartitions() throws PDException { + return monitorService.autoSplitPartition(); + } + + public List getStoreStats(boolean isActive) throws PDException { + return storeNodeService.getStoreStatus(isActive); + } + + public List> getMonitorData(long storeId) throws PDException { + return storeMonitorDataService.getStoreMonitorData(storeId); + } + + public String getMonitorDataText(long storeId) throws PDException { + return storeMonitorDataService.getStoreMonitorDataText(storeId); + } + + public RegistryRestResponse register(NodeInfo nodeInfo) throws PDException { + CountDownLatch latch = new CountDownLatch(1); + final RegisterInfo[] info = {null}; + RegistryRestResponse response = new RegistryRestResponse(); + try { + StreamObserver observer = new StreamObserver() { + @Override + public void onNext(RegisterInfo value) { + info[0] = value; + latch.countDown(); + } + + @Override + public void onError(Throwable t) { + latch.countDown(); + } + + @Override + public void onCompleted() { 
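+                    // Completion without a prior onNext still releases the latch,
+                    // so the REST thread waiting below cannot block forever.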
+ latch.countDown(); + } + }; + this.discoveryService.register(nodeInfo, observer); + latch.await(); + Pdpb.Error error = info[0].getHeader().getError(); + response.setErrorType(error.getType()); + response.setMessage(error.getMessage()); + } catch (InterruptedException e) { + response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + public ArrayList getNodeInfo(Query request) throws PDException { + CountDownLatch latch = new CountDownLatch(1); + final NodeInfos[] info = {null}; + RegistryRestResponse response = new RegistryRestResponse(); + ArrayList registryRestRequests = null; + try { + StreamObserver observer = new StreamObserver() { + @Override + public void onNext(NodeInfos value) { + info[0] = value; + latch.countDown(); + } + + @Override + public void onError(Throwable t) { + latch.countDown(); + } + + @Override + public void onCompleted() { + latch.countDown(); + } + }; + this.discoveryService.getNodes(request, observer); + latch.await(); + List infoList = info[0].getInfoList(); + registryRestRequests = new ArrayList(infoList.size()); + for (int i = 0; i < infoList.size(); i++) { + NodeInfo element = infoList.get(i); + RegistryRestRequest registryRestRequest = new RegistryRestRequest(); + registryRestRequest.setAddress(element.getAddress()); + registryRestRequest.setAppName(element.getAppName()); + registryRestRequest.setVersion(element.getVersion()); + registryRestRequest.setInterval(String.valueOf(element.getInterval())); + HashMap labels = new HashMap<>(); + labels.putAll(element.getLabelsMap()); + registryRestRequest.setLabels(labels); + registryRestRequests.add(registryRestRequest); + } + } catch (InterruptedException e) { + response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return registryRestRequests; + } + + public List getStoreStatusLog(Long start, Long end) throws PDException { + return logService.getLog(LogService.NODE_CHANGE, start, end); + } + + + public List getPartitionLog(Long start, Long end) throws PDException { + return logService.getLog(LogService.PARTITION_CHANGE, start, end); + } + + public Map balancePartitionLeader() throws PDException { + return monitorService.balancePartitionLeader(true); + } + + public void dbCompaction() throws PDException { + monitorService.dbCompaction(""); + } + + public List getShardList(int partitionId) throws PDException { + return storeNodeService.getShardList(partitionId); + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java new file mode 100644 index 0000000000..b7ec32abf1 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @@ -0,0 +1,1796 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.service; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; + +import javax.annotation.PostConstruct; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.PartitionInstructionListener; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.PartitionStatusListener; +import org.apache.hugegraph.pd.ShardGroupStatusListener; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.StoreStatusListener; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseResponse; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.grpc.watch.NodeEventType; +import org.apache.hugegraph.pd.grpc.watch.WatchGraphResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchType; +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.pulse.PulseListener; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftStateListener; +import org.apache.hugegraph.pd.util.grpc.StreamObserverUtil; +import org.apache.hugegraph.pd.watch.PDWatchSubject; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.util.CollectionUtils; + +import com.alipay.sofa.jraft.JRaftUtils; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import com.alipay.sofa.jraft.entity.PeerId; + +import io.grpc.ManagedChannel; 
+import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +// TODO: uncomment later - remove license verifier service now +@Slf4j +@GRpcService +public class PDService extends PDGrpc.PDImplBase implements ServiceGrpc, RaftStateListener { + + static String TASK_ID_KEY = "task_id"; + private final Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build(); + // private ManagedChannel channel; + private final Map channelMap = new ConcurrentHashMap<>(); + @Autowired + private PDConfig pdConfig; + private StoreNodeService storeNodeService; + private PartitionService partitionService; + private TaskScheduleService taskService; + private IdService idService; + private ConfigService configService; + private LogService logService; + //private LicenseVerifierService licenseVerifierService; + private StoreMonitorDataService storeMonitorDataService; + private ManagedChannel channel; + + private Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setTypeValue(errorCode).setMessage(errorMsg)).build(); + return header; + } + + private Pdpb.ResponseHeader newErrorHeader(PDException e) { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage())) + .build(); + return header; + } + + public StoreNodeService getStoreNodeService() { + return storeNodeService; + } + + public PartitionService getPartitionService() { + return partitionService; + } + + public TaskScheduleService getTaskService() { + return taskService; + } + + public ConfigService getConfigService() { + return configService; + } + + public StoreMonitorDataService getStoreMonitorDataService() { + return this.storeMonitorDataService; + } + + public LogService getLogService() { + return logService; + } + + //public LicenseVerifierService getLicenseVerifierService() { + // return licenseVerifierService; + //} + + /** + * 初始化 + */ + @PostConstruct + public void init() throws PDException { + log.info("PDService init………… {}", pdConfig); + configService = new ConfigService(pdConfig); + + RaftEngine.getInstance().addStateListener(this); + RaftEngine.getInstance().addStateListener(configService); + RaftEngine.getInstance().init(pdConfig.getRaft()); + //pdConfig = configService.loadConfig(); onLeaderChanged 中加载 + storeNodeService = new StoreNodeService(pdConfig); + partitionService = new PartitionService(pdConfig, storeNodeService); + taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService); + idService = new IdService(pdConfig); + logService = new LogService(pdConfig); + storeMonitorDataService = new StoreMonitorDataService(pdConfig); + //if (licenseVerifierService == null) { + // licenseVerifierService = new LicenseVerifierService(pdConfig); + //} + RaftEngine.getInstance().addStateListener(partitionService); + pdConfig.setIdService(idService); + + // 接收心跳消息 + PDPulseSubject.listenPartitionHeartbeat(new PulseListener() { + @Override + public void onNext(PartitionHeartbeatRequest request) throws Exception { + partitionService.partitionHeartbeat(request.getStates()); + } + + @Override + public void onError(Throwable throwable) { + log.error("Received an error notice from pd-client", throwable); + } + + @Override + public void onCompleted() { + log.info("Received an completed notice from pd-client"); + } + }); + + + /** + * 
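+         * Listens for partition instructions and forwards them to the Store: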
监听分区指令,并转发给 Store + */ + partitionService.addInstructionListener(new PartitionInstructionListener() { + private PartitionHeartbeatResponse.Builder getBuilder(Metapb.Partition partition) throws + PDException { + return PartitionHeartbeatResponse.newBuilder().setPartition(partition) + .setId(idService.getId(TASK_ID_KEY, 1)); + } + + @Override + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws + PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setChangeShard(changeShard)); + + } + + @Override + public void transferLeader(Metapb.Partition partition, + TransferLeader transferLeader) throws + PDException { + PDPulseSubject.notifyClient( + getBuilder(partition).setTransferLeader(transferLeader)); + } + + @Override + public void splitPartition(Metapb.Partition partition, + SplitPartition splitPartition) throws + PDException { + PDPulseSubject.notifyClient( + getBuilder(partition).setSplitPartition(splitPartition)); + + } + + @Override + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws + PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setDbCompaction(dbCompaction)); + + } + + @Override + public void movePartition(Metapb.Partition partition, + MovePartition movePartition) throws PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setMovePartition(movePartition)); + } + + @Override + public void cleanPartition(Metapb.Partition partition, + CleanPartition cleanPartition) throws PDException { + PDPulseSubject.notifyClient( + getBuilder(partition).setCleanPartition(cleanPartition)); + } + + @Override + public void changePartitionKeyRange(Metapb.Partition partition, + PartitionKeyRange partitionKeyRange) + throws PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setKeyRange(partitionKeyRange)); + } + }); + + /** + * 监听分区状态改变消息,并转发给 Client + */ + partitionService.addStatusListener(new PartitionStatusListener() { + @Override + public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { + PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.ALTER, + partition.getGraphName(), partition.getId()); + } + + @Override + public void onPartitionRemoved(Metapb.Partition partition) { + PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.DEL, + partition.getGraphName(), + partition.getId()); + + } + }); + + storeNodeService.addShardGroupStatusListener(new ShardGroupStatusListener() { + @Override + public void onShardListChanged(Metapb.ShardGroup shardGroup, + Metapb.ShardGroup newShardGroup) { + // invoked before change, saved to db and update cache. 
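+                // A null newShardGroup means the group is being removed (DEL);
+                // otherwise the shard list was altered (ALTER) and the event carries
+                // the new value.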
+ if (newShardGroup == null) { + PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.DEL, + shardGroup.getId(), + shardGroup); + } else { + PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.ALTER, + shardGroup.getId(), newShardGroup); + } + } + + @Override + public void onShardListOp(Metapb.ShardGroup shardGroup) { + PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.USER_DEFINED, + shardGroup.getId(), shardGroup); + } + }); + + /** + * 监听 store 状态改变消息,并转发给 Client + */ + storeNodeService.addStatusListener(new StoreStatusListener() { + + @Override + public void onStoreStatusChanged(Metapb.Store store, + Metapb.StoreState old, + Metapb.StoreState status) { + NodeEventType type = NodeEventType.NODE_EVENT_TYPE_UNKNOWN; + if (status == Metapb.StoreState.Up) { + type = NodeEventType.NODE_EVENT_TYPE_NODE_ONLINE; + } else if (status == Metapb.StoreState.Offline) { + type = NodeEventType.NODE_EVENT_TYPE_NODE_OFFLINE; + } + PDWatchSubject.notifyNodeChange(type, "", store.getId()); + } + + @Override + public void onGraphChange(Metapb.Graph graph, + Metapb.GraphState stateOld, + Metapb.GraphState stateNew) { + WatchGraphResponse wgr = WatchGraphResponse.newBuilder() + .setGraph(graph) + .build(); + WatchResponse.Builder wr = WatchResponse.newBuilder() + .setGraphResponse(wgr); + PDWatchSubject.notifyChange(WatchType.WATCH_TYPE_GRAPH_CHANGE, + wr); + } + + @Override + public void onStoreRaftChanged(Metapb.Store store) { + PDWatchSubject.notifyNodeChange(NodeEventType.NODE_EVENT_TYPE_NODE_RAFT_CHANGE, "", + store.getId()); + } + }); + storeNodeService.init(partitionService); + partitionService.init(); + taskService.init(); + // log.info("init ......."); + // licenseVerifierService.init(); + + // UpgradeService upgradeService = new UpgradeService(pdConfig); + // upgradeService.upgrade(); + } + + /** + *
+     * Register a store. The first registration generates a new store_id;
+     * store_id is the unique identifier of a store.
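+     * A rough sketch of the exchange (field values are illustrative only):
+     *   request:  RegisterStoreRequest { store { id: 0, address: "..." } }
+     *   response: RegisterStoreResponse { header: OK, store_id: <assigned id> }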
+ */ + @Override + public void registerStore(Pdpb.RegisterStoreRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getRegisterStoreMethod(), request, observer); + return; + } + Pdpb.RegisterStoreResponse response = null; + try { + Metapb.Store store = storeNodeService.register(request.getStore()); + response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(okHeader) + .setStoreId(store.getId()) + .build(); + } catch (PDException e) { + response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("registerStore exception: ", e); + } + // 拉取所有分区信息,并返回 + observer.onNext(response); + observer.onCompleted(); + + } + + /** + * 根据 store_id 查找 store + */ + @Override + public void getStore(Pdpb.GetStoreRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetStoreMethod(), request, observer); + return; + } + Pdpb.GetStoreResponse response = null; + try { + Metapb.Store store = storeNodeService.getStore(request.getStoreId()); + response = + Pdpb.GetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); + } catch (PDException e) { + response = Pdpb.GetStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("{} getStore exception: {}", StreamObserverUtil.getRemoteIP(observer), e); + } + + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Update store information such as its state.
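+     * Rules enforced by the implementation: only a Pending store may be set to Up;
+     * a store cannot be set Offline while the cluster state is abnormal; and moving
+     * to Tombstone first verifies that enough active stores remain and that all
+     * partitions of this store can be moved out.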
+ */ + @Override + public void setStore(Pdpb.SetStoreRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSetStoreMethod(), request, observer); + return; + } + Pdpb.SetStoreResponse response = null; + try { + Metapb.StoreState state = request.getStore().getState(); + Long storeId = request.getStore().getId(); + // 处于 Pending 状态,才可以上线 + Metapb.Store lastStore = storeNodeService.getStore(request.getStore().getId()); + if (lastStore == null) { + // storeId 不存在,抛出异常 + throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d does not exist!", storeId)); + } + if (Metapb.StoreState.Up.equals(state)) { + if (!Metapb.StoreState.Pending.equals(lastStore.getState())) { + throw new PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, + "only stores in Pending state can be set to Up!"); + } + } + if (state.equals(Metapb.StoreState.Offline)) { + Metapb.ClusterStats stats = storeNodeService.getClusterStats(); + if (stats.getState() != Metapb.ClusterState.Cluster_OK) { + Pdpb.ResponseHeader errorHeader = newErrorHeader(-1, + "can not offline node " + + + "when cluster state is not " + + "normal "); + response = Pdpb.SetStoreResponse.newBuilder().setHeader(errorHeader).build(); + observer.onNext(response); + observer.onCompleted(); + return; + } + } + logService.insertLog(LogService.NODE_CHANGE, LogService.GRPC, request.getStore()); + // 检查失败,状态改为 Pending,把错误原因返回去 + if (state.equals(Metapb.StoreState.Up)) { + int cores = 0; + long id = request.getStore().getId(); + List stores = storeNodeService.getStores(); + int nodeCount = 0; + for (Metapb.Store store : stores) { + if (store.getId() == id) { + // 获取之前注册的 store 中的 cores 作为验证参数 + cores = store.getCores(); + } + if (store.getState().equals(Metapb.StoreState.Up)) { + nodeCount++; + } + } + try { + //licenseVerifierService.verify(cores, nodeCount); + } catch (Exception e) { + Metapb.Store store = Metapb.Store.newBuilder(request.getStore()) + .setState(Metapb.StoreState.Pending).build(); + storeNodeService.updateStore(store); + throw new PDException(Pdpb.ErrorType.LICENSE_ERROR_VALUE, + "check license with error :" + + e.getMessage() + + ", and changed node state to 'Pending'"); + } + } + Metapb.Store store = request.getStore(); + // 下线之前先判断一下,活跃机器数是否大于最小阈值 + if (state.equals(Metapb.StoreState.Tombstone)) { + List activeStores = storeNodeService.getActiveStores(); + if (lastStore.getState() == Metapb.StoreState.Up + && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + + pdConfig.getMinStoreCount()); + } + if (!storeNodeService.checkStoreCanOffline(request.getStore())) { + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "check activeStores or online shardsList size"); + } + if (lastStore.getState() == Metapb.StoreState.Exiting) { + // 如果已经是下线中的状态,则不作进一步处理 + throw new PDException(Pdpb.ErrorType.Store_Tombstone_Doing_VALUE, + "Downline is in progress, do not resubmit"); + } + Map resultMap = taskService.canAllPartitionsMovedOut(lastStore); + if ((boolean) resultMap.get("flag")) { + if (resultMap.get("current_store_is_online") != null + && (boolean) resultMap.get("current_store_is_online")) { + log.info("updateStore removeActiveStores store {}", store.getId()); + // 将在线的 store 的状态设置为下线中,等待副本迁移 + store = Metapb.Store.newBuilder(lastStore) + .setState(Metapb.StoreState.Exiting).build(); + // 进行分区迁移操作 + taskService.movePartitions((Map>) 
resultMap.get( + "movedPartitions")); + } else { + // store 已经离线的,不做副本迁移 + // 将状态改为 Tombstone + } + } else { + throw new PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, + "the resources on other stores may be not enough to " + + "store " + + "the partitions of current store!"); + } + } + // 替换 license 都走 grpc + store = storeNodeService.updateStore(store); + response = + Pdpb.SetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); + } catch (PDException e) { + response = Pdpb.SetStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("setStore exception: ", e); + } + + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 返回所有的 store,exclude_offline_stores=true,返回活跃的 stores + */ + @Override + public void getAllStores(Pdpb.GetAllStoresRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetAllStoresMethod(), request, observer); + return; + } + Pdpb.GetAllStoresResponse response = null; + try { + List stores = null; + if (request.getExcludeOfflineStores()) { + stores = storeNodeService.getActiveStores(request.getGraphName()); + } else { + stores = storeNodeService.getStores(request.getGraphName()); + } + response = + Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) + .build(); + } catch (PDException e) { + response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getAllStores exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 处理 store 心跳 + */ + @Override + public void storeHeartbeat(Pdpb.StoreHeartbeatRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getStoreHeartbeatMethod(), request, observer); + return; + } + + Metapb.StoreStats stats = request.getStats(); + + // save monitor data when monitor data enabled + if (this.pdConfig.getStore().isMonitorDataEnabled()) { + try { + storeMonitorDataService.saveMonitorData(stats); + } catch (PDException e) { + log.error("save status failed, state:{}", stats); + } + // remove system_metrics + stats = Metapb.StoreStats.newBuilder() + .mergeFrom(request.getStats()) + .clearField(Metapb.StoreStats.getDescriptor().findFieldByName( + "system_metrics")) + .build(); + } + + Pdpb.StoreHeartbeatResponse response = null; + try { + Metapb.ClusterStats clusterStats = storeNodeService.heartBeat(stats); + response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(okHeader) + .setClusterStats(clusterStats).build(); + } catch (PDException e) { + response = + Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("storeHeartbeat exception: ", e); + } catch (Exception e2) { + response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader( + newErrorHeader(Pdpb.ErrorType.UNKNOWN_VALUE, e2.getMessage())).build(); + log.error("storeHeartbeat exception: ", e2); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Look up the partition that the given key belongs to.
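+     * The response carries both the partition and its current leader shard, so the
+     * caller can route requests without a second lookup.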
+ */ + @Override + public void getPartition(Pdpb.GetPartitionRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionMethod(), request, observer); + return; + } + Pdpb.GetPartitionResponse response = null; + try { + Metapb.PartitionShard partShard = + partitionService.getPartitionShard(request.getGraphName(), + request.getKey() + .toByteArray()); + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partShard.getPartition()) + .setLeader(partShard.getLeader()).build(); + } catch (PDException e) { + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getPartition exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Look up the partition that the given hash code belongs to.
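+     * Same as getPartition, except the caller passes a pre-computed hash code
+     * instead of the raw key bytes.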
+ */ + @Override + public void getPartitionByCode(Pdpb.GetPartitionByCodeRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionByCodeMethod(), request, observer); + return; + } + Pdpb.GetPartitionResponse response = null; + try { + Metapb.PartitionShard partShard = + partitionService.getPartitionByCode(request.getGraphName(), + request.getCode()); + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partShard.getPartition()) + .setLeader(partShard.getLeader()).build(); + } catch (PDException e) { + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getPartitionByCode exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 根据 partition_id 查找 partition + */ + @Override + public void getPartitionByID(Pdpb.GetPartitionByIDRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionByIDMethod(), request, observer); + return; + } + Pdpb.GetPartitionResponse response = null; + try { + Metapb.PartitionShard partShard = + partitionService.getPartitionShardById(request.getGraphName(), + request.getPartitionId()); + if (partShard == null) { + throw new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, + String.format("partition: %s-%s not found", + request.getGraphName(), + request.getPartitionId())); + } + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partShard.getPartition()) + .setLeader(partShard.getLeader()).build(); + } catch (PDException e) { + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getPartitionByID exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Update partition information, mainly the partition key ranges.
+     * Call this interface with caution, otherwise data loss may occur.
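+     * The partition list in the request is written through as-is, which is why a
+     * malformed key range can lead to data loss.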
+ */ + @Override + public void updatePartition(Pdpb.UpdatePartitionRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdatePartitionMethod(), request, observer); + return; + } + Pdpb.UpdatePartitionResponse response = null; + try { + partitionService.updatePartition(request.getPartitionList()); + response = Pdpb.UpdatePartitionResponse.newBuilder().setHeader(okHeader).build(); + + } catch (PDException e) { + response = + Pdpb.UpdatePartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("update partition exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 根据 partition_id 查找 partition + */ + @Override + public void delPartition(Pdpb.DelPartitionRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDelPartitionMethod(), request, observer); + return; + } + Pdpb.DelPartitionResponse response = null; + try { + Metapb.Partition partition = partitionService.getPartitionById(request.getGraphName(), + request.getPartitionId()); + if (partition != null) { + partitionService.removePartition(request.getGraphName(), + request.getPartitionId()); + response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partition) + .build(); + } else { + response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader).build(); + } + } catch (PDException e) { + response = Pdpb.DelPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("delPartition exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 给定 key 范围查找所属的 partition 集合 + */ + @Override + public void scanPartitions(Pdpb.ScanPartitionsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getScanPartitionsMethod(), request, observer); + return; + } + Pdpb.ScanPartitionsResponse response = null; + try { + List partShards = + partitionService.scanPartitions(request.getGraphName(), + request.getStartKey() + .toByteArray(), + request.getEndKey() + .toByteArray()); + response = Pdpb.ScanPartitionsResponse.newBuilder().setHeader(okHeader) + .addAllPartitions(partShards).build(); + } catch (PDException e) { + response = + Pdpb.ScanPartitionsResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("scanPartitions exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 获得图信息 + */ + @Override + public void getGraph(GetGraphRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetGraphMethod(), request, observer); + return; + } + + Pdpb.GetGraphResponse response = null; + String graphName = request.getGraphName(); + try { + Metapb.Graph graph = partitionService.getGraph(graphName); + if (graph != null) { + response = Pdpb.GetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph) + .build(); + } else { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.NOT_FOUND).build()).build(); + response = Pdpb.GetGraphResponse.newBuilder().setHeader(header).build(); + } + } catch (PDException e) { + response = Pdpb.GetGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getGraph exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 修改图信息 + */ + @Override + public void setGraph(Pdpb.SetGraphRequest request, + 
io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSetGraphMethod(), request, observer); + return; + } + Pdpb.SetGraphResponse response = null; + Metapb.Graph graph = request.getGraph(); + try { + graph = partitionService.updateGraph(graph); + response = + Pdpb.SetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); + } catch (PDException e) { + log.error("setGraph exception: ", e); + response = Pdpb.SetGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Delete graph info + */ + @Override + public void delGraph(Pdpb.DelGraphRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDelGraphMethod(), request, observer); + return; + } + + Pdpb.DelGraphResponse response = null; + String graphName = request.getGraphName(); + try { + Metapb.Graph graph = partitionService.delGraph(graphName); + if (graph != null) { + response = Pdpb.DelGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph) + .build(); + } else { + // graph not found: still return an ok response to avoid calling onNext(null) + response = Pdpb.DelGraphResponse.newBuilder().setHeader(okHeader).build(); + } + } catch (PDException e) { + response = Pdpb.DelGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("delGraph exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + }
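+ // A hypothetical client-side sketch (not part of this patch) of building the request
+ // that queryPartitions() below consumes; the builder method names are assumed from
+ // protobuf conventions, mirroring the hasXxx/getXxx accessors used in the handler:
+ //
+ //   Pdpb.QueryPartitionsRequest req = Pdpb.QueryPartitionsRequest.newBuilder()
+ //           .setQuery(Metapb.PartitionQuery.newBuilder()
+ //                             .setGraphName("g1")  // keep partitions of graph "g1" only
+ //                             .setStoreId(1001L)   // keep partitions with a shard on store 1001
+ //                             .build())
+ //           .build();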
+ + /** + * + * Query partition info by conditions, such as Store and Graph
+     * 
+ */ + @Override + public void queryPartitions(Pdpb.QueryPartitionsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getQueryPartitionsMethod(), request, observer); + return; + } + //TODO: temporary traversal-based implementation; once rocksdb storage is used, implement this via a kv index + Metapb.PartitionQuery query = request.getQuery(); + List partitions = partitionService.getPartitions(query.getGraphName()); + List result = new ArrayList<>(); + if (!CollectionUtils.isEmpty(partitions)) { + for (Metapb.Partition partition : partitions) { + if (query.hasPartitionId() && partition.getId() != query.getPartitionId()) { + continue; + } + if (query.hasGraphName() && + !partition.getGraphName().equals(query.getGraphName())) { + continue; + } + long storeId = query.getStoreId(); + if (query.hasStoreId() && query.getStoreId() != 0) { + try { + storeNodeService.getShardGroup(partition.getId()).getShardsList() + .forEach(shard -> { + if (shard.getStoreId() == storeId) { + result.add(partition); + } + }); + } catch (PDException e) { + log.error("query partitions error, req:{}, error:{}", request, + e.getMessage()); + } + } else { + result.add(partition); + } + } + } + Pdpb.QueryPartitionsResponse response = Pdpb.QueryPartitionsResponse.newBuilder() + .addAllPartitions( + result).build(); + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getId(Pdpb.GetIdRequest request, + StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetIdMethod(), request, responseObserver); + return; + } + long id = 0L; + try { + id = idService.getId(request.getKey(), request.getDelta()); + } catch (PDException e) { + responseObserver.onError(e); + log.error("getId exception: ", e); + return; + } + Pdpb.GetIdResponse response = + Pdpb.GetIdResponse.newBuilder().setId(id).setDelta(request.getDelta()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void resetId(Pdpb.ResetIdRequest request, + StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getResetIdMethod(), request, responseObserver); + return; + } + try { + idService.resetId(request.getKey()); + } catch (PDException e) { + responseObserver.onError(e); + log.error("resetId exception: ", e); + return; + } + Pdpb.ResetIdResponse response = Pdpb.ResetIdResponse.newBuilder().setResult(0).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * Get cluster member info + */ + @Override + public void getMembers(Pdpb.GetMembersRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetMembersMethod(), request, observer); + return; + } + Pdpb.GetMembersResponse response; + try { + response = Pdpb.GetMembersResponse.newBuilder() + .addAllMembers(RaftEngine.getInstance().getMembers()) + .setLeader(RaftEngine.getInstance().getLocalMember()) + .build(); + } catch (Exception e) { + log.error("getMembers exception: ", e); + response = Pdpb.GetMembersResponse.newBuilder() + .setHeader(newErrorHeader(-1, e.getMessage())) + .build(); + } + observer.onNext(response); + observer.onCompleted(); + } + 
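+ // A hypothetical usage sketch (not part of this patch) for the getId() allocator above,
+ // assuming the returned id marks the start of the reserved block, so the caller may use
+ // ids in [id, id + delta) before asking again; builder names follow protobuf conventions:
+ //
+ //   Pdpb.GetIdRequest req = Pdpb.GetIdRequest.newBuilder()
+ //           .setKey("vertex-id")  // one counter per key
+ //           .setDelta(100)        // size of the id block to reserve in one call
+ //           .build();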
+ @Override + public void getStoreStatus(Pdpb.GetAllStoresRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetStoreStatusMethod(), request, observer); + return; + } + Pdpb.GetAllStoresResponse response = null; + try { + List stores = storeNodeService.getStoreStatus(request.getExcludeOfflineStores()); + response = + Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) + .build(); + } catch (PDException e) { + response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getStoreStatus exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Read the PD config + */ + @Override + public void getPDConfig(Pdpb.GetPDConfigRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPDConfigMethod(), request, observer); + return; + } + Pdpb.GetPDConfigResponse response = null; + try { + Metapb.PDConfig pdConfig = configService.getPDConfig(request.getVersion()); + response = + Pdpb.GetPDConfigResponse.newBuilder().setHeader(okHeader).setPdConfig(pdConfig) + .build(); + } catch (PDException e) { + response = Pdpb.GetPDConfigResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Update the PD config + */ + @Override + public void setPDConfig(Pdpb.SetPDConfigRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSetPDConfigMethod(), request, observer); + return; + } + Pdpb.SetPDConfigResponse response = null; + try { + if (request.getPdConfig().getShardCount() % 2 != 1) { + // the shard (replica) count must be odd + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "shard count must be an odd number!"); + } + if (request.getPdConfig().getShardCount() > + storeNodeService.getActiveStores().size()) { + // it can't be greater than the number of active stores + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "shard count can't be greater than the number of active " + + "stores!"); + } + int oldShardCount = configService.getPDConfig().getShardCount(); + int newShardCount = request.getPdConfig().getShardCount(); + if (newShardCount > oldShardCount) { + // if the shard count grows, check whether the stores have enough resources + if (!isResourceEnough(oldShardCount, newShardCount)) { + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "There is not enough disk space left!"); + } + + if (!checkShardCount(newShardCount)) { + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "the cluster can't support such a large shard count!"); + } + } + configService.setPDConfig(request.getPdConfig()); + response = Pdpb.SetPDConfigResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + response = Pdpb.SetPDConfigResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Read the graph space config + */ + @Override + public void getGraphSpace(Pdpb.GetGraphSpaceRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetGraphSpaceMethod(), request, observer); + return; + } + Pdpb.GetGraphSpaceResponse response = null; + try { + List graphSpaces = configService.getGraphSpace(request.getGraphSpaceName()); + response = Pdpb.GetGraphSpaceResponse.newBuilder().setHeader(okHeader) + .addAllGraphSpace(graphSpaces).build(); + } catch (PDException e) { + response = Pdpb.GetGraphSpaceResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Update the graph space config + */ + @Override + public void setGraphSpace(Pdpb.SetGraphSpaceRequest request, + io.grpc.stub.StreamObserver observer) {
+ if (!isLeader()) { + redirectToLeader(PDGrpc.getSetGraphSpaceMethod(), request, observer); + return; + } + Pdpb.SetGraphSpaceResponse response = null; + try { + configService.setGraphSpace(request.getGraphSpace()); + response = Pdpb.SetGraphSpaceResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + response = Pdpb.SetGraphSpaceResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + }
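+ // A condensed sketch (not part of this patch) restating the preconditions that
+ // setPDConfig() above enforces before accepting a new shard count; the helper
+ // itself is hypothetical:
+ //
+ //   boolean acceptable(int newCount, int oldCount, int activeStores) throws PDException {
+ //       return newCount % 2 == 1            // replica count must be odd
+ //              && newCount <= activeStores  // at most one replica per active store
+ //              && (newCount <= oldCount     // shrinking needs no capacity check
+ //                  || (isResourceEnough(oldCount, newCount) && checkShardCount(newCount)));
+ //   }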
+ + /** + * + * Split data
+     * 
+ */ + @Override + public void splitData(Pdpb.SplitDataRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSplitDataMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "splitData", request); + Pdpb.SplitDataResponse response = null; + try { + taskService.splitPartition(request.getMode(), request.getParamList()); + response = Pdpb.SplitDataResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("splitData exception: ", e); + response = Pdpb.SplitDataResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void splitGraphData(Pdpb.SplitGraphDataRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSplitGraphDataMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "splitGraphData", request); + Pdpb.SplitDataResponse response; + try { + partitionService.splitPartition(partitionService.getGraph(request.getGraphName()), + request.getToCount()); + response = Pdpb.SplitDataResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("splitGraphData exception: ", e); + response = Pdpb.SplitDataResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Balance data between stores + */ + @Override + public void movePartition(Pdpb.MovePartitionRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getMovePartitionMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "balanceData", request); + Pdpb.MovePartitionResponse response = null; + try { + taskService.patrolPartitions(); + taskService.balancePartitionShard(); + response = Pdpb.MovePartitionResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("movePartition exception: ", e); + response = Pdpb.MovePartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Get the cluster health status
+     * 
+ */ + @Override + public void getClusterStats(Pdpb.GetClusterStatsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetClusterStatsMethod(), request, observer); + return; + } + Pdpb.GetClusterStatsResponse response = null; + response = Pdpb.GetClusterStatsResponse.newBuilder().setHeader(okHeader) + .setCluster(storeNodeService.getClusterStats()) + .build(); + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Report the execution results of tasks such as partition split
+     * 
+ */ + @Override + public void reportTask(Pdpb.ReportTaskRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getReportTaskMethod(), request, observer); + return; + } + try { + taskService.reportTask(request.getTask()); + } catch (Exception e) { + log.error("PDService.reportTask exception: ", e); + } + Pdpb.ReportTaskResponse response = + Pdpb.ReportTaskResponse.newBuilder().setHeader(okHeader).build(); + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getPartitionStats(Pdpb.GetPartitionStatsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionStatsMethod(), request, observer); + return; + } + Pdpb.GetPartitionStatsResponse response; + // TODO + try { + Metapb.PartitionStats stats = partitionService.getPartitionStats(request.getGraphName(), + request.getPartitionId()); + response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(okHeader) + .setPartitionStats(stats).build(); + } catch (PDException e) { + log.error("getPartitionStats exception: ", e); + response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(newErrorHeader(e)) + .build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + + //private > void redirectToLeader( + // MethodDescriptor method, ReqT req, io.grpc.stub.StreamObserver + // observer) { + // try { + // var addr = RaftEngine.getInstance().getLeaderGrpcAddress(); + // ManagedChannel channel; + // + // if ((channel = channelMap.get(addr)) == null) { + // synchronized (this) { + // if ((channel = channelMap.get(addr)) == null|| channel.isShutdown()) { + // channel = ManagedChannelBuilder + // .forTarget(addr).usePlaintext() + // .build(); + // } + // } + // log.info("Grpc get leader address {}", RaftEngine.getInstance() + // .getLeaderGrpcAddress()); + // } + // + // io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions + // .DEFAULT), req, + // observer); + // } catch (Exception e) { + // e.printStackTrace(); + // } + //} + + /** + * Update the peerList + */ + @Override + public void changePeerList(Pdpb.ChangePeerListRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getChangePeerListMethod(), request, observer); + return; + } + Pdpb.getChangePeerListResponse response; + try { + Status status = RaftEngine.getInstance().changePeerList(request.getPeerList()); + Pdpb.ResponseHeader responseHeader = + status.isOk() ? 
okHeader : newErrorHeader(status.getCode(), + status.getErrorMsg()); + response = + Pdpb.getChangePeerListResponse.newBuilder().setHeader(responseHeader).build(); + } catch (Exception e) { + log.error("changePeerList exception: ", e); + response = Pdpb.getChangePeerListResponse.newBuilder() + .setHeader(newErrorHeader(-1, e.getMessage())) + .build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public synchronized void onRaftLeaderChanged() { + log.info("onLeaderChanged"); + // channel = null; + // TODO: uncomment later + //if (licenseVerifierService == null) { + // licenseVerifierService = new LicenseVerifierService(pdConfig); + //} + //licenseVerifierService.init(); + + try { + PDWatchSubject.notifyNodeChange(NodeEventType.NODE_EVENT_TYPE_PD_LEADER_CHANGE, + RaftEngine.getInstance().getLeaderGrpcAddress(), 0L); + } catch (ExecutionException | InterruptedException e) { + log.error("failed to notice client", e); + } + } + + @Override + public void balanceLeaders(Pdpb.BalanceLeadersRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getBalanceLeadersMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "balanceLeaders", request); + Pdpb.BalanceLeadersResponse response = null; + try { + taskService.balancePartitionLeader(true); + response = Pdpb.BalanceLeadersResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("balanceLeaders exception: ", e); + response = + Pdpb.BalanceLeadersResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void putLicense(PutLicenseRequest request, + StreamObserver responseObserver) { + PutLicenseResponse response = null; + boolean moved = false; + String bakPath = pdConfig.getLicensePath() + "-bak"; + File bakFile = new File(bakPath); + File licenseFile = new File(pdConfig.getLicensePath()); + try { + byte[] content = request.getContent().toByteArray(); + if (licenseFile.exists()) { + if (bakFile.exists()) { + FileUtils.deleteQuietly(bakFile); + } + FileUtils.moveFile(licenseFile, bakFile); + moved = true; + } + FileUtils.writeByteArrayToFile(licenseFile, content, false); + // build the success response, otherwise onNext(null) would be sent + response = Pdpb.PutLicenseResponse.newBuilder().setHeader(okHeader).build(); + } catch (Exception e) { + log.error("putLicense with error: ", e); + if (moved) { + try { + FileUtils.moveFile(bakFile, licenseFile); + } catch (IOException ex) { + log.error("failed to restore the license file.", ex); + } + } + Pdpb.ResponseHeader header = + newErrorHeader(Pdpb.ErrorType.LICENSE_ERROR_VALUE, e.getMessage()); + response = Pdpb.PutLicenseResponse.newBuilder().setHeader(header).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void delStore(Pdpb.DetStoreRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDelStoreMethod(), request, observer); + return; + } + long storeId = request.getStoreId(); + Pdpb.DetStoreResponse response = null; + try { + Metapb.Store store = storeNodeService.getStore(storeId); + if (Metapb.StoreState.Tombstone == store.getState()) { + // only a store that has been taken offline (Tombstone) can be deleted + storeNodeService.removeStore(storeId); + response = Pdpb.DetStoreResponse.newBuilder() + .setHeader(okHeader) + .setStore(store) + .build(); + } else { + throw new PDException(Pdpb.ErrorType.STORE_PROHIBIT_DELETION_VALUE, + "the store can't be deleted, please check store state!"); + } + } catch (PDException e) { + log.error("delete store exception: ", e); + response = Pdpb.DetStoreResponse.newBuilder() + .setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * check whether the new shard count exceeds the cluster's max shard group capacity + * + * @param newShardCount new shard count + * @return true if the new shard count can be applied, otherwise false + */ + private boolean checkShardCount(int newShardCount) { + try { + var maxCount = pdConfig.getPartition().getMaxShardsPerStore() * + storeNodeService.getActiveStores().size() / + pdConfig.getConfigService().getPartitionCount(); + + if (newShardCount > maxCount) { + log.error("new shard count :{} exceed current cluster max shard count {}", + newShardCount, maxCount); + return false; + } + } catch (Exception e) { + log.error("checkShardCount: {}", e.getMessage()); + } + return true; + } + + /** + * Check whether the store resources are sufficient + */ + public boolean isResourceEnough(int oldShardCount, int newShardCount) { + // whether the active stores have enough resources + try { + // ratio by which the occupied storage space will grow (float cast avoids integer division) + float expansionRatio = (float) newShardCount / oldShardCount; + // currently occupied space + long currentDataSize = 0L; + // space occupied after data expansion + long newDataSize = 0L; + // total available space + long totalAvailable = 0L; + // sum up the currently occupied storage space + for (Metapb.Store store : storeNodeService.getStores()) { + List graphStatsList = store.getStats().getGraphStatsList(); + for (Metapb.GraphStats graphStats : graphStatsList) { + currentDataSize += graphStats.getApproximateSize(); + } + } + // estimate the storage space occupied after expansion + newDataSize = (long) Math.ceil(currentDataSize * expansionRatio); + // sum up the available space on all active stores + List activeStores = storeNodeService.getActiveStores(); + for (Metapb.Store store : activeStores) { + Metapb.StoreStats storeStats = store.getStats(); + totalAvailable += storeStats.getAvailable(); + } + // assuming partitions are evenly distributed, check whether the resources suffice + return totalAvailable > newDataSize - currentDataSize; + } catch (PDException e) { + log.error("isResourceEnough exception: ", e); + return false; + } + }
+ + /** + * + * Perform compaction on rocksdb
+     * 
+ */ + @Override + public void dbCompaction(Pdpb.DbCompactionRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDbCompactionMethod(), request, observer); + return; + } + logService.insertLog(LogService.TASK, "dbCompaction", request); + Pdpb.DbCompactionResponse response = null; + try { + log.info("dbCompaction called, table name: {}", request.getTableName()); + taskService.dbCompaction(request.getTableName()); + response = Pdpb.DbCompactionResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("dbCompaction exception: ", e); + response = Pdpb.DbCompactionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void combineCluster(Pdpb.CombineClusterRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getCombineClusterMethod(), request, observer); + return; + } + + Pdpb.CombineClusterResponse response; + + try { + partitionService.combinePartition(request.getToCount()); + response = Pdpb.CombineClusterResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + response = + Pdpb.CombineClusterResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void combineGraph(Pdpb.CombineGraphRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getCombineGraphMethod(), request, observer); + return; + } + + Pdpb.CombineGraphResponse response; + + try { + partitionService.combineGraphPartition(request.getGraphName(), request.getToCount()); + response = Pdpb.CombineGraphResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + response = Pdpb.CombineGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void deleteShardGroup(Pdpb.DeleteShardGroupRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDeleteShardGroupMethod(), request, observer); + return; + } + + Pdpb.DeleteShardGroupResponse response; + + try { + storeNodeService.deleteShardGroup(request.getGroupId()); + response = Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + response = + Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getShardGroup(Pdpb.GetShardGroupRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetShardGroupMethod(), request, observer); + return; + } + Pdpb.GetShardGroupResponse response; + // TODO + try { + Metapb.ShardGroup shardGroup = storeNodeService.getShardGroup(request.getGroupId()); + response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(okHeader) + .setShardGroup(shardGroup).build(); + } catch (PDException e) { + log.error("getShardGroup exception: ", e); + response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void updateShardGroup(Pdpb.UpdateShardGroupRequest request, + StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdateShardGroupMethod(), request, responseObserver); + return; + } + Pdpb.UpdateShardGroupResponse response; + + try { 
+ var group = request.getShardGroup(); + storeNodeService.updateShardGroup(group.getId(), group.getShardsList(), + group.getVersion(), group.getConfVer()); + response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("updateShardGroup exception, ", e); + response = + Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void updateShardGroupOp(Pdpb.ChangeShardRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdateShardGroupOpMethod(), request, observer); + return; + } + + Pdpb.ChangeShardResponse response; + + try { + storeNodeService.shardGroupOp(request.getGroupId(), request.getShardsList()); + response = Pdpb.ChangeShardResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("changeShard exception, ", e); + response = Pdpb.ChangeShardResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void changeShard(Pdpb.ChangeShardRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getChangeShardMethod(), request, observer); + return; + } + + Pdpb.ChangeShardResponse response; + + try { + partitionService.changeShard(request.getGroupId(), request.getShardsList()); + response = Pdpb.ChangeShardResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("changeShard exception, ", e); + response = Pdpb.ChangeShardResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void updatePdRaft(Pdpb.UpdatePdRaftRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdatePdRaftMethod(), request, observer); + return; + } + + var list = parseConfig(request.getConfig()); + + log.info("update raft request: {}, list: {}", request.getConfig(), list); + + Pdpb.UpdatePdRaftResponse response = + Pdpb.UpdatePdRaftResponse.newBuilder().setHeader(okHeader).build(); + + do { + var leaders = list.stream().filter(s -> s.getKey().equals("leader")) + .collect(Collectors.toList()); + var node = RaftEngine.getInstance().getRaftNode(); + + if (leaders.size() == 1) { + var leaderPeer = leaders.get(0).getValue(); + // change leader + var peers = new HashSet<>(node.listPeers()); + + if (!peerEquals(leaderPeer, node.getLeaderId())) { + if (peers.contains(leaderPeer)) { + log.info("updatePdRaft, transfer to {}", leaderPeer); + node.transferLeadershipTo(leaderPeer); + } else { + response = Pdpb.UpdatePdRaftResponse.newBuilder() + .setHeader(newErrorHeader(6667, + "new leader" + + " not in " + + "raft peers")) + .build(); + } + break; + } + } else { + response = Pdpb.UpdatePdRaftResponse.newBuilder() + .setHeader(newErrorHeader(6666, + "leader size != 1")) + .build(); + break; + } + + Configuration config = new Configuration(); + // add peer + for (var peer : list) { + if (!peer.getKey().equals("learner")) { + config.addPeer(peer.getValue()); + } else { + config.addLearner(peer.getValue()); + } + } + + log.info("pd raft update with new config: {}", config); + + node.changePeers(config, status -> { + if (status.isOk()) { + log.info("updatePdRaft, change peers success"); + } else { + log.error("changePeers status: {}, msg:{}, code: {}, raft error:{}", + status, 
status.getErrorMsg(), status.getCode(), + status.getRaftError()); + } + }); + } while (false); + + observer.onNext(response); + observer.onCompleted(); + } + + public void getCache(GetGraphRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetCacheMethod(), request, observer); + return; + } + CacheResponse response; + try { + response = CacheResponse.newBuilder().mergeFrom(storeNodeService.getCache()) + .setHeader(okHeader).build(); + } catch (PDException e) { + log.error("get cache exception, ", e); + response = CacheResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + public void getPartitions(GetGraphRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionsMethod(), request, observer); + return; + } + CachePartitionResponse response; + List partitions = partitionService.getPartitions(request.getGraphName()); + response = CachePartitionResponse.newBuilder().addAllPartitions(partitions) + .setHeader(okHeader).build(); + observer.onNext(response); + observer.onCompleted(); + } + + + private List> parseConfig(String conf) { + List> result = new LinkedList<>(); + + if (conf != null && conf.length() > 0) { + for (var s : conf.split(",")) { + if (s.endsWith("/leader")) { + result.add(new KVPair<>("leader", + JRaftUtils.getPeerId(s.substring(0, s.length() - 7)))); + } else if (s.endsWith("/learner")) { + result.add(new KVPair<>("learner", + JRaftUtils.getPeerId(s.substring(0, s.length() - 8)))); + } else if (s.endsWith("/follower")) { + result.add(new KVPair<>("follower", + JRaftUtils.getPeerId(s.substring(0, s.length() - 9)))); + } else { + result.add(new KVPair<>("follower", JRaftUtils.getPeerId(s))); + } + } + } + + return result; + } + + private boolean peerEquals(PeerId p1, PeerId p2) { + if (p1 == null && p2 == null) { + return true; + } + if (p1 == null || p2 == null) { + return false; + } + return Objects.equals(p1.getIp(), p2.getIp()) && Objects.equals(p1.getPort(), p2.getPort()); + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java new file mode 100644 index 0000000000..d4b9481e9d --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.grpc.watch.HgPdWatchGrpc; +import org.apache.hugegraph.pd.grpc.watch.WatchRequest; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.watch.PDWatchSubject; +import org.lognet.springboot.grpc.GRpcService; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@GRpcService +public class PDWatchService extends HgPdWatchGrpc.HgPdWatchImplBase { + + @Override + public StreamObserver watch(StreamObserver responseObserver) { + return PDWatchSubject.addObserver(responseObserver); + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java new file mode 100644 index 0000000000..7683e58073 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java @@ -0,0 +1,257 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.service; + +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import org.apache.hugegraph.pd.RegistryService; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.model.PromTargetsModel; +import org.apache.hugegraph.pd.rest.MemberAPI; +import org.apache.hugegraph.pd.util.HgMapCache; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import lombok.extern.slf4j.Slf4j; + +@Service +@Slf4j +public class PromTargetsService { + + private final PromTargetsModel pdModel = PromTargetsModel.of() + .addLabel("__app_name", "pd") + .setScheme("http") + .setMetricsPath( + "/actuator/prometheus"); + private final PromTargetsModel storeModel = PromTargetsModel.of() + .addLabel("__app_name", "store") + .setScheme("http") + .setMetricsPath( + "/actuator/prometheus"); + private final HgMapCache> targetsCache = + HgMapCache.expiredOf(24 * 60 * 60 * 1000);// expired after 24H. 
+ @Autowired + private PDConfig pdConfig; + @Autowired + private PDService pdService; + private RegistryService register; + + private RegistryService getRegister() { + if (this.register == null) { + this.register = new RegistryService(this.pdConfig); + } + return this.register; + } + + public List getAllTargets() { + List res = new LinkedList<>(); + List buf = + this.toModels(this.getRegister().getNodes(Query.newBuilder().build())); + + if (buf != null) { + res.addAll(buf); + } + + res.add(getPdTargets()); + res.add(getStoreTargets()); + + return res; + } + + /** + * @param appName + * @return null if it's not existing + */ + public List getTargets(String appName) { + HgAssert.isArgumentNotNull(appName, "appName"); + switch (appName) { + case "pd": + return Collections.singletonList(this.getPdTargets()); + case "store": + return Collections.singletonList(this.getStoreTargets()); + default: + return this.toModels(this.getRegister() + .getNodes(Query.newBuilder().setAppName(appName).build())); + } + } + + private PromTargetsModel getPdTargets() { + return setTargets(pdModel, () -> this.mergeCache("pd", getPdAddresses())); + } + + private PromTargetsModel getStoreTargets() { + return setTargets(storeModel, () -> this.mergeCache("store", getStoreAddresses())); + } + + private PromTargetsModel setTargets(PromTargetsModel model, Supplier> supplier) { + return model.setTargets(supplier.get()) + .setClusterId(String.valueOf(pdConfig.getClusterId())); + } + + /* to prevent the failure of connection between pd and store or pd and pd.*/ + //TODO: To add a schedule task to refresh targets, not to retrieve in every time. + private Set mergeCache(String key, Set set) { + Set buf = this.targetsCache.get(key); + + if (buf == null) { + buf = new HashSet<>(); + this.targetsCache.put(key, buf); + } + + if (set != null) { + buf.addAll(set); + } + + return buf; + } + + private List toModels(NodeInfos info) { + if (info == null) { + return null; + } + + List nodes = info.getInfoList(); + if (nodes == null || nodes.isEmpty()) { + return null; + } + + List res = + nodes.stream().map(e -> { + Map labels = e.getLabelsMap(); + + String target = labels.get("target"); + if (HgAssert.isInvalid(target)) { + return null; + } + + PromTargetsModel model = PromTargetsModel.of(); + model.addTarget(target); + model.addLabel("__app_name", e.getAppName()); + + labels.forEach((k, v) -> { + k = k.trim(); + switch (k) { + case "metrics": + model.setMetricsPath(v.trim()); + break; + case "scheme": + model.setScheme(v.trim()); + break; + default: + if (k.startsWith("__")) { + model.addLabel(k, v); + } + + } + }); + + + return model; + }) + .filter(e -> e != null) + .collect(Collectors.toList()); + + if (res.isEmpty()) { + return null; + } + return res; + } + + private Set getPdAddresses() { + MemberAPI.CallStreamObserverWrap response = + new MemberAPI.CallStreamObserverWrap<>(); + pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); + List members = null; + + try { + members = response.get().get(0).getMembersList(); + } catch (Throwable e) { + log.error("Failed to get all pd members.", e); + } + + Set res = new HashSet<>(); + if (members != null) { + members.stream().forEach(e -> res.add(e.getRestUrl())); + } + + return res; + } + + private Set getStoreAddresses() { + Set res = new HashSet<>(); + List stores = null; + try { + stores = pdService.getStoreNodeService().getStores(); + } catch (PDException e) { + log.error("Failed to get all stores.", e); + } + + if (stores != null) { + stores.stream().forEach(e 
-> { + String buf = this.getRestAddress(e); + if (buf != null) { + res.add(buf); + } + }); + } + + return res; + } + + //TODO: optimized store registry data, to add host:port of REST server. + private String getRestAddress(Metapb.Store store) { + String address = store.getAddress(); + if (address == null || address.isEmpty()) { + return null; + } + try { + Optional port = store.getLabelsList().stream().map( + e -> { + if ("rest.port".equals(e.getKey())) { + return e.getValue(); + } + return null; + }).filter(e -> e != null).findFirst(); + + if (port.isPresent()) { + address = address.substring(0, address.indexOf(':') + 1); + address = address + port.get(); + + } + } catch (Throwable t) { + log.error("Failed to extract the REST address of store, cause by:", t); + } + return address; + + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java new file mode 100644 index 0000000000..5aa67f93bf --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftStateListener; + +import io.grpc.CallOptions; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; + +public interface ServiceGrpc extends RaftStateListener { + + ConcurrentHashMap channels = new ConcurrentHashMap(); + + default Pdpb.ResponseHeader getResponseHeader(PDException e) { + Pdpb.Error error = + Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage()) + .build(); + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build(); + return header; + } + + default Pdpb.ResponseHeader getResponseHeader() { + Pdpb.Error error = Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK).build(); + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build(); + return header; + } + + default boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + + default void redirectToLeader(ManagedChannel channel, + MethodDescriptor method, + ReqT req, + io.grpc.stub.StreamObserver observer) { + try { + String address = RaftEngine.getInstance().getLeaderGrpcAddress(); + if ((channel = channels.get(address)) == null || channel.isTerminated() || + channel.isShutdown()) { + synchronized (ServiceGrpc.class) { + if ((channel = channels.get(address)) == null || channel.isTerminated() || + channel.isShutdown()) { + while (channel != null && channel.isShutdown() && !channel.isTerminated()) { + channel.awaitTermination(50, TimeUnit.MILLISECONDS); + } + ManagedChannel c = + ManagedChannelBuilder.forTarget(address).usePlaintext().build(); + channels.put(address, c); + channel = c; + } + } + } + io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), + req, observer); + } catch (Exception e) { + e.printStackTrace(); + } + + } + + default void redirectToLeader(MethodDescriptor method, + ReqT req, + io.grpc.stub.StreamObserver observer) { + redirectToLeader(null, method, req, observer); + + } + + @Override + default void onRaftLeaderChanged() { + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java new file mode 100644 index 0000000000..78af36aaea --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.rest.API; +import org.apache.hugegraph.pd.upgrade.VersionScriptFactory; +import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class UpgradeService { + + private static final String VERSION_KEY = "DATA_VERSION"; + + private static final String RUN_LOG_PREFIX = "SCRIPT_RUN_LOG"; + + private final PDConfig pdConfig; + + private final KvService kvService; + + public UpgradeService(PDConfig pdConfig) { + this.pdConfig = pdConfig; + this.kvService = new KvService(pdConfig); + } + + public void upgrade() throws PDException { + + log.info("upgrade service start"); + VersionScriptFactory factory = VersionScriptFactory.getInstance(); + var dataVersion = getDataVersion(); + log.info("current db data version: {}", dataVersion); + for (VersionUpgradeScript script : factory.getScripts()) { + // skip scripts that have already been executed and run only once + if (isExecuted(script.getClass().getName()) && script.isRunOnce()) { + log.info("Script {} has been executed and runs only once", script.getClass().getName()); + continue; + } + + // check the conditions for skipping + if ((dataVersion == null && !script.isRunWithoutDataVersion()) || + (dataVersion != null && !versionCompare( + dataVersion, + script.getHighVersion(), + script.getLowVersion()))) { + log.info( + "Script {} did not match version requirements, current data " + + "version: {}, current version: {}, " + + "script run version ({} to {}), run without data version: {}", + script.getClass().getName(), + dataVersion, + API.VERSION, + script.getHighVersion(), + script.getLowVersion(), + script.isRunWithoutDataVersion()); + continue; + } + + script.runInstruction(pdConfig); + logRun(script.getClass().getName()); + } + + writeCurrentDataVersion(); + } + + private boolean isExecuted(String className) throws PDException { + var ret = kvService.get(RUN_LOG_PREFIX + "/" + className); + return !ret.isEmpty(); + } + + private void logRun(String className) throws PDException { + kvService.put(RUN_LOG_PREFIX + "/" + className, API.VERSION); + } + + private String getDataVersion() throws PDException { + return kvService.get(VERSION_KEY); + } + + private boolean versionCompare(String dataVersion, String high, String low) { + var currentVersion = API.VERSION; + return (high.equals(VersionUpgradeScript.UNLIMITED_VERSION) || + high.compareTo(dataVersion) >= 0) + && (low.equals(VersionUpgradeScript.UNLIMITED_VERSION) || + low.compareTo(currentVersion) <= 0); + } + + private void writeCurrentDataVersion() throws PDException { + log.info("update db version to {}", API.VERSION); + kvService.put(VERSION_KEY, API.VERSION); + } + +}
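+// A hypothetical walk-through (not from this patch) of the gating in upgrade() above,
+// for a script with high version "3.6.2", low version UNLIMITED_VERSION and
+// isRunOnce() == true:
+//   - first startup with data version "3.5.0": versionCompare() passes, the script
+//     runs, and its class name is recorded under the SCRIPT_RUN_LOG prefix;
+//   - any later startup: isExecuted() returns true and the script is run-once, so
+//     it is skipped regardless of versions.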
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java new file mode 100644 index 0000000000..7f4b4fda4d --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.upgrade; + +import java.util.LinkedList; +import java.util.List; + +import org.apache.hugegraph.pd.upgrade.scripts.PartitionMetaUpgrade; +import org.apache.hugegraph.pd.upgrade.scripts.TaskCleanUpgrade; + +public class VersionScriptFactory { + private static final List SCRIPTS = new LinkedList<>(); + private static volatile VersionScriptFactory factory; + + static { + registerScript(new PartitionMetaUpgrade()); + registerScript(new TaskCleanUpgrade()); + } + + private VersionScriptFactory() { + } + + public static VersionScriptFactory getInstance() { + if (factory == null) { + synchronized (VersionScriptFactory.class) { + if (factory == null) { + factory = new VersionScriptFactory(); + } + } + } + return factory; + } + + public static void registerScript(VersionUpgradeScript script) { + SCRIPTS.add(script); + } + + public List getScripts() { + return SCRIPTS; + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java new file mode 100644 index 0000000000..8cd54864aa --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.upgrade; + +import org.apache.hugegraph.pd.config.PDConfig; + +public interface VersionUpgradeScript { + + String UNLIMITED_VERSION = "UNLIMITED_VERSION"; + + /** + * the highest version that needs to run the upgrade instruction + * + * @return high version + */ + String getHighVersion(); + + /** + * the lowest version that needs to run the upgrade instruction + * + * @return low version + */ + String getLowVersion(); + + /** + * Whether to run when PD has no data version, which usually corresponds to versions before 3.6.2 + * + * @return run when pd has no data version + */ + boolean isRunWithoutDataVersion(); + + /** + * whether the script runs just once, ignoring versions + * + * @return true if the script runs only once + */ + boolean isRunOnce(); + + /** + * run the upgrade instruction + */ + void runInstruction(PDConfig config); + +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java new file mode 100644 index 0000000000..e113d95ff4 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.upgrade.scripts; + +import java.util.HashSet; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; +import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class PartitionMetaUpgrade implements VersionUpgradeScript { + + @Override + public String getHighVersion() { + return "3.6.2"; + } + + @Override + public String getLowVersion() { + return UNLIMITED_VERSION; + } + + @Override + public void runInstruction(PDConfig config) { + + log.info("run PartitionMetaUpgrade script"); + var dbStore = new MetadataRocksDBStore(config); + + try { + var partSet = new HashSet(); + for (var graph : dbStore.scanPrefix(Metapb.Graph.parser(), + MetadataKeyHelper.getGraphPrefix())) { + var graphPrefix = MetadataKeyHelper.getPartitionPrefix(graph.getGraphName()); + for (var partition : dbStore.scanPrefix(Metapb.PartitionV36.parser(), + graphPrefix)) { + var newPartition = trans(partition); + var partId = partition.getId(); + log.info("trans partition structure: from {} to {}", partition, newPartition); + // backup + var key36 = MetadataKeyHelper.getPartitionV36Key(graph.getGraphName(), partId); + dbStore.put(key36, partition.toByteArray()); + // write new structure + var key = MetadataKeyHelper.getPartitionKey(graph.getGraphName(), partId); + dbStore.put(key, newPartition.toByteArray()); + + // construct shard group + if (!partSet.contains(partId)) { + var shardGroupKey = MetadataKeyHelper.getShardGroupKey(partId); + var shardGroup = dbStore.getOne(Metapb.ShardGroup.parser(), shardGroupKey); + if (shardGroup == null) { + var shardList = partition.getShardsList(); + if (!shardList.isEmpty()) { + shardGroup = Metapb.ShardGroup.newBuilder() + .setId(partId) + .setVersion(partition.getVersion()) + 
.setConfVer(0) + .setState(partition.getState()) + .addAllShards(shardList) + .build(); + dbStore.put(shardGroupKey, shardGroup.toByteArray()); + log.info("extract shard group from partition, {}", shardGroup); + } else { + throw new PDException(1000, + "trans partition failed, no shard list"); + } + } + partSet.add(partId); + } + + } + } + } catch (Exception e) { + log.error("script: {}, run error : {}", getClass().getName(), e.getMessage()); + } + } + + @Override + public boolean isRunOnce() { + return true; + } + + @Override + public boolean isRunWithoutDataVersion() { + return true; + } + + private Metapb.Partition trans(Metapb.PartitionV36 partition) { + + return Metapb.Partition.newBuilder() + .setId(partition.getId()) + .setGraphName(partition.getGraphName()) + .setStartKey(partition.getStartKey()) + .setEndKey(partition.getEndKey()) + .setVersion(partition.getVersion()) + .setState(partition.getState()) + .setMessage(partition.getMessage()) + .build(); + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java new file mode 100644 index 0000000000..6370f839f9 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.upgrade.scripts; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; +import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class TaskCleanUpgrade implements VersionUpgradeScript { + @Override + public String getHighVersion() { + return UNLIMITED_VERSION; + } + + @Override + public String getLowVersion() { + return UNLIMITED_VERSION; + } + + @Override + public boolean isRunWithoutDataVersion() { + return true; + } + + @Override + public boolean isRunOnce() { + return true; + } + + @Override + public void runInstruction(PDConfig config) { + log.info("run TaskCleanUpgrade script"); + var dbStore = new MetadataRocksDBStore(config); + + try { + byte[] key = MetadataKeyHelper.getAllSplitTaskPrefix(); + log.info("delete split task:{}", dbStore.removeByPrefix(key)); + byte[] key2 = MetadataKeyHelper.getAllMoveTaskPrefix(); + log.info("delete move task:{}", dbStore.removeByPrefix(key2)); + } catch (PDException e) { + throw new RuntimeException(e); + } + + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java new file mode 100644 index 0000000000..f26be26dd9 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.hugegraph.pd.util;
+
+import java.text.ParseException;
+import java.util.Date;
+
+import org.apache.commons.lang3.time.DateUtils;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+
+/**
+ * Date parsing helper that accepts a set of common date/time patterns.
+ */
+public class DateUtil {
+
+    private static final String DATE = "yyyy-MM-dd";
+    private static final String DATETIME = "yyyy-MM-dd HH:mm:ss";
+    private static final String DATETIME_MM = "yyyy-MM-dd HH:mm";
+    private static final String DATETIME_SSS = "yyyy-MM-dd HH:mm:ss.SSS";
+    private static final String TIME = "HH:mm";
+    private static final String TIME_SS = "HH:mm:ss";
+    private static final String SYS_DATE = "yyyy/MM/dd";
+    private static final String SYS_DATETIME = "yyyy/MM/dd HH:mm:ss";
+    private static final String SYS_DATETIME_MM = "yyyy/MM/dd HH:mm";
+    private static final String SYS_DATETIME_SSS = "yyyy/MM/dd HH:mm:ss.SSS";
+    private static final String NONE_DATE = "yyyyMMdd";
+    private static final String NONE_DATETIME = "yyyyMMddHHmmss";
+    private static final String NONE_DATETIME_MM = "yyyyMMddHHmm";
+    private static final String NONE_DATETIME_SSS = "yyyyMMddHHmmssSSS";
+    private static final String[] PATTERNS = new String[]{
+            DATE,
+            DATETIME,
+            DATETIME_MM,
+            DATETIME_SSS,
+            TIME,
+            TIME_SS,
+            SYS_DATE,
+            SYS_DATETIME,
+            SYS_DATETIME_MM,
+            SYS_DATETIME_SSS,
+            NONE_DATE,
+            NONE_DATETIME,
+            NONE_DATETIME_MM,
+            NONE_DATETIME_SSS
+    };
+
+    public static String[] getDefaultPattern() {
+        return PATTERNS;
+    }
+
+    public static Date getDate(String date) throws PDException {
+        try {
+            return DateUtils.parseDate(date, getDefaultPattern());
+        } catch (ParseException e) {
+            throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage());
+        }
+    }
+}
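
A quick usage sketch for the helper above; the input strings are illustrative and simply exercise two of the registered patterns:

import java.util.Date;

import org.apache.hugegraph.pd.common.PDException;
import org.apache.hugegraph.pd.util.DateUtil;

public class DateUtilExample {

    public static void main(String[] args) throws PDException {
        // Both inputs match registered patterns (DATETIME and NONE_DATE)
        Date d1 = DateUtil.getDate("2023-08-07 23:15:28");
        Date d2 = DateUtil.getDate("20230807");
        System.out.println(d1 + " / " + d2);

        // An unregistered pattern surfaces as PDException(UNKNOWN_VALUE)
        try {
            DateUtil.getDate("07.08.2023");
        } catch (PDException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
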
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java
new file mode 100644
index 0000000000..7619254483
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.util;
+
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executor;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hugegraph.pd.common.HgAssert;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public final class HgExecutorUtil {
+
+    private static final Map<String, ThreadPoolExecutor> EXECUTOR_MAP = new ConcurrentHashMap<>();
+    private static final Executor COMMON_EXECUTOR
+            = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
+                                     60L, TimeUnit.SECONDS,
+                                     new SynchronousQueue<>(),
+                                     newThreadFactory("pd-common"));
+
+    public static void execute(Runnable command) {
+        if (command == null) {
+            return;
+        }
+        COMMON_EXECUTOR.execute(command);
+    }
+
+    public static ThreadFactory newThreadFactory(String namePrefix, int priority) {
+        HgAssert.isArgumentNotNull(namePrefix, "namePrefix");
+        return new HgThreadFactory(namePrefix, priority);
+    }
+
+    public static ThreadFactory newThreadFactory(String namePrefix) {
+        HgAssert.isArgumentNotNull(namePrefix, "namePrefix");
+        return new HgDefaultThreadFactory(namePrefix);
+    }
+
+    public static ThreadPoolExecutor getThreadPoolExecutor(String name) {
+        if (name == null) {
+            return null;
+        }
+        return EXECUTOR_MAP.get(name);
+    }
+
+    /**
+     * @see HgExecutorUtil#createExecutor(String, int, int, int)
+     */
+    @Deprecated
+    public static Executor createExecutor(String name, int coreThreads, int maxThreads) {
+        return createExecutor(name, coreThreads, maxThreads, Integer.MAX_VALUE);
+    }
+
+    public static ThreadPoolExecutor createExecutor(String name, int coreThreads, int maxThreads,
+                                                    int queueSize) {
+        ThreadPoolExecutor res = EXECUTOR_MAP.get(name);
+
+        if (res != null) {
+            return res;
+        }
+
+        synchronized (EXECUTOR_MAP) {
+            // Double-check under the lock before creating a new pool
+            res = EXECUTOR_MAP.get(name);
+            if (res != null) {
+                return res;
+            }
+
+            BlockingQueue<Runnable> queue;
+            if (queueSize <= 0) {
+                queue = new SynchronousQueue<>();
+            } else {
+                queue = new LinkedBlockingQueue<>(queueSize);
+            }
+
+            res = new ThreadPoolExecutor(
+                    coreThreads,
+                    maxThreads,
+                    60L, TimeUnit.SECONDS,
+                    queue,
+                    newThreadFactory(name)
+            );
+            EXECUTOR_MAP.put(name, res);
+        }
+
+        return res;
+    }
+
+    /**
+     * A thread factory that names threads with the given prefix and sets their priority.
+     */
+    static class HgThreadFactory implements ThreadFactory {
+
+        private final AtomicInteger threadNumber = new AtomicInteger(1);
+        private final String namePrefix;
+        private final int priority;
+
+        HgThreadFactory(String namePrefix, int priority) {
+            this.namePrefix = namePrefix;
+            this.priority = priority;
+        }
+
+        @Override
+        public Thread newThread(Runnable r) {
+            Thread t = new Thread(null, r,
+                                  namePrefix + "-" + threadNumber.getAndIncrement(),
+                                  0);
+            if (t.isDaemon()) {
+                t.setDaemon(false);
+            }
+            if (t.getPriority() != priority) {
+                t.setPriority(priority);
+            }
+            return t;
+        }
+    }
+
+    /**
+     * The default thread factory, which builds the thread name from the given prefix.
+     */
+    static class HgDefaultThreadFactory implements ThreadFactory {
+
+        private static final AtomicInteger POOL_NUMBER = new AtomicInteger(1);
+        private final AtomicInteger threadNumber = new AtomicInteger(1);
+        private final String namePrefix;
+
+        HgDefaultThreadFactory(String threadNamePrefix) {
+            this.namePrefix = threadNamePrefix + "-" +
+                              POOL_NUMBER.getAndIncrement() +
+                              "-thread-";
+        }
+
+        @Override
+        public Thread newThread(Runnable r) {
+            Thread t = new Thread(null, r,
+                                  namePrefix + threadNumber.getAndIncrement(),
+                                  0);
+            if (t.isDaemon()) {
+                t.setDaemon(false);
+            }
+            if (t.getPriority() != Thread.NORM_PRIORITY) {
+                t.setPriority(Thread.NORM_PRIORITY);
+            }
+            return t;
+        }
+    }
+}
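
A short usage sketch for the pool registry above; the pool name and sizes are illustrative:

import java.util.concurrent.ThreadPoolExecutor;

import org.apache.hugegraph.pd.util.HgExecutorUtil;

public class HgExecutorExample {

    public static void main(String[] args) {
        // Creates the pool on first call; later calls with the same name reuse it
        ThreadPoolExecutor pool = HgExecutorUtil.createExecutor("demo-pool", 2, 4, 1024);
        pool.execute(() -> System.out.println(Thread.currentThread().getName()));

        // Same name, same instance
        assert pool == HgExecutorUtil.getThreadPoolExecutor("demo-pool");

        // Fire-and-forget on the shared cached executor
        HgExecutorUtil.execute(() -> System.out.println("common task"));
        pool.shutdown();
    }
}
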
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java
new file mode 100644
index 0000000000..8b6a4a4dcb
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.util;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Supplier;
+
+/**
+ * A concurrent map cache whose whole content expires by a pluggable policy.
+ *
+ * @param <K> the key type
+ * @param <V> the value type
+ */
+public class HgMapCache<K, V> {
+
+    private final Map<K, V> cache = new ConcurrentHashMap<>();
+    private final Supplier<Boolean> expiry;
+
+    private HgMapCache(Supplier<Boolean> expiredPolicy) {
+        this.expiry = expiredPolicy;
+    }
+
+    public static <K, V> HgMapCache<K, V> expiredOf(long interval) {
+        return new HgMapCache<>(new CycleIntervalPolicy(interval));
+    }
+
+    private boolean isExpired() {
+        if (expiry != null && expiry.get()) {
+            cache.clear();
+            return true;
+        }
+        return false;
+    }
+
+    public void put(K key, V value) {
+        if (key == null || value == null) {
+            return;
+        }
+        this.cache.put(key, value);
+    }
+
+    public V get(K key) {
+        if (isExpired()) {
+            return null;
+        }
+        return this.cache.get(key);
+    }
+
+    public void removeAll() {
+        this.cache.clear();
+    }
+
+    public boolean remove(K key) {
+        if (key != null) {
+            this.cache.remove(key);
+            return true;
+        }
+        return false;
+    }
+
+    public Map<K, V> getAll() {
+        return this.cache;
+    }
+
+    /**
+     * Expires the whole cache once per fixed interval (in milliseconds).
+     */
+    private static class CycleIntervalPolicy implements Supplier<Boolean> {
+
+        private long expireTime = 0;
+        private long interval = 0;
+
+        public CycleIntervalPolicy(long interval) {
+            this.interval = interval;
+            init();
+        }
+
+        private void init() {
+            expireTime = System.currentTimeMillis() + interval;
+        }
+
+        @Override
+        public Boolean get() {
+            if (System.currentTimeMillis() > expireTime) {
+                init();
+                return true;
+            }
+            return false;
+        }
+    }
+}
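
A behavior sketch for the cache above; the 100 ms interval is just for illustration:

import org.apache.hugegraph.pd.util.HgMapCache;

public class HgMapCacheExample {

    public static void main(String[] args) throws InterruptedException {
        // Whole-cache expiry every 100 ms
        HgMapCache<String, Integer> cache = HgMapCache.expiredOf(100L);
        cache.put("a", 1);
        System.out.println(cache.get("a"));   // 1

        Thread.sleep(150);                    // let the interval elapse
        System.out.println(cache.get("a"));   // null: the expiry check cleared the map
    }
}
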
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java
new file mode 100644
index 0000000000..75e4287178
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.util;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public final class IdUtil {
+
+    private static final byte[] LOCK = new byte[0];
+
+    public static String createMillisStr() {
+        return String.valueOf(createMillisId());
+    }
+
+    /**
+     * Create a millisecond-style ID.
+     *
+     * @return the current timestamp; the 1 ms sleep under the lock ensures that
+     *         two calls never return the same value
+     */
+    public static Long createMillisId() {
+        synchronized (LOCK) {
+            try {
+                Thread.sleep(1);
+            } catch (InterruptedException e) {
+                log.error("Failed to sleep", e);
+            }
+            return System.currentTimeMillis();
+        }
+    }
+}
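
A tiny demonstration of the uniqueness property (illustrative only):

import org.apache.hugegraph.pd.util.IdUtil;

public class IdUtilExample {

    public static void main(String[] args) {
        long first = IdUtil.createMillisId();
        long second = IdUtil.createMillisId();
        // The synchronized 1 ms sleep guarantees strictly increasing IDs,
        // at the cost of at most roughly 1000 IDs per second
        System.out.println(first < second);   // true
    }
}
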
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java
new file mode 100644
index 0000000000..c3893cc3ae
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.util.grpc;
+
+import java.lang.reflect.Field;
+
+import io.grpc.Grpc;
+import io.grpc.ServerCall;
+import io.grpc.stub.StreamObserver;
+
+public class StreamObserverUtil {
+
+    static final Object FIELD_LOCK = new Object();
+    static volatile Field callField;
+
+    /**
+     * Fetch the remote peer address of a server-side StreamObserver via reflection.
+     * Returns an empty string if the address cannot be resolved.
+     */
+    public static String getRemoteIP(StreamObserver<?> observer) {
+        String ip = "";
+        try {
+            if (callField == null) {
+                synchronized (FIELD_LOCK) {
+                    if (callField == null) {
+                        Field field = observer.getClass().getDeclaredField("call");
+                        field.setAccessible(true);
+                        callField = field;
+                    }
+                }
+            }
+            ServerCall<?, ?> call = (ServerCall<?, ?>) callField.get(observer);
+            if (call != null) {
+                ip = call.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR).toString();
+            }
+        } catch (Exception e) {
+            // Best-effort only: fall through and return the empty string
+        }
+        return ip;
+    }
+}
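A typical call site would be inside a gRPC service method, as in the sketch below; the method and message names are illustrative placeholders, not part of this patch:

// Sketch: logging the peer address inside a gRPC service implementation.
@Override
public void getMembers(Pdpb.GetMembersRequest request,
                       StreamObserver<Pdpb.GetMembersResponse> observer) {
    String peer = StreamObserverUtil.getRemoteIP(observer);
    log.info("getMembers called from [{}]", peer.isEmpty() ? "unknown" : peer);
    // ... handle the request ...
}
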
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java
new file mode 100644
index 0000000000..79905511f2
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.function.Consumer;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchType;
+
+import com.google.protobuf.util.JsonFormat;
+
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@ThreadSafe
+@Slf4j
+abstract class AbstractWatchSubject {
+
+    private final Map<Long, StreamObserver<WatchResponse>> watcherHolder = new HashMap<>(1024);
+    private final byte[] lock = new byte[0];
+    private final WatchResponse.Builder builder = WatchResponse.newBuilder();
+    private final WatchType watchType;
+
+    protected AbstractWatchSubject(WatchType watchType) {
+        this.watchType = watchType;
+    }
+
+    void addObserver(Long watcherId, StreamObserver<WatchResponse> responseObserver) {
+        synchronized (this.watcherHolder) {
+
+            if (this.watcherHolder.containsKey(watcherId)) {
+                responseObserver.onError(
+                        new Exception(
+                                "The watcher-id[" + watcherId + "] of " + this.watchType.name() +
+                                " subject already exists, please unwatch it first"));
+                return;
+            }
+
+            log.info("Adding a " + this.watchType + "'s watcher, watcher-id is [" + watcherId +
+                     "].");
+            this.watcherHolder.put(watcherId, responseObserver);
+        }
+    }
+
+    void removeObserver(Long watcherId, StreamObserver<WatchResponse> responseObserver) {
+        synchronized (this.watcherHolder) {
+            log.info("Removing a " + this.watchType + "'s watcher, watcher-id is [" + watcherId +
+                     "].");
+            this.watcherHolder.remove(watcherId);
+        }
+        responseObserver.onCompleted();
+    }
+
+    abstract String toNoticeString(WatchResponse res);
+
+    public void notifyError(int code, String message) {
+        synchronized (lock) {
+            Iterator<Map.Entry<Long, StreamObserver<WatchResponse>>> iter =
+                    watcherHolder.entrySet().iterator();
+            while (iter.hasNext()) {
+                Map.Entry<Long, StreamObserver<WatchResponse>> entry = iter.next();
+                Long watcherId = entry.getKey();
+                WatchResponse res = this.builder.setWatcherId(watcherId).build();
+                try {
+                    entry.getValue().onError(
+                            Status.fromCodeValue(code).withDescription(message)
+                                  .asRuntimeException());
+                } catch (Throwable e) {
+                    // Ignored: the watcher's stream may already be closed
+                }
+            }
+        }
+    }
+
+    protected void notifyWatcher(WatchResponse.Builder response) {
+        Iterator<Map.Entry<Long, StreamObserver<WatchResponse>>> iter =
+                watcherHolder.entrySet().iterator();
+        while (iter.hasNext()) {
+            Map.Entry<Long, StreamObserver<WatchResponse>> entry = iter.next();
+            Long watcherId = entry.getKey();
+            WatchResponse res = response.setWatcherId(watcherId).build();
+            try {
+                synchronized (lock) {
+                    entry.getValue().onNext(res);
+                }
+            } catch (Throwable e) {
+                try {
+                    String msg = JsonFormat.printer().print(res);
+                    log.error("Failed to send notice [{}] ({}) to watcher [{}].",
+                              msg, toNoticeString(res), watcherId, e);
+                } catch (Exception ex) {
+                    // Ignored: the response could not even be printed
+                }
+            }
+        }
+    }
+
+    protected void notifyWatcher(Consumer<WatchResponse.Builder> c) {
+        synchronized (lock) {
+
+            if (c == null) {
+                log.error(this.watchType.name() +
+                          "'s notice was abandoned, caused by: notifyWatcher(null)");
+                return;
+            }
+
+            try {
+                c.accept(this.builder.clear());
+            } catch (Throwable t) {
+                log.error(this.watchType.name() + "'s notice was abandoned, caused by:", t);
+                return;
+            }
+
+            Iterator<Map.Entry<Long, StreamObserver<WatchResponse>>> iter =
+                    watcherHolder.entrySet().iterator();
+
+            while (iter.hasNext()) {
+                Map.Entry<Long, StreamObserver<WatchResponse>> entry = iter.next();
+                Long watcherId = entry.getKey();
+                WatchResponse res = this.builder.setWatcherId(watcherId).build();
+
+                try {
+                    entry.getValue().onNext(res);
+                } catch (Throwable e) {
+                    log.error("Failed to send " + this.watchType.name() + "'s notice[" +
+                              toNoticeString(res) +
+                              "] to watcher[" + watcherId + "].", e);
+
+                    // TODO: retry several times before dropping the watcher?
+                    iter.remove();
+
+                    log.error("Removed a " + this.watchType.name() + "'s watcher[" +
+                              entry.getKey() +
+                              "], because of a single failure to send.", e);
+                }
+            }
+        }
+    }
+}
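
For orientation, the sketch below wires a concrete subject (PartitionChangeSubject, defined later in this patch) to a stand-in observer; in the real server the observer is a client's watch stream, and the graph name and IDs here are illustrative:

package org.apache.hugegraph.pd.watch;

import org.apache.hugegraph.pd.grpc.watch.WatchChangeType;
import org.apache.hugegraph.pd.grpc.watch.WatchResponse;

import io.grpc.Status;
import io.grpc.stub.StreamObserver;

public class WatchSubjectExample {

    public static void main(String[] args) {
        // Stand-in observer; in the real server it is the client's watch stream
        StreamObserver<WatchResponse> observer = new StreamObserver<>() {
            @Override
            public void onNext(WatchResponse res) {
                System.out.println("notice for watcher " + res.getWatcherId());
            }

            @Override
            public void onError(Throwable t) {
            }

            @Override
            public void onCompleted() {
            }
        };

        PartitionChangeSubject subject = new PartitionChangeSubject();
        subject.addObserver(1L, observer);

        // Broadcasts one notice to every registered watcher
        subject.notifyWatcher(WatchChangeType.WATCH_CHANGE_TYPE_ALTER, "graph1", 7);

        // Push an error status to all watchers (e.g. on shutdown)
        subject.notifyError(Status.Code.UNAVAILABLE.value(), "pd is shutting down");
    }
}
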
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java
new file mode 100644
index 0000000000..0a2dbd84b5
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java
@@ -0,0 +1,288 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.function.BiPredicate;
+
+import org.apache.hugegraph.pd.KvService;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.config.PDConfig;
+import org.apache.hugegraph.pd.grpc.kv.WatchEvent;
+import org.apache.hugegraph.pd.grpc.kv.WatchKv;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchState;
+import org.apache.hugegraph.pd.grpc.kv.WatchType;
+import org.apache.hugegraph.pd.raft.RaftEngine;
+import org.apache.hugegraph.pd.store.RaftKVStore;
+
+import io.grpc.StatusRuntimeException;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Handles KV watch subscriptions and responses.
+ **/
+@Slf4j
+public class KvWatchSubject {
+
+    public static final String KEY_DELIMITER = "KW";
+    public static final String PREFIX_DELIMITER = "PW";
+    public static final String ALL_PREFIX = "W";
+    public static final long WATCH_TTL = 20000L;
+    private static final ConcurrentMap<String, StreamObserver<WatchResponse>> clients =
+            new ConcurrentHashMap<>();
+    private final KvService kvService;
+    BiPredicate<String, String> equal = String::equals;
+    BiPredicate<String, String> startWith = String::startsWith;
+
+    /**
+     * The following three groups of keys are used:
+     * clients -> W@KW@key@clientId
+     * rocksdb key1 -> W@KW@key@clientId
+     * rocksdb key2 -> W@clientId@KW@key@clientId
+     **/
+    public KvWatchSubject(PDConfig pdConfig) {
+        this.kvService = new KvService(pdConfig);
+    }
+
+    public String getWatchKey(String key, String watchDelimiter) {
+        return KvService.getKeyWithoutPrefix(ALL_PREFIX, watchDelimiter, key);
+    }
+
+    private void addWatchKey(String key, String delimiter, long clientId) throws PDException {
+        String watchKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId);
+        kvService.put(watchKey, "", WATCH_TTL);
+        String clientFirstKey =
+                KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, key, clientId);
+        kvService.put(clientFirstKey, "", WATCH_TTL);
+    }
+    private void removeWatchKey(String key, String delimiter, long clientId) throws PDException {
+        String watchKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId);
+        kvService.delete(watchKey);
+        String clientFirstKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, key);
+        kvService.deleteWithPrefix(clientFirstKey);
+    }
+
+    /**
+     * Add a watcher.
+     *
+     * @param key       the key being watched
+     * @param clientId  the client identifier
+     * @param observer  the response stream of the client
+     * @param delimiter watch-type marker, distinguishing prefix watches from exact-key watches
+     * @throws PDException on storage errors
+     */
+    public void addObserver(String key, long clientId, StreamObserver<WatchResponse> observer,
+                            String delimiter) throws PDException {
+        String keyWithoutPrefix =
+                KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId);
+        clients.putIfAbsent(keyWithoutPrefix, observer);
+        addWatchKey(key, delimiter, clientId);
+        log.info("client:{}, start to watch key:{}", clientId, key);
+    }
+
+    public void removeObserver(String key, long clientId, String delimiter) throws PDException {
+        removeWatchKey(key, delimiter, clientId);
+        String keyWithoutPrefix =
+                KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId);
+        clients.remove(keyWithoutPrefix);
+    }
+
+    /**
+     * Notify watchers. Both key watches and prefix watches go through this method;
+     * only the predicate differs.
+     *
+     * @param key       the changed key
+     * @param watchType the change type, typically put or delete
+     * @param predicate equality or prefix match, selecting key or prefix watchers
+     * @param kvs       the changed key-value pairs
+     * @throws PDException on storage errors
+     */
+    public void notifyObserver(String key, WatchType watchType,
+                               BiPredicate<String, String> predicate,
+                               WatchKv... kvs) throws PDException {
+        boolean isEqual = predicate.equals(equal);
+        String watchDelimiter = isEqual ? KEY_DELIMITER : PREFIX_DELIMITER;
+        String watchKeyPrefix = isEqual ? key : "";
key : ""; + String storeKey = getWatchKey(watchKeyPrefix, watchDelimiter); + Map map = kvService.scanWithPrefix(storeKey); + String delimiter = String.valueOf(KvService.KV_DELIMITER); + WatchResponse watchResponse; + for (String keyAndClient : map.keySet()) { + String[] values = keyAndClient.split(delimiter); + assert values.length == 4; + String watchKey = values[2]; + String c = values[3]; + long clientId = Long.parseLong(c); + LinkedList watchEvents = new LinkedList<>(); + for (WatchKv kv : kvs) { + String kvKey = kv.getKey(); + boolean match = predicate.test(kvKey, watchKey); + if (!match) { + continue; + } + WatchKv watchKv = + WatchKv.newBuilder().setKey(kvKey).setValue(kv.getValue()).build(); + WatchEvent event = + WatchEvent.newBuilder().setCurrent(watchKv).setType(watchType).build(); + watchEvents.add(event); + } + StreamObserver observer = clients.get(keyAndClient); + watchResponse = + WatchResponse.newBuilder().setState(WatchState.Started).setClientId(clientId) + .addAllEvents(watchEvents).build(); + + try { + if (observer != null) { + synchronized (observer) { + // log.info("notifyObserver for clientId:{}", clientId); + observer.onNext(watchResponse); + } + } else { + log.info("cannot find StreamObserver for clientId:{}", clientId); + } + } catch (StatusRuntimeException ignored) { + + } catch (Exception e) { + log.warn("notifyObserver with error:{}", clientId, e); + } + } + } + + public void notifyAllObserver(String key, WatchType watchType, WatchKv[] kvs) throws + PDException { + notifyObserver(key, watchType, equal, kvs); + notifyObserver(key, watchType, startWith, kvs); + } + + /** + * 续活客户端 + * 1.往客户端发一个alive的消息,带重试哈 + * 2.如果有响应,则续活之前保存的那两组key + * 3.如果多次都失败,则删除内存和rocksdb的数据 + */ + public void keepClientAlive() { + WatchResponse testAlive = WatchResponse.newBuilder().setState(WatchState.Alive).build(); + Set>> entries = clients.entrySet(); + Map.Entry>[] array = + entries.toArray(new Map.Entry[0]); + Arrays.stream(array).parallel().forEach(entry -> { + StreamObserver value = entry.getValue(); + String key = entry.getKey(); + String delimiter = KvService.getDelimiter(); + String client = key.split(delimiter)[3]; + String clientKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, client); + if (value == null) { + removeClient(null, key, clientKey); + } + boolean done = false; + String removes = client + KvService.KV_DELIMITER; + for (int i = 0; i < 3; i++) { + try { + synchronized (value) { + value.onNext(testAlive); + } + Map clientKeys = kvService.scanWithPrefix(clientKey); + for (Map.Entry keyEntry : clientKeys.entrySet()) { + String entryKey = keyEntry.getKey(); + String aliveKey = entryKey.replaceFirst(removes, ""); + boolean keepAliveKey = kvService.keepAlive(aliveKey); + boolean keepAliveEntry = kvService.keepAlive(entryKey); + // log.info("keep alive client:{},{}:{},{}:{}", client, aliveKey, + // keepAliveKey, + // entryKey, + // keepAliveEntry); + done = true; + } + break; + } catch (Exception e) { + try { + Thread.sleep(100); + } catch (InterruptedException ex) { + log.info("keep alive client {} with error:{}", client, e); + } + } + } + if (!done) { + log.info("remove client {} for no data", client); + removeClient(value, key, clientKey); + } + }); + } + + private void removeClient(StreamObserver value, String key, String clientKey) { + try { + log.info("remove null observer,client:", clientKey); + if (RaftEngine.getInstance().isLeader()) { + kvService.deleteWithPrefix(clientKey); + } else { + // todo: delete records via client + var store = 
diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java
new file mode 100644
index 0000000000..0e7c26dde5
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.grpc.watch.NodeEventType;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchType;
+
+/**
+ * The subject of store-node change.
+ */ +@ThreadSafe +final class NodeChangeSubject extends AbstractWatchSubject { + + NodeChangeSubject() { + super(WatchType.WATCH_TYPE_STORE_NODE_CHANGE); + } + + @Override + String toNoticeString(WatchResponse res) { + String sb = "graph:" + res.getNodeResponse().getGraph() + + "," + + "nodeId:" + res.getNodeResponse().getNodeId(); + return sb; + } + + public void notifyWatcher(NodeEventType nodeEventType, String graph, long nodeId) { + isArgumentNotNull(nodeEventType, "nodeEventType"); + + super.notifyWatcher(builder -> { + builder.setNodeResponse( + builder.getNodeResponseBuilder().clear() + .setGraph(graph) + .setNodeId(nodeId) + .setNodeEventType(nodeEventType) + .build() + ); + + }); + } + + @Override + public void notifyError(int code, String message) { + super.notifyError(code, message); + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java new file mode 100644 index 0000000000..92ef98e8e6 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java @@ -0,0 +1,216 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.watch.NodeEventType;
+import org.apache.hugegraph.pd.grpc.watch.WatchChangeType;
+import org.apache.hugegraph.pd.grpc.watch.WatchCreateRequest;
+import org.apache.hugegraph.pd.grpc.watch.WatchRequest;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchType;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+@ThreadSafe
+public class PDWatchSubject implements StreamObserver<WatchRequest> {
+
+    public static final Map<String, AbstractWatchSubject> subjectHolder =
+            new ConcurrentHashMap<>();
+    private static final byte[] lock = new byte[0];
+
+    static {
+        subjectHolder.put(WatchType.WATCH_TYPE_PARTITION_CHANGE.name(),
+                          new PartitionChangeSubject());
+        subjectHolder.put(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name(), new NodeChangeSubject());
+        subjectHolder.put(WatchType.WATCH_TYPE_GRAPH_CHANGE.name(), new NodeChangeSubject());
+        subjectHolder.put(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name(),
+                          new org.apache.hugegraph.pd.watch.ShardGroupChangeSubject());
+    }
+
+    private final StreamObserver<WatchResponse> responseObserver;
+    private AbstractWatchSubject subject;
+    private Long watcherId;
+
+    private PDWatchSubject(StreamObserver<WatchResponse> responseObserver) {
+        this.responseObserver = responseObserver;
+    }
+
+    public static StreamObserver<WatchRequest> addObserver(
+            StreamObserver<WatchResponse> responseObserver) {
+        isArgumentNotNull(responseObserver, "responseObserver");
+        return new PDWatchSubject(responseObserver);
+    }
+
+    /**
+     * Notify partition change
+     *
+     * @param changeType  change type
+     * @param graph       name of graph
+     * @param partitionId id of partition
+     */
+    public static void notifyPartitionChange(ChangeType changeType, String graph,
+                                             int partitionId) {
+        ((PartitionChangeSubject) subjectHolder.get(WatchType.WATCH_TYPE_PARTITION_CHANGE.name()))
+                .notifyWatcher(changeType.getGrpcType(), graph, partitionId);
+    }
+
+    public static void notifyShardGroupChange(ChangeType changeType, int groupId,
+                                              Metapb.ShardGroup group) {
+        ((org.apache.hugegraph.pd.watch.ShardGroupChangeSubject) subjectHolder.get(
+                WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name()))
+                .notifyWatcher(changeType.getGrpcType(), groupId, group);
+    }
+
+    /**
+     * Notify store-node change
+     *
+     * @param changeType change type
+     * @param graph      name of graph
+     * @param nodeId     id of store node
+     */
+    public static void notifyNodeChange(NodeEventType changeType, String graph, long nodeId) {
+        ((NodeChangeSubject) subjectHolder.get(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name()))
+                .notifyWatcher(changeType, graph, nodeId);
+    }
+
+    public static void notifyChange(WatchType type, WatchResponse.Builder builder) {
+        subjectHolder.get(type.name()).notifyWatcher(builder);
+    }
+
+    public static void notifyError(int code, String message) {
+        subjectHolder.forEach((k, v) -> v.notifyError(code, message));
+    }
+
+    private static Long createWatcherId() {
+        synchronized (lock) {
+            Thread.yield();
+            try {
+                // Sleep 1 ms so two consecutive calls never return the same id
+                Thread.sleep(1);
+            } catch (InterruptedException e) {
+                log.error("Failed to sleep", e);
+            }
+            return System.currentTimeMillis();
+        }
+    }
+
+    private void cancelWatcher() {
+        if (this.subject == null) {
+            this.responseObserver.onError(
+                    new Exception("Invoke cancel-watch before create-watch."));
+            return;
} + + this.subject.removeObserver(this.watcherId, this.responseObserver); + } + + + private WatchType getWatchType(WatchCreateRequest request) { + WatchType watchType = request.getWatchType(); + + if (watchType.equals(WatchType.WATCH_TYPE_UNKNOWN)) { + this.responseObserver.onError(new Exception("unknown watch type.")); + return null; + } + + return watchType; + } + + private AbstractWatchSubject getSubject(WatchType watchType) { + AbstractWatchSubject subject = subjectHolder.get(watchType.name()); + + if (subject == null) { + responseObserver.onError(new Exception("Unsupported watch-type: " + watchType.name())); + return null; + } + + return subject; + } + + + private void addWatcher(WatchCreateRequest request) { + if (this.subject != null) { + return; + } + WatchType watchType = getWatchType(request); + if (watchType == null) { + return; + } + + this.subject = getSubject(watchType); + this.watcherId = createWatcherId(); + + this.subject.addObserver(this.watcherId, this.responseObserver); + } + + @Override + public void onNext(WatchRequest watchRequest) { + + if (watchRequest.hasCreateRequest()) { + this.addWatcher(watchRequest.getCreateRequest()); + return; + } + + if (watchRequest.hasCancelRequest()) { + this.cancelWatcher(); + } + + } + + @Override + public void onError(Throwable throwable) { + this.cancelWatcher(); + } + + @Override + public void onCompleted() { + this.cancelWatcher(); + } + + public enum ChangeType { + ADD(WatchChangeType.WATCH_CHANGE_TYPE_ADD), + ALTER(WatchChangeType.WATCH_CHANGE_TYPE_ALTER), + DEL(WatchChangeType.WATCH_CHANGE_TYPE_DEL), + + USER_DEFINED(WatchChangeType.WATCH_CHANGE_TYPE_SPECIAL1); + + private final WatchChangeType grpcType; + + ChangeType(WatchChangeType grpcType) { + this.grpcType = grpcType; + } + + public WatchChangeType getGrpcType() { + return this.grpcType; + } + } + +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java new file mode 100644 index 0000000000..85f5e8b7f0 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.watch; + +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentValid; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.pd.grpc.watch.WatchChangeType; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchType; + +/** + * The subject of partition change. 
+ */ +@ThreadSafe +final class PartitionChangeSubject extends AbstractWatchSubject { + + PartitionChangeSubject() { + super(WatchType.WATCH_TYPE_PARTITION_CHANGE); + } + + @Override + String toNoticeString(WatchResponse res) { + String sb = "graph:" + res.getPartitionResponse().getGraph() + + "," + + "partitionId:" + res.getPartitionResponse().getPartitionId(); + return sb; + } + + public void notifyWatcher(WatchChangeType changeType, String graph, int partitionId) { + isArgumentNotNull(changeType, "changeType"); + isArgumentValid(graph, "graph"); + + super.notifyWatcher(builder -> { + builder.setPartitionResponse( + builder.getPartitionResponseBuilder().clear() + .setGraph(graph) + .setPartitionId(partitionId) + .setChangeType(changeType) + .build() + ); + + }); + } + +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java new file mode 100644 index 0000000000..d9cfde8e73 --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.watch; + +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.watch.WatchChangeType; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchType; + +public class ShardGroupChangeSubject extends AbstractWatchSubject { + + protected ShardGroupChangeSubject() { + super(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE); + } + + @Override + String toNoticeString(WatchResponse res) { + StringBuilder sb = new StringBuilder(); + sb.append("shard group:") + .append(res.getShardGroupResponse().getShardGroup().toString().replace("\n", " ")); + return sb.toString(); + } + + public void notifyWatcher(WatchChangeType changeType, int groupId, + Metapb.ShardGroup shardGroup) { + isArgumentNotNull(changeType, "changeType"); + + super.notifyWatcher(builder -> { + builder.setShardGroupResponse( + builder.getShardGroupResponseBuilder().clear() + .setShardGroupId(groupId) + .setType(changeType) + .setShardGroup(shardGroup) + .build() + ); + }); + } +} diff --git a/hugegraph-pd/hg-pd-service/src/main/resources/application.yml b/hugegraph-pd/hg-pd-service/src/main/resources/application.yml new file mode 100644 index 0000000000..25471b6cce --- /dev/null +++ b/hugegraph-pd/hg-pd-service/src/main/resources/application.yml @@ -0,0 +1,80 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+  application:
+    name: hugegraph-pd
+
+management:
+  metrics:
+    export:
+      prometheus:
+        enabled: true
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+
+grpc:
+  port: 8686
+  # gRPC service address
+  host: 127.0.0.1
+  netty-server:
+    max-inbound-message-size: 100MB
+
+license:
+  verify-path: 'conf/verify-license.json'
+  license-path: 'conf/hugegraph.license'
+
+server:
+  port: 8620
+
+pd:
+  # Interval (in seconds) between periodic cluster health checks
+  patrol-interval: 300
+  # Storage path
+  data-path: tmp/pd/8610
+
+  # Minimum number of store nodes; below this, the cluster stops accepting writes
+  initial-store-count: 1
+  # Initial store list; stores in this list are activated automatically
+  initial-store-list: 127.0.0.1:8502
+
+raft:
+  # Raft service address of this host
+  address: 127.0.0.1:8610
+  # Service addresses of the PD cluster
+  peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
+  # Raft RPC read/write timeout, in milliseconds
+  rpc-timeout: 10000
+  # Snapshot interval, in seconds
+  snapshotInterval: 300
+  metrics: true
+store:
+  # Store heartbeat timeout, in seconds; past this, the store is considered
+  # temporarily unavailable and its leaders move to other replicas
+  keepAlive-timeout: 300
+  # Store offline time, in seconds; past this, the store is considered
+  # permanently unavailable and its replicas are reassigned to other machines
+  max-down-time: 86400
+partition:
+  # Default number of replicas per partition
+  default-shard-count: 3
+  # Default max replicas per machine;
+  # initial partition count = store-max-shard-count * store-number / default-shard-count
+  store-max-shard-count: 12
+
+discovery:
+  # Max number of missed heartbeats after client registration;
+  # past this, the registration is removed
+  heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/main/resources/banner.txt b/hugegraph-pd/hg-pd-service/src/main/resources/banner.txt
new file mode 100644
index 0000000000..27babf0e9a
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/resources/banner.txt
@@ -0,0 +1,6 @@
+  _    _  _____        _____  _____
+ | |  | |/ ____|      |  __ \|  __ \
+ | |__| | |  __ ______| |__) | |  | |
+ |  __  | | |_ |______|  ___/| |  | |
+ | |  | | |__| |      | |    | |__| |
+ |_|  |_|\_____|      |_|    |_____/
diff --git a/hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml b/hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml
new file mode 100644
index 0000000000..a26fe62d9d
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml
@@ -0,0 +1,139 @@
+<!-- log4j2 configuration (139 lines; the XML markup did not survive extraction;
+     the recoverable values are the log directory property "logs" and the
+     file-name property "hugegraph-pd") -->
diff --git a/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer0.java b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer0.java
new file mode 100644
index 0000000000..c28c098aef
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer0.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package live;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.boot.HugePDServer;
+import org.springframework.boot.SpringApplication;
+
+public class PDServer0 {
+
+    static String SERVER_NAME = "server0";
+    static String DATA_PATH = "tmp/8686";
+
+    public static void main(String[] args) {
+        // deleteDirectory(new File(DATA_PATH));
+
+        SpringApplication.run(HugePDServer.class,
+                              String.format("--spring.profiles.active=%s", SERVER_NAME));
+        System.out.println(SERVER_NAME + " started.");
+    }
+
+    public static void deleteDirectory(File dir) {
+        try {
+            FileUtils.deleteDirectory(dir);
+        } catch (IOException e) {
+            System.out.printf("Failed to delete directory %s: %s%n", dir, e.getMessage());
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer1.java b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer1.java
new file mode 100644
index 0000000000..75ee0f3cd5
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer1.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package live;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.boot.HugePDServer;
+import org.springframework.boot.SpringApplication;
+
+public class PDServer1 {
+
+    static String SERVER_NAME = "server1";
+    static String DATA_PATH = "tmp/8686";
+
+    public static void main(String[] args) {
+        deleteDirectory(new File(DATA_PATH));
+        SpringApplication.run(HugePDServer.class,
+                              String.format("--spring.profiles.active=%s", SERVER_NAME));
+        System.out.println(SERVER_NAME + " started.");
+    }
+
+    public static void deleteDirectory(File dir) {
+        try {
+            FileUtils.deleteDirectory(dir);
+        } catch (IOException e) {
+            System.out.printf("Failed to delete directory %s: %s%n", dir, e.getMessage());
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer2.java b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer2.java
new file mode 100644
index 0000000000..9c9096e3c9
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer2.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.
See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package live;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.boot.HugePDServer;
+import org.springframework.boot.SpringApplication;
+
+public class PDServer2 {
+
+    static String SERVER_NAME = "server2";
+    static String DATA_PATH = "tmp/8687";
+
+    public static void main(String[] args) {
+        // deleteDirectory(new File(DATA_PATH));
+        SpringApplication.run(HugePDServer.class,
+                              String.format("--spring.profiles.active=%s", SERVER_NAME));
+        System.out.println(SERVER_NAME + " started.");
+    }
+
+    public static void deleteDirectory(File dir) {
+        try {
+            FileUtils.deleteDirectory(dir);
+        } catch (IOException e) {
+            System.out.printf("Failed to delete directory %s: %s%n", dir, e.getMessage());
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer3.java b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer3.java
new file mode 100644
index 0000000000..258f677ae7
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer3.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package live;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hugegraph.pd.boot.HugePDServer;
+import org.springframework.boot.SpringApplication;
+
+public class PDServer3 {
+
+    static String SERVER_NAME = "server3";
+    static String DATA_PATH = "tmp/8688";
+
+    public static void main(String[] args) {
+        // deleteDirectory(new File(DATA_PATH));
+        SpringApplication.run(HugePDServer.class,
+                              String.format("--spring.profiles.active=%s", SERVER_NAME));
+        System.out.println(SERVER_NAME + " started.");
+    }
+
+    public static void deleteDirectory(File dir) {
+        try {
+            FileUtils.deleteDirectory(dir);
+        } catch (IOException e) {
+            System.out.printf("Failed to delete directory %s: %s%n", dir, e.getMessage());
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml
new file mode 100644
index 0000000000..5e1d63e943
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml
@@ -0,0 +1,71 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+  application:
+    name: hugegraph-pd
+
+management:
+  metrics:
+    export:
+      prometheus:
+        enabled: true
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+
+grpc:
+  port: 8686
+  netty-server:
+    max-inbound-message-size: 100MB
+
+server:
+  port: 8620
+
+pd:
+
+  patrol-interval: 3000000
+  data-path: tmp/8686
+  # Minimum number of store nodes; below this, the cluster stops accepting writes
+  initial-store-count: 1
+  # Initial store list; stores in this list are activated automatically
+  initial-store-list: 127.0.0.1:8500
+
+raft:
+  address: 127.0.0.1:8610
+  # Raft cluster
+  peers-list: 127.0.0.1:8610
+  # Raft RPC read/write timeout, in milliseconds
+  rpc-timeout: 10000
+  # Snapshot interval, in seconds
+  snapshotInterval: 30000
+  metrics: true
+store:
+  # Store heartbeat timeout, in seconds; past this, the store is considered
+  # temporarily unavailable and its leaders move to other replicas
+  keepAlive-timeout: 300
+  # Store offline time, in seconds; past this, the store is considered
+  # permanently unavailable and its replicas are reassigned to other machines
+  max-down-time: 180000
+partition:
+  # Default number of replicas per partition
+  default-shard-count: 3
+  # Default max replicas per machine;
+  # initial partition count = store-max-shard-count * store-number / default-shard-count
+  store-max-shard-count: 12
+
+discovery:
+  # Max number of missed heartbeats after client registration;
+  # past this, the registration is removed
+  heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml
new file mode 100644
index 0000000000..7cb53fe1c8
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml
@@ -0,0 +1,71 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml
new file mode 100644
index 0000000000..7cb53fe1c8
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml
@@ -0,0 +1,71 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+  application:
+    name: hugegraph-pd
+
+management:
+  metrics:
+    export:
+      prometheus:
+        enabled: true
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+
+grpc:
+  port: 8686
+  netty-server:
+    max-inbound-message-size: 100MB
+
+server:
+  port: 8620
+
+pd:
+
+  patrol-interval: 3000000
+  data-path: tmp/8686
+  # Minimum number of store nodes; below this the cluster stops accepting writes
+  initial-store-count: 1
+  # Initial store list; stores in this list are activated automatically
+  initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
+  #initial-store-list: 127.0.0.1:8501
+raft:
+  address: 127.0.0.1:8610
+  # raft cluster peers
+  peers-list: 127.0.0.1:8610
+  # raft rpc read/write timeout, in milliseconds
+  rpc-timeout: 10000
+  # snapshot generation interval, in seconds
+  snapshotInterval: 30000
+  metrics: true
+store:
+  # store heartbeat timeout, in seconds; past this the store is considered
+  # temporarily unavailable and its leaders are moved to other replicas
+  keepAlive-timeout: 300
+  # store offline time, in seconds; past this the store is considered
+  # permanently down and its replicas are re-allocated to other machines
+  max-down-time: 180000
+partition:
+  # default number of replicas per partition
+  default-shard-count: 3
+  # default max shards per machine; initial partition count =
+  # store-max-shard-count * store-number / default-shard-count
+  store-max-shard-count: 6
+
+discovery:
+  # maximum number of missed heartbeats after a client registers; beyond this
+  # the registration is removed
+  heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml
new file mode 100644
index 0000000000..5e1dd50a98
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml
@@ -0,0 +1,73 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+  application:
+    name: hugegraph-pd
+
+management:
+  metrics:
+    export:
+      prometheus:
+        enabled: true
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+
+grpc:
+  port: 8687
+  host: 127.0.0.1
+  netty-server:
+    max-inbound-message-size: 100MB
+
+server:
+  port: 8621
+
+pd:
+  # cluster ID, used to tell different PD clusters apart
+  cluster_id: 1
+  patrol-interval: 300000
+  data-path: tmp/8687
+  enable-batch-load: true
+
+raft:
+  enable: true
+  address: 127.0.0.1:8611
+  # raft cluster peers
+  peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
+  # raft rpc read/write timeout, in milliseconds
+  rpc-timeout: 10000
+  # snapshot generation interval, in seconds
+  snapshotInterval: 300
+  metrics: true
+  # initial store list; stores in this list are activated automatically
+  initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
+store:
+  # store heartbeat timeout, in seconds; past this the store is considered
+  # temporarily unavailable and its leaders are moved to other replicas
+  keepAlive-timeout: 300
+  # store offline time, in seconds; past this the store is considered
+  # permanently down and its replicas are re-allocated to other machines
+  max-down-time: 1800
+partition:
+  # default number of replicas per partition
+  default-shard-count: 3
+  # default max shards per machine; initial partition count =
+  # store-max-shard-count * store-number / default-shard-count
+  store-max-shard-count: 3
+
+discovery:
+  # maximum number of missed heartbeats after a client registers; beyond this
+  # the registration is removed
+  heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml
new file mode 100644
index 0000000000..d2b88950ab
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml
@@ -0,0 +1,73 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+spring:
+  application:
+    name: hugegraph-pd
+
+management:
+  metrics:
+    export:
+      prometheus:
+        enabled: true
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+
+grpc:
+  port: 8688
+  host: 127.0.0.1
+  netty-server:
+    max-inbound-message-size: 100MB
+
+server:
+  port: 8622
+
+pd:
+  # cluster ID, used to tell different PD clusters apart
+  cluster_id: 1
+  patrol-interval: 300000
+  data-path: tmp/8688
+  enable-batch-load: true
+
+raft:
+  enable: true
+  address: 127.0.0.1:8612
+  # raft cluster peers
+  peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612
+  # raft rpc read/write timeout, in milliseconds
+  rpc-timeout: 10000
+  # snapshot generation interval, in seconds
+  snapshotInterval: 300
+  metrics: true
+  # initial store list; stores in this list are activated automatically
+  initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503
+store:
+  # store heartbeat timeout, in seconds; past this the store is considered
+  # temporarily unavailable and its leaders are moved to other replicas
+  keepAlive-timeout: 300
+  # store offline time, in seconds; past this the store is considered
+  # permanently down and its replicas are re-allocated to other machines
+  max-down-time: 1800
+partition:
+  # default number of replicas per partition
+  default-shard-count: 3
+  # default max shards per machine; initial partition count =
+  # store-max-shard-count * store-number / default-shard-count
+  store-max-shard-count: 3
+
+discovery:
+  # maximum number of missed heartbeats after a client registers; beyond this
+  # the registration is removed
+  heartbeat-try-count: 3
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/banner.txt b/hugegraph-pd/hg-pd-service/src/test/resources/banner.txt
new file mode 100644
index 0000000000..27babf0e9a
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/banner.txt
@@ -0,0 +1,6 @@
+ _    _  _____        _____  _____
+| |  | |/ ____|      |  __ \|  __ \
+| |__| | |  __ ______| |__) | |  | |
+|  __  | | |_ |______|  ___/| |  | |
+| |  | | |__| |      | |    | |__| |
+|_|  |_|\_____|      |_|    |_____/
diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/log4j2.xml b/hugegraph-pd/hg-pd-service/src/test/resources/log4j2.xml
new file mode 100644
index 0000000000..d117f8328c
--- /dev/null
+++ b/hugegraph-pd/hg-pd-service/src/test/resources/log4j2.xml
@@ -0,0 +1,139 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- [The markup of this log4j2 configuration was destroyed when the patch was
+     converted to text (all tags stripped); only two property values survived:
+     the log directory "logs" and the log file name "hugegraph-pd".] -->
diff --git a/hugegraph-pd/hg-pd-test/pom.xml b/hugegraph-pd/hg-pd-test/pom.xml
new file mode 100644
index 0000000000..7924ca4c0b
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/pom.xml
@@ -0,0 +1,370 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph-pd</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>hg-pd-test</artifactId>
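+    <!-- Reviewer note (inferred from the build section below, not from any PD
+         documentation): each JUnit suite class (PDClientSuiteTest, PDCoreSuiteTest,
+         CliToolsSuiteTest, CommonSuiteTest, ServerSuiteTest) gets its own surefire
+         execution, and the suites live under src/main/java rather than src/test/java,
+         which is why every execution overrides testSourceDirectory and
+         testClassesDirectory. -->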
+
+    <properties>
+        <!-- [name of this property was lost in extraction; its value was "true"] -->
+        <powermock.version>2.0.0-RC.3</powermock.version>
+    </properties>
+
+    <profiles>
+        <profile>
+            <id>jacoco</id>
+            <activation>
+                <activeByDefault>false</activeByDefault>
+            </activation>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>org.jacoco</groupId>
+                        <artifactId>jacoco-maven-plugin</artifactId>
+                        <version>0.8.4</version>
+                        <configuration>
+                            <excludes>
+                                <exclude>**/grpc/**.*</exclude>
+                                <exclude>**/config/**.*</exclude>
+                            </excludes>
+                        </configuration>
+                        <executions>
+                            <execution>
+                                <goals>
+                                    <goal>prepare-agent</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+    </profiles>
+
+    <dependencies>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.13.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-store-grpc</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-store-common</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <version>1.18.24</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-context-support</artifactId>
+            <version>5.3.20</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-test</artifactId>
+            <version>5.3.20</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-slf4j-impl</artifactId>
+            <version>${log4j2.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-client</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+            <version>2.8.9</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-grpc</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+            <version>2.7</version>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-databind</artifactId>
+            <version>2.13.0</version>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-core</artifactId>
+            <version>2.13.0</version>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-annotations</artifactId>
+            <version>2.13.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-test</artifactId>
+            <version>2.5.14</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.springframework.boot</groupId>
+                    <artifactId>spring-boot-starter-logging</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-clitools</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-common</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-core</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-dist</artifactId>
+            <version>${revision}</version>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hugegraph-pd</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-classloading-xstream</artifactId>
+            <version>${powermock.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-module-junit4-rule</artifactId>
+            <version>${powermock.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-api-support</artifactId>
+            <version>${powermock.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-module-junit4</artifactId>
+            <version>2.0.0-RC.3</version>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-api-mockito2</artifactId>
+            <version>2.0.0-RC.3</version>
+            <scope>compile</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <version>2.20</version>
+                <executions>
+                    <execution>
+                        <id>client-test</id>
+                        <configuration>
+                            <testSourceDirectory>${basedir}/src/main/java/</testSourceDirectory>
+                            <testClassesDirectory>${basedir}/target/classes/</testClassesDirectory>
+                            <includes>
+                                <include>**/PDClientSuiteTest.java</include>
+                            </includes>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>core-test</id>
+                        <configuration>
+                            <testSourceDirectory>${basedir}/src/main/java/</testSourceDirectory>
+                            <testClassesDirectory>${basedir}/target/classes/</testClassesDirectory>
+                            <includes>
+                                <include>**/PDCoreSuiteTest.java</include>
+                            </includes>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>cli-tools-test</id>
+                        <configuration>
+                            <testSourceDirectory>${basedir}/src/main/java/</testSourceDirectory>
+                            <testClassesDirectory>${basedir}/target/classes/</testClassesDirectory>
+                            <includes>
+                                <include>**/CliToolsSuiteTest.java</include>
+                            </includes>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>common-test</id>
+                        <configuration>
+                            <testSourceDirectory>${basedir}/src/main/java/</testSourceDirectory>
+                            <testClassesDirectory>${basedir}/target/classes/</testClassesDirectory>
+                            <includes>
+                                <include>**/CommonSuiteTest.java</include>
+                            </includes>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>service-test</id>
+                        <configuration>
+                            <testSourceDirectory>${basedir}/src/main/java/</testSourceDirectory>
+                            <testClassesDirectory>${basedir}/target/classes/</testClassesDirectory>
+                            <includes>
+                                <include>**/ServerSuiteTest.java</include>
+                            </includes>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.jacoco</groupId>
+                <artifactId>jacoco-maven-plugin</artifactId>
+                <version>0.8.4</version>
+                <executions>
+                    <execution>
+                        <id>pre-test</id>
+                        <goals>
+                            <goal>prepare-agent</goal>
+                        </goals>
+                    </execution>
+                    <execution>
+                        <id>post-test</id>
+                        <phase>test</phase>
+                        <goals>
+                            <goal>report-aggregate</goal>
+                        </goals>
+                        <configuration>
+                            <outputDirectory>${basedir}/target/site/jacoco</outputDirectory>
+                        </configuration>
+                    </execution>
+                </executions>
+                <configuration>
+                    <excludes>
+                        <exclude>org/apache/hugegraph/pd/rest/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/service/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/model/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/watch/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/pulse/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/license/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/notice/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/util/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/metrics/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/util/grpc/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/boot/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/grpc/**/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/raft/*.class</exclude>
+                        <exclude>**/RaftKVStore.class</exclude>
+                    </excludes>
+                </configuration>
+            </plugin>
+        </plugins>
+        <resources>
+            <resource>
+                <directory>src/main/resources/</directory>
+                <filtering>true</filtering>
+            </resource>
+        </resources>
+    </build>
+</project>
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java
new file mode 100644
index 0000000000..87b8081339
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.apache.hugegraph.pd.client; + +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.runner.RunWith; +import org.mockito.runners.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class BaseClientTest { + public static PDClient pdClient; + public final String storeAddr = "localhost"; + public final String graphName = "default/hugegraph/g"; + public long storeId = 0; + + @BeforeClass + public static void beforeClass() throws Exception { + PDConfig config = PDConfig.of("localhost:8686"); +// PDConfig config = PDConfig.of("10.81.116.77:8986"); + config.setEnableCache(true); + pdClient = PDClient.create(config); + } + + @After + public void teardown() throws Exception { + // pass + } +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java new file mode 100644 index 0000000000..5e7d1ee08e --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+import org.junit.Before;
+import org.junit.Test;
+
+public class DiscoveryClientTest {
+
+    private DiscoveryClientImpl client;
+
+    @Before
+    public void setUp() {
+        this.client = getClient("appName", "localhost:8654", new HashMap());
+    }
+
+    @Test
+    public void testGetRegisterNode() {
+        // Setup; this is a smoke test, the values below are built but not asserted
+        try {
+            Consumer result = this.client.getRegisterConsumer();
+            final NodeInfo expectedResult = NodeInfo.newBuilder()
+                                                    .setAppName("appName")
+                                                    .build();
+
+            // Give the scheduled registration task time to report at least once
+            Thread.sleep(3000);
+            Query query = Query.newBuilder().setAppName("appName")
+                               .setVersion("0.13.0").build();
+
+            // Run the test
+            this.client.getNodeInfos(query);
+        } catch (InterruptedException e) {
+            e.printStackTrace();
+        } finally {
+            this.client.close();
+        }
+    }
+
+    private DiscoveryClientImpl getClient(String appName, String address,
+                                          Map labels) {
+        DiscoveryClientImpl discoveryClient = null;
+        try {
+            discoveryClient = DiscoveryClientImpl.newBuilder()
+                                                 .setCenterAddress("localhost:8686")
+                                                 .setAddress(address).setAppName(appName)
+                                                 .setDelay(2000)
+                                                 .setVersion("0.13.0")
+                                                 .setId("0").setLabels(labels)
+                                                 .build();
+            discoveryClient.scheduleTask();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+
+        return discoveryClient;
+    }
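+
+    // Note (an inference from the values used in this test, not documented
+    // behaviour): the builder's setDelay(2000) makes the client re-register
+    // every 2 s, while the PD test configs in this patch set
+    // discovery.heartbeat-try-count to 3, so a client that stops heartbeating
+    // would have its registration dropped after roughly 6 s.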
+}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java
new file mode 100644
index 0000000000..54cee0b812
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+
+import java.util.function.Consumer;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hugegraph.pd.grpc.kv.KResponse;
+import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchEvent;
+import org.apache.hugegraph.pd.grpc.kv.WatchKv;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchState;
+import org.apache.hugegraph.pd.grpc.kv.WatchType;
+import org.junit.Before;
+import org.junit.Test;
+
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+
+public class KvClientTest extends BaseClientTest {
+
+    String key = "key";
+    String value = "value";
+    private KvClient client;
+
+    @Before
+    public void setUp() {
+        this.client = new KvClient<>(PDConfig.of("localhost:8686"));
+    }
+
+    @Test
+    public void testCreateStub() {
+        // Run the test
+        try {
+            final AbstractStub result = this.client.createStub();
+        } catch (Exception e) {
+            // ignored: requires a running PD at localhost:8686
+        }
+        // Verify the results
+    }
+
+    @Test
+    public void testCreateBlockingStub() {
+        // Run the test
+        try {
+            final AbstractBlockingStub result = this.client.createBlockingStub();
+        } catch (Exception e) {
+            // ignored: requires a running PD at localhost:8686
+        }
+    }
+
+    @Test
+    public void testPutAndGet() throws Exception {
+        // Run the test
+        try {
+            this.client.put(this.key, this.value);
+            KResponse result = this.client.get(this.key);
+
+            // Verify the results
+            assertThat(result.getValue()).isEqualTo(this.value);
+            this.client.delete(this.key);
+            result = this.client.get(this.key);
+            assertThat(StringUtils.isEmpty(result.getValue())).isTrue();
+            this.client.deletePrefix(this.key);
+            this.client.put(this.key + "1", this.value);
+            this.client.put(this.key + "2", this.value);
+            ScanPrefixResponse response = this.client.scanPrefix(this.key);
+            assertThat(response.getKvsMap()).hasSize(2);
+            this.client.putTTL(this.key + "3", this.value, 1000);
+            this.client.keepTTLAlive(this.key + "3");
+            final Consumer mockConsumer = mock(Consumer.class);
+
+            // Exercise the watch and lock APIs
+            this.client.listen(this.key + "3", mockConsumer);
+            this.client.listenPrefix(this.key + "4", mockConsumer);
+            WatchResponse r = WatchResponse.newBuilder().addEvents(
+                                                   WatchEvent.newBuilder().setCurrent(
+                                                           WatchKv.newBuilder().setKey(this.key).setValue("value")
+                                                                  .build()).setType(WatchType.Put).build())
+                                           .setClientId(0L)
+                                           .setState(WatchState.Starting)
+                                           .build();
+            this.client.getWatchList(r);
+            this.client.getWatchMap(r);
+            this.client.lock(this.key, 3000L);
+            this.client.isLocked(this.key);
+            this.client.unlock(this.key);
+            this.client.lock(this.key, 3000L);
+            this.client.keepAlive(this.key);
+            this.client.close();
+        } catch (Exception e) {
+            // ignored: requires a running PD at localhost:8686
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java
new file mode 100644
index 0000000000..f969c218a1
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.client; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +import lombok.extern.slf4j.Slf4j; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + PDClientTest.class, + KvClientTest.class, + DiscoveryClientTest.class +}) + +@Slf4j +public class PDClientSuiteTest { + +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java new file mode 100644 index 0000000000..868c8d2aec --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java @@ -0,0 +1,418 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.client; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.junit.Test; +import org.mockito.Mockito; + +public class PDClientTest extends BaseClientTest { + @Test + public void testDbCompaction() { + System.out.println("testDbCompaction start"); + + try { + pdClient.dbCompaction(""); + pdClient.dbCompaction(); + } catch (PDException e) { + e.printStackTrace(); + } + + System.out.println("pdclienttest testDbCompaction end"); + } + + @Test + public void testRegisterStore() { + Metapb.Store store = Metapb.Store.newBuilder().build(); + try { + pdClient.registerStore(store); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testSetGraph() { + Metapb.Graph graph = Metapb.Graph.newBuilder().setGraphName("test").build(); + try { + pdClient.setGraph(graph); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetGraph() { + try { + pdClient.getGraph("test"); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetStore() { + try { + pdClient.getStore(0L); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testUpdateStore() { + Metapb.Store store = Metapb.Store.newBuilder().build(); + try { + pdClient.updateStore(store); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetActiveStores() { + try { + pdClient.getActiveStores("test"); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetAllStores() { + try { + pdClient.getAllStores("test"); + } catch (PDException e) { + e.printStackTrace(); + } + } + +// @Test +// public void testStoreHeartbeat(){ +// Metapb.StoreStats stats = Metapb.StoreStats.newBuilder().build(); +// try { +// pdClient.storeHeartbeat(stats); +// } catch (PDException e) { +// e.printStackTrace(); +// } +// } + + @Test + public void testKeyToCode() { + pdClient.keyToCode("test", "test".getBytes(StandardCharsets.UTF_8)); + } + + @Test + public void testScanPartitions() { + try { + pdClient.scanPartitions("test", "1".getBytes(StandardCharsets.UTF_8), + "9".getBytes(StandardCharsets.UTF_8)); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetPartitionsByStore() { + try { + pdClient.getPartitionsByStore(0L); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testQueryPartitions() { + try { + pdClient.queryPartitions(0L, 0); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetPartitions() { + try { + pdClient.getPartitions(0L, "test"); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testUpdatePartitionLeader() { + System.out.println("updatePartitionLeader start"); + + pdClient.updatePartitionLeader("aaa", 0, 0L); + } + + @Test + public void testInvalidPartitionCache() { + pdClient.invalidPartitionCache(); + } + + @Test + public void testInvalidStoreCache() { + pdClient.invalidStoreCache(0L); + } + + @Test + public void testUpdatePartitionCache() { + Metapb.Partition partition = Metapb.Partition.newBuilder().build(); + Metapb.Shard leader = Metapb.Shard.newBuilder().build(); + pdClient.updatePartitionCache(partition, leader); + } + + @Test + public void 
testGetIdByKey() {
+        try {
+            pdClient.getIdByKey("test", 1);
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testResetIdByKey() {
+        try {
+            pdClient.resetIdByKey("test");
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testGetLeader() {
+        try {
+            pdClient.getLeader();
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testGetMembers() {
+        try {
+            pdClient.getMembers();
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testGetClusterStats() {
+        try {
+            pdClient.getClusterStats();
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testAddEventListener() {
+        PDClient.PDEventListener listener = Mockito.mock(PDClient.PDEventListener.class);
+        pdClient.addEventListener(listener);
+    }
+
+    @Test
+    public void testGetWatchClient() {
+        pdClient.getWatchClient();
+    }
+
+    @Test
+    public void testGetPulseClient() {
+        // pdClient.getPulseClient();
+    }
+
+    @Test
+    public void testGetStoreStatus() {
+        try {
+            pdClient.getStoreStatus(true);
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testGetPartition() {
+        try {
+            pdClient.getPartition("test", "test".getBytes(StandardCharsets.UTF_8));
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testSetGraphSpace() {
+        try {
+            pdClient.setGraphSpace("test", 1L);
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testGetGraphSpace() {
+        try {
+            pdClient.getGraphSpace("test");
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testSetPDConfig() {
+        try {
+            pdClient.setPDConfig(0, "", 0, 0L);
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+        Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder().build();
+
+        try {
+            pdClient.setPDConfig(pdConfig);
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testGetPDConfig() {
+        try {
+            pdClient.getPDConfig(0L);
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testChangePeerList() {
+        try {
+            pdClient.changePeerList("");
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testSplitData() {
+        try {
+            Metapb.PDConfig config = pdClient.getPDConfig();
+            pdClient.setPDConfig(config.toBuilder().setMaxShardsPerStore(12).build());
+            System.out.println(pdClient.getPDConfig());
+
+            // kick off the partition split
+            pdClient.splitData();
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
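+
+    // Note (an assumption based on the call sequence above, not a documented
+    // contract): splitData() increases the number of partitions, so the test
+    // first raises max-shards-per-store via setPDConfig() to leave the cluster
+    // enough per-store shard headroom for the split to be accepted.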
+
+    @Test
+    public void testBalancePartition() {
+        try {
+            pdClient.balancePartition();
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testMovePartition() {
+        Pdpb.OperationMode mode = Pdpb.OperationMode.Auto;
+        List params = new ArrayList<>(1);
+        try {
+            pdClient.movePartition(mode, params);
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testReportTask() {
+        MetaTask.Task task = MetaTask.Task.newBuilder().build();
+        try {
+            pdClient.reportTask(task);
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+
+    @Test
+    public void testBalanceLeaders() {
+        try {
+            pdClient.balanceLeaders();
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testDelStore() {
+        try {
+            pdClient.delStore(0L);
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+// @Test
+// public void testgetQuota() {
+//     try {
+//         pdClient.getQuota();
+//     } catch (PDException e) {
+//         e.printStackTrace();
+//     }
+// }
+
+    @Test
+    public void testUpdatePartition() {
+        List partitions = new ArrayList<>(1);
+        try {
+            pdClient.updatePartition(partitions);
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testDelPartition() {
+        try {
+            pdClient.delPartition("test", 0);
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Test
+    public void testDelGraph() {
+        try {
+            pdClient.delGraph("test");
+        } catch (PDException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java
new file mode 100644
index 0000000000..b00049cc95
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.clitools;
+
+import org.junit.After;
+import org.junit.BeforeClass;
+
+
+public class BaseCliToolsTest {
+    @BeforeClass
+    public static void init() {
+
+    }
+
+    @After
+    public void teardown() {
+        // pass
+    }
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java
new file mode 100644
index 0000000000..89b125a5f3
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.apache.hugegraph.pd.clitools; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +import lombok.extern.slf4j.Slf4j; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + MainTest.class +}) + +@Slf4j +public class CliToolsSuiteTest { + + +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java new file mode 100644 index 0000000000..cabbe64d49 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.clitools; + +import java.util.Arrays; +import java.util.List; + +import org.apache.hugegraph.pd.clitools.Main; +import org.apache.hugegraph.pd.common.PDException; +import org.junit.Test; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class MainTest extends BaseCliToolsTest { + + + public static boolean test2sup(List arrays, int tail, int res) { + System.out.printf("%d %d%n", tail, res); + if (tail == 0) { + System.out.printf("a = %d %d%n", tail, res); + return false; + } else if (tail == 1) { + System.out.printf("b = %d %d%n", arrays.get(0), res); + return (arrays.get(0) == res); + } else if (tail == 2) { + System.out.printf("c = %d %d %d%n", arrays.get(0), arrays.get(1), res); + return (arrays.get(0) + arrays.get(1) == Math.abs(res)) || + (Math.abs(arrays.get(0) - arrays.get(1)) == Math.abs(res)); + } else { + return test2sup(arrays, tail - 1, res + arrays.get(tail - 1)) || + test2sup(arrays, tail - 1, res - arrays.get(tail - 1)); + } + } + + @Test + public void getConfig() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad"}); + } + + // @Test + public void setBatchTrue() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad= true "}); + } + + // @Test + public void setBatchFalse() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad=false"}); + } + + @Test + public void getConfig2() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount"}); + } + + // @Test + public void setShardCount1() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount=1"}); + } + + // @Test + public void setShardCount3() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount=3"}); + } + + @Test + public void test2() { + Integer[] a = new Integer[]{1, 0, 3, 2}; + List aa = Arrays.asList(a); + System.out.printf(test2sup(aa, aa.size(), 0) ? 
"TRUE" : "FALSE"); + } + + +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java new file mode 100644 index 0000000000..97052ffd3a --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.common; + +import org.junit.After; +import org.junit.BeforeClass; + +public class BaseCommonTest { + @BeforeClass + public static void init() { + + } + + @After + public void teardown() { + // pass + } +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java new file mode 100644 index 0000000000..0430520dc9 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.common; + +import org.apache.hugegraph.pd.service.IdServiceTest; +import org.apache.hugegraph.pd.service.KvServiceTest; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +import lombok.extern.slf4j.Slf4j; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + PartitionUtilsTest.class, + PartitionCacheTest.class, + MetadataKeyHelperTest.class, + KvServiceTest.class, + HgAssertTest.class, + KVPairTest.class, + IdServiceTest.class +}) + +@Slf4j +public class CommonSuiteTest { + + +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java new file mode 100644 index 0000000000..90a0e05341 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.common; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.HashMap; + +import org.apache.hugegraph.pd.common.HgAssert; +import org.junit.Test; + +public class HgAssertTest { + + @Test(expected = IllegalArgumentException.class) + public void testIsTrue() { + HgAssert.isTrue(false, ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsTrue2() { + HgAssert.isTrue(true, null); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsFalse() { + HgAssert.isFalse(true, ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsFalse2() { + HgAssert.isTrue(false, null); + } + + @Test(expected = IllegalArgumentException.class) + public void isArgumentValid() { + HgAssert.isArgumentValid(new byte[0], ""); + } + + @Test(expected = IllegalArgumentException.class) + public void isArgumentValidStr() { + HgAssert.isArgumentValid("", ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsArgumentNotNull() { + HgAssert.isArgumentNotNull(null, ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIstValid() { + HgAssert.istValid(new byte[0], ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIstValidStr() { + HgAssert.isValid("", ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsNotNull() { + HgAssert.isNotNull(null, ""); + } + + + @Test + public void testIsInvalid() { + assertFalse(HgAssert.isInvalid("abc", "test")); + assertTrue(HgAssert.isInvalid("", null)); + } + + @Test + public void testIsInvalidByte() { + assertTrue(HgAssert.isInvalid(new byte[0])); + assertFalse(HgAssert.isInvalid(new 
byte[1])); + } + + @Test + public void testIsInvalidMap() { + assertTrue(HgAssert.isInvalid(new HashMap())); + assertFalse(HgAssert.isInvalid(new HashMap() {{ + put(1, 1); + }})); + } + + @Test + public void testIsInvalidCollection() { + assertTrue(HgAssert.isInvalid(new ArrayList())); + assertFalse(HgAssert.isInvalid(new ArrayList() {{ + add(1); + }})); + } + + @Test + public void testIsContains() { + assertTrue(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)}, + Long.valueOf(2))); + assertFalse(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)}, + Long.valueOf(3))); + } + + @Test + public void testIsContainsT() { + assertTrue(HgAssert.isContains(new ArrayList<>() {{ + add(1); + }}, 1)); + assertFalse(HgAssert.isContains(new ArrayList<>() {{ + add(1); + }}, 2)); + } + + @Test + public void testIsNull() { + assertTrue(HgAssert.isNull(null)); + assertFalse(HgAssert.isNull("abc", "cdf")); + } + +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java new file mode 100644 index 0000000000..9d5f019d73 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.common; + +import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class KVPairTest { + + KVPair pair; + + @Before + public void init() { + this.pair = new KVPair<>("key", 1); + } + + @Test + public void testGetKey() { + assertEquals(this.pair.getKey(), "key"); + } + + @Test + public void testSetKey() { + this.pair.setKey("key2"); + assertEquals(this.pair.getKey(), "key2"); + } + + @Test + public void testGetValue() { + assertEquals(1, this.pair.getValue()); + } + + @Test + public void testSetValue() { + this.pair.setValue(2); + assertEquals(2, this.pair.getValue()); + } + + @Test + public void testToString() { + + } + + @Test + public void testHashCode() { + + } + + @Test + public void testEquals() { + var pair2 = new KVPair<>("key", 1); + Assert.assertEquals(pair2, this.pair); + } +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java new file mode 100644 index 0000000000..072b815b56 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java @@ -0,0 +1,217 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.common; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.junit.Test; + +public class MetadataKeyHelperTest { + + @Test + public void testGetStoreInfoKey() { + assertThat(MetadataKeyHelper.getStoreInfoKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetActiveStoreKey() { + assertThat(MetadataKeyHelper.getActiveStoreKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetActiveStorePrefix() { + assertThat(MetadataKeyHelper.getActiveStorePrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetStorePrefix() { + assertThat(MetadataKeyHelper.getStorePrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetStoreStatusKey() { + assertThat(MetadataKeyHelper.getStoreStatusKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardGroupKey() { + assertThat(MetadataKeyHelper.getShardGroupKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardGroupPrefix() { + assertThat(MetadataKeyHelper.getShardGroupPrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPartitionKey() { + assertThat(MetadataKeyHelper.getPartitionKey("graphName", 0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPartitionPrefix() { + assertThat(MetadataKeyHelper.getPartitionPrefix("graphName")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardKey() { + assertThat(MetadataKeyHelper.getShardKey(0L, 0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardPrefix() { + assertThat(MetadataKeyHelper.getShardPrefix(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetGraphKey() { + assertThat(MetadataKeyHelper.getGraphKey("graphName")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetGraphPrefix() { + assertThat(MetadataKeyHelper.getGraphPrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPartitionStatusKey() { + assertThat(MetadataKeyHelper.getPartitionStatusKey("graphName", + 0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPartitionStatusPrefixKey() { + assertThat(MetadataKeyHelper.getPartitionStatusPrefixKey( + "graphName")).contains(MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetGraphSpaceKey() { + assertThat(MetadataKeyHelper.getGraphSpaceKey("graphSpace")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPdConfigKey() { + assertThat(MetadataKeyHelper.getPdConfigKey("configKey")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetQueueItemPrefix() { + assertThat(MetadataKeyHelper.getQueueItemPrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetQueueItemKey() { + assertThat(MetadataKeyHelper.getQueueItemKey("itemId")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetSpitTaskKey() { + assertThat(MetadataKeyHelper.getSplitTaskKey("graphName", 0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetSpitTaskPrefix() { + assertThat(MetadataKeyHelper.getSplitTaskPrefix("graph0")).contains( + 
MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetLogKey() { + // Setup + final Metapb.LogRecord record = Metapb.LogRecord.newBuilder() + .setAction("value") + .setTimestamp(0L) + .build(); + + // Run the test + final byte[] result = MetadataKeyHelper.getLogKey(record); + + // Verify the results + assertThat(result).contains(MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetLogKeyPrefix() { + assertThat(MetadataKeyHelper.getLogKeyPrefix("action", 0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetKVPrefix() { + assertThat(MetadataKeyHelper.getKVPrefix("prefix", "key")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetKVTTLPrefix() { + assertThat(MetadataKeyHelper.getKVTTLPrefix("ttlPrefix", "prefix", + "key")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetKVWatchKeyPrefix1() { + assertThat( + MetadataKeyHelper.getKVWatchKeyPrefix("key", "watchDelimiter", + 0L)).contains( + String.valueOf(MetadataKeyHelper.getDelimiter())); + } + + @Test + public void testGetKVWatchKeyPrefix2() { + assertThat(MetadataKeyHelper.getKVWatchKeyPrefix("key", + "watchDelimiter")).contains( + String.valueOf(MetadataKeyHelper.getDelimiter())); + } + + @Test + public void testGetDelimiter() { + assertThat(MetadataKeyHelper.getDelimiter()).isEqualTo('/'); + } + + @Test + public void testGetStringBuilderHelper() { + try { + MetadataKeyHelper.getStringBuilderHelper(); + } catch (Exception e) { + + } + } +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java new file mode 100644 index 0000000000..3377a5b732 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java @@ -0,0 +1,392 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.common; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +public class PartitionCacheTest { + + private PartitionCache cache; + + private static Metapb.Partition createPartition(int pid, String graphName, long start, + long end) { + return Metapb.Partition.newBuilder() + .setId(pid) + .setGraphName(graphName) + .setStartKey(start) + .setEndKey(end) + .setState(Metapb.PartitionState.PState_Normal) + .setVersion(1) + .build(); + } + + private static Metapb.ShardGroup creteShardGroup(int pid) { + return Metapb.ShardGroup.newBuilder() + .addShards( + Metapb.Shard.newBuilder().setStoreId(0) + .setRole(Metapb.ShardRole.Leader).build() + ) + .setId(pid) + .setVersion(0) + .setConfVer(0) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + } + + private static Metapb.Shard createShard() { + return Metapb.Shard.newBuilder() + .setStoreId(0) + .setRole(Metapb.ShardRole.Leader) + .build(); + } + + private static Metapb.Store createStore(long storeId) { + return Metapb.Store.newBuilder() + .setId(storeId) + .setAddress("127.0.0.1") + .setCores(4) + .setVersion("1") + .setDataPath("/tmp/junit") + .setDataVersion(1) + .setLastHeartbeat(System.currentTimeMillis()) + .setStartTimestamp(System.currentTimeMillis()) + .setState(Metapb.StoreState.Up) + .setDeployPath("/tmp/junit") + .build(); + } + + private static Metapb.Graph createGraph(String graphName, int partitionCount) { + return Metapb.Graph.newBuilder() + .setGraphName(graphName) + .setPartitionCount(partitionCount) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + } + + private static Metapb.ShardGroup createShardGroup() { + List shards = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + shards.add(Metapb.Shard.newBuilder() + .setStoreId(i) + .setRole(i == 0 ? 
Metapb.ShardRole.Leader : + Metapb.ShardRole.Follower) + .build() + ); + } + + return Metapb.ShardGroup.newBuilder() + .setId(1) + .setVersion(1) + .setConfVer(1) + .setState(Metapb.PartitionState.PState_Normal) + .addAllShards(shards) + .build(); + } + + @Before + public void setup() { + this.cache = new PartitionCache(); + } + + @Test + public void testGetPartitionById() { + var partition = createPartition(0, "graph0", 0, 65535); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition); + var ret = this.cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + } + + @Test + public void testGetPartitionByKey() throws UnsupportedEncodingException { + var partition = createPartition(0, "graph0", 0, 65535); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition); + var ret = this.cache.getPartitionByKey("graph0", "0".getBytes(StandardCharsets.UTF_8)); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + } + + @Test + public void getPartitionByCode() { + var partition = createPartition(0, "graph0", 0, 1024); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition); + var ret = this.cache.getPartitionByCode("graph0", 10); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + assertNull(this.cache.getPartitionByCode("graph0", 2000)); + } + + @Test + public void testGetPartitions() { + var partition1 = createPartition(0, "graph0", 0, 1024); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition1); + assertEquals(this.cache.getPartitions("graph0").size(), 1); + var partition2 = createPartition(1, "graph0", 1024, 2048); + this.cache.updateShardGroup(creteShardGroup(1)); + this.cache.updatePartition(partition2); + assertEquals(this.cache.getPartitions("graph0").size(), 2); + System.out.print(this.cache.debugCacheByGraphName("graph0")); + } + + @Test + public void testAddPartition() { + var partition = createPartition(0, "graph0", 0, 65535); + this.cache.addPartition("graph0", 0, partition); + var ret = this.cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + assertNotNull(this.cache.getPartitionByCode("graph0", 2000)); + System.out.print(this.cache.debugCacheByGraphName("graph0")); + var partition2 = createPartition(0, "graph0", 0, 1024); + this.cache.addPartition("graph0", 0, partition2); + ret = this.cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition2); + assertNull(this.cache.getPartitionByCode("graph0", 2000)); + System.out.print(this.cache.debugCacheByGraphName("graph0")); + } + + @Test + public void testUpdatePartition() { + var partition = createPartition(0, "graph0", 0, 65535); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.addPartition("graph0", 0, partition); + var partition2 = createPartition(0, "graph0", 0, 1024); + this.cache.updatePartition("graph0", 0, partition2); + var ret = this.cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition2); + assertNull(this.cache.getPartitionByCode("graph0", 2000)); + } + + @Test + public void testUpdatePartition2() { + var partition = createPartition(0, "graph0", 0, 1024); + this.cache.updateShardGroup(creteShardGroup(0)); + assertTrue(this.cache.updatePartition(partition)); + assertFalse(this.cache.updatePartition(partition)); + var ret = this.cache.getPartitionById("graph0", 0); + 
assertNotNull(ret);
+        assertEquals(ret.getKey(), partition);
+        assertNull(this.cache.getPartitionByCode("graph0", 2000));
+    }
+
+    @Test
+    public void testRemovePartition() {
+        var partition = createPartition(0, "graph0", 0, 1024);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.updatePartition(partition);
+        assertNotNull(this.cache.getPartitionById("graph0", 0));
+        this.cache.removePartition("graph0", 0);
+        assertNull(this.cache.getPartitionById("graph0", 0));
+        System.out.print(this.cache.debugCacheByGraphName("graph0"));
+    }
+
+    @Test
+    public void testRange() {
+        var partition1 = createPartition(1, "graph0", 0, 3);
+        var partition2 = createPartition(2, "graph0", 3, 6);
+        this.cache.updatePartition(partition1);
+        this.cache.updatePartition(partition2);
+
+        var partition3 = createPartition(3, "graph0", 1, 2);
+        var partition4 = createPartition(4, "graph0", 2, 3);
+
+        this.cache.updatePartition(partition3);
+        this.cache.updatePartition(partition4);
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+        var partition6 = createPartition(1, "graph0", 0, 1);
+        this.cache.updatePartition(partition6);
+
+
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+
+        var partition5 = createPartition(1, "graph0", 0, 3);
+        this.cache.updatePartition(partition5);
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+    }
+
+    @Test
+    public void testRange2() {
+        var partition1 = createPartition(1, "graph0", 0, 3);
+        var partition2 = createPartition(2, "graph0", 3, 6);
+        this.cache.updatePartition(partition1);
+        this.cache.updatePartition(partition2);
+
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+
+        // leave a gap in the middle of the range
+        var partition3 = createPartition(1, "graph0", 2, 3);
+        this.cache.updatePartition(partition3);
+
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+
+        var partition5 = createPartition(1, "graph0", 0, 3);
+        this.cache.updatePartition(partition5);
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+    }
+
+    @Test
+    public void testRemovePartitions() {
+        var partition1 = createPartition(0, "graph0", 0, 1024);
+        var partition2 = createPartition(1, "graph0", 1024, 2048);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.updatePartition(partition1);
+        this.cache.updateShardGroup(creteShardGroup(1));
+        this.cache.updatePartition(partition2);
+        assertEquals(this.cache.getPartitions("graph0").size(), 2);
+        this.cache.removePartitions();
+        assertEquals(this.cache.getPartitions("graph0").size(), 0);
+    }
+
+    @Test
+    public void testRemoveAll() {
+        var partition1 = createPartition(0, "graph0", 0, 1024);
+        var partition2 = createPartition(1, "graph0", 1024, 2048);
+        var partition3 = createPartition(0, "graph1", 0, 2048);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.updateShardGroup(creteShardGroup(1));
+        this.cache.updatePartition(partition1);
+        this.cache.updatePartition(partition2);
+        this.cache.updatePartition(partition3);
+
+        assertEquals(this.cache.getPartitions("graph0").size(), 2);
+        assertEquals(this.cache.getPartitions("graph1").size(), 1);
+        this.cache.removeAll("graph0");
+        assertEquals(this.cache.getPartitions("graph0").size(), 0);
+        assertEquals(this.cache.getPartitions("graph1").size(), 1);
+    }
+
+    @Test
+    public void testUpdateShardGroup() {
+        var shardGroup = createShardGroup();
+        this.cache.updateShardGroup(shardGroup);
+        assertNotNull(this.cache.getShardGroup(shardGroup.getId()));
+    }
+
+    @Test
+    public void testGetShardGroup() {
+        var shardGroup =
createShardGroup(); + this.cache.updateShardGroup(shardGroup); + assertEquals(this.cache.getShardGroup(shardGroup.getId()), shardGroup); + } + + @Test + public void testAddStore() { + var store = createStore(1); + this.cache.addStore(1L, store); + assertEquals(this.cache.getStoreById(1L), store); + } + + @Test + public void testGetStoreById() { + var store = createStore(1); + this.cache.addStore(1L, store); + assertEquals(this.cache.getStoreById(1L), store); + } + + @Test + public void testRemoveStore() { + var store = createStore(1); + this.cache.addStore(1L, store); + assertEquals(this.cache.getStoreById(1L), store); + + this.cache.removeStore(1L); + assertNull(this.cache.getStoreById(1L)); + } + + @Test + public void testHasGraph() { + var partition = createPartition(0, "graph0", 0, 65535); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition); + assertTrue(this.cache.hasGraph("graph0")); + assertFalse(this.cache.hasGraph("graph1")); + } + + @Test + public void testUpdateGraph() { + var graph = createGraph("graph0", 10); + this.cache.updateGraph(graph); + assertEquals(this.cache.getGraph("graph0"), graph); + graph = createGraph("graph0", 12); + this.cache.updateGraph(graph); + assertEquals(this.cache.getGraph("graph0"), graph); + } + + @Test + public void testGetGraph() { + var graph = createGraph("graph0", 12); + this.cache.updateGraph(graph); + assertEquals(this.cache.getGraph("graph0"), graph); + } + + @Test + public void testGetGraphs() { + var graph1 = createGraph("graph0", 12); + var graph2 = createGraph("graph1", 12); + var graph3 = createGraph("graph2", 12); + this.cache.updateGraph(graph1); + this.cache.updateGraph(graph2); + this.cache.updateGraph(graph3); + assertEquals(this.cache.getGraphs().size(), 3); + } + + @Test + public void testReset() { + var graph1 = createGraph("graph0", 12); + var graph2 = createGraph("graph1", 12); + var graph3 = createGraph("graph2", 12); + this.cache.updateGraph(graph1); + this.cache.updateGraph(graph2); + this.cache.updateGraph(graph3); + assertEquals(this.cache.getGraphs().size(), 3); + this.cache.reset(); + assertEquals(this.cache.getGraphs().size(), 0); + } + + @Test + public void testUpdateShardGroupLeader() { + var shardGroup = createShardGroup(); + this.cache.updateShardGroup(shardGroup); + + var leader = + Metapb.Shard.newBuilder().setStoreId(2).setRole(Metapb.ShardRole.Leader).build(); + this.cache.updateShardGroupLeader(shardGroup.getId(), leader); + + assertEquals(this.cache.getLeaderShard(shardGroup.getId()), leader); + } + +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java new file mode 100644 index 0000000000..e6d9cd7b19 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.common; + +import org.apache.hugegraph.pd.common.PartitionUtils; +import org.junit.Assert; +import org.junit.Test; + +import lombok.extern.slf4j.Slf4j; + + +@Slf4j +public class PartitionUtilsTest extends BaseCommonTest { + @Test + public void testCalcHashcode() { + byte[] key = new byte[5]; + long code = PartitionUtils.calcHashcode(key); + Assert.assertEquals(code, 31912L); + } +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java new file mode 100644 index 0000000000..543f11bdf2 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.core; + +import java.io.File; +import java.io.IOException; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.config.PDConfig; +import org.junit.After; +import org.junit.BeforeClass; + + +public class BaseCoreTest { + + static org.apache.hugegraph.pd.config.PDConfig pdConfig; + + @BeforeClass + public static void init() throws Exception { + String path = "tmp/unitTest"; + deleteDirectory(new File(path)); + pdConfig = new org.apache.hugegraph.pd.config.PDConfig() {{ + this.setClusterId(100); + this.setInitialStoreList("127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502," + + "127.0.0.1:8503,127.0.0.1:8504,127.0.0.1:8505"); + }}; + + pdConfig.setStore(new org.apache.hugegraph.pd.config.PDConfig().new Store() {{ + this.setMaxDownTime(3600); + this.setKeepAliveTimeout(3600); + }}); + + pdConfig.setPartition(new org.apache.hugegraph.pd.config.PDConfig().new Partition() {{ + this.setShardCount(3); + this.setMaxShardsPerStore(3); + }}); + pdConfig.setRaft(new org.apache.hugegraph.pd.config.PDConfig().new Raft() {{ + this.setEnable(false); + }}); + pdConfig.setDiscovery(new PDConfig().new Discovery()); + pdConfig.setDataPath(path); + ConfigService configService = new ConfigService(pdConfig); + pdConfig = configService.loadConfig(); + } + + public static void deleteDirectory(File dir) { + try { + FileUtils.deleteDirectory(dir); + } catch (IOException e) { + System.out.printf("Failed to start ....,%s%n", e.getMessage()); + } + } + + @After + public void teardown() throws Exception { + // pass + } +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java new file mode 100644 index 0000000000..fc4e5c9ae0 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.hugegraph.pd.core;
+
+import org.apache.hugegraph.pd.core.meta.MetadataKeyHelperTest;
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+import lombok.extern.slf4j.Slf4j;
+
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+        StoreNodeServiceTest.class,
+        MetadataKeyHelperTest.class
+})
+
+@Slf4j
+public class PDCoreSuiteTest {
+
+
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java
new file mode 100644
index 0000000000..584a4ac7c2
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.core;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hugegraph.pd.PartitionService;
+import org.apache.hugegraph.pd.StoreNodeService;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.junit.Assert;
+import org.junit.Test;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class StoreNodeServiceTest extends BaseCoreTest {
+
+
+    @Test
+    public void testStoreNodeService() throws PDException {
+        Assert.assertEquals(pdConfig.getPartition().getTotalCount(),
+                            pdConfig.getInitialStoreMap().size() *
+                            pdConfig.getPartition().getMaxShardsPerStore()
+                            / pdConfig.getPartition().getShardCount());
+        StoreNodeService storeService = new StoreNodeService(pdConfig);
+        storeService.init(new PartitionService(pdConfig, storeService));
+        int count = 6;
+        Metapb.Store[] stores = new Metapb.Store[count];
+        for (int i = 0; i < count; i++) {
+            Metapb.Store store = Metapb.Store.newBuilder()
+                                             .setId(0)
+                                             .setAddress("127.0.0.1:850" + i)
+                                             .setDeployPath("/data")
+                                             .addLabels(Metapb.StoreLabel.newBuilder()
+                                                                         .setKey("namespace")
+                                                                         .setValue("default")
+                                                                         .build())
+                                             .build();
+            stores[i] = storeService.register(store);
+            System.out.println("Newly registered store, id = " + stores[i].getId());
+        }
+        Assert.assertEquals(count, storeService.getStores("").size());
+
+        for (Metapb.Store store : stores) {
+            Metapb.StoreStats stats = Metapb.StoreStats.newBuilder()
+                                                       .setStoreId(store.getId())
+                                                       .build();
+            storeService.heartBeat(stats);
+        }
+
+        Assert.assertEquals(6, storeService.getActiveStores("").size());
+
+        Metapb.Graph graph = Metapb.Graph.newBuilder()
+                                         .setGraphName("defaultGH")
+                                         .setPartitionCount(10)
+                                         .build();
+        // allocate shards
+        List<Metapb.Shard> shards = storeService.allocShards(graph, 1);
+
+
+        Assert.assertEquals(3, shards.size());
+        // set the leader
+        Assert.assertEquals(pdConfig.getPartition().getTotalCount(),
+                            storeService.getShardGroups().size());
+        Metapb.Shard leader = Metapb.Shard.newBuilder(shards.get(0))
+                                          .setRole(Metapb.ShardRole.Leader).build();
+        shards = new ArrayList<>(shards);
+        shards.set(0, leader);
+        // increase the shard count
+        pdConfig.getPartition().setShardCount(5);
+
+        Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder()
+                                                        .setId(1)
+                                                        .addAllShards(shards).build();
+        shards = storeService.reallocShards(shardGroup);
+
+        Assert.assertEquals(5, shards.size());
+        // decrease the shard count
+        pdConfig.getPartition().setShardCount(3);
+        shards = storeService.reallocShards(shardGroup);
+        Assert.assertEquals(3, shards.size());
+        // still contains the leader; the leader must not be removed
+        Assert.assertTrue(shards.contains(leader));
+
+        // decrease the shard count again
+        pdConfig.getPartition().setShardCount(1);
+        graph = Metapb.Graph.newBuilder(graph).build();
+        shards = storeService.reallocShards(shardGroup);
+        Assert.assertEquals(1, shards.size());
+        // still contains the leader; the leader must not be removed
+        Assert.assertTrue(shards.contains(leader));
+
+        for (Metapb.Store store : stores) {
+            storeService.removeStore(store.getId());
+        }
+        Assert.assertEquals(0, storeService.getStores("").size());
+
+
+    }
+
+
+}
\ No newline at end of file
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java
new file mode 100644
index 0000000000..d8a538f955
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.core.meta;
+
+import static org.junit.Assert.assertArrayEquals;
+
+import org.apache.hugegraph.pd.meta.MetadataKeyHelper;
+import org.junit.Test;
+
+public class MetadataKeyHelperTest {
+
+    @Test
+    public void testMoveTaskKey() {
+        var key = MetadataKeyHelper.getMoveTaskKey("foo", 0, 1);
+        assertArrayEquals(key, "TASK_MOVE/foo/0/1".getBytes());
+        var key2 = MetadataKeyHelper.getMoveTaskPrefix("foo");
+        assertArrayEquals(key2, "TASK_MOVE/foo".getBytes());
+    }
+}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java
new file mode 100644
index 0000000000..917a3f74fd
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.grpc; + +import org.junit.After; +import org.junit.BeforeClass; + + +public class BaseGrpcTest { + + @BeforeClass + public static void init() { + + } + + @After + public void teardown() { + // pass + } + +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java new file mode 100644 index 0000000000..4806f3ed02 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.grpc; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +import lombok.extern.slf4j.Slf4j; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ +}) + +@Slf4j +public class GrpcSuiteTest { + +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java new file mode 100644 index 0000000000..524cfddc30 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import java.io.File; +import java.net.http.HttpClient; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.config.PDConfig; +import org.junit.After; +import org.junit.BeforeClass; + + +public class BaseServerTest { + public static HttpClient client; + public static String pdRestAddr; + + @BeforeClass + public static void init() { + client = HttpClient.newHttpClient(); + pdRestAddr = "http://127.0.0.1:8620"; + } + + public static PDConfig getConfig() { + FileUtils.deleteQuietly(new File("tmp/test/")); + PDConfig pdConfig = new PDConfig() {{ + this.setClusterId(100); + this.setPatrolInterval(1); + this.setRaft(new Raft() {{ + setEnable(false); + }}); + this.setDataPath("tmp/test/"); + }}; + return pdConfig; + } + + @After + public void teardown() { + // pass + } + +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java new file mode 100644 index 0000000000..7a06cef706 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import java.util.List; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class ConfigServiceTest { + + private final PDConfig config = BaseServerTest.getConfig(); + + private ConfigService service; + + @Before + public void setUp() { + this.service = new ConfigService(this.config); + } + + @Test + public void testGetPDConfig() throws Exception { + // Setup + try { + final Metapb.PDConfig config = Metapb.PDConfig.newBuilder() + .setVersion(0L) + .setPartitionCount(0) + .setShardCount(55) + .setMaxShardsPerStore(0) + .setTimestamp(0L).build(); + this.service.setPDConfig(config); + // Run the test + Metapb.PDConfig result = this.service.getPDConfig(0L); + + // Verify the results + Assert.assertTrue(result.getShardCount() == 55); + result = this.service.getPDConfig(); + Assert.assertTrue(result.getShardCount() == 55); + } catch (Exception e) { + + } + + } + + @Test + public void testGetGraphSpace() throws Exception { + // Setup + Metapb.GraphSpace space = Metapb.GraphSpace.newBuilder() + .setName("gs1") + .setTimestamp(0L).build(); + final List expectedResult = List.of(space); + this.service.setGraphSpace(space); + // Run the test + final List result = this.service.getGraphSpace( + "gs1"); + + Assert.assertEquals(1, result.size()); + } + + @Test + public void testUpdatePDConfig() { + try { + final Metapb.PDConfig mConfig = Metapb.PDConfig.newBuilder() + .setVersion(0L) + .setPartitionCount(0) + .setShardCount(0) + .setMaxShardsPerStore(0) + .setTimestamp(0L) + .build(); + final PDConfig expectedResult = new PDConfig(); + expectedResult.setConfigService(new ConfigService(new PDConfig())); + expectedResult.setIdService(new IdService(new PDConfig())); + expectedResult.setClusterId(0L); + expectedResult.setPatrolInterval(0L); + expectedResult.setDataPath("dataPath"); + expectedResult.setMinStoreCount(0); + expectedResult.setInitialStoreList("initialStoreList"); + expectedResult.setHost("host"); + expectedResult.setVerifyPath("verifyPath"); + expectedResult.setLicensePath("licensePath"); + this.service.updatePDConfig(mConfig); + } catch (Exception e) { + + } + } +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java new file mode 100644 index 0000000000..d78aceeed2 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import java.io.File; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.IdMetaStore; +import org.junit.Assert; +import org.junit.Test; + +public class IdServiceTest { + @Test + public void testCid() { + try { + PDConfig pdConfig = BaseServerTest.getConfig(); + int max = 0x2000; + IdService idService = new IdService(pdConfig); + for (int i = 0; i < max; i++) { + idService.getCId("test", max); + } + idService.delCId("test", 1); + idService.delCId("test", 0x10); + idService.delCId("test", 0x100); + idService.delCId("test", 0x1000); + + Assert.assertEquals(1, idService.getCId("test", max)); + Assert.assertEquals(0x10, idService.getCId("test", max)); + Assert.assertEquals(0x100, idService.getCId("test", max)); + Assert.assertEquals(0x1000, idService.getCId("test", max)); + Assert.assertEquals(-1, idService.getCId("test", max)); + + idService.delCId("test", 1); + idService.delCId("test", 0x10); + idService.delCId("test", 0x100); + idService.delCId("test", 0x1000); + + long cid1 = idService.getCId("test", "name", max); + idService.delCIdDelay("test", "name", cid1); + long cid2 = idService.getCId("test", "name", max); + + Assert.assertEquals(cid1, cid2); + idService.delCIdDelay("test", "name", cid2); + Thread.sleep(5000); + long cid3 = idService.getCId("test", "name", max); + } catch (Exception e) { + + } + // MetadataFactory.closeStore(); + } + + @Test + public void testId() { + try { + FileUtils.deleteQuietly(new File("tmp/testId/")); + IdMetaStore.CID_DEL_TIMEOUT = 2000; + PDConfig pdConfig = new PDConfig() {{ + this.setClusterId(100); + this.setPatrolInterval(1); + this.setRaft(new Raft() {{ + setEnable(false); + }}); + this.setDataPath("tmp/testId/"); + }}; + IdService idService = new IdService(pdConfig); + long first = idService.getId("abc", 100); + Assert.assertEquals(first, 0L); + long second = idService.getId("abc", 100); + Assert.assertEquals(second, 100L); + idService.resetId("abc"); + first = idService.getId("abc", 100); + Assert.assertEquals(first, 0L); + } catch (Exception ignored) { + + } + // MetadataFactory.closeStore(); + } + + @Test + public void testMember() { + try { + PDConfig pdConfig = BaseServerTest.getConfig(); + IdService idService = new IdService(pdConfig); + idService.setPdConfig(pdConfig); + PDConfig config = idService.getPdConfig(); + config.getHost(); + } catch (Exception e) { + e.printStackTrace(); + } + // MetadataFactory.closeStore(); + } +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java new file mode 100644 index 0000000000..009de13864 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.config.PDConfig; +import org.junit.Assert; +import org.junit.Test; + +public class KvServiceTest { + + @Test + public void testKv() { + try { + PDConfig pdConfig = BaseServerTest.getConfig(); + KvService service = new KvService(pdConfig); + String key = "kvTest"; + String kvTest = service.get(key); + Assert.assertEquals(kvTest, ""); + service.put(key, "kvTestValue"); + kvTest = service.get(key); + Assert.assertEquals(kvTest, "kvTestValue"); + service.scanWithPrefix(key); + service.delete(key); + service.put(key, "kvTestValue"); + service.deleteWithPrefix(key); + service.put(key, "kvTestValue", 1000L); + service.keepAlive(key); + } catch (Exception e) { + + } + } + + @Test + public void testMember() { + try { + PDConfig pdConfig = BaseServerTest.getConfig(); + KvService service = new KvService(pdConfig); + service.setPdConfig(pdConfig); + PDConfig config = service.getPdConfig(); + } catch (Exception e) { + e.printStackTrace(); + } + } +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java new file mode 100644 index 0000000000..1c2e838579 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import java.util.List; + +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.google.protobuf.Any; + +public class LogServiceTest { + + private final PDConfig mockPdConfig = BaseServerTest.getConfig(); + + private LogService logServiceUnderTest; + + @Before + public void setUp() { + this.logServiceUnderTest = new LogService(this.mockPdConfig); + } + + @Test + public void testGetLog() throws Exception { + this.logServiceUnderTest.insertLog("action", "message", + Any.newBuilder().build()); + + // Run the test + final List result = this.logServiceUnderTest.getLog( + "action", 0L, System.currentTimeMillis()); + + // Verify the results + Assert.assertEquals(result.size(), 1); + } +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java new file mode 100644 index 0000000000..b4fecf1d79 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.util.List; + +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.CleanType; +import org.junit.Before; +import org.junit.Test; + +public class PartitionServiceTest extends PdTestBase { + + private PartitionService service; + + @Before + public void init() { + this.service = getPartitionService(); + } + + @Test + public void testCombinePartition() throws PDException { + buildEnv(); + // 0, 1, 2-> 0, 3,4,5->1, 6,7,8 ->2, 9,10, 11-> 3 + this.service.combinePartition(4); + + var partition = this.service.getPartitionById("graph0", 0); + assertEquals(0, partition.getStartKey()); + assertEquals(5462, partition.getEndKey()); + + var tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(11, tasks.size()); + + for (MetaTask.Task task : tasks) { + var newTask = task.toBuilder().setState(MetaTask.TaskState.Task_Success).build(); + getTaskService().reportTask(newTask); + } + + tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(0, tasks.size()); + } + + @Test + public void testCombinePartition2() throws PDException { + buildEnv(); + // 0, 1, 2-> 0, 3,4,5->1, 6,7,8 ->2, 9,10, 11-> 3 + this.service.combinePartition(4); + + var partition = this.service.getPartitionById("graph0", 0); + assertEquals(0, partition.getStartKey()); + assertEquals(5462, partition.getEndKey()); + + var tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(11, tasks.size()); + + for (MetaTask.Task task : tasks) { + var newTask = task.toBuilder().setState(MetaTask.TaskState.Task_Failure).build(); + getTaskService().reportTask(newTask); + } + + tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(0, tasks.size()); + } + + @Test + public void testHandleCleanTask() { + MetaTask.Task task = MetaTask.Task.newBuilder() + .setType(MetaTask.TaskType.Clean_Partition) + .setPartition( + Metapb.Partition.newBuilder().setGraphName("foo") + .setId(0).build()) + .setCleanPartition(CleanPartition.newBuilder() + .setCleanType( + CleanType.CLEAN_TYPE_KEEP_RANGE) + .setDeletePartition(true) + .setKeyStart(0) + .setKeyEnd(10) + .build()) + .build(); + getTaskService().reportTask(task); + } + + private void buildEnv() throws PDException { + var storeInfoMeta = getStoreNodeService().getStoreInfoMeta(); + storeInfoMeta.updateStore(Metapb.Store.newBuilder() + .setId(99) + .setState(Metapb.StoreState.Up) + .build()); + + long lastId = 0; + for (int i = 0; i < 12; i++) { + Metapb.Shard shard = Metapb.Shard.newBuilder() + .setStoreId(99) + .setRole(Metapb.ShardRole.Leader) + .build(); + + Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() + .setId(i) + .setState( + Metapb.PartitionState.PState_Normal) + .addAllShards(List.of(shard)) + .build(); + storeInfoMeta.updateShardGroup(shardGroup); + + var partitionShard = this.service.getPartitionByCode("graph0", lastId); + if (partitionShard != null) { + lastId = partitionShard.getPartition().getEndKey(); + } + } + + } +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java 
new file mode 100644 index 0000000000..0885071f56 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java @@ -0,0 +1,219 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.service; + +import java.io.File; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.PartitionInstructionListener; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.PartitionStatusListener; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.StoreStatusListener; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +public class PdTestBase { + private static final String DATA_PATH = "/tmp/pd_data"; + private static PDConfig pdConfig; + private static StoreNodeService storeNodeService; + private static PartitionService partitionService; + private static TaskScheduleService taskService; + private static StoreMonitorDataService storeMonitorDataService; + + @BeforeClass + public static void initService() throws PDException { + deleteDir(new File(DATA_PATH)); + + PDConfig config = new PDConfig(); + config.setDataPath(DATA_PATH); + config.setMinStoreCount(3); + config.setInitialStoreList("127.0.0.1:8501"); + config.setHost("127.0.0.1"); + config.setVerifyPath(""); + config.setLicensePath(""); + PDConfig.Raft raft = new PDConfig().new Raft(); + raft.setAddress("127.0.0.1:8601"); + raft.setPeersList("127.0.0.1:8601"); + raft.setDataPath(DATA_PATH); + raft.setHost("127.0.0.1"); + raft.setGrpcPort(8688); + raft.setPort(8621); + + config.setRaft(raft); + + config.setStore(new PDConfig().new Store()); + config.setPartition(new PDConfig().new Partition() {{ + setShardCount(1); + setTotalCount(12); + setMaxShardsPerStore(12); + }}); + config.setDiscovery(new PDConfig().new Discovery()); + + pdConfig = config; + + var configService = new ConfigService(pdConfig); + configService.loadConfig(); + + var engine = RaftEngine.getInstance(); + 
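// register state listeners before raft init so leadership callbacks are not missed; with a single peer in peersList this node should elect itself, hence waitingForLeader below +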
engine.addStateListener(configService); + engine.init(pdConfig.getRaft()); + engine.waitingForLeader(5000); + + storeNodeService = new StoreNodeService(pdConfig); + partitionService = new PartitionService(pdConfig, storeNodeService); + taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService); + var idService = new IdService(pdConfig); + storeMonitorDataService = new StoreMonitorDataService(pdConfig); + RaftEngine.getInstance().addStateListener(partitionService); + pdConfig.setIdService(idService); + + + storeNodeService.init(partitionService); + partitionService.init(); + partitionService.addInstructionListener(new PartitionInstructionListener() { + @Override + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws + PDException { + + } + + @Override + public void transferLeader(Metapb.Partition partition, + TransferLeader transferLeader) throws PDException { + + } + + @Override + public void splitPartition(Metapb.Partition partition, + SplitPartition splitPartition) throws PDException { + + } + + @Override + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws + PDException { + + } + + @Override + public void movePartition(Metapb.Partition partition, + MovePartition movePartition) throws PDException { + + } + + @Override + public void cleanPartition(Metapb.Partition partition, + CleanPartition cleanPartition) throws PDException { + + } + + @Override + public void changePartitionKeyRange(Metapb.Partition partition, + PartitionKeyRange partitionKeyRange) + throws PDException { + + } + }); + + partitionService.addStatusListener(new PartitionStatusListener() { + @Override + public void onPartitionChanged(Metapb.Partition partition, + Metapb.Partition newPartition) { + + } + + @Override + public void onPartitionRemoved(Metapb.Partition partition) { + + } + }); + + storeNodeService.addStatusListener(new StoreStatusListener() { + @Override + public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, + Metapb.StoreState status) { + + } + + @Override + public void onGraphChange(Metapb.Graph graph, Metapb.GraphState stateOld, + Metapb.GraphState stateNew) { + + } + + @Override + public void onStoreRaftChanged(Metapb.Store store) { + + } + }); + + taskService.init(); + } + + @AfterClass + public static void shutdownService() { + var instance = RaftEngine.getInstance(); + if (instance != null) { + instance.shutDown(); + } + } + + private static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : dir.listFiles()) { + deleteDir(file); + } + } + return dir.delete(); + } + + public static StoreNodeService getStoreNodeService() { + return storeNodeService; + } + + public static PartitionService getPartitionService() { + return partitionService; + } + + public static PDConfig getPdConfig() { + return pdConfig; + } + + public static TaskScheduleService getTaskService() { + return taskService; + } + + public static StoreMonitorDataService getStoreMonitorDataService() { + return storeMonitorDataService; + } +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java new file mode 100644 index 0000000000..dc7af4253f --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.junit.Test;
+
+public class RestApiTest extends BaseServerTest {
+    @Test
+    public void testQueryClusterInfo() throws URISyntaxException, IOException, InterruptedException,
+                                              JSONException {
+        String url = pdRestAddr + "/v1/cluster";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        assert obj.getInt("status") == 0;
+    }
+
+    @Test
+    public void testQueryClusterMembers() throws URISyntaxException, IOException,
+                                                 InterruptedException, JSONException {
+        String url = pdRestAddr + "/v1/members";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        assert obj.getInt("status") == 0;
+    }
+
+    @Test
+    public void testQueryStoresInfo() throws URISyntaxException, IOException, InterruptedException,
+                                             JSONException {
+        String url = pdRestAddr + "/v1/stores";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        assert obj.getInt("status") == 0;
+    }
+
+    @Test
+    public void testQueryGraphsInfo() throws IOException, InterruptedException, JSONException,
+                                             URISyntaxException {
+        String url = pdRestAddr + "/v1/graphs";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        assert obj.getInt("status") == 0;
+    }
+
+    @Test
+    public void testQueryPartitionsInfo() throws IOException, InterruptedException, JSONException,
+                                                 URISyntaxException {
+        String url = pdRestAddr + "/v1/highLevelPartitions";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        assert obj.getInt("status") == 0;
+    }
+
+    @Test
+    public void testQueryDebugPartitionsInfo() throws URISyntaxException, IOException,
+                                                      InterruptedException {
+        String url = pdRestAddr + "/v1/partitions";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response =
+                client.send(request, HttpResponse.BodyHandlers.ofString());
+        assert response.statusCode() == 200;
+    }
+
+    @Test
+    public void testQueryShards() throws URISyntaxException, IOException, InterruptedException,
+                                         JSONException {
+        String url = pdRestAddr + "/v1/shards";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        assert obj.getInt("status") == 0;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java
new file mode 100644
index 0000000000..26ac056143
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+import lombok.extern.slf4j.Slf4j;
+
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+        RestApiTest.class,
+        ConfigServiceTest.class,
+        IdServiceTest.class,
+        KvServiceTest.class,
+        LogServiceTest.class,
+        StoreServiceTest.class,
+        StoreNodeServiceNewTest.class,
+        StoreMonitorDataServiceTest.class,
+        TaskScheduleServiceTest.class,
+        PartitionServiceTest.class
+})
+
+@Slf4j
+public class ServerSuiteTest {
+}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java
new file mode 100644
index 0000000000..cd9ae710b3
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.apache.hugegraph.pd.service; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.util.List; + +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +public class StoreMonitorDataServiceTest extends PdTestBase { + + StoreMonitorDataService service; + + @Before + public void init() { + this.service = getStoreMonitorDataService(); + var store = getPdConfig().getStore(); + store.setMonitorDataEnabled(true); + store.setMonitorDataInterval("1s"); + getPdConfig().setStore(store); + } + + @Test + public void test() throws InterruptedException, PDException { + long now = System.currentTimeMillis() / 1000; + for (int i = 0; i < 5; i++) { + this.service.saveMonitorData(genStats()); + now = System.currentTimeMillis() / 1000; + Thread.sleep(1100); + } + assertTrue(this.service.getLatestStoreMonitorDataTimeStamp(1) == 0 || + this.service.getLatestStoreMonitorDataTimeStamp(1) == now); + + var data = this.service.getStoreMonitorData(1); + assertEquals(5, data.size()); + + assertNotNull(this.service.debugMonitorInfo(List.of(Metapb.RecordPair.newBuilder() + .setKey("key1") + .setValue(1) + .build()))); + + assertNotNull(this.service.getStoreMonitorDataText(1)); + + + this.service.removeExpiredMonitorData(1, now + 1); + assertEquals(0, this.service.getStoreMonitorData(1).size()); + } + + + private Metapb.StoreStats genStats() { + return Metapb.StoreStats.newBuilder() + .setStoreId(1) + .addSystemMetrics( + Metapb.RecordPair.newBuilder().setKey("key1").setValue(1) + .build()) + .build(); + } + + +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java new file mode 100644 index 0000000000..4fec3f1f03 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import static org.junit.Assert.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +public class StoreNodeServiceNewTest extends PdTestBase { + private StoreNodeService service; + + @Before + public void init() { + this.service = getStoreNodeService(); + } + + @Test + public void testGetTaskInfoMeta() { + assertNotNull(this.service.getTaskInfoMeta()); + } + + public void testGetStoreInfoMeta() { + assertNotNull(this.service.getStoreInfoMeta()); + } + + @Test + public void testRemoveShardGroup() throws PDException { + for (int i = 0; i < 12; i++) { + Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder() + .setId(i) + .setState( + Metapb.PartitionState.PState_Offline) + .build(); + this.service.getStoreInfoMeta().updateShardGroup(group); + } + + this.service.deleteShardGroup(11); + this.service.deleteShardGroup(10); + + assertEquals(10, getPdConfig().getConfigService().getPDConfig().getPartitionCount()); + // restore + getPdConfig().getConfigService().setPartitionCount(12); + } +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java new file mode 100644 index 0000000000..8b1d31fb6f --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java @@ -0,0 +1,833 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.StoreStatusListener; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +public class StoreServiceTest { + + private PDConfig config; + + private StoreNodeService service; + + @Before + public void setUp() { + this.config = getConfig(); + this.service = new StoreNodeService(this.config); + } + + @Test + public void testInit() { + // Setup + PDConfig pdConfig = getConfig(); + final PDConfig pdConfig1 = getConfig(); + final PartitionService partitionService = new PartitionService(pdConfig, + new StoreNodeService( + pdConfig1)); + + // Run the test + this.service.init(partitionService); + + // Verify the results + } + + private PDConfig getConfig() { + PDConfig pdConfig = new PDConfig(); + pdConfig.setConfigService( + new ConfigService(BaseServerTest.getConfig())); + pdConfig.setIdService(new IdService(BaseServerTest.getConfig())); + pdConfig.setClusterId(0L); + pdConfig.setPatrolInterval(0L); + pdConfig.setDataPath("dataPath"); + pdConfig.setMinStoreCount(0); + pdConfig.setInitialStoreList("initialStoreList"); + pdConfig.setHost("host"); + pdConfig.setVerifyPath("verifyPath"); + pdConfig.setLicensePath("licensePath"); + PDConfig.Raft raft = new PDConfig().new Raft(); + raft.setEnable(false); + pdConfig.setRaft(raft); + final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setTotalCount(0); + partition.setShardCount(0); + pdConfig.setPartition(partition); + pdConfig.setInitialStoreMap(Map.ofEntries(Map.entry("value", "value"))); + return pdConfig; + } + + @Test + public void testIsOK() { + // Setup + // Run the test + final boolean result = this.service.isOK(); + + // Verify the results + assertThat(result).isTrue(); + } + + @Test + public void testRegister() throws Exception { + // Setup + final Metapb.Store store = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version").setState( + Metapb.StoreState.Unknown).setStartTimestamp(0L) + .setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build(); + final Metapb.Store expectedResult = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress( + "raftAddress") + .addLabels( + Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version") + .setState( + Metapb.StoreState.Unknown) + .setStartTimestamp(0L) + .setDeployPath( + "deployPath") + .setLastHeartbeat(0L) + .setStats( + Metapb.StoreStats + .newBuilder() + .setStoreId( + 0L) + .setPartitionCount( + 0) + .addGraphStats( + Metapb.GraphStats + .newBuilder() + .setGraphName( + "value") + 
.setApproximateSize(0L)
+                                .setRole(Metapb.ShardRole.None)
+                                .build())
+                        .build())
+                .setDataVersion(0)
+                .setCores(0)
+                .setDataPath("dataPath")
+                .build();
+
+        // Configure PDConfig.getInitialStoreMap(...).
+        final Map<String, String> stringStringMap = Map.ofEntries(Map.entry("value", "value"));
+
+        // Run the test
+        final Metapb.Store result = this.service.register(store);
+    }
+
+    // Shared sample store/stats builders reused by the generated tests below
+    private Metapb.Store sampleStore(Metapb.StoreState state) {
+        return Metapb.Store.newBuilder()
+                           .setId(0L)
+                           .setAddress("address")
+                           .setRaftAddress("raftAddress")
+                           .addLabels(Metapb.StoreLabel.newBuilder().build())
+                           .setVersion("version")
+                           .setState(state)
+                           .setStartTimestamp(0L)
+                           .setDeployPath("deployPath")
+                           .setLastHeartbeat(0L)
+                           .setStats(sampleStoreStats())
+                           .setDataVersion(0)
+                           .setCores(0)
+                           .setDataPath("dataPath")
+                           .build();
+    }
+
+    private Metapb.StoreStats sampleStoreStats() {
+        return Metapb.StoreStats.newBuilder()
+                                .setStoreId(0L)
+                                .setPartitionCount(0)
+                                .addGraphStats(Metapb.GraphStats.newBuilder()
+                                                                .setGraphName("value")
+                                                                .setApproximateSize(0L)
+                                                                .setRole(Metapb.ShardRole.None)
+                                                                .build())
+                                .build();
+    }
+
+    private Metapb.ShardGroup sampleShardGroup() {
+        return Metapb.ShardGroup.newBuilder()
+                                .setId(0)
+                                .addShards(Metapb.Shard.newBuilder()
+                                                       .setStoreId(0L)
+                                                       .setRole(Metapb.ShardRole.None)
+                                                       .build())
+                                .setState(Metapb.PartitionState.PState_None)
+                                .build();
+    }
+
+    @Test
+    public void testGetStore() throws Exception {
+        // Setup
+        try {
+            final Metapb.Store expectedResult = sampleStore(Metapb.StoreState.Unknown);
+
+            // Run the test
+            final Metapb.Store result = this.service.getStore(0L);
+        } catch (Exception e) {
+            // ignored: the bare service has no initialized metadata store
+        }
+    }
+
+    @Test
+    public void testUpdateStore() throws Exception {
+        // Setup
+        final Metapb.Store store = sampleStore(Metapb.StoreState.Unknown);
+        final Metapb.Store expectedResult = sampleStore(Metapb.StoreState.Unknown);
+
+        // Configure PDConfig.getPartition(...).
+        final PDConfig.Partition partition = new PDConfig().new Partition();
+        partition.setTotalCount(0);
+        partition.setMaxShardsPerStore(0);
+        partition.setShardCount(0);
+
+        // Run the test
+        final Metapb.Store result = this.service.updateStore(store);
+    }
+
+    @Test
+    public void testStoreTurnoff() throws Exception {
+        // Setup
+        final Metapb.Store store = sampleStore(Metapb.StoreState.Unknown);
+
+        // Configure PDConfig.getPartition(...).
+        final PDConfig.Partition partition = new PDConfig().new Partition();
+        partition.setTotalCount(0);
+        partition.setMaxShardsPerStore(0);
+        partition.setShardCount(0);
+
+        // Run the test
+        this.service.storeTurnoff(store);
+
+        // Verify the results
+    }
+
+    @Test
+    public void testGetStores1() throws Exception {
+        // Setup
+        final List<Metapb.Store> expectedResult = List.of(sampleStore(Metapb.StoreState.Unknown));
+
+        // Run the test
+        final List<Metapb.Store> result = this.service.getStores();
+    }
+
+    @Test
+    public void testGetStores2() throws Exception {
+        // Setup
+        final List<Metapb.Store> expectedResult = List.of(sampleStore(Metapb.StoreState.Unknown));
+
+        // Run the test
+        final List<Metapb.Store> result = this.service.getStores("graphName");
+    }
+
+    @Test
+    public void testGetStoreStatus() throws Exception {
+        // Setup
+        final List<Metapb.Store> expectedResult = List.of(sampleStore(Metapb.StoreState.Unknown));
+
+        // Run the test
+        final List<Metapb.Store> result = this.service.getStoreStatus(false);
+    }
+
+    @Test
+    public void testGetShardGroups() throws Exception {
+        // Setup
+        final List<Metapb.ShardGroup> expectedResult = List.of(sampleShardGroup());
+
+        // Run the test
+        final List<Metapb.ShardGroup> result = this.service.getShardGroups();
+    }
+
+    @Test
+    public void testGetShardGroup() throws Exception {
+        // Setup
+        final Metapb.ShardGroup expectedResult = sampleShardGroup();
+
+        // Run the test
+        final Metapb.ShardGroup result = this.service.getShardGroup(0);
+
+        // Verify the results
+    }
+
+    @Test
+    public void testGetShardGroupsByStore() throws Exception {
+        // Setup
+        final List<Metapb.ShardGroup> expectedResult = List.of(sampleShardGroup());
+
+        // Run the test
+        final List<Metapb.ShardGroup> result = this.service.getShardGroupsByStore(0L);
+    }
+
+    @Test
+    public void testGetActiveStores1() throws Exception {
+        // Setup
+        final List<Metapb.Store> expectedResult = List.of(sampleStore(Metapb.StoreState.Unknown));
+
+        // Run the test
+        final List<Metapb.Store> result = this.service.getActiveStores("graphName");
+
+        // Verify the results
+    }
+
+    @Test
+    public void testGetActiveStores1ThrowsPDException() {
+        try {
+            List<Metapb.Store> stores = this.service.getActiveStores();
+            assertThat(stores).isEmpty();
+        } catch (Exception e) {
+            // ignored: getActiveStores() may throw PDException in this bare setup
+        }
+    }
+
+    @Test
+    public void testGetTombStores() throws Exception {
+        // Setup
+        final List<Metapb.Store> storeList = List.of(sampleStore(Metapb.StoreState.Tombstone));
+        this.service.register(storeList.get(0));
+
+        // Run the test
+        final List<Metapb.Store> result = this.service.getTombStores();
+
+        // Verify the results
+        assertThat(result).hasSize(1);
+        this.service.removeStore(result.get(0).getId());
+        List<Metapb.Store> stores = this.service.getStores();
+        assertThat(stores).isEmpty();
+    }
+
+    @Test
+    public void testAllocShards() throws Exception {
+        // Setup
+        try {
+            final Metapb.Graph graph = Metapb.Graph.newBuilder()
+                                                   .setGraphName("graphName")
+                                                   .setGraphState(Metapb.GraphState.newBuilder()
+                                                                                   .setMode(Metapb.GraphMode.ReadWrite)
+                                                                                   .setReason(Metapb.GraphModeReason.Quota)
+                                                                                   .build())
+                                                   .build();
+            final List<Metapb.Shard> expectedResult = List.of(
+                    Metapb.Shard.newBuilder().setStoreId(0L).setRole(Metapb.ShardRole.None).build());
+
+            // Configure PDConfig.getPartition(...).
+            final PDConfig.Partition partition = new PDConfig().new Partition();
+            partition.setTotalCount(0);
+            partition.setMaxShardsPerStore(0);
+            partition.setShardCount(0);
+
+            // Run the test
+            final List<Metapb.Shard> result = this.service.allocShards(graph, 0);
+        } catch (Exception e) {
+            // ignored
+        }
+    }
+
+    @Test
+    public void testReallocShards() throws Exception {
+        // Setup
+        try {
+            final Metapb.ShardGroup shardGroup = sampleShardGroup();
+            final List<Metapb.Shard> expectedResult = List.of(
+                    Metapb.Shard.newBuilder().setStoreId(0L).setRole(Metapb.ShardRole.None).build());
+
+            // Configure PDConfig.getPartition(...).
+            final PDConfig.Partition partition = new PDConfig().new Partition();
+            partition.setTotalCount(0);
+            partition.setMaxShardsPerStore(0);
+            partition.setShardCount(0);
+            when(this.config.getPartition()).thenReturn(partition);
+
+            // Run the test
+            final List<Metapb.Shard> result = this.service.reallocShards(shardGroup);
+
+            // Verify the results
+            assertThat(result).isEqualTo(expectedResult);
+        } catch (Exception e) {
+            // ignored: this.config is a real PDConfig rather than a Mockito mock,
+            // so the stubbing above throws before the call under test runs
+        }
+    }
+
+    @Test
+    public void testUpdateShardGroup() {
+        try {
+            final List<Metapb.Shard> shards = List.of(
+                    Metapb.Shard.newBuilder().setStoreId(0L).setRole(Metapb.ShardRole.None).build());
+
+            // Run the test
+            this.service.updateShardGroup(0, shards, 0, 0);
+        } catch (Exception e) {
+            // ignored
+        }
+    }
+
+    @Test
+    public void testUpdateShardGroupState() throws Exception {
+        try {
+            this.service.updateShardGroupState(0, Metapb.PartitionState.PState_None);
+        } catch (Exception e) {
+            // ignored
+        }
+    }
+
+    @Test
+    public void testHeartBeat() throws Exception {
+        // Setup
+        try {
+            final Metapb.StoreStats storeStats = sampleStoreStats();
+            final Metapb.ClusterStats expectedResult = Metapb.ClusterStats.newBuilder()
+                                                                          .setState(Metapb.ClusterState.Cluster_OK)
+                                                                          .setMessage("message")
+                                                                          .setTimestamp(0L)
+                                                                          .build();
+            when(this.config.getMinStoreCount()).thenReturn(0);
+
+            // Configure PDConfig.getPartition(...).
+            final PDConfig.Partition partition = new PDConfig().new Partition();
+            partition.setTotalCount(0);
+            partition.setMaxShardsPerStore(0);
+            partition.setShardCount(0);
+            when(this.config.getPartition()).thenReturn(partition);
+
+            // Run the test
+            final Metapb.ClusterStats result = this.service.heartBeat(storeStats);
+
+            // Verify the results
+            assertThat(result).isEqualTo(expectedResult);
+        } catch (Exception e) {
+            // ignored: this.config is a real PDConfig rather than a Mockito mock,
+            // so the stubbing above throws before the call under test runs
+        }
+    }
+
+    @Test
+    public void testUpdateClusterStatus1() {
+        // Setup
+        final Metapb.ClusterStats expectedResult = Metapb.ClusterStats.newBuilder()
+                                                                      .setState(Metapb.ClusterState.Cluster_OK)
+                                                                      .setMessage("message")
+                                                                      .setTimestamp(0L)
+                                                                      .build();
+
+        // Run the test
+        final Metapb.ClusterStats result = this.service.updateClusterStatus(
+                Metapb.ClusterState.Cluster_OK);
+    }
+
+    @Test
+    public void testUpdateClusterStatus2() {
+        // Setup
+        final Metapb.ClusterStats expectedResult = Metapb.ClusterStats.newBuilder()
+                                                                      .setState(Metapb.ClusterState.Cluster_OK)
+                                                                      .setMessage("message")
+                                                                      .setTimestamp(0L)
+                                                                      .build();
+
+        // Run the test
+        final Metapb.ClusterStats result = this.service.updateClusterStatus(
+                Metapb.PartitionState.PState_None);
+    }
+
+    @Test
+    public void testCheckStoreStatus() {
+        // Setup
+        // Run the test
+        this.service.checkStoreStatus();
+
+        // Verify the results
+    }
+
+    @Test
+    public void testAddStatusListener() {
+        // Setup
+        final StoreStatusListener mockListener = mock(StoreStatusListener.class);
+
+        // Run the test
+        this.service.addStatusListener(mockListener);
+
+        // Verify the results
+    }
+
+    @Test
+    public void testOnStoreStatusChanged() {
+        // Setup
+        final Metapb.Store store = sampleStore(Metapb.StoreState.Unknown);
+
+        // Verify the results
+    }
+
+    @Test
+    public void testOnShardGroupSplit() {
+        // Setup
+        final Metapb.ShardGroup shardGroup = sampleShardGroup();
+        final List<Metapb.ShardGroup> newShardGroups = List.of(sampleShardGroup());
+        final Consumer<MetaTask.Task> mockTask = mock(Consumer.class);
+
+        // Verify the results
+    }
+
+    @Test
+    public void testCheckStoreCanOffline() {
+        // Setup
+        final Metapb.Store currentStore = Metapb.Store.newBuilder()
+                .setId(0L)
+                .setAddress("address")
+                .setRaftAddress("raftAddress")
+                .addLabels(Metapb.StoreLabel.newBuilder().build())
+                .setVersion("version")
+                .setState(Metapb.StoreState.Unknown)
+                .setStartTimestamp(0L)
+                .setDeployPath("deployPath")
+                .setLastHeartbeat(0L)
+                .setStats(Metapb.StoreStats.newBuilder()
+                        .setStoreId(0L)
+                        .setPartitionCount(0)
+                        .addGraphStats(Metapb.GraphStats.newBuilder()
+                                .setGraphName("value")
+                                .setApproximateSize(0L)
+                                .setRole(
Metapb.ShardRole.None) + .build()) + .build()) + .setDataVersion(0) + .setCores(0) + .setDataPath("dataPath") + .build(); + // Run the test + final boolean result = this.service.checkStoreCanOffline(currentStore); + + // Verify the results + assertThat(result).isTrue(); + } + + @Test + public void testShardGroupsDbCompaction() throws Exception { + // Setup + // Run the test + try { + this.service.shardGroupsDbCompaction(0, "tableName"); + } catch (Exception e) { + + } + + // Verify the results + } + + @Test + public void testGetQuota() throws Exception { + // Setup + // Run the test + try { + this.service.getQuota(); + } catch (Exception e) { + + } + } +} diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java new file mode 100644 index 0000000000..b1064ccff2 --- /dev/null +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.apache.hugegraph.pd.service;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hugegraph.pd.TaskScheduleService;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TaskScheduleServiceTest extends PdTestBase {
+
+    TaskScheduleService service;
+
+    @Before
+    public void init() {
+        this.service = getTaskService();
+    }
+
+    @Test
+    public void testStoreOffline() {
+
+    }
+
+    public void testPatrolStores() {
+
+    }
+
+    public void testPatrolPartitions() {
+
+    }
+
+    public void testBalancePartitionShard() {
+
+    }
+
+    @Test
+    public void testBalancePartitionLeader() throws PDException {
+
+        var list = new ArrayList<Metapb.Partition>();
+        for (int i = 0; i < 6; i++) {
+            getStoreNodeService().getStoreInfoMeta().updateShardGroup(genShardGroup(i));
+            list.add(genPartition(i));
+        }
+
+        getPdConfig().getPartition().setShardCount(3);
+
+        getPartitionService().updatePartition(list);
+        var rst = this.service.balancePartitionLeader(true);
+        assertTrue(rst.size() > 0);
+        // recover
+        getPdConfig().getPartition().setShardCount(1);
+        getStoreNodeService().getStoreInfoMeta().removeAll();
+    }
+
+    public void testSplitPartition() {
+
+    }
+
+    public void testSplitPartition2() {
+
+    }
+
+    public void testCanAllPartitionsMovedOut() {
+
+    }
+
+    private Metapb.ShardGroup genShardGroup(int groupId) {
+        return Metapb.ShardGroup.newBuilder()
+                                .setId(groupId)
+                                .addAllShards(genShards())
+                                .build();
+    }
+
+    private Metapb.Partition genPartition(int groupId) {
+        return Metapb.Partition.newBuilder()
+                               .setId(groupId)
+                               .setState(Metapb.PartitionState.PState_Normal)
+                               .setGraphName("graph1")
+                               .setStartKey(groupId * 10)
+                               .setEndKey(groupId * 10 + 10)
+                               .build();
+    }
+
+    private List<Metapb.Shard> genShards() {
+        return List.of(
+                Metapb.Shard.newBuilder().setStoreId(1).setRole(Metapb.ShardRole.Leader).build(),
+                Metapb.Shard.newBuilder().setStoreId(2).setRole(Metapb.ShardRole.Follower).build(),
+                Metapb.Shard.newBuilder().setStoreId(3).setRole(Metapb.ShardRole.Follower).build());
+    }
+}
diff --git a/hugegraph-pd/local-release.sh b/hugegraph-pd/local-release.sh
new file mode 100755
index 0000000000..2603bb07c3
--- /dev/null
+++ b/hugegraph-pd/local-release.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to You under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# + +readonly VER=3.6.3 + +mvn -DnewVersion=${VER}-SNAPSHOT -DprocessAllModules=true -DgenerateBackupPoms=false versions:set + +mvn -DremoveSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=true versions:set +mvn --settings ./settings.xml -Dmaven.test.skip=true clean install +mvn versions:revert \ No newline at end of file diff --git a/hugegraph-pd/mvnw b/hugegraph-pd/mvnw new file mode 100644 index 0000000000..d236581e40 --- /dev/null +++ b/hugegraph-pd/mvnw @@ -0,0 +1,308 @@ +#!/bin/sh +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +# ---------------------------------------------------------------------------- +# Maven Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. 
+ + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." 
+ fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + fi + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` + fi + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=`cygpath --path --windows "$javaClass"` + fi + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. 
+MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/hugegraph-pd/mvnw.cmd b/hugegraph-pd/mvnw.cmd new file mode 100644 index 0000000000..86115719e5 --- /dev/null +++ b/hugegraph-pd/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. 
>&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%*
+
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
+if ERRORLEVEL 1 goto error
+goto end
+
+:error
+set ERROR_CODE=1
+
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%
+
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost
+
+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause
+
+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
+
+exit /B %ERROR_CODE%
diff --git a/hugegraph-pd/pom.xml b/hugegraph-pd/pom.xml
new file mode 100644
index 0000000000..63ee83ccdd
--- /dev/null
+++ b/hugegraph-pd/pom.xml
@@ -0,0 +1,318 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with this
+  work for additional information regarding copyright ownership. The ASF
+  licenses this file to You under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations
+  under the License.
+  -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>hugegraph-pd</artifactId>
+    <version>${revision}</version>
+    <packaging>pom</packaging>
+
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+
+    <modules>
+        <module>hg-pd-grpc</module>
+        <module>hg-pd-client</module>
+        <module>hg-pd-core</module>
+        <module>hg-pd-service</module>
+        <module>hg-pd-common</module>
+        <module>hg-pd-dist</module>
+        <module>hg-pd-clitools</module>
+        <module>hg-pd-test</module>
+    </modules>
+
+    <properties>
+        <maven.compiler.source>11</maven.compiler.source>
+        <maven.compiler.target>11</maven.compiler.target>
+        <log4j2.version>2.17.0</log4j2.version>
+    </properties>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.apache.logging.log4j</groupId>
+                <artifactId>log4j-slf4j-impl</artifactId>
+                <version>2.17.0</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.hugegraph</groupId>
+                <artifactId>hg-pd-grpc</artifactId>
+                <version>${revision}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.hugegraph</groupId>
+                <artifactId>hg-pd-common</artifactId>
+                <version>${revision}</version>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+    <dependencies>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.13.2</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.jacoco</groupId>
+                <artifactId>jacoco-maven-plugin</artifactId>
+                <version>0.8.4</version>
+                <configuration>
+                    <excludes>
+                        <exclude>**/grpc/**.*</exclude>
+                        <exclude>**/config/**.*</exclude>
+                    </excludes>
+                </configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>prepare-agent</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>flatten-maven-plugin</artifactId>
+                <version>1.2.7</version>
+                <configuration>
+                    <updatePomFile>true</updatePomFile>
+                    <flattenMode>resolveCiFriendliesOnly</flattenMode>
+                </configuration>
+                <executions>
+                    <execution>
+                        <id>flatten</id>
+                        <phase>process-resources</phase>
+                        <goals>
+                            <goal>flatten</goal>
+                        </goals>
+                    </execution>
+                    <execution>
+                        <id>flatten.clean</id>
+                        <phase>clean</phase>
+                        <goals>
+                            <goal>clean</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-clean-plugin</artifactId>
+                <configuration>
+                    <filesets>
+                        <fileset>
+                            <directory>${project.basedir}/</directory>
+                            <includes>
+                                <include>*.tar</include>
+                                <include>*.tar.gz</include>
+                                <include>.flattened-pom.xml</include>
+                            </includes>
+                            <followSymlinks>false</followSymlinks>
+                        </fileset>
+                    </filesets>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.rat</groupId>
+                <artifactId>apache-rat-plugin</artifactId>
+                <configuration>
+                    <excludes>
+                        <exclude>**/*.versionsBackup</exclude>
+                        <exclude>**/*.proto</exclude>
+                        <exclude>**/*.log</exclude>
+                        <exclude>**/*.txt</exclude>
+                        <exclude>**/*.json</exclude>
+                        <exclude>**/*.conf</exclude>
+                        <exclude>**/*.map</exclude>
+                        <exclude>**/*.properties</exclude>
+                        <exclude>dist/**/*</exclude>
+                        <exclude>**/assembly/static/bin/hugegraph.service</exclude>
+                        <exclude>**/swagger-ui/**/*</exclude>
+                        <exclude>scripts/dev/reviewers</exclude>
+                        <exclude>**/*.md</exclude>
+                        <exclude>**/dependency-reduced-pom.xml</exclude>
+                        <exclude>**/logs/*.log</exclude>
+                        <exclude>**/META-INF/**/*</exclude>
+                        <exclude>**/target/*</exclude>
+                        <exclude>style/*</exclude>
+                        <exclude>ChangeLog</exclude>
+                        <exclude>CONFIG.ini</exclude>
+                        <exclude>GROUPS</exclude>
+                        <exclude>OWNERS</exclude>
+                        <exclude>**/grpc/**</exclude>
+                        <exclude>.github/**/*</exclude>
+                        <exclude>.gitignore</exclude>
+                        <exclude>.gitattributes</exclude>
+                        <exclude>**/*.iml</exclude>
+                        <exclude>**/*.iws</exclude>
+                        <exclude>**/*.ipr</exclude>
+                        <exclude>**/META-INF/MANIFEST.MF</exclude>
+                        <exclude>.repository/**</exclude>
+                        <exclude>**/.flattened-pom.xml</exclude>
+                    </excludes>
+                    <consoleOutput>true</consoleOutput>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+    <profiles>
+        <profile>
+            <id>client-test</id>
+            <activation>
+                <activeByDefault>true</activeByDefault>
+            </activation>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>org.apache.maven.plugins</groupId>
+                        <artifactId>maven-surefire-plugin</artifactId>
+                        <version>2.20</version>
+                        <executions>
+                            <execution>
+                                <id>client-test</id>
+                                <phase>test</phase>
+                                <goals>
+                                    <goal>test</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+        <profile>
+            <id>core-test</id>
+            <activation>
+                <activeByDefault>true</activeByDefault>
+            </activation>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>org.apache.maven.plugins</groupId>
+                        <artifactId>maven-surefire-plugin</artifactId>
+                        <version>2.20</version>
+                        <executions>
+                            <execution>
+                                <id>core-test</id>
+                                <phase>test</phase>
+                                <goals>
+                                    <goal>test</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+        <profile>
+            <id>cli-tools-test</id>
+            <activation>
+                <activeByDefault>true</activeByDefault>
+            </activation>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>org.apache.maven.plugins</groupId>
+                        <artifactId>maven-surefire-plugin</artifactId>
+                        <version>2.20</version>
+                        <executions>
+                            <execution>
+                                <id>cli-tools-test</id>
+                                <phase>test</phase>
+                                <goals>
+                                    <goal>test</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+        <profile>
+            <id>common-test</id>
+            <activation>
+                <activeByDefault>true</activeByDefault>
+            </activation>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>org.apache.maven.plugins</groupId>
+                        <artifactId>maven-surefire-plugin</artifactId>
+                        <version>2.20</version>
+                        <executions>
+                            <execution>
+                                <id>common-test</id>
+                                <phase>test</phase>
+                                <goals>
+                                    <goal>test</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+        <profile>
+            <id>service-test</id>
+            <activation>
+                <activeByDefault>true</activeByDefault>
+            </activation>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>org.apache.maven.plugins</groupId>
+                        <artifactId>maven-surefire-plugin</artifactId>
+                        <version>2.20</version>
+                        <executions>
+                            <execution>
+                                <id>service-test</id>
+                                <phase>test</phase>
+                                <goals>
+                                    <goal>test</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+    </profiles>
+</project>
diff --git a/hugegraph-pd/settings.xml b/hugegraph-pd/settings.xml
new file mode 100644
index 0000000000..ce0fa7ae9a
--- /dev/null
+++ b/hugegraph-pd/settings.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
+    <servers>
+        <server>
+            <id>star-local</id>
+            <username>superstar</username>
+            <password>Superstar12345</password>
+        </server>
+        <server>
+            <id>star-snapshot</id>
+            <username>superstar</username>
+            <password>Superstar12345</password>
+        </server>
+    </servers>
+
+    <profiles>
+        <profile>
+            <id>baidu</id>
+            <repositories>
+                <repository>
+                    <id>baidu-nexus</id>
+                    <url>http://maven.baidu-int.com/nexus/content/groups/public</url>
+                    <releases>
+                        <enabled>true</enabled>
+                    </releases>
+                    <snapshots>
+                        <enabled>false</enabled>
+                    </snapshots>
+                </repository>
+                <repository>
+                    <id>baidu-nexus-snapshot</id>
+                    <url>http://maven.baidu-int.com/nexus/content/groups/public-snapshots</url>
+                    <releases>
+                        <enabled>false</enabled>
+                    </releases>
+                    <snapshots>
+                        <enabled>false</enabled>
+                    </snapshots>
+                </repository>
+                <repository>
+                    <id>star</id>
+                    <url>http://127.0.0.1:8082/artifactory/star</url>
+                    <releases>
+                        <enabled>true</enabled>
+                        <updatePolicy>always</updatePolicy>
+                    </releases>
+                    <snapshots>
+                        <enabled>true</enabled>
+                        <updatePolicy>always</updatePolicy>
+                    </snapshots>
+                </repository>
+                <repository>
+                    <id>Baidu_Local</id>
+                    <url>http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local</url>
+                    <releases>
+                        <enabled>true</enabled>
+                    </releases>
+                    <snapshots>
+                        <enabled>false</enabled>
+                    </snapshots>
+                </repository>
+                <repository>
+                    <id>Baidu_Local_Snapshots</id>
+                    <url>http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local_Snapshots</url>
+                    <releases>
+                        <enabled>false</enabled>
+                    </releases>
+                    <snapshots>
+                        <enabled>true</enabled>
+                        <updatePolicy>always</updatePolicy>
+                    </snapshots>
+                </repository>
+            </repositories>
+            <pluginRepositories>
+                <pluginRepository>
+                    <id>baidu-nexus</id>
+                    <url>http://maven.baidu-int.com/nexus/content/groups/public</url>
+                    <releases>
+                        <enabled>true</enabled>
+                    </releases>
+                    <snapshots>
+                        <enabled>false</enabled>
+                    </snapshots>
+                </pluginRepository>
+                <pluginRepository>
+                    <id>baidu-nexus-snapshot</id>
+                    <url>http://maven.baidu-int.com/nexus/content/groups/public-snapshots</url>
+                    <releases>
+                        <enabled>false</enabled>
+                    </releases>
+                    <snapshots>
+                        <enabled>true</enabled>
+                    </snapshots>
+                </pluginRepository>
+            </pluginRepositories>
+        </profile>
+    </profiles>
+
+    <activeProfiles>
+        <activeProfile>baidu</activeProfile>
+    </activeProfiles>
+</settings>
diff --git a/hugegraph-pd/start_pd_server.sh b/hugegraph-pd/start_pd_server.sh
new file mode 100644
index 0000000000..280fe25a75
--- /dev/null
+++ b/hugegraph-pd/start_pd_server.sh
@@ -0,0 +1,55 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to You under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+function rename()
+{
+    cfilelist=$(find -maxdepth 1 -type d -printf '%f\n')
+    for cfilename in $cfilelist
+    do
+        if [[ $cfilename =~ SNAPSHOT ]]
+        then
+            mv $cfilename ${cfilename/-?.?.?-SNAPSHOT/}
+        fi
+    done
+}
+
+wget -q -O output.tar.gz $AGILE_PRODUCT_HTTP_URL
+tar -zxf output.tar.gz
+cd output
+rm -rf hugegraph-pd
+find . -name "*.tar.gz" -exec tar -zxvf {} \;
+rename
+
+# start pd
+pushd hugegraph-pd
+sed -i 's/initial-store-list:.*/initial-store-list: 127.0.0.1:8500\n initial-store-count: 1/' conf/application.yml
+sed -i 's/,127.0.0.1:8611,127.0.0.1:8612//' conf/application.yml
+bin/start-hugegraph-pd.sh
+popd
+jps
+sleep 10
+
+# start store
+pushd hugegraph-store
+sed -i 's#local os=`uname`#local os=Linux#g' bin/util.sh
+sed -i 's/export LD_PRELOAD/#export LD_PRELOAD/' bin/start-hugegraph-store.sh
+bin/start-hugegraph-store.sh
+popd
+jps
+sleep 5
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index 3170231902..5735ede238 100644
--- a/pom.xml
+++ b/pom.xml
@@ -95,7 +95,7 @@
         <module>hugegraph-server</module>
-
+        <module>hugegraph-pd</module>