From d97ba4cb5139d5aa51c58dcd4207656d109ef11d Mon Sep 17 00:00:00 2001 From: V_Galaxy <1904821183@qq.com> Date: Sat, 6 May 2023 18:24:23 +0800 Subject: [PATCH 01/18] history merged until 2023.5.6 --- .DS_Store | Bin 0 -> 6148 bytes .gitignore | 12 + .mvn/wrapper/MavenWrapperDownloader.java | 117 ++ .mvn/wrapper/maven-wrapper.jar | Bin 0 -> 50710 bytes .mvn/wrapper/maven-wrapper.properties | 2 + README.md | 897 ++++++++++ build-pre.sh | 37 + build.sh | 21 + ci.yml | 26 + conf/hugegraph.license | Bin 0 -> 856 bytes conf/verify-license.json | 6 + deploy-release.sh | 8 + deploy-snapshot.sh | 8 + hg-pd-client/pom.xml | 54 + .../hugegraph/pd/client/AbstractClient.java | 241 +++ .../pd/client/AbstractClientStubProxy.java | 56 + .../hugegraph/pd/client/Discoverable.java | 18 + .../hugegraph/pd/client/DiscoveryClient.java | 200 +++ .../pd/client/DiscoveryClientImpl.java | 127 ++ .../apache/hugegraph/pd/client/KvClient.java | 293 ++++ .../hugegraph/pd/client/LicenseClient.java | 54 + .../apache/hugegraph/pd/client/PDClient.java | 1124 ++++++++++++ .../apache/hugegraph/pd/client/PDConfig.java | 62 + .../apache/hugegraph/pd/client/PDPulse.java | 118 ++ .../hugegraph/pd/client/PDPulseImpl.java | 149 ++ .../apache/hugegraph/pd/client/PDWatch.java | 120 ++ .../hugegraph/pd/client/PDWatchImpl.java | 176 ++ .../hugegraph/pd/pulse/PartitionNotice.java | 36 + .../hugegraph/pd/pulse/PulseServerNotice.java | 20 + .../apache/hugegraph/pd/watch/NodeEvent.java | 78 + .../apache/hugegraph/pd/watch/PDWatcher.java | 8 + .../hugegraph/pd/watch/PartitionEvent.java | 74 + .../apache/hugegraph/pd/watch/WatchType.java | 16 + .../hugegraph/pd/PartitionCacheTest.java | 83 + .../hugegraph/pd/StoreRegisterTest.java | 122 ++ .../pd/client/DiscoveryClientImplTest.java | 137 ++ .../pd/client/LicenseClientImplTest.java | 112 ++ .../hugegraph/pd/client/PDPulseTest.java | 94 + .../hugegraph/pd/client/PDWatchTest.java | 74 + .../pd/client/test/HgPDTestUtil.java | 78 + hg-pd-clitools/pom.xml | 56 + .../apache/hugegraph/pd/clitools/Main.java | 62 + .../hugegraph/pd/clitools/MainTest.java | 61 + hg-pd-common/pom.xml | 26 + .../apache/hugegraph/pd/common/HgAssert.java | 94 + .../apache/hugegraph/pd/common/KVPair.java | 113 ++ .../hugegraph/pd/common/PDException.java | 29 + .../pd/common/PDRuntimeException.java | 36 + .../hugegraph/pd/common/PartitionCache.java | 432 +++++ .../hugegraph/pd/common/PartitionUtils.java | 28 + hg-pd-core/pom.xml | 72 + .../apache/hugegraph/pd/ConfigService.java | 118 ++ .../org/apache/hugegraph/pd/IdService.java | 62 + .../org/apache/hugegraph/pd/KvService.java | 295 ++++ .../org/apache/hugegraph/pd/LogService.java | 57 + .../pd/PartitionInstructionListener.java | 33 + .../apache/hugegraph/pd/PartitionService.java | 1448 +++++++++++++++ .../hugegraph/pd/PartitionStatusListener.java | 11 + .../apache/hugegraph/pd/RegistryService.java | 31 + .../pd/ShardGroupStatusListener.java | 9 + .../hugegraph/pd/StoreMonitorDataService.java | 242 +++ .../apache/hugegraph/pd/StoreNodeService.java | 996 +++++++++++ .../hugegraph/pd/StoreStatusListener.java | 13 + .../hugegraph/pd/TaskScheduleService.java | 785 +++++++++ .../apache/hugegraph/pd/config/PDConfig.java | 244 +++ .../hugegraph/pd/meta/ConfigMetaStore.java | 54 + .../hugegraph/pd/meta/DiscoveryMetaStore.java | 88 + .../apache/hugegraph/pd/meta/IdMetaStore.java | 237 +++ .../org/apache/hugegraph/pd/meta/LogMeta.java | 37 + .../hugegraph/pd/meta/MetadataFactory.java | 63 + .../hugegraph/pd/meta/MetadataKeyHelper.java | 358 ++++ .../pd/meta/MetadataRocksDBStore.java 
| 168 ++ .../hugegraph/pd/meta/MetadataStoreBase.java | 101 ++ .../hugegraph/pd/meta/PartitionMeta.java | 272 +++ .../apache/hugegraph/pd/meta/QueueStore.java | 34 + .../hugegraph/pd/meta/StoreInfoMeta.java | 186 ++ .../hugegraph/pd/meta/TaskInfoMeta.java | 113 ++ .../pd/raft/FutureClosureAdapter.java | 29 + .../apache/hugegraph/pd/raft/KVOperation.java | 142 ++ .../hugegraph/pd/raft/KVStoreClosure.java | 15 + .../apache/hugegraph/pd/raft/RaftEngine.java | 271 +++ .../hugegraph/pd/raft/RaftRpcClient.java | 64 + .../hugegraph/pd/raft/RaftRpcProcessor.java | 113 ++ .../hugegraph/pd/raft/RaftStateListener.java | 5 + .../hugegraph/pd/raft/RaftStateMachine.java | 302 ++++ .../hugegraph/pd/raft/RaftTaskHandler.java | 10 + .../apache/hugegraph/pd/raft/ZipUtils.java | 64 + .../pd/store/BaseKVStoreClosure.java | 31 + .../apache/hugegraph/pd/store/HgKVStore.java | 41 + .../hugegraph/pd/store/HgKVStoreImpl.java | 312 ++++ .../org/apache/hugegraph/pd/store/KV.java | 27 + .../hugegraph/pd/store/RaftKVStore.java | 296 ++++ .../hugegraph/pd/MonitorServiceTest.java | 90 + .../hugegraph/pd/PartitionServiceTest.java | 29 + .../hugegraph/pd/StoreNodeServiceTest.java | 436 +++++ .../org/apache/hugegraph/pd/UnitTestBase.java | 14 + .../pd/common/PartitionUtilsTest.java | 29 + .../hugegraph/pd/store/HgKVStoreImplTest.java | 90 + hg-pd-core/src/test/resources/log4j2.xml | 122 ++ hg-pd-dist/pom.xml | 130 ++ .../assembly/descriptor/server-assembly.xml | 40 + .../assembly/static/bin/start-hugegraph-pd.sh | 107 ++ .../assembly/static/bin/stop-hugegraph-pd.sh | 32 + hg-pd-dist/src/assembly/static/bin/util.sh | 355 ++++ .../src/assembly/static/conf/application.yml | 58 + .../static/conf/application.yml.template | 54 + .../assembly/static/conf/hugegraph.license | Bin 0 -> 856 bytes .../src/assembly/static/conf/log4j2.xml | 117 ++ .../assembly/static/conf/verify-license.json | 6 + hg-pd-grpc/pom.xml | 120 ++ hg-pd-grpc/src/main/proto/discovery.proto | 54 + hg-pd-grpc/src/main/proto/kv.proto | 126 ++ hg-pd-grpc/src/main/proto/metaTask.proto | 48 + hg-pd-grpc/src/main/proto/metapb.proto | 375 ++++ hg-pd-grpc/src/main/proto/pd_common.proto | 36 + hg-pd-grpc/src/main/proto/pd_pulse.proto | 144 ++ hg-pd-grpc/src/main/proto/pd_watch.proto | 84 + hg-pd-grpc/src/main/proto/pdpb.proto | 564 ++++++ hg-pd-service/pom.xml | 121 ++ .../pd/upgrade/VersionScriptFactory.java | 41 + .../pd/upgrade/VersionUpgradeScript.java | 39 + .../upgrade/scripts/PartitionMetaUpgrade.java | 99 ++ .../pd/upgrade/scripts/TaskCleanUpgrade.java | 47 + .../hugegraph/pd/boot/HugePDServer.java | 25 + .../pd/license/LicenseVerifierService.java | 404 +++++ .../pd/license/LicenseVerifyManager.java | 77 + .../hugegraph/pd/metrics/MetricsConfig.java | 29 + .../hugegraph/pd/metrics/PDMetrics.java | 99 ++ .../apache/hugegraph/pd/model/DemoModel.java | 55 + .../hugegraph/pd/model/GraphRestRequest.java | 9 + .../pd/model/GraphSpaceRestRequest.java | 8 + .../hugegraph/pd/model/PeerRestRequest.java | 8 + .../hugegraph/pd/model/PromTargetsModel.java | 72 + .../pd/model/RegistryQueryRestRequest.java | 17 + .../pd/model/RegistryRestRequest.java | 20 + .../pd/model/RegistryRestResponse.java | 19 + .../hugegraph/pd/model/RestApiResponse.java | 40 + .../hugegraph/pd/model/StoreRestRequest.java | 8 + .../hugegraph/pd/model/TimeRangeRequest.java | 13 + .../pd/notice/NoticeBroadcaster.java | 157 ++ .../pd/pulse/AbstractObserverSubject.java | 202 +++ .../hugegraph/pd/pulse/PDPulseSubject.java | 348 ++++ .../pd/pulse/PartitionHeartbeatSubject.java | 39 + 
.../hugegraph/pd/pulse/PulseListener.java | 25 + .../org/apache/hugegraph/pd/rest/API.java | 164 ++ .../apache/hugegraph/pd/rest/GraphAPI.java | 261 +++ .../hugegraph/pd/rest/GraphSpaceAPI.java | 75 + .../apache/hugegraph/pd/rest/IndexAPI.java | 232 +++ .../apache/hugegraph/pd/rest/MemberAPI.java | 208 +++ .../hugegraph/pd/rest/PartitionAPI.java | 435 +++++ .../hugegraph/pd/rest/PromTargetsAPI.java | 72 + .../apache/hugegraph/pd/rest/RegistryAPI.java | 179 ++ .../apache/hugegraph/pd/rest/ShardAPI.java | 100 ++ .../apache/hugegraph/pd/rest/StoreAPI.java | 315 ++++ .../org/apache/hugegraph/pd/rest/TaskAPI.java | 85 + .../org/apache/hugegraph/pd/rest/TestAPI.java | 140 ++ .../pd/service/DiscoveryService.java | 166 ++ .../pd/service/KvServiceGrpcImpl.java | 525 ++++++ .../hugegraph/pd/service/PDPulseService.java | 101 ++ .../hugegraph/pd/service/PDRestService.java | 251 +++ .../hugegraph/pd/service/PDService.java | 1547 +++++++++++++++++ .../hugegraph/pd/service/PDWatchService.java | 22 + .../pd/service/PromTargetsService.java | 234 +++ .../hugegraph/pd/service/ServiceGrpc.java | 79 + .../hugegraph/pd/service/UpgradeService.java | 88 + .../apache/hugegraph/pd/util/DateUtil.java | 58 + .../apache/hugegraph/pd/util/HgMapCache.java | 82 + .../org/apache/hugegraph/pd/util/IdUtil.java | 32 + .../pd/util/grpc/StreamObserverUtil.java | 32 + .../pd/watch/AbstractWatchSubject.java | 142 ++ .../hugegraph/pd/watch/KvWatchSubject.java | 243 +++ .../hugegraph/pd/watch/NodeChangeSubject.java | 51 + .../hugegraph/pd/watch/PDWatchSubject.java | 190 ++ .../pd/watch/PartitionChangeSubject.java | 48 + .../pd/watch/ShardGroupChangeSubject.java | 37 + .../src/main/resources/application.yml | 63 + hg-pd-service/src/main/resources/banner.txt | 6 + hg-pd-service/src/main/resources/log4j2.xml | 122 ++ .../src/main/resources/private-keys.store | Bin 0 -> 1299 bytes .../src/main/resources/public-certs.store | Bin 0 -> 892 bytes .../src/test/java/live/PDServer0.java | 35 + .../src/test/java/live/PDServer1.java | 33 + .../src/test/java/live/PDServer2.java | 33 + .../src/test/java/live/PDServer3.java | 33 + .../test/resources/application-server0.yml | 54 + .../test/resources/application-server1.yml | 54 + .../test/resources/application-server2.yml | 56 + .../test/resources/application-server3.yml | 56 + hg-pd-service/src/test/resources/banner.txt | 6 + hg-pd-service/src/test/resources/log4j2.xml | 122 ++ hg-pd-test/pom.xml | 350 ++++ .../hugegraph/pd/client/BaseClientTest.java | 30 + .../pd/client/DiscoveryClientTest.java | 62 + .../hugegraph/pd/client/KvClientTest.java | 107 ++ .../pd/client/PDClientSuiteTest.java | 18 + .../hugegraph/pd/client/PDClientTest.java | 407 +++++ .../pd/clitools/BaseCliToolsTest.java | 18 + .../pd/clitools/CliToolsSuiteTest.java | 17 + .../hugegraph/pd/clitools/MainTest.java | 68 + .../hugegraph/pd/common/BaseCommonTest.java | 16 + .../hugegraph/pd/common/CommonSuiteTest.java | 27 + .../hugegraph/pd/common/HgAssertTest.java | 108 ++ .../hugegraph/pd/common/KVPairTest.java | 58 + .../pd/common/MetadataKeyHelperTest.java | 199 +++ .../pd/common/PartitionCacheTest.java | 376 ++++ .../pd/common/PartitionUtilsTest.java | 18 + .../hugegraph/pd/core/BaseCoreTest.java | 56 + .../hugegraph/pd/core/PDCoreSuiteTest.java | 21 + .../pd/core/StoreNodeServiceTest.java | 98 ++ .../pd/core/meta/MetadataKeyHelperTest.java | 19 + .../hugegraph/pd/grpc/BaseGrpcTest.java | 20 + .../hugegraph/pd/grpc/GrpcSuiteTest.java | 15 + .../hugegraph/pd/service/BaseServerTest.java | 39 + .../pd/service/ConfigServiceTest.java | 
92 + .../hugegraph/pd/service/IdServiceTest.java | 91 + .../hugegraph/pd/service/KvServiceTest.java | 43 + .../hugegraph/pd/service/LogServiceTest.java | 36 + .../pd/service/PartitionServiceTest.java | 113 ++ .../hugegraph/pd/service/PdTestBase.java | 194 +++ .../hugegraph/pd/service/RestApiTest.java | 100 ++ .../hugegraph/pd/service/ServerSuiteTest.java | 24 + .../service/StoreMonitorDataServiceTest.java | 63 + .../pd/service/StoreNodeServiceNewTest.java | 46 + .../pd/service/StoreServiceTest.java | 817 +++++++++ .../pd/service/TaskScheduleServiceTest.java | 95 + local-release.sh | 8 + mvnw | 310 ++++ mvnw.cmd | 182 ++ pom.xml | 234 +++ settings.xml | 113 ++ start_pd_server.sh | 38 + 231 files changed, 30538 insertions(+) create mode 100644 .DS_Store create mode 100644 .gitignore create mode 100644 .mvn/wrapper/MavenWrapperDownloader.java create mode 100644 .mvn/wrapper/maven-wrapper.jar create mode 100644 .mvn/wrapper/maven-wrapper.properties create mode 100644 README.md create mode 100644 build-pre.sh create mode 100644 build.sh create mode 100644 ci.yml create mode 100644 conf/hugegraph.license create mode 100644 conf/verify-license.json create mode 100644 deploy-release.sh create mode 100644 deploy-snapshot.sh create mode 100644 hg-pd-client/pom.xml create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java create mode 100644 hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java create mode 100644 hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java create mode 100644 hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java create mode 100644 hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java create mode 100644 
hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java create mode 100644 hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java create mode 100644 hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java create mode 100644 hg-pd-clitools/pom.xml create mode 100644 hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java create mode 100644 hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java create mode 100644 hg-pd-common/pom.xml create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java create mode 100644 hg-pd-core/pom.xml create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java create mode 100644 
hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java create mode 100644 hg-pd-core/src/test/resources/log4j2.xml create mode 100644 hg-pd-dist/pom.xml create mode 100644 hg-pd-dist/src/assembly/descriptor/server-assembly.xml create mode 100644 hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh create mode 100644 hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh create mode 100644 hg-pd-dist/src/assembly/static/bin/util.sh create mode 100644 hg-pd-dist/src/assembly/static/conf/application.yml create mode 100644 hg-pd-dist/src/assembly/static/conf/application.yml.template create mode 100644 hg-pd-dist/src/assembly/static/conf/hugegraph.license create mode 100644 hg-pd-dist/src/assembly/static/conf/log4j2.xml create mode 100644 hg-pd-dist/src/assembly/static/conf/verify-license.json create mode 100644 hg-pd-grpc/pom.xml create mode 100644 hg-pd-grpc/src/main/proto/discovery.proto create mode 100644 hg-pd-grpc/src/main/proto/kv.proto create mode 100644 hg-pd-grpc/src/main/proto/metaTask.proto create mode 100644 hg-pd-grpc/src/main/proto/metapb.proto create mode 100644 hg-pd-grpc/src/main/proto/pd_common.proto create mode 100644 hg-pd-grpc/src/main/proto/pd_pulse.proto create mode 100644 hg-pd-grpc/src/main/proto/pd_watch.proto create mode 100644 hg-pd-grpc/src/main/proto/pdpb.proto create mode 100644 hg-pd-service/pom.xml create mode 100644 hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionScriptFactory.java create mode 100644 hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionUpgradeScript.java create mode 100644 hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java create 
mode 100644 hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java create mode 100644 
hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java create mode 100644 hg-pd-service/src/main/resources/application.yml create mode 100644 hg-pd-service/src/main/resources/banner.txt create mode 100644 hg-pd-service/src/main/resources/log4j2.xml create mode 100644 hg-pd-service/src/main/resources/private-keys.store create mode 100644 hg-pd-service/src/main/resources/public-certs.store create mode 100644 hg-pd-service/src/test/java/live/PDServer0.java create mode 100644 hg-pd-service/src/test/java/live/PDServer1.java create mode 100644 hg-pd-service/src/test/java/live/PDServer2.java create mode 100644 hg-pd-service/src/test/java/live/PDServer3.java create mode 100644 hg-pd-service/src/test/resources/application-server0.yml create mode 100644 hg-pd-service/src/test/resources/application-server1.yml create mode 100644 hg-pd-service/src/test/resources/application-server2.yml create mode 100644 hg-pd-service/src/test/resources/application-server3.yml create mode 100644 hg-pd-service/src/test/resources/banner.txt create mode 100644 hg-pd-service/src/test/resources/log4j2.xml create mode 100644 hg-pd-test/pom.xml create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java create mode 100644 
hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java create mode 100755 local-release.sh create mode 100644 mvnw create mode 100644 mvnw.cmd create mode 100644 pom.xml create mode 100644 settings.xml create mode 100644 start_pd_server.sh
diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..1b1b3b8c07dfedcef15b4f3094c7204fd8d10d89
GIT binary patch
[binary literal (6148 bytes) omitted]
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000..c588baf7c4
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,12 @@
+/hg-pd-grpc/src/main/java/
+/.idea/
+/hg-pd-grpc/target/
+/dist/
+**/target/
+.DS_Store
+**/tmp/
+*.log
+*.iml
+/hg-pd-common/target_B000000405016P_Oct-28-114458-2021_conflict_parent/
+
+dist/
\ No newline at end of file
diff --git a/.mvn/wrapper/MavenWrapperDownloader.java b/.mvn/wrapper/MavenWrapperDownloader.java
new file mode 100644
index 0000000000..b901097f2d
--- /dev/null
+++ b/.mvn/wrapper/MavenWrapperDownloader.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2007-present the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.net.*;
+import java.io.*;
+import java.nio.channels.*;
+import java.util.Properties;
+
+public class MavenWrapperDownloader {
+
+    private static final String WRAPPER_VERSION = "0.5.6";
+    /**
+     * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
+     */
+    private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+            + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
+
+    /**
+     * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
+     * use instead of the default one.
+     */
+    private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
+            ".mvn/wrapper/maven-wrapper.properties";
+
+    /**
+     * Path where the maven-wrapper.jar will be saved to.
+     */
+    private static final String MAVEN_WRAPPER_JAR_PATH =
+            ".mvn/wrapper/maven-wrapper.jar";
+
+    /**
+     * Name of the property which should be used to override the default download url for the wrapper.
+     */
+    private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
+
+    public static void main(String args[]) {
+        System.out.println("- Downloader started");
+        File baseDirectory = new File(args[0]);
+        System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
+
+        // If the maven-wrapper.properties exists, read it and check if it contains a custom
+        // wrapperUrl parameter.
+        File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
+        String url = DEFAULT_DOWNLOAD_URL;
+        if(mavenWrapperPropertyFile.exists()) {
+            FileInputStream mavenWrapperPropertyFileInputStream = null;
+            try {
+                mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
+                Properties mavenWrapperProperties = new Properties();
+                mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
+                url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
+            } catch (IOException e) {
+                System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
+            } finally {
+                try {
+                    if(mavenWrapperPropertyFileInputStream != null) {
+                        mavenWrapperPropertyFileInputStream.close();
+                    }
+                } catch (IOException e) {
+                    // Ignore ...
+                }
+            }
+        }
+        System.out.println("- Downloading from: " + url);
+
+        File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
+        if(!outputFile.getParentFile().exists()) {
+            if(!outputFile.getParentFile().mkdirs()) {
+                System.out.println(
+                        "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
+            }
+        }
+        System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
+        try {
+            downloadFileFromURL(url, outputFile);
+            System.out.println("Done");
+            System.exit(0);
+        } catch (Throwable e) {
+            System.out.println("- Error downloading");
+            e.printStackTrace();
+            System.exit(1);
+        }
+    }
+
+    private static void downloadFileFromURL(String urlString, File destination) throws Exception {
+        if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
+            String username = System.getenv("MVNW_USERNAME");
+            char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
+            Authenticator.setDefault(new Authenticator() {
+                @Override
+                protected PasswordAuthentication getPasswordAuthentication() {
+                    return new PasswordAuthentication(username, password);
+                }
+            });
+        }
+        URL website = new URL(urlString);
+        ReadableByteChannel rbc;
+        rbc = Channels.newChannel(website.openStream());
+        FileOutputStream fos = new FileOutputStream(destination);
+        fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
+        fos.close();
+        rbc.close();
+    }
+
+}
diff --git a/.mvn/wrapper/maven-wrapper.jar b/.mvn/wrapper/maven-wrapper.jar
new file mode 100644
index 0000000000000000000000000000000000000000..2cc7d4a55c0cd0092912bf49ae38b3a9e3fd0054
GIT binary patch
[binary literal (50710 bytes) omitted]
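Note for reviewers of the wrapper bootstrap added above: MavenWrapperDownloader.java resolves the download URL by first looking for a wrapperUrl property in .mvn/wrapper/maven-wrapper.properties and falling back to the hard-coded Maven Central default, then streams maven-wrapper.jar into .mvn/wrapper/. The snippet below is a minimal, standalone sketch of that URL-resolution step only, built from the same standard-library calls; the class name WrapperUrlCheck and its output message are illustrative and are not part of this patch.

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

// Hypothetical helper (not in this patch): prints the URL the wrapper bootstrap would use.
public class WrapperUrlCheck {

    // Same default URL and property name as MavenWrapperDownloader above.
    private static final String DEFAULT_DOWNLOAD_URL =
            "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar";
    private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";

    public static void main(String[] args) {
        // args[0] is the project base directory; default to the current directory.
        String baseDir = args.length > 0 ? args[0] : ".";
        String url = DEFAULT_DOWNLOAD_URL;
        try (FileInputStream in = new FileInputStream(
                baseDir + "/.mvn/wrapper/maven-wrapper.properties")) {
            Properties props = new Properties();
            props.load(in);
            // A wrapperUrl entry, when present, overrides the default download URL.
            url = props.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
        } catch (IOException e) {
            // Missing or unreadable properties file: keep the default, as the wrapper does.
        }
        System.out.println("maven-wrapper.jar would be downloaded from: " + url);
    }
}

Compile and run it from the repository root (javac WrapperUrlCheck.java && java WrapperUrlCheck .) to confirm which URL a given checkout would actually download from.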
zId~hWQL<%)fI_WDIX#=(WNl!Dm$a&ROfLTd&B$vatq!M-2Jcs;N2vps$b6P1(N}=oI3<3luMTmC|0*{ zm1w8bt7vgX($!0@V0A}XIK)w!AzUn7vH=pZEp0RU0p?}ch2XC-7r#LK&vyc2=-#Q2 z^L%8)JbbcZ%g0Du;|8=q8B>X=mIQirpE=&Ox{TiuNDnOPd-FLI^KfEF729!!0x#Es z@>3ursjFSpu%C-8WL^Zw!7a0O-#cnf`HjI+AjVCFitK}GXO`ME&on|^=~Zc}^LBp9 zj=-vlN;Uc;IDjtK38l7}5xxQF&sRtfn4^TNtnzXv4M{r&ek*(eNbIu!u$>Ed%` z5x7+&)2P&4>0J`N&ZP8$vcR+@FS0126s6+Jx_{{`3ZrIMwaJo6jdrRwE$>IU_JTZ} z(||hyyQ)4Z1@wSlT94(-QKqkAatMmkT7pCycEB1U8KQbFX&?%|4$yyxCtm3=W`$4fiG0WU3yI@c zx{wfmkZAYE_5M%4{J-ygbpH|(|GD$2f$3o_Vti#&zfSGZMQ5_f3xt6~+{RX=$H8at z?GFG1Tmp}}lmm-R->ve*Iv+XJ@58p|1_jRvfEgz$XozU8#iJS})UM6VNI!3RUU!{5 zXB(+Eqd-E;cHQ>)`h0(HO_zLmzR3Tu-UGp;08YntWwMY-9i^w_u#wR?JxR2bky5j9 z3Sl-dQQU$xrO0xa&>vsiK`QN<$Yd%YXXM7*WOhnRdSFt5$aJux8QceC?lA0_if|s> ze{ad*opH_kb%M&~(~&UcX0nFGq^MqjxW?HJIP462v9XG>j(5Gat_)#SiNfahq2Mz2 zU`4uV8m$S~o9(W>mu*=h%Gs(Wz+%>h;R9Sg)jZ$q8vT1HxX3iQnh6&2rJ1u|j>^Qf`A76K%_ubL`Zu?h4`b=IyL>1!=*%!_K)=XC z6d}4R5L+sI50Q4P3upXQ3Z!~1ZXLlh!^UNcK6#QpYt-YC=^H=EPg3)z*wXo*024Q4b2sBCG4I# zlTFFY=kQ>xvR+LsuDUAk)q%5pEcqr(O_|^spjhtpb1#aC& zghXzGkGDC_XDa%t(X`E+kvKQ4zrQ*uuQoj>7@@ykWvF332)RO?%AA&Fsn&MNzmFa$ zWk&&^=NNjxLjrli_8ESU)}U|N{%j&TQmvY~lk!~Jh}*=^INA~&QB9em!in_X%Rl1&Kd~Z(u z9mra#<@vZQlOY+JYUwCrgoea4C8^(xv4ceCXcejq84TQ#sF~IU2V}LKc~Xlr_P=ry zl&Hh0exdCbVd^NPCqNNlxM3vA13EI8XvZ1H9#bT7y*U8Y{H8nwGpOR!e!!}*g;mJ#}T{ekSb}5zIPmye*If(}}_=PcuAW#yidAa^9-`<8Gr0 z)Fz=NiZ{)HAvw{Pl5uu)?)&i&Us$Cx4gE}cIJ}B4Xz~-q7)R_%owbP!z_V2=Aq%Rj z{V;7#kV1dNT9-6R+H}}(ED*_!F=~uz>&nR3gb^Ce%+0s#u|vWl<~JD3MvS0T9thdF zioIG3c#Sdsv;LdtRv3ml7%o$6LTVL>(H`^@TNg`2KPIk*8-IB}X!MT0`hN9Ddf7yN z?J=GxPL!uJ7lqwowsl?iRrh@#5C$%E&h~Z>XQcvFC*5%0RN-Opq|=IwX(dq(*sjs+ zqy99+v~m|6T#zR*e1AVxZ8djd5>eIeCi(b8sUk)OGjAsKSOg^-ugwl2WSL@d#?mdl zib0v*{u-?cq}dDGyZ%$XRY=UkQwt2oGu`zQneZh$=^! zj;!pCBWQNtvAcwcWIBM2y9!*W|8LmQy$H~5BEx)78J`4Z0(FJO2P^!YyQU{*Al+fs z){!4JvT1iLrJ8aU3k0t|P}{RN)_^v%$$r;+p0DY7N8CXzmS*HB*=?qaaF9D@#_$SN zSz{moAK<*RH->%r7xX~9gVW$l7?b|_SYI)gcjf0VAUJ%FcQP(TpBs; zg$25D!Ry_`8xpS_OJdeo$qh#7U+cepZ??TII7_%AXsT$B z=e)Bx#v%J0j``00Zk5hsvv6%T^*xGNx%KN-=pocSoqE5_R)OK%-Pbu^1MNzfds)mL zxz^F4lDKV9D&lEY;I+A)ui{TznB*CE$=9(wgE{m}`^<--OzV-5V4X2w9j(_!+jpTr zJvD*y6;39&T+==$F&tsRKM_lqa1HC}aGL0o`%c9mO=fts?36@8MGm7Vi{Y z^<7m$(EtdSr#22<(rm_(l_(`j!*Pu~Y>>xc>I9M#DJYDJNHO&4=HM%YLIp?;iR&$m z#_$ZWYLfGLt5FJZhr3jpYb`*%9S!zCG6ivNHYzNHcI%khtgHBliM^Ou}ZVD7ehU9 zS+W@AV=?Ro!=%AJ>Kcy9aU3%VX3|XM_K0A+ZaknKDyIS3S-Hw1C7&BSW5)sqj5Ye_ z4OSW7Yu-;bCyYKHFUk}<*<(@TH?YZPHr~~Iy%9@GR2Yd}J2!N9K&CN7Eq{Ka!jdu; zQNB*Y;i(7)OxZK%IHGt#Rt?z`I|A{q_BmoF!f^G}XVeTbe1Wnzh%1g>j}>DqFf;Rp zz7>xIs12@Ke0gr+4-!pmFP84vCIaTjqFNg{V`5}Rdt~xE^I;Bxp4)|cs8=f)1YwHz zqI`G~s2~qqDV+h02b`PQpUE#^^Aq8l%y2|ByQeXSADg5*qMprEAE3WFg0Q39`O+i1 z!J@iV!`Y~C$wJ!5Z+j5$i<1`+@)tBG$JL=!*uk=2k;T<@{|s1$YL079FvK%mPhyHV zP8^KGZnp`(hVMZ;s=n~3r2y;LTwcJwoBW-(ndU-$03{RD zh+Qn$ja_Z^OuMf3Ub|JTY74s&Am*(n{J3~@#OJNYuEVVJd9*H%)oFoRBkySGm`hx! 
zT3tG|+aAkXcx-2Apy)h^BkOyFTWQVeZ%e2@;*0DtlG9I3Et=PKaPt&K zw?WI7S;P)TWED7aSH$3hL@Qde?H#tzo^<(o_sv_2ci<7M?F$|oCFWc?7@KBj-;N$P zB;q!8@bW-WJY9do&y|6~mEruZAVe$!?{)N9rZZxD-|oltkhW9~nR8bLBGXw<632!l z*TYQn^NnUy%Ds}$f^=yQ+BM-a5X4^GHF=%PDrRfm_uqC zh{sKwIu|O0&jWb27;wzg4w5uA@TO_j(1X?8E>5Zfma|Ly7Bklq|s z9)H`zoAGY3n-+&JPrT!>u^qg9Evx4y@GI4$n-Uk_5wttU1_t?6><>}cZ-U+&+~JE) zPlDbO_j;MoxdLzMd~Ew|1o^a5q_1R*JZ=#XXMzg?6Zy!^hop}qoLQlJ{(%!KYt`MK z8umEN@Z4w!2=q_oe=;QttPCQy3Nm4F@x>@v4sz_jo{4m*0r%J(w1cSo;D_hQtJs7W z><$QrmG^+<$4{d2bgGo&3-FV}avg9zI|Rr(k{wTyl3!M1q+a zD9W{pCd%il*j&Ft z5H$nENf>>k$;SONGW`qo6`&qKs*T z2^RS)pXk9b@(_Fw1bkb)-oqK|v}r$L!W&aXA>IpcdNZ_vWE#XO8X`#Yp1+?RshVcd zknG%rPd*4ECEI0wD#@d+3NbHKxl}n^Sgkx==Iu%}HvNliOqVBqG?P2va zQ;kRJ$J6j;+wP9cS za#m;#GUT!qAV%+rdWolk+)6kkz4@Yh5LXP+LSvo9_T+MmiaP-eq6_k;)i6_@WSJ zlT@wK$zqHu<83U2V*yJ|XJU4farT#pAA&@qu)(PO^8PxEmPD4;Txpio+2)#!9 z>&=i7*#tc0`?!==vk>s7V+PL#S1;PwSY?NIXN2=Gu89x(cToFm))7L;< z+bhAbVD*bD=}iU`+PU+SBobTQ%S!=VL!>q$rfWsaaV}Smz>lO9JXT#`CcH_mRCSf4%YQAw`$^yY z3Y*^Nzk_g$xn7a_NO(2Eb*I=^;4f!Ra#Oo~LLjlcjke*k*o$~U#0ZXOQ5@HQ&T46l z7504MUgZkz2gNP1QFN8Y?nSEnEai^Rgyvl}xZfMUV6QrJcXp;jKGqB=D*tj{8(_pV zqyB*DK$2lgYGejmJUW)*s_Cv65sFf&pb(Yz8oWgDtQ0~k^0-wdF|tj}MOXaN@ydF8 zNr={U?=;&Z?wr^VC+`)S2xl}QFagy;$mG=TUs7Vi2wws5zEke4hTa2)>O0U?$WYsZ z<8bN2bB_N4AWd%+kncgknZ&}bM~eDtj#C5uRkp21hWW5gxWvc6b*4+dn<{c?w9Rmf zIVZKsPl{W2vQAlYO3yh}-{Os=YBnL8?uN5(RqfQ=-1cOiUnJu>KcLA*tQK3FU`_bM zM^T28w;nAj5EdAXFi&Kk1Nnl2)D!M{@+D-}bIEe+Lc4{s;YJc-{F#``iS2uk;2!Zp zF9#myUmO!wCeJIoi^A+T^e~20c+c2C}XltaR!|U-HfDA=^xF97ev}$l6#oY z&-&T{egB)&aV$3_aVA51XGiU07$s9vubh_kQG?F$FycvS6|IO!6q zq^>9|3U^*!X_C~SxX&pqUkUjz%!j=VlXDo$!2VLH!rKj@61mDpSr~7B2yy{>X~_nc zRI+7g2V&k zd**H++P9dg!-AOs3;GM`(g<+GRV$+&DdMVpUxY9I1@uK28$az=6oaa+PutlO9?6#? zf-OsgT>^@8KK>ggkUQRPPgC7zjKFR5spqQb3ojCHzj^(UH~v+!y*`Smv)VpVoPwa6 zWG18WJaPKMi*F6Zdk*kU^`i~NNTfn3BkJniC`yN98L-Awd)Z&mY? 
zprBW$!qL-OL7h@O#kvYnLsfff@kDIegt~?{-*5A7JrA;#TmTe?jICJqhub-G@e??D zqiV#g{)M!kW1-4SDel7TO{;@*h2=_76g3NUD@|c*WO#>MfYq6_YVUP+&8e4|%4T`w zXzhmVNziAHazWO2qXcaOu@R1MrPP{t)`N)}-1&~mq=ZH=w=;-E$IOk=y$dOls{6sRR`I5>|X zpq~XYW4sd;J^6OwOf**J>a7u$S>WTFPRkjY;BfVgQst)u4aMLR1|6%)CB^18XCz+r ztkYQ}G43j~Q&1em(_EkMv0|WEiKu;z2zhb(L%$F&xWwzOmk;VLBYAZ8lOCziNoPw1 zv2BOyXA`A8z^WH!nXhKXM`t0;6D*-uGds3TYGrm8SPnJJOQ^fJU#}@aIy@MYWz**H zvkp?7I5PE{$$|~{-ZaFxr6ZolP^nL##mHOErB^AqJqn^hFA=)HWj!m3WDaHW$C)i^ z9@6G$SzB=>jbe>4kqr#sF7#K}W*Cg-5y6kun3u&0L7BpXF9=#7IN8FOjWrWwUBZiU zT_se3ih-GBKx+Uw0N|CwP3D@-C=5(9T#BH@M`F2!Goiqx+Js5xC92|Sy0%WWWp={$(am!#l~f^W_oz78HX<0X#7 zp)p1u~M*o9W@O8P{0Qkg@Wa# z2{Heb&oX^CQSZWSFBXKOfE|tsAm#^U-WkDnU;IowZ`Ok4!mwHwH=s|AqZ^YD4!5!@ zPxJj+Bd-q6w_YG`z_+r;S86zwXb+EO&qogOq8h-Ect5(M2+>(O7n7)^dP*ws_3U6v zVsh)sk^@*c>)3EML|0<-YROho{lz@Nd4;R9gL{9|64xVL`n!m$-Jjrx?-Bacp!=^5 z1^T^eB{_)Y<9)y{-4Rz@9_>;_7h;5D+@QcbF4Wv7hu)s0&==&6u)33 zHRj+&Woq-vDvjwJCYES@$C4{$?f$Ibi4G()UeN11rgjF+^;YE^5nYprYoJNoudNj= zm1pXSeG64dcWHObUetodRn1Fw|1nI$D9z}dVEYT0lQnsf_E1x2vBLql7NrHH!n&Sq z6lc*mvU=WS6=v9Lrl}&zRiu_6u;6g%_DU{9b+R z#YHqX7`m9eydf?KlKu6Sb%j$%_jmydig`B*TN`cZL-g!R)iE?+Q5oOqBFKhx z%MW>BC^(F_JuG(ayE(MT{S3eI{cKiwOtPwLc0XO*{*|(JOx;uQOfq@lp_^cZo=FZj z4#}@e@dJ>Bn%2`2_WPeSN7si^{U#H=7N4o%Dq3NdGybrZgEU$oSm$hC)uNDC_M9xc zGzwh5Sg?mpBIE8lT2XsqTt3j3?We8}3bzLBTQd639vyg^$0#1epq8snlDJP2(BF)K zSx30RM+{f+b$g{9usIL8H!hCO117Xgv}ttPJm9wVRjPk;ePH@zxv%j9k5`TzdXLeT zFgFX`V7cYIcBls5WN0Pf6SMBN+;CrQ(|EsFd*xtwr#$R{Z9FP`OWtyNsq#mCgZ7+P z^Yn$haBJ)r96{ZJd8vlMl?IBxrgh=fdq_NF!1{jARCVz>jNdC)H^wfy?R94#MPdUjcYX>#wEx+LB#P-#4S-%YH>t-j+w zOFTI8gX$ard6fAh&g=u&56%3^-6E2tpk*wx3HSCQ+t7+*iOs zPk5ysqE}i*cQocFvA68xHfL|iX(C4h*67@3|5Qwle(8wT&!&{8*{f%0(5gH+m>$tq zp;AqrP7?XTEooYG1Dzfxc>W%*CyL16q|fQ0_jp%%Bk^k!i#Nbi(N9&T>#M{gez_Ws zYK=l}adalV(nH}I_!hNeb;tQFk3BHX7N}}R8%pek^E`X}%ou=cx8InPU1EE0|Hen- zyw8MoJqB5=)Z%JXlrdTXAE)eqLAdVE-=>wGHrkRet}>3Yu^lt$Kzu%$3#(ioY}@Gu zjk3BZuQH&~7H+C*uX^4}F*|P89JX;Hg2U!pt>rDi(n(Qe-c}tzb0#6_ItoR0->LSt zR~UT<-|@TO%O`M+_e_J4wx7^)5_%%u+J=yF_S#2Xd?C;Ss3N7KY^#-vx+|;bJX&8r zD?|MetfhdC;^2WG`7MCgs>TKKN=^=!x&Q~BzmQio_^l~LboTNT=I zC5pme^P@ER``p$2md9>4!K#vV-Fc1an7pl>_|&>aqP}+zqR?+~Z;f2^`a+-!Te%V? 
z;H2SbF>jP^GE(R1@%C==XQ@J=G9lKX+Z<@5}PO(EYkJh=GCv#)Nj{DkWJM2}F&oAZ6xu8&g7pn1ps2U5srwQ7CAK zN&*~@t{`31lUf`O;2w^)M3B@o)_mbRu{-`PrfNpF!R^q>yTR&ETS7^-b2*{-tZAZz zw@q5x9B5V8Qd7dZ!Ai$9hk%Q!wqbE1F1c96&zwBBaRW}(^axoPpN^4Aw}&a5dMe+*Gomky_l^54*rzXro$ z>LL)U5Ry>~FJi=*{JDc)_**c)-&faPz`6v`YU3HQa}pLtb5K)u%K+BOqXP0)rj5Au$zB zW1?vr?mDv7Fsxtsr+S6ucp2l#(4dnr9sD*v+@*>g#M4b|U?~s93>Pg{{a5|rm2xfI z`>E}?9S@|IoUX{Q1zjm5YJT|3S>&09D}|2~BiMo=z4YEjXlWh)V&qs;*C{`UMxp$9 zX)QB?G$fPD6z5_pNs>Jeh{^&U^)Wbr?2D6-q?)`*1k@!UvwQgl8eG$r+)NnFoT)L6 zg7lEh+E6J17krfYJCSjWzm67hEth24pomhz71|Qodn#oAILN)*Vwu2qpJirG)4Wnv}9GWOFrQg%Je+gNrPl8mw7ykE8{ z=|B4+uwC&bpp%eFcRU6{mxRV32VeH8XxX>v$du<$(DfinaaWxP<+Y97Z#n#U~V zVEu-GoPD=9$}P;xv+S~Ob#mmi$JQmE;Iz4(){y*9pFyW-jjgdk#oG$fl4o9E8bo|L zWjo4l%n51@Kz-n%zeSCD`uB?T%FVk+KBI}=ve zvlcS#wt`U6wrJo}6I6Rwb=1GzZfwE=I&Ne@p7*pH84XShXYJRgvK)UjQL%R9Zbm(m zxzTQsLTON$WO7vM)*vl%Pc0JH7WhP;$z@j=y#avW4X8iqy6mEYr@-}PW?H)xfP6fQ z&tI$F{NNct4rRMSHhaelo<5kTYq+(?pY)Ieh8*sa83EQfMrFupMM@nfEV@EmdHUv9 z35uzIrIuo4#WnF^_jcpC@uNNaYTQ~uZWOE6P@LFT^1@$o&q+9Qr8YR+ObBkpP9=F+$s5+B!mX2~T zAuQ6RenX?O{IlLMl1%)OK{S7oL}X%;!XUxU~xJN8xk z`xywS*naF(J#?vOpB(K=o~lE;m$zhgPWDB@=p#dQIW>xe_p1OLoWInJRKbEuoncf; zmS1!u-ycc1qWnDg5Nk2D)BY%jmOwCLC+Ny>`f&UxFowIsHnOXfR^S;&F(KXd{ODlm z$6#1ccqt-HIH9)|@fHnrKudu!6B$_R{fbCIkSIb#aUN|3RM>zuO>dpMbROZ`^hvS@ z$FU-;e4W}!ubzKrU@R*dW*($tFZ>}dd*4_mv)#O>X{U@zSzQt*83l9mI zI$8O<5AIDx`wo0}f2fsPC_l>ONx_`E7kdXu{YIZbp1$(^oBAH({T~&oQ&1{X951QW zmhHUxd)t%GQ9#ak5fTjk-cahWC;>^Rg7(`TVlvy0W@Y!Jc%QL3Ozu# zDPIqBCy&T2PWBj+d-JA-pxZlM=9ja2ce|3B(^VCF+a*MMp`(rH>Rt6W1$;r{n1(VK zLs>UtkT43LR2G$AOYHVailiqk7naz2yZGLo*xQs!T9VN5Q>eE(w zw$4&)&6xIV$IO^>1N-jrEUg>O8G4^@y+-hQv6@OmF@gy^nL_n1P1-Rtyy$Bl;|VcV zF=p*&41-qI5gG9UhKmmnjs932!6hceXa#-qfK;3d*a{)BrwNFeKU|ge?N!;zk+kB! zMD_uHJR#%b54c2tr~uGPLTRLg$`fupo}cRJeTwK;~}A>(Acy4k-Xk&Aa1&eWYS1ULWUj@fhBiWY$pdfy+F z@G{OG{*v*mYtH3OdUjwEr6%_ZPZ3P{@rfbNPQG!BZ7lRyC^xlMpWH`@YRar`tr}d> z#wz87t?#2FsH-jM6m{U=gp6WPrZ%*w0bFm(T#7m#v^;f%Z!kCeB5oiF`W33W5Srdt zdU?YeOdPG@98H7NpI{(uN{FJdu14r(URPH^F6tOpXuhU7T9a{3G3_#Ldfx_nT(Hec zo<1dyhsVsTw;ZkVcJ_0-h-T3G1W@q)_Q30LNv)W?FbMH+XJ* zy=$@39Op|kZv`Rt>X`zg&at(?PO^I=X8d9&myFEx#S`dYTg1W+iE?vt#b47QwoHI9 zNP+|3WjtXo{u}VG(lLUaW0&@yD|O?4TS4dfJI`HC-^q;M(b3r2;7|FONXphw-%7~* z&;2!X17|05+kZOpQ3~3!Nb>O94b&ZSs%p)TK)n3m=4eiblVtSx@KNFgBY_xV6ts;NF;GcGxMP8OKV^h6LmSb2E#Qnw ze!6Mnz7>lE9u{AgQ~8u2zM8CYD5US8dMDX-5iMlgpE9m*s+Lh~A#P1er*rF}GHV3h z=`STo?kIXw8I<`W0^*@mB1$}pj60R{aJ7>C2m=oghKyxMbFNq#EVLgP0cH3q7H z%0?L93-z6|+jiN|@v>ix?tRBU(v-4RV`}cQH*fp|)vd3)8i9hJ3hkuh^8dz{F5-~_ zUUr1T3cP%cCaTooM8dj|4*M=e6flH0&8ve32Q)0dyisl))XkZ7Wg~N}6y`+Qi2l+e zUd#F!nJp{#KIjbQdI`%oZ`?h=5G^kZ_uN`<(`3;a!~EMsWV|j-o>c?x#;zR2ktiB! 
z);5rrHl?GPtr6-o!tYd|uK;Vbsp4P{v_4??=^a>>U4_aUXPWQ$FPLE4PK$T^3Gkf$ zHo&9$U&G`d(Os6xt1r?sg14n)G8HNyWa^q8#nf0lbr4A-Fi;q6t-`pAx1T*$eKM*$ z|CX|gDrk#&1}>5H+`EjV$9Bm)Njw&7-ZR{1!CJTaXuP!$Pcg69`{w5BRHysB$(tWUes@@6aM69kb|Lx$%BRY^-o6bjH#0!7b;5~{6J+jKxU!Kmi# zndh@+?}WKSRY2gZ?Q`{(Uj|kb1%VWmRryOH0T)f3cKtG4oIF=F7RaRnH0Rc_&372={_3lRNsr95%ZO{IX{p@YJ^EI%+gvvKes5cY+PE@unghjdY5#9A!G z70u6}?zmd?v+{`vCu-53_v5@z)X{oPC@P)iA3jK$`r zSA2a7&!^zmUiZ82R2=1cumBQwOJUPz5Ay`RLfY(EiwKkrx%@YN^^XuET;tE zmr-6~I7j!R!KrHu5CWGSChO6deaLWa*9LLJbcAJsFd%Dy>a!>J`N)Z&oiU4OEP-!Ti^_!p}O?7`}i7Lsf$-gBkuY*`Zb z7=!nTT;5z$_5$=J=Ko+Cp|Q0J=%oFr>hBgnL3!tvFoLNhf#D0O=X^h+x08iB;@8pXdRHxX}6R4k@i6%vmsQwu^5z zk1ip`#^N)^#Lg#HOW3sPI33xqFB4#bOPVnY%d6prwxf;Y-w9{ky4{O6&94Ra8VN@K zb-lY;&`HtxW@sF!doT5T$2&lIvJpbKGMuDAFM#!QPXW87>}=Q4J3JeXlwHys?!1^#37q_k?N@+u&Ns20pEoBeZC*np;i;M{2C0Z4_br2gsh6eL z#8`#sn41+$iD?^GL%5?cbRcaa-Nx0vE(D=*WY%rXy3B%gNz0l?#noGJGP728RMY#q z=2&aJf@DcR?QbMmN)ItUe+VM_U!ryqA@1VVt$^*xYt~-qvW!J4Tp<-3>jT=7Zow5M z8mSKp0v4b%a8bxFr>3MwZHSWD73D@+$5?nZAqGM#>H@`)mIeC#->B)P8T$zh-Pxnc z8)~Zx?TWF4(YfKuF3WN_ckpCe5;x4V4AA3(i$pm|78{%!q?|~*eH0f=?j6i)n~Hso zmTo>vqEtB)`%hP55INf7HM@taH)v`Fw40Ayc*R!T?O{ziUpYmP)AH`euTK!zg9*6Z z!>M=$3pd0!&TzU=hc_@@^Yd3eUQpX4-33}b{?~5t5lgW=ldJ@dUAH%`l5US1y_`40 zs(X`Qk}vvMDYYq+@Rm+~IyCX;iD~pMgq^KY)T*aBz@DYEB={PxA>)mI6tM*sx-DmGQHEaHwRrAmNjO!ZLHO4b;;5mf@zzlPhkP($JeZGE7 z?^XN}Gf_feGoG~BjUgVa*)O`>lX=$BSR2)uD<9 z>o^|nb1^oVDhQbfW>>!;8-7<}nL6L^V*4pB=>wwW+RXAeRvKED(n1;R`A6v$6gy0I(;Vf?!4;&sgn7F%LpM}6PQ?0%2Z@b{It<(G1CZ|>913E0nR2r^Pa*Bp z@tFGi*CQ~@Yc-?{cwu1 zsilf=k^+Qs>&WZG(3WDixisHpR>`+ihiRwkL(3T|=xsoNP*@XX3BU8hr57l3k;pni zI``=3Nl4xh4oDj<%>Q1zYXHr%Xg_xrK3Nq?vKX3|^Hb(Bj+lONTz>4yhU-UdXt2>j z<>S4NB&!iE+ao{0Tx^N*^|EZU;0kJkx@zh}S^P{ieQjGl468CbC`SWnwLRYYiStXm zOxt~Rb3D{dz=nHMcY)#r^kF8|q8KZHVb9FCX2m^X*(|L9FZg!5a7((!J8%MjT$#Fs)M1Pb zq6hBGp%O1A+&%2>l0mpaIzbo&jc^!oN^3zxap3V2dNj3x<=TwZ&0eKX5PIso9j1;e zwUg+C&}FJ`k(M|%%}p=6RPUq4sT3-Y;k-<68ciZ~_j|bt>&9ZLHNVrp#+pk}XvM{8 z`?k}o-!if>hVlCP9j%&WI2V`5SW)BCeR5>MQhF)po=p~AYN%cNa_BbV6EEh_kk^@a zD>4&>uCGCUmyA-c)%DIcF4R6!>?6T~Mj_m{Hpq`*(wj>foHL;;%;?(((YOxGt)Bhx zuS+K{{CUsaC++%}S6~CJ=|vr(iIs-je)e9uJEU8ZJAz)w166q)R^2XI?@E2vUQ!R% zn@dxS!JcOimXkWJBz8Y?2JKQr>`~SmE2F2SL38$SyR1^yqj8_mkBp)o$@+3BQ~Mid z9U$XVqxX3P=XCKj0*W>}L0~Em`(vG<>srF8+*kPrw z20{z(=^w+ybdGe~Oo_i|hYJ@kZl*(9sHw#Chi&OIc?w`nBODp?ia$uF%Hs(X>xm?j zqZQ`Ybf@g#wli`!-al~3GWiE$K+LCe=Ndi!#CVjzUZ z!sD2O*;d28zkl))m)YN7HDi^z5IuNo3^w(zy8 zszJG#mp#Cj)Q@E@r-=NP2FVxxEAeOI2e=|KshybNB6HgE^(r>HD{*}S}mO>LuRGJT{*tfTzw_#+er-0${}%YPe@CMJ1Ng#j#)i)SnY@ss3gL;g zg2D~#Kpdfu#G;q1qz_TwSz1VJT(b3zby$Vk&;Y#1(A)|xj`_?i5YQ;TR%jice5E;0 zYHg;`zS5{S*9xI6o^j>rE8Ua*XhIw{_-*&@(R|C(am8__>+Ws&Q^ymy*X4~hR2b5r zm^p3sw}yv=tdyncy_Ui7{BQS732et~Z_@{-IhHDXAV`(Wlay<#hb>%H%WDi+K$862nA@BDtM#UCKMu+kM`!JHyWSi?&)A7_ z3{cyNG%a~nnH_!+;g&JxEMAmh-Z}rC!o7>OVzW&PoMyTA_g{hqXG)SLraA^OP**<7 zjWbr7z!o2n3hnx7A=2O=WL;`@9N{vQIM@&|G-ljrPvIuJHYtss0Er0fT5cMXNUf1B z7FAwBDixt0X7C3S)mPe5g`YtME23wAnbU)+AtV}z+e8G;0BP=bI;?(#|Ep!vVfDbK zvx+|CKF>yt0hWQ3drchU#XBU+HiuG*V^snFAPUp-5<#R&BUAzoB!aZ+e*KIxa26V}s6?nBK(U-7REa573wg-jqCg>H8~>O{ z*C0JL-?X-k_y%hpUFL?I>0WV{oV`Nb)nZbJG01R~AG>flIJf)3O*oB2i8~;!P?Wo_ z0|QEB*fifiL6E6%>tlAYHm2cjTFE@*<);#>689Z6S#BySQ@VTMhf9vYQyLeDg1*F} zjq>i1*x>5|CGKN{l9br3kB0EHY|k4{%^t7-uhjd#NVipUZa=EUuE5kS1_~qYX?>hJ z$}!jc9$O$>J&wnu0SgfYods^z?J4X;X7c77Me0kS-dO_VUQ39T(Kv(Y#s}Qqz-0AH z^?WRL(4RzpkD+T5FG_0NyPq-a-B7A5LHOCqwObRJi&oRi(<;OuIN7SV5PeHU$<@Zh zPozEV`dYmu0Z&Tqd>t>8JVde9#Pt+l95iHe$4Xwfy1AhI zDM4XJ;bBTTvRFtW>E+GzkN)9k!hA5z;xUOL2 zq4}zn-DP{qc^i|Y%rvi|^5k-*8;JZ~9a;>-+q_EOX+p1Wz;>i7c}M6Nv`^NY&{J-> 
z`(mzDJDM}QPu5i44**2Qbo(XzZ-ZDu%6vm8w@DUarqXj41VqP~ zs&4Y8F^Waik3y1fQo`bVUH;b=!^QrWb)3Gl=QVKr+6sxc=ygauUG|cm?|X=;Q)kQ8 zM(xrICifa2p``I7>g2R~?a{hmw@{!NS5`VhH8+;cV(F>B94M*S;5#O`YzZH1Z%yD? zZ61w(M`#aS-*~Fj;x|J!KM|^o;MI#Xkh0ULJcA?o4u~f%Z^16ViA27FxU5GM*rKq( z7cS~MrZ=f>_OWx8j#-Q3%!aEU2hVuTu(7`TQk-Bi6*!<}0WQi;_FpO;fhpL4`DcWp zGOw9vx0N~6#}lz(r+dxIGZM3ah-8qrqMmeRh%{z@dbUD2w15*_4P?I~UZr^anP}DB zU9CCrNiy9I3~d#&!$DX9e?A});BjBtQ7oGAyoI$8YQrkLBIH@2;lt4E^)|d6Jwj}z z&2_E}Y;H#6I4<10d_&P0{4|EUacwFHauvrjAnAm6yeR#}f}Rk27CN)vhgRqEyPMMS7zvunj2?`f;%?alsJ+-K+IzjJx>h8 zu~m_y$!J5RWAh|C<6+uiCNsOKu)E72M3xKK(a9Okw3e_*O&}7llNV!=P87VM2DkAk zci!YXS2&=P0}Hx|wwSc9JP%m8dMJA*q&VFB0yMI@5vWoAGraygwn){R+Cj6B1a2Px z5)u(K5{+;z2n*_XD!+Auv#LJEM)(~Hx{$Yb^ldQmcYF2zNH1V30*)CN_|1$v2|`LnFUT$%-tO0Eg|c5$BB~yDfzS zcOXJ$wpzVK0MfTjBJ0b$r#_OvAJ3WRt+YOLlJPYMx~qp>^$$$h#bc|`g0pF-Ao43? z>*A+8lx>}L{p(Tni2Vvk)dtzg$hUKjSjXRagj)$h#8=KV>5s)J4vGtRn5kP|AXIz! zPgbbVxW{2o4s-UM;c#We8P&mPN|DW7_uLF!a|^0S=wr6Esx9Z$2|c1?GaupU6$tb| zY_KU`(_29O_%k(;>^|6*pZURH3`@%EuKS;Ns z1lujmf;r{qAN&Q0&m{wJSZ8MeE7RM5+Sq;ul_ z`+ADrd_Um+G37js6tKsArNB}n{p*zTUxQr>3@wA;{EUbjNjlNd6$Mx zg0|MyU)v`sa~tEY5$en7^PkC=S<2@!nEdG6L=h(vT__0F=S8Y&eM=hal#7eM(o^Lu z2?^;05&|CNliYrq6gUv;|i!(W{0N)LWd*@{2q*u)}u*> z7MQgk6t9OqqXMln?zoMAJcc zMKaof_Up})q#DzdF?w^%tTI7STI^@8=Wk#enR*)&%8yje>+tKvUYbW8UAPg55xb70 zEn5&Ba~NmOJlgI#iS8W3-@N%>V!#z-ZRwfPO1)dQdQkaHsiqG|~we2ALqG7Ruup(DqSOft2RFg_X%3w?6VqvV1uzX_@F(diNVp z4{I|}35=11u$;?|JFBEE*gb;T`dy+8gWJ9~pNsecrO`t#V9jW-6mnfO@ff9od}b(3s4>p0i30gbGIv~1@a^F2kl7YO;DxmF3? zWi-RoXhzRJV0&XE@ACc?+@6?)LQ2XNm4KfalMtsc%4!Fn0rl zpHTrHwR>t>7W?t!Yc{*-^xN%9P0cs0kr=`?bQ5T*oOo&VRRu+1chM!qj%2I!@+1XF z4GWJ=7ix9;Wa@xoZ0RP`NCWw0*8247Y4jIZ>GEW7zuoCFXl6xIvz$ezsWgKdVMBH> z{o!A7f;R-@eK9Vj7R40xx)T<2$?F2E<>Jy3F;;=Yt}WE59J!1WN367 zA^6pu_zLoZIf*x031CcwotS{L8bJE(<_F%j_KJ2P_IusaZXwN$&^t716W{M6X2r_~ zaiMwdISX7Y&Qi&Uh0upS3TyEIXNDICQlT5fHXC`aji-c{U(J@qh-mWl-uMN|T&435 z5)a1dvB|oe%b2mefc=Vpm0C%IUYYh7HI*;3UdgNIz}R##(#{(_>82|zB0L*1i4B5j-xi9O4x10rs_J6*gdRBX=@VJ+==sWb&_Qc6tSOowM{BX@(zawtjl zdU!F4OYw2@Tk1L^%~JCwb|e#3CC>srRHQ*(N%!7$Mu_sKh@|*XtR>)BmWw!;8-mq7 zBBnbjwx8Kyv|hd*`5}84flTHR1Y@@uqjG`UG+jN_YK&RYTt7DVwfEDXDW4U+iO{>K zw1hr{_XE*S*K9TzzUlJH2rh^hUm2v7_XjwTuYap|>zeEDY$HOq3X4Tz^X}E9z)x4F zs+T?Ed+Hj<#jY-`Va~fT2C$=qFT-5q$@p9~0{G&eeL~tiIAHXA!f6C(rAlS^)&k<- zXU|ZVs}XQ>s5iONo~t!XXZgtaP$Iau;JT%h)>}v54yut~pykaNye4axEK#5@?TSsQ zE;Jvf9I$GVb|S`7$pG)4vgo9NXsKr?u=F!GnA%VS2z$@Z(!MR9?EPcAqi5ft)Iz6sNl`%kj+_H-X`R<>BFrBW=fSlD|{`D%@Rcbu2?%>t7i34k?Ujb)2@J-`j#4 zLK<69qcUuniIan-$A1+fR=?@+thwDIXtF1Tks@Br-xY zfB+zblrR(ke`U;6U~-;p1Kg8Lh6v~LjW@9l2P6s+?$2!ZRPX`(ZkRGe7~q(4&gEi<$ch`5kQ?*1=GSqkeV z{SA1EaW_A!t{@^UY2D^YO0(H@+kFVzZaAh0_`A`f(}G~EP~?B|%gtxu&g%^x{EYSz zk+T;_c@d;+n@$<>V%P=nk36?L!}?*=vK4>nJSm+1%a}9UlmTJTrfX4{Lb7smNQn@T zw9p2%(Zjl^bWGo1;DuMHN(djsEm)P8mEC2sL@KyPjwD@d%QnZ$ zMJ3cnn!_!iP{MzWk%PI&D?m?C(y2d|2VChluN^yHya(b`h>~GkI1y;}O_E57zOs!{ zt2C@M$^PR2U#(dZmA-sNreB@z-yb0Bf7j*yONhZG=onhx>t4)RB`r6&TP$n zgmN*)eCqvgriBO-abHQ8ECN0bw?z5Bxpx z=jF@?zFdVn?@gD5egM4o$m`}lV(CWrOKKq(sv*`mNcHcvw&Xryfw<{ch{O&qc#WCTXX6=#{MV@q#iHYba!OUY+MGeNTjP%Fj!WgM&`&RlI^=AWTOqy-o zHo9YFt!gQ*p7{Fl86>#-JLZo(b^O`LdFK~OsZBRR@6P?ad^Ujbqm_j^XycM4ZHFyg ziUbIFW#2tj`65~#2V!4z7DM8Z;fG0|APaQ{a2VNYpNotB7eZ5kp+tPDz&Lqs0j%Y4tA*URpcfi z_M(FD=fRGdqf430j}1z`O0I=;tLu81bwJXdYiN7_&a-?ly|-j*+=--XGvCq#32Gh(=|qj5F?kmihk{%M&$}udW5)DHK zF_>}5R8&&API}o0osZJRL3n~>76nUZ&L&iy^s>PMnNcYZ|9*1$v-bzbT3rpWsJ+y{ zPrg>5Zlery96Um?lc6L|)}&{992{_$J&=4%nRp9BAC6!IB=A&=tF>r8S*O-=!G(_( zwXbX_rGZgeiK*&n5E;f=k{ktyA1(;x_kiMEt0*gpp_4&(twlS2e5C?NoD{n>X2AT# 
zY@Zp?#!b1zNq96MQqeO*M1MMBin5v#RH52&Xd~DO6-BZLnA6xO1$sou(YJ1Dlc{WF zVa%2DyYm`V#81jP@70IJ;DX@y*iUt$MLm)ByAD$eUuji|5{ptFYq(q)mE(5bOpxjM z^Q`AHWq44SG3`_LxC9fwR)XRVIp=B%<(-lOC3jI#bb@dK(*vjom!=t|#<@dZql%>O z15y^{4tQoeW9Lu%G&V$90x6F)xN6y_oIn;!Q zs)8jT$;&;u%Y>=T3hg34A-+Y*na=|glcStr5D;&5*t5*DmD~x;zQAV5{}Ya`?RRGa zT*t9@$a~!co;pD^!J5bo?lDOWFx%)Y=-fJ+PDGc0>;=q=s?P4aHForSB+)v0WY2JH z?*`O;RHum6j%#LG)Vu#ciO#+jRC3!>T(9fr+XE7T2B7Z|0nR5jw@WG)kDDzTJ=o4~ zUpeyt7}_nd`t}j9BKqryOha{34erm)RmST)_9Aw)@ zHbiyg5n&E{_CQR@h<}34d7WM{s{%5wdty1l+KX8*?+-YkNK2Be*6&jc>@{Fd;Ps|| z26LqdI3#9le?;}risDq$K5G3yoqK}C^@-8z^wj%tdgw-6@F#Ju{Sg7+y)L?)U$ez> zoOaP$UFZ?y5BiFycir*pnaAaY+|%1%8&|(@VB)zweR%?IidwJyK5J!STzw&2RFx zZV@qeaCB01Hu#U9|1#=Msc8Pgz5P*4Lrp!Q+~(G!OiNR{qa7|r^H?FC6gVhkk3y7=uW#Sh;&>78bZ}aK*C#NH$9rX@M3f{nckYI+5QG?Aj1DM)@~z_ zw!UAD@gedTlePB*%4+55naJ8ak_;))#S;4ji!LOqY5VRI){GMwHR~}6t4g>5C_#U# ztYC!tjKjrKvRy=GAsJVK++~$|+s!w9z3H4G^mACv=EErXNSmH7qN}%PKcN|8%9=i)qS5+$L zu&ya~HW%RMVJi4T^pv?>mw*Gf<)-7gf#Qj|e#w2|v4#t!%Jk{&xlf;$_?jW*n!Pyx zkG$<18kiLOAUPuFfyu-EfWX%4jYnjBYc~~*9JEz6oa)_R|8wjZA|RNrAp%}14L7fW zi7A5Wym*K+V8pkqqO-X#3ft{0qs?KVt^)?kS>AicmeO&q+~J~ zp0YJ_P~_a8j= zsAs~G=8F=M{4GZL{|B__UorX@MRNQLn?*_gym4aW(~+i13knnk1P=khoC-ViMZk+x zLW(l}oAg1H`dU+Fv**;qw|ANDSRs>cGqL!Yw^`; zv;{E&8CNJcc)GHzTYM}f&NPw<6j{C3gaeelU#y!M)w-utYEHOCCJo|Vgp7K6C_$14 zqIrLUB0bsgz^D%V%fbo2f9#yb#CntTX?55Xy|Kps&Xek*4_r=KDZ z+`TQuv|$l}MWLzA5Ay6Cvsa^7xvwXpy?`w(6vx4XJ zWuf1bVSb#U8{xlY4+wlZ$9jjPk)X_;NFMqdgq>m&W=!KtP+6NL57`AMljW+es zzqjUjgz;V*kktJI?!NOg^s_)ph45>4UDA!Vo0hn>KZ+h-3=?Y3*R=#!fOX zP$Y~+14$f66ix?UWB_6r#fMcC^~X4R-<&OD1CSDNuX~y^YwJ>sW0j`T<2+3F9>cLo z#!j57$ll2K9(%$4>eA7(>FJX5e)pR5&EZK!IMQzOfik#FU*o*LGz~7u(8}XzIQRy- z!U7AlMTIe|DgQFmc%cHy_9^{o`eD%ja_L>ckU6$O4*U**o5uR7`FzqkU8k4gxtI=o z^P^oGFPm5jwZMI{;nH}$?p@uV8FT4r=|#GziKXK07bHJLtK}X%I0TON$uj(iJ`SY^ zc$b2CoxCQ>7LH@nxcdW&_C#fMYBtTxcg46dL{vf%EFCZ~eErMvZq&Z%Lhumnkn^4A zsx$ay(FnN7kYah}tZ@0?-0Niroa~13`?hVi6`ndno`G+E8;$<6^gsE-K3)TxyoJ4M zb6pj5=I8^FD5H@`^V#Qb2^0cx7wUz&cruA5g>6>qR5)O^t1(-qqP&1g=qvY#s&{bx zq8Hc%LsbK1*%n|Y=FfojpE;w~)G0-X4i*K3{o|J7`krhIOd*c*$y{WIKz2n2*EXEH zT{oml3Th5k*vkswuFXdGDlcLj15Nec5pFfZ*0?XHaF_lVuiB%Pv&p7z)%38}%$Gup zVTa~C8=cw%6BKn_|4E?bPNW4PT7}jZQLhDJhvf4z;~L)506IE0 zX!tWXX(QOQPRj-p80QG79t8T2^az4Zp2hOHziQlvT!|H)jv{Ixodabzv6lBj)6WRB z{)Kg@$~~(7$-az?lw$4@L%I&DI0Lo)PEJJziWP33a3azb?jyXt1v0N>2kxwA6b%l> zZqRpAo)Npi&loWbjFWtEV)783BbeIAhqyuc+~>i7aQ8shIXt)bjCWT6$~ro^>99G} z2XfmT0(|l!)XJb^E!#3z4oEGIsL(xd; zYX1`1I(cG|u#4R4T&C|m*9KB1`UzKvho5R@1eYtUL9B72{i(ir&ls8g!pD ztR|25xGaF!4z5M+U@@lQf(12?xGy`!|3E}7pI$k`jOIFjiDr{tqf0va&3pOn6Pu)% z@xtG2zjYuJXrV)DUrIF*y<1O1<$#54kZ#2;=X51J^F#0nZ0(;S$OZDt_U2bx{RZ=Q zMMdd$fH|!s{ zXq#l;{`xfV`gp&C>A`WrQU?d{!Ey5(1u*VLJt>i27aZ-^&2IIk=zP5p+{$q(K?2(b z8?9h)kvj9SF!Dr zoyF}?V|9;6abHxWk2cEvGs$-}Pg}D+ZzgkaN&$Snp%;5m%zh1E#?Wac-}x?BYlGN#U#Mek*}kek#I9XaHt?mz3*fDrRTQ#&#~xyeqJk1QJ~E$7qsw6 z?sV;|?*=-{M<1+hXoj?@-$y+(^BJ1H~wQ9G8C0#^aEAyhDduNX@haoa=PuPp zYsGv8UBfQaRHgBgLjmP^eh>fLMeh{8ic)?xz?#3kX-D#Z{;W#cd_`9OMFIaJg-=t`_3*!YDgtNQ2+QUEAJB9M{~AvT$H`E)IKmCR21H532+ata8_i_MR@ z2Xj<3w<`isF~Ah$W{|9;51ub*f4#9ziKrOR&jM{x7I_7()O@`F*5o$KtZ?fxU~g`t zUovNEVKYn$U~VX8eR)qb`7;D8pn*Pp$(otYTqL)5KH$lUS-jf}PGBjy$weoceAcPp z&5ZYB$r&P$MN{0H0AxCe4Qmd3T%M*5d4i%#!nmBCN-WU-4m4Tjxn-%j3HagwTxCZ9 z)j5vO-C7%s%D!&UfO>bi2oXiCw<-w{vVTK^rVbv#W=WjdADJy8$khnU!`ZWCIU`># zyjc^1W~pcu>@lDZ{zr6gv%)2X4n27~Ve+cQqcND%0?IFSP4sH#yIaXXYAq^z3|cg` z`I3$m%jra>e2W-=DiD@84T!cb%||k)nPmEE09NC%@PS_OLhkrX*U!cgD*;;&gIaA(DyVT4QD+q_xu z>r`tg{hiGY&DvD-)B*h+YEd+Zn)WylQl}<4>(_NlsKXCRV;a)Rcw!wtelM2_rWX`j 
zTh5A|i6=2BA(iMCnj_fob@*eA;V?oa4Z1kRBGaU07O70fb6-qmA$Hg$ps@^ka1=RO zTbE_2#)1bndC3VuK@e!Sftxq4=Uux}fDxXE#Q5_x=E1h>T5`DPHz zbH<_OjWx$wy7=%0!mo*qH*7N4tySm+R0~(rbus`7;+wGh;C0O%x~fEMkt!eV>U$`i z5>Q(o z=t$gPjgGh0&I7KY#k50V7DJRX<%^X z>6+ebc9efB3@eE2Tr){;?_w`vhgF>`-GDY(YkR{9RH(MiCnyRtd!LxXJ75z+?2 zGi@m^+2hKJ5sB1@Xi@s_@p_Kwbc<*LQ_`mr^Y%j}(sV_$`J(?_FWP)4NW*BIL~sR>t6 zM;qTJZ~GoY36&{h-Pf}L#y2UtR}>ZaI%A6VkU>vG4~}9^i$5WP2Tj?Cc}5oQxe2=q z8BeLa$hwCg_psjZyC2+?yX4*hJ58Wu^w9}}7X*+i5Rjqu5^@GzXiw#SUir1G1`jY% zOL=GE_ENYxhcyUrEt9XlMNP6kx6h&%6^u3@zB8KUCAa18T(R2J`%JjWZ z!{7cXaEW+Qu*iJPu+m>QqW}Lo$4Z+!I)0JNzZ&_M%=|B1yejFRM04bGAvu{=lNPd+ zJRI^DRQ(?FcVUD+bgEcAi@o(msqys9RTCG#)TjI!9~3-dc`>gW;HSJuQvH~d`MQs86R$|SKXHh zqS9Qy)u;T`>>a!$LuaE2keJV%;8g)tr&Nnc;EkvA-RanHXsy)D@XN0a>h}z2j81R; zsUNJf&g&rKpuD0WD@=dDrPHdBoK42WoBU|nMo17o(5^;M|dB4?|FsAGVrSyWcI`+FVw^vTVC`y}f(BwJl zrw3Sp151^9=}B})6@H*i4-dIN_o^br+BkcLa^H56|^2XsT0dESw2 zMX>(KqNl=x2K5=zIKg}2JpGAZu{I_IO}0$EQ5P{4zol**PCt3F4`GX}2@vr8#Y)~J zKb)gJeHcFnR@4SSh%b;c%J`l=W*40UPjF#q{<}ywv-=vHRFmDjv)NtmC zQx9qm)d%0zH&qG7AFa3VAU1S^(n8VFTC~Hb+HjYMjX8r#&_0MzlNR*mnLH5hi}`@{ zK$8qiDDvS_(L9_2vHgzEQ${DYSE;DqB!g*jhJghE&=LTnbgl&Xepo<*uRtV{2wDHN z)l;Kg$TA>Y|K8Lc&LjWGj<+bp4Hiye_@BfU(y#nF{fpR&|Ltbye?e^j0}8JC4#xi% zv29ZR%8%hk=3ZDvO-@1u8KmQ@6p%E|dlHuy#H1&MiC<*$YdLkHmR#F3ae;bKd;@*i z2_VfELG=B}JMLCO-6UQy^>RDE%K4b>c%9ki`f~Z2Qu8hO7C#t%Aeg8E%+}6P7Twtg z-)dj(w}_zFK&86KR@q9MHicUAucLVshUdmz_2@32(V`y3`&Kf8Q2I)+!n0mR=rrDU zXvv^$ho;yh*kNqJ#r1}b0|i|xRUF6;lhx$M*uG3SNLUTC@|htC z-=fsw^F%$qqz4%QdjBrS+ov}Qv!z00E+JWas>p?z@=t!WWU3K*?Z(0meTuTOC7OTx zU|kFLE0bLZ+WGcL$u4E}5dB0g`h|uwv3=H6f+{5z9oLv-=Q45+n~V4WwgO=CabjM% zBAN+RjM65(-}>Q2V#i1Na@a0`08g&y;W#@sBiX6Tpy8r}*+{RnyGUT`?XeHSqo#|J z^ww~c;ou|iyzpErDtlVU=`8N7JSu>4M z_pr9=tX0edVn9B}YFO2y(88j#S{w%E8vVOpAboK*27a7e4Ekjt0)hIX99*1oE;vex z7#%jhY=bPijA=Ce@9rRO(Vl_vnd00!^TAc<+wVvRM9{;hP*rqEL_(RzfK$er_^SN; z)1a8vo8~Dr5?;0X0J62Cusw$A*c^Sx1)dom`-)Pl7hsW4i(r*^Mw`z5K>!2ixB_mu z*Ddqjh}zceRFdmuX1akM1$3>G=#~|y?eYv(e-`Qy?bRHIq=fMaN~fB zUa6I8Rt=)jnplP>yuS+P&PxeWpJ#1$F`iqRl|jF$WL_aZFZl@kLo&d$VJtu&w?Q0O zzuXK>6gmygq(yXJy0C1SL}T8AplK|AGNUOhzlGeK_oo|haD@)5PxF}rV+5`-w{Aag zus45t=FU*{LguJ11Sr-28EZkq;!mJO7AQGih1L4rEyUmp>B!%X0YemsrV3QFvlgt* z5kwlPzaiJ+kZ^PMd-RRbl(Y?F*m`4*UIhIuf#8q>H_M=fM*L_Op-<_r zBZagV=4B|EW+KTja?srADTZXCd3Yv%^Chfpi)cg{ED${SI>InNpRj5!euKv?=Xn92 zsS&FH(*w`qLIy$doc>RE&A5R?u zzkl1sxX|{*fLpXvIW>9d<$ePROttn3oc6R!sN{&Y+>Jr@yeQN$sFR z;w6A<2-0%UA?c8Qf;sX7>>uKRBv3Ni)E9pI{uVzX|6Bb0U)`lhLE3hK58ivfRs1}d zNjlGK0hdq0qjV@q1qI%ZFMLgcpWSY~mB^LK)4GZ^h_@H+3?dAe_a~k*;9P_d7%NEFP6+ zgV(oGr*?W(ql?6SQ~`lUsjLb%MbfC4V$)1E0Y_b|OIYxz4?O|!kRb?BGrgiH5+(>s zoqM}v*;OBfg-D1l`M6T6{K`LG+0dJ1)!??G5g(2*vlNkm%Q(MPABT$r13q?|+kL4- zf)Mi5r$sn;u41aK(K#!m+goyd$c!KPl~-&-({j#D4^7hQkV3W|&>l_b!}!z?4($OA z5IrkfuT#F&S1(`?modY&I40%gtroig{YMvF{K{>5u^I51k8RriGd${z)=5k2tG zM|&Bp5kDTfb#vfuTTd?)a=>bX=lokw^y9+2LS?kwHQIWI~pYgy7 zb?A-RKVm_vM5!9?C%qYdfRAw& zAU7`up~%g=p@}pg#b7E)BFYx3g%(J36Nw(Dij!b>cMl@CSNbrW!DBDbTD4OXk!G4x zi}JBKc8HBYx$J~31PXH+4^x|UxK~(<@I;^3pWN$E=sYma@JP|8YL`L(zI6Y#c%Q{6 z*APf`DU$S4pr#_!60BH$FGViP14iJmbrzSrOkR;f3YZa{#E7Wpd@^4E-zH8EgPc-# zKWFPvh%WbqU_%ZEt`=Q?odKHc7@SUmY{GK`?40VuL~o)bS|is$Hn=<=KGHOsEC5tB zFb|q}gGlL97NUf$G$>^1b^3E18PZ~Pm9kX%*ftnolljiEt@2#F2R5ah$zbXd%V_Ev zyDd{1o_uuoBga$fB@Fw!V5F3jIr=a-ykqrK?WWZ#a(bglI_-8pq74RK*KfQ z0~Dzus7_l;pMJYf>Bk`)`S8gF!To-BdMnVw5M-pyu+aCiC5dwNH|6fgRsIKZcF&)g zr}1|?VOp}I3)IR@m1&HX1~#wsS!4iYqES zK}4J{Ei>;e3>LB#Oly>EZkW14^@YmpbgxCDi#0RgdM${&wxR+LiX}B+iRioOB0(pDKpVEI;ND?wNx>%e|m{RsqR_{(nmQ z3ZS}@t!p4a(BKx_-CYwrcyJ5u1TO9bcXti$8sy>xcLKqKCc#~UOZYD{llKTSFEjJ~ 
zyNWt>tLU}*>^`TvPxtP%F`ZJQw@W0^>x;!^@?k_)9#bF$j0)S3;mH-IR5y82l|%=F z2lR8zhP?XNP-ucZZ6A+o$xOyF!w;RaLHGh57GZ|TCXhJqY~GCh)aXEV$1O&$c}La1 zjuJxkY9SM4av^Hb;i7efiYaMwI%jGy`3NdY)+mcJhF(3XEiSlU3c|jMBi|;m-c?~T z+x0_@;SxcoY=(6xNgO$bBt~Pj8`-<1S|;Bsjrzw3@zSjt^JC3X3*$HI79i~!$RmTz zsblZsLYs7L$|=1CB$8qS!tXrWs!F@BVuh?kN(PvE5Av-*r^iYu+L^j^m9JG^#=m>@ z=1soa)H*w6KzoR$B8mBCXoU;f5^bVuwQ3~2LKg!yxomG1#XPmn(?YH@E~_ED+W6mxs%x{%Z<$pW`~ON1~2XjP5v(0{C{+6Dm$00tsd3w=f=ZENy zOgb-=f}|Hb*LQ$YdWg<(u7x3`PKF)B7ZfZ6;1FrNM63 z?O6tE%EiU@6%rVuwIQjvGtOofZBGZT1Sh(xLIYt9c4VI8`!=UJd2BfLjdRI#SbVAX ziT(f*RI^T!IL5Ac>ql7uduF#nuCRJ1)2bdvAyMxp-5^Ww5p#X{rb5)(X|fEhDHHW{ zw(Lfc$g;+Q`B0AiPGtmK%*aWfQQ$d!*U<|-@n2HZvCWSiw^I>#vh+LyC;aaVWGbmkENr z&kl*8o^_FW$T?rDYLO1Pyi%>@&kJKQoH2E0F`HjcN}Zlnx1ddoDA>G4Xu_jyp6vuT zPvC}pT&Owx+qB`zUeR|4G;OH(<<^_bzkjln0k40t`PQxc$7h(T8Ya~X+9gDc8Z9{Z z&y0RAU}#_kQGrM;__MK9vwIwK^aoqFhk~dK!ARf1zJqHMxF2?7-8|~yoO@_~Ed;_wvT%Vs{9RK$6uUQ|&@#6vyBsFK9eZW1Ft#D2)VpQRwpR(;x^ zdoTgMqfF9iBl%{`QDv7B0~8{8`8k`C4@cbZAXBu00v#kYl!#_Wug{)2PwD5cNp?K^ z9+|d-4z|gZ!L{57>!Ogfbzchm>J1)Y%?NThxIS8frAw@z>Zb9v%3_3~F@<=LG%r*U zaTov}{{^z~SeX!qgSYow`_5)ij*QtGp4lvF`aIGQ>@3ZTkDmsl#@^5*NGjOuu82}o zzLF~Q9SW+mP=>88%eSA1W4_W7-Q>rdq^?t=m6}^tDPaBRGFLg%ak93W!kOp#EO{6& zP%}Iff5HZQ9VW$~+9r=|Quj#z*=YwcnssS~9|ub2>v|u1JXP47vZ1&L1O%Z1DsOrDfSIMHU{VT>&>H=9}G3i@2rP+rx@eU@uE8rJNec zij~#FmuEBj03F1~ct@C@$>y)zB+tVyjV3*n`mtAhIM0$58vM9jOQC}JJOem|EpwqeMuYPxu3sv}oMS?S#o6GGK@8PN59)m&K4Dc&X% z(;XL_kKeYkafzS3Wn5DD>Yiw{LACy_#jY4op(>9q>>-*9@C0M+=b#bknAWZ37^(Ij zq>H%<@>o4a#6NydoF{_M4i4zB_KG)#PSye9bk0Ou8h%1Dtl7Q_y#7*n%g)?m>xF~( zjqvOwC;*qvN_3(*a+w2|ao0D?@okOvg8JskUw(l7n`0fncglavwKd?~l_ryKJ^Ky! zKCHkIC-o7%fFvPa$)YNh022lakMar^dgL=t#@XLyNHHw!b?%WlM)R@^!)I!smZL@k zBi=6wE5)2v&!UNV(&)oOYW(6Qa!nUjDKKBf-~Da=#^HE4(@mWk)LPvhyN3i4goB$3K8iV7uh zsv+a?#c4&NWeK(3AH;ETrMOIFgu{_@%XRwCZ;L=^8Ts)hix4Pf3yJRQ<8xb^CkdmC z?c_gB)XmRsk`9ch#tx4*hO=#qS7={~Vb4*tTf<5P%*-XMfUUYkI9T1cEF;ObfxxI-yNuA=I$dCtz3ey znVkctYD*`fUuZ(57+^B*R=Q}~{1z#2!ca?)+YsRQb+lt^LmEvZt_`=j^wqig+wz@n@ z`LIMQJT3bxMzuKg8EGBU+Q-6cs5(@5W?N>JpZL{$9VF)veF`L5%DSYTNQEypW%6$u zm_~}T{HeHj1bAlKl8ii92l9~$dm=UM21kLemA&b$;^!wB7#IKWGnF$TVq!!lBlG4 z{?Rjz?P(uvid+|i$VH?`-C&Gcb3{(~Vpg`w+O);Wk1|Mrjxrht0GfRUnZqz2MhrXa zqgVC9nemD5)H$to=~hp)c=l9?#~Z_7i~=U-`FZxb-|TR9@YCxx;Zjo-WpMNOn2)z) zFPGGVl%3N$f`gp$gPnWC+f4(rmts%fidpo^BJx72zAd7|*Xi{2VXmbOm)1`w^tm9% znM=0Fg4bDxH5PxPEm{P3#A(mxqlM7SIARP?|2&+c7qmU8kP&iApzL|F>Dz)Ixp_`O zP%xrP1M6@oYhgo$ZWwrAsYLa4 z|I;DAvJxno9HkQrhLPQk-8}=De{9U3U%)dJ$955?_AOms!9gia%)0E$Mp}$+0er@< zq7J&_SzvShM?e%V?_zUu{niL@gt5UFOjFJUJ}L?$f%eU%jUSoujr{^O=?=^{19`ON zlRIy8Uo_nqcPa6@yyz`CM?pMJ^^SN^Fqtt`GQ8Q#W4kE7`V9^LT}j#pMChl!j#g#J zr-=CCaV%xyFeQ9SK+mG(cTwW*)xa(eK;_Z(jy)woZp~> zA(4}-&VH+TEeLzPTqw&FOoK(ZjD~m{KW05fiGLe@E3Z2`rLukIDahE*`u!ubU)9`o zn^-lyht#E#-dt~S>}4y$-mSbR8{T@}22cn^refuQ08NjLOv?JiEWjyOnzk<^R5%gO zhUH_B{oz~u#IYwVnUg8?3P*#DqD8#X;%q%HY**=I>>-S|!X*-!x1{^l#OnR56O>iD zc;i;KS+t$koh)E3)w0OjWJl_aW2;xF=9D9Kr>)(5}4FqUbk# zI#$N8o0w;IChL49m9CJTzoC!|u{Ljd%ECgBOf$}&jA^$(V#P#~)`&g`H8E{uv52pp zwto`xUL-L&WTAVREEm$0g_gYPL(^vHq(*t1WCH_6alhkeW&GCZ3hL)|{O-jiFOBrF z!EW=Jej|dqQitT6!B-7&io2K)WIm~Q)v@yq%U|VpV+I?{y0@Yd%n8~-NuuM*pM~KA z85YB};IS~M(c<}4Hxx>qRK0cdl&e?t253N%vefkgds>Ubn8X}j6Vpgs>a#nFq$osY z1ZRwLqFv=+BTb=i%D2Wv>_yE0z}+niZ4?rE|*a3d7^kndWGwnFqt+iZ(7+aln<}jzbAQ(#Z2SS}3S$%Bd}^ zc9ghB%O)Z_mTZMRC&H#)I#fiLuIkGa^`4e~9oM5zKPx?zjkC&Xy0~r{;S?FS%c7w< zWbMpzc(xSw?9tGxG~_l}Acq}zjt5ClaB7-!vzqnlrX;}$#+PyQ9oU)_DfePh2E1<7 ztok6g6K^k^DuHR*iJ?jw?bs_whk|bx`dxu^nC6#e{1*m~z1eq7m}Cf$*^Eua(oi_I zAL+3opNhJteu&mWQ@kQWPucmiP)4|nFG`b2tpC;h{-PI@`+h?9v=9mn|0R-n8#t=+Z*FD(c5 
zjj79Jxkgck*DV=wpFgRZuwr%}KTm+dx?RT@aUHJdaX-ODh~gByS?WGx&czAkvkg;x zrf92l8$Or_zOwJVwh>5rB`Q5_5}ef6DjS*$x30nZbuO3dijS*wvNEqTY5p1_A0gWr znH<(Qvb!os14|R)n2Ost>jS2;d1zyLHu`Svm|&dZD+PpP{Bh>U&`Md;gRl64q;>{8MJJM$?UNUd`aC>BiLe>*{ zJY15->yW+<3rLgYeTruFDtk1ovU<$(_y7#HgUq>)r0{^}Xbth}V#6?%5jeFYt;SG^ z3qF)=uWRU;Jj)Q}cpY8-H+l_n$2$6{ZR?&*IGr{>ek!69ZH0ZoJ*Ji+ezzlJ^%qL3 zO5a`6gwFw(moEzqxh=yJ9M1FTn!eo&qD#y5AZXErHs%22?A+JmS&GIolml!)rZTnUDM3YgzYfT#;OXn)`PWv3Ta z!-i|-Wojv*k&bC}_JJDjiAK(Ba|YZgUI{f}TdEOFT2+}nPmttytw7j%@bQZDV1vvj z^rp{gRkCDmYJHGrE1~e~AE!-&6B6`7UxVQuvRrfdFkGX8H~SNP_X4EodVd;lXd^>eV1jN+Tt4}Rsn)R0LxBz0c=NXU|pUe!MQQFkGBWbR3&(jLm z%RSLc#p}5_dO{GD=DEFr=Fc% z85CBF>*t!6ugI?soX(*JNxBp+-DdZ4X0LldiK}+WWGvXV(C(Ht|!3$psR=&c*HIM=BmX;pRIpz@Ale{9dhGe(U2|Giv;# zOc|;?p67J=Q(kamB*aus=|XP|m{jN^6@V*Bpm?ye56Njh#vyJqE=DweC;?Rv7faX~ zde03n^I~0B2vUmr;w^X37tVxUK?4}ifsSH5_kpKZIzpYu0;Kv}SBGfI2AKNp+VN#z`nI{UNDRbo-wqa4NEls zICRJpu)??cj^*WcZ^MAv+;bDbh~gpN$1Cor<{Y2oyIDws^JsfW^5AL$azE(T0p&pP z1Mv~6Q44R&RHoH95&OuGx2srIr<@zYJTOMKiVs;Bx3py89I87LOb@%mr`0)#;7_~Z zzcZj8?w=)>%5@HoCHE_&hnu(n_yQ-L(~VjpjjkbT7e)Dk5??fApg(d>vwLRJ-x{um z*Nt?DqTSxh_MIyogY!vf1mU1`Gld-&L)*43f6dilz`Q@HEz;+>MDDYv9u!s;WXeao zUq=TaL$P*IFgJzrGc>j1dDOd zed+=ZBo?w4mr$2)Ya}?vedDopomhW1`#P<%YOJ_j=WwClX0xJH-f@s?^tmzs_j7t!k zK@j^zS0Q|mM4tVP5Ram$VbS6|YDY&y?Q1r1joe9dj08#CM{RSMTU}(RCh`hp_Rkl- zGd|Cv~G@F{DLhCizAm9AN!^{rNs8hu!G@8RpnGx7e`-+K$ffN<0qjR zGq^$dj_Tv!n*?zOSyk5skI7JVKJ)3jysnjIu-@VSzQiP8r6MzudCU=~?v-U8yzo^7 zGf~SUTvEp+S*!X9uX!sq=o}lH;r{pzk~M*VA(uyQ`3C8!{C;)&6)95fv(cK!%Cuz$ z_Zal57H6kPN>25KNiI6z6F)jzEkh#%OqU#-__Xzy)KyH};81#N6OfX$$IXWzOn`Q& z4f$Z1t>)8&8PcYfEwY5UadU1yg+U*(1m2ZlHoC-!2?gB!!fLhmTl))D@dhvkx#+Yj z1O=LV{(T%{^IeCuFK>%QR!VZ4GnO5tK8a+thWE zg4VytZrwcS?7^ zuZfhYnB8dwd%VLO?DK7pV5Wi<(`~DYqOXn8#jUIL^)12*Dbhk4GmL_E2`WX&iT16o zk(t|hok(Y|v-wzn?4x34T)|+SfZP>fiq!><*%vnxGN~ypST-FtC+@TPv*vYv@iU!_ z@2gf|PrgQ?Ktf*9^CnJ(x*CtZVB8!OBfg0%!wL;Z8(tYYre0vcnPGlyCc$V(Ipl*P z_(J!a=o@vp^%Efme!K74(Ke7A>Y}|sxV+JL^aYa{~m%5#$$+R1? 
zGaQhZTTX!#s#=Xtpegqero$RNt&`4xn3g$)=y*;=N=Qai)}~`xtxI_N*#MMCIq#HFifT zz(-*m;pVH&+4bixL&Bbg)W5FN^bH87pAHp)zPkWNMfTFqS=l~AC$3FX3kQUSh_C?-ZftyClgM)o_D7cX$RGlEYblux0jv5 zTr|i-I3@ZPCGheCl~BGhImF)K4!9@?pC(gi3ozX=a!|r1)LFxy_8c&wY0<^{2cm|P zv6Y`QktY*;I)IUd5y3ne1CqpVanlY45z8hf4&$EUBnucDj16pDa4&GI&TArYhf*xh zdj>*%APH8(h~c>o@l#%T>R$e>rwVx_WUB|~V`p^JHsg*y12lzj&zF}w6W09HwB2yb z%Q~`es&(;7#*DUC_w-Dmt7|$*?TA_m;zB+-u{2;Bg{O}nV7G_@7~<)Bv8fH^G$XG8$(&{A zwXJK5LRK%M34(t$&NI~MHT{UQ9qN-V_yn|%PqC81EIiSzmMM=2zb`mIwiP_b)x+2M z7Gd`83h79j#SItpQ}luuf2uOU`my_rY5T{6P#BNlb%h%<#MZb=m@y5aW;#o1^2Z)SWo+b`y0gV^iRcZtz5!-05vF z7wNo=hc6h4hc&s@uL^jqRvD6thVYtbErDK9k!;+a0xoE0WL7zLixjn5;$fXvT=O3I zT6jI&^A7k6R{&5#lVjz#8%_RiAa2{di{`kx79K+j72$H(!ass|B%@l%KeeKchYLe_ z>!(JC2fxsv>XVen+Y42GeYPxMWqm`6F$(E<6^s|g(slNk!lL*6v^W2>f6hh^mE$s= z3D$)}{V5(Qm&A6bp%2Q}*GZ5Qrf}n7*Hr51?bJOyA-?B4vg6y_EX<*-e20h{=0Mxs zbuQGZ$fLyO5v$nQ&^kuH+mNq9O#MWSfThtH|0q1i!NrWj^S}_P;Q1OkYLW6U^?_7G zx2wg?CULj7))QU(n{$0JE%1t2dWrMi2g-Os{v|8^wK{@qlj%+1b^?NI z$}l2tjp0g>K3O+p%yK<9!XqmQ?E9>z&(|^Pi~aSRwI5x$jaA62GFz9%fmO3t3a>cq zK8Xbv=5Ps~4mKN5+Eqw12(!PEyedFXv~VLxMB~HwT1Vfo51pQ#D8e$e4pFZ{&RC2P z5gTIzl{3!&(tor^BwZfR8j4k{7Rq#`riKXP2O-Bh66#WWK2w=z;iD9GLl+3 zpHIaI4#lQ&S-xBK8PiQ%dwOh?%BO~DCo06pN7<^dnZCN@NzY{_Z1>rrB0U|nC&+!2 z2y!oBcTd2;@lzyk(B=TkyZ)zy0deK05*Q0zk+o$@nun`VI1Er7pjq>8V zNmlW{p7S^Btgb(TA}jL(uR>`0w8gHP^T~Sh5Tkip^spk4SBAhC{TZU}_Z)UJw-}zm zPq{KBm!k)?P{`-(9?LFt&YN4s%SIZ-9lJ!Ws~B%exHOeVFk3~}HewnnH(d)qkLQ_d z6h>O)pEE{vbOVw}E+jdYC^wM+AAhaI(YAibUc@B#_mDss0Ji&BK{WG`4 zOk>vSNq(Bq2IB@s>>Rxm6Wv?h;ZXkpb1l8u|+_qXWdC*jjcPCixq;!%BVPSp#hP zqo`%cNf&YoQXHC$D=D45RiT|5ngPlh?0T~?lUf*O)){K@*Kbh?3RW1j9-T?%lDk@y z4+~?wKI%Y!-=O|_IuKz|=)F;V7ps=5@g)RrE;;tvM$gUhG>jHcw2Hr@fS+k^Zr~>G z^JvPrZc}_&d_kEsqAEMTMJw!!CBw)u&ZVzmq+ZworuaE&TT>$pYsd9|g9O^0orAe8 z221?Va!l1|Y5X1Y?{G7rt1sX#qFA^?RLG^VjoxPf63;AS=_mVDfGJKg73L zsGdnTUD40y(>S##2l|W2Cy!H(@@5KBa(#gs`vlz}Y~$ot5VsqPQ{{YtjYFvIumZzt zA{CcxZLJR|4#{j7k~Tu*jkwz8QA|5G1$Cl895R`Zyp;irp1{KN){kB30O8P1W5;@bG znvX74roeMmQlUi=v9Y%(wl$ZC#9tKNFpvi3!C}f1m6Ct|l2g%psc{TJp)@yu)*e2> z((p0Fg*8gJ!|3WZke9;Z{8}&NRkv7iP=#_y-F}x^y?2m%-D_aj^)f04%mneyjo_;) z6qc_Zu$q37d~X``*eP~Q>I2gg%rrV8v=kDfpp$=%Vj}hF)^dsSWygoN(A$g*E=Do6FX?&(@F#7pbiJ`;c0c@Ul zDqW_90Wm#5f2L<(Lf3)3TeXtI7nhYwRm(F;*r_G6K@OPW4H(Y3O5SjUzBC}u3d|eQ8*8d@?;zUPE+i#QNMn=r(ap?2SH@vo*m z3HJ%XuG_S6;QbWy-l%qU;8x;>z>4pMW7>R}J%QLf%@1BY(4f_1iixd-6GlO7Vp*yU zp{VU^3?s?90i=!#>H`lxT!q8rk>W_$2~kbpz7eV{3wR|8E=8**5?qn8#n`*(bt1xRQrdGxyx2y%B$qmw#>ZV$c7%cO#%JM1lY$Y0q?Yuo> ze9KdJoiM)RH*SB%^;TAdX-zEjA7@%y=!0=Zg%iWK7jVI9b&Dk}0$Af&08KHo+ zOwDhFvA(E|ER%a^cdh@^wLUlmIv6?_3=BvX8jKk92L=Y}7Jf5OGMfh` zBdR1wFCi-i5@`9km{isRb0O%TX+f~)KNaEz{rXQa89`YIF;EN&gN)cigu6mNh>?Cm zAO&Im2flv6D{jwm+y<%WsPe4!89n~KN|7}Cb{Z;XweER73r}Qp2 zz}WP4j}U0&(uD&9yGy6`!+_v-S(yG*iytsTR#x_Rc>=6u^vnRDnf1gP{#2>`ffrAC% zTZ5WQ@hAK;P;>kX{D)mIXe4%a5p=LO1xXH@8T?mz7Q@d)$3pL{{B!2{-v70L*o1AO+|n5beiw~ zk@(>m?T3{2k2c;NWc^`4@P&Z?BjxXJ@;x1qhn)9Mn*IFdt_J-dIqx5#d`NfyfX~m( zIS~5)MfZ2Uy?_4W`47i}u0ZgPh<{D|w_d#;D}Q&U$Q-G}xM1A@1f{#%A$jh6Qp&0hQ<0bPOM z-{1Wm&p%%#eb_?x7i;bol EfAhh=DF6Tf literal 0 HcmV?d00001 diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000..6d49a6c17b --- /dev/null +++ b/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,2 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar diff --git a/README.md b/README.md new file mode 100644 index 
0000000000..fa3a61c2c7 --- /dev/null +++ b/README.md @@ -0,0 +1,897 @@ +# PD部署说明 +## PD配置 +- 配置文件在application.yml +```` +license: + # 验证使用的配置文件所在目录,包括主题、密码等 + verify-path: 'conf/verify-license.json' + # license文件所在目录,通过hugegraph-signature项目生成 + license-path: 'conf/hugegraph.license' +pd: + # 存储路径 + data-path: ./pd_data + # 自动扩容的检查周期,定时检查每个Store的分区数量,自动进行分区数量平衡 + patrol-interval: 1800 + # 是否允许批量单副本入库 + enable-batch-load: false +store: + # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒 + max-down-time: 172800 +partition: + # 默认每个分区副本数 + default-shard-count: 3 + # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count + store-max-shard-count: 12 +```` +##store配置 +-配置文件在application.yml,配置pdserver的address +```` +pdserver: + # pd服务地址,多个pd地址用逗号分割 + address: pdserver ip:端口 +```` +## Hugegraph配置 +- 配置项在hugegraph的启动脚本start-hugegraph.sh中 +```` +if [ -z "$META_SERVERS" ];then + META_SERVERS="pdserver ip:端口" +fi +if [ -z "$PD_PEERS" ];then + PD_PEERS="pdserver ip:端口" +fi +```` +## RESTFUL API +- pd提供了一些restful API可以获取集群分区,图,存储节点等一系列信息 + +###获取集群统计信息 + +#### 获取集群统计信息 + +###### Method & Url +``` +GET http://localhost:8620/v1/cluster +``` + +###### Response Status + +```json +200 +``` + +###### Response Body + +```json +{ + "message": "OK", + "data": { + "state": "Cluster_OK", + "pdList": [ + { + "raftUrl": "10.232.132.38:8610", + "grpcUrl": "10.232.132.38:8686", + "restUrl": "10.232.132.38:8620", + "state": "Up", + "dataPath": "./pd_data", + "role": "Leader", + "serviceName": "10.232.132.38:8686-PD", + "serviceVersion": "", + "startTimeStamp": 0 + } + ], + "pdLeader": { + "raftUrl": "10.232.132.38:8610", + "grpcUrl": "10.232.132.38:8686", + "restUrl": "10.232.132.38:8620", + "state": "Up", + "dataPath": "./pd_data", + "role": "Leader", + "serviceName": "10.232.132.38:8686-PD", + "serviceVersion": "", + "startTimeStamp": 0 + }, + "memberSize": 1, + "stores": [ + { + "storeId": 110645464809417136, + "address": "10.232.132.38:8500", + "raftAddress": "10.232.132.38:8510", + "version": "3.6.3", + "state": "Up" + } + ], + "storeSize": 1, + "onlineStoreSize": 1, + "offlineStoreSize": 0, + "graphSize": 3, + "partitionSize": 4, + "shardCount": 3, + "keyCount": 1707, + "dataSize": 19 + }, + "status": 0 +} +``` + +#### 获取pd集群成员信息 + +###### Method & Url +``` +GET http://localhost:8620/v1/member +``` + +###### Response Status + +```json +200 +``` + +###### Response Body + +```json +{ + "message": "OK", + "data": { + "pdLeader": { + "raftUrl": "10.232.132.38:8610", + "grpcUrl": "10.232.132.38:8686", + "restUrl": "10.232.132.38:8620", + "state": "Up", + "dataPath": "./pd_data", + "role": "Leader", + "serviceName": "10.232.132.38:8686-PD", + "serviceVersion": "", + "startTimeStamp": 0 + }, + "pdList": [ + { + "raftUrl": "10.232.132.38:8610", + "grpcUrl": "10.232.132.38:8686", + "restUrl": "10.232.132.38:8620", + "state": "Up", + "dataPath": "./pd_data", + "role": "Leader", + "serviceName": "10.232.132.38:8686-PD", + "serviceVersion": "", + "startTimeStamp": 0 + } + ], + "state": "Cluster_OK" + }, + "status": 0 +} +``` +###存储节点相关 + +#### 获取集群所有的store的信息 + +###### Method & Url +``` +GET http://localhost:8620/v1/stores +``` +###### Response Status + +```json +200 +``` +###### Request Body + +```json +{ + "message": "OK", + "data": { + "stores": [ + { + "storeId": 110645464809417136, + "address": "10.232.132.38:8500", + "raftAddress": "10.232.132.38:8510", + "version": "3.6.3", + "state": "Up", + "deployPath": "", + "startTimeStamp": 1658491024, + "lastHeatBeat": 1658491748560, + "capacity": 1968740712448, 
+ "available": 1959665557504, + "partitionCount": 4, + "graphSize": 3, + "keyCount": 1128, + "leaderCount": 4, + "serviceName": "10.232.132.38:8500-store", + "serviceVersion": "3.6.3", + "partitions": [ + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/s", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 1, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 2, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 3, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/m", + "role": "Leader", + "workState": "PState_Normal" + } + ] + } + ] + }, + "status": 0 +} +``` +#### 获取单个store的信息 + +###### Method & Url +``` +GET http://localhost:8620/v1/store/{storeId} +``` +###### Response Status + +```json +200 +``` +###### Request Body +```json +{ + "message": "OK", + "data": { + "storeId": 110645464809417136, + "address": "10.232.132.38:8500", + "raftAddress": "10.232.132.38:8510", + "version": "3.6.3", + "state": "Up", + "deployPath": "", + "startTimeStamp": 1658491024, + "lastHeatBeat": 1658491838632, + "capacity": 1968740712448, + "available": 1959665549312, + "partitionCount": 4, + "graphSize": 3, + "keyCount": 1128, + "leaderCount": 4, + "serviceName": "10.232.132.38:8500-store", + "serviceVersion": "3.6.3", + "partitions": [ + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/s", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 1, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 2, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 3, + "graphName": "DEFAULT/hugegraph/g", + "role": "Leader", + "workState": "PState_Normal" + }, + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/m", + "role": "Leader", + "workState": "PState_Normal" + } + ] + }, + "status": 0 +} +``` +### 分区相关 +#### 获取分区信息 + +###### Method & Url +``` +GET http://localhost:8620/v1/highLevelPartitions +``` + +###### Response Status + +```json +200 +``` + +###### Response Body + +```json +{ + "message": "OK", + "data": { + "partitions": [ + { + "partitionId": 0, + "state": "PState_Normal", + "leaderAddress": "10.232.132.38:8500", + "keyCount": 0, + "dataSize": 0, + "shardState": "SState_Normal", + "graphs": [ + { + "graphName": "DEFAULT/hugegraph/g", + "keyCount": 361, + "startKey": 0, + "endKey": 0, + "dataSize": 8, + "workState": "PState_Normal", + "partitionId": 0 + }, + { + "graphName": "DEFAULT/hugegraph/m", + "keyCount": 361, + "startKey": 0, + "endKey": 0, + "dataSize": 13, + "workState": "PState_Normal", + "partitionId": 0 + }, + { + "graphName": "DEFAULT/hugegraph/s", + "keyCount": 361, + "startKey": 0, + "endKey": 65535, + "dataSize": 6, + "workState": "PState_Normal", + "partitionId": 0 + } + ], + "shards": [ + { + "storeId": 110645464809417136, + "role": "Leader", + "state": "SState_Normal", + "progress": 0, + "partitionId": 0, + "address": "10.232.132.38:8500" + } + ] + }, + { + "partitionId": 1, + "state": "PState_Normal", + "leaderAddress": "10.232.132.38:8500", 
+ "keyCount": 0, + "dataSize": 0, + "shardState": "SState_Normal", + "graphs": [ + { + "graphName": "DEFAULT/hugegraph/g", + "keyCount": 8, + "startKey": 16384, + "endKey": 32768, + "dataSize": 5, + "workState": "PState_Normal", + "partitionId": 1 + } + ], + "shards": [ + { + "storeId": 110645464809417136, + "role": "Leader", + "state": "SState_Normal", + "progress": 0, + "partitionId": 1, + "address": "10.232.132.38:8500" + } + ] + }, + { + "partitionId": 2, + "state": "PState_Normal", + "leaderAddress": "10.232.132.38:8500", + "keyCount": 0, + "dataSize": 0, + "shardState": "SState_Normal", + "graphs": [ + { + "graphName": "DEFAULT/hugegraph/g", + "keyCount": 18, + "startKey": 32768, + "endKey": 49152, + "dataSize": 8, + "workState": "PState_Normal", + "partitionId": 2 + } + ], + "shards": [ + { + "storeId": 110645464809417136, + "role": "Leader", + "state": "SState_Normal", + "progress": 0, + "partitionId": 2, + "address": "10.232.132.38:8500" + } + ] + }, + { + "partitionId": 3, + "state": "PState_Normal", + "leaderAddress": "10.232.132.38:8500", + "keyCount": 0, + "dataSize": 0, + "shardState": "SState_Normal", + "graphs": [ + { + "graphName": "DEFAULT/hugegraph/g", + "keyCount": 19, + "startKey": 49152, + "endKey": 65536, + "dataSize": 8, + "workState": "PState_Normal", + "partitionId": 3 + } + ], + "shards": [ + { + "storeId": 110645464809417136, + "role": "Leader", + "state": "SState_Normal", + "progress": 0, + "partitionId": 3, + "address": "10.232.132.38:8500" + } + ] + } + ] + }, + "status": 0 +} +``` +###获取图信息 +#### 获取所有的图信息 + +###### Method & Url +``` +GET http://localhost:8620/v1/graphs +``` + +###### Response Status + +```json +200 +``` + +###### Response Body + +```json +{ + "message": "OK", + "data": { + "graphs": [ + { + "graphName": "DEFAULT/hugegraph/g", + "partitionCount": 4, + "state": "PState_Normal", + "partitions": [ + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 0, + "endKey": 16384, + "shards": [ + { + "partitionId": 0, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + }, + { + "partitionId": 1, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 16384, + "endKey": 32768, + "shards": [ + { + "partitionId": 1, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + }, + { + "partitionId": 2, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 32768, + "endKey": 49152, + "shards": [ + { + "partitionId": 2, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + }, + { + "partitionId": 3, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 49152, + "endKey": 65536, + "shards": [ + { + "partitionId": 3, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + } + ], + "dataSize": 48, + "keyCount": 1128, + "nodeCount": 0, + "edgeCount": 0 + }, + { + "graphName": "DEFAULT/hugegraph/m", + "partitionCount": 1, + "state": "PState_Normal", + "partitions": [ + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/m", + "workState": "PState_Normal", + "startKey": 0, + "endKey": 65535, + "shards": [ + { + "partitionId": 0, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + } + ], + "dataSize": 48, + "keyCount": 1128, + "nodeCount": 0, + "edgeCount": 0 + }, + { 
+ "graphName": "DEFAULT/hugegraph/s", + "partitionCount": 1, + "state": "PState_Normal", + "partitions": [ + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/s", + "workState": "PState_Normal", + "startKey": 0, + "endKey": 65535, + "shards": [ + { + "partitionId": 0, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + } + ], + "dataSize": 48, + "keyCount": 1128, + "nodeCount": 0, + "edgeCount": 0 + } + ] + }, + "status": 0 +} +``` +#### 获取单个图信息 + +###### Method & Url +``` +GET http://localhost:8620/v1/graph/{graphName} +``` + +###### Response Status + +```json +200 +``` + +###### Response Body + +```json +{ + "message": "OK", + "data": { + "graphName": "DEFAULT/hugegraph/g", + "partitionCount": 4, + "state": "PState_Normal", + "partitions": [ + { + "partitionId": 0, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 0, + "endKey": 16384, + "shards": [ + { + "partitionId": 0, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + }, + { + "partitionId": 1, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 16384, + "endKey": 32768, + "shards": [ + { + "partitionId": 1, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + }, + { + "partitionId": 2, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 32768, + "endKey": 49152, + "shards": [ + { + "partitionId": 2, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + }, + { + "partitionId": 3, + "graphName": "DEFAULT/hugegraph/g", + "workState": "PState_Normal", + "startKey": 49152, + "endKey": 65536, + "shards": [ + { + "partitionId": 3, + "storeId": 110645464809417136, + "state": "SState_Normal", + "role": "Leader", + "progress": 0 + } + ] + } + ], + "dataSize": 48, + "keyCount": 1128, + "nodeCount": 0, + "edgeCount": 0 + }, + "status": 0 +} +``` +###获取shard的信息 +#### 获取所有shard的信息 + +###### Method & Url +``` +GET http://localhost:8620/v1/shards +``` + +###### Response Status + +```json +200 +``` + +###### Response Body + +```json +{ + "message": "OK", + "data": { + "shards": [ + { + "storeId": 110645464809417136, + "partitionId": 0, + "role": "Leader", + "state": "SState_Normal", + "graphName": "DEFAULT/hugegraph/g", + "progress": 0 + }, + { + "storeId": 110645464809417136, + "partitionId": 1, + "role": "Leader", + "state": "SState_Normal", + "graphName": "DEFAULT/hugegraph/g", + "progress": 0 + }, + { + "storeId": 110645464809417136, + "partitionId": 2, + "role": "Leader", + "state": "SState_Normal", + "graphName": "DEFAULT/hugegraph/g", + "progress": 0 + }, + { + "storeId": 110645464809417136, + "partitionId": 3, + "role": "Leader", + "state": "SState_Normal", + "graphName": "DEFAULT/hugegraph/g", + "progress": 0 + }, + { + "storeId": 110645464809417136, + "partitionId": 0, + "role": "Leader", + "state": "SState_Normal", + "graphName": "DEFAULT/hugegraph/m", + "progress": 0 + }, + { + "storeId": 110645464809417136, + "partitionId": 0, + "role": "Leader", + "state": "SState_Normal", + "graphName": "DEFAULT/hugegraph/s", + "progress": 0 + } + ] + }, + "status": 0 +} +``` +###服务注册 +#### 注册服务 + +###### Method & Url +``` +POST http://127.0.0.1:8620/v1/registry +``` + +###### Request Body +```json +200 +``` + +###### Response Status + +```json +{ + "appName":"aaaa", + "version":"version1", + "address":"address1", + 
"interval":"9223372036854775807", + "labels": { + "aaa": "aaaavalue" + } +} +``` +appName:所属服务名 +version:所属服务版本号 +address:服务实例地址+端口 +interval:实例心跳间隔,字符串,最大9223372036854775807 +labels: 自定义标签,若服务名为'hg'即hugeserver时,需要提供key为cores的项,进行cpu核数的验证 + +###### Response Body + +```json +{ + "errorType": "OK", + "message": "", + "data": null +} +``` +errorType:状态码 +message:状态码为错误时的具体出错信息 +data:无返回数据 +#### 服务实例获取 + +###### Method & Url +``` +POST http://127.0.0.1:8620/v1/registryInfo +``` + +###### Request Body +```json +200 +``` + +###### Response Status + +```json +{ + "appName":"aaaa", + "version":"version1", + "labels": { + "aaa": "aaaavalue" + } +} +``` +以下三项可全部为空,则获取所有服务节点的信息: +-- appName:过滤所属服务名的条件 +-- version:过滤所属服务版本号的条件,此项有值,则appName不能为空 +-- labels: 过滤自定义标签的条件 + +###### Response Body + +```json +{ + "errorType": "OK", + "message": null, + "data": [ + { + "id": null, + "appName": "aaaa", + "version": "version1", + "address": "address1", + "interval": "9223372036854775807", + "labels": { + "aaa": "aaaavalue" + } + } + ] +} +``` +errorType:状态码 +message:状态码为错误时的具体出错信息 +data:获取的服务节点信息 \ No newline at end of file diff --git a/build-pre.sh b/build-pre.sh new file mode 100644 index 0000000000..06efc075af --- /dev/null +++ b/build-pre.sh @@ -0,0 +1,37 @@ +#!/bin/bash +mkdir output +touch output/1 +export JAVA_HOME=$ORACLEJDK_1_8_0_HOME + +readonly VER=3.6.3 +readonly REPO_URL=http://10.14.139.8:8081/artifactory/star-local + +$MAVEN_3_5_3_BIN/mvn -DremoveSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=true versions:set +$MAVEN_3_5_3_BIN/mvn --settings ./settings.xml -Dmaven.test.skip=true -DaltDeploymentRepository=star-local::default::${REPO_URL} clean deploy +$MAVEN_3_5_3_BIN/mvn versions:revert + + +#------------------repo----------------------- +readonly FILE_NAME=hugegraph-pd-3.6.3.tar.gz +readonly REPO_URL_FILE=http://10.14.139.8:8081/artifactory/star-file + +localFilePath=dist/${FILE_NAME} +targetFolder="${REPO_URL_FILE}/dist/$(date '+%Y-%m-%d')/" +artifactoryUser="admin" +artifactoryPassword="JFrog12345" + +md5Value="$(md5sum "$localFilePath")" +md5Value="${md5Value:0:32}" +sha1Value="$(sha1sum "$localFilePath")" +sha1Value="${sha1Value:0:40}" +sha256Value="$(sha256sum "$localFilePath")" +sha256Value="${sha256Value:0:65}" + +#curl -X PUT -u admin:JFrog12345 -T ${localFilePath} "${REPO_URL_FILE}/dist/${data_folder}/" +echo "INFO: Uploading $localFilePath to $targetFolder" +curl -i -X PUT -u "$artifactoryUser:$artifactoryPassword" \ + -H "X-Checksum-Md5: $md5Value" \ + -H "X-Checksum-Sha1: $sha1Value" \ + -H "X-Checksum-Sha256: $sha256Value" \ + -T "$localFilePath" \ + "$targetFolder" diff --git a/build.sh b/build.sh new file mode 100644 index 0000000000..dd45ad01e3 --- /dev/null +++ b/build.sh @@ -0,0 +1,21 @@ +#!/bin/bash +export PATH=$MAVEN_3_5_3_BIN:$ORACLEJDK_11_0_7_BIN:$PATH +export JAVA_HOME=$ORACLEJDK_11_0_7_HOME +export MAVEN_HOME=$MAVEN_3_5_3_HOME + +readonly REPO_URL=http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local_Snapshots + +if [ ! -n "$1" ] ;then + GOAL=package +else + GOAL=$1 +fi + +$MAVEN_3_5_3_BIN/mvn -Dmaven.test.skip=true -DaltDeploymentRepository=Baidu_Local_Snapshots::default::${REPO_URL} clean ${GOAL} +echo "mv dist...." +mv dist output +ls output +echo "mv dist done" +echo "show output...." 
+ls output +echo "show output done" \ No newline at end of file diff --git a/ci.yml b/ci.yml new file mode 100644 index 0000000000..13c56261fe --- /dev/null +++ b/ci.yml @@ -0,0 +1,26 @@ +Global: + tool : build_submitter + languages: + # Java Version: 1.7, 1.8, 9, 10 + - language : java + version : 11 + envs: + # Maven Version: 3.0.4, 3.1.1, 3.2.5, 3.3.9, 3.5.3 + - env : maven + version : 3.5.3 +Default: + profile : [dev] +Profiles: + - profile: + name : dev + tool : build_submitter + env : DECK_CENTOS7U5_K3 + command : sh build.sh + release : true + + - profile: + name: deploy + tool: build_submitter + env: DECK_CENTOS7U5_K3 + command: sh build.sh deploy + release: true \ No newline at end of file diff --git a/conf/hugegraph.license b/conf/hugegraph.license new file mode 100644 index 0000000000000000000000000000000000000000..bca71af9a1966024e7a99880302f6b592a33ca84 GIT binary patch literal 856 zcmV-e1E>69hn&I!I)GnSv~^hE{iID$vF0rJw0JnV&Nu&pJ?*HLQ#!nms8=CeFwzT% z)5v&U=s7DO7$r#-RSw|2Vq*`|(2D5<5GKxmIX=E1#UCniwf>oeSOP{p(e{%an?_vT zYc)wIUQsJ<9Ai=YsETz;YFaYQ^&YAJD(|KO1~NKs#B+QiROyg@d5VjOt~PJcGl>9= zPZ!gB)$<%a;*Yw{lDP)9Cj#PNktQ)_SRi&!l=1Y})2$oO3ASGnC|m#T{b9DXGt@^9 zRQlb4GeipvU&m_F1_$wwu0?i5Cz0TTSl`#uc}6(t4&YjvWkFI)*jMO@Me_Le_=M(v zSrA?RjyET9V15A)-p(d1q)ry6yF$rH4#5n`?m|ws9TUw@;$3qiBbOlEoKdy_MT)~V z=N#&y{vn8U2Og2iJ{)W-ZO7$zLHEUMN-l$XRvs_I_2T?X5&UXMi&;q8i8d}FWtd=8y@PLaCFm+wpR#G)aC{`rVrs%8)@u!?}9E$ZnDYo!c5hiA?8Qm}y{K9F) zTlyJ+OL<98%B`lIL|}hc9JSDFm9?7x2CnOjGJE_7%Jp&Bif`p29?ai>C(n0UWicnv zrZH578>dDfFP_O?YlK@|!U^eZflg1)Hs(pq1{WqS2Cflfgsa$(yyyfhqxr2y)xO<- ztE~YE$2dMsybWD-Bi<89DZ@L;S$hw~0DG7-yoR%(%Gon5wDYD`dL4k_U6j*47Z4h4 zwCZyUzNtj|D0Iax|0F9J(~M5|FP&r1ysHQXgy8J7aEa}MA3?yWdO@W6NuOQq zxI&O;%`%{#7kN9?F(hYrclJ8cN^CxZ6s68>v%<^cjA#tlBAKkmyCm&l%`g32m?oed z6hL($sbOP?W%_S#UNEW$*u&~DQD5$(f*}}lGTzkLe#?(+LA~;aL=0#EKm?Ju+ zaOe2lRrWmfBd1Mk57Xciq<;l;cn=*wkW>MbOpHWHg~zj9arkkR#q-;Ch&7kk6E(mR i^Sj|__)(b4Oh-2cCDrT@A%)082}=;$lBJ*eQVTP`m7MAT literal 0 HcmV?d00001 diff --git a/conf/verify-license.json b/conf/verify-license.json new file mode 100644 index 0000000000..868ccbebbb --- /dev/null +++ b/conf/verify-license.json @@ -0,0 +1,6 @@ +{ + "subject": "hugegraph-license", + "public_alias": "publiccert", + "store_ticket": "803b6cc3-d144-47e8-948f-ec8b39c8881e", + "publickey_path": "/public-certs.store" +} diff --git a/deploy-release.sh b/deploy-release.sh new file mode 100644 index 0000000000..3712135c59 --- /dev/null +++ b/deploy-release.sh @@ -0,0 +1,8 @@ +#!/bin/bash +readonly VER=3.6.3 +readonly REPO_URL=http://10.14.139.8:8081/artifactory/star-local +#mvn -DnewVersion=${VER}-SNAPSHOT -DprocessAllModules=true -DgenerateBackupPoms=false versions:set + +./mvnw -DremoveSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=true versions:set +./mvnw --settings ./settings.xml -Dmaven.test.skip=true -DaltDeploymentRepository=star-local::default::${REPO_URL} clean deploy +./mvnw versions:revert diff --git a/deploy-snapshot.sh b/deploy-snapshot.sh new file mode 100644 index 0000000000..3712135c59 --- /dev/null +++ b/deploy-snapshot.sh @@ -0,0 +1,8 @@ +#!/bin/bash +readonly VER=3.6.3 +readonly REPO_URL=http://10.14.139.8:8081/artifactory/star-local +#mvn -DnewVersion=${VER}-SNAPSHOT -DprocessAllModules=true -DgenerateBackupPoms=false versions:set + +./mvnw -DremoveSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=true versions:set +./mvnw --settings ./settings.xml -Dmaven.test.skip=true -DaltDeploymentRepository=star-local::default::${REPO_URL} clean deploy +./mvnw versions:revert diff --git a/hg-pd-client/pom.xml 
b/hg-pd-client/pom.xml new file mode 100644 index 0000000000..f11d5069aa --- /dev/null +++ b/hg-pd-client/pom.xml @@ -0,0 +1,54 @@ + + + + 4.0.0 + + + org.apache.hugegraph + hugegraph-pd-root + 3.6.5-SNAPSHOT + + hg-pd-client + + + + org.projectlombok + lombok + 1.18.20 + + + org.apache.logging.log4j + log4j-slf4j-impl + 2.17.0 + + + com.baidu.hugegraph + hg-pd-grpc + ${project.version} + + + com.baidu.hugegraph + hg-pd-common + ${project.version} + compile + + + junit + junit + 4.13.2 + test + + + commons-io + commons-io + 2.8.0 + + + org.yaml + snakeyaml + test + + + \ No newline at end of file diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java new file mode 100644 index 0000000000..46ab774d04 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java @@ -0,0 +1,241 @@ +package org.apache.hugegraph.pd.client; + +import java.io.Closeable; +import java.util.LinkedList; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; +import java.util.stream.Stream; + +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.PDGrpc; +import com.baidu.hugegraph.pd.grpc.PDGrpc.PDBlockingStub; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.Pdpb.GetMembersRequest; +import com.baidu.hugegraph.pd.grpc.Pdpb.GetMembersResponse; + +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.AbstractStub; +import io.grpc.stub.ClientCalls; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2022/6/20 + **/ +@Slf4j +public abstract class AbstractClient implements Closeable { + + protected final Pdpb.RequestHeader header; + protected final AbstractClientStubProxy stubProxy; + protected final PDConfig config; + protected ManagedChannel channel = null; + protected ConcurrentMap stubs = null; + + protected AbstractClient(PDConfig config) { + String[] hosts = config.getServerHost().split(","); + this.stubProxy = new AbstractClientStubProxy(hosts); + this.header = Pdpb.RequestHeader.getDefaultInstance(); + this.config = config; + } + + private AbstractBlockingStub getBlockingStub() throws PDException { + if (stubProxy.getBlockingStub() == null) { + synchronized (this) { + if (stubProxy.getBlockingStub() == null) { + String host = resetStub(); + if (host.isEmpty()) throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, + "PD unreachable, pd.peers=" + config.getServerHost()); + } + } + } + ; + return stubProxy.getBlockingStub(); + } + + public static Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build(); + + public static Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setTypeValue(errorCode).setMessage(errorMsg)).build(); + return header; + } + + private AbstractStub getStub() throws 
PDException { + if (stubProxy.getStub() == null) { + synchronized (this) { + if (stubProxy.getStub() == null) { + String host = resetStub(); + if (host.isEmpty()) throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, + "PD unreachable, pd.peers=" + config.getServerHost()); + } + } + } + return stubProxy.getStub(); + } + + protected abstract AbstractStub createStub(); + + protected abstract AbstractBlockingStub createBlockingStub(); + + private String resetStub() { + String leaderHost = ""; + for (int i = 0; i < stubProxy.getHostCount(); i++) { + String host = stubProxy.nextHost(); + channel = ManagedChannelBuilder.forTarget(host).usePlaintext().build(); + PDBlockingStub blockingStub = PDGrpc.newBlockingStub(channel) + .withDeadlineAfter(config.getGrpcTimeOut(), + TimeUnit.MILLISECONDS); + try { + GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder().setHeader(header).build(); + GetMembersResponse members = blockingStub.getMembers(request); + Metapb.Member leader = members.getLeader(); + leaderHost = leader.getGrpcUrl(); + close(); + channel = ManagedChannelBuilder.forTarget(leaderHost).usePlaintext().build(); + stubProxy.setBlockingStub(createBlockingStub()); + stubProxy.setStub(createStub()); + log.info("PDClient connect to host = {} success", leaderHost); + break; + } catch (Exception e) { + log.error("PDClient connect to {} exception {}, {}", host, e.getMessage(), + e.getCause() != null ? e.getCause().getMessage() : ""); + } + } + return leaderHost; + } + + protected > RespT blockingUnaryCall( + MethodDescriptor method, ReqT req) throws PDException { + return blockingUnaryCall(method, req, 5); + } + + protected > RespT blockingUnaryCall( + MethodDescriptor method, ReqT req, int retry) throws PDException { + AbstractBlockingStub stub = getBlockingStub(); + try { + RespT resp = ClientCalls.blockingUnaryCall(stub.getChannel(), method, stub.getCallOptions(), req); + return resp; + } catch (Exception e) { + log.error(method.getFullMethodName() + " exception, {}", e.getMessage()); + if (e instanceof StatusRuntimeException) { + if (retry < stubProxy.getHostCount()) { + // 网络不通,关掉之前连接,换host重新连接 + synchronized (this) { + stubProxy.setBlockingStub(null); + } + return blockingUnaryCall(method, req, ++retry); + } + } + } + return null; + } + + // this.stubs = new ConcurrentHashMap(hosts.length); + private AbstractBlockingStub getConcurrentBlockingStub(String address) { + AbstractBlockingStub stub = stubs.get(address); + if (stub != null) { + return stub; + } + Channel ch = ManagedChannelBuilder.forTarget(address).usePlaintext().build(); + PDBlockingStub blockingStub = PDGrpc.newBlockingStub(ch).withDeadlineAfter(config.getGrpcTimeOut(), + TimeUnit.MILLISECONDS); + stubs.put(address, blockingStub); + return blockingStub; + + } + + protected KVPair concurrentBlockingUnaryCall( + MethodDescriptor method, ReqT req, Predicate predicate) throws PDException { + LinkedList hostList = this.stubProxy.getHostList(); + if (this.stubs == null) { + synchronized (this) { + if (this.stubs == null) { + this.stubs = new ConcurrentHashMap<>(hostList.size()); + } + } + } + Stream respTStream = hostList.parallelStream().map((address) -> { + AbstractBlockingStub stub = getConcurrentBlockingStub(address); + RespT resp = ClientCalls.blockingUnaryCall(stub.getChannel(), method, stub.getCallOptions(), req); + return resp; + }); + KVPair pair; + AtomicReference response = new AtomicReference<>(); + boolean result = respTStream.anyMatch((r) -> { + response.set(r); + return predicate.test(r); + }); + if (result) 
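+            // At least one PD node's response satisfied the predicate: treat the call as
+            // successful and drop the payload; otherwise return the last response for inspection.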
{ + pair = new KVPair<>(true, null); + } else { + pair = new KVPair<>(false, response.get()); + } + return pair; + } + + protected void streamingCall(MethodDescriptor method, ReqT request, + StreamObserver responseObserver, int retry) throws + PDException { + AbstractStub stub = getStub(); + try { + ClientCall call = stub.getChannel().newCall(method, stub.getCallOptions()); + ClientCalls.asyncServerStreamingCall(call, request, responseObserver); + } catch (Exception e) { + if (e instanceof StatusRuntimeException) { + if (retry < stubProxy.getHostCount()) { + synchronized (this) { + stubProxy.setStub(null); + } + streamingCall(method, request, responseObserver, ++retry); + return; + } + } + log.error("rpc call with exception, {}", e.getMessage()); + } + } + + + protected static void handleErrors(Pdpb.ResponseHeader header) throws PDException { + if (header.hasError() && header.getError().getType() != Pdpb.ErrorType.OK) + throw new PDException(header.getError().getTypeValue(), + String.format("PD request error, error code = %d, msg = %s", + header.getError().getTypeValue(), + header.getError().getMessage())); + } + + + @Override + public void close() { + closeChannel(channel); + if (stubs != null) { + for (AbstractBlockingStub stub : stubs.values()) { + closeChannel((ManagedChannel) stub.getChannel()); + } + } + + } + + private void closeChannel(ManagedChannel channel) { + try { + while (channel != null && !channel.shutdownNow().awaitTermination(100, TimeUnit.MILLISECONDS)) { + continue; + } + } catch (Exception e) { + log.info("Close channel with error : {}.", e); + } finally { + } + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java new file mode 100644 index 0000000000..4fd5678407 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java @@ -0,0 +1,56 @@ +package org.apache.hugegraph.pd.client; + +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.AbstractStub; + +import java.util.LinkedList; + +/** + * @author zhangyingjie + * @date 2022/6/20 + **/ +public class AbstractClientStubProxy { + + private AbstractBlockingStub blockingStub; + private AbstractStub stub; + + public LinkedList getHostList() { + return hostList; + } + + private LinkedList hostList = new LinkedList<>(); + + public AbstractClientStubProxy(String[] hosts) { + for (String host : hosts) if (!host.isEmpty()) hostList.offer(host); + } + + public String nextHost() { + String host = hostList.poll(); + hostList.offer(host); //移到尾部 + return host; + } + + public void setBlockingStub(AbstractBlockingStub stub) { + this.blockingStub = stub; + } + + public AbstractBlockingStub getBlockingStub() { + return this.blockingStub; + } + + public String getHost() { + return hostList.peek(); + } + + public int getHostCount() { + return hostList.size(); + } + + public AbstractStub getStub() { + return stub; + } + + public void setStub(AbstractStub stub) { + this.stub = stub; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java new file mode 100644 index 0000000000..fc769e34f8 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java @@ -0,0 +1,18 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; +import 
com.baidu.hugegraph.pd.grpc.discovery.Query; + +import java.util.Map; + +/** + * @author zhangyingjie + * @date 2021/12/20 + **/ +public interface Discoverable { + + NodeInfos getNodeInfos(Query query); + + void scheduleTask(); + void cancelTask(); +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java new file mode 100644 index 0000000000..929cec19a0 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java @@ -0,0 +1,200 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; +import com.baidu.hugegraph.pd.grpc.discovery.Query; +import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import lombok.extern.slf4j.Slf4j; + +import java.io.Closeable; +import java.util.LinkedList; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; +import java.util.function.Function; + +/** + * @author zhangyingjie + * @date 2021/12/20 + **/ +@Slf4j +public abstract class DiscoveryClient implements Closeable, Discoverable { + + protected int period; //心跳周期 + private Timer timer = new Timer("serverHeartbeat", true); + private volatile int currentIndex; // 当前在用pd地址位置 + LinkedList pdAddresses = new LinkedList<>(); + ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + private volatile AtomicBoolean requireResetStub = new AtomicBoolean(false); + private int maxTime = 6; + private ManagedChannel channel = null; + private DiscoveryServiceGrpc.DiscoveryServiceBlockingStub registerStub; + private DiscoveryServiceGrpc.DiscoveryServiceBlockingStub blockingStub; + + public DiscoveryClient(String centerAddress, int delay) { + String[] addresses = centerAddress.split(","); + for (int i = 0; i < addresses.length; i++) { + String singleAddress = addresses[i]; + if (singleAddress == null || singleAddress.length() <= 0) { + continue; + } + pdAddresses.add(addresses[i]); + } + this.period = delay; + if (maxTime < addresses.length) { + maxTime = addresses.length; + } + } + + private R tryWithTimes(Function function, V v) { + R r; + Exception ex = null; + for (int i = 0; i < maxTime; i++) { + try { + r = function.apply(v); + return r; + } catch (Exception e) { + requireResetStub.set(true); + resetStub(); + ex = e; + } + } + if (ex != null) + log.error("Try discovery method with error: {}", ex.getMessage()); + return null; + } + + /*** + * 按照pd列表重置stub + */ + private void resetStub() { + String errLog = null; + for (int i = currentIndex + 1; i <= pdAddresses.size() + currentIndex; i++) { + currentIndex = i % pdAddresses.size(); + String singleAddress = pdAddresses.get(currentIndex); + try { + if (requireResetStub.get()) { + resetChannel(singleAddress); + } + errLog = null; + break; + } catch (Exception e) { + requireResetStub.set(true); + if (errLog == null) errLog = e.getMessage(); + continue; + } + } + if (errLog != null) log.error(errLog); + } + + /*** + * 按照某个pd的地址重置channel和stub + * @param singleAddress + * @throws PDException + */ + private void resetChannel(String 
singleAddress) throws PDException { + + readWriteLock.writeLock().lock(); + try { + if (requireResetStub.get()) { + while (channel != null && !channel.shutdownNow().awaitTermination( + 100, TimeUnit.MILLISECONDS)) { + continue; + } + channel = ManagedChannelBuilder.forTarget( + singleAddress).usePlaintext().build(); + this.registerStub = DiscoveryServiceGrpc.newBlockingStub( + channel); + this.blockingStub = DiscoveryServiceGrpc.newBlockingStub( + channel); + requireResetStub.set(false); + } + } catch (Exception e) { + throw new PDException(-1, String.format( + "Reset channel with error : %s.", e.getMessage())); + } finally { + readWriteLock.writeLock().unlock(); + } + } + + /*** + * 获取注册节点信息 + * @param query + * @return + */ + @Override + public NodeInfos getNodeInfos(Query query) { + return tryWithTimes((q) -> { + this.readWriteLock.readLock().lock(); + NodeInfos nodes; + try { + nodes = this.blockingStub.getNodes(q); + } catch (Exception e) { + throw e; + } finally { + this.readWriteLock.readLock().unlock(); + } + return nodes; + }, query); + } + + /*** + * 启动心跳任务 + */ + @Override + public void scheduleTask() { + timer.schedule(new TimerTask() { + @Override + public void run() { + NodeInfo nodeInfo = getRegisterNode(); + tryWithTimes((t) -> { + RegisterInfo register; + readWriteLock.readLock().lock(); + try { + register = registerStub.register(t); + log.debug("Discovery Client work done."); + Consumer consumer = getRegisterConsumer(); + if (consumer != null) consumer.accept(register); + } catch (Exception e) { + throw e; + } finally { + readWriteLock.readLock().unlock(); + } + return register; + }, nodeInfo); + } + }, 0, period); + } + + abstract NodeInfo getRegisterNode(); + + abstract Consumer getRegisterConsumer(); + + @Override + public void cancelTask() { + this.timer.cancel(); + } + + @Override + public void close() { + this.timer.cancel(); + readWriteLock.writeLock().lock(); + try { + while (channel != null && !channel.shutdownNow().awaitTermination( + 100, TimeUnit.MILLISECONDS)) { + continue; + } + } catch (Exception e) { + log.info("Close channel with error : {}.", e); + } finally { + readWriteLock.writeLock().unlock(); + } + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java new file mode 100644 index 0000000000..e496991992 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java @@ -0,0 +1,127 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.RegisterType; + +import java.util.Map; +import java.util.function.Consumer; + +/** + * @author zhangyingjie + * @date 2021/12/20 + **/ +public class DiscoveryClientImpl extends DiscoveryClient { + + private volatile String id ; + private RegisterType type; // 心跳类型,备用 + private String version; + private String appName; + private int times; // 心跳过期次数,备用 + private String address; + private Map labels; + private Consumer registerConsumer; + + + private DiscoveryClientImpl(Builder builder) { + super(builder.centerAddress, builder.delay); + period = builder.delay; + id = builder.id; + type = builder.type; + version = builder.version; + appName = builder.appName; + times = builder.times; + address = builder.address; + labels = builder.labels; + registerConsumer = builder.registerConsumer; + } + + public static Builder newBuilder() { + return new Builder(); + } + + + 
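+    // Usage sketch (illustrative only; the addresses, names and values below are assumptions,
+    // not defaults): a service instance registers itself with PD and then sends a heartbeat
+    // every `delay` milliseconds until the task is cancelled.
+    //
+    //   DiscoveryClientImpl client = DiscoveryClientImpl.newBuilder()
+    //           .setCenterAddress("127.0.0.1:8686")    // PD gRPC address(es), comma separated
+    //           .setAppName("hg")                      // "hg" requires a "cores" label (see README)
+    //           .setVersion("3.6.3")
+    //           .setAddress("127.0.0.1:8081")          // this instance's own address
+    //           .setId("hg-node-1")
+    //           .setDelay(30000)
+    //           .setLabels(Map.of("cores", "8"))
+    //           .setRegisterConsumer(info -> { })
+    //           .build();
+    //   client.scheduleTask();    // start periodic registration/heartbeat
+    //   // ... later:
+    //   client.cancelTask();
+    //   client.close();
+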
@Override + NodeInfo getRegisterNode() { + return NodeInfo.newBuilder().setAddress(this.address) + .setVersion(this.version) + .setAppName(this.appName).setInterval(this.period) + .setId(this.id).putAllLabels(labels).build(); + } + + @Override + Consumer getRegisterConsumer() { + return registerConsumer; + } + + + public static final class Builder { + + private int delay; + private String centerAddress; + private String id; + private RegisterType type; + private String address; + private Map labels; + private String version; + private String appName; + private int times; + private Consumer registerConsumer; + + private Builder() { + } + + public Builder setDelay(int val) { + delay = val; + return this; + } + + public Builder setCenterAddress(String val) { + centerAddress = val; + return this; + } + + public Builder setId(String val) { + id = val; + return this; + } + + public Builder setType(RegisterType val) { + type = val; + return this; + } + + public Builder setAddress(String val) { + address = val; + return this; + } + + public Builder setLabels(Map val) { + labels = val; + return this; + } + + public Builder setVersion(String val) { + version = val; + return this; + } + + public Builder setAppName(String val) { + appName = val; + return this; + } + + public Builder setTimes(int val) { + times = val; + return this; + } + + public Builder setRegisterConsumer(Consumer registerConsumer) { + this.registerConsumer = registerConsumer; + return this; + } + + public DiscoveryClientImpl build() { + return new DiscoveryClientImpl(this); + } + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java new file mode 100644 index 0000000000..29cfb88f37 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java @@ -0,0 +1,293 @@ +package org.apache.hugegraph.pd.client; + +import java.io.Closeable; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.kv.K; +import com.baidu.hugegraph.pd.grpc.kv.KResponse; +import com.baidu.hugegraph.pd.grpc.kv.Kv; +import com.baidu.hugegraph.pd.grpc.kv.KvResponse; +import com.baidu.hugegraph.pd.grpc.kv.KvServiceGrpc; +import com.baidu.hugegraph.pd.grpc.kv.LockRequest; +import com.baidu.hugegraph.pd.grpc.kv.LockResponse; +import com.baidu.hugegraph.pd.grpc.kv.ScanPrefixResponse; +import com.baidu.hugegraph.pd.grpc.kv.TTLRequest; +import com.baidu.hugegraph.pd.grpc.kv.TTLResponse; +import com.baidu.hugegraph.pd.grpc.kv.WatchEvent; +import com.baidu.hugegraph.pd.grpc.kv.WatchKv; +import com.baidu.hugegraph.pd.grpc.kv.WatchRequest; +import com.baidu.hugegraph.pd.grpc.kv.WatchResponse; +import com.baidu.hugegraph.pd.grpc.kv.WatchType; + +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.AbstractStub; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2022/6/20 + **/ +@Slf4j +public class KvClient extends AbstractClient implements Closeable { + + private AtomicLong clientId = new AtomicLong(0); + private Semaphore semaphore = new Semaphore(1); + + public KvClient(PDConfig pdConfig) { + super(pdConfig); + } + + @Override + protected AbstractStub createStub() { + 
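+        // Asynchronous KV stub bound to the channel that AbstractClient points at the current PD leader.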
return KvServiceGrpc.newStub(channel); + } + + @Override + protected AbstractBlockingStub createBlockingStub() { + return KvServiceGrpc.newBlockingStub(channel); + } + + public KvResponse put(String key, String value) throws PDException { + Kv kv = Kv.newBuilder().setKey(key).setValue(value).build(); + KvResponse response = blockingUnaryCall(KvServiceGrpc.getPutMethod(), kv); + handleErrors(response.getHeader()); + return response; + } + + + public KResponse get(String key) throws PDException { + K k = K.newBuilder().setKey(key).build(); + KResponse response = blockingUnaryCall(KvServiceGrpc.getGetMethod(), k); + handleErrors(response.getHeader()); + return response; + } + + + public KvResponse delete(String key) throws PDException { + K k = K.newBuilder().setKey(key).build(); + KvResponse response = blockingUnaryCall(KvServiceGrpc.getDeleteMethod(), k); + handleErrors(response.getHeader()); + return response; + } + + + public KvResponse deletePrefix(String prefix) throws PDException { + K k = K.newBuilder().setKey(prefix).build(); + KvResponse response = blockingUnaryCall(KvServiceGrpc.getDeletePrefixMethod(), k); + handleErrors(response.getHeader()); + return response; + } + + + public ScanPrefixResponse scanPrefix(String prefix) throws PDException { + K k = K.newBuilder().setKey(prefix).build(); + ScanPrefixResponse response = blockingUnaryCall(KvServiceGrpc.getScanPrefixMethod(), k); + handleErrors(response.getHeader()); + return response; + } + + public TTLResponse keepTTLAlive(String key) throws PDException { + TTLRequest request = TTLRequest.newBuilder().setKey(key).build(); + TTLResponse response = blockingUnaryCall(KvServiceGrpc.getKeepTTLAliveMethod(), request); + handleErrors(response.getHeader()); + return response; + } + + public TTLResponse putTTL(String key, String value, long ttl) throws PDException { + TTLRequest request = TTLRequest.newBuilder().setKey(key).setValue(value).setTtl(ttl).build(); + TTLResponse response = blockingUnaryCall(KvServiceGrpc.getPutTTLMethod(), request); + handleErrors(response.getHeader()); + return response; + } + + private void onEvent(WatchResponse value, Consumer consumer) { + log.info("receive message for {},event Count:{}", value, value.getEventsCount()); + clientId.compareAndSet(0L, value.getClientId()); + if (value.getEventsCount() != 0) consumer.accept((T) value); + } + + BiConsumer listenWrapper = (key, consumer) -> { + try { + listen(key, consumer); + } catch (PDException e) { + try { + log.warn("start listen with warning:", e); + Thread.sleep(1000); + } catch (InterruptedException ex) { + } + } + }; + + BiConsumer prefixListenWrapper = (key, consumer) -> { + try { + listenPrefix(key, consumer); + } catch (PDException e) { + try { + log.warn("start listenPrefix with warning:", e); + Thread.sleep(1000); + } catch (InterruptedException ex) { + } + } + }; + + private StreamObserver getObserver(String key, Consumer consumer, + BiConsumer listenWrapper) { + return new StreamObserver() { + @Override + public void onNext(WatchResponse value) { + switch (value.getState()) { + case Starting: + boolean b = clientId.compareAndSet(0, value.getClientId()); + if (b) { + log.info("set watch client id to :{}", value.getClientId()); + } + semaphore.release(); + break; + case Started: + onEvent(value, consumer); + break; + case Leader_Changed: + listenWrapper.accept(key, consumer); + break; + case Alive: + // only for check client is alive, do nothing + break; + default: + break; + } + } + + @Override + public void onError(Throwable t) { + 
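+                // The watch stream broke (network error, PD restart or leader switch):
+                // re-subscribe through the wrapper so the watch survives reconnection.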
listenWrapper.accept(key, consumer); + } + + + @Override + public void onCompleted() { + + } + }; + } + + public void listen(String key, Consumer consumer) throws PDException { + StreamObserver observer = getObserver(key, consumer, listenWrapper); + acquire(); + WatchRequest k = WatchRequest.newBuilder().setClientId(clientId.get()).setKey(key).build(); + streamingCall(KvServiceGrpc.getWatchMethod(), k, observer, 1); + } + + public void listenPrefix(String prefix, Consumer consumer) throws PDException { + StreamObserver observer = getObserver(prefix, consumer, prefixListenWrapper); + acquire(); + WatchRequest k = WatchRequest.newBuilder().setClientId(clientId.get()).setKey(prefix).build(); + streamingCall(KvServiceGrpc.getWatchPrefixMethod(), k, observer, 1); + } + + private void acquire() { + if (clientId.get() == 0L) { + try { + semaphore.acquire(); + if (clientId.get() != 0L) { + semaphore.release(); + } + } catch (InterruptedException e) { + log.error("get semaphore with error:", e); + } + } + } + + + public List getWatchList(T response) { + List values = new LinkedList<>(); + List eventsList = response.getEventsList(); + for (WatchEvent event : eventsList) { + if (event.getType() != WatchType.Put) { + return null; + } + String value = event.getCurrent().getValue(); + values.add(value); + } + return values; + } + + + public Map getWatchMap(T response) { + Map values = new HashMap<>(); + List eventsList = response.getEventsList(); + for (WatchEvent event : eventsList) { + if (event.getType() != WatchType.Put) { + return null; + } + WatchKv current = event.getCurrent(); + String key = current.getKey(); + String value = current.getValue(); + values.put(key, value); + } + return values; + } + + + public LockResponse lock(String key, long ttl) throws PDException { + acquire(); + LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl).build(); + LockResponse response = blockingUnaryCall(KvServiceGrpc.getLockMethod(), k); + handleErrors(response.getHeader()); + clientId.compareAndSet(0L, response.getClientId()); + assert clientId.get() == response.getClientId(); + return response; + } + + public LockResponse lockWithoutReentrant(String key, long ttl) throws PDException { + acquire(); + LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl).build(); + LockResponse response = blockingUnaryCall(KvServiceGrpc.getLockWithoutReentrantMethod(), k); + handleErrors(response.getHeader()); + clientId.compareAndSet(0L, response.getClientId()); + assert clientId.get() == response.getClientId(); + return response; + } + + public LockResponse isLocked(String key) throws PDException { + LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build(); + LockResponse response = blockingUnaryCall(KvServiceGrpc.getIsLockedMethod(), k); + handleErrors(response.getHeader()); + return response; + } + + + public LockResponse unlock(String key) throws PDException { + assert clientId.get() != 0; + LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build(); + LockResponse response = blockingUnaryCall(KvServiceGrpc.getUnlockMethod(), k); + handleErrors(response.getHeader()); + clientId.compareAndSet(0L, response.getClientId()); + assert clientId.get() == response.getClientId(); + return response; + } + + + public LockResponse keepAlive(String key) throws PDException { + assert clientId.get() != 0; + LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build(); + 
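// Illustrative usage (sketch): after lock(key, ttl) succeeds, call keepAlive(key)
+ // periodically within the TTL to keep holding the lock until unlock(key).
+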
LockResponse response = blockingUnaryCall(KvServiceGrpc.getKeepAliveMethod(), k); + handleErrors(response.getHeader()); + clientId.compareAndSet(0L, response.getClientId()); + assert clientId.get() == response.getClientId(); + return response; + } + + @Override + public void close() { + super.close(); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java new file mode 100644 index 0000000000..9adcd01dac --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java @@ -0,0 +1,54 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.grpc.PDGrpc; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.google.protobuf.ByteString; +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.AbstractStub; +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2022/8/3 + **/ +@Slf4j +public class LicenseClient extends AbstractClient { + + public LicenseClient(PDConfig config) { + super(config); + } + + @Override + protected AbstractStub createStub() { + return PDGrpc.newStub(channel); + } + + @Override + protected AbstractBlockingStub createBlockingStub() { + return PDGrpc.newBlockingStub(channel); + } + + public Pdpb.PutLicenseResponse putLicense(byte[] content) { + Pdpb.PutLicenseRequest request = Pdpb.PutLicenseRequest.newBuilder() + .setContent(ByteString.copyFrom(content)) + .build(); + try { + KVPair pair = concurrentBlockingUnaryCall( + PDGrpc.getPutLicenseMethod(), request, + (rs) -> rs.getHeader().getError().getType().equals(Pdpb.ErrorType.OK)); + if (pair.getKey()) { + Pdpb.PutLicenseResponse.Builder builder = Pdpb.PutLicenseResponse.newBuilder(); + builder.setHeader(okHeader); + return builder.build(); + } else { + return pair.getValue(); + } + } catch (Exception e) { + e.printStackTrace(); + log.debug("put license with error:{} ", e); + Pdpb.ResponseHeader rh = newErrorHeader(Pdpb.ErrorType.LICENSE_ERROR_VALUE, e.getMessage()); + return Pdpb.PutLicenseResponse.newBuilder().setHeader(rh).build(); + } + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java new file mode 100644 index 0000000000..accabbb63e --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java @@ -0,0 +1,1124 @@ +package org.apache.hugegraph.pd.client; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; + +import org.apache.hugegraph.pd.watch.NodeEvent; +import org.apache.hugegraph.pd.watch.PartitionEvent; + +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.common.PartitionCache; +import com.baidu.hugegraph.pd.common.PartitionUtils; +import com.baidu.hugegraph.pd.grpc.MetaTask; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.PDGrpc; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import com.google.protobuf.ByteString; + +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.AbstractBlockingStub; +import lombok.extern.slf4j.Slf4j; + +/** + * 
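PD client implementation class: wraps the PD gRPC stub, the local partition cache,
+ * and the watch/pulse channels.
+ *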
PD客户端实现类 + * @author yanjinbing + */ +@Slf4j +public class PDClient { + private final PDConfig config; + private final Pdpb.RequestHeader header; + private final PartitionCache cache; + private PDWatch.Watcher partitionWatcher; + private PDWatch.Watcher storeWatcher; + private PDWatch.Watcher graphWatcher; + private PDWatch.Watcher shardGroupWatcher; + private PDPulse pdPulse; + private final StubProxy stubProxy; + private final List eventListeners; + + public interface PDEventListener { + void onStoreChanged(NodeEvent event); + void onPartitionChanged(PartitionEvent event); + void onGraphChanged(WatchResponse event); + default void onShardGroupChanged(WatchResponse event) {}; + } + + static class StubProxy { + private volatile PDGrpc.PDBlockingStub stub; + private LinkedList hostList = new LinkedList<>(); + + public StubProxy(String[] hosts) { + for (String host : hosts) if (!host.isEmpty()) hostList.offer(host); + } + + public String nextHost() { + String host = hostList.poll(); + hostList.offer(host); //移到尾部 + return host; + } + public void set(PDGrpc.PDBlockingStub stub){ this.stub = stub;} + public PDGrpc.PDBlockingStub get(){return this.stub;} + public String getHost(){ return hostList.peek();} + public int getHostCount(){ return hostList.size();} + } + /** + * 创建PDClient对象,并初始化stub + * @param config + * @return + */ + public static PDClient create(PDConfig config){ + PDClient client = new PDClient(config); + return client; + } + + private PDClient(PDConfig config) { + this.config = config; + this.header = Pdpb.RequestHeader.getDefaultInstance(); + this.cache = new PartitionCache(); + this.stubProxy = new StubProxy(config.getServerHost().split(",")); + this.eventListeners = new CopyOnWriteArrayList<>(); + + } + + private synchronized void newBlockingStub() throws PDException { + if ( stubProxy.get() != null ) return; + String host = newLeaderStub(); + if ( host.isEmpty()) + throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, + "PD unreachable, pd.peers=" + config.getServerHost()); + PDWatch pdWatch = new PDWatchImpl(host); + if (config.isEnableCache()) { + log.info("PDClient enable cache, init PDWatch object"); + this.pdPulse = new PDPulseImpl(host); + partitionWatcher = pdWatch.watchPartition(new PDWatch.Listener() { + @Override + public void onNext(PartitionEvent response) { + // log.info("PDClient receive partition event {}-{} {}", + // response.getGraph(), response.getPartitionId(), response.getChangeType()); + invalidPartitionCache(response.getGraph(), response.getPartitionId()); + + if (response.getChangeType() == PartitionEvent.ChangeType.DEL) { + cache.removeAll(response.getGraph()); + } + + eventListeners.forEach(listener -> { + listener.onPartitionChanged(response); + }); + } + + @Override + public void onError(Throwable throwable) { + log.error("watchPartition exception {}", throwable.getMessage()); + closeStub(false); + } + + @Override + public void onCompleted() { + + } + }); + storeWatcher = pdWatch.watchNode(new PDWatch.Listener() { + @Override + public void onNext(NodeEvent response) { + log.info("PDClient receive store event {} {}", response.getEventType(), Long.toHexString(response.getNodeId())); + invalidStoreCache(response.getNodeId()); + eventListeners.forEach(listener -> { + listener.onStoreChanged(response); + }); + } + + @Override + public void onError(Throwable throwable) { + log.error("watchNode exception {}", throwable.getMessage()); + closeStub(false); + } + + @Override + public void onCompleted() { + + } + }); + } + graphWatcher = 
pdWatch.watchGraph(new PDWatch.Listener() { + @Override + public void onNext(WatchResponse response) { + eventListeners.forEach(listener -> { + listener.onGraphChanged(response); + }); + } + + @Override + public void onError(Throwable throwable) { + log.warn("graphWatcher exception {}", throwable.getMessage()); + } + + @Override + public void onCompleted() { + + } + }); + + shardGroupWatcher = pdWatch.watchShardGroup(new PDWatch.Listener<>() { + @Override + public void onNext(WatchResponse response) { + var shardResponse = response.getShardGroupResponse(); + log.info("PDClient receive shard group event: raft {}-{}", shardResponse.getShardGroupId(), + shardResponse.getType()); + if (config.isEnableCache()) { + switch (shardResponse.getType()) { + case WATCH_CHANGE_TYPE_DEL: + cache.deleteShardGroup(shardResponse.getShardGroupId()); + break; + case WATCH_CHANGE_TYPE_ALTER: + cache.updateShardGroup(response.getShardGroupResponse().getShardGroup()); + break; + default: + break; + } + } + eventListeners.forEach(listener -> listener.onShardGroupChanged(response)); + } + + @Override + public void onError(Throwable throwable) { + log.warn("shardGroupWatcher exception {}", throwable.getMessage()); + } + + @Override + public void onCompleted() { + + } + }); + } + + private synchronized void closeStub(boolean closeWatcher) { + // TODO ManagedChannel 没有正常关闭 + stubProxy.set(null); + cache.reset(); + + if (closeWatcher) { + if (partitionWatcher != null) { + partitionWatcher.close(); + } + if (storeWatcher != null) { + storeWatcher.close(); + } + if (graphWatcher != null) { + graphWatcher.close(); + graphWatcher = null; + } + + if (shardGroupWatcher != null){ + shardGroupWatcher.close(); + shardGroupWatcher = null; + } + } + partitionWatcher = storeWatcher = null; + + } + + private PDGrpc.PDBlockingStub getStub() throws PDException { + if (stubProxy.get() == null) { + newBlockingStub(); + } + return stubProxy.get().withDeadlineAfter(config.getGrpcTimeOut(), + TimeUnit.MILLISECONDS); + } + + + private String newLeaderStub() { + String leaderHost = ""; + for (int i = 0; i < stubProxy.getHostCount(); i++) { + String host = stubProxy.nextHost(); + ManagedChannel channel = ManagedChannelBuilder.forTarget(host).usePlaintext().build(); + PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(channel) + .withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS); + try { + Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() + .setHeader(header) + .build(); + Metapb.Member leader = stub.getMembers(request).getLeader(); + if (!leader.getGrpcUrl().equalsIgnoreCase(host)) { + leaderHost = leader.getGrpcUrl(); + channel.shutdown(); + channel.awaitTermination(10000, TimeUnit.MILLISECONDS); + channel = ManagedChannelBuilder.forTarget(leaderHost).usePlaintext().build(); + stubProxy.set(PDGrpc.newBlockingStub(channel) + .withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS)); + } else { + stubProxy.set(stub); + leaderHost = host; + } + log.info("PDClient connect to host = {} success", leaderHost); + break; + } catch (Exception e) { + log.error("PDClient connect to {} exception {}, {}", host, e.getMessage(), + e.getCause() != null ? 
e.getCause().getMessage() : ""); + } + } + return leaderHost; + } + /** + * Store注册,返回storeID,初次注册会返回新ID + * @param store + * @return + */ + public long registerStore(Metapb.Store store) throws PDException { + Pdpb.RegisterStoreRequest request = Pdpb.RegisterStoreRequest.newBuilder() + .setHeader(header) + .setStore(store).build(); + + Pdpb.RegisterStoreResponse response = + blockingUnaryCall(PDGrpc.getRegisterStoreMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoreId(); + } + + /** + * 根据storeId返回Store对象 + * @param storeId + * @return + * @throws PDException + */ + public Metapb.Store getStore(long storeId) throws PDException { + Metapb.Store store = cache.getStoreById(storeId); + if (store == null) { + Pdpb.GetStoreRequest request = Pdpb.GetStoreRequest.newBuilder() + .setHeader(header) + .setStoreId(storeId).build(); + Pdpb.GetStoreResponse response = getStub().getStore(request); + handleResponseError(response.getHeader()); + store = response.getStore(); + if (config.isEnableCache()) + cache.addStore(storeId, store); + } + return store; + } + + /** + * 更新Store信息,包括上下线等 + * @param store + * @return + */ + public Metapb.Store updateStore(Metapb.Store store) throws PDException { + Pdpb.SetStoreRequest request = Pdpb.SetStoreRequest.newBuilder() + .setHeader(header) + .setStore(store).build(); + + Pdpb.SetStoreResponse response = getStub().setStore(request); + handleResponseError(response.getHeader()); + store = response.getStore(); + if (config.isEnableCache()) + cache.addStore(store.getId(), store); + return store; + } + /** + * 返回活跃的Store + * @param graphName + * @return + */ + public List getActiveStores(String graphName) throws PDException { + List stores = new ArrayList<>(); + KVPair ptShard = this.getPartitionByCode(graphName, 0); + while (ptShard != null) { + stores.add(this.getStore(ptShard.getValue().getStoreId())); + if (ptShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE) { + ptShard = this.getPartitionByCode(graphName, ptShard.getKey().getEndKey()); + } else { + ptShard = null; + } + } + return stores; + } + + public List getActiveStores() throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setGraphName("") + .setExcludeOfflineStores(true) + .build(); + Pdpb.GetAllStoresResponse response = getStub().getAllStores(request); + handleResponseError(response.getHeader()); + return response.getStoresList(); + + } + + /** + * 返回活跃的Store + * @param graphName + * @return + */ + public List getAllStores(String graphName) throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setExcludeOfflineStores(false) + .build(); + Pdpb.GetAllStoresResponse response = getStub().getAllStores(request); + handleResponseError(response.getHeader()); + return response.getStoresList(); + + } + + /** + * Store心跳,定期调用,保持在线状态 + * @param stats + * @throws PDException + */ + public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDException { + Pdpb.StoreHeartbeatRequest request = Pdpb.StoreHeartbeatRequest.newBuilder() + .setHeader(header) + .setStats(stats).build(); + Pdpb.StoreHeartbeatResponse response = getStub().storeHeartbeat(request); + handleResponseError(response.getHeader()); + return response.getClusterStats(); + } + + /** + * 查询Key所属分区信息 + * @param graphName + * @param key + * @return + * @throws PDException + */ + public KVPair getPartition(String graphName, byte[] key) throws 
PDException { + // 先查cache,cache没有命中,在调用PD + KVPair partShard = cache.getPartitionByKey(graphName, key); + if ( partShard == null ){ + Pdpb.GetPartitionRequest request = Pdpb.GetPartitionRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setKey(ByteString.copyFrom(key)).build(); + Pdpb.GetPartitionResponse response = + blockingUnaryCall(PDGrpc.getGetPartitionMethod(), request); + handleResponseError(response.getHeader()); + partShard = new KVPair<>(response.getPartition(), response.getLeader()); + if (config.isEnableCache()) { + cache.addPartition(graphName, partShard.getKey().getId(), partShard.getKey()); + } + } + return partShard; + } + + /** + * 根据hashcode查询所属分区信息 + * @param graphName + * @param hashCode + * @return + * @throws PDException + */ + public KVPair getPartitionByCode(String graphName, long hashCode) + throws PDException { + // 先查cache,cache没有命中,在调用PD + KVPair partShard = cache.getPartitionByCode(graphName, hashCode); + if ( partShard == null ){ + Pdpb.GetPartitionByCodeRequest request = Pdpb.GetPartitionByCodeRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setCode(hashCode).build(); + Pdpb.GetPartitionResponse response = + blockingUnaryCall(PDGrpc.getGetPartitionByCodeMethod(), request); + handleResponseError(response.getHeader()); + partShard = new KVPair<>(response.getPartition(), response.getLeader()); + if (config.isEnableCache()) { + cache.addPartition(graphName, partShard.getKey().getId(), partShard.getKey()); + cache.updateShardGroup(getShardGroup(partShard.getKey().getId())); + } + } + return partShard; + } + + + + /** + * 获取Key的哈希值 + */ + public int keyToCode(String graphName, byte[] key){ + return PartitionUtils.calcHashcode(key); + } + + /** + * 根据分区id返回分区信息, RPC请求 + * @param graphName + * @param partId + * @return + * @throws PDException + */ + public KVPair getPartitionById(String graphName, int partId) throws PDException { + KVPair partShard = cache.getPartitionById(graphName, partId); + if (partShard == null) { + Pdpb.GetPartitionByIDRequest request = Pdpb.GetPartitionByIDRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setPartitionId(partId).build(); + Pdpb.GetPartitionResponse response = + blockingUnaryCall(PDGrpc.getGetPartitionByIDMethod(), request); + handleResponseError(response.getHeader()); + partShard = new KVPair<>(response.getPartition(), response.getLeader()); + if (config.isEnableCache()) { + cache.addPartition(graphName, partShard.getKey().getId(), partShard.getKey()); + cache.updateShardGroup(getShardGroup(partShard.getKey().getId())); + } + } + return partShard; + } + + public Metapb.ShardGroup getShardGroup(int partId) throws PDException { + Metapb.ShardGroup group = cache.getShardGroup(partId); + if (group == null) { + Pdpb.GetShardGroupRequest request = Pdpb.GetShardGroupRequest.newBuilder() + .setHeader(header) + .setGroupId(partId) + .build(); + Pdpb.GetShardGroupResponse response = blockingUnaryCall(PDGrpc.getGetShardGroupMethod(), request); + handleResponseError(response.getHeader()); + group = response.getShardGroup(); + if (config.isEnableCache()) { + cache.updateShardGroup(group); + } + } + return group; + } + + public void updateShardGroup(Metapb.ShardGroup shardGroup) throws PDException { + Pdpb.UpdateShardGroupRequest request = Pdpb.UpdateShardGroupRequest.newBuilder() + .setHeader(header) + .setShardGroup(shardGroup) + .build(); + Pdpb.UpdateShardGroupResponse response = blockingUnaryCall(PDGrpc.getUpdateShardGroupMethod(), request); + 
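// Surface any PD-side error first; the local shard-group cache is refreshed only
+ // after the server has accepted the update.
+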
handleResponseError(response.getHeader()); + + if (config.isEnableCache()) { + cache.updateShardGroup(shardGroup); + } + } + + /** + * 返回startKey和endKey跨越的所有分区信息 + * @param graphName + * @param startKey + * @param endKey + * @return + * @throws PDException + */ + public List> scanPartitions(String graphName, byte[] startKey, byte[] endKey) throws PDException { + List> partitions = new ArrayList<>(); + KVPair startPartShard = getPartition(graphName, startKey); + KVPair endPartShard = getPartition(graphName, endKey); + if (startPartShard == null || endPartShard == null) + return null; + + partitions.add(startPartShard); + while (startPartShard.getKey().getEndKey() < endPartShard.getKey().getEndKey() + && startPartShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE /*排除最后一个分区*/) { + startPartShard = getPartitionByCode(graphName, startPartShard.getKey().getEndKey()); + partitions.add(startPartShard); + } + return partitions; + } + /** + * 根据条件查询分区信息 + * @return + * @throws PDException + */ + public List getPartitionsByStore(long storeId) throws PDException { + + Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder() + .setStoreId(storeId) + .build(); + Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder() + .setQuery(query).build(); + Pdpb.QueryPartitionsResponse response = + blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request); + + handleResponseError(response.getHeader()); + return response.getPartitionsList(); + } + + /** + * 查找指定store上的指定partitionId + * @return + * @throws PDException + */ + public List queryPartitions(long storeId, int partitionId) throws PDException { + + Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder() + .setStoreId(storeId) + .setPartitionId(partitionId) + .build(); + Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder() + .setQuery(query).build(); + Pdpb.QueryPartitionsResponse response = + blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request); + + handleResponseError(response.getHeader()); + return response.getPartitionsList(); + } + + public List getPartitions(long storeId, String graphName) throws PDException { + + Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder() + .setStoreId(storeId) + .setGraphName(graphName).build(); + Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder() + .setQuery(query).build(); + Pdpb.QueryPartitionsResponse response = + blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request); + + handleResponseError(response.getHeader()); + return response.getPartitionsList(); + + } + + public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException { + Pdpb.SetGraphRequest request = Pdpb.SetGraphRequest.newBuilder() + .setGraph(graph) + .build(); + Pdpb.SetGraphResponse response = + blockingUnaryCall(PDGrpc.getSetGraphMethod(), request); + + handleResponseError(response.getHeader()); + return response.getGraph(); + } + + public Metapb.Graph getGraph(String graphName) throws PDException { + Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder() + .setGraphName(graphName) + .build(); + Pdpb.GetGraphResponse response = + blockingUnaryCall(PDGrpc.getGetGraphMethod(), request); + + handleResponseError(response.getHeader()); + return response.getGraph(); + } + + public Metapb.Graph getGraphWithOutException(String graphName) throws + PDException { + Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder() + .setGraphName( + graphName) + .build(); + Pdpb.GetGraphResponse response = blockingUnaryCall( 
+ PDGrpc.getGetGraphMethod(), request); + return response.getGraph(); + } + + public Metapb.Graph delGraph(String graphName) throws PDException { + Pdpb.DelGraphRequest request = Pdpb.DelGraphRequest.newBuilder() + .setGraphName(graphName) + .build(); + Pdpb.DelGraphResponse response = + blockingUnaryCall(PDGrpc.getDelGraphMethod(), request); + + handleResponseError(response.getHeader()); + return response.getGraph(); + } + + public List updatePartition(List partitions) throws PDException { + + Pdpb.UpdatePartitionRequest request = Pdpb.UpdatePartitionRequest.newBuilder() + .addAllPartition(partitions) + .build(); + Pdpb.UpdatePartitionResponse response = + blockingUnaryCall(PDGrpc.getUpdatePartitionMethod(), request); + handleResponseError(response.getHeader()); + invalidPartitionCache(); + + return response.getPartitionList(); + } + public Metapb.Partition delPartition(String graphName, int partitionId) throws PDException { + + Pdpb.DelPartitionRequest request = Pdpb.DelPartitionRequest.newBuilder() + .setGraphName(graphName) + .setPartitionId(partitionId) + .build(); + Pdpb.DelPartitionResponse response = + blockingUnaryCall(PDGrpc.getDelPartitionMethod(), request); + + handleResponseError(response.getHeader()); + invalidPartitionCache(graphName ,partitionId); + return response.getPartition(); + } + /** + * 删除分区缓存 + */ + public void invalidPartitionCache(String graphName, int partitionId) { + // 检查是否存在缓存 + if (null != cache.getPartitionById(graphName, partitionId)) + cache.removePartition(graphName, partitionId); + } + + + /** + * 删除分区缓存 + */ + public void invalidPartitionCache() { + // 检查是否存在缓存 + cache.removePartitions(); + } + /** + * 删除分区缓存 + */ + public void invalidStoreCache(long storeId){ + cache.removeStore(storeId); + } + + /** + * Hugegraph server 调用,Leader发生改变,更新缓存 + */ + public void updatePartitionLeader(String graphName, int partId, long leaderStoreId){ + KVPair partShard = null; + try { + partShard = this.getPartitionById(graphName, partId); + + if (partShard != null && partShard.getValue().getStoreId() != leaderStoreId) { + var shardGroup = this.getShardGroup(partId); + Metapb.Shard shard = null; + List shards = new ArrayList<>(); + + for(Metapb.Shard s : shardGroup.getShardsList()){ + if (s.getStoreId() == leaderStoreId) { + shard = s; + shards.add(Metapb.Shard.newBuilder(s) + .setStoreId(s.getStoreId()) + .setRole(Metapb.ShardRole.Leader).build()); + } else{ + shards.add(Metapb.Shard.newBuilder(s) + .setStoreId(s.getStoreId()) + .setRole(Metapb.ShardRole.Follower).build()); + } + } + + if (config.isEnableCache()) { + if (shard == null) { + // 分区的shard中未找到leader,说明分区发生了迁移 + cache.removePartition(graphName, partId); + } + } + } + } catch (PDException e) { + log.error("getPartitionException: {}", e.getMessage()); + } + } + + /** + * Hugegraph-store调用,更新缓存 + * @param partition + */ + public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader){ + if (config.isEnableCache()) { + cache.updatePartition(partition.getGraphName(), partition.getId(), partition); + cache.updateShardGroupLeader(partition.getId(), leader); + } + } + + + public Pdpb.GetIdResponse getIdByKey(String key, int delta) throws PDException { + Pdpb.GetIdRequest request = Pdpb.GetIdRequest.newBuilder() + .setHeader(header) + .setKey(key) + .setDelta(delta) + .build(); + Pdpb.GetIdResponse response = blockingUnaryCall(PDGrpc.getGetIdMethod(), request); + handleResponseError(response.getHeader()); + return response; + } + + public Pdpb.ResetIdResponse resetIdByKey(String key) throws 
PDException { + Pdpb.ResetIdRequest request = Pdpb.ResetIdRequest.newBuilder() + .setHeader(header) + .setKey(key) + .build(); + Pdpb.ResetIdResponse response = blockingUnaryCall(PDGrpc.getResetIdMethod(), request); + handleResponseError(response.getHeader()); + return response; + } + + public Metapb.Member getLeader() throws PDException { + Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() + .setHeader(header) + .build(); + Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request); + handleResponseError(response.getHeader()); + return response.getLeader(); + } + + public Pdpb.GetMembersResponse getMembers() throws PDException { + Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() + .setHeader(header) + .build(); + Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request); + handleResponseError(response.getHeader()); + return response; + } + + public Metapb.ClusterStats getClusterStats() throws PDException { + Pdpb.GetClusterStatsRequest request = Pdpb.GetClusterStatsRequest.newBuilder() + .setHeader(header) + .build(); + Pdpb.GetClusterStatsResponse response = blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request); + handleResponseError(response.getHeader()); + return response.getCluster(); + } + + private > RespT + blockingUnaryCall(MethodDescriptor method, ReqT req) throws PDException { + return blockingUnaryCall(method, req, 5); + } + + private > RespT + blockingUnaryCall(MethodDescriptor method, ReqT req, int retry) throws PDException { + io.grpc.stub.AbstractBlockingStub stub = (AbstractBlockingStub) getStub(); + try { + RespT resp = io.grpc.stub.ClientCalls.blockingUnaryCall(stub.getChannel(), method, stub.getCallOptions(), req); + return resp; + } catch (Exception e) { + log.error(method.getFullMethodName() + " exception, {}", e.getMessage()); + if (e instanceof StatusRuntimeException) { + StatusRuntimeException se = (StatusRuntimeException) e; + //se.getStatus() == Status.UNAVAILABLE && + if (retry < stubProxy.getHostCount()) { + // 网络不通,关掉之前连接,换host重新连接 + closeStub(true); + return blockingUnaryCall(method, req, ++retry); + } + } + } + return null; + } + + private static void handleResponseError(Pdpb.ResponseHeader header) throws + PDException { + if (header.hasError() && header.getError() + .getType() != Pdpb.ErrorType.OK) { + throw new PDException(header.getError().getTypeValue(), + String.format( + "PD request error, error code = %d, msg = %s", + header.getError().getTypeValue(), + header.getError().getMessage())); + } + } + + public void addEventListener(PDEventListener listener){ + eventListeners.add(listener); + } + + public PDWatch getWatchClient(){ + return new PDWatchImpl(stubProxy.getHost()); + } + + public PDPulse getPulseClient(){ + return this.pdPulse; + } + + /** + * 返回Store状态信息 + * + */ + public List getStoreStatus(boolean offlineExcluded) throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setExcludeOfflineStores(offlineExcluded) + .build(); + Pdpb.GetAllStoresResponse response = getStub().getStoreStatus(request); + handleResponseError(response.getHeader()); + List stores = response.getStoresList(); + return stores; + } + public void setGraphSpace(String graphSpaceName,long storageLimit) throws PDException { + Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder().setName(graphSpaceName) + .setStorageLimit(storageLimit).setTimestamp(System.currentTimeMillis()).build(); + 
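// The GraphSpace meta carries the space name, its storage quota and a timestamp;
+ // it is wrapped into a SetGraphSpaceRequest and sent through the blocking stub.
+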
Pdpb.SetGraphSpaceRequest request = Pdpb.SetGraphSpaceRequest.newBuilder() + .setHeader(header) + .setGraphSpace(graphSpace) + .build(); + Pdpb.SetGraphSpaceResponse response = getStub().setGraphSpace(request); + handleResponseError(response.getHeader()); + } + + public List getGraphSpace(String graphSpaceName) throws + PDException { + Pdpb.GetGraphSpaceRequest.Builder builder = Pdpb.GetGraphSpaceRequest.newBuilder(); + Pdpb.GetGraphSpaceRequest request; + builder.setHeader(header); + if (graphSpaceName != null && graphSpaceName.length() > 0) { + builder.setGraphSpaceName(graphSpaceName); + } + request = builder.build(); + Pdpb.GetGraphSpaceResponse response = getStub().getGraphSpace(request); + List graphSpaceList = response.getGraphSpaceList(); + handleResponseError(response.getHeader()); + return graphSpaceList; + } + + public void setPDConfig(int partitionCount, String peerList, int shardCount, long version) throws PDException { + Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder().setPartitionCount(partitionCount) + .setPeersList(peerList).setShardCount(shardCount) + .setVersion(version).setTimestamp(System.currentTimeMillis()) + .build(); + Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder() + .setHeader(header) + .setPdConfig(pdConfig) + .build(); + Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); + handleResponseError(response.getHeader()); + } + + public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException { + Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder() + .setHeader(header) + .setPdConfig(pdConfig) + .build(); + Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); + handleResponseError(response.getHeader()); + } + + public Metapb.PDConfig getPDConfig() throws PDException { + Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder() + .setHeader(header) + .build(); + Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request); + handleResponseError(response.getHeader()); + return response.getPdConfig(); + } + + public Metapb.PDConfig getPDConfig(long version) throws PDException { + Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder().setHeader( + header).setVersion(version).build(); + Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request); + handleResponseError(response.getHeader()); + return response.getPdConfig(); + } + + public void changePeerList(String peerList) throws PDException { + Pdpb.ChangePeerListRequest request = Pdpb.ChangePeerListRequest.newBuilder() + .setPeerList(peerList) + .setHeader(header).build(); + Pdpb.getChangePeerListResponse response = blockingUnaryCall(PDGrpc.getChangePeerListMethod(), request); + handleResponseError(response.getHeader()); + } + + /** + * 工作模式 + * Auto:自动分裂,每个Store上分区数达到最大值 + * + * @throws PDException + */ + public void splitData() throws PDException { + Pdpb.SplitDataRequest request = Pdpb.SplitDataRequest.newBuilder() + .setHeader(header) + .setMode(Pdpb.OperationMode.Auto).build(); + Pdpb.SplitDataResponse response = getStub().splitData(request); + handleResponseError(response.getHeader()); + } + + /** + * 工作模式 + * Auto:自动分裂,每个Store上分区数达到最大值 + * Expert:专家模式,需要指定splitParams + * @param mode + * @param params + * @throws PDException + */ + public void splitData(Pdpb.OperationMode mode, List params) throws PDException { + Pdpb.SplitDataRequest request = Pdpb.SplitDataRequest.newBuilder() + .setHeader(header) + .setMode(mode) + .addAllParam(params).build();; + Pdpb.SplitDataResponse response = 
getStub().splitData(request); + handleResponseError(response.getHeader()); + } + + public void splitGraphData(String graphName, int toCount) throws PDException { + Pdpb.SplitGraphDataRequest request = Pdpb.SplitGraphDataRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setToCount(toCount) + .build(); + Pdpb.SplitDataResponse response = getStub().splitGraphData(request); + handleResponseError(response.getHeader()); + } + + /** + * 自动转移,达到每个Store上分区数量相同 + * @throws PDException + */ + public void balancePartition() throws PDException { + Pdpb.MovePartitionRequest request = Pdpb.MovePartitionRequest.newBuilder() + .setHeader(header) + .setMode(Pdpb.OperationMode.Auto).build(); + Pdpb.MovePartitionResponse response = getStub().movePartition(request); + handleResponseError(response.getHeader()); + } + + /** + * //工作模式 + * // Auto:自动转移,达到每个Store上分区数量相同 + * // Expert:专家模式,需要指定transferParams + * @param mode + * @param params + * @throws PDException + */ + public void movePartition(Pdpb.OperationMode mode, List params) throws PDException { + Pdpb.MovePartitionRequest request = Pdpb.MovePartitionRequest.newBuilder() + .setHeader(header) + .setMode(mode) + .addAllParam(params).build(); + Pdpb.MovePartitionResponse response = getStub().movePartition(request); + handleResponseError(response.getHeader()); + } + public void reportTask(MetaTask.Task task)throws PDException { + Pdpb.ReportTaskRequest request = Pdpb.ReportTaskRequest.newBuilder() + .setHeader(header) + .setTask(task).build(); + Pdpb.ReportTaskResponse response = getStub().reportTask(request); + handleResponseError(response.getHeader()); + } + + public Metapb.PartitionStats getPartitionsStats(String graph, int partId)throws PDException { + Pdpb.GetPartitionStatsRequest request = Pdpb.GetPartitionStatsRequest.newBuilder() + .setHeader(header) + .setGraphName(graph) + .setPartitionId(partId).build(); + Pdpb.GetPartitionStatsResponse response = getStub().getPartitionStats(request); + handleResponseError(response.getHeader()); + return response.getPartitionStats(); + } + + /** + * 平衡不同store中leader的数量 + */ + public void balanceLeaders() throws PDException { + Pdpb.BalanceLeadersRequest request = Pdpb.BalanceLeadersRequest.newBuilder() + .setHeader(header) + .build(); + Pdpb.BalanceLeadersResponse response = getStub().balanceLeaders(request); + handleResponseError(response.getHeader()); + } + + /** + * 从pd中删除store + */ + public Metapb.Store delStore(long storeId) throws PDException { + Pdpb.DetStoreRequest request = Pdpb.DetStoreRequest.newBuilder() + .setHeader(header) + .setStoreId(storeId) + .build(); + Pdpb.DetStoreResponse response = getStub().delStore(request); + handleResponseError(response.getHeader()); + return response.getStore(); + } + + /** + * 对rocksdb整体进行compaction + * @throws PDException + */ + public void dbCompaction() throws PDException { + Pdpb.DbCompactionRequest request = Pdpb.DbCompactionRequest + .newBuilder() + .setHeader(header) + .build(); + Pdpb.DbCompactionResponse response = getStub().dbCompaction(request); + handleResponseError(response.getHeader()); + } + + /** + * 对rocksdb指定表进行compaction + * @param tableName + * @throws PDException + */ + public void dbCompaction(String tableName) throws PDException { + Pdpb.DbCompactionRequest request = Pdpb.DbCompactionRequest + .newBuilder() + .setHeader(header) + .setTableName(tableName) + .build(); + Pdpb.DbCompactionResponse response = getStub().dbCompaction(request); + handleResponseError(response.getHeader()); + } + + /** + * 分区合并,把当前的分区缩容至toCount个 + * + 
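* Combine partitions: shrink the cluster's current partitions down to toCount.
+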
* @param toCount 缩容到分区的个数 + * @throws PDException + */ + public void combineCluster(int toCount) throws PDException{ + Pdpb.CombineClusterRequest request = Pdpb.CombineClusterRequest + .newBuilder() + .setHeader(header) + .setToCount(toCount) + .build(); + Pdpb.CombineClusterResponse response = getStub().combineCluster(request); + handleResponseError(response.getHeader()); + } + + /** + * 将单图缩容到 toCount个 + * @param graphName graph name + * @param toCount target count + * @throws PDException + */ + public void combineGraph(String graphName, int toCount) throws PDException{ + Pdpb.CombineGraphRequest request = Pdpb.CombineGraphRequest + .newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setToCount(toCount) + .build(); + Pdpb.CombineGraphResponse response = getStub().combineGraph(request); + handleResponseError(response.getHeader()); + } + + public void deleteShardGroup(int groupId) throws PDException { + Pdpb.DeleteShardGroupRequest request = Pdpb.DeleteShardGroupRequest + .newBuilder() + .setHeader(header) + .setGroupId(groupId) + .build(); + Pdpb.DeleteShardGroupResponse response = getStub().deleteShardGroup(request); + handleResponseError(response.getHeader()); + } + + /** + * 用于 store的 shard list重建 + * @param groupId shard group id + * @param shards shard list,delete when shards size is 0 + */ + public void updateShardGroupOp(int groupId, List shards) throws PDException { + Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .addAllShards(shards) + .build(); + Pdpb.ChangeShardResponse response = getStub().updateShardGroupOp(request); + handleResponseError(response.getHeader()); + } + + /** + * invoke fireChangeShard command + * @param groupId shard group id + * @param shards shard list + */ + public void changeShard(int groupId, List shards) throws PDException { + Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .addAllShards(shards) + .build(); + Pdpb.ChangeShardResponse response = getStub().changeShard(request); + handleResponseError(response.getHeader()); + } + + public PartitionCache getCache() { + return cache; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java new file mode 100644 index 0000000000..3b72ee89ad --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java @@ -0,0 +1,62 @@ +package org.apache.hugegraph.pd.client; + +public final class PDConfig { + //TODO multi-server + private String serverHost = "localhost:9000"; + private long grpcTimeOut = 60000; // grpc调用超时时间 10秒 + + // 是否接收PD异步通知 + private boolean enablePDNotify = false; + + private boolean enableCache = false; + + private PDConfig() { + } + + public static PDConfig of() { + return new PDConfig(); + } + + public static PDConfig of(String serverHost) { + PDConfig config = new PDConfig(); + config.serverHost = serverHost; + return config; + } + + public static PDConfig of(String serverHost, long timeOut) { + PDConfig config = new PDConfig(); + config.serverHost = serverHost; + config.grpcTimeOut = timeOut; + return config; + } + public String getServerHost() { + return serverHost; + } + + public long getGrpcTimeOut(){ return grpcTimeOut; } + + @Deprecated + public PDConfig setEnablePDNotify(boolean enablePDNotify) { + this.enablePDNotify = enablePDNotify; + + // TODO 临时代码,hugegraph修改完后删除 + this.enableCache = enablePDNotify; + 
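// Deprecated switch: turning on PD notifications currently also enables the local
+ // cache (temporary coupling, see the TODO above); prefer setEnableCache() instead.
+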
return this; + } + + public boolean isEnableCache() { + return enableCache; + } + + public PDConfig setEnableCache(boolean enableCache) { + this.enableCache = enableCache; + return this; + } + + @Override + public String toString() { + return "PDConfig{" + + "serverHost='" + serverHost + '\'' + + '}'; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java new file mode 100644 index 0000000000..63dd780131 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java @@ -0,0 +1,118 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.pulse.PulseServerNotice; + +import java.io.Closeable; +import java.util.function.Consumer; + +/** + * Bidirectional communication interface of pd-client and pd-server + * @author lynn.bond@hotmail.com created on 2021/11/9 + */ +public interface PDPulse { + + /** + * + * @param listener + * @return + */ + Notifier connectPartition(Listener listener); + + /*** inner static methods ***/ + static Listener listener(Consumer onNext) { + return listener(onNext, t -> {}, () -> {}); + } + + static Listener listener(Consumer onNext, Consumer onError) { + return listener(onNext, onError, () -> {}); + } + + static Listener listener(Consumer onNext, Runnable onCompleted) { + return listener(onNext, t -> {}, onCompleted); + } + + static Listener listener(Consumer onNext, Consumer onError, Runnable onCompleted) { + return new Listener() { + @Override + public void onNext(T response) { + onNext.accept(response); + } + + @Override + public void onNotice(PulseServerNotice notice) { + + } + + @Override + public void onError(Throwable throwable) { + onError.accept(throwable); + } + + @Override + public void onCompleted() { + onCompleted.run(); + } + }; + } + + /** + * Interface of pulse. + */ + interface Listener { + /** + * Invoked on new events. + * + * @param response the response. + */ + @Deprecated + default void onNext(T response){}; + + /** + * Invoked on new events. + * @param notice a wrapper of response + */ + default void onNotice(PulseServerNotice notice){ + notice.ack(); + } + + /** + * Invoked on errors. + * + * @param throwable the error. + */ + void onError(Throwable throwable); + + /** + * Invoked on completion. + */ + void onCompleted(); + + } + + /** + * Interface of notifier that can send notice to server. + * @param + */ + interface Notifier extends Closeable { + /** + * closes this watcher and all its resources. + */ + @Override + void close(); + + /** + * Send notice to pd-server. + * @return + */ + void notifyServer(T t); + + /** + * Send an error report to pd-server. 
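+ * (Implementations may signal this by erroring the underlying request stream,
+ * as PDPulseImpl's connector does.)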
+ * @param error + */ + void crash(String error); + + } +} \ No newline at end of file diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java new file mode 100644 index 0000000000..91ba2687fe --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java @@ -0,0 +1,149 @@ +package org.apache.hugegraph.pd.client; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import com.baidu.hugegraph.pd.grpc.pulse.HgPdPulseGrpc; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import com.baidu.hugegraph.pd.grpc.pulse.PulseAckRequest; +import com.baidu.hugegraph.pd.grpc.pulse.PulseCreateRequest; +import com.baidu.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import com.baidu.hugegraph.pd.grpc.pulse.PulseRequest; +import com.baidu.hugegraph.pd.grpc.pulse.PulseResponse; +import com.baidu.hugegraph.pd.grpc.pulse.PulseType; +import com.baidu.hugegraph.pd.pulse.PartitionNotice; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/9 + */ +@Slf4j +final class PDPulseImpl implements PDPulse { + + private final HgPdPulseGrpc.HgPdPulseStub stub; + + private ExecutorService threadPool ; + + // TODO: support several servers. + public PDPulseImpl(String pdServerAddress) { + this.stub = HgPdPulseGrpc.newStub(getChannel(pdServerAddress)); + var namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("ack-notice-pool-%d").build(); + threadPool = Executors.newSingleThreadExecutor(namedThreadFactory); + } + + private ManagedChannel getChannel(String target) { + return ManagedChannelBuilder.forTarget(target).usePlaintext().build(); + } + + @Override + public Notifier connectPartition(Listener listener) { + return new PartitionHeartbeat(listener); + } + + /*** PartitionHeartbeat's implement ***/ + private class PartitionHeartbeat extends + AbstractConnector { + private long observerId = -1; + + PartitionHeartbeat(Listener listener) { + super(listener, PulseType.PULSE_TYPE_PARTITION_HEARTBEAT); + } + + private void setObserverId(long observerId) { + if (this.observerId == -1) { + this.observerId = observerId; + } + } + + @Override + public void notifyServer(PartitionHeartbeatRequest.Builder requestBuilder) { + this.reqStream.onNext(PulseRequest.newBuilder() + .setNoticeRequest( + PulseNoticeRequest.newBuilder() + .setPartitionHeartbeatRequest( + requestBuilder.build() + ).build() + ).build() + ); + } + + @Override + public void onNext(PulseResponse pulseResponse) { + this.setObserverId(pulseResponse.getObserverId()); + long noticeId = pulseResponse.getNoticeId(); + PartitionHeartbeatResponse res = pulseResponse.getPartitionHeartbeatResponse(); + this.listener.onNext(res); + this.listener.onNotice(new PartitionNotice(noticeId, + e -> super.ackNotice(e, observerId), res)); + } + + } + + private abstract class AbstractConnector implements Notifier, StreamObserver { + Listener listener; + StreamObserver reqStream; + PulseType pulseType; + PulseRequest.Builder reqBuilder = PulseRequest.newBuilder(); + PulseAckRequest.Builder ackBuilder = PulseAckRequest.newBuilder(); + + private AbstractConnector(Listener listener, PulseType pulseType) { + 
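+ // Each connector opens its own bidirectional pulse stream: init() registers this
+ // object as the response observer and sends the initial PulseCreateRequest.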
this.listener = listener; + this.pulseType = pulseType; + this.init(); + } + + void init() { + PulseCreateRequest.Builder builder = PulseCreateRequest.newBuilder() + .setPulseType(this.pulseType); + + this.reqStream = PDPulseImpl.this.stub.pulse(this); + this.reqStream.onNext(reqBuilder.clear().setCreateRequest(builder).build()); + } + + /*** notifier ***/ + @Override + public void close() { + this.reqStream.onCompleted(); + } + + @Override + public abstract void notifyServer(N t); + + @Override + public void crash(String error) { + this.reqStream.onError(new Throwable(error)); + } + + /*** listener ***/ + @Override + public abstract void onNext(PulseResponse pulseResponse); + + @Override + public void onError(Throwable throwable) { + this.listener.onError(throwable); + } + + @Override + public void onCompleted() { + this.listener.onCompleted(); + } + + protected void ackNotice(long noticeId, long observerId) { + threadPool.execute(() -> { + // log.info("send ack: {}, ts: {}", noticeId, System.currentTimeMillis()); + this.reqStream.onNext(reqBuilder.clear() + .setAckRequest( + this.ackBuilder.clear().setNoticeId(noticeId) + .setObserverId(observerId).build() + ).build() + ); + }); + } + } + +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java new file mode 100644 index 0000000000..0662ebe52b --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java @@ -0,0 +1,120 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.watch.NodeEvent; +import org.apache.hugegraph.pd.watch.PartitionEvent; + +import java.io.Closeable; +import java.util.function.Consumer; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/4 + */ +public interface PDWatch { + + /** + * Watch the events of all store-nodes registered in the remote PD-Server. + * + * @param listener + * @return + */ + //PDWatcher watchNode(Listener listener); + + /** + * Watch the events of the store-nodes assigned to a specified graph. + * + * @param graph the graph name which you want to watch + * @param listener + * @return + */ + //PDWatcher watchNode(String graph, Listener listener); + + + /** + * + * @param listener + * @return + */ + Watcher watchPartition(Listener listener); + + Watcher watchNode(Listener listener); + + Watcher watchGraph(Listener listener); + + Watcher watchShardGroup(Listener listener); + + /*** inner static methods ***/ + static Listener listener(Consumer onNext) { + return listener(onNext, t -> { + }, () -> { + }); + } + + static Listener listener(Consumer onNext, Consumer onError) { + return listener(onNext, onError, () -> { + }); + } + + static Listener listener(Consumer onNext, Runnable onCompleted) { + return listener(onNext, t -> { + }, onCompleted); + } + + static Listener listener(Consumer onNext, Consumer onError, Runnable onCompleted) { + return new Listener() { + @Override + public void onNext(T response) { + onNext.accept(response); + } + + @Override + public void onError(Throwable throwable) { + onError.accept(throwable); + } + + @Override + public void onCompleted() { + onCompleted.run(); + } + }; + } + + + /** + * Interface of Watcher. + */ + interface Listener { + /** + * Invoked on new events. + * + * @param response the response. + */ + void onNext(T response); + + /** + * Invoked on errors. + * + * @param throwable the error. 
+ */ + void onError(Throwable throwable); + + /** + * Invoked on completion. + */ + void onCompleted(); + } + + interface Watcher extends Closeable { + /** + * closes this watcher and all its resources. + */ + @Override + void close(); + + /** + * Requests the latest revision processed and propagates it to listeners + */ + // TODO: what's it for? + //void requestProgress(); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java new file mode 100644 index 0000000000..bdbed212c2 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java @@ -0,0 +1,176 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.grpc.watch.HgPdWatchGrpc; +import com.baidu.hugegraph.pd.grpc.watch.WatchCreateRequest; +import com.baidu.hugegraph.pd.grpc.watch.WatchNodeResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchPartitionResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchRequest; +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchType; +import com.baidu.hugegraph.pd.watch.NodeEvent; +import com.baidu.hugegraph.pd.watch.PartitionEvent; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.stub.StreamObserver; + +import java.util.function.Supplier; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/4 + */ +final class PDWatchImpl implements PDWatch { + + private final HgPdWatchGrpc.HgPdWatchStub stub; + + // TODO: support several servers. + PDWatchImpl(String pdServerAddress) { + this.stub = HgPdWatchGrpc.newStub(getChannel(pdServerAddress)); + } + + private ManagedChannel getChannel(String target) { + return ManagedChannelBuilder.forTarget(target).usePlaintext().build(); + } + + /** + * Get Partition change watcher. + * + * @param listener + * @return + */ + @Override + public Watcher watchPartition(Listener listener) { + return new PartitionWatcher(listener); + } + + /** + * Get Store-Node change watcher. 
+ * + * @param listener + * @return + */ + @Override + public Watcher watchNode(Listener listener) { + return new NodeWatcher(listener); + } + + @Override + public Watcher watchGraph(Listener listener) { + return new GraphWatcher(listener); + } + + @Override + public Watcher watchShardGroup(Listener listener) { + return new ShardGroupWatcher(listener); + } + + private class GraphWatcher extends AbstractWatcher { + + private GraphWatcher(Listener listener) { + super(listener, + () -> WatchCreateRequest + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_GRAPH_CHANGE) + .build() + ); + } + + @Override + public void onNext(WatchResponse watchResponse) { + this.listener.onNext(watchResponse); + } + } + + private class ShardGroupWatcher extends AbstractWatcher { + + private ShardGroupWatcher(Listener listener) { + super(listener, + () -> WatchCreateRequest + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE) + .build() + ); + } + + @Override + public void onNext(WatchResponse watchResponse) { + this.listener.onNext(watchResponse); + } + } + + private class PartitionWatcher extends AbstractWatcher { + + private PartitionWatcher(Listener listener) { + super(listener, + () -> WatchCreateRequest + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_PARTITION_CHANGE) + .build() + ); + } + + @Override + public void onNext(WatchResponse watchResponse) { + WatchPartitionResponse res = watchResponse.getPartitionResponse(); + PartitionEvent event = new PartitionEvent(res.getGraph(), res.getPartitionId(), + PartitionEvent.ChangeType.grpcTypeOf(res.getChangeType())); + this.listener.onNext(event); + } + } + + private class NodeWatcher extends AbstractWatcher { + private NodeWatcher(Listener listener) { + super(listener, + () -> WatchCreateRequest + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_STORE_NODE_CHANGE) + .build() + ); + } + + @Override + public void onNext(WatchResponse watchResponse) { + WatchNodeResponse res = watchResponse.getNodeResponse(); + NodeEvent event = new NodeEvent(res.getGraph(), res.getNodeId(), + NodeEvent.EventType.grpcTypeOf(res.getNodeEventType())); + this.listener.onNext(event); + } + } + + private abstract class AbstractWatcher implements Watcher, StreamObserver { + Listener listener; + StreamObserver reqStream; + Supplier requestSupplier; + + private AbstractWatcher(Listener listener, Supplier requestSupplier) { + this.listener = listener; + this.requestSupplier = requestSupplier; + this.init(); + } + + void init() { + this.reqStream = PDWatchImpl.this.stub.watch(this); + this.reqStream.onNext(WatchRequest.newBuilder().setCreateRequest( + this.requestSupplier.get() + ).build()); + } + + @Override + public void close() { + this.reqStream.onCompleted(); + } + + @Override + public abstract void onNext(WatchResponse watchResponse); + + @Override + public void onError(Throwable throwable) { + this.listener.onError(throwable); + } + + @Override + public void onCompleted() { + this.listener.onCompleted(); + } + } + +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java new file mode 100644 index 0000000000..2837eefd3a --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java @@ -0,0 +1,36 @@ +package org.apache.hugegraph.pd.pulse; + +import java.util.function.Consumer; + +import java.util.function.Consumer; + +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +/** + * @author 
lynn.bond@hotmail.com created on 2022/2/13 + */ +public class PartitionNotice implements PulseServerNotice{ + private long noticeId; + private Consumer ackConsumer; + private PartitionHeartbeatResponse content; + + public PartitionNotice(long noticeId, Consumer ackConsumer, PartitionHeartbeatResponse content) { + this.noticeId = noticeId; + this.ackConsumer = ackConsumer; + this.content = content; + } + + @Override + public void ack() { + this.ackConsumer.accept(this.noticeId); + } + + @Override + public long getNoticeId() { + return this.noticeId; + } + + @Override + public PartitionHeartbeatResponse getContent() { + return this.content; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java new file mode 100644 index 0000000000..03d298f4c8 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java @@ -0,0 +1,20 @@ +package org.apache.hugegraph.pd.pulse; + +/** + * @author lynn.bond@hotmail.com created on 2022/2/13 + */ +public interface PulseServerNotice { + /** + * @throws RuntimeException when failed to send ack-message to pd-server + */ + void ack(); + + long getNoticeId(); + + /** + * Return a response object of gRPC stream. + * @return + */ + T getContent(); + +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java new file mode 100644 index 0000000000..dae8726388 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java @@ -0,0 +1,78 @@ +package org.apache.hugegraph.pd.watch; + +import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; + +import java.util.Objects; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/4 + */ +public class NodeEvent { + private String graph; + private long nodeId; + private EventType eventType; + + public NodeEvent(String graph, long nodeId, EventType eventType) { + this.graph=graph; + this.nodeId=nodeId; + this.eventType=eventType; + } + + public String getGraph() { + return graph; + } + + public long getNodeId() { + return nodeId; + } + + public EventType getEventType() { + return eventType; + } + + public enum EventType { + UNKNOWN, + NODE_ONLINE, + NODE_OFFLINE, + NODE_RAFT_CHANGE; + + + public static EventType grpcTypeOf(NodeEventType grpcType) { + switch (grpcType) { + case NODE_EVENT_TYPE_NODE_ONLINE: + return NODE_ONLINE; + case NODE_EVENT_TYPE_NODE_OFFLINE: + return NODE_OFFLINE; + case NODE_EVENT_TYPE_NODE_RAFT_CHANGE: + return NODE_RAFT_CHANGE; + default: + return UNKNOWN; + } + + } + + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + NodeEvent nodeEvent = (NodeEvent) o; + return nodeId == nodeEvent.nodeId && Objects.equals(graph, + nodeEvent.graph) && eventType == nodeEvent.eventType; + } + + @Override + public int hashCode() { + return Objects.hash(graph, nodeId, eventType); + } + + @Override + public String toString() { + return "NodeEvent{" + + "graph='" + graph + '\'' + + ", nodeId=" + nodeId + + ", eventType=" + eventType + + '}'; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java new file mode 100644 index 0000000000..18a34e8a45 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java 
@@ -0,0 +1,8 @@ +package org.apache.hugegraph.pd.watch; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/4 + */ +public class PDWatcher { + +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java new file mode 100644 index 0000000000..04adb403a2 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java @@ -0,0 +1,74 @@ +package org.apache.hugegraph.pd.watch; + +import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; + +import java.util.Objects; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/4 + */ +public class PartitionEvent { + private String graph; + private int partitionId; + private ChangeType changeType; + + public PartitionEvent(String graph, int partitionId, ChangeType changeType) { + this.graph = graph; + this.partitionId = partitionId; + this.changeType = changeType; + } + + public String getGraph() { + return this.graph; + } + + public int getPartitionId() { + return this.partitionId; + } + + public ChangeType getChangeType() { + return this.changeType; + } + + public enum ChangeType { + UNKNOWN, + ADD, + ALTER, + DEL; + + public static ChangeType grpcTypeOf(WatchChangeType grpcType) { + switch (grpcType) { + case WATCH_CHANGE_TYPE_ADD: + return ADD; + case WATCH_CHANGE_TYPE_ALTER: + return ALTER; + case WATCH_CHANGE_TYPE_DEL: + return DEL; + default: + return UNKNOWN; + } + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PartitionEvent that = (PartitionEvent) o; + return partitionId == that.partitionId && Objects.equals(graph, that.graph) && changeType == that.changeType; + } + + @Override + public int hashCode() { + return Objects.hash(graph, partitionId, changeType); + } + + @Override + public String toString() { + return "PartitionEvent{" + + "graph='" + graph + '\'' + + ", partitionId=" + partitionId + + ", changeType=" + changeType + + '}'; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java new file mode 100644 index 0000000000..cafd4dd895 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java @@ -0,0 +1,16 @@ +package org.apache.hugegraph.pd.watch; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/4 + */ +enum WatchType { + + PARTITION_CHANGE(10); + + private int value; + + private WatchType(int value){ + this.value=value; + } + +} diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java new file mode 100644 index 0000000000..bb32ddd155 --- /dev/null +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java @@ -0,0 +1,83 @@ +package org.apache.hugegraph.pd; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PartitionCache; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.google.common.collect.Range; +import com.google.common.collect.RangeMap; +import com.google.common.collect.TreeRangeMap; + +// import org.junit.Test; + +public class PartitionCacheTest { + + // @Test + public void test(){ + PartitionCache cache = new PartitionCache(); + for(int i = 0; i< 10; i++) 
{ + KVPair partShards = new KVPair<>(Metapb.Partition.newBuilder() + .setStartKey(i*10) + .setEndKey((i+1)*10) + .build(),null); + cache.addPartition("aa", i, partShards.getKey()); + } + + for(int i = 0; i<100; i++){ + KVPair partShards = cache.getPartitionByCode("aa", i); + System.out.println(" " + i + " " + partShards.getKey().getStartKey()); + } + } + + + // @Test + public void test1(){ + Map> keyToPartIdCache = new HashMap<>(); + // graphName + PartitionID组成key + Map> partitionCache = new HashMap<>(); + + // 缓存全部Store,用于全库查询,需要优化 + Map> allStoresCache = new HashMap<>(); + + keyToPartIdCache.put("a", TreeRangeMap.create()); + + keyToPartIdCache.get("a") + .put(Range.closedOpen(1L, 2L), 1); + + allStoresCache.put("a", new ArrayList<>()); + allStoresCache.get("a").add(Metapb.Store.newBuilder().setId(34).build()); + + + Map> keyToPartIdCache2 = cloneKeyToPartIdCache(keyToPartIdCache); + System.out.println(keyToPartIdCache2.size()); + } + + public Map> cloneKeyToPartIdCache(Map> cache) { + Map> cacheClone = new HashMap<>(); + cache.forEach((k1, v1) -> { + cacheClone.put(k1, TreeRangeMap.create()); + v1.asMapOfRanges().forEach((k2, v2) -> { + cacheClone.get(k1).put(k2, v2); + }); + }); + return cacheClone; + } + + public Map> + clonePartitionCache(Map> cache) { + Map> cacheClone = new HashMap<>(); + cacheClone.putAll(cache); + return cacheClone; + } + + public Map> + cloneStoreCache(Map> cache) { + Map> cacheClone = new HashMap<>(); + cacheClone.putAll(cache); + return cacheClone; + } +} diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java new file mode 100644 index 0000000000..9797314775 --- /dev/null +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java @@ -0,0 +1,122 @@ +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.client.PDPulse; +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.pulse.PulseServerNotice; +import org.junit.Assert; +import org.junit.BeforeClass; +// import org.junit.Test; + +import java.nio.charset.StandardCharsets; +import java.util.List; + +public class StoreRegisterTest { + private static PDClient pdClient; + + private long storeId = 0; + private final String storeAddr = "localhost"; + private final String graphName = "default/hugegraph/g"; + + @BeforeClass + public static void beforeClass() throws Exception { + PDConfig config = PDConfig.of("localhost:8686"); + config.setEnableCache(true); + pdClient = PDClient.create(config); + } + + // @Test + public void testRegisterStore() throws PDException { + Metapb.Store store = Metapb.Store.newBuilder().setAddress(storeAddr).build(); + try { + storeId = pdClient.registerStore(store); + } catch (Exception e) { + e.printStackTrace(); + } + Assert.assertTrue("RegisterStore store_id = " + storeId, storeId != 0); + } + + // @Test + public void testGetStore() throws PDException { + testRegisterStore(); + Metapb.Store store = pdClient.getStore(storeId); + Assert.assertTrue(store.getAddress().equals(storeAddr)); + System.out.println(store); + } + // @Test + public void testGetActiveStores() throws PDException { + 
testRegisterStore(); + List stores = pdClient.getActiveStores(graphName); + stores.forEach((e) -> { + System.out.println("-------------------------------------"); + System.out.println(e); + }); + } + + + // @Test + public void testStoreHeartbeat() throws PDException { + testRegisterStore(); + Metapb.StoreStats stats = Metapb.StoreStats.newBuilder() + .setStoreId(storeId) + .build(); + pdClient.storeHeartbeat(stats); + List stores = pdClient.getActiveStores(graphName); + boolean exist = false; + for (Metapb.Store store : stores) { + if (store.getId() == storeId) { + exist = true; + break; + } + } + Assert.assertTrue(exist); + } + + + + // @Test + public void testPartitionHeartbeat() throws InterruptedException, PDException { + testRegisterStore(); + PDPulse pdPulse = pdClient.getPulseClient(); + PDPulse.Notifier notifier = pdPulse.connectPartition( + new PDPulse.Listener() { + + @Override + public void onNext(PartitionHeartbeatResponse response) { + + } + + @Override + public void onNotice(PulseServerNotice notice) { + + } + + @Override + public void onError(Throwable throwable) { + + } + + @Override + public void onCompleted() { + + } + }); + KVPair partShard = pdClient.getPartition("test", "1".getBytes(StandardCharsets.UTF_8)); + notifier.notifyServer(PartitionHeartbeatRequest.newBuilder() + .setStates( + Metapb.PartitionStats.newBuilder() + .addGraphName("test") + .setId(partShard.getKey().getId()) + .setLeader(Metapb.Shard.newBuilder() + .setStoreId(1).build()))); + + + Thread.sleep(10000); + } + +} diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java new file mode 100644 index 0000000000..6abdb39929 --- /dev/null +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java @@ -0,0 +1,137 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; +import com.baidu.hugegraph.pd.grpc.discovery.Query; + +import org.junit.Assert; +// import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.Vector; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author zhangyingjie + * @date 2021/12/21 + **/ +public class DiscoveryClientImplTest { + + String address = "localhost:80"; + int delay = 1000; + int wait = delay * 3 + 500; + + // @Test + public void registerStore() throws InterruptedException { + + HashMap labels = new HashMap<>(); + + labels.put("metrics","/actuator/prometheus"); + labels.put("target","10.81.116.77:8520"); + labels.put("scheme","http"); + labels.put("__relabeling","http"); + labels.put("no_relabeling","http"); + getClient("store", "address1",labels); + + labels.put("metrics","/actuator/prometheus"); + labels.put("target","10.81.116.78:8520"); + labels.put("scheme","http"); + getClient("store", "address2",labels); + + labels.put("metrics","/actuator/prometheus"); + labels.put("target","10.81.116.79:8520"); + labels.put("scheme","http"); + getClient("store", "address3",labels); + + labels.put("metrics","/actuator/prometheus"); + labels.put("target","10.81.116.78:8620"); + labels.put("scheme","http"); + getClient("pd", "address1",labels); + + labels.put("metrics","/graph/metrics"); + labels.put("target","10.37.1.1:9200"); + labels.put("scheme","https"); + getClient("hugegraph", "address1",labels); + } + + // @Test + public void testNodes() throws InterruptedException { + String 
appName = "hugegraph"; + register(appName, address); + } + + // @Test + public void testMultiNode() throws InterruptedException { + for (int i = 0; i < 2; i++) { + register("app" + String.valueOf(i), address + i); + } + } + + // @Test + public void testParallelMultiNode() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(30); + Vector exceptions = new Vector<>(); + for (int i = 0; i < 30; i++) { + int finalI = i; + new Thread(() -> { + try { + for (int j = 0; j < 3; j++) { + register("app" + finalI, address + j); + } + } catch (Exception e) { + exceptions.add(e); + } finally { + latch.countDown(); + } + }).start(); + } + latch.await(); + Assert.assertTrue(exceptions.size() == 0); + } + private static AtomicLong label = new AtomicLong(); + private void register(String appName, String address) throws InterruptedException { + + HashMap labels = new HashMap<>(); + String labelValue = String.valueOf(label.incrementAndGet()); + labels.put("address",labelValue); + labels.put("address1",labelValue); + Query query = Query.newBuilder().setAppName( + appName).setVersion("0.13.0").putAllLabels(labels).build(); + DiscoveryClientImpl discoveryClient = getClient(appName, address, labels); + Thread.sleep(10000); + NodeInfos nodeInfos1 = discoveryClient.getNodeInfos(query); + Assert.assertTrue(nodeInfos1.getInfoCount() == 1); + DiscoveryClientImpl discoveryClient1 = getClient(appName, address + 0,labels); + Thread.sleep(10000); + Assert.assertTrue( + discoveryClient.getNodeInfos(query).getInfoCount() == 2); + Query query1 = Query.newBuilder().setAppName( + appName).setVersion("0.12.0").putAllLabels(labels).build(); + Assert.assertTrue( + discoveryClient.getNodeInfos(query1).getInfoCount() == 0); + discoveryClient.cancelTask(); + discoveryClient1.cancelTask(); + Thread.sleep(wait); + NodeInfos nodeInfos = discoveryClient.getNodeInfos(query); + System.out.println(nodeInfos); + Assert.assertTrue(nodeInfos.getInfoCount() == 0); + discoveryClient.close(); + discoveryClient1.close(); + } + + private DiscoveryClientImpl getClient(String appName, String address,Map labels) { + DiscoveryClientImpl discoveryClient = null; + try{ + discoveryClient = DiscoveryClientImpl.newBuilder().setCenterAddress( + "localhost:8687,localhost:8686,localhost:8688").setAddress(address).setAppName( + appName).setDelay(delay).setVersion("0.13.0").setId( + "0").setLabels(labels).build(); + discoveryClient.scheduleTask(); + } catch(Exception e){ + e.printStackTrace(); + } + + return discoveryClient; + } +} \ No newline at end of file diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java new file mode 100644 index 0000000000..53fd34c563 --- /dev/null +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java @@ -0,0 +1,112 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.kv.KResponse; +import com.baidu.hugegraph.pd.grpc.kv.KvResponse; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +// import org.junit.Test; +import org.yaml.snakeyaml.Yaml; + +import java.io.File; +import java.util.Iterator; +import java.util.Map; +import java.util.Properties; + +/** + * @author zhangyingjie + * @date 2021/12/21 + **/ +@Slf4j +public class LicenseClientImplTest { + + // @Test + public void putLicense() { + PDConfig pdConfig = 
PDConfig.of("localhost:8686,localhost:8687,localhost:8688"); + //PDConfig pdConfig = PDConfig.of("localhost:8686"); + pdConfig.setEnableCache(true); + try (LicenseClient c = new LicenseClient(pdConfig)) { + File file = new File("../conf/hugegraph.license"); + byte[] bytes = FileUtils.readFileToByteArray(file); + Pdpb.PutLicenseResponse putLicenseResponse = c.putLicense(bytes); + Pdpb.Error error = putLicenseResponse.getHeader().getError(); + log.info(error.getMessage()); + assert error.getType().equals(Pdpb.ErrorType.OK); + } catch (Exception e) { + log.error("put license with error: {}", e); + } + } + + // @Test + public void getKv() { + PDConfig pdConfig = PDConfig.of("10.157.12.36:8686"); + pdConfig.setEnableCache(true); + try (KvClient c = new KvClient(pdConfig)) { + KResponse kResponse = c.get("S:FS"); + Pdpb.Error error = kResponse.getHeader().getError(); + log.info(error.getMessage()); + assert error.getType().equals(Pdpb.ErrorType.OK); + Properties ymlConfig = getYmlConfig(kResponse.getValue()); + Object property = ymlConfig.get("rocksdb.write_buffer_size"); + assert property.toString().equals("32000000"); + } catch (Exception e) { + log.error("put license with error: {}", e); + } + } + // @Test + public void putKv() { + PDConfig pdConfig = PDConfig.of("10.14.139.70:8688"); + pdConfig.setEnableCache(true); + try (KvClient c = new KvClient(pdConfig)) { + long l = System.currentTimeMillis(); + KvResponse kvResponse = c.put("S:Timestamp", String.valueOf(l)); + Pdpb.Error error = kvResponse.getHeader().getError(); + log.info(error.getMessage()); + assert error.getType().equals(Pdpb.ErrorType.OK); + } catch (Exception e) { + log.error("put license with error: {}", e); + } + } + // @Test + public void putKvLocal() { + PDConfig pdConfig = PDConfig.of("localhost:8686"); + pdConfig.setEnableCache(true); + try (KvClient c = new KvClient(pdConfig)) { + long l = System.currentTimeMillis(); + KvResponse kvResponse = c.put("S:Timestamp", String.valueOf(l)); + Pdpb.Error error = kvResponse.getHeader().getError(); + log.info(error.getMessage()); + assert error.getType().equals(Pdpb.ErrorType.OK); + } catch (Exception e) { + log.error("put license with error: {}", e); + } + } + + private Properties getYmlConfig(String yml) { + Yaml yaml = new Yaml(); + Iterable load = yaml.loadAll(yml); + Iterator iterator = load.iterator(); + Properties properties = new Properties(); + while (iterator.hasNext()) { + Map next = (Map) iterator.next(); + map2Properties(next, "", properties); + } + return properties; + } + + private void map2Properties(Map map, String prefix, Properties properties) { + + for (Map.Entry entry : map.entrySet()) { + String key = entry.getKey(); + String newPrefix = prefix == null || prefix.length() == 0 ? key : prefix + "." 
+ key; + Object value = entry.getValue(); + if (!(value instanceof Map)) { + properties.put(newPrefix, value); + } else { + map2Properties((Map) value, newPrefix, properties); + } + + } + } + +} \ No newline at end of file diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java new file mode 100644 index 0000000000..a2cabe7ad4 --- /dev/null +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java @@ -0,0 +1,94 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; + +import org.apache.hugegraph.pd.client.test.HgPDTestUtil; +import org.apache.hugegraph.pd.pulse.PulseServerNotice; +import org.junit.BeforeClass; +// import org.junit.Test; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/8 + */ +public class PDPulseTest { + private static PDClient pdClient; + + private long storeId = 0; + private String storeAddress = "localhost"; + private String graphName = "graph1"; + + @BeforeClass + public static void beforeClass() throws Exception { + PDConfig pdConfig = PDConfig.of("localhost:8686"); + pdConfig.setEnableCache(true); + pdClient = PDClient.create(pdConfig); + pdClient.getLeader(); + } + + // @Test + public void listen() { + + PDPulse pulse = pdClient.getPulseClient(); + CountDownLatch latch = new CountDownLatch(60); + + PDPulse.Notifier notifier1 = pulse.connectPartition(new PulseListener(latch, "listener1")); + PDPulse.Notifier notifier2 = pulse.connectPartition(new PulseListener(latch, "listener2")); + PDPulse.Notifier notifier3 = pulse.connectPartition(new PulseListener(latch, "listener3")); + + try { + latch.await(120, TimeUnit.SECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } + PartitionHeartbeatRequest.Builder builder = PartitionHeartbeatRequest.newBuilder(); + + notifier1.notifyServer(builder); + + + notifier2.notifyServer(builder); + + notifier3.notifyServer(builder); + + notifier1.close(); + notifier2.close(); + notifier3.close(); + } + + + private class PulseListener implements PDPulse.Listener { + CountDownLatch latch = new CountDownLatch(10); + private String listenerName; + + private PulseListener(CountDownLatch latch, String listenerName) { + this.latch = latch; + this.listenerName = listenerName; + } + + @Override + public void onNext(T response) { + // println(this.listenerName+" res: "+response); + // this.latch.countDown(); + } + + @Override + public void onNotice(PulseServerNotice notice) { + HgPDTestUtil.println(this.listenerName + " ---> res: " + notice.getContent()); + + notice.ack(); + this.latch.countDown(); + } + + @Override + public void onError(Throwable throwable) { + HgPDTestUtil.println(this.listenerName + " error: " + throwable.toString()); + } + + @Override + public void onCompleted() { + HgPDTestUtil.println(this.listenerName + " is completed"); + } + } +} \ No newline at end of file diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java new file mode 100644 index 0000000000..7d7026f146 --- /dev/null +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java @@ -0,0 +1,74 @@ +package org.apache.hugegraph.pd.client; + +import org.apache.hugegraph.pd.client.test.HgPDTestUtil; +import 
org.apache.hugegraph.pd.watch.NodeEvent; + +import org.junit.BeforeClass; +// import org.junit.Test; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/8 + */ +@Deprecated +public class PDWatchTest { + private static PDClient pdClient; + + private long storeId = 0; + private String storeAddr = "localhost"; + private String graphName = "graph1"; + + @BeforeClass + public static void beforeClass() throws Exception { + pdClient = PDClient.create(PDConfig.of("localhost:9000")); + } + + // @Test + public void watch(){ + PDWatch watch=pdClient.getWatchClient(); + CountDownLatch latch = new CountDownLatch(10); + + PDWatch.Watcher watcher1=watch.watchPartition(new WatchListener(latch,"watcher1")); + PDWatch.Watcher watcher2=watch.watchPartition(new WatchListener(latch,"watcher2")); + PDWatch.Watcher watcher3=watch.watchPartition(new WatchListener(latch,"watcher3")); + + PDWatch.Watcher nodeWatcher1=watch.watchNode(new WatchListener(latch,"nodeWatcher1")); + + try { + latch.await(15, TimeUnit.SECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } + watcher1.close(); + watcher2.close(); + watcher3.close(); + } + + private class WatchListener implements PDWatch.Listener{ + CountDownLatch latch = new CountDownLatch(10); + private String watcherName; + + private WatchListener(CountDownLatch latch,String watcherName){ + this.latch=latch; + this.watcherName=watcherName; + } + + @Override + public void onNext(T response) { + HgPDTestUtil.println(this.watcherName + " res: " + response); + this.latch.countDown(); + } + + @Override + public void onError(Throwable throwable) { + HgPDTestUtil.println(this.watcherName + " error: " + throwable.toString()); + } + + @Override + public void onCompleted() { + HgPDTestUtil.println(this.watcherName + " is completed"); + } + } +} \ No newline at end of file diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java new file mode 100644 index 0000000000..f82535e210 --- /dev/null +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java @@ -0,0 +1,78 @@ +package org.apache.hugegraph.pd.client.test; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Iterator; +import java.util.List; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/8 + */ +public class HgPDTestUtil { + + public static void println(Object str) { + System.out.println(str); + } + + public static String toStr(byte[] b) { + if (b == null) return ""; + if (b.length == 0) return ""; + return new String(b, StandardCharsets.UTF_8); + } + + public static byte[] toBytes(String str) { + if (str == null) return null; + return str.getBytes(StandardCharsets.UTF_8); + } + + public static byte[] toBytes(long l) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.putLong(l); + return buffer.array(); + } + + private static byte[] toBytes(final int i) { + ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES); + buffer.putInt(i); + return buffer.array(); + } + + public static long toLong(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.put(bytes); + buffer.flip();//need flip + return buffer.getLong(); + } + + public static long toInt(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES); + buffer.put(bytes); + buffer.flip();//need flip + return 
buffer.getInt(); + } + + public static String padLeftZeros(String str, int n) { + return String.format("%1$" + n + "s", str).replace(' ', '0'); + } + + public static String toSuffix(int num, int length) { + return "-" + padLeftZeros(String.valueOf(num), length); + } + + public static int amountOf(List list) { + if (list == null) { + return 0; + } + return list.size(); + } + + public static int amountOf(Iterator iterator) { + if (iterator == null) return 0; + int count = 0; + while (iterator.hasNext()) { + iterator.next(); + count++; + } + return count; + } +} diff --git a/hg-pd-clitools/pom.xml b/hg-pd-clitools/pom.xml new file mode 100644 index 0000000000..2419d07b7e --- /dev/null +++ b/hg-pd-clitools/pom.xml @@ -0,0 +1,56 @@ + + + + hugegraph-pd-root + org.apache.hugegraph + 3.6.3-SNAPSHOT + + 4.0.0 + + hg-pd-clitools + + + com.baidu.hugegraph + hg-pd-client + 3.6.5-SNAPSHOT + + + junit + junit + 4.13.2 + test + + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + package + + single + + + + + + org.apache.hugegraph.pd.clitools.Main + + + + + jar-with-dependencies + + + + + + + + \ No newline at end of file diff --git a/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java b/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java new file mode 100644 index 0000000000..1e6c6cd651 --- /dev/null +++ b/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java @@ -0,0 +1,62 @@ +package org.apache.hugegraph.pd.clitools; + +import com.baidu.hugegraph.pd.client.PDClient; +import com.baidu.hugegraph.pd.client.PDConfig; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; + +public class Main { + + public static void main(String[] args) throws PDException { + + + if ( args.length < 3){ + String error = " usage: pd-address config key[=value] \n key list: " + + "\n\tenableBatchLoad"; + System.out.println(error); + System.exit(0); + } + String pd = args[0]; + String cmd = args[1]; + String param = args[2]; + System.out.println(pd + " " + cmd + " " + param); + System.out.println("Result: \n"); + switch (cmd){ + case "config": + doConfig(pd, param); + } + } + + public static void doConfig(String pd, String param) throws PDException { + PDClient pdClient = PDClient.create(PDConfig.of(pd)); + String[] pair = param.split("="); + String key = pair[0].trim(); + Object value = null; + if ( pair.length > 1) + value = pair[1].trim(); + if ( value == null){ + Metapb.PDConfig pdConfig = pdClient.getPDConfig(); + switch (key){ + case "enableBatchLoad": + // value = pdConfig.getEnableBatchLoad(); + break; + case "shardCount": + value = pdConfig.getShardCount(); + break; + } + + System.out.println("Get config " + key + "=" + value); + }else{ + Metapb.PDConfig.Builder builder = Metapb.PDConfig.newBuilder(); + switch (key){ + case "enableBatchLoad": + // builder.setEnableBatchLoad(Boolean.valueOf((String)value)); + case "shardCount": + builder.setShardCount(Integer.valueOf((String)value)); + } + pdClient.setPDConfig(builder.build()); + System.out.println("Set config " + key + "=" + value); + } + } + +} diff --git a/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java b/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java new file mode 100644 index 0000000000..30d0efb0b6 --- /dev/null +++ b/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java @@ -0,0 +1,61 @@ +package org.apache.hugegraph.pd.clitools; + +import 
com.baidu.hugegraph.pd.common.PDException; +// import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +public class MainTest { + // @Test + public void getConfig() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad"}); + } + // @Test + public void setBatchTrue() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad= true "}); + } + + // @Test + public void setBatchFalse() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad=false"}); + } + + // @Test + public void getConfig2() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount"}); + } + // @Test + public void setShardCount1() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount=1"}); + } + + // @Test + public void setShardCount3() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount=3"}); + } + + // @Test + public void test2(){ + Integer[] a = new Integer[] { 1, 0, 3, 2}; + List aa = Arrays.asList(a); + System.out.printf(test2sup(aa, aa.size(),0)?"TRUE":"FALSE"); + } + public static boolean test2sup (List arrays, int tail, int res) { + System.out.println(String.format("%d %d", tail, res)); + if (tail == 0) { + System.out.println(String.format("a = %d %d", tail, res)); + return false; + } else if(tail == 1) { + System.out.println(String.format("b = %d %d", arrays.get(0), res)); + return (arrays.get(0) == res); + } else if(tail == 2) { + System.out.println(String.format("c = %d %d %d", arrays.get(0), arrays.get(1), res)); + return (arrays.get(0) + arrays.get(1) == Math.abs(res)) || + (Math.abs(arrays.get(0) - arrays.get(1)) == Math.abs(res)); + } else { + return test2sup(arrays, tail - 1, res + arrays.get(tail - 1)) || + test2sup(arrays, tail - 1, res - arrays.get(tail - 1)); + } + } +} diff --git a/hg-pd-common/pom.xml b/hg-pd-common/pom.xml new file mode 100644 index 0000000000..89c8d3dca8 --- /dev/null +++ b/hg-pd-common/pom.xml @@ -0,0 +1,26 @@ + + + + 4.0.0 + + org.apache.hugegraph + hugegraph-pd-root + 3.6.5-SNAPSHOT + + hg-pd-common + + + 11 + 11 + + + + + com.baidu.hugegraph + hg-pd-grpc + ${project.version} + + + \ No newline at end of file diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java new file mode 100644 index 0000000000..f964329278 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java @@ -0,0 +1,94 @@ +package org.apache.hugegraph.pd.common; + +import java.util.Collection; +import java.util.Map; + +/** + * @author lynn.bond@hotmail.com + */ +public final class HgAssert { + + public static void isTrue(boolean expression, String message) { + if (message == null) throw new IllegalArgumentException("message is null"); + + if (!expression) { + throw new IllegalArgumentException(message); + } + } + + public static void isFalse(boolean expression, String message) { + isTrue(!expression, message); + } + + public static void isArgumentValid(byte[] bytes, String parameter) { + isFalse(isInvalid(bytes), "The argument is invalid: " + parameter); + } + + public static void isArgumentValid(String str, String parameter) { + isFalse(isInvalid(str), "The argument is invalid: " + parameter); + } + + public static void isArgumentNotNull(Object obj, String parameter) { + isTrue(obj != null, "The argument is null: " + parameter); + } + + public static void 
istValid(byte[] bytes, String msg) { + isFalse(isInvalid(bytes), msg); + } + + public static void isValid(String str, String msg) { + isFalse(isInvalid(str), msg); + } + + public static void isNotNull(Object obj, String msg) { + isTrue(obj != null, msg); + } + + public static boolean isContains(Object[] objs, Object obj) { + if (objs == null || objs.length == 0 || obj == null) return false; + for (Object item : objs) { + if (obj.equals(item)) return true; + } + return false; + } + + public static boolean isInvalid(String... strs) { + if (strs == null || strs.length == 0) return true; + for (String item : strs) { + if (item == null || "".equals(item.trim())) { + return true; + } + } + return false; + } + + public static boolean isInvalid(byte[] bytes) { + if (bytes == null || bytes.length == 0) return true; + return false; + } + + public static boolean isInvalid(Map map) { + if (map == null || map.isEmpty()) return true; + return false; + } + + public static boolean isInvalid(Collection list) { + if (list == null || list.isEmpty()) return true; + return false; + } + + public static boolean isContains(Collection list, T item) { + if (list == null || item == null) return false; + return list.contains(item); + } + + public static boolean isNull(Object... objs) { + if (objs == null) return true; + for (Object item : objs) { + if (item == null) { + return true; + } + } + return false; + } +} \ No newline at end of file diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java new file mode 100644 index 0000000000..641fa38e97 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java @@ -0,0 +1,113 @@ +package org.apache.hugegraph.pd.common; + +import java.io.Serializable; +import java.util.Objects; + +public class KVPair implements Serializable { + + /** + * Key of this Pair. + */ + private K key; + + /** + * Gets the key for this pair. + * + * @return key for this pair + */ + public K getKey() { + return key; + } + + public void setKey(K key){ + this.key = key; + } + + /** + * Value of this this Pair. + */ + private V value; + + /** + * Gets the value for this pair. + * + * @return value for this pair + */ + public V getValue() { + return value; + } + + + public void setValue(V value){ + this.value = value; + } + /** + * Creates a new pair + * + * @param key The key for this pair + * @param value The value to use for this pair + */ + public KVPair(K key, V value) { + this.key = key; + this.value = value; + } + + /** + *

String representation of this Pair.
+ *
+ * The default name/value delimiter '=' is always used.
+ *
+ * @return String representation of this Pair
+ */
+ @Override
+ public String toString() {
+ return key + "=" + value;
+ }
+
+ /**
+ * Generate a hash code for this Pair.
+ *
+ * The hash code is calculated using both the name and
+ * the value of the Pair.
+ *
+ * @return hash code for this Pair
+ */
+ @Override
+ public int hashCode() {
+ // name's hashCode is multiplied by an arbitrary prime number (13)
+ // in order to make sure there is a difference in the hashCode between
+ // these two parameters:
+ // name: a value: aa
+ // name: aa value: a
+ return key.hashCode() * 13 + (value == null ? 0 : value.hashCode());
+ }
+
+ /**
+ * Test this Pair for equality with another Object.
+ *
+ * If the Object to be tested is not a Pair or is null, then this method
+ * returns false.
+ *
+ * Two Pairs are considered equal if and only if
+ * both the names and values are equal.
+ * + * @param o the Object to test for + * equality with this Pair + * @return true if the given Object is + * equal to this Pair else false + */ + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o instanceof KVPair) { + KVPair pair = (KVPair) o; + if (!Objects.equals(key, pair.key)) return false; + if (!Objects.equals(value, pair.value)) return false; + return true; + } + return false; + } +} \ No newline at end of file diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java new file mode 100644 index 0000000000..77015c5c21 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java @@ -0,0 +1,29 @@ +package org.apache.hugegraph.pd.common; + +public class PDException extends Exception{ + private int errorCode = 0; + + public PDException(int error) { + super(String.format("Error code = %d", error)); + this.errorCode = error; + } + + public PDException(int error, String msg) { + super(msg); + this.errorCode = error; + } + + public PDException(int error, Throwable e) { + super(e); + this.errorCode = error; + } + + public PDException(int error, String msg, Throwable e) { + super(msg, e); + this.errorCode = error; + } + + public int getErrorCode() { + return errorCode; + } +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java new file mode 100644 index 0000000000..1be1dea183 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java @@ -0,0 +1,36 @@ +package org.apache.hugegraph.pd.common; + +/** + * @author zhangyingjie + * @date 2022/8/1 + **/ +public class PDRuntimeException extends RuntimeException { + + // public static final int LICENSE_ERROR = -11; + + private int errorCode = 0; + + public PDRuntimeException(int error) { + super(String.format("Error code = %d", error)); + this.errorCode = error; + } + + public PDRuntimeException(int error, String msg) { + super(msg); + this.errorCode = error; + } + + public PDRuntimeException(int error, Throwable e) { + super(e); + this.errorCode = error; + } + + public PDRuntimeException(int error, String msg, Throwable e) { + super(msg, e); + this.errorCode = error; + } + + public int getErrorCode() { + return errorCode; + } +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java new file mode 100644 index 0000000000..6f1968f604 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java @@ -0,0 +1,432 @@ +package org.apache.hugegraph.pd.common; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.google.common.collect.Range; +import com.google.common.collect.RangeMap; +import com.google.common.collect.TreeRangeMap; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +public class PartitionCache { + + // 每张图一个缓存 + private volatile Map> keyToPartIdCache; + // graphName + PartitionID组成key + private volatile Map partitionCache; + + private volatile Map shardGroupCache; + + 
private volatile Map storeCache; + + private volatile Map graphCache; + // 读写锁对象 + private ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + Lock writeLock = readWriteLock.writeLock(); + + public PartitionCache() { + keyToPartIdCache = new HashMap<>(); + partitionCache = new HashMap<>(); + shardGroupCache = new ConcurrentHashMap<>(); + storeCache = new ConcurrentHashMap<>(); + graphCache = new ConcurrentHashMap<>(); + } + + /** + * 根据partitionId返回分区信息 + * + * @param graphName + * @param partId + * @return + */ + public KVPair getPartitionById(String graphName, int partId) { + var partition = partitionCache.get(makePartitionKey(graphName, partId)); + if (partition != null) { + return new KVPair<>(partition, getLeaderShard(partId)); + } + return null; + } + + /** + * 返回key所在的分区信息 + * + * @param key + * @return + */ + public KVPair getPartitionByKey(String graphName, byte[] key) { + int code = PartitionUtils.calcHashcode(key); + return getPartitionByCode(graphName, code); + } + + /** + * 根据key的hashcode返回分区信息 + * + * @param graphName + * @param code + * @return + */ + public KVPair getPartitionByCode(String graphName, long code) { + RangeMap rangeMap = keyToPartIdCache.get(graphName); + if (rangeMap != null) { + Integer partId = rangeMap.get(code); + if (partId != null) { + return getPartitionById(graphName, partId); + } + } + return null; + } + + public List getPartitions(String graphName) { + List partitions = new ArrayList<>(); + // partitionCache key: graph name + partition id + partitionCache.forEach((k,v) -> { + if (k.startsWith(graphName)) { + partitions.add(v); + } + }); + + return partitions; + } + + public boolean addPartition(String graphName, int partId, Metapb.Partition partition) { + writeLock.lock(); + try { + // graphName + PartitionID组成key + Metapb.Partition old = partitionCache.get(makePartitionKey(graphName, partId)); + + if (old != null && old.equals(partition)) { + return false; + } + + Map> tmpKeyToPartIdCache = cloneKeyToPartIdCache(); + Map tmpPartitionCache = clonePartitionCache(); + + tmpPartitionCache.put(makePartitionKey(graphName, partId), partition); + if (!tmpKeyToPartIdCache.containsKey(graphName)) { + tmpKeyToPartIdCache.put(graphName, TreeRangeMap.create()); + } + + if (old != null) { + // old [1-3) 被 [2-3)覆盖了。当 [1-3) 变成[1-2) 不应该删除原先的[1-3) + // 当确认老的 start, end 都是自己的时候,才可以删除老的. 
(即还没覆盖) + var graphRange = tmpKeyToPartIdCache.get(graphName); + if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && + Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { + graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); + } + } + + tmpKeyToPartIdCache.get(graphName) + .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); + partitionCache = tmpPartitionCache; + keyToPartIdCache = tmpKeyToPartIdCache; + return true; + } finally { + writeLock.unlock(); + } + + } + + public void updatePartition(String graphName, int partId, Metapb.Partition partition) { + writeLock.lock(); + try { + Map> tmpKeyToPartIdCache = cloneKeyToPartIdCache(); + Map tmpPartitionCache = clonePartitionCache(); + + Metapb.Partition old = tmpPartitionCache.get(makePartitionKey(graphName, partId)); + + tmpPartitionCache.put(makePartitionKey(graphName, partId), partition); + + if (!tmpKeyToPartIdCache.containsKey(graphName)) { + tmpKeyToPartIdCache.put(graphName, TreeRangeMap.create()); + } + + if (old != null) { + var graphRange = tmpKeyToPartIdCache.get(graphName); + if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && + Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { + graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); + + } + } + + tmpKeyToPartIdCache.get(graphName) + .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); + partitionCache = tmpPartitionCache; + keyToPartIdCache = tmpKeyToPartIdCache; + } finally { + writeLock.unlock(); + } + } + + public boolean updatePartition(Metapb.Partition partition) { + + var graphName = partition.getGraphName(); + var partitionId = partition.getId(); + + var old = getPartitionById(graphName, partitionId); + if (old != null && Objects.equals(partition, old.getKey())) { + return false; + } + + updatePartition(graphName, partitionId, partition); + return true; + } + + public void removePartition(String graphName, int partId) { + writeLock.lock(); + try { + Map> tmpKeyToPartIdCache = cloneKeyToPartIdCache(); + Map tmpPartitionCache = clonePartitionCache(); + Metapb.Partition partition = tmpPartitionCache.remove(makePartitionKey(graphName, partId)); + if (partition != null) { + var graphRange = tmpKeyToPartIdCache.get(graphName); + + if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && + Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { + graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); + } + + } + partitionCache = tmpPartitionCache; + keyToPartIdCache = tmpKeyToPartIdCache; + // log.info("PartitionCache.removePartition : (after){}", debugCacheByGraphName(graphName)); + } finally { + writeLock.unlock(); + } + } + + /** + * remove partition id of graph name + * @param graphName + * @param id + */ + public void remove(String graphName, int id) { + removePartition(graphName, id); + } + + /** + * remove all partitions + */ + public void removePartitions() { + writeLock.lock(); + try { + partitionCache = new HashMap<>(); + keyToPartIdCache = new HashMap<>(); + } finally { + writeLock.unlock(); + } + } + + /** + * remove partition cache of graphName + * @param graphName + */ + public void removeAll(String graphName) { + writeLock.lock(); + try { + Map> tmpKeyToPartIdCache = cloneKeyToPartIdCache(); + Map tmpPartitionCache = clonePartitionCache(); + var itr = 
tmpPartitionCache.entrySet().iterator(); + while (itr.hasNext()) { + var entry = itr.next(); + if (entry.getKey().startsWith(graphName)) { + itr.remove(); + } + } + tmpKeyToPartIdCache.remove(graphName); + partitionCache = tmpPartitionCache; + keyToPartIdCache = tmpKeyToPartIdCache; + } finally { + writeLock.unlock(); + } + } + + private String makePartitionKey(String graphName, int partId) { + return graphName + "/" + partId; + } + + public boolean updateShardGroup(Metapb.ShardGroup shardGroup){ + Metapb.ShardGroup oldShardGroup = shardGroupCache.get(shardGroup.getId()); + if (oldShardGroup != null && oldShardGroup.equals(shardGroup)){ + return false; + } + shardGroupCache.put(shardGroup.getId(), shardGroup); + return true; + } + + public void deleteShardGroup(int shardGroupId){ + if (shardGroupCache.containsKey(shardGroupId)) { + shardGroupCache.remove(shardGroupId); + } + } + + public Metapb.ShardGroup getShardGroup(int groupId){ + return shardGroupCache.get(groupId); + } + + public boolean addStore(Long storeId, Metapb.Store store) { + Metapb.Store oldStore = storeCache.get(storeId); + if (oldStore != null && oldStore.equals(store)) { + return false; + } + storeCache.put(storeId, store); + return true; + } + + public Metapb.Store getStoreById(Long storeId) { + return storeCache.get(storeId); + } + + public void removeStore(Long storeId) { + storeCache.remove(storeId); + } + + public boolean hasGraph(String graphName) { + return getPartitions(graphName).size() > 0; + } + + public void updateGraph(Metapb.Graph graph) { + if (Objects.equals(graph, getGraph(graph.getGraphName()))) { + return; + } + graphCache.put(graph.getGraphName(), graph); + } + + public Metapb.Graph getGraph(String graphName) { + return graphCache.get(graphName); + } + + public List getGraphs() { + List graphs = new ArrayList<>(); + graphCache.forEach((k, v) -> { + graphs.add(v); + }); + return graphs; + } + + private Map> cloneKeyToPartIdCache() { + Map> cacheClone = new HashMap<>(); + keyToPartIdCache.forEach((k1, v1) -> { + cacheClone.put(k1, TreeRangeMap.create()); + v1.asMapOfRanges().forEach((k2, v2) -> { + cacheClone.get(k1).put(k2, v2); + }); + }); + return cacheClone; + } + + private Map clonePartitionCache() { + Map cacheClone = new HashMap<>(); + cacheClone.putAll(partitionCache); + return cacheClone; + } + + public void reset() { + writeLock.lock(); + try { + partitionCache = new HashMap<>(); + keyToPartIdCache = new HashMap<>(); + shardGroupCache = new ConcurrentHashMap<>(); + storeCache = new ConcurrentHashMap<>(); + graphCache = new ConcurrentHashMap<>(); + } finally { + writeLock.unlock(); + } + } + + public void clear(){ + reset(); + } + + public String debugCacheByGraphName(String graphName) { + StringBuilder builder = new StringBuilder(); + builder.append("Graph:").append(graphName).append(", cache info: range info: {"); + var rangeMap = keyToPartIdCache.get(graphName); + builder.append( rangeMap == null ? 
"" : rangeMap).append("}"); + + if (rangeMap != null) { + builder.append(", partition info : {"); + rangeMap.asMapOfRanges().forEach((k, v) -> { + var partition = partitionCache.get(makePartitionKey(graphName, v)); + builder.append("[part_id:").append(v); + if (partition != null) { + builder.append(", start_key:").append(partition.getStartKey()) + .append(", end_key:").append(partition.getEndKey()) + .append(", state:").append(partition.getState().name()); + } + builder.append("], "); + }); + builder.append("}"); + } + + builder.append(", graph info:{"); + var graph = graphCache.get(graphName); + if (graph != null) { + builder.append("partition_count:").append(graph.getPartitionCount()) + .append(", state:").append(graph.getState().name()); + } + builder.append("}]"); + return builder.toString(); + } + + public Metapb.Shard getLeaderShard(int partitionId){ + var shardGroup = shardGroupCache.get(partitionId); + if (shardGroup != null) { + for (Metapb.Shard shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + return shard; + } + } + } + + return null; + } + + public void updateShardGroupLeader(int partitionId, Metapb.Shard leader){ + if (shardGroupCache.containsKey(partitionId) && leader != null) { + if (! Objects.equals(getLeaderShard(partitionId), leader)) { + var shardGroup = shardGroupCache.get(partitionId); + var builder = Metapb.ShardGroup.newBuilder(shardGroup).clearShards(); + for (var shard : shardGroup.getShardsList()) { + builder.addShards( + Metapb.Shard.newBuilder() + .setStoreId(shard.getStoreId()) + .setRole(shard.getStoreId() == leader.getStoreId() ? + Metapb.ShardRole.Leader : Metapb.ShardRole.Follower) + .build() + ); + } + shardGroupCache.put(partitionId, builder.build()); + } + } + } + + public String debugShardGroup(){ + StringBuilder builder = new StringBuilder(); + builder.append("shard group cache:{"); + shardGroupCache.forEach((partitionId,shardGroup) ->{ + builder.append(partitionId).append("::{") + .append("version:").append(shardGroup.getVersion()) + .append(", conf_version:").append(shardGroup.getConfVer()) + .append(", state:").append(shardGroup.getState().name()) + .append(", shards:["); + + for (var shard : shardGroup.getShardsList()) { + builder.append("{store_id:").append(shard.getStoreId()) + .append(", role:").append(shard.getRole().name()) + .append("},"); + } + builder.append("], "); + }); + builder.append("}"); + return builder.toString(); + } +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java new file mode 100644 index 0000000000..b94225387c --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java @@ -0,0 +1,28 @@ +package org.apache.hugegraph.pd.common; + +public class PartitionUtils { + + public static final int MAX_VALUE = 0xffff; + + /** + * 计算key的hashcode + * + * @param key + * @return hashcode + */ + public static int calcHashcode(byte[] key) { + final int p = 16777619; + int hash = (int) 2166136261L; + for (byte element : key) + hash = (hash ^ element) * p; + hash += hash << 13; + hash ^= hash >> 7; + hash += hash << 3; + hash ^= hash >> 17; + hash += hash << 5; + hash = hash & PartitionUtils.MAX_VALUE; + if ( hash == PartitionUtils.MAX_VALUE ) + hash = PartitionUtils.MAX_VALUE - 1; + return hash; + } +} diff --git a/hg-pd-core/pom.xml b/hg-pd-core/pom.xml new file mode 100644 index 0000000000..904326841a --- /dev/null +++ b/hg-pd-core/pom.xml @@ 
-0,0 +1,72 @@ + + + 4.0.0 + + + org.apache.hugegraph + hugegraph-pd-root + 3.6.5-SNAPSHOT + + + hg-pd-core + + + 0.5.10 + + + + com.alipay.sofa + jraft-core + 1.3.9-hg-SNAPSHOT + + + org.rocksdb + rocksdbjni + + + + + org.rocksdb + rocksdbjni + 6.29.5 + + + com.baidu.hugegraph + hg-pd-grpc + + + org.springframework + spring-context + + + com.baidu.hugegraph + hg-pd-common + + + org.springframework.boot + spring-boot + + + org.projectlombok + lombok + + + org.apache.commons + commons-lang3 + 3.12.0 + + + com.google.code.gson + gson + 2.8.9 + + + + com.baidu.hugegraph + hg-store-grpc + ${project.version} + + + \ No newline at end of file diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java new file mode 100644 index 0000000000..2a9f8af475 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java @@ -0,0 +1,118 @@ +package org.apache.hugegraph.pd; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.ConfigMetaStore; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.raft.RaftStateListener; + +import com.baidu.hugegraph.pd.grpc.Metapb; + +import lombok.extern.slf4j.Slf4j; + +import java.util.List; + +@Slf4j +public class ConfigService implements RaftStateListener { + + private PDConfig pdConfig; + private ConfigMetaStore meta; + + public ConfigService(PDConfig config){ + this.pdConfig = config; + config.setConfigService(this); + meta = MetadataFactory.newConfigMeta(config); + } + + + public Metapb.PDConfig getPDConfig(long version) throws PDException { + return this.meta.getPdConfig(version); + } + public Metapb.PDConfig getPDConfig() throws PDException { + return this.meta.getPdConfig(0); + } + + public Metapb.PDConfig setPDConfig(Metapb.PDConfig mConfig) throws PDException { + Metapb.PDConfig oldCfg = getPDConfig(); + Metapb.PDConfig.Builder builder = oldCfg.toBuilder().mergeFrom(mConfig) + .setVersion(oldCfg.getVersion() + 1) + .setTimestamp(System.currentTimeMillis()); + mConfig = this.meta.setPdConfig(builder.build()); + log.info("PDConfig has been modified, new PDConfig is {}", mConfig); + updatePDConfig(mConfig); + return mConfig; + } + + public List getGraphSpace(String graphSpaceName) throws PDException { + return this.meta.getGraphSpace(graphSpaceName); + } + + public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDException { + return this.meta.setGraphSpace(graphSpace.toBuilder() + .setTimestamp(System.currentTimeMillis()) + .build()); + } + + /** + * 从存储中读取配置项,并覆盖全局的PDConfig对象 + + * @return + */ + public PDConfig loadConfig() { + try { + Metapb.PDConfig mConfig = this.meta.getPdConfig(0); + if ( mConfig == null ){ + mConfig = Metapb.PDConfig.newBuilder() + .setPartitionCount(pdConfig.getInitialPartitionCount()) + .setShardCount(pdConfig.getPartition().getShardCount()) + .setVersion(1) + .setTimestamp(System.currentTimeMillis()) + .setMaxShardsPerStore(pdConfig.getPartition().getMaxShardsPerStore()) + .build(); + this.meta.setPdConfig(mConfig); + } + pdConfig = updatePDConfig(mConfig); + } catch (Exception e) { + log.error("ConfigService loadConfig exception {}", e); + } + return pdConfig; + } + + public synchronized PDConfig updatePDConfig(Metapb.PDConfig mConfig){ + log.info("update pd config: mConfig:{}", mConfig); + pdConfig.getPartition().setShardCount(mConfig.getShardCount()); + 
pdConfig.getPartition().setTotalCount(mConfig.getPartitionCount()); + pdConfig.getPartition().setMaxShardsPerStore(mConfig.getMaxShardsPerStore()); + return pdConfig; + } + + public synchronized PDConfig setPartitionCount(int count){ + Metapb.PDConfig mConfig = null; + try { + mConfig = getPDConfig(); + mConfig = mConfig.toBuilder().setPartitionCount(count).build(); + setPDConfig(mConfig); + } catch (PDException e) { + log.error("ConfigService exception {}", e); + e.printStackTrace(); + } + return pdConfig; + } + + /** + * meta store中的数量 + * 由于可能会受分区分裂/合并的影响,原始的partition count不推荐使用 + * + * @return partition count of cluster + * @throws PDException when io error + */ + public int getPartitionCount() throws PDException { + return getPDConfig().getPartitionCount(); + } + + @Override + public void onRaftLeaderChanged() { + loadConfig(); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java new file mode 100644 index 0000000000..ba642d28bc --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java @@ -0,0 +1,62 @@ +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.IdMetaStore; +import org.apache.hugegraph.pd.meta.MetadataFactory; + +import com.baidu.hugegraph.pd.common.PDException; + +public class IdService { + + public PDConfig getPdConfig() { + return pdConfig; + } + + public void setPdConfig(PDConfig pdConfig) { + this.pdConfig = pdConfig; + } + + private PDConfig pdConfig; + private IdMetaStore meta; + + public IdService(PDConfig config) { + this.pdConfig = config; + meta = MetadataFactory.newHugeServerMeta(config); + } + + public long getId(String key, int delta) throws PDException { + return meta.getId(key, delta); + } + + public void resetId(String key) throws PDException { + meta.resetId(key); + } + + /** + * 获取自增循环不重复id, 达到上限后从0开始自增.自动跳过正在使用的cid + * @param key + * @param max + * @return + * @throws PDException + */ + public long getCId(String key, long max) throws PDException { + return meta.getCId(key, max); + } + public long getCId(String key, String name, long max) throws PDException { + return meta.getCId(key, name, max); + } + + /** + * 删除一个自增循环id + * @param key + * @param cid + * @return + * @throws PDException + */ + public long delCId(String key, long cid) throws PDException { + return meta.delCId(key, cid); + } + public long delCIdDelay(String key, String name, long cid) throws PDException { + return meta.delCIdDelay(key, name, cid); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java new file mode 100644 index 0000000000..fa73b29bd8 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java @@ -0,0 +1,295 @@ +package org.apache.hugegraph.pd; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.store.KV; + +import com.baidu.hugegraph.pd.grpc.kv.Kv; +import com.baidu.hugegraph.pd.grpc.kv.V; + +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; + +import com.google.protobuf.InvalidProtocolBufferException; +import lombok.extern.slf4j.Slf4j; + +import org.springframework.stereotype.Service; + +import java.nio.charset.Charset; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; 
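IdService above hands out id blocks through getId and reusable cyclic ids through getCId; the cyclic variant wraps around at the given maximum and skips ids that are still registered. A hypothetical usage sketch (config is assumed to be a fully initialized PDConfig; the key names are made up):

    IdService idService = new IdService(config);
    // Advance the "task_id" counter by a block of 100 and obtain a freshly allocated id.
    long taskId = idService.getId("task_id", 100);
    // Acquire a cyclic id in [0, 1024); it wraps to 0 at the limit and skips ids still in use.
    long cid = idService.getCId("graph_cid", 1024);
    // Release the cyclic id once it is no longer needed.
    idService.delCId("graph_cid", cid);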
+ +/** + * @author zhangyingjie + **/ +@Slf4j +@Service +public class KvService { + + + private PDConfig pdConfig; + private MetadataRocksDBStore meta; + // TODO 主前缀之后,增加类名做区分 + private static final String TTL_PREFIX = "T"; + private static final String KV_PREFIX = "K"; + private static final String LOCK_PREFIX = "L"; + public static final char KV_DELIMITER = '@'; + private static final String KV_PREFIX_DELIMITER = KV_PREFIX + KV_DELIMITER; + private static final byte[] EMPTY_VALUE = new byte[0]; + + public KvService(PDConfig config) { + this.pdConfig = config; + meta = new MetadataRocksDBStore(config); + } + + public PDConfig getPdConfig() { + return pdConfig; + } + + public void setPdConfig(PDConfig pdConfig) { + this.pdConfig = pdConfig; + } + + public void put(String key, String value) throws PDException { + V storeValue = V.newBuilder().setValue(value).setTtl(0).build(); + meta.put(getStoreKey(key), storeValue.toByteArray()); + // log.warn("add key with key-{}:value-{}", key, value); + } + + public void put(String key, String value, long ttl) throws PDException { + long curTime = System.currentTimeMillis(); + curTime += ttl; + V storeValue = V.newBuilder().setValue(value).setSt(ttl).setTtl(curTime).build(); + meta.put(getStoreKey(key), storeValue.toByteArray()); + meta.put(getTTLStoreKey(key, curTime), EMPTY_VALUE); + // log.warn("add key with key-{}:value-{}:ttl-{}", key, value, ttl); + } + + public String get(String key) throws PDException { + byte[] storeKey = getStoreKey(key); + return get(storeKey); + } + + public String get(byte[] keyBytes) throws PDException { + byte[] bytes = meta.getOne(keyBytes); + String v = getValue(keyBytes, bytes); + return v; + } + + private String getValue(byte[] keyBytes, byte[] valueBytes) throws PDException { + if (valueBytes == null || valueBytes.length == 0) return ""; + try { + V v = V.parseFrom(valueBytes); + if (v.getTtl() == 0 || v.getTtl() >= System.currentTimeMillis()) { + return v.getValue(); + } else { + meta.remove(keyBytes); + meta.remove(getTTLStoreKey(new String(keyBytes), v.getTtl())); + } + } catch (Exception e) { + log.error("parse value with error:{}", e.getMessage()); + throw new PDException(-1, e.getMessage()); + } + return null; + } + + public boolean keepAlive(String key) throws PDException { + byte[] bytes = meta.getOne(getStoreKey(key)); + try { + if (bytes == null || bytes.length == 0) { + return false; + } + V v = V.parseFrom(bytes); + if (v != null) { + long ttl = v.getTtl(); + long st = v.getSt(); + meta.remove(getTTLStoreKey(key, ttl)); + put(key, v.getValue(), st); + return true; + } else { + return false; + } + } catch (InvalidProtocolBufferException e) { + throw new PDException(-1, e.getMessage()); + } + } + + public Kv delete(String key) throws PDException { + byte[] storeKey = getStoreKey(key); + String value = this.get(storeKey); + meta.remove(storeKey); + Kv.Builder builder = Kv.newBuilder().setKey(key); + if (value != null) { + builder.setValue(value); + } + Kv kv = builder.build(); + // log.warn("delete kv with key :{}", key); + return kv; + } + + public List deleteWithPrefix(String key) throws PDException { + byte[] storeKey = getStoreKey(key); + //TODO to many rows for scan + List kvList = meta.scanPrefix(storeKey); + LinkedList kvs = new LinkedList<>(); + for (KV kv : kvList) { + String kvKey = new String(kv.getKey()).replaceFirst(KV_PREFIX_DELIMITER, ""); + String kvValue = getValue(kv.getKey(), kv.getValue()); + if (kvValue != null) kvs.add(Kv.newBuilder().setKey(kvKey).setValue(kvValue).build()); + } + 
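In the TTL scheme above, put(key, value, ttl) stores the value with an absolute deadline (now + ttl) plus a second index entry under the TTL prefix; get() lazily drops entries whose deadline has passed, keepAlive() re-arms the original window, and clearTTLData() sweeps the index for expired keys. A short usage sketch (hypothetical key and values; pdConfig is assumed to be initialized):

    KvService kv = new KvService(pdConfig);
    kv.put("node/1", "alive", 10_000);   // value expires ten seconds from now
    String v = kv.get("node/1");         // "alive" until the deadline passes, null afterwards
    kv.keepAlive("node/1");              // restores the full ten-second window
    kv.clearTTLData();                   // intended to be run periodically to purge expired entries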
meta.removeByPrefix(storeKey); + // log.warn("delete kv with key prefix :{}", key); + return kvs; + } + + /** + * scan result ranged from key start and key end + * + * @param keyStart + * @param keyEnd + * @return Records + * @throws PDException + */ + public Map scanRange(String keyStart, String keyEnd) throws PDException{ + List list = meta.scanRange(getStoreKey(keyStart), getStoreKey(keyEnd)); + Map map = new HashMap<>(); + for (KV kv : list) { + String kvKey = new String(kv.getKey()).replaceFirst(KV_PREFIX_DELIMITER, ""); + String kvValue = getValue(kv.getKey(), kv.getValue()); + if (kvValue != null) { + map.put(kvKey, kvValue); + } + } + return map; + } + + public Map scanWithPrefix(String key) throws PDException { + List kvList = meta.scanPrefix(getStoreKey(key)); + HashMap map = new HashMap<>(); + for (KV kv : kvList) { + String kvKey = new String(kv.getKey()).replaceFirst(KV_PREFIX_DELIMITER, ""); + String kvValue = getValue(kv.getKey(), kv.getValue()); + if (kvValue != null) { + map.put(kvKey, kvValue); + } + } + return map; + } + + public boolean locked(String key) throws PDException { + String lockKey = KvService.getKeyWithoutPrefix(KvService.LOCK_PREFIX, key); + Map allLock = scanWithPrefix(lockKey); + if (allLock == null || allLock.size() == 0) { + return false; + } else { + return true; + } + } + + private boolean owned(String key, long clientId) throws PDException { + String lockKey = KvService.getKeyWithoutPrefix(KvService.LOCK_PREFIX, key); + Map allLock = scanWithPrefix(lockKey); + if (allLock.size() == 0) return true; + for (Map.Entry entry : allLock.entrySet()) { + String entryKey = entry.getKey(); + String[] split = entryKey.split(String.valueOf(KV_DELIMITER)); + if (Long.valueOf(split[split.length - 1]).equals(clientId)) { + return true; + } + } + return false; + } + + public boolean lock(String key, long ttl, long clientId) throws PDException { + //TODO lock improvement + synchronized (KvService.class) { + if (!owned(key, clientId)) return false; + put(getLockKey(key, clientId), " ", ttl); + return true; + } + } + + public boolean lockWithoutReentrant(String key, long ttl, + long clientId) throws PDException { + synchronized (KvService.class) { + if (locked(key)) { + return false; + } + put(getLockKey(key, clientId), " ", ttl); + return true; + } + } + + + public boolean unlock(String key, long clientId) throws PDException { + synchronized (KvService.class) { + if (!owned(key, clientId)) return false; + delete(getLockKey(key, clientId)); + return true; + } + } + + public boolean keepAlive(String key, long clientId) throws PDException { + String lockKey = getLockKey(key, clientId); + return keepAlive(lockKey); + } + + public String getLockKey(String key, long clientId) { + return getKeyWithoutPrefix(LOCK_PREFIX, key, clientId); + } + + public byte[] getStoreKey(String key) { + return getKeyBytes(key); + } + + public byte[] getTTLStoreKey(String key, long time) { + return getKeyBytes(TTL_PREFIX, time, key); + } + + public void clearTTLData() { + try { + byte[] ttlStartKey = getTTLStoreKey("", 0); + byte[] ttlEndKey = getTTLStoreKey("", System.currentTimeMillis()); + List kvList = meta.scanRange(ttlStartKey, ttlEndKey); + for (KV kv : kvList) { + String key = new String(kv.getKey()); + int index = key.indexOf(KV_DELIMITER, 2); + String delKey = key.substring(index + 1); + delete(delKey); + meta.remove(kv.getKey()); + } + } catch (Exception e) { + log.error("clear ttl data with error :", e); + } + } + + public static String getKey(Object... 
keys) { + StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper(); + builder.append(KV_PREFIX).append(KV_DELIMITER); + for (Object key : keys) { + builder.append(key == null ? "" : key).append(KV_DELIMITER); + } + return builder.substring(0, builder.length() - 1); + } + + public static byte[] getKeyBytes(Object... keys) { + String key = getKey(keys); + return key.getBytes(Charset.defaultCharset()); + } + + public static String getKeyWithoutPrefix(Object... keys) { + StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper(); + for (Object key : keys) { + builder.append(key == null ? "" : key).append(KV_DELIMITER); + } + return builder.substring(0, builder.length() - 1); + } + + public static String getDelimiter() { + return String.valueOf(KV_DELIMITER); + } + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java new file mode 100644 index 0000000000..7336a583a5 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java @@ -0,0 +1,57 @@ +package org.apache.hugegraph.pd; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.LogMeta; +import org.apache.hugegraph.pd.meta.MetadataFactory; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.google.protobuf.Any; +import com.google.protobuf.GeneratedMessageV3; +import lombok.extern.slf4j.Slf4j; + +import org.springframework.stereotype.Service; + +import java.util.List; + +/** + * @author zhangyingjie + * @date 2022/3/29 + **/ +@Slf4j +@Service +public class LogService { + + private LogMeta logMeta; + + public static final String GRPC = "GRPC"; + public static final String REST = "REST"; + public static final String TASK = "TASK"; + + public static final String NODE_CHANGE = "NODE_CHANGE"; + public static final String PARTITION_CHANGE = "PARTITION_CHANGE"; + + public LogService(PDConfig pdConfig) { + logMeta = MetadataFactory.newLogMeta(pdConfig); + } + + public List getLog(String action, Long start, Long end) throws PDException { + return logMeta.getLog(action, start, end); + } + + public void insertLog(String action, String message, GeneratedMessageV3 target) { + try { + Metapb.LogRecord logRecord = Metapb.LogRecord.newBuilder() + .setAction(action) + .setMessage(message) + .setTimestamp(System.currentTimeMillis()) + .setObject(Any.pack(target)) + .build(); + logMeta.insertLog(logRecord); + } catch (PDException e) { + log.debug("Insert log with error:{}", e); + } + + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java new file mode 100644 index 0000000000..3051f0ecab --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java @@ -0,0 +1,33 @@ +package org.apache.hugegraph.pd; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard; +import com.baidu.hugegraph.pd.grpc.pulse.CleanPartition; +import com.baidu.hugegraph.pd.grpc.pulse.DbCompaction; +import com.baidu.hugegraph.pd.grpc.pulse.MovePartition; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition; +import com.baidu.hugegraph.pd.grpc.pulse.TransferLeader; + +import java.util.List; + +/** + * 分区命令监听 + */ +public interface 
PartitionInstructionListener { + void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException; + + void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws PDException; + + void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws PDException; + + void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException; + + void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException; + + void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException; + + void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) throws PDException; + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java new file mode 100644 index 0000000000..d8df4afe9c --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java @@ -0,0 +1,1448 @@ +package org.apache.hugegraph.pd; + +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.common.PartitionUtils; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.PartitionMeta; +import org.apache.hugegraph.pd.meta.TaskInfoMeta; +import org.apache.hugegraph.pd.raft.RaftStateListener; + +import com.baidu.hugegraph.pd.grpc.MetaTask; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard; +import com.baidu.hugegraph.pd.grpc.pulse.CleanPartition; +import com.baidu.hugegraph.pd.grpc.pulse.CleanType; +import com.baidu.hugegraph.pd.grpc.pulse.ConfChangeType; +import com.baidu.hugegraph.pd.grpc.pulse.DbCompaction; +import com.baidu.hugegraph.pd.grpc.pulse.MovePartition; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition; +import com.baidu.hugegraph.pd.grpc.pulse.TransferLeader; + +import lombok.extern.slf4j.Slf4j; + +import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * 分区管理 + */ +@Slf4j +public class PartitionService implements RaftStateListener { + + private final long Partition_Version_Skip = 0x0F; + private PartitionMeta partitionMeta; + private final StoreNodeService storeService; + + private PDConfig pdConfig; + // 分区命令监听 + private List instructionListeners; + + // 分区状态监听 + private List statusListeners; + + public PartitionService(PDConfig config, StoreNodeService storeService) { + this.pdConfig = config; + this.storeService = storeService; + partitionMeta = MetadataFactory.newPartitionMeta(config); + instructionListeners = Collections.synchronizedList(new ArrayList()); + statusListeners = Collections.synchronizedList(new ArrayList()); + } + + public void init() throws PDException{ + partitionMeta.init(); + storeService.addStatusListener(new StoreStatusListener() { + @Override + public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, Metapb.StoreState status) { + if ( status == Metapb.StoreState.Tombstone){ + // Store被停机,通知所有该store所有分区,迁移数据 + 
storeOffline(store); + } + } + + @Override + public void onGraphChange(Metapb.Graph graph, + Metapb.GraphState stateOld, + Metapb.GraphState stateNew) { + + } + + @Override + public void onStoreRaftChanged(Metapb.Store store) { + + } + }); + } + + /** + * 返回Key所属的partition + * + * @param graphName + * @param key + * @return + */ + public Metapb.PartitionShard getPartitionShard(String graphName, byte[] key) throws PDException { + long code = PartitionUtils.calcHashcode(key); + return getPartitionByCode(graphName, code); + } + + /** + * 根据hashcode返回所属的partition + * + * @param graphName + * @param code + * @return + */ + public Metapb.PartitionShard getPartitionByCode(String graphName, long code) throws PDException { + if ( code < 0 || code >= PartitionUtils.MAX_VALUE) { + throw new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "code error"); + } + // 根据Code查找分区id,如果没有找到,创建新的分区 + Metapb.Partition partition = partitionMeta.getPartitionByCode(graphName, code); + + if (partition == null) { + synchronized (this) { + if (partition == null) { + partition = newPartition(graphName, code); + } + } + } + + Metapb.PartitionShard partShard = Metapb.PartitionShard.newBuilder() + .setPartition(partition) + .setLeader(storeService.getLeader(partition, 0)) + .build(); + log.debug("{} Partition get code = {}, partition id = {}, start = {}, end = {}, leader = {}", + graphName, (code), partition.getId(), partition.getStartKey(), partition.getEndKey(), partShard.getLeader()); + + return partShard; + } + + /** + * 根据ID返回分区信息 + * + * @param graphName + * @param partId + * @return + * @throws PDException + */ + public Metapb.PartitionShard getPartitionShardById(String graphName, int partId) throws PDException { + Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partId); + if (partition == null) { + return null; + } + + Metapb.PartitionShard partShard = Metapb.PartitionShard.newBuilder() + .setPartition(partition) + // 此处需要返回正确的leader,暂时默认取第一个 + .setLeader(storeService.getLeader(partition, 0)) + .build(); + + return partShard; + } + + public Metapb.Partition getPartitionById(String graphName, int partId) throws PDException { + return partitionMeta.getPartitionById(graphName, partId); + } + + public List getPartitionById(int partId) throws PDException { + return partitionMeta.getPartitionById(partId); + } + + /** + * 获取图的所有分区 + */ + public List getPartitions() { + return partitionMeta.getPartitions(); + } + + public List getPartitions(String graphName){ + if ( StringUtils.isAllEmpty(graphName)) { + return partitionMeta.getPartitions(); + } + return partitionMeta.getPartitions(graphName); + } + + /** + * 查找在store上的所有分区 + * @param store + * @return + */ + public List getPartitionByStore(Metapb.Store store) throws PDException { + List partitions = new ArrayList<>(); + getGraphs().forEach(graph -> { + getPartitions(graph.getGraphName()).forEach(partition -> { + try { + storeService.getShardGroup(partition.getId()).getShardsList().forEach(shard -> { + if (shard.getStoreId() == store.getId()){ + partitions.add(partition); + } + }); + } catch (PDException e) { + throw new RuntimeException(e); + } + }); + }); + return partitions; + } + + /** + * 产生一个新的分区 + * + * @param graphName + * @return + */ + private Metapb.Partition newPartition(String graphName, long code) throws PDException { + Metapb.Graph graph = partitionMeta.getAndCreateGraph(graphName); + int partitionSize = PartitionUtils.MAX_VALUE / graph.getPartitionCount(); + if (PartitionUtils.MAX_VALUE % graph.getPartitionCount() != 0) { + // 有余数,分区除不尽 
+ partitionSize++; + } + + int partitionId = (int) (code / partitionSize); + long startKey = (long) partitionSize * partitionId; + long endKey = (long) partitionSize * (partitionId + 1); + + // 检查本地 + Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partitionId); + if (partition == null) { + storeService.allocShards(null, partitionId); + + // 分配store + partition = Metapb.Partition.newBuilder() + .setId(partitionId) + .setVersion(0) + .setState(Metapb.PartitionState.PState_Normal) + .setStartKey(startKey) + .setEndKey(endKey) + .setGraphName(graphName) + .build(); + + log.info("Create newPartition {}", partition); + } + + partitionMeta.updatePartition(partition); + + return partition; + } + + /** + * 计算Key所属的分区,此处采用Hash映射的方法。 + * + * @param graphName + * @param key + * @return + */ + protected int getPartitionId(String graphName, byte[] key) throws PDException { + int code = PartitionUtils.calcHashcode(key); + Metapb.Partition partition = partitionMeta.getPartitionByCode(graphName, code); + return partition != null ? partition.getId() : -1; + } + + /** + * 获取key范围所跨越的所有分区 + * 暂时使用hashcode计算,正常做法,基于key进行查询 + * + * @param graphName + * @param startKey + * @param endKey + */ + public List scanPartitions(String graphName, byte[] startKey, byte[] endKey) + throws PDException { + int startPartId = getPartitionId(graphName, startKey); + int endPartId = getPartitionId(graphName, endKey); + + List partShards = new ArrayList<>(); + for (int id = startPartId; id <= endPartId; id++) { + Metapb.Partition partition = partitionMeta.getPartitionById(graphName, id); + partShards.add( + Metapb.PartitionShard.newBuilder() + .setPartition(partition) + // 此处需要返回正确的leader,暂时默认取第一个 + .setLeader(storeService.getLeader(partition, 0)) + .build() + ); + } + return partShards; + } + + public synchronized long updatePartition(List partitions) throws PDException { + for (Metapb.Partition pt : partitions) { + Metapb.Partition oldPt = getPartitionById(pt.getGraphName(), pt.getId()); + partitionMeta.updatePartition(pt); + onPartitionChanged(oldPt, pt); + } + return partitions.size(); + } + + /** + * 更新分区以及图的状态 + * @param graph + * @param partId + * @param state + * @throws PDException + */ + public synchronized void updatePartitionState(String graph, int partId, Metapb.PartitionState state) throws PDException { + Metapb.Partition partition = getPartitionById(graph, partId); + + if ( partition.getState() != state) { + Metapb.Partition newPartition = partitionMeta.updatePartition(partition.toBuilder() + .setState(state).build()); + + onPartitionChanged(partition, newPartition); + } + } + + public synchronized void updateGraphState(String graphName, Metapb.PartitionState state) throws PDException { + Metapb.Graph graph = getGraph(graphName); + if (graph != null) { + partitionMeta.updateGraph(graph.toBuilder() + .setState(state).build()); + } + } + + public synchronized long removePartition(String graphName, int partId) throws PDException { + log.info("Partition {}-{} removePartition", graphName, partId); + Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partId); + var ret = partitionMeta.removePartition(graphName, partId); + partitionMeta.reload(); + onPartitionRemoved(partition); + + // source中有些是 offline的,删除后,需要更新图的状态 + try { + Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; + for (Metapb.Partition pt : partitionMeta.getPartitions(partition.getGraphName())) { + if (pt.getState().getNumber() > state.getNumber()) { + state = pt.getState(); + } + } + 
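The loop just above derives the graph state from the highest-numbered, that is most severe, state among the graph's remaining partitions. The same aggregation can be expressed as a stream reduction; a sketch that assumes the surrounding scope of removePartition (Comparator is already imported in this class):

    Metapb.PartitionState worst = partitionMeta.getPartitions(partition.getGraphName()).stream()
            .map(Metapb.Partition::getState)
            .max(Comparator.comparingInt(Metapb.PartitionState::getNumber))
            .orElse(Metapb.PartitionState.PState_Normal);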
updateGraphState(partition.getGraphName(), state); + + state = Metapb.PartitionState.PState_Normal; + for(Metapb.ShardGroup group : storeService.getShardGroups()){ + if ( group.getState().getNumber() > state.getNumber()) { + state = group.getState(); + } + } + storeService.updateClusterStatus(state); + + }catch ( PDException e){ + log.error("onPartitionChanged", e); + } + + return ret; + } + + public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws PDException { + return partitionMeta.getPartitionStats(graphName, partitionId); + } + + /** + * 获取图的分区状态 + */ + public List getPartitionStatus(String graphName) + throws PDException { + return partitionMeta.getPartitionStats(graphName); + } + + /** + * 返回图的信息 + */ + public List getGraphs() throws PDException { + return partitionMeta.getGraphs(); + } + + public Metapb.Graph getGraph(String graphName) throws PDException { + return partitionMeta.getGraph(graphName); + } + + /** + * 删除图以及图的所有分区 + */ + public Metapb.Graph delGraph(String graphName) throws PDException { + log.info("delGraph {}", graphName); + Metapb.Graph graph = getGraph(graphName); + getPartitions(graphName).forEach(partition -> { + onPartitionRemoved(partition); + }); + partitionMeta.removeAllPartitions(graphName); + partitionMeta.removeGraph(graphName); + return graph; + } + + /** + * 修改图信息,需要通知到store + */ + public synchronized Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { + Metapb.Graph lastGraph = partitionMeta.getAndCreateGraph(graph.getGraphName()); + log.info("updateGraph graph: {}, last: {}", graph, lastGraph); + + int partCount = (graph.getGraphName().endsWith("/s") || graph.getGraphName().endsWith("/m")) ? + 1 : pdConfig.getPartition().getTotalCount(); + + // set the partition count to specified if legal. 
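The partition-count rule around this point, where graph names ending in "/s" or "/m" are pinned to a single partition and a caller-supplied count is honoured only when it is positive and no larger than that cap, can be condensed into a small pure function (a sketch only; the helper name is hypothetical):

    static int resolvePartitionCount(String graphName, int requested, int configuredTotal) {
        int cap = (graphName.endsWith("/s") || graphName.endsWith("/m")) ? 1 : configuredTotal;
        // A requested count in (0, cap] wins; anything else falls back to the cap.
        return (requested > 0 && requested <= cap) ? requested : cap;
    }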
+ if (graph.getPartitionCount() <= partCount && graph.getPartitionCount() > 0) { + partCount = graph.getPartitionCount(); + } + + if (partCount == 0) { + throw new PDException(10010 ,"update graph error, partition count = 0"); + } + + graph = lastGraph.toBuilder() + .mergeFrom(graph) + .setPartitionCount(partCount) + .build(); + partitionMeta.updateGraph(graph); + + // 分区数发生改变 + if (lastGraph.getPartitionCount() != graph.getPartitionCount()) { + log.info("updateGraph graph: {}, partition count changed from {} to {}", + graph.getGraphName(), lastGraph.getPartitionCount(), graph.getPartitionCount()); + // TODO 修改图的分区数,需要进行数据迁移。 + } + return graph; + } + + // partitionId -> (storeId -> shard committedIndex) + public Map> getCommittedIndexStats() throws PDException { + Map> map = new HashMap<>(); + for (Metapb.Store store : storeService.getActiveStores()) { + for (Metapb.RaftStats raftStats : store.getStats().getRaftStatsList()) { + int partitionID = raftStats.getPartitionId(); + if (!map.containsKey(partitionID)) { + map.put(partitionID, new HashMap<>()); + } + Map storeMap = map.get(partitionID); + if (!storeMap.containsKey(store.getId())) { + storeMap.put(store.getId(), raftStats.getCommittedIndex()); + } + } + } + return map; + } + + /** + * 存储被下线,迁移分区数据 + * @param store + */ + public void storeOffline(Metapb.Store store) { + try { + log.info("storeOffline store id: {}, address: {}, state: {}", + store.getId(), store.getAddress(), store.getState()); + List partitions = getPartitionByStore(store); + var partIds = new HashSet(); + for (Metapb.Partition p : partitions) { + if (partIds.contains(p.getId())) { + continue; + } + shardOffline(p, store.getId()); + partIds.add(p.getId()); + } + } catch (PDException e) { + log.error("storeOffline exception: ", e); + } + } + + /** + * 存储被下线,迁移分区数据 + */ + public synchronized void shardOffline(Metapb.Partition partition, long storeId) { + try { + log.info("shardOffline Partition {} - {} shardOffline store : {}", + partition.getGraphName(), partition.getId(), storeId); + // partition = getPartitionById(partition.getGraphName(), partition.getId()); + // Metapb.Partition.Builder builder = Metapb.Partition.newBuilder(partition); + // builder.clearShards(); + // partition.getShardsList().forEach(shard -> { + // if (shard.getStoreId() != storeId) + // builder.addShards(shard); + // }); + // partition = builder.build(); + Metapb.Graph graph = getGraph(partition.getGraphName()); + reallocPartitionShards(graph, partition); + + } catch (PDException e) { + log.error("storeOffline exception: ", e); + } + } + + private boolean isShardListEquals(List list1, List list2){ + if (list1 == list2) { + return true; + }else if (list1 != null && list2 != null) { + + var s1 = list1.stream().map(Metapb.Shard::getStoreId).sorted(Long::compare).collect(Collectors.toList()); + var s2 = list2.stream().map(Metapb.Shard::getStoreId).sorted(Long::compare).collect(Collectors.toList()); + + if (s1.size() == s2.size()) { + for (int i = 0; i < s1.size(); i++) { + if (s1.get(i) != s2.get(i)) { + return false; + } + } + return true; + } + } + + return false; + } + + /** + * 重新分配shard + * @param graph + * @param partition + * @throws PDException + */ + public void reallocPartitionShards(Metapb.Graph graph, Metapb.Partition partition) throws PDException { + if (partition == null) { + return; + } + List originalShards = storeService.getShardList(partition.getId()); + + var shardGroup = storeService.getShardGroup(partition.getId()); + + List shards = storeService.reallocShards(shardGroup); + + 
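Note that isShardListEquals above compares the boxed store ids with !=, which for java.lang.Long is reference equality and only coincides with value equality for small cached values. An order-insensitive, value-based variant could look like the following sketch (Set would need to be imported; Collectors already is):

    private static boolean sameStoreIds(List<Metapb.Shard> a, List<Metapb.Shard> b) {
        if (a == null || b == null) {
            return a == b;
        }
        Set<Long> ids1 = a.stream().map(Metapb.Shard::getStoreId).collect(Collectors.toSet());
        Set<Long> ids2 = b.stream().map(Metapb.Shard::getStoreId).collect(Collectors.toSet());
        return ids1.equals(ids2);
    }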
if (isShardListEquals(originalShards, shards)) { + log.info("reallocPartitionShards:{} vs {}", shardGroup, shards); + // partition = Metapb.Partition.newBuilder(partition) + // .clearShards().addAllShards(shards) + // .build(); + // partitionMeta.updatePartition(partition); + fireChangeShard(partition, shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); + } + } + + public synchronized void reallocPartitionShards(String graphName, int partitionId) throws PDException { + reallocPartitionShards(partitionMeta.getGraph(graphName), + partitionMeta.getPartitionById(graphName, partitionId)); + } + + /** + * 迁移分区副本 + */ + public synchronized void movePartitionsShard(Integer partitionId, long fromStore, long toStore) { + try { + log.info("movePartitionsShard partitionId {} from store {} to store {}", partitionId, fromStore, toStore); + for (Metapb.Graph graph : getGraphs()) { + Metapb.Partition partition = this.getPartitionById(graph.getGraphName(), partitionId); + if (partition == null) { + continue; + } + + var shardGroup = storeService.getShardGroup(partitionId); + List shards = new ArrayList<>(); + shardGroup.getShardsList().forEach(shard -> { + if (shard.getStoreId() != fromStore) { + shards.add(shard); + } + }); + + shards.add(Metapb.Shard.newBuilder().setStoreId(toStore).setRole(Metapb.ShardRole.Follower).build()); + + // storeService.updateShardGroup(partitionId, shards, -1, -1); + // storeService.onShardGroupStatusChanged(shardGroup, newShardGroup); + fireChangeShard(partition, shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); + // shard group和 graph无关,迁移一个就够了 + break; + } + } catch (PDException e) { + log.error("Partition {} movePartitionsShard exception {}", partitionId, e); + } + } + + /** + * 把集群中所有的分区,拆成split + * @param splits 拆分分区 + */ + public synchronized void splitPartition(List> splits) throws PDException { + var tasks = new HashMap>>(); + + for (var pair : splits){ + for (var partition : getPartitionById(pair.getKey())){ + if (!tasks.containsKey(partition.getGraphName())) { + tasks.put(partition.getGraphName(), new ArrayList<>()); + } + tasks.get(partition.getGraphName()).add(pair); + } + } + + for (var entry : tasks.entrySet()) { + splitPartition(getGraph(entry.getKey()), entry.getValue()); + } + } + + /** + * 分区分裂, 把一个图拆分到N 个 + * @param graph graph + * @param toCount target count + * @throws PDException + */ + + public synchronized void splitPartition(Metapb.Graph graph, int toCount) throws PDException{ + + var partitionCount = getPartitions(graph.getGraphName()).size(); + var maxShardsPerStore = pdConfig.getPartition().getMaxShardsPerStore(); + var shardCount = pdConfig.getPartition().getShardCount(); + + if ( shardCount * toCount > storeService.getActiveStores().size() * maxShardsPerStore){ + throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "can't satisfy target shard group count, reached the upper limit of the cluster"); + } + + if (toCount % partitionCount != 0 || toCount <= partitionCount) { + throw new PDException(Pdpb.ErrorType.Invalid_Split_Partition_Count_VALUE, + "invalid split partition count, make sure to count is N time of current partition count"); + } + + // 由于是整数倍数,扩充因子为 toCount / current count + var splitCount = toCount / partitionCount; + var list = new ArrayList>(); + for (int i = 0 ; i < partitionCount; i ++) { + list.add(new KVPair<>(i, splitCount)); + } + + splitPartition(graph, list); + } + + private synchronized void splitPartition(Metapb.Graph graph, List> splits) + throws PDException { + var taskInfoMeta = 
storeService.getTaskInfoMeta(); + if (taskInfoMeta.scanSplitTask(graph.getGraphName()).size() > 0) { + return; + } + + splits.sort(Comparator.comparing(KVPair::getKey)); + log.info("split partition, graph: {}, splits:{}", graph, splits); + + // 从最后一个partition下标开始 + var i = getPartitions(graph.getGraphName()).size(); + + for (var pair : splits) { + Metapb.Partition partition = partitionMeta.getPartitionById(graph.getGraphName(), pair.getKey()); + if (partition != null) { + var splitCount = pair.getValue(); + long splitLen = (partition.getEndKey() - partition.getStartKey()) / splitCount; + + List newPartitions = new ArrayList<>(); + // 第一个分区也就是原分区 + newPartitions.add(partition.toBuilder() + .setStartKey(partition.getStartKey()) + .setEndKey(partition.getStartKey() + splitLen) + .setId(partition.getId()) + .setState(Metapb.PartitionState.PState_Offline) + .build()); + + int idx = 0; + + for (; idx < splitCount - 2; idx++) { + newPartitions.add(partition.toBuilder() + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(newPartitions.get(idx).getEndKey() + splitLen) + .setId(i) + .setState(Metapb.PartitionState.PState_Offline) + .build()); + i += 1; + } + + newPartitions.add(partition.toBuilder() + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(partition.getEndKey()) + .setId(i) + .setState(Metapb.PartitionState.PState_Offline) + .build()); + i += 1; + + // try to save new partitions, and repair shard group + for (int j = 0; j < newPartitions.size(); j ++) { + var newPartition = newPartitions.get(j); + + if (j != 0) { + partitionMeta.updatePartition(newPartition); + } + // 创建shard group,如果为空,则按照partition的shard group为蓝本,去创建,保证在一个机器上 + // 如果存在,则由于各个图的分区数量不一样,需要store端复制到其他机器上 + var shardGroup = storeService.getShardGroup(newPartition.getId()); + if (shardGroup == null) { + shardGroup = storeService.getShardGroup(partition.getId()).toBuilder() + .setId(newPartition.getId()) + .build(); + storeService.getStoreInfoMeta().updateShardGroup(shardGroup); + updateShardGroupCache(shardGroup); + } + + // 做shard list的检查 + if (shardGroup.getShardsCount() != pdConfig.getPartition().getShardCount()) { + storeService.reallocShards(shardGroup); + } + } + + SplitPartition splitPartition = SplitPartition.newBuilder() + .addAllNewPartition(newPartitions) + .build(); + + fireSplitPartition(partition, splitPartition); + // 修改Partition状态为下线,任务完成后恢复为上线 + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Offline); + + // 记录事务 + var task = MetaTask.Task.newBuilder().setPartition(partition) + .setSplitPartition(splitPartition) + .build(); + taskInfoMeta.addSplitTask(pair.getKey(), task.getPartition(), task.getSplitPartition()); + } + } + } + + /** + * 转移leader到其他shard上. 
+ * 转移一个partition即可 + */ + public void transferLeader(Integer partId, Metapb.Shard shard) { + try { + var partitions = getPartitionById(partId); + if (partitions.size() > 0) { + fireTransferLeader(partitions.get(0), TransferLeader.newBuilder().setShard(shard).build()); + } +// for (Metapb.Graph graph : getGraphs()) { +// Metapb.Partition partition = this.getPartitionById(graph.getGraphName(), partId); +// if (partition != null) { +// fireTransferLeader(partition, TransferLeader.newBuilder().setShard(shard).build()); +// } +// } + } catch (PDException e) { + log.error("Partition {} transferLeader exception {}", partId, e); + } + } + + /** + * 分区合并,将整个集群的分区数,合并到toCount个 + * + * @param toCount 目标分区数 + * @throws PDException when query errors + */ + public void combinePartition(int toCount) throws PDException { + + int shardsTotalCount = getShardGroupCount(); + for (var graph : getGraphs()){ + // 对所有大于toCount分区的图,都进行缩容 + if (graph.getPartitionCount() > toCount){ + combineGraphPartition(graph, toCount, shardsTotalCount); + } + } + } + + /** + * 针对单个图,进行分区合并 + * + * @param graphName the name of the graph + * @param toCount the target partition count + * @throws PDException when query errors + */ + + public void combineGraphPartition(String graphName, int toCount) throws PDException { + combineGraphPartition(getGraph(graphName), toCount, getShardGroupCount()); + } + + /** + * 单图合并的内部实现 + * + * @param graph the name of the graph + * @param toCount the target partition count + * @param shardCount the shard count of the clusters + * @throws PDException when query errors + */ + private synchronized void combineGraphPartition(Metapb.Graph graph, int toCount, int shardCount) + throws PDException { + if (graph == null){ + throw new PDException(1, "Graph not exists, try to use full graph name, like /DEFAULT/GRAPH_NAME/g"); + } + + log.info("Combine graph {} partition, from {}, to {}, with shard count:{}", + graph.getGraphName(), graph.getPartitionCount(), toCount, shardCount); + + if (! checkTargetCount(graph.getPartitionCount(), toCount, shardCount)) { + log.error("Combine partition, illegal toCount:{}, graph:{}", toCount, graph.getGraphName()); + throw new PDException(2, "illegal partition toCount, should between 1 ~ shard group count and " + + " can be dived by shard group count"); + } + + var taskInfoMeta = storeService.getTaskInfoMeta(); + if (taskInfoMeta.scanMoveTask(graph.getGraphName()).size() > 0 ) { + throw new PDException(3, "Graph Combine process exists"); + } + + // 按照 key start 排序,合并后的key range 是连续的 + var partitions = getPartitions(graph.getGraphName()).stream() + .sorted(Comparator.comparing(Metapb.Partition::getStartKey)) + .collect(Collectors.toList()); + + // 分区编号不一定是连续的 + var sortPartitions = getPartitions(graph.getGraphName()) + .stream() + .sorted(Comparator.comparing(Metapb.Partition::getId)) + .collect(Collectors.toList()); + + var groupSize = partitions.size() / toCount; // merge group size + // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 共12个分区, 合并成4个 + // 方案:0,1,2 => 0, 3,4,5 -> 1, 6,7,8 ->2, 9,10,11 -> 3 + // 保证分区的连续性. + for (int i = 0; i < toCount; i ++ ){ + var startKey = partitions.get(i * groupSize).getStartKey(); + var endKey = partitions.get(i * groupSize + groupSize - 1).getEndKey(); + // compose the key range + // the start key and end key should be changed if combine success. 
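As a concrete illustration of the range composition above (all numbers hypothetical): combining four partitions of width 16384, covering [0, 16384), [16384, 32768), [32768, 49152) and [49152, 65536), into two targets gives groupSize = 4 / 2 = 2, and each target takes the startKey of its first member and the endKey of its last member:

    // target 0 <- parts {0, 1}: startKey = 0,     endKey = 32768
    // target 1 <- parts {2, 3}: startKey = 32768, endKey = 65536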
+ + var targetPartition = Metapb.Partition.newBuilder(sortPartitions.get(i)) + .setStartKey(startKey) + .setEndKey(endKey) + .build(); + + for (int j = 0; j < groupSize; j++) { + var partition = partitions.get(i * groupSize + j); + // 分区id相同,就跳过 + if (i == partition.getId()) { + continue; + } + + log.info("combine partition of graph :{}, from part id {} to {}", partition.getGraphName(), + partition.getId(), targetPartition.getId()); + MovePartition movePartition = MovePartition.newBuilder() + .setTargetPartition(targetPartition) + .setKeyStart(partition.getStartKey()) + .setKeyEnd(partition.getEndKey()) + .build(); + taskInfoMeta.addMovePartitionTask(partition, movePartition); + // source 下线 + updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Offline); + fireMovePartition(partition, movePartition); + } + // target 下线 + updatePartitionState(targetPartition.getGraphName(), targetPartition.getId(), + Metapb.PartitionState.PState_Offline); + } + + storeService.updateClusterStatus(Metapb.ClusterState.Cluster_Offline); + } + + /** + * 通过 storeService 获取 raft group 总数 + * @return the count of raft groups + */ + private int getShardGroupCount() { + try { + return Optional.ofNullable(storeService.getShardGroups()).orElseGet(ArrayList::new).size(); + }catch (PDException e){ + log.error("get shard group failed, error: {}", e); + } + return 0; + } + + /** + * 判断图分区是否能够从from合并到to个 + * + * @param fromCount 现在的分区数 + * @param toCount 目标分区数 + * @return true when available , or otherwise + */ + private boolean checkTargetCount(int fromCount, int toCount, int shardCount){ + // 要介于 1 ~ N 中间,而且可以整除 + if (toCount < 1 || toCount >= fromCount || fromCount % toCount != 0 || toCount >= shardCount) { + return false; + } + + return true; + } + + /** + * 处理分区心跳, 记录Leader信息 + * 检查term和version,比较是否是最新的消息 + * @param stats + */ + public void partitionHeartbeat(Metapb.PartitionStats stats) throws PDException { + + Metapb.ShardGroup shardGroup = storeService.getShardGroup(stats.getId()); + // shard group version changes + // (shard group 由pd控制, 在分裂等操作后,可能出现短暂不一致的情况,以pd为准) + // store控制shard leader + if (shardGroup != null && + (shardGroup.getVersion() < stats.getLeaderTerm() || shardGroup.getConfVer() < stats.getConfVer())) { + storeService.updateShardGroup(stats.getId(), + stats.getShardList(), stats.getLeaderTerm(), stats.getConfVer()); + } + + List partitions = getPartitionById(stats.getId()); + for (Metapb.Partition partition : partitions) { + // partitionMeta.getAndCreateGraph(partition.getGraphName()); + checkShardState(partition, stats); + } + // 统计信息 + partitionMeta.updatePartitionStats(stats.toBuilder() + .setTimestamp(System.currentTimeMillis()).build()); + } + + /** + * 检查shard状态,离线shard影响到分区状态 + * @param stats + */ + private void checkShardState(Metapb.Partition partition, Metapb.PartitionStats stats) { + + try { + int offCount = 0; + for (Metapb.ShardStats shard : stats.getShardStatsList()) { + if (shard.getState() == Metapb.ShardState.SState_Offline) + offCount++; + } + if (partition.getState() != Metapb.PartitionState.PState_Offline) { + if (offCount == 0) { + updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Normal); + } else if (offCount * 2 < stats.getShardStatsCount()) { + updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Warn); + } else + updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Warn); + } + } catch (Exception e) { + 
log.error("Partition {}-{} checkShardState exception {}", + partition.getGraphName(), partition.getId(), e); + } + } + + + + public void addInstructionListener(PartitionInstructionListener event){ + instructionListeners.add(event); + } + + public void addStatusListener(PartitionStatusListener listener){ + statusListeners.add(listener); + } + + /** + * 发起改变shard命令 + * @param changeType + */ + protected void fireChangeShard(Metapb.Partition partition, List shards, ConfChangeType changeType) { + log.info("fireChangeShard partition: {}-{}, changeType:{} {}", partition.getGraphName(), partition.getId(), changeType, shards); + instructionListeners.forEach(cmd -> { + try { + cmd.changeShard(partition, ChangeShard.newBuilder() + .addAllShard(shards).setChangeType(changeType).build()); + }catch (Exception e){ + log.error("fireChangeShard", e); + } + }); + } + + public void changeShard(int groupId, List shards) throws PDException { + var partitions = getPartitionById(groupId); + if (partitions.size() == 0) { + return; + } + fireChangeShard(partitions.get(0), shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); + } + + /** + * 发送分区分裂消息 + * @param partition + */ + protected void fireSplitPartition(Metapb.Partition partition, SplitPartition splitPartition) { + log.info("fireSplitPartition partition: {}-{}, split :{}", + partition.getGraphName(), partition.getId(), splitPartition); + instructionListeners.forEach(cmd -> { + try { + cmd.splitPartition(partition, splitPartition); + }catch (Exception e){ + log.error("fireSplitPartition", e); + } + }); + } + + /** + * 发送Leader切换消息 + */ + protected void fireTransferLeader(Metapb.Partition partition, TransferLeader transferLeader) { + log.info("fireTransferLeader partition: {}-{}, leader :{}", + partition.getGraphName(), partition.getId(), transferLeader); + instructionListeners.forEach(cmd -> { + try { + cmd.transferLeader(partition, transferLeader); + }catch (Exception e){ + log.error("fireSplitPartition", e); + } + }); + } + + /** + * 发送分区移动数据的消息 + * @param partition 原分区 + * @param movePartition 目标分区,包含 key range + */ + protected void fireMovePartition(Metapb.Partition partition, MovePartition movePartition) { + log.info("fireMovePartition partition: {} -> {}", + partition, movePartition); + + instructionListeners.forEach(cmd -> { + try { + cmd.movePartition(partition, movePartition); + }catch (Exception e){ + log.error("fireMovePartition", e); + } + }); + } + + protected void fireCleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) { + log.info("fireCleanPartition partition: {} -> just keep : {}->{}", + partition.getId(), cleanPartition.getKeyStart(), cleanPartition.getKeyEnd()); + + instructionListeners.forEach(cmd -> { + try { + cmd.cleanPartition(partition, cleanPartition); + }catch (Exception e){ + log.error("cleanPartition", e); + } + }); + } + + protected void fireChangePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) { + log.info("fireChangePartitionKeyRange partition: {}-{} -> key range {}", + partition.getGraphName(), partition.getId(), partitionKeyRange); + + instructionListeners.forEach(cmd -> { + try { + cmd.changePartitionKeyRange(partition, partitionKeyRange); + }catch (Exception e){ + log.error("cleanPartition", e); + } + }); + } + + /** + * 处理图迁移任务 + * @param task + */ + public synchronized void handleMoveTask(MetaTask.Task task) throws PDException { + var taskInfoMeta = storeService.getTaskInfoMeta(); + var partition = task.getPartition(); + var movePartition = task.getMovePartition(); + + 
MetaTask.Task pdMetaTask = taskInfoMeta.getMovePartitionTask(partition.getGraphName(), + movePartition.getTargetPartition().getId(), + partition.getId()); + + log.info("report move task, graph:{}, pid : {}->{}, state: {}", task.getPartition().getGraphName(), + task.getPartition().getId(), task.getMovePartition().getTargetPartition().getId(), task.getState()); + + // 已经被处理(前面有failed) + if (pdMetaTask != null) { + var newTask = pdMetaTask.toBuilder().setState(task.getState()).build(); + taskInfoMeta.updateMovePartitionTask(newTask); + + List subTasks = taskInfoMeta.scanMoveTask(partition.getGraphName()); + + var finished = subTasks.stream().allMatch(t -> + t.getState() == MetaTask.TaskState.Task_Success || t.getState() == MetaTask.TaskState.Task_Failure); + + if (finished) { + var allSuccess = subTasks.stream().allMatch(t -> t.getState() == MetaTask.TaskState.Task_Success); + if (allSuccess) { + log.info("graph:{} combine task all success!", partition.getGraphName()); + handleMoveTaskAllSuccess(subTasks, partition.getGraphName(), taskInfoMeta); + } else { + log.info("graph:{} combine task failed!", partition.getGraphName()); + handleMoveTaskIfFailed(partition.getGraphName(), taskInfoMeta); + } + } + } + } + + /** + * 当所有的迁移子任务成功: + * 1. 发送清理source分区指令 + * 2. 设置target上线, 更新key range, 更新 graph partition count + * 3. 删除move task,任务结束 + * + * @param subTasks all move sub tasks + * @param graphName graph name + * @param taskInfoMeta task info meta + * @throws PDException returns if write db failed + */ + private void handleMoveTaskAllSuccess(List subTasks, String graphName, + TaskInfoMeta taskInfoMeta) throws PDException { + + var targetPartitionIds = new HashSet(); + var targetPartitions = new ArrayList(); + var deleteFlags = subTasks.stream().map(task -> task.getMovePartition().getTargetPartition().getId()) + .collect(Collectors.toSet()); + + for (MetaTask.Task subTask : subTasks) { + var source = subTask.getPartition(); + var targetPartition = subTask.getMovePartition().getTargetPartition(); + // 是否处理过 + if (! targetPartitionIds.contains(targetPartition.getId())) { + // 更新range + var old = getPartitionById(targetPartition.getGraphName(), targetPartition.getId()); + var newPartition = Metapb.Partition.newBuilder(old) + .setStartKey(targetPartition.getStartKey()) + .setEndKey(targetPartition.getEndKey()) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + // 在 key range之前更新,避免store没有分区的问题, 需要到pd查询 + updatePartition(List.of(newPartition)); + targetPartitions.add(newPartition); + + // 发送key range 变更消息 + PartitionKeyRange partitionKeyRange = PartitionKeyRange.newBuilder() + .setPartitionId(old.getId()) + .setKeyStart(targetPartition.getStartKey()) + .setKeyEnd(targetPartition.getEndKey()) + .build(); + // 通知store + fireChangePartitionKeyRange(old.toBuilder().setState(Metapb.PartitionState.PState_Normal).build(), + partitionKeyRange); + + // 将 target 设置为上线. 
source 理论上可能被删掉,所以不处理 + updatePartitionState(newPartition.getGraphName(), newPartition.getId(), + Metapb.PartitionState.PState_Normal); + + targetPartitionIds.add(targetPartition.getId()); + } + + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart(source.getStartKey()) + .setKeyEnd(source.getEndKey()) + .setCleanType(CleanType.CLEAN_TYPE_EXCLUDE_RANGE) + // target 的 partition只需要清理数据,不需要删除分区 + .setDeletePartition(!deleteFlags.contains(source.getId())) + .build(); + + log.info("pd clean data: {}-{}, key range:{}-{}, type:{}, delete partition:{}", + source.getGraphName(), + source.getId(), + cleanPartition.getKeyStart(), + cleanPartition.getKeyEnd(), + CleanType.CLEAN_TYPE_EXCLUDE_RANGE, + cleanPartition.getDeletePartition()); + + // 清理掉被移动分区的数据 + fireCleanPartition(source, cleanPartition); + } + + // 更新key range, 本地更新,client更新 + // updatePartition(targetPartitions); + + // 更新target 分区状态,source 可能被删掉,所以不处理 + targetPartitions.forEach(p -> { + try { + updatePartitionState(p.getGraphName(), p.getId(), Metapb.PartitionState.PState_Normal); + } catch (PDException e) { + throw new RuntimeException(e); + } + }); + + partitionMeta.reload(); + + // 更新graph partition count + var graph = getGraph(graphName).toBuilder() + .setPartitionCount(targetPartitionIds.size()) + .build(); + updateGraph(graph); + + // 事务完成 + taskInfoMeta.removeMoveTaskPrefix(graphName); + } + + /** + * 如果缩容任务有失败的,回滚合并操作 + * 1. 清理原来的target 分区,将迁移过来的数据再删掉 + * 2. 将source/target 分区设置为上线 + * 3. 删除task,任务结束 + * + * @param graphName graph name + * @param taskInfoMeta task info meta + * @throws PDException return if write to db failed + */ + private void handleMoveTaskIfFailed(String graphName, TaskInfoMeta taskInfoMeta) throws PDException { + // 发送清理target分区的任务, 回滚target分区 + var targetPartitionIds = new HashSet(); + for (var metaTask : taskInfoMeta.scanMoveTask(graphName)){ + + var source = metaTask.getPartition(); + // 设置 source 为上线 + updatePartitionState(source.getGraphName(), source.getId(), Metapb.PartitionState.PState_Normal); + var movedPartition = metaTask.getMovePartition().getTargetPartition(); + + if (targetPartitionIds.contains(movedPartition.getId())) { + continue; + } + + var targetPartition = getPartitionById(graphName, movedPartition.getId()); + + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart(targetPartition.getStartKey()) + .setKeyEnd(targetPartition.getEndKey()) + .setCleanType(CleanType.CLEAN_TYPE_KEEP_RANGE) + .setDeletePartition(false) + .build(); + fireCleanPartition(targetPartition, cleanPartition); + targetPartitionIds.add(targetPartition.getId()); + + // 设置target 上线 + updatePartitionState(targetPartition.getGraphName(), targetPartition.getId(), + Metapb.PartitionState.PState_Normal); + } + // 清理掉任务列表 + taskInfoMeta.removeMoveTaskPrefix(graphName); + } + + /** + * 处理clean task + * @param task clean task + */ + public void handleCleanPartitionTask(MetaTask.Task task){ + log.info("clean task {} -{}, key range:{}~{}, report: {}", + task.getPartition().getGraphName(), + task.getPartition().getId(), + task.getCleanPartition().getKeyStart(), + task.getCleanPartition().getKeyEnd(), + task.getState() + ); + + // 如果失败重试? 
+ } + + public synchronized void handleSplitTask(MetaTask.Task task) throws PDException { + + var taskInfoMeta = storeService.getTaskInfoMeta(); + var partition = task.getPartition(); + + MetaTask.Task pdMetaTask = taskInfoMeta.getSplitTask(partition.getGraphName(), partition.getId()); + + log.info("report split task, graph:{}, pid : {}, state: {}", task.getPartition().getGraphName(), + task.getPartition().getId(), task.getState()); + + if (pdMetaTask != null) { + var newTask = pdMetaTask.toBuilder().setState(task.getState()).build(); + taskInfoMeta.updateSplitTask(newTask); + + List subTasks = taskInfoMeta.scanSplitTask(partition.getGraphName()); + + var finished = subTasks.stream().allMatch(t -> + t.getState() == MetaTask.TaskState.Task_Success || t.getState() == MetaTask.TaskState.Task_Failure); + + if (finished) { + var allSuccess = subTasks.stream().allMatch(t -> t.getState() == MetaTask.TaskState.Task_Success); + if (allSuccess) { + log.info("graph:{} split task all success!", partition.getGraphName()); + handleSplitTaskAllSuccess(subTasks, partition.getGraphName(), taskInfoMeta); + } else { + handleSplitTaskIfFailed(subTasks, partition.getGraphName(), taskInfoMeta); + } + } + } + } + + private void handleSplitTaskAllSuccess(List subTasks, String graphName, TaskInfoMeta taskInfoMeta) + throws PDException { + + int addedPartitions = 0; + var partitions = new ArrayList(); + for (MetaTask.Task subTask: subTasks) { + var source = subTask.getPartition(); + var newPartition = subTask.getSplitPartition().getNewPartitionList().get(0); + + // 发送key range 变更消息 + PartitionKeyRange partitionKeyRange = PartitionKeyRange.newBuilder() + .setPartitionId(source.getId()) + .setKeyStart(newPartition.getStartKey()) + .setKeyEnd(newPartition.getEndKey()) + .build(); + // 通知store + fireChangePartitionKeyRange(source, partitionKeyRange); + // 将 target 设置为上线. 
source 理论上可能被删掉,所以不处理 + + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart(newPartition.getStartKey()) + .setKeyEnd(newPartition.getEndKey()) + .setCleanType(CleanType.CLEAN_TYPE_KEEP_RANGE) + // target 的 partition只需要清理数据,不需要删除分区 + .setDeletePartition(false) + .build(); + + log.info("pd clean data: {}-{}, key range:{}-{}, type:{}, delete partition:{}", + source.getGraphName(), + source.getId(), + cleanPartition.getKeyStart(), + cleanPartition.getKeyEnd(), + CleanType.CLEAN_TYPE_EXCLUDE_RANGE, + cleanPartition.getDeletePartition()); + + fireCleanPartition(source, cleanPartition); + + // 更新partition state + for (var sp : subTask.getSplitPartition().getNewPartitionList()) { + partitions.add(sp.toBuilder().setState(Metapb.PartitionState.PState_Normal).build()); + } + + addedPartitions += subTask.getSplitPartition().getNewPartitionCount() - 1; + } + + updatePartition(partitions); + partitionMeta.reload(); + + var graph = getGraph(graphName); + + // set partition count + if (pdConfig.getConfigService().getPartitionCount() != storeService.getShardGroups().size()) { + pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size()); + log.info("set the partition count of config server to {}", storeService.getShardGroups().size()); + } + + // 更新graph partition count + var newGraph = graph.toBuilder() + .setPartitionCount(graph.getPartitionCount() + addedPartitions) + .build(); + updateGraph(newGraph); + + // 事务完成 + taskInfoMeta.removeSplitTaskPrefix(graphName); + } + + private void handleSplitTaskIfFailed(List subTasks, String graphName, TaskInfoMeta taskInfoMeta) + throws PDException { + for (var metaTask : subTasks){ + var splitPartitions = metaTask.getSplitPartition().getNewPartitionList(); + for (int i = 1; i < splitPartitions.size(); i ++) { + var split = splitPartitions.get(i); + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart(split.getStartKey()) + .setKeyEnd(split.getEndKey()) + .setCleanType(CleanType.CLEAN_TYPE_EXCLUDE_RANGE) + .setDeletePartition(true) + .build(); + + fireCleanPartition(split, cleanPartition); + } + + // set partition state normal + var partition = metaTask.getPartition(); + updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Normal); + } + // 清理掉任务列表 + taskInfoMeta.removeSplitTaskPrefix(graphName); + } + + /** + * 接收到Leader改变的消息 + * 更新图状态,触发分区变更 + */ + protected void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { + log.info("onPartitionChanged partition: {}", partition); + if (old != null && old.getState() != partition.getState()){ + // 状态改变,重置图的状态 + Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; + for (Metapb.Partition pt : partitionMeta.getPartitions(partition.getGraphName())) { + if (pt.getState().getNumber() > state.getNumber()) { + state = pt.getState(); + } + } + try { + updateGraphState(partition.getGraphName(), state); + }catch ( PDException e){ + log.error("onPartitionChanged", e); + } + + } + + statusListeners.forEach(e -> { + e.onPartitionChanged(old, partition); + }); + } + + protected void onPartitionRemoved(Metapb.Partition partition) { + log.info("onPartitionRemoved partition: {}", partition); + statusListeners.forEach(e -> { + e.onPartitionRemoved(partition); + }); + } + /** + * PD的leader发生改变,需要重新加载数据 + */ + @Override + public void onRaftLeaderChanged() { + log.info("Partition service reload cache from rocksdb"); + try { + partitionMeta.reload(); + } catch (PDException e) { + log.error("Partition 
meta reload exception {}", e); + } + } + + /** + * 分区状态发生改变,需要传播到图、集群 + * + * @param graph + * @param partId + * @param state + */ + public void onPartitionStateChanged(String graph, int partId, Metapb.PartitionState state) throws PDException { + updatePartitionState(graph, partId, state); + } + + /** + * Shard状态发生改变,需要传播到分区、图、集群 + * @param graph + * @param partId + * @param state + */ + public void onShardStateChanged(String graph, int partId, Metapb.PartitionState state){ + + } + + /** + * 发送rocksdb compaction 消息 + * @param partId + * @param tableName + */ + public void fireDbCompaction(int partId, String tableName) { + + try { + for (Metapb.Graph graph : getGraphs()) { + Metapb.Partition partition = partitionMeta.getPartitionById(graph.getGraphName(), partId); + + DbCompaction dbCompaction = DbCompaction.newBuilder() + .setTableName(tableName) + .build(); + instructionListeners.forEach(cmd -> { + try { + cmd.dbCompaction(partition, dbCompaction); + }catch (Exception e){ + log.error("firedbCompaction", e); + } + }); + } + } catch (PDException e) { + e.printStackTrace(); + } + + } + + public void updateShardGroupCache(Metapb.ShardGroup group){ + partitionMeta.getPartitionCache().updateShardGroup(group); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java new file mode 100644 index 0000000000..ad851ca546 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java @@ -0,0 +1,11 @@ +package org.apache.hugegraph.pd; + +import com.baidu.hugegraph.pd.grpc.Metapb; + +/** + * 分区状态监听 + */ +public interface PartitionStatusListener { + void onPartitionChanged(Metapb.Partition partition, Metapb.Partition newPartition); + void onPartitionRemoved(Metapb.Partition partition); +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java new file mode 100644 index 0000000000..20d764b307 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java @@ -0,0 +1,31 @@ +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.DiscoveryMetaStore; +import org.apache.hugegraph.pd.meta.MetadataFactory; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; +import com.baidu.hugegraph.pd.grpc.discovery.Query; + +/** + * @author zhangyingjie + * @date 2022/1/14 + **/ +public class RegistryService { + private PDConfig pdConfig; + private DiscoveryMetaStore meta; + + public RegistryService(PDConfig config){ + this.pdConfig = config; + meta = MetadataFactory.newDiscoveryMeta(config); + } + + public void register(NodeInfo nodeInfo, int outTimes) throws PDException { + meta.register(nodeInfo, outTimes); + } + public NodeInfos getNodes(Query query) { + return meta.getNodes(query); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java new file mode 100644 index 0000000000..dd254df32a --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java @@ -0,0 +1,9 @@ +package com.baidu.hugegraph.pd; + +import com.baidu.hugegraph.pd.grpc.Metapb; + +public interface ShardGroupStatusListener { + void 
onShardListChanged(Metapb.ShardGroup shardGroup, Metapb.ShardGroup newShardGroup); + + void onShardListOp(Metapb.ShardGroup shardGroup); +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java new file mode 100644 index 0000000000..be2b146501 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java @@ -0,0 +1,242 @@ +package org.apache.hugegraph.pd; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; + +import com.baidu.hugegraph.pd.grpc.Metapb; + +import lombok.extern.slf4j.Slf4j; + +import org.springframework.stereotype.Service; + +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + + +@Slf4j +@Service +public class StoreMonitorDataService { + private PDConfig pdConfig; + + private KvService kvService; + + private static final String MONITOR_DATA_PREFIX = "SMD"; + + /** + * the last timestamp of the store monitor data, + * used for determine the gap of store's heartbeat. + */ + private Map lastStoreStateTimestamp; + + + public StoreMonitorDataService(PDConfig pdConfig) { + this.pdConfig = pdConfig; + this.kvService = new KvService(pdConfig); + this.lastStoreStateTimestamp = new HashMap<>(); + } + + /** + * save the store stats + * @param storeStats + */ + public void saveMonitorData(Metapb.StoreStats storeStats) throws PDException { + long storeId = storeStats.getStoreId(); + /** + * load the latest store timestamp when start up or alter leader + */ + if (! 
lastStoreStateTimestamp.containsKey(storeId)){ + long lastTimestamp = getLatestStoreMonitorDataTimeStamp(storeId); + log.debug("store id : {}, last timestamp :{}", storeId, lastTimestamp); + lastStoreStateTimestamp.put(storeId, lastTimestamp); + } + + long current = System.currentTimeMillis() / 1000; + long interval = this.pdConfig.getStore().getMonitorInterval(); + + // exceed the interval + if (current - lastStoreStateTimestamp.getOrDefault(storeId, 0L) >= interval){ + saveMonitorDataToDb(storeStats, current); + log.debug("store id: {}, system info:{}", storeId, debugMonitorInfo(storeStats.getSystemMetricsList())); + lastStoreStateTimestamp.put(storeId, current); + } + } + + /** + * save the snapshot of store status + * @param storeStats store status + * @param ts, timestamp + * @return store status + * @throws PDException + */ + private void saveMonitorDataToDb(Metapb.StoreStats storeStats, long ts) throws PDException { + String key = getMonitorDataKey(storeStats.getStoreId(), ts); + log.debug("store id: {}, save monitor data info, ts:{}, my key:{}", storeStats.getStoreId(), ts, key); + kvService.put(key, extractMetricsFromStoreStatus(storeStats)); + } + + public String debugMonitorInfo(List systemInfo){ + StringBuilder sb = new StringBuilder(); + sb.append("["); + for(Metapb.RecordPair pair : systemInfo){ + sb.append(pair.getKey()); + sb.append(":"); + sb.append(pair.getValue()); + sb.append(","); + } + sb.append("]"); + return sb.toString(); + } + + /** + * get the historical monitor data by store id, by range(start, end) + * @param storeId store id + * @param start range start + * @param end range end + * @return list of store stats + */ + public Map getStoreMonitorData(long storeId, long start, long end) throws PDException { + log.debug("get monitor data, store id:{}, start{}, end:{}", + storeId, + getMonitorDataKey(storeId, start), + getMonitorDataKey(storeId, end)); + return kvService.scanRange(getMonitorDataKey(storeId, start), getMonitorDataKey(storeId, end)); + } + + /** + * for api service + * @param storeId + * @return + * @throws PDException + */ + public List> getStoreMonitorData(long storeId) throws PDException { + List> result = new LinkedList<>(); + long current = System.currentTimeMillis() / 1000; + long start = current - this.pdConfig.getStore().getRetentionPeriod(); + + try { + for(Map.Entry entry : getStoreMonitorData(storeId, start, current).entrySet()){ + String[] arr = entry.getKey().split(String.valueOf(MetadataKeyHelper.getDelimiter())); + Map map = new HashMap(); + long timestamp = Long.parseLong(arr[arr.length - 1]); + map.put("ts", timestamp); + for (String pair : entry.getValue().split(",")){ + String[] p = pair.split(":"); + if (p.length == 2){ + map.put(p[0], Long.parseLong(p[1])); + } + } + result.add(map); + } + result.sort((o1,o2)-> o1.get("ts").compareTo(o2.get("ts"))); + } catch (PDException e) { + log.error(e.getMessage()); + } + return result; + } + + /** + * for api service, export txt + * @param storeId + * @return + * @throws PDException + */ + public String getStoreMonitorDataText(long storeId) throws PDException { + + List> result = getStoreMonitorData(storeId); + StringBuilder sb = new StringBuilder(); + if (result.size() > 0) { + DateTimeFormatter dtf = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"); + Map lastRow = result.get(result.size() - 1); + List columns = new ArrayList<>(); + // construct columns, ts + sorted keys + columns.add("ts"); + columns.addAll(lastRow.keySet().stream() + .filter(x -> !"ts".equals(x)) + .sorted() + 
.collect(Collectors.toList())); + sb.append(String.join(",", columns).replace("\"", "")).append("\r\n"); + for (Map row : result){ + for ( String key : columns){ + // ts + , + ... + if ("ts".equals(key)){ + // format ts + sb.append(dtf.format(LocalDateTime.ofInstant(Instant.ofEpochSecond(row.get(key)), + ZoneId.systemDefault()))); + continue; + } else { + sb.append(",").append(row.getOrDefault(key, 0L)); + } + } + sb.append("\r\n"); + } + } + return sb.toString(); + } + + /** + * remove the monitor data of the store that before till(not include) + * @param storeId store id + * @param till expire time + * @return affect rows + */ + public int removeExpiredMonitorData(long storeId, long till) throws PDException { + String keyStart = getMonitorDataKey(storeId, 1); + String keyEnd = getMonitorDataKey(storeId, till); + int records = 0; + for (String key : kvService.scanRange(keyStart, keyEnd).keySet()){ + kvService.delete(key); + log.debug("remove monitor data, key: {}", key); + records += 1; + } + return records; + } + + /** + * get the latest timestamp of the store monitor data + * @param storeId + * @return timestamp(by seconds) + */ + public long getLatestStoreMonitorDataTimeStamp(long storeId){ + long maxId = 0L; + long current = System.currentTimeMillis() / 1000; + long start = current - this.pdConfig.getStore().getMonitorInterval(); + String keyStart = getMonitorDataKey(storeId, start); + String keyEnd = getMonitorDataKey(storeId, current); + try { + for(String key : kvService.scanRange(keyStart, keyEnd).keySet()){ + String[] arr = key.split(String.valueOf(MetadataKeyHelper.getDelimiter())); + maxId = Math.max(maxId, Long.parseLong(arr[arr.length - 1])); + } + } catch (PDException e) { + } + return maxId; + } + + private String getMonitorDataKey(long storeId, long ts){ + StringBuilder builder = new StringBuilder(); + builder.append(MONITOR_DATA_PREFIX) + .append(MetadataKeyHelper.getDelimiter()) + .append(storeId) + .append(MetadataKeyHelper.getDelimiter()) + .append(ts); + return builder.toString(); + } + + private String extractMetricsFromStoreStatus(Metapb.StoreStats storeStats){ + List list = new ArrayList<>(); + for(Metapb.RecordPair pair : storeStats.getSystemMetricsList()){ + list.add("\"" + pair.getKey() + "\":" + pair.getValue()); + } + return String.join(",", list); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java new file mode 100644 index 0000000000..d2eb8b49be --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java @@ -0,0 +1,996 @@ +package org.apache.hugegraph.pd; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Random; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.StoreInfoMeta; +import org.apache.hugegraph.pd.meta.TaskInfoMeta; + +import com.baidu.hugegraph.pd.ShardGroupStatusListener; +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Metapb.GraphMode; +import 
com.baidu.hugegraph.pd.grpc.Metapb.GraphModeReason; +import com.baidu.hugegraph.pd.grpc.Metapb.GraphState; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.pulse.ConfChangeType; +import com.google.gson.Gson; + +import lombok.extern.slf4j.Slf4j; + + +/** + * HgStore注册、保活管理类 + */ +@Slf4j +public class StoreNodeService { + + // Store状态监听 + private List statusListeners; + private List shardGroupStatusListeners; + + private PartitionService partitionService; + private StoreInfoMeta storeInfoMeta; + private TaskInfoMeta taskInfoMeta; + private Random random = new Random(System.currentTimeMillis()); + private Metapb.ClusterStats clusterStats; + private KvService kvService; + private ConfigService configService; + private PDConfig pdConfig; + + private static final Long STORE_HEART_BEAT_INTERVAL = 30000L; + + public StoreNodeService(PDConfig config){ + this.pdConfig = config; + storeInfoMeta = MetadataFactory.newStoreInfoMeta(pdConfig); + taskInfoMeta = MetadataFactory.newTaskInfoMeta(pdConfig); + shardGroupStatusListeners = Collections.synchronizedList(new ArrayList<>()); + statusListeners = Collections.synchronizedList(new ArrayList()); + clusterStats = Metapb.ClusterStats.newBuilder() + .setState(Metapb.ClusterState.Cluster_Not_Ready) + .setTimestamp(System.currentTimeMillis()) + .build(); + kvService = new KvService(pdConfig); + configService = new ConfigService(pdConfig); + } + + public void init(PartitionService partitionService){ + this.partitionService = partitionService; + partitionService.addStatusListener(new PartitionStatusListener() { + @Override + public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { + if (old != null && old.getState() != partition.getState()){ + // 状态改变,重置集群状态 + try { + List partitions = partitionService.getPartitionById(partition.getId()); + Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; + for(Metapb.Partition pt : partitions){ + if ( pt.getState().getNumber() > state.getNumber()) + state = pt.getState(); + } + updateShardGroupState(partition.getId(), state); + + for(Metapb.ShardGroup group : getShardGroups()){ + if ( group.getState().getNumber() > state.getNumber()) + state = group.getState(); + } + updateClusterStatus(state); + } catch (PDException e) { + log.error("onPartitionChanged exception {}", e); + } + } + } + + @Override + public void onPartitionRemoved(Metapb.Partition partition) { + + } + }); + } + + /** + * 集群是否准备就绪 + * @return + */ + public boolean isOK(){ + return this.clusterStats.getState().getNumber() < Metapb.ClusterState.Cluster_Offline.getNumber(); + } + + /** + * Store注册,记录Store的ip地址,首次注册需要生成store_ID + * @param store + */ + public Metapb.Store register(Metapb.Store store) throws PDException { + if (store.getId() == 0) { + // 初始注册,生成新id,保证Id不重复。 + store = newStoreNode(store); + } + + if (!storeInfoMeta.storeExists(store.getId())) { + log.error("Store id {} does not belong to this PD, address = {}", store.getId(), store.getAddress()); + // storeId不存在,抛出异常 + throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d doest not exist.", store.getId())); + } + + // 如果store状态为Tombstone拒绝注册。 + Metapb.Store lastStore = storeInfoMeta.getStore(store.getId()); + if (lastStore.getState() == Metapb.StoreState.Tombstone) { + log.error("Store id {} has been removed, Please reinitialize, address = {}", + store.getId(), store.getAddress()); + // storeId不存在,抛出异常 + throw new PDException(Pdpb.ErrorType.STORE_HAS_BEEN_REMOVED_VALUE, + String.format("Store 
id %d has been removed. %s", store.getId(), store.getAddress())); + } + + // offline或者up,或者在初始激活列表中,自动上线 + Metapb.StoreState storeState = lastStore.getState(); + if (storeState == Metapb.StoreState.Offline || storeState == Metapb.StoreState.Up + || inInitialStoreList(store)){ + storeState = Metapb.StoreState.Up; + } + else { + storeState = Metapb.StoreState.Pending; + } + + store = Metapb.Store.newBuilder(lastStore) + .setAddress(store.getAddress()) + .setRaftAddress(store.getRaftAddress()) + .setDataVersion(store.getDataVersion()) + .setDeployPath(store.getDeployPath()) + .setVersion(store.getVersion()) + .setDataPath(store.getDataPath()) + .setState(storeState).setCores(store.getCores()) + .clearLabels().addAllLabels(store.getLabelsList()) + .setLastHeartbeat(System.currentTimeMillis()).build(); + + long current = System.currentTimeMillis(); + boolean raftChanged = false; + // 上线状态的Raft Address 发生了变更 + if (!Objects.equals(lastStore.getRaftAddress(), store.getRaftAddress()) && storeState == Metapb.StoreState.Up) { + // 时间间隔太短,而且raft有变更,则认为是无效的store + if (current - lastStore.getLastHeartbeat() < STORE_HEART_BEAT_INTERVAL * 0.8){ + throw new PDException(Pdpb.ErrorType.STORE_PROHIBIT_DUPLICATE_VALUE, + String.format("Store id %d may be duplicate. addr: %s", store.getId(), store.getAddress())); + } else if(current - lastStore.getLastHeartbeat() > STORE_HEART_BEAT_INTERVAL * 1.2 ) { + // 认为发生了变更 + raftChanged = true; + } else { + // 等待下次注册 + return Metapb.Store.newBuilder(store).setId(0L).build(); + } + } + + // 存储store信息 + storeInfoMeta.updateStore(store); + if (storeState == Metapb.StoreState.Up) { + // 更新store 活跃状态 + storeInfoMeta.keepStoreAlive(store); + onStoreStatusChanged(store, Metapb.StoreState.Offline, Metapb.StoreState.Up); + checkStoreStatus(); + } + + // 等store信息保存后,再发送变更 + if (raftChanged) { + onStoreRaftAddressChanged(store); + } + + log.info("Store register, id = {} {}", store.getId(), store); + return store; + } + + private boolean inInitialStoreList(Metapb.Store store){ + return this.pdConfig.getInitialStoreMap().containsKey(store.getAddress()); + } + + /** + * 产生一个新的store对象 + * @param store + * @return + * @throws PDException + */ + private synchronized Metapb.Store newStoreNode(Metapb.Store store) throws PDException { + long id = random.nextLong() & Long.MAX_VALUE; + while( id == 0 || storeInfoMeta.storeExists(id) ) + id = random.nextLong() & Long.MAX_VALUE; + store = Metapb.Store.newBuilder(store) + .setId(id) + .setState(Metapb.StoreState.Pending) + .setStartTimestamp(System.currentTimeMillis()).build(); + storeInfoMeta.updateStore(store); + return store; + } + + /** + * 根据store_id返回Store信息 + * @param id + * @return + * @throws PDException + */ + public Metapb.Store getStore(long id) throws PDException { + Metapb.Store store = storeInfoMeta.getStore(id); + if ( store == null ) + throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %x doest not exist.", id)); + return store; + } + + /** + * 更新Store信息,检测Store状态的变化,通知到Hugestore + * + */ + public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDException { + log.info("updateStore storeId: {}, address: {}, state: {}", store.getId(), store.getAddress(), store.getState()); + Metapb.Store lastStore = storeInfoMeta.getStore(store.getId()); + if (lastStore == null) return null; + Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore).clearLabels().clearStats(); + store = builder.mergeFrom(store).build(); + if (store.getState() == Metapb.StoreState.Tombstone) { + List 
activeStores = getStores(); + if (lastStore.getState() == Metapb.StoreState.Up + && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + pdConfig.getMinStoreCount()); + } + } + + storeInfoMeta.updateStore(store); + if (store.getState() != Metapb.StoreState.Unknown && store.getState() != lastStore.getState()){ + // 如果希望将store下线 + if (store.getState() == Metapb.StoreState.Exiting) { + if (lastStore.getState() == Metapb.StoreState.Exiting){ + //如果已经是下线中的状态,则不作进一步处理 + return lastStore; + } + + List activeStores = this.getActiveStores(); + Map storeMap = new HashMap<>(); + activeStores.forEach(s -> { + storeMap.put(s.getId(), s); + }); + //如果store已经离线,直接从活跃中删除,如果store在线,暂时不从活跃中删除,等把状态置成Tombstone的时候再删除 + if (!storeMap.containsKey(store.getId())) { + log.info("updateStore removeActiveStores store {}", store.getId()); + storeInfoMeta.removeActiveStore(store); + } + storeTurnoff(store); + } else if (store.getState() == Metapb.StoreState.Offline) { //监控到store已经离线,从活跃中删除 + storeInfoMeta.removeActiveStore(store); + } else if (store.getState() == Metapb.StoreState.Tombstone) { + // 状态发生改变,Store关机,修改shardGroup,进行副本迁移 + log.info("updateStore removeActiveStores store {}", store.getId()); + storeInfoMeta.removeActiveStore(store); + // 存储下线 + storeTurnoff(store); + } else if (store.getState() == Metapb.StoreState.Up) { + storeInfoMeta.keepStoreAlive(store); + checkStoreStatus(); + } + onStoreStatusChanged(lastStore, lastStore.getState(), store.getState()); + } + return store; + } + + /** + * store被关机,重新分配shardGroup的shard + * @param store + * @throws PDException + */ + public synchronized void storeTurnoff(Metapb.Store store) throws PDException { + // 遍历ShardGroup,重新分配shard + for (Metapb.ShardGroup group : getShardGroupsByStore(store.getId())) { + Metapb.ShardGroup.Builder builder = Metapb.ShardGroup.newBuilder(group); + builder.clearShards(); + group.getShardsList().forEach(shard -> { + if (shard.getStoreId() != store.getId()) + builder.addShards(shard); + }); + reallocShards(builder.build()); + } + } + + /** + * 根据图名返回stores信息,如果graphName为空,返回所有store信息 + * @throws PDException + */ + public List getStores() throws PDException{ + return storeInfoMeta.getStores(null); + } + + public List getStores(String graphName) throws PDException{ + return storeInfoMeta.getStores(graphName); + } + + public List getStoreStatus(boolean isActive) throws PDException{ + return storeInfoMeta.getStoreStatus(isActive); + } + + public List getShardGroups() throws PDException { + return storeInfoMeta.getShardGroups(); + } + + public Metapb.ShardGroup getShardGroup(int groupId) throws PDException { + return storeInfoMeta.getShardGroup(groupId); + } + + public List getShardList(int groupId) throws PDException { + var shardGroup = getShardGroup(groupId); + if (shardGroup != null){ + return shardGroup.getShardsList(); + } + return new ArrayList<>(); + } + + public List getShardGroupsByStore(long storeId) throws PDException { + List shardGroups = new ArrayList<>(); + storeInfoMeta.getShardGroups().forEach(shardGroup -> { + shardGroup.getShardsList().forEach(shard -> { + if (shard.getStoreId() == storeId) + shardGroups.add(shardGroup); + }); + }); + return shardGroups; + } + + /** + * 返回活跃的store + * @param graphName + * @return + * @throws PDException + */ + public List getActiveStores(String graphName) throws PDException { + return storeInfoMeta.getActiveStores(graphName); + } + public List getActiveStores() throws 
PDException { + return storeInfoMeta.getActiveStores(); + } + + public List getTombStores() throws PDException { + List stores = new ArrayList<>(); + for (Metapb.Store store : this.getStores()) { + if (store.getState() == Metapb.StoreState.Tombstone) { + stores.add(store); + } + } + return stores; + } + + + public long removeStore(Long storeId) throws PDException { + return storeInfoMeta.removeStore(storeId); + } + /** + * 给partition分配store,根据图的配置,决定分配几个peer + * 分配完所有的shards,保存ShardGroup对象(store不变动,只执行一次) + */ + public synchronized List allocShards(Metapb.Graph graph, int partId) throws PDException { + // 多图共用raft分组,因此分配shard只依赖partitionId. + // 图根据数据大小可以设置分区的数量,但总数不能超过raft分组数量 + if (storeInfoMeta.getShardGroup(partId) == null) { + // 获取活跃的store key + // 根据 partionID计算store + List stores = storeInfoMeta.getActiveStores(); + + if (stores.size() == 0) { + throw new PDException(Pdpb.ErrorType.NO_ACTIVE_STORE_VALUE, "There is no any online store"); + } + + if (stores.size() < pdConfig.getMinStoreCount()) { + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + pdConfig.getMinStoreCount()); + } + + int shardCount = pdConfig.getPartition().getShardCount(); + shardCount = Math.min(shardCount, stores.size()); + //两个shard无法选出leader + // 不能为0 + + if (shardCount == 2 || shardCount < 1) { + shardCount = 1; + } + + // 一次创建完所有的ShardGroup,保证初始的groupID有序,方便人工阅读 + for (int groupId = 0; groupId < pdConfig.getConfigService().getPartitionCount(); groupId++) { + int storeIdx = groupId % stores.size(); //store分配规则,简化为取模 + List shards = new ArrayList<>(); + for (int i = 0; i < shardCount; i++) { + Metapb.Shard shard = Metapb.Shard.newBuilder().setStoreId(stores.get(storeIdx).getId()) + .setRole(i == 0 ? Metapb.ShardRole.Leader : Metapb.ShardRole.Follower) // + .build(); + shards.add(shard); + storeIdx = (storeIdx + 1) >= stores.size() ? 
0 : ++storeIdx; // 顺序选择 + } + + Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder() + .setId(groupId) + .setState(Metapb.PartitionState.PState_Normal) + .addAllShards(shards).build(); + + // new group + storeInfoMeta.updateShardGroup(group); + partitionService.updateShardGroupCache(group); + onShardGroupStatusChanged(group, group); + log.info("alloc shard group: id {}", groupId); + } + } + return storeInfoMeta.getShardGroup(partId).getShardsList(); + } + + /** + * 根据graph的shard_count,重新分配shard + * 发送变更change shard指令 + */ + public synchronized List reallocShards(Metapb.ShardGroup shardGroup) throws PDException { + List stores = storeInfoMeta.getActiveStores(); + + if (stores.size() == 0) { + throw new PDException(Pdpb.ErrorType.NO_ACTIVE_STORE_VALUE, "There is no any online store"); + } + + if (stores.size() < pdConfig.getMinStoreCount()) { + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + pdConfig.getMinStoreCount()); + } + + int shardCount = pdConfig.getPartition().getShardCount(); + shardCount = Math.min(shardCount, stores.size()); + if (shardCount == 2 || shardCount < 1) { + // 两个shard无法选出leader + // 不能为0 + shardCount = 1; + } + + List shards = new ArrayList<>(); + shards.addAll(shardGroup.getShardsList()); + + if (shardCount > shards.size()) { + // 需要增加shard + log.info("reallocShards ShardGroup {}, add shards from {} to {}", + shardGroup.getId(), shards.size(), shardCount); + int storeIdx = (int) shardGroup.getId() % stores.size(); //store分配规则,简化为取模 + for (int addCount = shardCount - shards.size(); addCount > 0; ) { + // 检查是否已经存在 + if (!isStoreInShards(shards, stores.get(storeIdx).getId())) { + Metapb.Shard shard = Metapb.Shard.newBuilder() + .setStoreId(stores.get(storeIdx).getId()) + .build(); + shards.add(shard); + addCount--; + } + storeIdx = (storeIdx + 1) >= stores.size() ? 
0 : ++storeIdx; // 顺序选择 + } + } else if (shardCount < shards.size()) { + // 需要减shard + log.info("reallocShards ShardGroup {}, remove shards from {} to {}", + shardGroup.getId(), shards.size(), shardCount); + + int subCount = shards.size() - shardCount; + Iterator iterator = shards.iterator(); + while (iterator.hasNext() && subCount > 0) { + if (iterator.next().getRole() != Metapb.ShardRole.Leader) { + iterator.remove(); + subCount--; + } + } + } else { + return shards; + } + + Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder(shardGroup) + .clearShards() + .addAllShards(shards).build(); + storeInfoMeta.updateShardGroup(group); + partitionService.updateShardGroupCache(group); + // change shard group + onShardGroupStatusChanged(shardGroup, group); + + var partitions = partitionService.getPartitionById(shardGroup.getId()); + if (partitions.size() > 0) { + // send one message, change shard is regardless with partition/graph + partitionService.fireChangeShard(partitions.get(0), shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); + } + + log.info("reallocShards ShardGroup {}, shards: {}", group.getId(), group.getShardsList()); + return shards; + } + + /** + * 根据partition的数量,分配group shard + * + * @param groups list of (partition id, count) + * @return total groups + */ + public synchronized int splitShardGroups(List> groups) throws PDException { + int sum = groups.stream().map(pair -> pair.getValue()).reduce(0, Integer::sum); + // shard group 太大 + if (sum > getActiveStores().size() * pdConfig.getPartition().getMaxShardsPerStore()){ + throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "can't satisfy target shard group count"); + } + + partitionService.splitPartition(groups); + + return sum; + } + + /** + * 分配shard group,为分裂做准备 + * @param groups + * @return true + * @throws PDException + */ + private boolean isStoreInShards(List shards, long storeId){ + AtomicBoolean exist = new AtomicBoolean(false); + shards.forEach(s->{ + if ( s.getStoreId() == storeId ) + exist.set(true); + }); + return exist.get(); + } + + /** + * update shard group and cache. + * send shard group change message. 
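+ * Persists the new shard list, refreshes the partition cache and notifies
+ * shard-group listeners; returns the previous group, or null when the group
+ * does not exist or the update fails.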
+ * + * @param groupId : shard group + * @param shards : shard lists + * @param version: term version, ignored if less than 0 + * @param confVersion : conf version, ignored if less than 0 + * @return + */ + public synchronized Metapb.ShardGroup updateShardGroup(int groupId, List shards, + long version, long confVersion) { + try { + Metapb.ShardGroup group = this.storeInfoMeta.getShardGroup(groupId); + + if (group == null) { + return null; + } + + var builder = Metapb.ShardGroup.newBuilder(group); + if (version >= 0){ + builder.setVersion(version); + } + + if (confVersion >= 0){ + builder.setConfVer(confVersion); + } + + var newGroup = builder.clearShards() .addAllShards(shards) .build(); + + storeInfoMeta.updateShardGroup(newGroup); + partitionService.updateShardGroupCache(newGroup); + onShardGroupStatusChanged(group, newGroup); + log.info("Raft {} updateShardGroup {}", groupId, newGroup); + return group; + } catch (Exception e) { + log.error("Shardgroup {} update exception {}", groupId, e); + } + return null; + } + + /** + * 通知 store 进行shard group的重建操作 + * @param groupId raft group id + * @param shards shard list: 如果为空,则删除对应的partition engine + */ + public void shardGroupOp(int groupId, List shards) throws PDException { + + var shardGroup = getShardGroup(groupId); + + if (shardGroup == null) { + return; + } + + var newGroup = shardGroup.toBuilder().clearShards().addAllShards(shards).build(); + if (shards.size() == 0) { + var partitions = partitionService.getPartitionById(groupId); + for (var partition : partitions) { + partitionService.removePartition(partition.getGraphName(), groupId); + } + deleteShardGroup(groupId); + } + + onShardGroupOp(newGroup); + } + + /** + * 删除 shard group + * @param groupId shard group id + */ + public synchronized void deleteShardGroup(int groupId) throws PDException { + Metapb.ShardGroup group = this.storeInfoMeta.getShardGroup(groupId); + if (group != null) { + storeInfoMeta.deleteShardGroup(groupId); + } + + onShardGroupStatusChanged(group, null); + + // 修正store的分区数. 
(分区合并导致) + var shardGroups = getShardGroups(); + if (shardGroups != null) { + var count1 = pdConfig.getConfigService().getPDConfig().getPartitionCount(); + var maxGroupId = getShardGroups().stream().map(Metapb.ShardGroup::getId).max(Integer::compareTo); + if (maxGroupId.get() < count1) { + pdConfig.getConfigService().setPartitionCount(maxGroupId.get() + 1); + } + } + } + + public synchronized void updateShardGroupState(int groupId, Metapb.PartitionState state) throws PDException { + Metapb.ShardGroup shardGroup = storeInfoMeta.getShardGroup(groupId) + .toBuilder() + .setState(state).build(); + storeInfoMeta.updateShardGroup(shardGroup); + partitionService.updateShardGroupCache(shardGroup); + } + + /** + * 接收Store的心跳 + * @param storeStats + * @throws PDException + */ + public Metapb.ClusterStats heartBeat(Metapb.StoreStats storeStats) throws PDException { + this.storeInfoMeta.updateStoreStats(storeStats); + Metapb.Store lastStore = this.getStore(storeStats.getStoreId()); + if (lastStore == null){ + //store不存在 + throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d does not exist.", storeStats.getStoreId())); + } + if (lastStore.getState() == Metapb.StoreState.Tombstone){ + throw new PDException(Pdpb.ErrorType.STORE_HAS_BEEN_REMOVED_VALUE, + String.format("Store id %d is useless since it's state is Tombstone", + storeStats.getStoreId())); + } + Metapb.Store nowStore; + // 如果正在做store下线操作 + if (lastStore.getState() == Metapb.StoreState.Exiting){ + List activeStores = this.getActiveStores(); + Map storeMap = new HashMap<>(); + activeStores.forEach(store -> { + storeMap.put(store.getId(), store); + }); + // 下线的store的分区为0,说明已经迁移完毕,可以下线,如果非0,则迁移还在进行,需要等待 + if (storeStats.getPartitionCount() > 0 && storeMap.containsKey(storeStats.getStoreId())){ + nowStore = Metapb.Store.newBuilder(lastStore) + .setStats(storeStats) + .setLastHeartbeat(System.currentTimeMillis()) + .setState(Metapb.StoreState.Exiting).build(); + this.storeInfoMeta.updateStore(nowStore); + return this.clusterStats; + }else { + nowStore = Metapb.Store.newBuilder(lastStore) + .setStats(storeStats) + .setLastHeartbeat(System.currentTimeMillis()) + .setState(Metapb.StoreState.Tombstone).build(); + this.storeInfoMeta.updateStore(nowStore); + storeInfoMeta.removeActiveStore(nowStore); + return this.clusterStats; + } + } + + if (lastStore.getState() == Metapb.StoreState.Pending) { + nowStore = Metapb.Store.newBuilder(lastStore) + .setStats(storeStats) + .setLastHeartbeat(System.currentTimeMillis()) + .setState(Metapb.StoreState.Pending).build(); + this.storeInfoMeta.updateStore(nowStore); + return this.clusterStats; + } else { + if (lastStore.getState() == Metapb.StoreState.Offline) { + this.updateStore(Metapb.Store.newBuilder(lastStore).setState(Metapb.StoreState.Up).build()); + } + nowStore = Metapb.Store.newBuilder(lastStore) + .setState(Metapb.StoreState.Up) + .setStats(storeStats) + .setLastHeartbeat(System.currentTimeMillis()).build(); + this.storeInfoMeta.updateStore(nowStore); + this.storeInfoMeta.keepStoreAlive(nowStore); + this.checkStoreStatus(); + return this.clusterStats; + } + } + + public synchronized Metapb.ClusterStats updateClusterStatus(Metapb.ClusterState state){ + this.clusterStats = clusterStats.toBuilder().setState(state).build(); + return this.clusterStats; + } + + public Metapb.ClusterStats updateClusterStatus(Metapb.PartitionState state){ + Metapb.ClusterState cstate = Metapb.ClusterState.Cluster_OK; + switch (state){ + case PState_Normal: + cstate = 
Metapb.ClusterState.Cluster_OK; + break; + case PState_Warn: + cstate = Metapb.ClusterState.Cluster_Warn; + break; + case PState_Fault: + cstate = Metapb.ClusterState.Cluster_Fault; + break; + case PState_Offline: + cstate = Metapb.ClusterState.Cluster_Offline; + break; + } + return updateClusterStatus(cstate); + } + public Metapb.ClusterStats getClusterStats() { + return this.clusterStats; + } + + /** + * 检查集群健康状态 + * 活跃机器数是否大于最小阈值 + * 分区shard在线数已否过半 * + */ + public synchronized void checkStoreStatus() { + Metapb.ClusterStats.Builder builder = Metapb.ClusterStats.newBuilder() + .setState(Metapb.ClusterState.Cluster_OK); + try { + List activeStores = this.getActiveStores(); + if (activeStores.size() < pdConfig.getMinStoreCount()) { + builder.setState(Metapb.ClusterState.Cluster_Not_Ready); + builder.setMessage("The number of active stores is " + activeStores.size() + + ", less than pd.initial-store-count:" + pdConfig.getMinStoreCount()); + } + Map storeMap = new HashMap<>(); + activeStores.forEach(store -> { + storeMap.put(store.getId(), store); + }); + + if (builder.getState() == Metapb.ClusterState.Cluster_OK) { + // 检查每个分区的在线shard数量是否大于半数 + for (Metapb.ShardGroup group : this.getShardGroups()) { + int count = 0; + for (Metapb.Shard shard : group.getShardsList()) { + count += storeMap.containsKey(shard.getStoreId()) ? 1 : 0; + } + if (count * 2 < group.getShardsList().size()) { + builder.setState(Metapb.ClusterState.Cluster_Not_Ready); + builder.setMessage("Less than half of active shard, partitionId is " + group.getId()); + break; + } + } + } + + } catch (PDException e) { + log.error("StoreNodeService updateClusterStatus exception {}", e); + } + this.clusterStats = builder.setTimestamp(System.currentTimeMillis()).build(); + if (this.clusterStats.getState() != Metapb.ClusterState.Cluster_OK) + log.error("The cluster is not ready, {}", this.clusterStats); + } + public void addStatusListener(StoreStatusListener listener){ + statusListeners.add(listener); + } + + protected void onStoreRaftAddressChanged(Metapb.Store store) { + log.info("onStoreRaftAddressChanged storeId = {}, new raft addr:", store.getId(), store.getRaftAddress()); + statusListeners.forEach(e -> { + e.onStoreRaftChanged(store); + }); + } + + public void addShardGroupStatusListener(ShardGroupStatusListener listener){ + shardGroupStatusListeners.add(listener); + } + + protected void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, Metapb.StoreState stats){ + log.info("onStoreStatusChanged storeId = {} from {} to {}", store.getId(), old, stats); + statusListeners.forEach(e->{ + e.onStoreStatusChanged(store, old, stats); + }); + } + + protected void onShardGroupStatusChanged(Metapb.ShardGroup group, Metapb.ShardGroup newGroup){ + log.info("onShardGroupStatusChanged, groupId: {}, from {} to {}", group.getId(), group, newGroup); + shardGroupStatusListeners.forEach( e -> e.onShardListChanged(group, newGroup)); + } + + protected void onShardGroupOp(Metapb.ShardGroup shardGroup) { + log.info("onShardGroupOp, group id: {}, shard group:{}", shardGroup.getId(), shardGroup); + shardGroupStatusListeners.forEach( e -> e.onShardListOp(shardGroup)); + } + + /** + * 检查当前store是否可下线 + * 活跃机器数小于等于最小阈值,不可下线 + * 分区shard在线数不超过半数, 不可下线 + */ + public boolean checkStoreCanOffline(Metapb.Store currentStore) { + try { + long currentStoreId = currentStore.getId(); + List activeStores = this.getActiveStores(); + Map storeMap = new HashMap<>(); + activeStores.forEach(store -> { + if (store.getId() != currentStoreId) { + 
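+ // Collect every active store except the one being taken offline; the checks
+ // below require the remaining stores to still satisfy min-store-count and to
+ // keep a majority of each shard group's shards online.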
storeMap.put(store.getId(), store); + } + }); + + if (storeMap.size() < pdConfig.getMinStoreCount()) { + return false; + } + + // 检查每个分区的在线shard数量是否大于半数 + for (Metapb.ShardGroup group : this.getShardGroups()) { + int count = 0; + for (Metapb.Shard shard : group.getShardsList()) { + long storeId = shard.getStoreId(); + count += storeMap.containsKey(storeId) ? 1 : 0; + } + if (count * 2 < group.getShardsList().size()) { + return false; + } + } + } catch (PDException e) { + log.error("StoreNodeService checkStoreCanOffline exception {}", e); + return false; + } + + return true; + } + + /** + * 对store上的对rocksdb进行compaction + * @param groupId + * @param tableName + * @return + */ + public synchronized void shardGroupsDbCompaction(int groupId, String tableName) throws PDException { + + // 通知所有的store,对rocksdb进行compaction + partitionService.fireDbCompaction(groupId, tableName); + // TODO 异常怎么处理? + } + + private static String graphSpaceConfPrefix ="HUGEGRAPH/hg/GRAPHSPACE/CONF/"; + + public Map getQuota() throws PDException { + List graphs = partitionService.getGraphs(); + String delimiter = String.valueOf(MetadataKeyHelper.DELIMITER); + HashMap storages = new HashMap<>(); + for (Metapb.Graph g : graphs) { + String graphName = g.getGraphName(); + String[] splits = graphName.split(delimiter); + if (!graphName.endsWith("/g") || splits.length < 2) { + continue; + } + String graphSpace = splits[0]; + storages.putIfAbsent(graphSpace, 0L); + List stores = getStores(graphName); + long dataSize = 0; + for (Metapb.Store store : stores) { + List gss = store.getStats() + .getGraphStatsList(); + for (Metapb.GraphStats gs : gss) { + boolean nameEqual = graphName.equals(gs.getGraphName()); + boolean roleEqual = Metapb.ShardRole.Leader.equals( + gs.getRole()); + if (nameEqual && roleEqual) { + dataSize += gs.getApproximateSize(); + } + } + } + Long size = storages.get(graphSpace); + size += dataSize; + storages.put(graphSpace, size); + + } + Metapb.GraphSpace.Builder spaceBuilder = Metapb.GraphSpace.newBuilder(); + HashMap limits = new HashMap<>(); + for (Map.Entry item : storages.entrySet()) { + String spaceName = item.getKey(); + String value = kvService.get(graphSpaceConfPrefix + spaceName); + if (!StringUtils.isEmpty(value)) { + HashMap config = new Gson().fromJson(value, HashMap.class); + Long size = item.getValue(); + int limit = ((Double) config.get("storage_limit")).intValue(); + long limitByLong = limit * 1024L * 1024L; + try { + spaceBuilder.setName(spaceName).setStorageLimit(limitByLong).setUsedSize(size); + Metapb.GraphSpace graphSpace = spaceBuilder.build(); + configService.setGraphSpace(graphSpace); + } catch (Exception e) { + log.error("update graph space with error:", e); + } + // KB and GB * 1024L * 1024L + if (size > limitByLong) { + limits.put(spaceName, true); + continue; + } + } + limits.put(spaceName, false); + + } + GraphState.Builder stateBuilder = GraphState.newBuilder() + .setMode(GraphMode.ReadOnly) + .setReason( + GraphModeReason.Quota); + for (Metapb.Graph g : graphs) { + String graphName = g.getGraphName(); + String[] splits = graphName.split(delimiter); + if (!graphName.endsWith("/g") || splits.length < 2) { + continue; + } + String graphSpace = splits[0]; + Metapb.GraphState gsOld = g.getGraphState(); + GraphMode gmOld = gsOld != null ? gsOld.getMode() : GraphMode.ReadWrite; + GraphMode gmNew = limits.get( + graphSpace) ? 
GraphMode.ReadOnly : GraphMode.ReadWrite; + if (gmOld == null || gmOld.getNumber() != gmNew.getNumber()) { + stateBuilder.setMode(gmNew); + if (gmNew.getNumber() == GraphMode.ReadOnly.getNumber()) { + stateBuilder.setReason(GraphModeReason.Quota); + } + GraphState gsNew = stateBuilder.build(); + Metapb.Graph newGraph = g.toBuilder().setGraphState(gsNew) + .build(); + partitionService.updateGraph(newGraph); + statusListeners.forEach(listener -> { + listener.onGraphChange(newGraph, gsOld, gsNew); + }); + } + } + + return limits; + } + + + public Runnable getQuotaChecker() { + return quotaChecker; + } + + private Runnable quotaChecker = () -> { + try { + getQuota(); + } catch (Exception e) { + log.error( + "obtaining and sending graph space quota information with error: ", + e); + } + }; + + public TaskInfoMeta getTaskInfoMeta(){ + return taskInfoMeta; + } + + public StoreInfoMeta getStoreInfoMeta() { + return storeInfoMeta; + } + + /** + * 获得分区的Leader + * @param partition + * @param initIdx + * @return + */ + public Metapb.Shard getLeader(Metapb.Partition partition, int initIdx){ + Metapb.Shard leader = null; + try { + var shardGroup = this.getShardGroup(partition.getId()); + for(Metapb.Shard shard : shardGroup.getShardsList()){ + if (shard.getRole() == Metapb.ShardRole.Leader) { + leader = shard; + } + } + }catch (Exception e){ + log.error("get leader error: group id:{}, error:", partition.getId(), e.getMessage()); + } + return leader; + } + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java new file mode 100644 index 0000000000..c4981a0c8e --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java @@ -0,0 +1,13 @@ +package org.apache.hugegraph.pd; + +import com.baidu.hugegraph.pd.grpc.Metapb; + +public interface StoreStatusListener { + + void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, + Metapb.StoreState status); + + void onGraphChange(Metapb.Graph graph, Metapb.GraphState stateOld, + Metapb.GraphState stateNew) ; + void onStoreRaftChanged(Metapb.Store store); +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java new file mode 100644 index 0000000000..a8aab4d2e6 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java @@ -0,0 +1,785 @@ +package org.apache.hugegraph.pd; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.PriorityQueue; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.TaskInfoMeta; +import org.apache.hugegraph.pd.raft.RaftEngine; + +import com.baidu.hugegraph.pd.grpc.MetaTask; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.meta.TaskInfoMeta; +import com.baidu.hugegraph.pd.raft.RaftEngine; +import lombok.extern.slf4j.Slf4j; + + +/** + * 
任务调度服务,定时检查Store、资源、分区的状态,及时迁移数据,错误节点 + * 1、监测Store是否离线 + * 2、监测Partition的副本是否正确 + * 3、监测Partition的工作模式是否正确 + * 4、监测Partition是否需要分裂,监测分裂是否完成 + */ +@Slf4j +public class TaskScheduleService { + private final long TurnOffAndBalanceInterval = 30 * 60 * 1000; //机器下线30后才能进行动态平衡 + + private final long BalanceLeaderInterval = 30 * 1000; // leader平衡时间间隔 + private final PDConfig pdConfig; + private StoreNodeService storeService; + private PartitionService partitionService; + private ScheduledExecutorService executor; + private TaskInfoMeta taskInfoMeta; + private StoreMonitorDataService storeMonitorDataService; + private KvService kvService; + private LogService logService; + private long lastStoreTurnoffTime = 0; + private long lastBalanceLeaderTime = 0; + private final long clusterStartTime; // + + private static final String BALANCE_SHARD_KEY = "BALANCE_SHARD_KEY"; + + // 先按照value排序,再按照key排序 + private Comparator> kvPairComparatorAsc = (o1, o2) -> { + if (o1.getValue() == o2.getValue()){ + return o1.getKey().compareTo(o2.getKey()); + } + return o1.getValue().compareTo(o2.getValue()); + }; + + // 先按照value排序(倒序),再按照key排序(升序) + private Comparator> kvPairComparatorDesc = (o1, o2) -> { + if (o1.getValue() == o2.getValue()){ + return o2.getKey().compareTo(o1.getKey()); + } + return o2.getValue().compareTo(o1.getValue()); + }; + + + public TaskScheduleService(PDConfig config, StoreNodeService storeService, + PartitionService partitionService) { + this.pdConfig = config; + this.storeService = storeService; + this.partitionService = partitionService; + this.taskInfoMeta = new TaskInfoMeta(config); + this.logService = new LogService(pdConfig); + this.storeMonitorDataService = new StoreMonitorDataService(pdConfig); + this.clusterStartTime = System.currentTimeMillis(); + this.kvService = new KvService(pdConfig); + this.executor = Executors.newScheduledThreadPool(1024); + } + + public void init() { + executor.scheduleWithFixedDelay(() -> { + try { + patrolStores(); + } catch (Throwable e) { + log.error("patrolStores exception: ", e); + } + + }, 60, 60, TimeUnit.SECONDS); + executor.scheduleWithFixedDelay(() -> { + try { + patrolPartitions(); + balancePartitionLeader(false); + balancePartitionShard(); + } catch (Throwable e) { + log.error("patrolPartitions exception: ", e); + } + }, pdConfig.getPatrolInterval(), pdConfig.getPatrolInterval(), TimeUnit.SECONDS); + executor.scheduleWithFixedDelay(() -> { + if (isLeader()) { + kvService.clearTTLData(); + } + }, 1000, 1000, TimeUnit.MILLISECONDS); + executor.scheduleWithFixedDelay( + () -> { + if (isLeader()) { + storeService.getQuotaChecker(); + } + }, 2, 30, + TimeUnit.SECONDS); + // clean expired monitor data each 10 minutes, delay 3min. 
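+ // The cleanup task is scheduled only if this node is the raft leader when
+ // init() runs: every 10 minutes (3 min initial delay) it calls
+ // removeExpiredMonitorData per store, deleting monitor keys
+ // (SMD + delimiter + storeId + delimiter + timestamp) older than the
+ // configured retention period.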
+ if (isLeader() && this.pdConfig.getStore().isMonitorDataEnabled()){ + executor.scheduleAtFixedRate(()->{ + Long expTill = System.currentTimeMillis() / 1000 - this.pdConfig.getStore().getRetentionPeriod(); + log.debug("monitor data keys before " + expTill + " will be deleted") ; + int records = 0; + try { + for (Metapb.Store store : storeService.getStores()){ + int cnt = this.storeMonitorDataService.removeExpiredMonitorData(store.getId(), expTill); + log.debug("store id :{}, records:{}", store.getId(), cnt); + records += cnt; + } + } catch (PDException e) { + throw new RuntimeException(e); + } + log.debug(String.format("%d records has been deleted", records)); + }, 180, 600, TimeUnit.SECONDS); + } + + storeService.addStatusListener(new StoreStatusListener() { + @Override + public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, Metapb.StoreState status) { + if ( status == Metapb.StoreState.Tombstone ) + lastStoreTurnoffTime = System.currentTimeMillis(); + + if ( status == Metapb.StoreState.Up) { + executor.schedule(()->{ + try { //store 上线后延时1分钟进行leader平衡 + balancePartitionLeader(false); + } catch (PDException e) { + log.error("exception {}", e); + } + }, BalanceLeaderInterval, TimeUnit.MILLISECONDS); + + } + } + + @Override + public void onGraphChange(Metapb.Graph graph, + Metapb.GraphState stateOld, + Metapb.GraphState stateNew) { + + } + + @Override + public void onStoreRaftChanged(Metapb.Store store) { + + } + }); + } + + public void shutDown() { + executor.shutdownNow(); + } + private boolean isLeader(){ return RaftEngine.getInstance().isLeader();} + /** + * 巡查所有的store,检查是否在线,存储空间是否充足 + */ + public List patrolStores() throws PDException { + if (!isLeader()) return null; + + List changedStores = new ArrayList<>(); + // 检查store在线状态 + List stores = storeService.getStores(""); + Map activeStores = storeService.getActiveStores("") + .stream().collect(Collectors.toMap(Metapb.Store::getId, t -> t)); + for (Metapb.Store store : stores) { + Metapb.Store changeStore = null; + if ((store.getState() == Metapb.StoreState.Up + || store.getState() == Metapb.StoreState.Unknown) + && !activeStores.containsKey(store.getId())) { + // 不在线,修改状态为离线 + changeStore = Metapb.Store.newBuilder(store) + .setState(Metapb.StoreState.Offline) + .build(); + + } else if ((store.getState() == Metapb.StoreState.Exiting && !activeStores.containsKey(store.getId())) || + (store.getState() == Metapb.StoreState.Offline && + (System.currentTimeMillis() - store.getLastHeartbeat() > + pdConfig.getStore().getMaxDownTime() * 1000) && + (System.currentTimeMillis() - clusterStartTime > + pdConfig.getStore().getMaxDownTime() * 1000))) { + //手工修改为下线或者离线达到时长 + // 修改状态为关机, 增加 checkStoreCanOffline 检测 + if (storeService.checkStoreCanOffline(store)) { + changeStore = Metapb.Store.newBuilder(store) + .setState(Metapb.StoreState.Tombstone).build(); + this.logService.insertLog(LogService.NODE_CHANGE, + LogService.TASK, changeStore); + log.info("patrolStores store {} Offline", changeStore.getId()); + } + } + if (changeStore != null) { + storeService.updateStore(changeStore); + changedStores.add(changeStore); + } + } + return changedStores; + } + + + /** + * 巡查所有的分区,检查副本数是否正确 + */ + public List patrolPartitions() throws PDException { + if (!isLeader()) { + return null; + } + + // 副本数不一致,重新分配副本 + for(Metapb.ShardGroup group : storeService.getShardGroups()){ + if ( group.getShardsCount() != pdConfig.getPartition().getShardCount()){ + storeService.reallocShards(group); + // 避免后面的 balance partition shard 马上执行. 
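+ // Writing a short-lived DOING marker under BALANCE_SHARD_KEY (180 * 1000
+ // appears to be a millisecond TTL in KvService) makes the next
+ // balancePartitionShard() run return early, so the shard re-allocation just
+ // issued is not immediately followed by another shard migration.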
+ kvService.put(BALANCE_SHARD_KEY, "DOING", 180 * 1000); + } + } + //检查shard是否在线。 + Map tombStores = storeService.getTombStores().stream().collect( + Collectors.toMap(Metapb.Store::getId, t -> t)); + + var partIds = new HashSet(); + + for (var pair : tombStores.entrySet()) { + for (var partition : partitionService.getPartitionByStore(pair.getValue())) { + if (partIds.contains(partition.getId())) { + continue; + } + partIds.add(partition.getId()); + + storeService.storeTurnoff(pair.getValue()); + partitionService.shardOffline(partition, pair.getValue().getId()); + } + + } + + return null; + } + + + /** + * 在Store之间平衡分区的数量 + * 机器转为UP半小时后才能进行动态平衡 + */ + public synchronized Map> balancePartitionShard() throws PDException { + log.info("balancePartitions starting, isleader:{}", isLeader()); + + if (!isLeader()) { + return null; + } + + if ( System.currentTimeMillis() - lastStoreTurnoffTime < TurnOffAndBalanceInterval) { + return null;//机器下线半小时后才能进行动态平衡 + } + + + int activeStores = storeService.getActiveStores().size(); + if (activeStores == 0) { + log.warn("balancePartitionShard non active stores, skip to balancePartitionShard"); + return null; + } + + // 避免频繁调用. (当改变副本数,需要调整shard list,此时又需要平衡分区)会发送重复的指令。造成结果不可预料。 + // 严重会删除掉分区. + if (Objects.equals(kvService.get(BALANCE_SHARD_KEY),"DOING")) { + return null; + } + + int totalShards = pdConfig.getConfigService().getPartitionCount() * pdConfig.getPartition().getShardCount(); + int averageCount = totalShards / activeStores; + int remainder = totalShards % activeStores; + + // 统计每个store上分区, StoreId ->PartitionID, ShardRole + Map> partitionMap = new HashMap<>(); + storeService.getActiveStores().forEach(store -> { + partitionMap.put(store.getId(), new HashMap<>()); + }); + + // 如果是leaner 说明迁移正在进行,不要重复提交任务 + AtomicReference isLeaner = new AtomicReference<>(false); + partitionService.getPartitions().forEach(partition -> { + + try { + storeService.getShardList(partition.getId()).forEach(shard -> { + Long storeId = shard.getStoreId(); + // 判断每个shard为leaner或者状态非正常状态 + if (shard.getRole() == Metapb.ShardRole.Learner + || partition.getState() != Metapb.PartitionState.PState_Normal){ + isLeaner.set(true); + } + if (partitionMap.containsKey(storeId)) { + partitionMap.get(storeId).put(partition.getId(), shard.getRole()); + } + }); + } catch (PDException e) { + log.error("get partition {} shard list error:{}.", partition.getId(), e.getMessage()); + } + }); + + if (isLeaner.get()){ + log.warn("balancePartitionShard is doing, skip this balancePartitionShard task"); + return null; + } + + // 按照shard数量由高到低排序store + List> sortedList = new ArrayList<>(); + partitionMap.forEach((storeId, shards) -> { + sortedList.add(new KVPair(storeId, shards.size())); + }); + // 由大到小排序的list + sortedList.sort(((o1, o2) -> o2.getValue().compareTo(o1.getValue()))); + // 最大堆 + PriorityQueue> maxHeap = new PriorityQueue<>(sortedList.size(), + (o1, o2) -> o2.getValue().compareTo(o1.getValue())); + + // 各个副本的 committedIndex + Map> committedIndexMap = partitionService.getCommittedIndexStats(); + // 分区ID --> 源StoreID,目标StoreID + Map> movedPartitions = new HashMap<>(); + // 移除多余的shard, 按照shards由多到少的顺序遍历store,余数remainder优先给shards多的store分配,减少迁移的概率 + for (int index = 0; index < sortedList.size(); index++) { + long storeId = sortedList.get(index).getKey(); + if (!partitionMap.containsKey(storeId)) { + log.error("cannot found storeId {} in partitionMap", storeId); + return null; + } + Map shards = partitionMap.get(storeId); + int targetCount = index < remainder ? 
averageCount + 1 : averageCount; + // 移除多余的shard, 添加源StoreID. 非Leader,并且该分区唯一 + if (shards.size() > targetCount) { + int movedCount = shards.size() - targetCount; + log.info("balancePartitionShard storeId {}, shardsSize {}, targetCount {}, moveCount {}", + storeId, shards.size(), targetCount, movedCount); + for (Iterator iterator = shards.keySet().iterator(); + movedCount > 0 && iterator.hasNext(); ) { + Integer id = iterator.next(); + + if ( !movedPartitions.containsKey(id)) { + log.info("store {}, shard of partition {} can be moved", storeId, id); + movedPartitions.put(id, new KVPair<>(storeId, 0L)); + movedCount--; + } + } + } else if (shards.size() < targetCount) { + int addCount = targetCount - shards.size(); + log.info("balancePartitionShard storeId {}, shardsSize {}, targetCount {}, addCount {}", + storeId, shards.size(), targetCount, addCount); + maxHeap.add(new KVPair<>(storeId, addCount)); + } + } + + if (movedPartitions.size() == 0){ + log.warn("movedPartitions is empty, totalShards:{} averageCount:{} remainder:{} sortedList:{}", + totalShards, averageCount, remainder, sortedList); + } + Iterator>> moveIterator = movedPartitions.entrySet().iterator(); + + while (moveIterator.hasNext()) { + if(maxHeap.size() == 0) { + break; + } + Map.Entry> moveEntry = moveIterator.next(); + int partitionId = moveEntry.getKey(); + long sourceStoreId = moveEntry.getValue().getKey(); + + List> tmpList = new ArrayList<>(maxHeap.size()); + while (maxHeap.size() > 0) { + KVPair pair = maxHeap.poll(); + long destStoreId = pair.getKey(); + boolean destContains = false; + if (partitionMap.containsKey(destStoreId)) + destContains = partitionMap.get(destStoreId).containsKey(partitionId); + // 如果目的store已经包含了该partition,则取一下store + if(!destContains) { + moveEntry.getValue().setValue(pair.getKey()); + log.info("balancePartitionShard will move partition {} from store {} to store {}", + moveEntry.getKey(), + moveEntry.getValue().getKey(), + moveEntry.getValue().getValue()); + if(pair.getValue() > 1) { + pair.setValue(pair.getValue() - 1); + tmpList.add(pair); + } + break; + } + tmpList.add(pair); + } + maxHeap.addAll(tmpList); + } + + kvService.put(BALANCE_SHARD_KEY, "DOING", 180 * 1000); + + // 开始迁移 + movedPartitions.forEach((partId, storePair) -> { + // 源和目标storeID都不为0 + if (storePair.getKey() > 0 && storePair.getValue() > 0) { + partitionService.movePartitionsShard(partId, storePair.getKey(), storePair.getValue()); + }else { + log.warn("balancePartitionShard key or value is zero, partId:{} storePair:{}", + partId, storePair); + } + }); + return movedPartitions; + } + + /** + * 在Store之间平衡分区的Leader的数量 + */ + public synchronized Map balancePartitionLeader(boolean immediately) throws PDException { + Map results = new HashMap<>(); + + if (!isLeader()) { + return results; + } + + if (!immediately && System.currentTimeMillis() - lastBalanceLeaderTime < BalanceLeaderInterval) { + return results; + } + lastBalanceLeaderTime = System.currentTimeMillis(); + + List shardGroups = storeService.getShardGroups(); + + // 分裂或者缩容任务的时候,退出 + var taskMeta = storeService.getTaskInfoMeta(); + if (taskMeta.hasSplitTaskDoing() || taskMeta.hasMoveTaskDoing()) { + throw new PDException(1001, "split or combine task is processing, please try later!"); + } + + // 数据迁移的时候,退出 + if (Objects.equals(kvService.get(BALANCE_SHARD_KEY),"DOING")) { + throw new PDException(1001, "balance shard is processing, please try later!"); + } + + if (shardGroups.size() == 0) { + return results; + } + + Map storeShardCount = new HashMap<>(); + + 
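+ // Count how many shards each store currently hosts; the loop below turns
+ // these counts into a per-store leader quota of roughly count / shardCount
+ // (at least 1), with the remainder given to the last store so the quotas sum
+ // to the number of shard groups. Illustrative numbers: 12 shard groups with
+ // shardCount 3 spread over 3 stores gives each store 12 shards and a quota
+ // of 4 leaders.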
shardGroups.forEach(group -> { + group.getShardsList().forEach(shard -> { + storeShardCount.put(shard.getStoreId(), + storeShardCount.getOrDefault(shard.getStoreId(), 0) + 1); + }); + }); + + log.info("balancePartitionLeader, shard group size: {}, by store: {}", shardGroups.size(), storeShardCount); + + // 按照 target count, store id稳定排序 + PriorityQueue> targetCount = new PriorityQueue<>(kvPairComparatorDesc); + + var sortedGroups = storeShardCount.entrySet().stream() + .map(entry -> new KVPair<>(entry.getKey(), entry.getValue())) + .sorted(kvPairComparatorAsc) + .collect(Collectors.toList()); + int sum = 0; + + for (int i = 0; i < sortedGroups.size() - 1; i++) { + // at least one + int v = Math.max(sortedGroups.get(i).getValue() / pdConfig.getPartition().getShardCount(), 1); + targetCount.add(new KVPair<>(sortedGroups.get(i).getKey(), v)); + sum += v; + } + // 最后一个, 除不尽的情况,保证总数正确 + targetCount.add(new KVPair<>(sortedGroups.get(sortedGroups.size() - 1).getKey(), shardGroups.size() - sum)); + log.info("target count: {}", targetCount); + + for (var group : shardGroups){ + var map = group.getShardsList().stream() + .collect(Collectors.toMap(Metapb.Shard::getStoreId, shard -> shard)); + var tmpList = new ArrayList>(); + // store比较多的情况,可能不包含对应的store id. 则先将不符合的store保存到临时列表,直到找到一个合适的store + while (!targetCount.isEmpty()){ + var pair = targetCount.poll(); + var storeId = pair.getKey(); + if (map.containsKey(storeId)){ + if (map.get(storeId).getRole() != Metapb.ShardRole.Leader) { + log.info("shard group{}, store id:{}, set to leader", group.getId(), storeId); + partitionService.transferLeader(group.getId(), map.get(storeId)); + results.put(group.getId(), storeId); + }else{ + log.info("shard group {}, store id :{}, is leader, no need change", group.getId(), storeId); + } + + if (pair.getValue() > 1) { + // count -1 + pair.setValue(pair.getValue() - 1); + tmpList.add(pair); + } + // 找到了,则处理完成 + break; + }else{ + tmpList.add(pair); + } + } + targetCount.addAll(tmpList); + } + + return results; + } + + + private long getMaxIndexGap(Map> committedIndexMap, int partitionId) { + long maxGap = Long.MAX_VALUE; + if (committedIndexMap == null || !committedIndexMap.containsKey(partitionId)) { + return maxGap; + } + Map shardMap = committedIndexMap.get(partitionId); + if(shardMap == null || shardMap.size() == 0) { + return maxGap; + } + List sortedList = new ArrayList<>(); + shardMap.forEach((storeId, committedIndex) -> { + sortedList.add(committedIndex); + }); + // 由大到小排序的list + sortedList.sort(Comparator.reverseOrder()); + maxGap = sortedList.get(0) - sortedList.get(sortedList.size() - 1); + return maxGap; + } + + + /** + * 执行分区分裂,分为自动分裂和手工分裂 + * @return + * @throws PDException + */ + public List splitPartition( + Pdpb.OperationMode mode, List params) throws PDException { + + if (mode == Pdpb.OperationMode.Auto) { + return autoSplitPartition(); + } + + var list = params.stream() + .map(param -> new KVPair<>(param.getPartitionId(), param.getCount())) + .collect(Collectors.toList()); + + storeService.splitShardGroups(list); + return null; + } + + /** + * 自动进行分区分裂,每个store达到最大分区数量 + * 执行条件 + * 分裂后每台机器分区数量少于partition.max-partitions-per-store + * + * @throws PDException + */ + public List autoSplitPartition() throws PDException { + if (!isLeader()) return null; + + if (Metapb.ClusterState.Cluster_OK != storeService.getClusterStats().getState()) { + if (Metapb.ClusterState.Cluster_Offline == storeService.getClusterStats().getState()) + throw new PDException(Pdpb.ErrorType.Split_Partition_Doing_VALUE, "The data is 
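
The leader quota computed in balancePartitionLeader above divides each store's shard count by the replica factor (never below one) and hands the last, largest store whatever is left, so the quotas sum exactly to the number of shard groups. A small, self-contained illustration of that arithmetic; the names and numbers are made up for the example.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class LeaderQuotaSketch {

    /** storeShardCount: storeId -> shard replicas hosted; returns storeId -> leader quota. */
    static Map<Long, Integer> leaderQuotas(Map<Long, Integer> storeShardCount,
                                           int shardGroupCount, int replicaCount) {
        List<Map.Entry<Long, Integer>> sorted = new ArrayList<>(storeShardCount.entrySet());
        sorted.sort(Map.Entry.comparingByValue());            // ascending, smallest store first

        Map<Long, Integer> quotas = new LinkedHashMap<>();
        int sum = 0;
        for (int i = 0; i < sorted.size() - 1; i++) {
            int q = Math.max(sorted.get(i).getValue() / replicaCount, 1);  // at least one leader each
            quotas.put(sorted.get(i).getKey(), q);
            sum += q;
        }
        // The last (largest) store absorbs the remainder so the quotas add up exactly.
        quotas.put(sorted.get(sorted.size() - 1).getKey(), shardGroupCount - sum);
        return quotas;
    }

    public static void main(String[] args) {
        // Example: 12 shard groups, 3 replicas each, spread over 3 stores.
        Map<Long, Integer> counts = Map.of(1L, 12, 2L, 12, 3L, 12);
        System.out.println(leaderQuotas(new HashMap<>(counts), 12, 3));   // e.g. {1=4, 2=4, 3=4}
    }
}
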
splitting"); + else + throw new PDException(Pdpb.ErrorType.Cluster_State_Forbid_Splitting_VALUE, + "The current state of the cluster prohibits splitting data"); + } + + //For TEST + // pdConfig.getPartition().setMaxShardsPerStore(pdConfig.getPartition().getMaxShardsPerStore()*2); + + // 计算集群能能支持的最大split count + int splitCount = pdConfig.getPartition().getMaxShardsPerStore() * storeService.getActiveStores().size() / + (storeService.getShardGroups().size() * pdConfig.getPartition().getShardCount()); + + if (splitCount < 2) { + throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "Too many partitions per store, partition.store-max-shard-count = " + + pdConfig.getPartition().getMaxShardsPerStore()); + } + + // 每store未达最大分区数,进行分裂 + log.info("Start to split partitions..., split count = {}", splitCount); + + // 设置集群状态为下线 + storeService.updateClusterStatus(Metapb.ClusterState.Cluster_Offline); + // 修改默认分区数量 + // pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size() * splitCount); + + var list = storeService.getShardGroups().stream() + .map(shardGroup -> new KVPair<>(shardGroup.getId(), splitCount)) + .collect(Collectors.toList()); + storeService.splitShardGroups(list); + + return null; + } + + + /** + * Store汇报任务状态 + * 分区状态发生改变,重新计算分区所在的ShardGroup、图和整个集群的状态 + * @param task + */ + public void reportTask(MetaTask.Task task){ + try { + switch (task.getType()) { + case Split_Partition: + partitionService.handleSplitTask(task); + break; + case Move_Partition: + partitionService.handleMoveTask(task); + break; + case Clean_Partition: + partitionService.handleCleanPartitionTask(task); + break; + default: + break; + } + }catch (Exception e){ + log.error("Report task exception {}, {}", e, task); + } + } + + /** + * 对rocksdb进行compaction + * @throws PDException + */ + public Boolean dbCompaction(String tableName) throws PDException { + if (!isLeader()) { + return false; + } + + for (Metapb.ShardGroup shardGroup : storeService.getShardGroups()) { + storeService.shardGroupsDbCompaction(shardGroup.getId(), tableName); + } + + // + return true; + } + + /** + * 判断是否能把一个store的分区全部迁出,给出判断结果和迁移方案 + * @author tianxiaohui@baidu.com + */ + public Map canAllPartitionsMovedOut(Metapb.Store sourceStore) throws PDException { + if (!isLeader()) { + return null; + } + // 分析一个store上面的分区是否可以完全迁出 + Map resultMap = new HashMap<>(); + // 定义对象用于保存源store上面的分区 StoreId ->PartitionID, ShardRole + Map> sourcePartitionMap = new HashMap<>(); + sourcePartitionMap.put(sourceStore.getId(), new HashMap<>()); + // 定义对象用于保存其他活跃store上面的分区 StoreId ->PartitionID, ShardRole + Map> otherPartitionMap = new HashMap<>(); + Map availableDiskSpace = new HashMap<>(); // 每个store剩余的磁盘空间 + Map partitionDataSize = new HashMap<>(); // 记录待迁移的分区的数据量 + + storeService.getActiveStores().forEach(store -> { + if (store.getId() != sourceStore.getId()){ + otherPartitionMap.put(store.getId(), new HashMap<>()); + // 记录其他store的剩余的磁盘空间, 单位为Byte + availableDiskSpace.put(store.getId(), store.getStats().getAvailable()); + }else { + resultMap.put("current_store_is_online", true); + } + }); + // 统计待迁移的分区的数据大小 (从storeStats中统计,单位为KB) + for (Metapb.GraphStats graphStats : sourceStore.getStats().getGraphStatsList()){ + partitionDataSize.put(graphStats.getPartitionId(), + partitionDataSize.getOrDefault(graphStats.getPartitionId(), 0L) + + graphStats.getApproximateSize()); + } + // 给sourcePartitionMap 和 otherPartitionMap赋值 + partitionService.getPartitions().forEach(partition -> { + try { + 
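
As a worked instance of the split-count formula in autoSplitPartition above: with partition.store-max-shard-count = 24, 4 active stores, 4 existing shard groups and 3 replicas per partition, each partition may split into 24 * 4 / (4 * 3) = 8 pieces, and any result below 2 is rejected. A throwaway calculation in plain Java; the numbers are only an example.

public class SplitCountExample {
    public static void main(String[] args) {
        int maxShardsPerStore = 24;   // partition.store-max-shard-count
        int activeStores = 4;
        int shardGroups = 4;          // current number of partitions
        int shardCount = 3;           // replicas per partition

        int splitCount = maxShardsPerStore * activeStores / (shardGroups * shardCount);
        System.out.println("splitCount = " + splitCount);     // 8: each partition may split into 8
        if (splitCount < 2) {
            System.out.println("refuse: too many partitions per store already");
        }
    }
}
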
storeService.getShardList(partition.getId()).forEach(shard -> { + long storeId = shard.getStoreId(); + if (storeId == sourceStore.getId()){ + sourcePartitionMap.get(storeId).put(partition.getId(), shard.getRole()); + }else{ + if (otherPartitionMap.containsKey(storeId)){ + otherPartitionMap.get(storeId).put(partition.getId(), shard.getRole()); + } + } + + }); + } catch (PDException e) { + throw new RuntimeException(e); + } + }); + // 统计待移除的分区:即源store上面的所有分区 + Map> movedPartitions = new HashMap<>(); + for (Map.Entry entry : sourcePartitionMap.get(sourceStore.getId()).entrySet()){ + movedPartitions.put(entry.getKey(), new KVPair<>(sourceStore.getId(), 0L)); + } + // 统计其他store的分区数量, 用小顶堆保存,以便始终把分区数量较少的store优先考虑 + PriorityQueue> minHeap = new PriorityQueue<>(otherPartitionMap.size(), + (o1, o2) -> o1.getValue().compareTo(o2.getValue())); + otherPartitionMap.forEach((storeId, shards) -> { + minHeap.add(new KVPair(storeId, shards.size())); + }); + // 遍历待迁移的分区,优先迁移到分区比较少的store + Iterator>> moveIterator = movedPartitions.entrySet().iterator(); + while (moveIterator.hasNext()){ + Map.Entry> moveEntry = moveIterator.next(); + int partitionId = moveEntry.getKey(); + List> tmpList = new ArrayList<>(); // 记录已经弹出优先队列的元素 + while(minHeap.size() > 0) { + KVPair pair = minHeap.poll(); //弹出首个元素 + long storeId = pair.getKey(); + int partitionCount = pair.getValue(); + Map shards = otherPartitionMap.get(storeId); + final int unitRate = 1024; // 平衡不同存储单位的进率 + if ((!shards.containsKey(partitionId)) && ( + availableDiskSpace.getOrDefault(storeId, 0L) / unitRate >= + partitionDataSize.getOrDefault(partitionId, 0L))){ + // 如果目标store上面不包含该分区,且目标store剩余空间能容纳该分区,则进行迁移 + moveEntry.getValue().setValue(storeId); //设置移动的目标store + log.info("plan to move partition {} to store {}, " + + "available disk space {}, current partitionSize:{}", + partitionId, + storeId, + availableDiskSpace.getOrDefault(storeId, 0L) / unitRate, + partitionDataSize.getOrDefault(partitionId, 0L) + ); + // 更新该store预期的剩余空间 + availableDiskSpace.put(storeId, availableDiskSpace.getOrDefault(storeId, 0L) + - partitionDataSize.getOrDefault(partitionId, 0L) * unitRate); + // 更新统计变量中该store的分区数量 + partitionCount += 1; + pair.setValue(partitionCount); + tmpList.add(pair); + break; + }else{ + tmpList.add(pair); + } + } + minHeap.addAll(tmpList); + } + //检查是否未存在未分配目标store的分区 + List remainPartitions = new ArrayList<>(); + movedPartitions.forEach((partId, storePair) ->{ + if (storePair.getValue() == 0L){ + remainPartitions.add(partId); + } + }); + if (remainPartitions.size() > 0) { + resultMap.put("flag", false); + resultMap.put("movedPartitions", null); + }else{ + resultMap.put("flag", true); + resultMap.put("movedPartitions", movedPartitions); + } + return resultMap; + + } + + public Map> movePartitions(Map> movedPartitions) { + if (!isLeader()) { + return null; + } + // 开始迁移 + log.info("begin move partitions:"); + movedPartitions.forEach((partId, storePair) -> { + // 源和目标storeID都不为0 + if (storePair.getKey() > 0 && storePair.getValue() > 0) { + partitionService.movePartitionsShard(partId, storePair.getKey(), storePair.getValue()); + } + }); + return movedPartitions; + } + + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java new file mode 100644 index 0000000000..f1efccb1b6 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java @@ -0,0 +1,244 @@ +package org.apache.hugegraph.pd.config; + +import 
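
The evacuation check in canAllPartitionsMovedOut above greedily places each partition of the source store onto the candidate store that currently holds the fewest partitions, provided that store has no replica of the partition and its free disk (reported in bytes) covers the partition size (reported in KB, hence the factor of 1024). A standalone sketch of that greedy placement with invented types; it mirrors the approach, not the PD implementation.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;

public class EvacuationPlanSketch {

    /**
     * partitionSizesKb: partitionId -> approximate size in KB on the store being drained.
     * freeBytes:        candidate storeId -> free disk in bytes (mutated as space is reserved).
     * hosted:           candidate storeId -> partitions that store already holds.
     * Returns partitionId -> chosen destination storeId, or null if some partition cannot be placed.
     */
    static Map<Integer, Long> place(Map<Integer, Long> partitionSizesKb,
                                    Map<Long, Long> freeBytes,
                                    Map<Long, Set<Integer>> hosted) {
        final long UNIT = 1024L;                                         // stats are KB, disk space is bytes
        PriorityQueue<long[]> byLoad =                                   // {storeId, partition count}, min-heap
                new PriorityQueue<>(Comparator.comparingLong((long[] a) -> a[1]));
        hosted.forEach((storeId, parts) -> byLoad.add(new long[]{storeId, parts.size()}));

        Map<Integer, Long> plan = new LinkedHashMap<>();
        for (Map.Entry<Integer, Long> partition : partitionSizesKb.entrySet()) {
            long sizeKb = partition.getValue();
            Long chosen = null;
            List<long[]> skipped = new ArrayList<>();
            while (!byLoad.isEmpty()) {
                long[] candidate = byLoad.poll();
                long storeId = candidate[0];
                boolean hasReplica = hosted.get(storeId).contains(partition.getKey());
                boolean fits = freeBytes.getOrDefault(storeId, 0L) / UNIT >= sizeKb;
                if (!hasReplica && fits) {
                    chosen = storeId;
                    freeBytes.merge(storeId, -sizeKb * UNIT, Long::sum); // reserve the space up front
                    candidate[1]++;                                      // the store now carries one more partition
                    skipped.add(candidate);
                    break;
                }
                skipped.add(candidate);                                  // unsuitable for this partition, keep for others
            }
            byLoad.addAll(skipped);
            if (chosen == null) {
                return null;                                             // the store cannot be fully drained
            }
            plan.put(partition.getKey(), chosen);
        }
        return plan;
    }
}
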
org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import lombok.Data; +import org.springframework.beans.factory.annotation.Autowired; + +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.Configuration; +import org.springframework.stereotype.Component; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + + +/** + * PD配置文件 + * @author: yanjinbing + * @date: 2021/10/20 + */ +@Data +@Component +public class PDConfig { + + @Value("${pd.cluster_id:1}") + private long clusterId; // 集群ID + + @Value("${pd.patrol-interval:300}") + private long patrolInterval = 300; //巡查任务时间间隔 + @Value("${pd.data-path}") + private String dataPath; + @Value("${pd.initial-store-count:3}") + private int minStoreCount; + + // 初始store列表,该列表内的store自动激活 + @Value("${pd.initial-store-list: ''}") + private String initialStoreList; + @Value("${grpc.host}") + private String host; + + @Value("${license.verify-path}") + private String verifyPath; + @Value("${license.license-path}") + private String licensePath; + + @Autowired + private Raft raft; + + @Autowired + private Store store; + @Autowired + private Partition partition; + @Autowired + private Discovery discovery; + + private Map initialStoreMap = null; + + public Map getInitialStoreMap() { + if (initialStoreMap == null) { + initialStoreMap = new HashMap<>(); + Arrays.asList(initialStoreList.split(",")).forEach(s -> { + initialStoreMap.put(s, s); + }); + } + return initialStoreMap; + } + + /** + * 初始分区数量 + * Store数量 * 每Store最大副本数 /每分区副本数 + * @return + */ + public int getInitialPartitionCount(){ + return getInitialStoreMap().size() * partition.getMaxShardsPerStore() + / partition.getShardCount(); + } + + @Data + @Configuration + public class Raft { + @Value("${raft.enable:true }") + private boolean enable; + @Value("${raft.address}") + private String address; + @Value("${pd.data-path}") + private String dataPath; + @Value("${raft.peers-list}") + private String peersList; + @Value("${raft.snapshotInterval: 300}") + private int snapshotInterval; + @Value("${raft.rpc-timeout:10000}") + private int rpcTimeout; + @Value("${grpc.host}") + private String host; + @Value("${server.port}") + private int port; + + @Value("${pd.cluster_id:1}") + private long clusterId; // 集群ID + @Value("${grpc.port}") + private int grpcPort; + public String getGrpcAddress(){ return host + ":" + grpcPort;} + } + + @Data + @Configuration + public class Store{ + // store 心跳超时时间 + @Value("${store.keepAlive-timeout:300}") + private long keepAliveTimeout = 300; + @Value("${store.max-down-time:1800}") + private long maxDownTime = 1800; + + @Value("${store.monitor_data_enabled:true}") + private boolean monitorDataEnabled = true; + + @Value("${store.monitor_data_interval: 1 minute}") + private String monitorDataInterval = "1 minute"; + + @Value("${store.monitor_data_retention: 1 day}") + private String monitorDataRetention = "1 day"; + + /** + * interval -> seconds. + * minimum value is 1 seconds. 
+ * @return the seconds of the interval + */ + public Long getMonitorInterval(){ + return parseTimeExpression(this.monitorDataInterval); + } + + /** + * the monitor data that saved in rocksdb, will be deleted + * out of period + * + * @return the period of the monitor data should keep + */ + public Long getRetentionPeriod(){ + return parseTimeExpression(this.monitorDataRetention); + } + + /** + * parse time expression , support pattern: + * [1-9][ ](second, minute, hour, day, month, year) + * unit could not be null, the number part is 1 by default. + * + * @param exp + * @return seconds value of the expression. 1 will return by illegal expression + */ + private Long parseTimeExpression(String exp){ + if (exp != null) { + Pattern pattern = Pattern.compile("(?(\\d+)*)(\\s)*(?(second|minute|hour|day|month|year)$)"); + Matcher matcher = pattern.matcher(exp.trim()); + if (matcher.find()) { + String n = matcher.group("n"); + String unit = matcher.group("unit"); + + if (null == n || n.length() == 0) { + n = "1"; + } + + Long interval; + switch (unit) { + case "minute": + interval = 60L; + break; + case "hour": + interval = 3600L; + break; + case "day": + interval = 86400L; + break; + case "month": + interval = 86400L * 30; + break; + case "year": + interval = 86400L * 365; + break; + case "second": + default: + interval = 1L; + } + // avoid n == '0' + return Math.max(1L, interval * Integer.parseInt(n)); + } + } + return 1L; + } + + } + + @Data + @Configuration + public class Partition{ + private int totalCount = 0; + + // 每个Store最大副本数 + @Value("${partition.store-max-shard-count:24}") + private int maxShardsPerStore = 24; + + // 默认分副本数量 + @Value("${partition.default-shard-count:3}") + private int shardCount = 3; + + public void setTotalCount(int totalCount){ + this.totalCount = totalCount; + } + public int getTotalCount() { + if ( totalCount == 0 ) { + totalCount = getInitialPartitionCount(); + } + return totalCount; + } + } + + @Data + @Configuration + public class Discovery{ + // 客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 + @Value("${discovery.heartbeat-try-count:3}") + private int heartbeatOutTimes = 3; + } + + private ConfigService configService; + + private IdService idService; + + public void setConfigService(ConfigService configService) { + this.configService = configService; + } + public ConfigService getConfigService(){ return configService; } + + public IdService getIdService() { + return idService; + } + + public void setIdService(IdService idService) { + this.idService = idService; + } + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java new file mode 100644 index 0000000000..c26450d7f4 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java @@ -0,0 +1,54 @@ +package org.apache.hugegraph.pd.meta; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; + +import com.baidu.hugegraph.pd.grpc.Metapb; + +import java.util.List; +import java.util.Optional; + +public class ConfigMetaStore extends MetadataRocksDBStore { + + + private final long clusterId; + + public ConfigMetaStore(PDConfig pdConfig) { + super(pdConfig); + this.clusterId = pdConfig.getClusterId(); + } + /** + * 更新图空间存储状态信息 + * + * @param + */ + public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDException { + byte[] graphSpaceKey = MetadataKeyHelper.getGraphSpaceKey(graphSpace.getName()); + graphSpace = 
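
parseTimeExpression above accepts strings such as "1 minute" or "30 second" and converts them to seconds, defaulting the count to 1 and never returning less than 1; the named groups n and unit appear to have been dropped from the quoted pattern by formatting. A trimmed-down standalone version for experimentation, with the named groups restored on that assumption:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TimeExpressionSketch {
    private static final Pattern EXP = Pattern.compile(
            "(?<n>(\\d+)*)(\\s)*(?<unit>(second|minute|hour|day|month|year)$)");

    /** "1 minute" -> 60, "day" -> 86400, anything unparsable -> 1. */
    static long toSeconds(String exp) {
        if (exp == null) return 1L;
        Matcher m = EXP.matcher(exp.trim());
        if (!m.find()) return 1L;
        String n = m.group("n");
        long unit;
        switch (m.group("unit")) {
            case "minute": unit = 60L; break;
            case "hour":   unit = 3600L; break;
            case "day":    unit = 86400L; break;
            case "month":  unit = 86400L * 30; break;
            case "year":   unit = 86400L * 365; break;
            default:       unit = 1L;            // "second"
        }
        int count = (n == null || n.isEmpty()) ? 1 : Integer.parseInt(n);
        return Math.max(1L, unit * count);       // guard against a count of 0
    }

    public static void main(String[] args) {
        System.out.println(toSeconds("1 minute"));  // 60
        System.out.println(toSeconds("1 day"));     // 86400
        System.out.println(toSeconds("bogus"));     // 1
    }
}
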
graphSpace.toBuilder().setTimestamp(System.currentTimeMillis()).build(); + put(graphSpaceKey, graphSpace.toByteArray()); + return graphSpace; + } + public List getGraphSpace(String graphSpace) throws PDException { + byte[] graphSpaceKey = MetadataKeyHelper.getGraphSpaceKey(graphSpace); + return scanPrefix(Metapb.GraphSpace.parser(), graphSpaceKey); + } + + public Metapb.PDConfig setPdConfig(Metapb.PDConfig pdConfig) throws PDException { + byte[] graphSpaceKey = MetadataKeyHelper.getPdConfigKey(String.valueOf(pdConfig.getVersion())); + Metapb.PDConfig config = Metapb.PDConfig.newBuilder( + pdConfig).setTimestamp(System.currentTimeMillis()).build(); + put(graphSpaceKey, config.toByteArray()); + return config; + } + public Metapb.PDConfig getPdConfig(long version) throws PDException { + byte[] graphSpaceKey = MetadataKeyHelper.getPdConfigKey(version <= 0 ? null : + String.valueOf(version)); + Optional max = scanPrefix( + Metapb.PDConfig.parser(), graphSpaceKey).stream().max( + (o1, o2) -> (o1.getVersion() > o2.getVersion()) ? 1 : -1); + return max.isPresent()? max.get() : null; + } + + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java new file mode 100644 index 0000000000..b0a508fb79 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java @@ -0,0 +1,88 @@ +package org.apache.hugegraph.pd.meta; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; + +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; +import com.baidu.hugegraph.pd.grpc.discovery.Query; +import lombok.extern.slf4j.Slf4j; + +import org.apache.commons.lang3.StringUtils; + +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +/** + * @author zhangyingjie + * @date 2022/1/14 + **/ +@Slf4j +public class DiscoveryMetaStore extends MetadataRocksDBStore { + + public DiscoveryMetaStore(PDConfig pdConfig) { + super(pdConfig); + } + + //appName --> address --> registryInfo + private static final String PREFIX ="REGIS-"; + private static final String SPLITTER ="-"; + public void register(NodeInfo nodeInfo, int outTimes) throws PDException { + putWithTTL(toKey(nodeInfo.getAppName(), nodeInfo.getVersion(), nodeInfo.getAddress()), + nodeInfo.toByteArray(),(nodeInfo.getInterval() / 1000) * outTimes); + } + + byte[] toKey(String appName,String version,String address){ + StringBuilder builder = getPrefixBuilder(appName, version); + builder.append(SPLITTER); + builder.append(address); + return builder.toString().getBytes(); + } + + private StringBuilder getPrefixBuilder(String appName, String version) { + StringBuilder builder = new StringBuilder(); + builder.append(PREFIX); + if (!StringUtils.isEmpty(appName)){ + builder.append(appName); + builder.append(SPLITTER); + } + if (!StringUtils.isEmpty(version)){ + builder.append(version); + } + return builder; + } + + public NodeInfos getNodes(Query query) { + List nodeInfos = null; + try { + StringBuilder builder = getPrefixBuilder(query.getAppName(), + query.getVersion()); + nodeInfos = getInstanceListWithTTL( + NodeInfo.parser(), + builder.toString().getBytes()); + builder.setLength(0); + } catch (PDException e) { + log.error("An error occurred getting data from the store,{}",e); + } + if (query.getLabelsMap() != null && !query.getLabelsMap().isEmpty()) { + List result =new LinkedList(); + for 
(NodeInfo node:nodeInfos) { + if (labelMatch(node,query)) result.add(node); + } + return NodeInfos.newBuilder().addAllInfo(result).build(); + } + return NodeInfos.newBuilder().addAllInfo(nodeInfos).build(); + + } + private boolean labelMatch(NodeInfo node,Query query){ + Map labelsMap = node.getLabelsMap(); + for (Map.Entry entry:query.getLabelsMap().entrySet()) { + if (!entry.getValue().equals(labelsMap.get(entry.getKey()))){ + return false; + } + } + return true; + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java new file mode 100644 index 0000000000..70878bb009 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java @@ -0,0 +1,237 @@ +package org.apache.hugegraph.pd.meta; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.store.KV; + +import com.caucho.hessian.io.Hessian2Input; +import com.caucho.hessian.io.Hessian2Output; +import lombok.extern.slf4j.Slf4j; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; + +/** + * 自增id的实现类 + */ +@Slf4j +public class IdMetaStore extends MetadataRocksDBStore { + + + private final long clusterId; + + public IdMetaStore(PDConfig pdConfig) { + super(pdConfig); + this.clusterId = pdConfig.getClusterId(); + } + + private static final String ID_PREFIX = "@ID@"; + private static final String CID_PREFIX = "@CID@"; + private static final String CID_SLOT_PREFIX = "@CID_SLOT@"; + + private static final String CID_DEL_SLOT_PREFIX = "@CID_DEL_SLOT@"; + + public static long CID_DEL_TIMEOUT = 24 * 3600 * 1000; + + private static final String SEPARATOR = "@"; + private static final ConcurrentHashMap SEQUENCES = new ConcurrentHashMap<>(); + + /** + * 获取自增id + * @param key + * @param delta + * @return + * @throws PDException + */ + public long getId(String key, int delta) throws PDException { + Object probableLock = getLock(key); + byte[] keyBs = (ID_PREFIX + key).getBytes(Charset.defaultCharset()); + synchronized (probableLock) { + byte[] bs = getOne(keyBs); + long current = bs != null ? 
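
Registrations in DiscoveryMetaStore above are stored under keys shaped like REGIS-{appName}-{version}-{address} with a TTL of interval/1000 * heartbeat-try-count seconds, so an instance disappears automatically after missing that many heartbeats. A minimal in-memory sketch of the key shape and the TTL arithmetic; the expiring map stands in for the TTL-aware store and is not part of PD:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class RegistryTtlSketch {
    private static final String PREFIX = "REGIS-";
    private static final String SPLITTER = "-";

    // key -> expiry timestamp in ms; stands in for the TTL-aware RocksDB wrapper.
    private final Map<String, Long> expiries = new ConcurrentHashMap<>();

    static String key(String appName, String version, String address) {
        // REGIS-{appName}-{version}-{address}: one prefix scan returns all instances of an app (and version).
        return PREFIX + appName + SPLITTER + version + SPLITTER + address;
    }

    /** intervalMs is the client's heartbeat interval; outTimes is how many missed heartbeats are tolerated. */
    void register(String appName, String version, String address, long intervalMs, int outTimes) {
        long ttlSeconds = (intervalMs / 1000) * outTimes;
        expiries.put(key(appName, version, address), System.currentTimeMillis() + ttlSeconds * 1000);
    }

    boolean isAlive(String appName, String version, String address) {
        Long expiry = expiries.get(key(appName, version, address));
        return expiry != null && expiry > System.currentTimeMillis();
    }

    public static void main(String[] args) {
        RegistryTtlSketch registry = new RegistryTtlSketch();
        registry.register("hugegraph-store", "1.0", "10.0.0.1:8500", 10_000, 3); // gone after ~30s of silence
        System.out.println(registry.isAlive("hugegraph-store", "1.0", "10.0.0.1:8500")); // true
    }
}
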
bytesToLong(bs) : 0L; + long next = current + delta; + put(keyBs, longToBytes(next)); + return current; + } + } + + private Object getLock(String key){ + Object probableLock = new Object(); + Object currentLock = SEQUENCES.putIfAbsent(key, probableLock); + if (currentLock != null) { + probableLock = currentLock; + } + return probableLock; + } + + + public void resetId(String key) throws PDException { + Object probableLock = new Object(); + Object currentLock = SEQUENCES.putIfAbsent(key, probableLock); + if (currentLock != null) { + probableLock = currentLock; + } + byte[] keyBs = (ID_PREFIX + key).getBytes(Charset.defaultCharset()); + synchronized (probableLock) { + removeByPrefix(keyBs); + } + } + + /** + * 在删除name标识的cid的24小时内重复申请同一个name的cid保持同一值 + * 如此设计为了防止缓存的不一致,造成数据错误 + * @param key + * @param name cid 标识 + * @param max + * @return + * @throws PDException + */ + public long getCId(String key, String name, long max) throws PDException { + // 检测是否有过期的cid,删除图的频率比较低,此处对性能影响不大 + byte[] delKeyPrefix = new StringBuffer() + .append(CID_DEL_SLOT_PREFIX) + .append(key).append(SEPARATOR) + .toString().getBytes(Charset.defaultCharset()); + synchronized (this) { + scanPrefix(delKeyPrefix).forEach(kv -> { + long[] value = (long[]) deserialize(kv.getValue()); + if (value.length >= 2) { + if (System.currentTimeMillis() - value[1] > CID_DEL_TIMEOUT) { + try { + delCId(key, value[0]); + remove(kv.getKey()); + } catch (Exception e) { + log.error("Exception ", e); + } + } + } + }); + + // 从延时删除队列恢复Key + byte[] cidDelayKey = getCIDDelayKey(key, name); + byte[] value = getOne(cidDelayKey); + if (value != null) { + // 从延迟删除队列删除 + remove(cidDelayKey); + return ((long[]) deserialize(value))[0]; + } else + return getCId(key, max); + } + } + + /** + * 添加到删除队列,延后删除 + */ + public long delCIdDelay(String key, String name, long cid) throws PDException { + byte[] delKey = getCIDDelayKey(key, name); + put(delKey, serialize(new long[]{cid, System.currentTimeMillis()})); + return cid; + } + /** + * 获取自增循环不重复id, 达到上限后从0开始自增 + * @param key + * @param max id上限,达到该值后,重新从0开始自增 + * @return + * @throws PDException + */ + public long getCId(String key, long max) throws PDException { + Object probableLock = getLock(key); + byte[] keyBs = (CID_PREFIX + key).getBytes(Charset.defaultCharset()); + synchronized (probableLock) { + byte[] bs = getOne(keyBs); + long current = bs != null ? bytesToLong(bs) : 0L; + long last = current == 0 ? 
max - 1 : current - 1; + { // 查找一个未使用的cid + List kvs = scanRange(genCIDSlotKey(key, current), genCIDSlotKey(key, max)); + for (KV kv : kvs) { + if (current == bytesToLong(kv.getValue())) + current++; + else + break; + } + } + if (current == max) { + current = 0; + List kvs = scanRange(genCIDSlotKey(key, current), genCIDSlotKey(key, last)); + for (KV kv : kvs) { + if (current == bytesToLong(kv.getValue())) + current++; + else + break; + } + } + if (current == last) return -1; + put(genCIDSlotKey(key, current), longToBytes(current)); + put(keyBs, longToBytes(current + 1)); + return current; + } + } + + private byte[] genCIDSlotKey(String key, long value){ + byte[] keySlot = (CID_SLOT_PREFIX + key + SEPARATOR).getBytes(Charset.defaultCharset()); + ByteBuffer buf = ByteBuffer.allocate(keySlot.length + Long.SIZE); + buf.put(keySlot); + buf.put(longToBytes(value)); + return buf.array(); + } + + private byte[] getCIDDelayKey(String key, String name){ + byte[] bsKey = new StringBuffer() + .append(CID_DEL_SLOT_PREFIX) + .append(key).append(SEPARATOR) + .append(name) + .toString().getBytes(Charset.defaultCharset()); + return bsKey; + } + + /** + * 删除一个循环id,释放id值 + * @param key + * @param cid + * @return + * @throws PDException + */ + public long delCId(String key, long cid) throws PDException { + return remove(genCIDSlotKey(key, cid)); + } + + public static long bytesToLong(byte[] b) { + ByteBuffer buf = ByteBuffer.wrap(b); + return buf.getLong(); + } + + public static byte[] longToBytes(long l) { + ByteBuffer buf = ByteBuffer.wrap(new byte[Long.SIZE]); + buf.putLong(l); + buf.flip(); + return buf.array(); + } + + private byte[] serialize(Object obj) { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + Hessian2Output output = new Hessian2Output(bos); + output.writeObject(obj); + output.flush(); + return bos.toByteArray(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private Object deserialize(byte[] bytes) { + try (ByteArrayInputStream bis = new ByteArrayInputStream(bytes)) { + Hessian2Input input = new Hessian2Input(bis); + Object obj = input.readObject(); + input.close(); + return obj; + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java new file mode 100644 index 0000000000..70975f95c5 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java @@ -0,0 +1,37 @@ +package org.apache.hugegraph.pd.meta; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; + +import com.baidu.hugegraph.pd.grpc.Metapb; + +import java.util.List; + +/** + * @author zhangyingjie + * @date 2022/3/29 + **/ +public class LogMeta extends MetadataRocksDBStore { + + private PDConfig pdConfig; + + public LogMeta(PDConfig pdConfig) { + super(pdConfig); + this.pdConfig = pdConfig; + } + + public void insertLog(Metapb.LogRecord record) throws PDException { + byte[] storeLogKey = MetadataKeyHelper.getLogKey(record); + put(storeLogKey, record.toByteArray()); + + } + + public List getLog(String action, Long start, Long end) throws PDException { + byte[] keyStart = MetadataKeyHelper.getLogKeyPrefix(action, start); + byte[] keyEnd = MetadataKeyHelper.getLogKeyPrefix(action, end); + List stores =this.scanRange(Metapb.LogRecord.parser(), + keyStart, keyEnd); + return stores; + } +} diff --git 
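
Two details of IdMetaStore above are worth spelling out: ids are persisted as 8-byte big-endian longs via ByteBuffer, and the cyclic allocator scans occupied slots starting from the last counter value, wrapping to 0 at max and reusing released slots. The sketch below reproduces both ideas in memory only, sizing the buffer with Long.BYTES and using an ordered map in place of the on-disk slot keys, so it is an illustration of the approach rather than the PD code:

import java.nio.ByteBuffer;
import java.util.TreeMap;

public class CyclicIdSketch {
    static byte[] longToBytes(long l) {
        return ByteBuffer.allocate(Long.BYTES).putLong(l).array();   // 8 bytes, big-endian
    }

    static long bytesToLong(byte[] b) {
        return ByteBuffer.wrap(b).getLong();
    }

    private final TreeMap<Long, Long> usedSlots = new TreeMap<>();   // slot -> slot, stands in for the slot keys
    private long next = 0;                                           // stands in for the persisted counter

    /** Returns a free id in [0, max), or -1 if every slot is taken. */
    long allocate(long max) {
        long current = next;
        // Probe forward past occupied slots, wrapping at most once around the ring.
        for (long scanned = 0; scanned < max && usedSlots.containsKey(current); scanned++) {
            current = (current + 1) % max;
        }
        if (usedSlots.containsKey(current)) return -1;               // full
        usedSlots.put(current, current);
        next = (current + 1) % max;
        return current;
    }

    void release(long id) {
        usedSlots.remove(id);
    }

    public static void main(String[] args) {
        CyclicIdSketch ids = new CyclicIdSketch();
        System.out.println(ids.allocate(4));  // 0
        System.out.println(ids.allocate(4));  // 1
        ids.release(0);
        System.out.println(ids.allocate(4));  // 2
        System.out.println(ids.allocate(4));  // 3
        System.out.println(ids.allocate(4));  // 0 (wrapped, reuses the released slot)
    }
}
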
a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java new file mode 100644 index 0000000000..1ee7dd59bd --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java @@ -0,0 +1,63 @@ +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.store.HgKVStore; +import org.apache.hugegraph.pd.store.HgKVStoreImpl; +import org.apache.hugegraph.pd.store.RaftKVStore; + +/** + * 存储工厂类,创建相关对象的存储类 + */ +public class MetadataFactory { + + private static HgKVStore store = null; + + public static HgKVStore getStore(PDConfig pdConfig){ + if ( store == null ){ + synchronized (MetadataFactory.class){ + if ( store == null ) { + HgKVStore proto = new HgKVStoreImpl(); + //proto.init(pdConfig); + store = pdConfig.getRaft().isEnable() ? + new RaftKVStore(RaftEngine.getInstance(), proto) : + proto; + store.init(pdConfig); + } + } + } + return store; + } + + public static void closeStore(){ + if ( store != null ) + store.close(); + } + + public static StoreInfoMeta newStoreInfoMeta(PDConfig pdConfig) { + return new StoreInfoMeta(pdConfig); + } + + public static PartitionMeta newPartitionMeta(PDConfig pdConfig) { + return new PartitionMeta(pdConfig); + } + public static IdMetaStore newHugeServerMeta(PDConfig pdConfig) { + return new IdMetaStore(pdConfig); + } + public static DiscoveryMetaStore newDiscoveryMeta(PDConfig pdConfig) { + return new DiscoveryMetaStore(pdConfig); + } + public static ConfigMetaStore newConfigMeta(PDConfig pdConfig) { + return new ConfigMetaStore(pdConfig); + } + public static TaskInfoMeta newTaskInfoMeta(PDConfig pdConfig) { return new TaskInfoMeta(pdConfig);} + + + public static QueueStore newQueueStore(PDConfig pdConfig) { + return new QueueStore(pdConfig); + } + + public static LogMeta newLogMeta(PDConfig pdConfig) { + return new LogMeta(pdConfig); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java new file mode 100644 index 0000000000..7ad150e490 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java @@ -0,0 +1,358 @@ +package org.apache.hugegraph.pd.meta; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import org.apache.commons.lang3.StringUtils; + +import java.nio.charset.Charset; + +public class MetadataKeyHelper { + + public static final char DELIMITER = '/'; + + private static final String STORE = "STORE"; + private static final String ACTIVESTORE = "ACTIVESTORE"; + private static final String STORESTATUS = "STORESTATUS"; + private static final String PARTITION = "PARTITION"; + private static final String PARTITION_V36 = "PARTITION_V36"; + private static final String SHARDGROUP = "SHARDGROUP"; + + private static final String PARTITION_STATUS = "PARTITION_STATUS"; + private static final String GRAPH = "GRAPH"; + private static final String GRAPHMETA = "GRAPHMETA"; + private static final String GRAPH_SPACE = "GRAPH_SPACE"; + private static final String PD_CONFIG = "PD_CONFIG"; + private static final String TASK_SPLIT = "TASK_SPLIT"; + private static final String TASK_MOVE = "TASK_MOVE"; + private static final String LOG_RECORD = "LOG_RECORD"; + + private static final String QUEUE = "QUEUE"; + + public static byte[] getStoreInfoKey(final long storeId) { + //STORE/{storeId} + 
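
MetadataFactory.getStore above builds the shared HgKVStore lazily with double-checked locking so that every metadata store reuses one backend, raft-wrapped or plain depending on configuration. A generic sketch of that initialization pattern as it is commonly written with a volatile field; the Supplier stands in for the choice of backend and none of the types below are PD classes:

import java.util.function.Supplier;

public class LazySingletonSketch<T> {
    private volatile T instance;                 // volatile so the fully built object is visible to all threads
    private final Supplier<T> factory;

    public LazySingletonSketch(Supplier<T> factory) {
        this.factory = factory;
    }

    public T get() {
        T local = instance;
        if (local == null) {                     // first check without locking
            synchronized (this) {
                local = instance;
                if (local == null) {             // second check under the lock
                    local = factory.get();       // e.g. build a raft-backed store or a plain one
                    instance = local;
                }
            }
        }
        return local;
    }
}
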
String key = StringBuilderHelper.get() + .append(STORE).append(DELIMITER) + .append(storeId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getActiveStoreKey(final long storeId) { + //ACTIVESTORE/{storeId} + String key = StringBuilderHelper.get() + .append(ACTIVESTORE).append(DELIMITER) + .append(storeId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getActiveStorePrefix() { + //ACTIVESTORE + String key = StringBuilderHelper.get() + .append(ACTIVESTORE).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getStorePrefix() { + //STORE + String key = StringBuilderHelper.get() + .append(STORE).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getStoreStatusKey(final long storeId) { + //STORESTATUS/{storeId} + String key = StringBuilderHelper.get() + .append(STORESTATUS).append(DELIMITER) + .append(storeId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getShardGroupKey(final long groupId) { + //SHARDGROUP/{storeId} + String key = StringBuilderHelper.get() + .append(SHARDGROUP).append(DELIMITER) + .append(groupId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getShardGroupPrefix() { + //SHARDGROUP + String key = StringBuilderHelper.get() + .append(SHARDGROUP).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getPartitionKey(final String graphName, final int partId) { + //GRAPH/{graphName}/Partition/{partId} + String key = StringBuilderHelper.get() + .append(GRAPH).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(PARTITION).append(DELIMITER) + .append(partId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getPartitionV36Key(final String graphName, final int partId) { + // GRAPH/{graphName}/PartitionV36/{partId} + String key = StringBuilderHelper.get() + .append(GRAPH).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(PARTITION_V36).append(DELIMITER) + .append(partId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getPartitionPrefix(final String graphName) { + //GRAPH/{graph}/Partition + String key = StringBuilderHelper.get() + .append(GRAPH).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(PARTITION).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getShardKey(final long storeId, final int partId) { + //SHARD/{graphName}/{type} + String key = StringBuilderHelper.get() + .append(SHARDGROUP).append(DELIMITER) + .append(storeId).append(DELIMITER) + .append(partId) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getShardPrefix(final long storeId) { + //SHARD/{graphName}/{type} + String key = StringBuilderHelper.get() + .append(SHARDGROUP).append(DELIMITER) + .append(storeId).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getGraphKey(final String graphName) { + //GRAPHMETA/{graphName} + String key = StringBuilderHelper.get() + .append(GRAPHMETA).append(DELIMITER) + .append(graphName).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getGraphPrefix() { + //GRAPHMETA/{ + 
String key = StringBuilderHelper.get() + .append(GRAPHMETA).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getPartitionStatusKey(String graphName, int id) { + //PARTITION_STATUS/{ + String key = StringBuilderHelper.get() + .append(PARTITION_STATUS) + .append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(id).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getPartitionStatusPrefixKey(String graphName) { + //PARTITION_STATUS/{ + StringBuilder builder = StringBuilderHelper.get().append(PARTITION_STATUS) + .append(DELIMITER); + if (!StringUtils.isEmpty(graphName)) { + builder.append(graphName).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getGraphSpaceKey(String graphSpace) { + //GRAPH_SPACE/{ + StringBuilder builder = StringBuilderHelper.get().append( + GRAPH_SPACE).append(DELIMITER); + if (!StringUtils.isEmpty(graphSpace)) { + builder.append(graphSpace).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getPdConfigKey(String configKey) { + //PD_CONFIG/{ + StringBuilder builder = StringBuilderHelper.get().append( + PD_CONFIG).append(DELIMITER); + if (!StringUtils.isEmpty(configKey)) { + builder.append(configKey).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getQueueItemPrefix() { + //QUEUE + String key = StringBuilderHelper.get() + .append(QUEUE).append(DELIMITER) + .toString(); + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getQueueItemKey(String itemId) { + //QUEUE + StringBuilder builder = StringBuilderHelper.get() + .append(QUEUE).append(DELIMITER); + if (!StringUtils.isEmpty(itemId)) { + builder.append(itemId).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getSplitTaskKey(String graphName, int groupId){ + // TASK_SPLIT/{GraphName}/{partitionID} + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_SPLIT).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(groupId); + return builder.toString().getBytes(Charset.defaultCharset()); + } + public static byte[] getSplitTaskPrefix(String graphName){ + // TASK_SPLIT/{GraphName}/ + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_SPLIT).append(DELIMITER) + .append(graphName); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getAllSplitTaskPrefix(){ + // TASK_SPLIT/{GraphName}/ + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_SPLIT).append(DELIMITER); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getMoveTaskKey(String graphName, int targetGroupId, int groupId){ + // TASK_MOVE/{GraphName}/to PartitionID/{source partitionID} + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_MOVE).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(targetGroupId).append(DELIMITER) + .append(groupId); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getMoveTaskPrefix(String graphName){ + // TASK_MOVE/{graphName}/toPartitionId/ + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_MOVE).append(DELIMITER) + .append(graphName); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + 
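
All of the keys assembled above follow one convention: an uppercase namespace tag, then the variable segments, each closed with the '/' delimiter, so that related records can be fetched with a single prefix scan. A tiny standalone illustration of the partition key and its prefix; the helper names are invented and the charset is fixed only for the example:

import java.nio.charset.StandardCharsets;

public class KeyLayoutSketch {
    static final char DELIMITER = '/';

    static byte[] partitionKey(String graphName, int partId) {
        // GRAPH/{graphName}/PARTITION/{partId}
        return ("GRAPH" + DELIMITER + graphName + DELIMITER + "PARTITION" + DELIMITER + partId)
                .getBytes(StandardCharsets.UTF_8);
    }

    static byte[] partitionPrefix(String graphName) {
        // Each segment is closed with the delimiter, so the prefix for "g1" never matches keys of "g10".
        return ("GRAPH" + DELIMITER + graphName + DELIMITER + "PARTITION" + DELIMITER)
                .getBytes(StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        String key = new String(partitionKey("hugegraph", 3), StandardCharsets.UTF_8);
        String prefix = new String(partitionPrefix("hugegraph"), StandardCharsets.UTF_8);
        System.out.println(key);                    // GRAPH/hugegraph/PARTITION/3
        System.out.println(key.startsWith(prefix)); // true: the key is reachable by a prefix scan
    }
}
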
public static byte[] getAllMoveTaskPrefix(){ + // TASK_MOVE/{graphName}/toPartitionId/ + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_MOVE).append(DELIMITER); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getLogKey(Metapb.LogRecord record){ + //LOG_RECORD/{action}/{time}/ + StringBuilder builder = StringBuilderHelper.get() + .append(LOG_RECORD) + .append(DELIMITER) + .append(record.getAction()) + .append(DELIMITER) + .append(record.getTimestamp()); + return builder.toString().getBytes(Charset.defaultCharset()); + } + public static byte[] getLogKeyPrefix(String action, long time){ + //LOG_DATA_SPLIT/{time}/{GraphName} + StringBuilder builder = StringBuilderHelper.get() + .append(LOG_RECORD) + .append(DELIMITER) + .append(action) + .append(DELIMITER) + .append(time); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getKVPrefix(String prefix,String key) { + //K@/{key} + StringBuilder builder = StringBuilderHelper.get() + .append(prefix).append(DELIMITER); + if (!StringUtils.isEmpty(key)) { + builder.append(key).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getKVTTLPrefix(String ttlPrefix,String prefix,String key) { + StringBuilder builder = StringBuilderHelper.get().append(ttlPrefix) + .append(prefix).append(DELIMITER); + if (!StringUtils.isEmpty(key)) { + builder.append(key).append(DELIMITER); + } + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static String getKVWatchKeyPrefix(String key, String watchDelimiter, long clientId) { + StringBuilder builder = StringBuilderHelper.get(); + builder.append(watchDelimiter).append(DELIMITER); + builder.append(key == null ? "" : key).append(DELIMITER); + builder.append(clientId); + return builder.toString(); + } + public static String getKVWatchKeyPrefix(String key, String watchDelimiter) { + StringBuilder builder = StringBuilderHelper.get(); + builder.append(watchDelimiter).append(DELIMITER); + builder.append(key == null ? 
"" : key).append(DELIMITER); + return builder.toString(); + } + + public static char getDelimiter(){ + return DELIMITER; + } + + public static StringBuilder getStringBuilderHelper(){ + return StringBuilderHelper.get(); + } + + static class StringBuilderHelper { + private static final int DISCARD_LIMIT = 1024 << 3; // 8k + + private static final ThreadLocal holderThreadLocal = ThreadLocal + .withInitial(StringBuilderHolder::new); + + public static StringBuilder get() { + final StringBuilderHolder holder = holderThreadLocal.get(); + return holder.getStringBuilder(); + } + + public static void truncate() { + final StringBuilderHolder holder = holderThreadLocal.get(); + holder.truncate(); + } + + private static class StringBuilderHolder { + + private final StringBuilder buf = new StringBuilder(); + + private StringBuilder getStringBuilder() { + truncate(); + return buf; + } + + private void truncate() { + buf.setLength(0); + } + } + } + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java new file mode 100644 index 0000000000..bf91d80b0a --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java @@ -0,0 +1,168 @@ +package org.apache.hugegraph.pd.meta; + +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.lang3.ArrayUtils; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.store.HgKVStore; +import org.apache.hugegraph.pd.store.KV; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.google.protobuf.Parser; + +/** + * @author zhangyingjie + * @date 2022/1/9 + **/ +public class MetadataRocksDBStore extends MetadataStoreBase { + + HgKVStore store; + + PDConfig pdConfig; + + public MetadataRocksDBStore(PDConfig pdConfig){ + store = MetadataFactory.getStore(pdConfig); + this.pdConfig = pdConfig; + } + + private HgKVStore getStore(){ + if (store == null) { + store = MetadataFactory.getStore(pdConfig); + } + return store; + } + + @Override + public byte[] getOne(byte[] key) throws PDException { + try { + byte[] bytes = store.get(key); + return bytes; + }catch (Exception e){ + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + } + + @Override + public E getOne(Parser parser, byte[] key) throws PDException { + try { + byte[] bytes = store.get(key); + if (ArrayUtils.isEmpty(bytes)) return null; + return parser.parseFrom(bytes); + }catch (Exception e){ + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + } + + @Override + public void put(byte[] key, byte[] value) throws PDException { + try { + getStore().put(key, value); + } catch (Exception e){ + throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException { + this.store.putWithTTL(key,value,ttl); + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException { + this.store.putWithTTL(key,value,ttl,timeUnit); + } + + @Override + public byte[] getWithTTL(byte[] key) throws PDException { + return this.store.getWithTTL(key); + } + + @Override + public List getListWithTTL(byte[] key) throws PDException { + return this.store.getListWithTTL(key); + } + + @Override + public void removeWithTTL(byte[] key) throws PDException { + 
this.store.removeWithTTL(key); + } + + @Override + public List scanPrefix(byte[] prefix) throws PDException { + //TODO 使用rocksdb 前缀查询 + try { + return this.store.scanPrefix(prefix); + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + } + + @Override + public List scanRange(byte[] start, byte[] end) throws PDException { + return this.store.scanRange(start,end); + } + + @Override + public List scanRange(Parser parser,byte[] start,byte[] end) throws PDException { + List stores = new LinkedList<>(); + try { + List kvs = this.scanRange(start, end); + for (KV keyValue : kvs) { + stores.add(parser.parseFrom((byte[])keyValue.getValue())); + } + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + return stores; + } + + @Override + public List scanPrefix(Parser parser, byte[] prefix) throws PDException { + List stores = new LinkedList<>(); + try { + List kvs = this.scanPrefix(prefix); + for (KV keyValue : kvs) { + stores.add(parser.parseFrom((byte[])keyValue.getValue())); + } + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + return stores; + } + + @Override + public boolean containsKey(byte[] key) throws PDException { + return !ArrayUtils.isEmpty(store.get(key)); + } + + @Override + public long remove(byte[] key) throws PDException { + try { + return this.store.remove(key); + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } + } + + @Override + public long removeByPrefix(byte[] prefix) throws PDException { + try { + return this.store.removeByPrefix(prefix); + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } + } + + @Override + public void clearAllCache() throws PDException { + this.store.clear(); + } + + @Override + public void close() { + + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java new file mode 100644 index 0000000000..409bb55aff --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java @@ -0,0 +1,101 @@ +package org.apache.hugegraph.pd.meta; + + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Pdpb; + +import org.apache.hugegraph.pd.store.KV; + +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import com.google.protobuf.Parser; + +public abstract class MetadataStoreBase { + + // public long timeout = 3; // 请求超时时间,默认三秒 + + public abstract byte[] getOne(byte[] key) throws PDException; + + public abstract E getOne(Parser parser, byte[] key) throws PDException; + + public abstract void put(byte[] key, byte[] value) throws PDException; + + /** + * 带有过期时间的put + */ + + public abstract void putWithTTL(byte[] key, + byte[] value, + long ttl) throws PDException; + public abstract void putWithTTL(byte[] key, + byte[] value, + long ttl, TimeUnit timeUnit) throws PDException; + public abstract byte[] getWithTTL(byte[] key) throws PDException; + + public abstract List getListWithTTL(byte[] key) throws PDException; + + public abstract void removeWithTTL(byte[] key) throws PDException; + /** + * 前缀查询 + * + * @param prefix + * @return + * @throws PDException + */ + public abstract List scanPrefix(byte[] prefix) throws PDException; + public abstract List scanRange(byte[] start,byte[] end) throws 
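
MetadataRocksDBStore above repeats one pattern: read raw bytes (single key, prefix, or range), decode each value with the caller-supplied protobuf parser, and wrap any backend failure in a PDException carrying a read or write error code. A generic sketch of that decode-and-wrap step, with a plain functional decoder and a custom exception standing in for the protobuf Parser and PDException:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

public class ScanAndParseSketch {

    /** Stand-in for the KV backend; returns raw values whose keys start with the prefix. */
    interface RawStore {
        List<byte[]> scanPrefix(byte[] prefix) throws Exception;
    }

    /** Stand-in for an exception type carrying an error code. */
    static class StoreException extends Exception {
        final int code;
        StoreException(int code, Throwable cause) { super(cause); this.code = code; }
    }

    static final int READ_ERROR = 1;

    /** Decode every value under the prefix with the supplied decoder, wrapping failures uniformly. */
    static <E> List<E> scanPrefix(RawStore store, Function<byte[], E> decoder, byte[] prefix)
            throws StoreException {
        List<E> result = new ArrayList<>();
        try {
            for (byte[] value : store.scanPrefix(prefix)) {
                result.add(decoder.apply(value));    // in PD this would be a protobuf parseFrom call
            }
        } catch (Exception e) {
            throw new StoreException(READ_ERROR, e); // one error code per failure class
        }
        return result;
    }
}
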
PDException; + public abstract List scanRange(Parser parser, byte[] start,byte[] end) throws PDException; + /** + * 前缀查询 + * + * @param prefix + * @return + * @throws PDException + */ + + public abstract List scanPrefix(Parser parser, byte[] prefix) throws PDException; + + + /** + * 检查Key是否存在 + * + * @param key + * @return + * @throws PDException + */ + + public abstract boolean containsKey(byte[] key) throws PDException; + + public abstract long remove(byte[] key) throws PDException; + + public abstract long removeByPrefix(byte[] prefix) throws PDException; + + public abstract void clearAllCache() throws PDException; + + public abstract void close() throws IOException; + + public T getInstanceWithTTL(Parser parser,byte[] key) throws PDException{ + try{ + byte[] withTTL = this.getWithTTL(key); + return parser.parseFrom(withTTL); + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE,e); + } + } + public List getInstanceListWithTTL(Parser parser,byte[] key) + throws PDException{ + try{ + List withTTL = this.getListWithTTL(key); + LinkedList ts = new LinkedList<>(); + for (int i = 0; i < withTTL.size(); i++) { + ts.add(parser.parseFrom((byte[]) withTTL.get(i))); + } + return ts; + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE,e); + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java new file mode 100644 index 0000000000..354bdc8913 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java @@ -0,0 +1,272 @@ +package org.apache.hugegraph.pd.meta; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.common.PartitionCache; + +import org.apache.hugegraph.pd.config.PDConfig; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import lombok.extern.slf4j.Slf4j; + +import java.util.ArrayList; +import java.util.List; + +/** + * 分区信息管理 + */ +@Slf4j +public class PartitionMeta extends MetadataRocksDBStore { + private PDConfig pdConfig; + private PartitionCache cache; + + static String CID_GRAPH_ID_KEY = "GraphID"; + static int CID_GRAPH_ID_MAX = 0xFFFE; + + public PartitionMeta(PDConfig pdConfig) { + super(pdConfig); + this.pdConfig = pdConfig; + //this.timeout = pdConfig.getEtcd().getTimeout(); + this.cache = new PartitionCache(); + } + /** + * 初始化,加载所有的分区 + */ + public void init() throws PDException { + loadShardGroups(); + loadGraphs(); + } + + public void reload() throws PDException { + cache.clear(); + loadShardGroups(); + loadGraphs(); + } + + private void loadGraphs() throws PDException { + byte[] key = MetadataKeyHelper.getGraphPrefix(); + List graphs = scanPrefix(Metapb.Graph.parser(), key); + for (Metapb.Graph graph : graphs) { + cache.updateGraph(graph); + loadPartitions(graph); + } + } + + /** + * partition 和 shard group分开存储,再init的时候,需要加载进来 + * @throws PDException + */ + private void loadShardGroups() throws PDException { + byte[] shardGroupPrefix = MetadataKeyHelper.getShardGroupPrefix(); + for (var shardGroup : scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix)){ + cache.updateShardGroup(shardGroup); + } + } + + private void loadPartitions(Metapb.Graph graph) throws PDException{ + byte[] prefix = MetadataKeyHelper.getPartitionPrefix( graph.getGraphName()); + List partitions = scanPrefix(Metapb.Partition.parser(), prefix); + partitions.forEach(p->{ + cache.updatePartition(p); + }); + } + + /** + * 根据id查找分区 (先从缓存找,再到数据库中找) + * @param 
graphName + * @param partId + * @return + * @throws PDException + */ + public Metapb.Partition getPartitionById(String graphName, int partId) throws PDException { + var pair = cache.getPartitionById(graphName, partId); + Metapb.Partition partition; + if (pair == null) { + byte[] key = MetadataKeyHelper.getPartitionKey( graphName, partId); + partition = getOne(Metapb.Partition.parser(), key); + if ( partition != null ) { + cache.updatePartition(partition); + } + }else{ + partition = pair.getKey(); + } + return partition; + } + public List getPartitionById(int partId) throws PDException { + List partitions = new ArrayList<>(); + cache.getGraphs().forEach(graph -> { + cache.getPartitions(graph.getGraphName()).forEach(partition -> { + if ( partition.getId() == partId ) + partitions.add(partition); + }); + }); + return partitions; + } + + /** + * 根据code查找分区 + + */ + public Metapb.Partition getPartitionByCode(String graphName, long code) throws PDException { + var pair = cache.getPartitionByCode(graphName, code); + if (pair != null){ + return pair.getKey(); + } + return null; + } + + public Metapb.Graph getAndCreateGraph(String graphName) throws PDException { + return getAndCreateGraph(graphName, pdConfig.getPartition().getTotalCount()); + } + + public Metapb.Graph getAndCreateGraph(String graphName, int partitionCount) throws PDException { + + if (partitionCount > pdConfig.getPartition().getTotalCount()) { + partitionCount = pdConfig.getPartition().getTotalCount(); + } + + // 管理图,只有一个分区 + if (graphName.endsWith("/s") || graphName.endsWith("/m")){ + partitionCount = 1; + } + + Metapb.Graph graph = cache.getGraph(graphName); + if ( graph == null ){ + // 保存图信息 + graph = Metapb.Graph.newBuilder() + .setGraphName(graphName) + .setPartitionCount(partitionCount) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + updateGraph(graph); + } + return graph; + } + + /** + * 保存分区信息 + * @param partition + * @return + * @throws PDException + */ + public Metapb.Partition updatePartition(Metapb.Partition partition) throws PDException { + if ( !cache.hasGraph(partition.getGraphName())){ + getAndCreateGraph(partition.getGraphName()); + } + byte[] key = MetadataKeyHelper.getPartitionKey( partition.getGraphName(), partition.getId()); + put(key, partition.toByteString().toByteArray()); + cache.updatePartition(partition); + return partition; + } + + /** + * 检查数据库,是否存在对应的图,不存在,则创建。 + * 更新partition的 version, conf version 和 shard list + * @param partition + * @return + * @throws PDException + */ + public Metapb.Partition updateShardList(Metapb.Partition partition) throws PDException { + if ( !cache.hasGraph(partition.getGraphName())){ + getAndCreateGraph(partition.getGraphName()); + } + + Metapb.Partition pt = getPartitionById(partition.getGraphName(), partition.getId()); + // pt = pt.toBuilder().setVersion(partition.getVersion()) + // .setConfVer(partition.getConfVer()) + // .clearShards() + // .addAllShards(partition.getShardsList()).build(); + + byte[] key = MetadataKeyHelper.getPartitionKey( pt.getGraphName(), pt.getId()); + put(key, pt.toByteString().toByteArray()); + cache.updatePartition(pt); + return partition; + } + /** + * 删除所有分区 + */ + public long removeAllPartitions(String graphName) throws PDException { + cache.removeAll(graphName); + byte[] prefix = MetadataKeyHelper.getPartitionPrefix( graphName); + return removeByPrefix(prefix); + } + + public long removePartition(String graphName, int id) throws PDException { + cache.remove(graphName, id); + byte[] key = MetadataKeyHelper.getPartitionKey( 
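
getPartitionById above is a cache-aside read: consult the in-memory PartitionCache first, fall back to the key-value store on a miss, and backfill the cache with whatever the store returns; removePartition does the inverse and drops the cache entry alongside the stored key. A minimal generic sketch of that pattern; the cache and loader types are placeholders, not the PD classes:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

public class CacheAsideSketch<K, V> {
    private final Map<K, V> cache = new ConcurrentHashMap<>();
    private final Function<K, V> loader;          // stands in for the backing-store read

    CacheAsideSketch(Function<K, V> loader) {
        this.loader = loader;
    }

    V get(K key) {
        V cached = cache.get(key);
        if (cached != null) {
            return cached;                        // cache hit
        }
        V loaded = loader.apply(key);             // miss: go to the backing store
        if (loaded != null) {
            cache.put(key, loaded);               // backfill so the next read is cheap
        }
        return loaded;
    }

    void invalidate(K key) {
        cache.remove(key);                        // mirror the delete path: drop the cache entry too
    }
}
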
graphName, id); + return remove(key); + } + + public void updatePartitionStats(Metapb.PartitionStats stats) throws PDException { + for(String graphName : stats.getGraphNameList()) { + byte[] prefix = MetadataKeyHelper.getPartitionStatusKey(graphName, stats.getId()); + put(prefix, stats.toByteArray()); + } + } + + /** + * 获取分区状态 + */ + public Metapb.PartitionStats getPartitionStats(String graphName, int id) throws PDException { + byte[] prefix = MetadataKeyHelper.getPartitionStatusKey(graphName, id); + return getOne(Metapb.PartitionStats.parser(),prefix); + } + + + /** + * 获取分区状态 + */ + public List getPartitionStats(String graphName) throws PDException { + byte[] prefix = MetadataKeyHelper.getPartitionStatusPrefixKey(graphName); + return scanPrefix(Metapb.PartitionStats.parser(),prefix); + } + + /** + * 更新图信息 + * @param graph + * @return + */ + public Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { + log.info("updateGraph {}", graph); + byte[] key = MetadataKeyHelper.getGraphKey( graph.getGraphName()); + // 保存图信息 + put(key, graph.toByteString().toByteArray()); + cache.updateGraph(graph); + return graph; + } + + public List getPartitions(){ + List partitions = new ArrayList<>(); + List graphs = cache.getGraphs(); + graphs.forEach(e->{ + partitions.addAll(cache.getPartitions(e.getGraphName())); + }); + return partitions; + } + + public List getPartitions(String graphName){ + return cache.getPartitions(graphName); + } + + public List getGraphs() throws PDException { + byte[] key = MetadataKeyHelper.getGraphPrefix(); + return scanPrefix(Metapb.Graph.parser(), key); + } + + public Metapb.Graph getGraph(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getGraphKey( graphName); + return getOne(Metapb.Graph.parser(), key); + } + + /** + * 删除图,并删除图id + */ + public long removeGraph(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getGraphKey( graphName); + long l = remove(key); + return l; + } + + public PartitionCache getPartitionCache(){ + return cache; + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java new file mode 100644 index 0000000000..b3eb0c9714 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java @@ -0,0 +1,34 @@ +package org.apache.hugegraph.pd.meta; + +import com.baidu.hugegraph.pd.common.HgAssert; +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; + +import com.baidu.hugegraph.pd.grpc.Metapb; + +import java.util.List; + +/** + * @author lynn.bond@hotmail.com on 2022/2/10 + */ +public class QueueStore extends MetadataRocksDBStore { + QueueStore(PDConfig pdConfig) { + super(pdConfig); + } + + public void addItem(Metapb.QueueItem queueItem) throws PDException { + HgAssert.isArgumentNotNull(queueItem, "queueItem"); + byte[] key = MetadataKeyHelper.getQueueItemKey(queueItem.getItemId()); + put(key, queueItem.toByteString().toByteArray()); + } + + public void removeItem(String itemId) throws PDException { + remove(MetadataKeyHelper.getQueueItemKey(itemId)); + } + + public List getQueue() throws PDException { + byte[] prefix = MetadataKeyHelper.getQueueItemPrefix(); + return scanPrefix(Metapb.QueueItem.parser(), prefix); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java new file mode 100644 index 0000000000..9544d87d41 
--- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java @@ -0,0 +1,186 @@ +package org.apache.hugegraph.pd.meta; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import lombok.extern.slf4j.Slf4j; + +import java.util.LinkedList; +import java.util.List; +import java.util.ListIterator; + +/** + * Store信息存储 + */ +@Slf4j +public class StoreInfoMeta extends MetadataRocksDBStore { + private PDConfig pdConfig; + + public StoreInfoMeta(PDConfig pdConfig) { + super(pdConfig); + this.pdConfig = pdConfig; + // this.timeout = pdConfig.getDiscovery().getHeartbeatOutTimes(); + } + + /** + * 更新Store信息 + * @param store + * @throws PDException + */ + public void updateStore(Metapb.Store store) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(store.getId()); + put(storeInfoKey, store.toByteArray()); + } + + /** + * 更新Store的存活状态 + * + * @param store + */ + public void keepStoreAlive(Metapb.Store store) throws PDException { + byte[] activeStoreKey = MetadataKeyHelper.getActiveStoreKey(store.getId()); + putWithTTL(activeStoreKey, store.toByteArray(), pdConfig.getStore().getKeepAliveTimeout()); + } + + + public void removeActiveStore(Metapb.Store store) throws PDException { + byte[] activeStoreKey = MetadataKeyHelper.getActiveStoreKey(store.getId()); + removeWithTTL(activeStoreKey); + } + + public Metapb.Store getStore(Long storeId) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(storeId); + Metapb.Store store = getOne(Metapb.Store.parser(),storeInfoKey); + return store; + } + + /** + * 获取所有的store + * @param graphName + * @return + * @throws PDException + */ + public List getStores(String graphName) throws PDException { + byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); + return scanPrefix(Metapb.Store.parser(),storePrefix); + } + + /** + * 获取活跃的Store + * + * @param graphName + * @return + * @throws PDException + */ + public List getActiveStores(String graphName) throws PDException { + byte[] activePrefix = MetadataKeyHelper.getActiveStorePrefix(); + List listWithTTL = getInstanceListWithTTL(Metapb.Store.parser(), + activePrefix); + return listWithTTL; + } + public List getActiveStores() throws PDException { + byte[] activePrefix = MetadataKeyHelper.getActiveStorePrefix(); + List listWithTTL = getInstanceListWithTTL(Metapb.Store.parser(), + activePrefix); + return listWithTTL; + } + + /** + * 检查storeid是否存在 + * + * @param storeId + * @return + */ + public boolean storeExists(Long storeId) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(storeId); + return containsKey(storeInfoKey); + } + + /** + * 更新存储状态信息 + * + * @param storeStats + */ + public Metapb.StoreStats updateStoreStats(Metapb.StoreStats storeStats) throws PDException { + byte[] storeStatusKey = MetadataKeyHelper.getStoreStatusKey(storeStats.getStoreId()); + + put(storeStatusKey, storeStats.toByteArray()); + return storeStats; + } + + public long removeStore(long storeId) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(storeId); + return remove(storeInfoKey); + } + + public long removeAll() throws PDException { + byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); + return this.removeByPrefix(storePrefix); + } + + public void updateShardGroup(Metapb.ShardGroup group) throws PDException { + byte[] shardGroupKey = MetadataKeyHelper.getShardGroupKey(group.getId()); + put(shardGroupKey, 
group.toByteArray()); + } + + public void deleteShardGroup(int groupId) throws PDException { + byte[] shardGroupKey = MetadataKeyHelper.getShardGroupKey(groupId); + remove(shardGroupKey); + } + + public static boolean shardGroupEquals(List g1, List g2) { + ListIterator e1 = g1.listIterator(); + ListIterator e2 = g2.listIterator(); + while (e1.hasNext() && e2.hasNext()) { + Metapb.Shard o1 = e1.next(); + Metapb.Shard o2 = e2.next(); + if (!(o1 == null ? o2 == null : o1.getStoreId() == o2.getStoreId())) { + return false; + } + } + return !(e1.hasNext() || e2.hasNext()); + } + + public Metapb.ShardGroup getShardGroup(int groupId) throws PDException { + byte[] shardGroupKey = MetadataKeyHelper.getShardGroupKey(groupId); + return getOne(Metapb.ShardGroup.parser(), shardGroupKey); + } + + public int getShardGroupCount() throws PDException { + byte[] shardGroupPrefix = MetadataKeyHelper.getShardGroupPrefix(); + return scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix).size(); + } + + public List getShardGroups() throws PDException { + byte[] shardGroupPrefix = MetadataKeyHelper.getShardGroupPrefix(); + return scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix); + } + + public Metapb.StoreStats getStoreStats(long storeId) throws PDException { + byte[] storeStatusKey = MetadataKeyHelper.getStoreStatusKey(storeId); + Metapb.StoreStats stats = getOne(Metapb.StoreStats.parser(), + storeStatusKey); + return stats; + } + /** + * @return store及状态信息 + * @throws PDException + */ + public List getStoreStatus(boolean isActive) throws PDException { + byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); + List stores =isActive ? getActiveStores() : + scanPrefix(Metapb.Store.parser(),storePrefix); + LinkedList list = new LinkedList<>(); + for (int i = 0; i < stores.size(); i++) { + Metapb.Store store = stores.get(i); + Metapb.StoreStats stats = getStoreStats(store.getId()); + if (stats != null) + store = Metapb.Store.newBuilder(store).setStats(getStoreStats(store.getId())) + .build(); + list.add(store); + } + return list; + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java new file mode 100644 index 0000000000..85c053882c --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java @@ -0,0 +1,113 @@ +package org.apache.hugegraph.pd.meta; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; + +import com.baidu.hugegraph.pd.grpc.MetaTask; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.pulse.MovePartition; +import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition; + +import java.util.List; + +/** + * 任务管理 + */ +public class TaskInfoMeta extends MetadataRocksDBStore{ + public TaskInfoMeta(PDConfig pdConfig) { + super(pdConfig); + } + + /** + * 添加分区分裂任务 + */ + public void addSplitTask(int groupID, Metapb.Partition partition, SplitPartition splitPartition) + throws PDException { + byte[] key = MetadataKeyHelper.getSplitTaskKey(partition.getGraphName(), groupID); + MetaTask.Task task = MetaTask.Task.newBuilder() + .setType(MetaTask.TaskType.Split_Partition) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setSplitPartition(splitPartition) + .build(); + put(key, task.toByteString().toByteArray()); + } + + public void updateSplitTask(MetaTask.Task task) throws PDException { + var partition = 
task.getPartition(); + byte[] key = MetadataKeyHelper.getSplitTaskKey(partition.getGraphName(), partition.getId()); + put(key, task.toByteString().toByteArray()); + } + + public MetaTask.Task getSplitTask(String graphName, int groupID) throws PDException { + byte[] key = MetadataKeyHelper.getSplitTaskKey(graphName, groupID); + return getOne(MetaTask.Task.parser(), key); + } + + public List scanSplitTask(String graphName) throws PDException { + byte[] prefix = MetadataKeyHelper.getSplitTaskPrefix(graphName); + return scanPrefix(MetaTask.Task.parser(), prefix); + } + + public void removeSplitTaskPrefix(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getSplitTaskPrefix(graphName); + removeByPrefix(key); + } + + public boolean hasSplitTaskDoing() throws PDException { + byte[] key = MetadataKeyHelper.getAllSplitTaskPrefix(); + return scanPrefix(key).size() > 0; + } + + public void addMovePartitionTask(Metapb.Partition partition, MovePartition movePartition) + throws PDException { + byte[] key = MetadataKeyHelper.getMoveTaskKey(partition.getGraphName(), + movePartition.getTargetPartition().getId(), partition.getId()); + + MetaTask.Task task = MetaTask.Task.newBuilder() + .setType(MetaTask.TaskType.Move_Partition) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setMovePartition(movePartition) + .build(); + put(key, task.toByteArray()); + } + + public void updateMovePartitionTask(MetaTask.Task task) + throws PDException { + + byte[] key = MetadataKeyHelper.getMoveTaskKey(task.getPartition().getGraphName(), + task.getMovePartition().getTargetPartition().getId(), + task.getPartition().getId()); + put(key, task.toByteArray()); + } + + public MetaTask.Task getMovePartitionTask(String graphName, int targetId, int partId) throws PDException { + byte[] key = MetadataKeyHelper.getMoveTaskKey(graphName, targetId, partId); + return getOne(MetaTask.Task.parser(), key); + } + + public List scanMoveTask(String graphName) throws PDException { + byte[] prefix = MetadataKeyHelper.getMoveTaskPrefix(graphName); + return scanPrefix(MetaTask.Task.parser(), prefix); + } + + /** + * 按照prefix删除迁移任务,一次分组的 + * @param graphName 图名称 + * @throws PDException io error + */ + public void removeMoveTaskPrefix(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getMoveTaskPrefix(graphName); + removeByPrefix(key); + } + + public boolean hasMoveTaskDoing() throws PDException { + byte[] key = MetadataKeyHelper.getAllMoveTaskPrefix(); + return scanPrefix(key).size() > 0; + } + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java new file mode 100644 index 0000000000..5df461cafa --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java @@ -0,0 +1,29 @@ +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.Closure; +import com.alipay.sofa.jraft.Status; + +import java.util.concurrent.CompletableFuture; + +public class FutureClosureAdapter implements Closure { + public final CompletableFuture future = new CompletableFuture<>(); + private T resp; + + public T getResponse() { return this.resp; } + + public void setResponse(T resp) { + this.resp = resp; + future.complete(resp); + run(Status.OK()); + } + + public void failure(Throwable t){ + future.completeExceptionally(t); + run(new Status(-1, t.getMessage())); + } + + @Override + public void 
run(Status status) { + + } +} \ No newline at end of file diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java new file mode 100644 index 0000000000..4c648b56ca --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java @@ -0,0 +1,142 @@ +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.util.BytesUtil; +import com.alipay.sofa.jraft.util.Requires; +import com.caucho.hessian.io.Hessian2Input; +import com.caucho.hessian.io.Hessian2Output; + +import lombok.Data; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +@Data +public class KVOperation { + + /** + * Put operation + */ + public static final byte PUT = 0x01; + /** + * Get operation + */ + public static final byte GET = 0x02; + public static final byte DEL = 0x03; + public static final byte REMOVE_BY_PREFIX = 0x04; + public static final byte REMOVE = 0x05; + public static final byte PUT_WITH_TTL = 0x06; + public static final byte CLEAR = 0x07; + public static final byte PUT_WITH_TTL_UNIT = 0x08; + public static final byte REMOVE_WITH_TTL = 0x09; + /** + * Snapshot operation + */ + public static final byte SAVE_SNAPSHOT = 0x10; + public static final byte LOAD_SNAPSHOT = 0x11; + + private byte[] key; + private byte[] value; + private Object attach; // 原始对象,用于本机处理,减少一次反序列化操作 + private Object arg; + private byte op; + + public KVOperation(){ + + } + + public KVOperation(byte[] key, byte[] value, Object attach, byte op) { + this.key = key; + this.value = value; + this.attach = attach; + this.op = op; + } + public KVOperation(byte[] key, byte[] value, Object attach, byte op, Object arg) { + this.key = key; + this.value = value; + this.attach = attach; + this.op = op; + this.arg = arg; + } + + public byte[] toByteArray() throws IOException { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + bos.write(op); + Hessian2Output output = new Hessian2Output(bos); + output.writeObject(key); + output.writeObject(value); + output.writeObject(arg); + output.flush(); + return bos.toByteArray(); + } + } + + public static KVOperation fromByteArray(byte[] value) throws IOException { + + try (ByteArrayInputStream bis = new ByteArrayInputStream(value, 1, value.length - 1)) { + Hessian2Input input = new Hessian2Input(bis); + KVOperation op = new KVOperation(); + op.op = value[0]; + op.key = input.readBytes(); + op.value = input.readBytes(); + op.arg = input.readObject(); + input.close(); + return op; + } + } + + public static KVOperation createPut(final byte[] key, final byte[] value) { + Requires.requireNonNull(key, "key"); + Requires.requireNonNull(value, "value"); + return new KVOperation(key, value, null, PUT); + } + + public static KVOperation createGet(final byte[] key) { + Requires.requireNonNull(key, "key"); + return new KVOperation(key, BytesUtil.EMPTY_BYTES, null, GET); + } + + public static KVOperation createPutWithTTL(byte[] key, byte[] value, long ttl) { + Requires.requireNonNull(key, "key"); + Requires.requireNonNull(value, "value"); + return new KVOperation(key, value, value, PUT_WITH_TTL, + ttl); + } + public static KVOperation createPutWithTTL(byte[] key, byte[] value, long ttl, + TimeUnit timeUnit) { + Requires.requireNonNull(key, "key"); + Requires.requireNonNull(value, "value"); + return new KVOperation(key, value, value, PUT_WITH_TTL_UNIT, + new Object[] { ttl, timeUnit}); + 
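+        // Note (added for clarity): the TTL value and its TimeUnit are packed into an
+        // Object[] carried in the 'arg' field; RaftKVStore.invoke() unpacks them again
+        // when it handles the PUT_WITH_TTL_UNIT operation.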
} + + public static KVOperation createRemoveWithTTL(byte[] key) { + Requires.requireNonNull(key, "key"); + return new KVOperation(key, key, null, REMOVE_WITH_TTL); + } + + public static KVOperation createRemoveByPrefix(byte[] key) { + Requires.requireNonNull(key, "key"); + return new KVOperation(key, key, null, REMOVE_BY_PREFIX); + } + + public static KVOperation createRemove(byte[] key) { + Requires.requireNonNull(key, "key"); + return new KVOperation(key, key, null, REMOVE); + } + + public static KVOperation createClear() { + return new KVOperation(null, null, null, CLEAR); + } + + public static KVOperation createSaveSnapshot(String snapshotPath) { + Requires.requireNonNull(snapshotPath, "snapshotPath"); + return new KVOperation(null, null, snapshotPath, SAVE_SNAPSHOT); + } + + public static KVOperation createLoadSnapshot(String snapshotPath) { + Requires.requireNonNull(snapshotPath, "snapshotPath"); + return new KVOperation(null, null, snapshotPath, LOAD_SNAPSHOT); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java new file mode 100644 index 0000000000..527864baf7 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java @@ -0,0 +1,15 @@ +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.Closure; +import com.baidu.hugegraph.pd.grpc.Pdpb; + +public interface KVStoreClosure extends Closure { + + Pdpb.Error getError(); + + void setError(final Pdpb.Error error); + + Object getData(); + + void setData(final Object data); +} \ No newline at end of file diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java new file mode 100644 index 0000000000..3106362750 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java @@ -0,0 +1,271 @@ +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.JRaftUtils; +import com.alipay.sofa.jraft.Node; +import com.alipay.sofa.jraft.RaftGroupService; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import com.alipay.sofa.jraft.entity.PeerId; +import com.alipay.sofa.jraft.entity.Task; +import com.alipay.sofa.jraft.error.RaftError; +import com.alipay.sofa.jraft.option.NodeOptions; +import com.alipay.sofa.jraft.option.RaftOptions; +import com.alipay.sofa.jraft.option.RpcOptions; +import com.alipay.sofa.jraft.rpc.RaftRpcServerFactory; +import com.alipay.sofa.jraft.rpc.RpcServer; +import com.alipay.sofa.jraft.util.Endpoint; +import com.alipay.sofa.jraft.util.internal.ThrowUtil; +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import lombok.extern.slf4j.Slf4j; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; + +@Slf4j +public class RaftEngine { + private volatile static RaftEngine instance = new RaftEngine(); + + public static RaftEngine getInstance() { + return instance; + } + + private String groupId = "pd_raft"; + private PDConfig.Raft config; + private RaftStateMachine stateMachine; + private RaftGroupService raftGroupService; + private RpcServer 
rpcServer; + private Node raftNode; + private RaftRpcClient raftRpcClient; + + public RaftEngine(){ + this.stateMachine = new RaftStateMachine(); + } + + public boolean init(PDConfig.Raft config) { + if (this.raftNode != null) return false; + this.config = config; + + raftRpcClient = new RaftRpcClient(); + raftRpcClient.init(new RpcOptions()); + + String raftPath = config.getDataPath() + "/" + groupId; + new File(raftPath).mkdirs(); + + new File(config.getDataPath()).mkdirs(); + Configuration initConf = new Configuration(); + initConf.parse(config.getPeersList()); + if (config.isEnable() && config.getPeersList().length() < 3) { + log.error( + "The RaftEngine parameter is incorrect." + + " When RAFT is enabled, the number of peers " + + "cannot be less than 3"); + } + // 设置Node参数,包括日志存储路径和状态机实例 + NodeOptions nodeOptions = new NodeOptions(); + nodeOptions.setFsm(stateMachine); + nodeOptions.setEnableMetrics(true); + // 日志路径 + nodeOptions.setLogUri(raftPath + "/log"); + // raft元数据路径 + nodeOptions.setRaftMetaUri(raftPath + "/meta"); + // 快照路径 + nodeOptions.setSnapshotUri(raftPath + "/snapshot"); + // 初始集群 + nodeOptions.setInitialConf(initConf); + // 快照时间间隔 + nodeOptions.setSnapshotIntervalSecs(config.getSnapshotInterval()); + + nodeOptions.setRpcConnectTimeoutMs(config.getRpcTimeout()); + nodeOptions.setRpcDefaultTimeout(config.getRpcTimeout()); + nodeOptions.setRpcInstallSnapshotTimeout(config.getRpcTimeout()); + // 设置raft配置 + RaftOptions raftOptions = nodeOptions.getRaftOptions(); + + nodeOptions.setEnableMetrics(true); + + final PeerId serverId = JRaftUtils.getPeerId(config.getAddress()); + + rpcServer = createRaftRpcServer(config.getAddress()); + // 构建raft组并启动raft + this.raftGroupService = new RaftGroupService(groupId, serverId, + nodeOptions, rpcServer, true); + this.raftNode = raftGroupService.start(false); + log.info("RaftEngine start successfully: id = {}, peers list = {}", groupId, nodeOptions.getInitialConf().getPeers()); + return this.raftNode != null; + } + + /** + * 创建raft rpc server,用于pd之间通讯 + */ + private RpcServer createRaftRpcServer(String raftAddr) { + Endpoint endpoint = JRaftUtils.getEndPoint(raftAddr); + RpcServer rpcServer = RaftRpcServerFactory.createRaftRpcServer(endpoint); + RaftRpcProcessor.registerProcessor(rpcServer, this); + rpcServer.init(null); + return rpcServer; + } + + public void shutDown() { + if (this.raftGroupService != null) { + this.raftGroupService.shutdown(); + try { + this.raftGroupService.join(); + } catch (final InterruptedException e) { + this.raftNode = null; + ThrowUtil.throwException(e); + } + this.raftGroupService = null; + } + if (this.rpcServer != null){ + this.rpcServer.shutdown(); + this.rpcServer = null; + } + if (this.raftNode != null) { + this.raftNode.shutdown(); + } + this.raftNode = null; + } + + public boolean isLeader() { + return this.raftNode.isLeader(true); + } + + /** + * 添加Raft任务,grpc通过该接口给raft发送数据 + */ + public void addTask(Task task) { + if (!isLeader()) { + KVStoreClosure closure = (KVStoreClosure) task.getDone(); + closure.setError(Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.NOT_LEADER).build()); + closure.run(new Status(RaftError.EPERM, "Not leader")); + return; + } + this.raftNode.apply(task); + } + + public void addStateListener(RaftStateListener listener){ + this.stateMachine.addStateListener(listener); + } + + public void addTaskHandler(RaftTaskHandler handler){ + this.stateMachine.addTaskHandler(handler); + } + public PDConfig.Raft getConfig() { + return this.config; + } + + public PeerId getLeader(){ + return 
raftNode.getLeaderId(); + } + + /** + * 向leader发消息,获取grpc地址; + */ + public String getLeaderGrpcAddress() throws ExecutionException, InterruptedException { + if (isLeader()) return config.getGrpcAddress(); + return raftRpcClient.getGrpcAddress( + raftNode.getLeaderId().getEndpoint().toString()) + .get().getGrpcAddress(); + } + + public Metapb.Member getLocalMember(){ + Metapb.Member.Builder builder = Metapb.Member.newBuilder(); + builder.setClusterId(config.getClusterId()); + builder.setRaftUrl(config.getAddress()); + builder.setDataPath(config.getDataPath()); + builder.setGrpcUrl(config.getGrpcAddress()); + builder.setState(Metapb.StoreState.Up); + return builder.build(); + } + + public List getMembers() throws ExecutionException, InterruptedException { + List members = new ArrayList<>(); + + List peers = raftNode.listPeers(); + for(PeerId peerId : peers){ + Metapb.Member.Builder builder = Metapb.Member.newBuilder(); + builder.setClusterId(config.getClusterId()); + CompletableFuture future = + raftRpcClient.getGrpcAddress(peerId.getEndpoint().toString()); + try { + if (future.isCompletedExceptionally()) { + log.error("failed to getGrpcAddress of {}", + peerId.getEndpoint().toString()); + builder.setState(Metapb.StoreState.Offline); + builder.setRaftUrl(peerId.getEndpoint().toString()); + members.add(builder.build()); + } else { + RaftRpcProcessor.GetMemberResponse response = future.get(); + builder.setState(Metapb.StoreState.Up); + builder.setRaftUrl(response.getRaftAddress()); + builder.setDataPath(response.getDatePath()); + builder.setGrpcUrl(response.getGrpcAddress()); + builder.setRestUrl(response.getRestAddress()); + members.add(builder.build()); + } + } catch (Exception e) { + log.error("failed to getGrpcAddress of {}. {}", + peerId.getEndpoint().toString(), e); + builder.setState(Metapb.StoreState.Offline); + builder.setRaftUrl(peerId.getEndpoint().toString()); + members.add(builder.build()); + } + + } + return members; + } + + public Status changePeerList(String peerList) { + AtomicReference result = new AtomicReference<>(); + try{ + String[] peers = peerList.split(",", -1); + if ((peers.length & 1) != 1){ + throw new PDException(-1,"the number of peer list must be odd."); + }; + Configuration newPeers = new Configuration(); + newPeers.parse(peerList); + CountDownLatch latch = new CountDownLatch(1); + this.raftNode.changePeers(newPeers, status -> { + result.set(status); + latch.countDown(); + }); + latch.await(); + } catch (Exception e) { + log.error("failed to changePeerList to {},{}", peerList, e); + result.set(new Status(-1, e.getMessage())); + } + return result.get(); + } + + public PeerId waitingForLeader(long timeOut){ + PeerId leader = getLeader(); + if ( leader != null ) { + return leader; + } + + synchronized (this) { + leader = getLeader(); + long start = System.currentTimeMillis(); + while ((System.currentTimeMillis() - start < timeOut) && (leader == null)) { + try { + this.wait(1000); + } catch (InterruptedException e) { + log.error("Raft wait for leader exception", e); + } + leader = getLeader(); + } + return leader != null ? 
leader : null; + } + + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java new file mode 100644 index 0000000000..28e1842a55 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java @@ -0,0 +1,64 @@ +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.JRaftUtils; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.option.RpcOptions; +import com.alipay.sofa.jraft.rpc.InvokeCallback; +import com.alipay.sofa.jraft.rpc.InvokeContext; +import com.alipay.sofa.jraft.rpc.RaftRpcFactory; +import com.alipay.sofa.jraft.rpc.RpcClient; +import com.alipay.sofa.jraft.util.Endpoint; +import com.alipay.sofa.jraft.util.RpcFactoryHelper; +import lombok.extern.slf4j.Slf4j; + +import java.util.concurrent.CompletableFuture; + +@Slf4j +public class RaftRpcClient { + protected volatile RpcClient rpcClient; + private RpcOptions rpcOptions; + + public synchronized boolean init(final RpcOptions rpcOptions) { + this.rpcOptions = rpcOptions; + final RaftRpcFactory factory = RpcFactoryHelper.rpcFactory(); + this.rpcClient = factory.createRpcClient(factory.defaultJRaftClientConfigHelper(this.rpcOptions)); + return this.rpcClient.init(null); + } + + /** + * 请求快照 + */ + public CompletableFuture + getGrpcAddress(final String address) { + RaftRpcProcessor.GetMemberRequest request = new RaftRpcProcessor.GetMemberRequest(); + FutureClosureAdapter response = new FutureClosureAdapter<>(); + internalCallAsyncWithRpc(JRaftUtils.getEndPoint(address), request, response); + return response.future; + } + + private void internalCallAsyncWithRpc(final Endpoint endpoint, final RaftRpcProcessor.BaseRequest request, + final FutureClosureAdapter closure) { + final InvokeContext invokeCtx = null; + final InvokeCallback invokeCallback = new InvokeCallback() { + + @Override + public void complete(final Object result, final Throwable err) { + if (err == null) { + final RaftRpcProcessor.BaseResponse response = (RaftRpcProcessor.BaseResponse) result; + closure.setResponse((V) response); + } else { + closure.failure(err); + closure.run(new Status(-1, err.getMessage())); + } + } + }; + + try { + this.rpcClient.invokeAsync(endpoint, request, invokeCtx, invokeCallback, this.rpcOptions.getRpcDefaultTimeout()); + } catch (final Throwable t) { + log.error("failed to call rpc to {}. 
{}", endpoint, t.getMessage()); + closure.failure(t); + closure.run(new Status(-1, t.getMessage())); + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java new file mode 100644 index 0000000000..beeffae7d1 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java @@ -0,0 +1,113 @@ +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.rpc.RpcContext; +import com.alipay.sofa.jraft.rpc.RpcProcessor; +import com.alipay.sofa.jraft.rpc.RpcServer; +import lombok.Data; + +import java.io.Serializable; + +public class RaftRpcProcessor implements RpcProcessor { + + + public static void registerProcessor(final RpcServer rpcServer, RaftEngine raftEngine) { + rpcServer.registerProcessor(new RaftRpcProcessor<>(GetMemberRequest.class, raftEngine)); + } + + private final Class requestClass; + private RaftEngine raftEngine; + + public RaftRpcProcessor(Class requestClass, RaftEngine raftEngine) { + this.requestClass = requestClass; + this.raftEngine = raftEngine; + } + + @Override + public void handleRequest(RpcContext rpcCtx, T request) { + switch (request.magic()) { + case BaseRequest.GET_GRPC_ADDRESS: { + rpcCtx.sendResponse(getGrpcAddress()); + break; + } + + default: + } + } + + @Override + public String interest() { + return this.requestClass.getName(); + } + + private GetMemberResponse getGrpcAddress(){ + GetMemberResponse rep = new GetMemberResponse(); + rep.setGrpcAddress(raftEngine.getConfig().getGrpcAddress()); + rep.setClusterId(raftEngine.getConfig().getClusterId()); + rep.setDatePath(raftEngine.getConfig().getDataPath()); + rep.setRaftAddress(raftEngine.getConfig().getAddress()); + rep.setRestAddress(raftEngine.getConfig().getHost() + ":" + raftEngine.getConfig().getPort()); + rep.setStatus(Status.OK); + return rep; + } + + public abstract static class BaseRequest implements Serializable { + public static final byte GET_GRPC_ADDRESS = 0x01; + + public abstract byte magic(); + } + + @Data + public abstract static class BaseResponse implements Serializable { + private Status status; + + } + + @Data + public static class GetMemberRequest extends BaseRequest { + @Override + public byte magic() { + return GET_GRPC_ADDRESS; + } + } + + @Data + public static class GetMemberResponse extends BaseResponse { + private long clusterId; + private String raftAddress; + private String grpcAddress; + private String datePath; + private String restAddress; + } + + public enum Status implements Serializable{ + UNKNOWN(-1, "unknown"), + OK(0, "ok"), + COMPLETE(0, "Transmission completed"), + INCOMPLETE(1, "Incomplete transmission"), + NO_PARTITION(10, "Partition not found"), + IO_ERROR(11, "io error"), + EXCEPTION(12, "exception"), + ABORT(100, "Transmission aborted"); + + private int code; + private String msg; + + Status(int code, String msg) { + this.code = code; + this.msg = msg; + } + + public int getCode(){ + return this.code; + } + + public Status setMsg(String msg){ + this.msg = msg; + return this; + } + + public boolean isOK(){ + return this.code == 0; + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java new file mode 100644 index 0000000000..c36c553a7f --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java @@ -0,0 +1,5 @@ +package org.apache.hugegraph.pd.raft; + 
+public interface RaftStateListener { + void onRaftLeaderChanged(); +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java new file mode 100644 index 0000000000..14aec4fd6a --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java @@ -0,0 +1,302 @@ +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.Closure; +import com.alipay.sofa.jraft.Iterator; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import com.alipay.sofa.jraft.core.StateMachineAdapter; +import com.alipay.sofa.jraft.entity.LeaderChangeContext; +import com.alipay.sofa.jraft.entity.LocalFileMetaOutter; +import com.alipay.sofa.jraft.error.RaftError; +import com.alipay.sofa.jraft.error.RaftException; +import com.alipay.sofa.jraft.storage.snapshot.SnapshotReader; +import com.alipay.sofa.jraft.storage.snapshot.SnapshotWriter; +import com.alipay.sofa.jraft.util.CRC64; +import com.alipay.sofa.jraft.util.Utils; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +import org.springframework.util.CollectionUtils; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; +import java.util.zip.Checksum; + +@Slf4j +public class RaftStateMachine extends StateMachineAdapter { + private List taskHandlers; + private List stateListeners; + + private static final String SNAPSHOT_DIR_NAME = "snapshot"; + private static final String SNAPSHOT_ARCHIVE_NAME = "snapshot.zip"; + + private final AtomicLong leaderTerm = new AtomicLong(-1); + + public RaftStateMachine() { + this.taskHandlers = new CopyOnWriteArrayList<>(); + this.stateListeners = new CopyOnWriteArrayList<>(); + } + + public void addTaskHandler(RaftTaskHandler handler) { + taskHandlers.add(handler); + } + + public void addStateListener(RaftStateListener listener){ stateListeners.add(listener);} + + public boolean isLeader() { + return this.leaderTerm.get() > 0; + } + + @Override + public void onApply(Iterator iter) { + while (iter.hasNext()) { + final RaftClosureAdapter done = (RaftClosureAdapter) iter.done(); + try { + KVOperation kvOp; + if (done != null) { + kvOp = done.op; + } else { + kvOp = KVOperation.fromByteArray(iter.getData().array()); + } + for (RaftTaskHandler taskHandler : taskHandlers) { + taskHandler.invoke(kvOp, done); + } + if ( done != null) + done.run(Status.OK()); + } catch (Throwable t) { + log.error("StateMachine meet critical error: {}.", t); + if (done != null) + done.run(new Status(RaftError.EINTERNAL, t.getMessage())); + } + iter.next(); + } + } + + @Override + public void onError(final RaftException e) { + log.error("Raft StateMachine on error {}", e); + } + + @Override + public void onShutdown() { + super.onShutdown(); + } + + @Override + public void onLeaderStart(final long term) { + this.leaderTerm.set(term); + super.onLeaderStart(term); + + log.info("Raft becomes leader"); + Utils.runInThread(() -> { + if (!CollectionUtils.isEmpty(stateListeners)) + stateListeners.forEach(listener -> { + listener.onRaftLeaderChanged(); + }); + }); + } + + @Override + public void onLeaderStop(final Status status) { + this.leaderTerm.set(-1); + super.onLeaderStop(status); + log.info("Raft lost 
leader "); + } + + @Override + public void onStartFollowing(final LeaderChangeContext ctx) { + super.onStartFollowing(ctx); + Utils.runInThread(() -> { + if (!CollectionUtils.isEmpty(stateListeners)) + stateListeners.forEach(listener -> { + listener.onRaftLeaderChanged(); + }); + }); + } + + @Override + public void onStopFollowing(final LeaderChangeContext ctx) { + super.onStopFollowing(ctx); + } + + + @Override + public void onConfigurationCommitted(final Configuration conf) { + log.info("Raft onConfigurationCommitted {}", conf); + } + + @Override + public void onSnapshotSave(final SnapshotWriter writer, final Closure done) { + + String snapshotDir = writer.getPath() + File.separator + SNAPSHOT_DIR_NAME; + try { + FileUtils.deleteDirectory(new File(snapshotDir)); + FileUtils.forceMkdir(new File(snapshotDir)); + } catch (IOException e) { + log.error("Failed to create snapshot directory {}", snapshotDir); + done.run(new Status(RaftError.EIO, e.toString())); + return; + } + + CountDownLatch latch = new CountDownLatch(taskHandlers.size()); + for (RaftTaskHandler taskHandler : taskHandlers) { + Utils.runInThread(() -> { + try { + KVOperation op = KVOperation.createSaveSnapshot(snapshotDir); + taskHandler.invoke(op, null); + log.info("Raft onSnapshotSave success"); + latch.countDown(); + } catch (PDException e) { + log.error("Raft onSnapshotSave failed. {}", e.toString()); + done.run(new Status(RaftError.EIO, e.toString())); + } + }); + } + try { + latch.await(); + } catch (InterruptedException e) { + log.error("Raft onSnapshotSave failed. {}",e.toString()); + done.run(new Status(RaftError.EIO, e.toString())); + return; + } + + // compress + try { + compressSnapshot(writer); + FileUtils.deleteDirectory(new File(snapshotDir)); + } catch (Exception e) { + log.error("Failed to delete snapshot directory {}, {}", snapshotDir, e.toString()); + done.run(new Status(RaftError.EIO, e.toString())); + return; + } + done.run(Status.OK()); + } + + @Override + public boolean onSnapshotLoad(final SnapshotReader reader) { + if (isLeader()) { + log.warn("Leader is not supposed to load snapshot"); + return false; + } + String snapshotDir = reader.getPath() + File.separator + SNAPSHOT_DIR_NAME; + String snapshotArchive = reader.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME; + // 2. decompress snapshot archive + try { + decompressSnapshot(reader); + } catch (PDException e) { + log.error("Failed to delete snapshot directory {}, {}", snapshotDir, e.toString()); + return true; + } + + CountDownLatch latch = new CountDownLatch(taskHandlers.size()); + for (RaftTaskHandler taskHandler : taskHandlers) { + try { + KVOperation op = KVOperation.createLoadSnapshot(snapshotDir); + taskHandler.invoke(op, null); + log.info("Raft onSnapshotLoad success"); + latch.countDown(); + } catch (PDException e) { + log.error("Raft onSnapshotLoad failed. {}", e.toString()); + return false; + } + } + try { + latch.await(); + } catch (InterruptedException e) { + log.error("Raft onSnapshotSave failed. 
{}", e.toString()); + return false; + } + + + try { + // TODO: remove file from meta + // SnapshotReader 沒有提供刪除文件的接口 + FileUtils.deleteDirectory(new File(snapshotDir)); + File file = new File(snapshotArchive); + if (file.exists()) { + FileUtils.forceDelete(file); + } + } catch (IOException e) { + log.error("Failed to delete snapshot directory {} and file {}", snapshotDir, snapshotArchive); + return false; + } + + return true; + } + + private void compressSnapshot(final SnapshotWriter writer) throws PDException { + final Checksum checksum = new CRC64(); + final String snapshotArchive = writer.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME;; + try { + ZipUtils.compress(writer.getPath(), SNAPSHOT_DIR_NAME, snapshotArchive, checksum); + LocalFileMetaOutter.LocalFileMeta.Builder metaBuild = LocalFileMetaOutter.LocalFileMeta.newBuilder(); + metaBuild.setChecksum(Long.toHexString(checksum.getValue())); + if (!writer.addFile(SNAPSHOT_ARCHIVE_NAME, metaBuild.build())) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, "failed to add file to LocalFileMeta"); + } + } catch (IOException e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, e); + } + } + + private void decompressSnapshot(final SnapshotReader reader) throws PDException { + final LocalFileMetaOutter.LocalFileMeta meta = (LocalFileMetaOutter.LocalFileMeta) reader.getFileMeta(SNAPSHOT_ARCHIVE_NAME); + final Checksum checksum = new CRC64(); + final String snapshotArchive = reader.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME;; + try { + ZipUtils.decompress(snapshotArchive, reader.getPath(), checksum); + if (meta.hasChecksum()) { + if (!meta.getChecksum().equals(Long.toHexString(checksum.getValue()))) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, "Snapshot checksum failed"); + } + } + } catch (IOException e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e); + } + } + + + public static class RaftClosureAdapter implements KVStoreClosure { + private KVOperation op; + private KVStoreClosure closure; + + public RaftClosureAdapter(KVOperation op, KVStoreClosure closure) { + this.op = op; + this.closure = closure; + } + + public KVStoreClosure getClosure() { + return closure; + } + + @Override + public void run(Status status) { + closure.run(status); + } + + @Override + public Pdpb.Error getError() { + return null; + } + + @Override + public void setError(Pdpb.Error error) { + + } + + @Override + public Object getData() { + return null; + } + + @Override + public void setData(Object data) { + + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java new file mode 100644 index 0000000000..fc045e1053 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java @@ -0,0 +1,10 @@ +package org.apache.hugegraph.pd.raft; + +import com.baidu.hugegraph.pd.common.PDException; + +/** + * 接收raft发送的数据 + */ +public interface RaftTaskHandler { + boolean invoke(final KVOperation op, KVStoreClosure response) throws PDException; +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java new file mode 100644 index 0000000000..13f25347e1 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java @@ -0,0 +1,64 @@ +package org.apache.hugegraph.pd.raft; + +import 
lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.apache.commons.io.output.NullOutputStream; + +import java.io.*; +import java.nio.file.Paths; +import java.util.zip.*; + +@Slf4j +public final class ZipUtils { + + public static void compress(final String rootDir, final String sourceDir, + final String outputFile, final Checksum checksum) throws IOException { + try (final FileOutputStream fos = new FileOutputStream(outputFile); + final CheckedOutputStream cos = new CheckedOutputStream(fos, checksum); + final ZipOutputStream zos = new ZipOutputStream(new BufferedOutputStream(cos))) { + ZipUtils.compressDirectoryToZipFile(rootDir, sourceDir, zos); + zos.flush(); + fos.getFD().sync(); + } + } + + private static void compressDirectoryToZipFile(final String rootDir, final String sourceDir, + final ZipOutputStream zos) throws IOException { + final String dir = Paths.get(rootDir, sourceDir).toString(); + final File[] files = new File(dir).listFiles(); + for (final File file : files) { + final String child = Paths.get(sourceDir, file.getName()).toString(); + if (file.isDirectory()) { + compressDirectoryToZipFile(rootDir, child, zos); + } else { + zos.putNextEntry(new ZipEntry(child)); + try (final FileInputStream fis = new FileInputStream(file); + final BufferedInputStream bis = new BufferedInputStream(fis)) { + IOUtils.copy(bis, zos); + } + } + } + } + + public static void decompress(final String sourceFile, final String outputDir, + final Checksum checksum) throws IOException { + try (final FileInputStream fis = new FileInputStream(sourceFile); + final CheckedInputStream cis = new CheckedInputStream(fis, checksum); + final ZipInputStream zis = new ZipInputStream(new BufferedInputStream(cis))) { + ZipEntry entry; + while ((entry = zis.getNextEntry()) != null) { + final String fileName = entry.getName(); + final File entryFile = new File(Paths.get(outputDir, fileName).toString()); + FileUtils.forceMkdir(entryFile.getParentFile()); + try (final FileOutputStream fos = new FileOutputStream(entryFile); + final BufferedOutputStream bos = new BufferedOutputStream(fos)) { + IOUtils.copy(zis, bos); + bos.flush(); + fos.getFD().sync(); + } + } + IOUtils.copy(cis, NullOutputStream.NULL_OUTPUT_STREAM); + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java new file mode 100644 index 0000000000..0baf8c98c1 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java @@ -0,0 +1,31 @@ +package org.apache.hugegraph.pd.store; + +import org.apache.hugegraph.pd.raft.KVStoreClosure; + +import com.baidu.hugegraph.pd.grpc.Pdpb; + +public abstract class BaseKVStoreClosure implements KVStoreClosure { + private Pdpb.Error error; + private Object data; + @Override + public Pdpb.Error getError() { + return error; + } + + @Override + public void setError(Pdpb.Error error) { + this.error = error; + } + + @Override + public Object getData() { + return data; + } + + @Override + public void setData(Object data) { + this.data = data; + } + + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java new file mode 100644 index 0000000000..3d6bdf12e8 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java @@ -0,0 +1,41 @@ +package org.apache.hugegraph.pd.store; + +import 
com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +public interface HgKVStore { + void init(PDConfig config); + + void put(byte[] key, byte[] value) throws PDException; + + byte[] get(byte[] key) throws PDException; + + List scanPrefix(byte[] prefix); + + long remove(byte[] bytes) throws PDException; + + long removeByPrefix(byte[] bytes) throws PDException; + + void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException; + + void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException; + + byte[] getWithTTL(byte[] key) throws PDException; + void removeWithTTL(byte[] key) throws PDException; + + List getListWithTTL(byte[] key) throws PDException; + + void clear() throws PDException; + + void saveSnapshot(String snapshotPath) throws PDException; + + void loadSnapshot(String snapshotPath) throws PDException; + + List scanRange(byte[] start,byte[] end); + + void close(); +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java new file mode 100644 index 0000000000..db84a2b992 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java @@ -0,0 +1,312 @@ +package org.apache.hugegraph.pd.store; + +import com.alipay.sofa.jraft.util.Utils; +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; + +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; +import com.google.common.cache.CacheBuilder; +import com.google.common.primitives.Bytes; + +import lombok.extern.slf4j.Slf4j; + +import org.apache.commons.io.FileUtils; +import org.rocksdb.*; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +@Slf4j +public class HgKVStoreImpl implements HgKVStore { + private RocksDB db; + private String dbPath; + private static final ConcurrentHashMap> CACHE = new ConcurrentHashMap(); + private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + + private Options dbOptions; + @Override + public void init(PDConfig config) { + dbOptions = new Options().setCreateIfMissing(true); + + final Lock writeLock = this.readWriteLock.writeLock(); + writeLock.lock(); + try { + this.dbPath = config.getDataPath() + "/rocksdb/"; + File file = new File(this.dbPath); + if (!file.exists()) { + try { + FileUtils.forceMkdir(file); + } catch (IOException e) { + log.warn("Failed to create data file,{}", e); + } + } + openRocksDB(dbPath); + } catch (PDException e) { + log.error("Failed to open data file,{}", e); + } finally { + writeLock.unlock(); + } + } + + @Override + public void put(byte[] key, byte[] value) throws PDException { + final Lock readLock = this.readWriteLock.readLock(); + readLock.lock(); + try { + db.put(key, value); + } catch (RocksDBException e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } finally { + readLock.unlock(); + } + } + + @Override + public byte[] get(byte[] key) 
throws PDException { + final Lock readLock = this.readWriteLock.readLock(); + readLock.lock(); + try { + return db.get(key); + } catch (RocksDBException e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } finally { + readLock.unlock(); + } + } + + @Override + public List scanPrefix(byte[] prefix) { + final Lock readLock = this.readWriteLock.readLock(); + readLock.lock(); + try (ReadOptions options = new ReadOptions() + .setIterateLowerBound(new Slice(prefix))) { + List kvs = new ArrayList<>(); + RocksIterator iterator = db.newIterator(options); + iterator.seekToFirst(); + while (iterator.isValid() && 0 == Bytes.indexOf(iterator.key(), prefix)) { + kvs.add(new KV(iterator.key(), iterator.value())); + iterator.next(); + } + return kvs; + } finally { + readLock.unlock(); + } + } + + @Override + public long remove(byte[] key) throws PDException { + final Lock readLock = this.readWriteLock.readLock(); + readLock.lock(); + try { + db.delete(key); + } catch (RocksDBException e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_DEL_ERROR_VALUE, e); + } finally { + readLock.unlock(); + } + return 0; + } + + @Override + public long removeByPrefix(byte[] prefix) throws PDException { + final Lock readLock = this.readWriteLock.readLock(); + readLock.lock(); + try (ReadOptions options = new ReadOptions() + .setIterateLowerBound(new Slice(prefix))) { + RocksIterator iterator = db.newIterator(options); + iterator.seekToFirst(); + + while (iterator.isValid()) { + if (0 == Bytes.indexOf(iterator.key(), prefix)) { + db.delete(iterator.key()); + } else + break; + iterator.next(); + } + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } finally { + readLock.unlock(); + } + return 0; + } + + @Override + public void clear() throws PDException { + CACHE.clear(); + } + + @Override + public List getListWithTTL(byte[] key) throws PDException { + String storeKey = new String(key, Charset.defaultCharset()); + LinkedList ts = new LinkedList<>(); + CACHE.keySet().forEach((cacheKey) -> { + if (cacheKey.startsWith(storeKey)) { + ConcurrentMap map; + if ((map = CACHE.get(cacheKey)) == null) return; + map.values().forEach((element) -> { + ts.add((byte[]) element); + }); + } + }); + return ts; + } + + @Override + public byte[] getWithTTL(byte[] key) throws PDException { + ConcurrentMap map; + String storeKey = new String(key, Charset.defaultCharset()); + if ((map = CACHE.get(storeKey)) == null) return null; + Object value = map.get(storeKey); + return value == null ? 
null : (byte[]) value; + } + + @Override + public void removeWithTTL(byte[] key) throws PDException { + ConcurrentMap map; + String storeKey = new String(key, Charset.defaultCharset()); + if ((map = CACHE.get(storeKey)) == null) return ; + map.remove(storeKey); + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException { + this.putWithTTL(key,value,ttl,TimeUnit.SECONDS); + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException { + try { + ConcurrentMap spaceNode = CacheBuilder.newBuilder().initialCapacity(200) + .expireAfterWrite(ttl, + timeUnit) + .build().asMap(); + String storeKey = new String(key, Charset.defaultCharset()); + ConcurrentMap space = CACHE.putIfAbsent(storeKey, spaceNode); + if (space == null) { + space = spaceNode; + } + space.put(storeKey, value); + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } + } + + @Override + public void saveSnapshot(String snapshotPath) throws PDException { + log.info("begin save snapshot at {}", snapshotPath); + final Lock writeLock = this.readWriteLock.writeLock(); + writeLock.lock(); + try (final Checkpoint checkpoint = Checkpoint.create(this.db)) { + final String tempPath = Paths.get(snapshotPath) + "_temp"; + final File tempFile = new File(tempPath); + FileUtils.deleteDirectory(tempFile); + checkpoint.createCheckpoint(tempPath); + final File snapshotFile = new File(snapshotPath); + FileUtils.deleteDirectory(snapshotFile); + if (!Utils.atomicMoveFile(tempFile, snapshotFile, true)) { + log.error("Fail to rename {} to {}", tempPath, snapshotPath); + throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, + String.format("Fail to rename %s to %s", tempPath, snapshotPath)); + } + } catch (final PDException e) { + throw e; + } catch (final Exception e) { + log.error("Fail to write snapshot at path: {}", snapshotPath, e); + throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, e); + } finally { + writeLock.unlock(); + } + log.info("saved snapshot into {}", snapshotPath); + } + + @Override + public void loadSnapshot(String snapshotPath) throws PDException { + log.info("begin load snapshot from {}", snapshotPath); + final Lock writeLock = this.readWriteLock.writeLock(); + writeLock.lock(); + try { + final File snapshotFile = new File(snapshotPath); + if (!snapshotFile.exists()) { + log.error("Snapshot file {} not exists.", snapshotPath); + return; + } + // close DB + closeRocksDB(); + // replace rocksdb data with snapshot data + final File dbFile = new File(this.dbPath); + FileUtils.deleteDirectory(dbFile); + if (!Utils.atomicMoveFile(snapshotFile, dbFile, true)) { + log.error("Fail to rename {} to {}", snapshotPath, this.dbPath); + throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, + String.format("Fail to rename %s to %s", snapshotPath, this.dbPath)); + } + // reopen the db + openRocksDB(this.dbPath); + } catch (final PDException e) { + throw e; + } catch (final Exception e) { + log.error("failed to load snapshot from {}", snapshotPath); + throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e); + } finally { + writeLock.unlock(); + } + log.info("loaded snapshot from {}", snapshotPath); + } + + @Override + public List scanRange(byte[] start, byte[] end) { + final Lock readLock = this.readWriteLock.readLock(); + readLock.lock(); + try(ReadOptions options = new ReadOptions() + .setIterateLowerBound(new Slice(start)) + 
.setIterateUpperBound(new Slice(end))) { + List kvs = new ArrayList<>(); + RocksIterator iterator = db.newIterator(options); + iterator.seekToFirst(); + while (iterator.isValid()) { + kvs.add(new KV(iterator.key(), iterator.value())); + iterator.next(); + } + return kvs; + } finally { + readLock.unlock(); + } + } + + @Override + public void close() { + closeRocksDB(); + } + + + private void closeRocksDB() { + if (this.db != null) { + this.db.close(); + this.db = null; + } + } + + private void openRocksDB(String dbPath) throws PDException { + try { + this.db = RocksDB.open(dbOptions, dbPath); + } catch (RocksDBException e) { + log.error("Failed to open RocksDB from {}", dbPath, e); + throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e); + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java new file mode 100644 index 0000000000..a7f39ffc07 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java @@ -0,0 +1,27 @@ +package org.apache.hugegraph.pd.store; + + +public class KV { + private byte[] key; + private byte[] value; + + public KV(byte[] key, byte[] value){ + this.key = key; + this.value = value; + } + public void setKey(byte[] key) { + this.key = key; + } + + public void setValue(byte[] value) { + this.value = value; + } + + public byte[] getKey() { + return key; + } + + public byte[] getValue() { + return value; + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java new file mode 100644 index 0000000000..987a6c09af --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java @@ -0,0 +1,296 @@ +package org.apache.hugegraph.pd.store; + +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.entity.Task; +import com.alipay.sofa.jraft.error.RaftError; +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.raft.KVStoreClosure; +import org.apache.hugegraph.pd.raft.RaftStateMachine; + +import com.baidu.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.raft.KVOperation; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftTaskHandler; + +import lombok.extern.slf4j.Slf4j; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + + +@Slf4j +public class RaftKVStore implements HgKVStore, RaftTaskHandler { + + private final RaftEngine engine; + private final HgKVStore store; + + public RaftKVStore(RaftEngine engine, HgKVStore store) { + this.engine = engine; + this.store = store; + } + + @Override + public void init(PDConfig config) { + this.store.init(config); + this.engine.addTaskHandler(this); + } + + private BaseKVStoreClosure createClosure() { + return new BaseKVStoreClosure() { + @Override + public void run(Status status) { + if (!status.isOk()) { + log.error("An exception occurred while performing the RAFT,{}", + status.getErrorMsg()); + } else { + log.info("RAFT done!"); + } + } + }; + } + + @Override + public void put(byte[] key, byte[] value) throws PDException { + KVOperation operation = KVOperation.createPut(key, value); + try { + applyOperation(operation).get(); + } catch (Exception e){ + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); + } + } + + /** + * 
Queries can bypass Raft and read directly from the local store + */ + @Override + public byte[] get(byte[] key) throws PDException { + return store.get(key); + + } + + @Override + public List scanPrefix(byte[] prefix) { + return store.scanPrefix(prefix); + } + + @Override + public long remove(byte[] bytes) throws PDException { + try { + applyOperation(KVOperation.createRemove(bytes)).get(); + } catch (Exception e){ + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); + } + return 0; + } + + + @Override + public long removeByPrefix(byte[] bytes) throws PDException { + try { + applyOperation(KVOperation.createRemoveByPrefix(bytes)).get(); + } catch (Exception e){ + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); + } + return 0; + } + + @Override + public void clear() throws PDException { + try { + applyOperation(KVOperation.createClear()).get(); + } catch (Exception e){ + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); + } + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException { + try { + applyOperation(KVOperation.createPutWithTTL(key, value, ttl)).get(); + } catch (Exception e){ + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); + } + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException { + try { + applyOperation(KVOperation.createPutWithTTL(key, value, ttl, timeUnit)).get(); + } catch (Exception e){ + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); + } + } + + @Override + public List getListWithTTL(byte[] key) throws PDException { + return store.getListWithTTL(key); + } + + @Override + public byte[] getWithTTL(byte[] key) throws PDException { + return store.getWithTTL(key); + } + + @Override + public void removeWithTTL(byte[] key) throws PDException { + try { + applyOperation(KVOperation.createRemoveWithTTL(key)).get(); + } catch (Exception e){ + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); + } + } + + @Override + public void saveSnapshot(String snapshotPath) throws PDException { + store.saveSnapshot(snapshotPath); + } + + @Override + public void loadSnapshot(String snapshotPath) throws PDException{ + store.loadSnapshot(snapshotPath); + } + + @Override + public List scanRange(byte[] start, byte[] end) { + return store.scanRange(start,end); + } + + @Override + public void close() { + store.close(); + } + + /** + * The actual operations that must go through Raft + */ + private void doPut(byte[] key, byte[] value) throws PDException { + + store.put(key, value); + } + + public long doRemove(byte[] bytes) throws PDException { + return this.store.remove(bytes); + } + + public long doRemoveByPrefix(byte[] bytes) throws PDException { + return this.store.removeByPrefix(bytes); + } + + public void doRemoveWithTTL(byte[] key) throws PDException { + this.store.removeWithTTL(key); + } + public void doClear() throws PDException { + this.store.clear(); + } + + public void doPutWithTTL(byte[] key, byte[] value, long ttl) throws PDException { + this.store.putWithTTL(key, value, ttl); + } + + public void doPutWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException { + this.store.putWithTTL(key, value, ttl, timeUnit); + } + + public void doSaveSnapshot(String snapshotPath) throws PDException { + this.store.saveSnapshot(snapshotPath); + } + + public void doLoadSnapshot(String snapshotPath) throws PDException { + this.store.loadSnapshot(snapshotPath); + } + + private CompletableFuture applyOperation(final KVOperation op) throws 
PDException { + CompletableFuture future = new CompletableFuture<>(); + try { + final Task task = new Task(); + task.setData(ByteBuffer.wrap(op.toByteArray())); + task.setDone(new RaftStateMachine.RaftClosureAdapter(op, new KVStoreClosure() { + Object data; + Pdpb.Error error; + @Override + public Pdpb.Error getError() { return error;} + + @Override + public void setError(Pdpb.Error error) { this.error = error;} + + @Override + public Object getData() { return data; } + + @Override + public void setData(Object data) { this.data = data;} + + @Override + public void run(Status status) { + if (status.isOk()) { + future.complete((T) data); + } else { + RaftError raftError = status.getRaftError(); + Pdpb.ErrorType type; + if (RaftError.EPERM.equals(raftError)) { + type = Pdpb.ErrorType.NOT_LEADER; + } else { + type = Pdpb.ErrorType.UNKNOWN; + } + error = Pdpb.Error.newBuilder().setType(type) + .setMessage(status.getErrorMsg()) + .build(); + future.completeExceptionally( + new PDException(error.getTypeValue())); + } + } + })); + this.engine.addTask(task); + return future; + } catch (Exception e) { + future.completeExceptionally(e); + return future; + } + } + + private boolean isLeader() { + return this.engine.isLeader(); + } + + @Override + public boolean invoke(KVOperation op, KVStoreClosure response) throws PDException { + switch (op.getOp()) { + case KVOperation.GET: + break; + case KVOperation.PUT: + doPut(op.getKey(), op.getValue()); + break; + case KVOperation.REMOVE: + doRemove(op.getKey()); + break; + case KVOperation.PUT_WITH_TTL: + doPutWithTTL(op.getKey(), op.getValue(), (long) op.getArg()); + break; + case KVOperation.PUT_WITH_TTL_UNIT: + Object[] arg = (Object[]) op.getArg(); + doPutWithTTL(op.getKey(), op.getValue(), (long) arg[0] , (TimeUnit)arg[1]); + break; + case KVOperation.REMOVE_BY_PREFIX: + doRemoveByPrefix(op.getKey()); + break; + case KVOperation.REMOVE_WITH_TTL: + doRemoveWithTTL(op.getKey()); + break; + case KVOperation.CLEAR: + doClear(); + break; + case KVOperation.SAVE_SNAPSHOT: + doSaveSnapshot((String) op.getAttach()); + break; + case KVOperation.LOAD_SNAPSHOT: + doLoadSnapshot((String) op.getAttach()); + break; + default: + log.error("Err op {}", op.getOp()); + } + return false; + } +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java new file mode 100644 index 0000000000..ce6d5c9108 --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java @@ -0,0 +1,90 @@ +package org.apache.hugegraph.pd; + +import java.util.concurrent.ExecutionException; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.junit.Assert; +import org.junit.BeforeClass; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; + +// import org.junit.Test; + +public class MonitorServiceTest { + static PDConfig pdConfig; + + @BeforeClass + public static void init() throws ExecutionException, InterruptedException { + pdConfig = new PDConfig() {{ + this.setClusterId(100); + this.setPatrolInterval(1); + }}; + + //pdConfig.setEtcd(new PDConfig().new Etcd() {{ + // this.setAddress("http://localhost:2379"); + // + //}}); + pdConfig.setStore(new PDConfig().new Store() {{ + this.setMaxDownTime(1); + this.setKeepAliveTimeout(5); + }}); + + pdConfig.setPartition(new PDConfig().new Partition() {{ + this.setShardCount(3); + this.setTotalCount(10); + }}); + + clearClusterData(); + } + + public static void 
clearClusterData() throws ExecutionException, InterruptedException { + //Client client = Client.builder().endpoints(pdConfig.getEtcd().getAddress()).build(); + //KV kvClient = client.getKVClient(); + // + //ByteSequence key = ByteSequence.from("HUGEGRAPH/" + pdConfig.getClusterId(), Charset.forName("utf-8")); + //CompletableFuture rsp = kvClient.delete(key, DeleteOption.newBuilder().isPrefix(true).build()); + //System.out.println("删除数量 : " + rsp.get().getDeleted()); + //kvClient.close(); + //client.close(); + } + + // @Test + public void testPatrolStores() throws PDException, InterruptedException { + StoreNodeService storeService = new StoreNodeService(pdConfig); + PartitionService partitionService = new PartitionService(pdConfig, storeService); + TaskScheduleService monitorService = new TaskScheduleService(pdConfig, storeService, partitionService); + storeService.init(partitionService); + partitionService.init(); + monitorService.init(); + + int count = 6; + Metapb.Store[] stores = new Metapb.Store[count]; + for (int i = 0; i < count; i++) { + Metapb.Store store = Metapb.Store.newBuilder() + .setId(0) + .setAddress("" + i) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default").build()) + .build(); + stores[i] = storeService.register(store); + System.out.println("新注册store, id = " + Long.toHexString(stores[i].getId())); + } + Metapb.Graph graph = Metapb.Graph.newBuilder() + .setGraphName("defaultGH") + + .setPartitionCount(10) + .build(); + partitionService.updateGraph(graph); + Thread.sleep(10000); + count = 0; + count += storeService.getStores("").stream().filter(store -> store.getState() == Metapb.StoreState.Tombstone).count(); + + Assert.assertEquals(6, count); + + } + + +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java new file mode 100644 index 0000000000..1e2795fcc1 --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java @@ -0,0 +1,29 @@ +package org.apache.hugegraph.pd; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class PartitionServiceTest { + @Test + public void testPartitionHeartbeat() { + List shardList = new ArrayList<>(); + shardList.add(Metapb.Shard.newBuilder().setStoreId(1).build()); + shardList.add(Metapb.Shard.newBuilder().setStoreId(2).build()); + shardList.add(Metapb.Shard.newBuilder().setStoreId(3).build()); + shardList = new ArrayList<>(shardList); + Metapb.PartitionStats stats = Metapb.PartitionStats.newBuilder() + .addAllShard(shardList).build(); + List shardList2 = new ArrayList<>(stats.getShardList()); + Collections.shuffle(shardList2); + shardList2.forEach(shard -> { + System.out.println(shard.getStoreId()); + }); + + + } +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java new file mode 100644 index 0000000000..7fe6a911b1 --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java @@ -0,0 +1,436 @@ +package org.apache.hugegraph.pd; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard; +import 
com.baidu.hugegraph.pd.grpc.pulse.CleanPartition; +import com.baidu.hugegraph.pd.grpc.pulse.DbCompaction; +import com.baidu.hugegraph.pd.grpc.pulse.MovePartition; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition; +import com.baidu.hugegraph.pd.grpc.pulse.TransferLeader; + +import org.apache.commons.io.FileUtils; +import org.junit.Assert; +import org.junit.BeforeClass; +// import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; + +public class StoreNodeServiceTest { + static PDConfig pdConfig; + + @BeforeClass + public static void init() throws Exception { + String path = "tmp/unitTest"; + deleteDirectory(new File(path)); + pdConfig = new PDConfig() {{ + this.setClusterId(100); + this.setInitialStoreList("127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503,127.0.0.1:8504,127.0.0.1:8505"); + }}; + + pdConfig.setStore(new PDConfig().new Store() {{ + this.setMaxDownTime(3600); + this.setKeepAliveTimeout(3600); + }}); + + pdConfig.setPartition(new PDConfig().new Partition() {{ + this.setShardCount(3); + this.setMaxShardsPerStore(3); + }}); + pdConfig.setRaft(new PDConfig().new Raft(){{ + this.setEnable(false); + }}); + pdConfig.setDiscovery(new PDConfig().new Discovery()); + pdConfig.setDataPath(path); + ConfigService configService = new ConfigService(pdConfig); + pdConfig = configService.loadConfig(); + } + + + + // @Test + public void testStoreNodeService() throws PDException { + Assert.assertEquals(pdConfig.getPartition().getTotalCount(), + pdConfig.getInitialStoreMap().size() * pdConfig.getPartition().getMaxShardsPerStore() + / pdConfig.getPartition().getShardCount()); + StoreNodeService storeService = new StoreNodeService(pdConfig); + int count = 6; + Metapb.Store[] stores = new Metapb.Store[count]; + for (int i = 0; i < count; i++) { + Metapb.Store store = Metapb.Store.newBuilder() + .setId(0) + .setAddress("127.0.0.1:850" + i) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default").build()) + .build(); + stores[i] = storeService.register(store); + System.out.println("新注册store, id = " + stores[i].getId()); + } + Assert.assertEquals(count, storeService.getStores("").size()); + + for (Metapb.Store store : stores) { + Metapb.StoreStats stats = Metapb.StoreStats.newBuilder() + .setStoreId(store.getId()) + .build(); + storeService.heartBeat(stats); + } + + Assert.assertEquals(6, storeService.getActiveStores("").size()); + + Metapb.Graph graph = Metapb.Graph.newBuilder() + .setGraphName("defaultGH") + .setPartitionCount(10) + .build(); + // 分配shard + List shards = storeService.allocShards(graph, 1); + + + Assert.assertEquals(3, shards.size()); + + Assert.assertEquals(pdConfig.getPartition().getTotalCount(), storeService.getShardGroups().size()); // 设置leader + Metapb.Shard leader = Metapb.Shard.newBuilder(shards.get(0)) + .setRole(Metapb.ShardRole.Leader).build(); + shards = new ArrayList<>(shards); + shards.set(0, leader); + // 增加shard + pdConfig.getPartition().setShardCount(5); + + Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() + .setId(1) + .addAllShards(shards).build(); + shards = storeService.reallocShards(shardGroup); + + Assert.assertEquals(5, shards.size()); + // 减少shard + pdConfig.getPartition().setShardCount(3); + shards = storeService.reallocShards(shardGroup); + 
Assert.assertEquals(3, shards.size()); + // 包含leader,leader不能被删除 + Assert.assertTrue(shards.contains(leader)); + + // 减少shard + pdConfig.getPartition().setShardCount(1); + graph = Metapb.Graph.newBuilder(graph).build(); + shards = storeService.reallocShards(shardGroup); + Assert.assertEquals(1, shards.size()); + // 包含leader,leader不能被删除 + Assert.assertTrue(shards.contains(leader)); + + for (Metapb.Store store : stores) { + storeService.removeStore(store.getId()); + } + Assert.assertEquals(0, storeService.getStores("").size()); + + + } + + // @Test + public void testSplitPartition() throws PDException { + StoreNodeService storeService = new StoreNodeService(pdConfig); + PartitionService partitionService = new PartitionService(pdConfig, storeService); + + storeService.init(partitionService); + partitionService.addInstructionListener(new PartitionInstructionListener(){ + + @Override + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException { + + } + + @Override + public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws PDException { + + } + + @Override + public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws PDException { + splitPartition.getNewPartitionList().forEach(p->{ + System.out.println("SplitPartition " + p.getId() + " " + p.getStartKey() + "," + p.getEndKey()); + }); + } + + @Override + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException{ + + } + + @Override + public void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException { + + } + + @Override + public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException { + + } + + @Override + public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) throws PDException { + + } + }); + int count = 6; + Metapb.Store[] stores = new Metapb.Store[count]; + for (int i = 0; i < count; i++) { + Metapb.Store store = Metapb.Store.newBuilder() + .setId(0) + .setAddress("127.0.0.1:850" + i) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default").build()) + .build(); + stores[i] = storeService.register(store); + System.out.println("新注册store, id = " + Long.toHexString(stores[i].getId())); + } + Assert.assertEquals(count, storeService.getStores().size()); + + Metapb.Graph graph = Metapb.Graph.newBuilder() + .setGraphName("defaultGH") + .build(); + Metapb.PartitionShard ptShard = partitionService.getPartitionByCode(graph.getGraphName(), 0); + System.out.println(ptShard.getPartition().getId()); + { + Metapb.Partition pt = ptShard.getPartition(); + System.out.println(pt.getId() + " " + pt.getStartKey() + "," + pt.getEndKey()); + } + + Assert.assertEquals(6, storeService.getShardGroups().size()); + // storeService.splitShardGroups(ptShard.getPartition().getId(), 4); + Assert.assertEquals(9, storeService.getShardGroups().size()); + storeService.getShardGroups().forEach(shardGroup -> { + System.out.println("shardGroup id = " + shardGroup.getId()); + }); + } + + // @Test + public void testPartitionService() throws PDException, ExecutionException, InterruptedException { + StoreNodeService storeService = new StoreNodeService(pdConfig); + int count = 6; + Metapb.Store[] stores = new Metapb.Store[count]; + for (int i = 0; i < count; i++) { + Metapb.Store store = Metapb.Store.newBuilder() + .setId(0) + .setAddress("" + i) + 
.setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default").build()) + .build(); + stores[i] = storeService.register(store); + System.out.println("新注册store, id = " + Long.toHexString(stores[i].getId())); + } + Assert.assertEquals(count, storeService.getStores("").size()); + + + PartitionService partitionService = new PartitionService(pdConfig, storeService); + + Metapb.Graph graph = Metapb.Graph.newBuilder() + .setGraphName("defaultGH") + + .setPartitionCount(10) + .build(); + // 申请分区 + Metapb.PartitionShard[] partitions = new Metapb.PartitionShard[10]; + for (int i = 0; i < partitions.length; i++) { + partitions[i] = partitionService.getPartitionShard(graph.getGraphName(), intToByteArray(i)); + Assert.assertEquals(3, storeService.getShardGroup(i).getShardsCount()); + } + System.out.println("分区数量: " + partitionService.getPartitions(graph.getGraphName()).size()); + + int[] caseNo = {0}; //1 测试增加shard, 2 //测试store下线 + + Metapb.Shard leader = null; + int[] finalCaseNo = caseNo; + + partitionService.addInstructionListener(new PartitionInstructionListener(){ + + @Override + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException { + switch (finalCaseNo[0]){ + case 2: + Assert.assertEquals(5, storeService.getShardGroup(partition.getId()).getShardsCount()); + break; + case 3: + storeService.getShardGroup(partition.getId()).getShardsList().forEach(shard -> { + Assert.assertNotEquals(shard.getStoreId(), stores[0].getId()); + }); + break; + } + + } + + @Override + public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) { + + } + + @Override + public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) { + } + + @Override + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException{ + + } + + @Override + public void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException { + + } + + @Override + public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException { + + } + + @Override + public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) + throws PDException { + + } + }); + Metapb.Partition partition = partitions[0].getPartition(); + leader = Metapb.Shard.newBuilder(storeService.getShardGroup(partition.getId()).getShardsList().get(0)).build(); + Metapb.Shard finalLeader = leader; + partitionService.addStatusListener(new PartitionStatusListener() { + @Override + public void onPartitionChanged(Metapb.Partition partition, Metapb.Partition newPartition) { + + } + + @Override + public void onPartitionRemoved(Metapb.Partition partition) { + + } + }); + // 测试修改图 + caseNo[0] = 1; partitionService.updateGraph(graph); + for(int i = 0; i< partitions.length ;i++) { + partitions[i] = partitionService.getPartitionShard(graph.getGraphName(), intToByteArray(i)); + Assert.assertEquals(3, storeService.getShardGroup(i).getShardsCount()); + } + + graph = Metapb.Graph.newBuilder(graph) + .setGraphName("defaultGH") + + .setPartitionCount(10) + .build(); + caseNo[0] = 2; partitionService.updateGraph(graph); + + // 测试store离线 + caseNo[0] = 3; partitionService.storeOffline(stores[0]); + + + + Metapb.PartitionStats stats = Metapb.PartitionStats.newBuilder() + .addGraphName(partition.getGraphName()) + .setId(partition.getId()) + .setLeader(Metapb.Shard.newBuilder(leader).setRole(Metapb.ShardRole.Leader)) + .build(); + // 
测试leader飘移 + caseNo[0] = 4; partitionService.partitionHeartbeat(stats); + AtomicReference shard = new AtomicReference<>(); + Metapb.PartitionShard ss = partitionService.getPartitionShardById(partition.getGraphName(), partition.getId()); + storeService.getShardList(partition.getId()).forEach(s->{ + if ( s.getRole() == Metapb.ShardRole.Leader){ + Assert.assertNull(shard.get()); + shard.set(s); + } + }); + + Assert.assertEquals(leader.getStoreId(), shard.get().getStoreId()); + + } + + + public static byte[] intToByteArray(int i) { + byte[] result = new byte[4]; + result[0] = (byte)((i >> 24) & 0xFF); + result[1] = (byte)((i >> 16) & 0xFF); + result[2] = (byte)((i >> 8) & 0xFF); + result[3] = (byte)(i & 0xFF); + return result; + } + + // @Test + public void testMergeGraphParams() throws PDException { + StoreNodeService storeService = new StoreNodeService(pdConfig); + PartitionService partitionService = new PartitionService(pdConfig, storeService); + + Metapb.Graph dfGraph = Metapb.Graph.newBuilder() + + .setPartitionCount(pdConfig.getPartition().getTotalCount()) + + .build(); + + Metapb.Graph graph1 = Metapb.Graph.newBuilder() + .setGraphName("test") + .setPartitionCount(20) + + .build(); + + Metapb.Graph graph2 = Metapb.Graph.newBuilder() + .setGraphName("test") + .setPartitionCount(7).build(); + Metapb.Graph graph3 = Metapb.Graph.newBuilder() + .setGraphName("test") + .build(); + Metapb.Graph graph4 = Metapb.Graph.newBuilder() + .setGraphName("test") + .build(); + + Metapb.Graph graph = Metapb.Graph.newBuilder(dfGraph).mergeFrom(graph2).build(); + Assert.assertEquals(graph2.getGraphName(), graph.getGraphName()); + + Assert.assertEquals(graph2.getPartitionCount(), graph.getPartitionCount()); + + + graph = Metapb.Graph.newBuilder(dfGraph).mergeFrom(graph3).build(); + Assert.assertEquals(graph3.getGraphName(), graph.getGraphName()); + + Assert.assertEquals(dfGraph.getPartitionCount(), graph.getPartitionCount()); + + + graph = Metapb.Graph.newBuilder(dfGraph).mergeFrom(graph4).build(); + Assert.assertEquals(graph4.getGraphName(), graph.getGraphName()); + + Assert.assertEquals(dfGraph.getPartitionCount(), graph.getPartitionCount()); + + } + + public static void deleteDirectory(File dir) { + try { + FileUtils.deleteDirectory(dir); + } catch (IOException e) { + System.out.println(String.format("Failed to start ....,%s", e.getMessage())); + } + } + + // @Test + public void test(){ + int[] n = new int[3]; + + + if ( ++n[2] > 1){ + System.out.println(n[2]); + } + if ( ++n[2] > 1){ + System.out.println(n[2]); + } + if ( ++n[2] > 1){ + System.out.println(n[2]); + } + } +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java new file mode 100644 index 0000000000..21afc4136f --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java @@ -0,0 +1,14 @@ +package org.apache.hugegraph.pd; + +import java.io.File; + +public class UnitTestBase { + public static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : dir.listFiles()) { + deleteDir(file); + } + } + return dir.delete(); + } +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java new file mode 100644 index 0000000000..28deca26fa --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java @@ -0,0 +1,29 @@ +package org.apache.hugegraph.pd.common; + +// 
import org.junit.Test; + +import java.nio.charset.StandardCharsets; + +import com.baidu.hugegraph.pd.common.PartitionUtils; + +public class PartitionUtilsTest { + + // @Test + public void testHashCode() { + int partCount = 10; + int partSize = PartitionUtils.MAX_VALUE / partCount + 1; + int[] counter = new int[partCount]; + for (int i = 0; i < 10000; i++) { + String s = String.format("BATCH-GET-UNIT-%02d", i); + int c = PartitionUtils.calcHashcode(s.getBytes(StandardCharsets.UTF_8)); + + counter[c / partSize]++; + + } + + for (int i = 0; i < counter.length; i++) + System.out.println(i + " " + counter[i]); + } + + +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java new file mode 100644 index 0000000000..b0a7568499 --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java @@ -0,0 +1,90 @@ +package org.apache.hugegraph.pd.store; + +import com.baidu.hugegraph.pd.common.PDException; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.commons.io.FileUtils; +import org.junit.Assert; +import org.junit.BeforeClass; +// import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Paths; + +public class HgKVStoreImplTest { + static PDConfig pdConfig; + static final String testPath = "tmp/test"; + + @BeforeClass + public static void init() throws IOException { + File testFile = new File(testPath); + if(testFile.exists()) { + FileUtils.deleteDirectory(testFile); + } + FileUtils.forceMkdir(testFile); + pdConfig = new PDConfig() {{ + setDataPath(testPath); + }}; + } + + // @Test + public void Test() throws PDException { + HgKVStore kvStore = new HgKVStoreImpl(); + kvStore.init(pdConfig); + + { + byte[] key = "hello".getBytes(); + byte[] value = "pd".getBytes(); + kvStore.put(key, value); + } + for(int i = 0; i<100; i++){ + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + + Assert.assertEquals(100, kvStore.scanPrefix("k".getBytes()).size()); + + kvStore.removeByPrefix("k".getBytes()); + Assert.assertEquals(0, kvStore.scanPrefix("k".getBytes()).size()); + } + + // @Test + public void TestSnapshot() throws PDException { + HgKVStore kvStore = new HgKVStoreImpl(); + kvStore.init(pdConfig); + + // put 100 data + for(int i = 0; i<100; i++){ + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + Assert.assertEquals(100, kvStore.scanPrefix("k".getBytes()).size()); + + // save snapshot + String snapshotPath = Paths.get(testPath, "snapshot").toString(); + kvStore.saveSnapshot(snapshotPath); + + // put another 100 data + for(int i = 100; i<200; i++){ + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + Assert.assertEquals(200, kvStore.scanPrefix("k".getBytes()).size()); + + // load snapshot + kvStore.loadSnapshot(snapshotPath); + Assert.assertEquals(100, kvStore.scanPrefix("k".getBytes()).size()); + + // put another 100 data + for(int i = 100; i<200; i++){ + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + Assert.assertEquals(200, kvStore.scanPrefix("k".getBytes()).size()); + } +} diff --git a/hg-pd-core/src/test/resources/log4j2.xml b/hg-pd-core/src/test/resources/log4j2.xml new file mode 100644 index 
0000000000..a157b6412b --- /dev/null +++ b/hg-pd-core/src/test/resources/log4j2.xml @@ -0,0 +1,122 @@ + + + + + + logs + hugegraph-pd + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hg-pd-dist/pom.xml b/hg-pd-dist/pom.xml new file mode 100644 index 0000000000..f0c3037c9b --- /dev/null +++ b/hg-pd-dist/pom.xml @@ -0,0 +1,130 @@ + + + + hugegraph-pd-root + com.baidu.hugegraph + 3.6.5-SNAPSHOT + + 4.0.0 + + hg-pd-dist + + + ${project.parent.basedir}/dist + bash + ${project.basedir}/src/assembly + ${assembly.dir}/descriptor + ${assembly.dir}/static + hugegraph-pd-${project.parent.version} + hugegraph-pd + + + + + com.baidu.hugegraph + hugegraph-pd + ${project.version} + + + + + + maven-assembly-plugin + 2.4 + + + assembly-hugegraph-pd + package + + single + + + false + false + ${dist.dir} + + + ${assembly.descriptor.dir}/server-assembly.xml + + + ${final.name} + + + + + + maven-clean-plugin + + + + ${dist.dir} + + + + + + initialize + + clean + + + + + + maven-antrun-plugin + 1.3 + + + package + + run + + + + + + + + + tar -zcvf \ + ${dist.dir}/${final.name}.tar.gz \ + ${final.name} || exit 1 + rm -f ${dist.dir}/dist.sh + rm -rf ${dist.dir}/${final.name} + echo + echo "HugeGraph dist tar.gz available at: + ${dist.dir}/${final.name}.tar.gz" + echo + + + + + + + + + + + + + ant-contrib + ant-contrib + 1.0b3 + + + ant + ant + + + + + + + + + + diff --git a/hg-pd-dist/src/assembly/descriptor/server-assembly.xml b/hg-pd-dist/src/assembly/descriptor/server-assembly.xml new file mode 100644 index 0000000000..42c4a79633 --- /dev/null +++ b/hg-pd-dist/src/assembly/descriptor/server-assembly.xml @@ -0,0 +1,40 @@ + + distribution + false + + + dir + + + + + ${assembly.static.dir}/bin + bin + + * + + 755 + + + ${assembly.static.dir}/conf + conf + + * + + + + + + + + /lib + false + runtime + false + + com.baidu.hugegraph:${executable.jar.name}:jar:* + + + + + \ No newline at end of file diff --git a/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh b/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh new file mode 100644 index 0000000000..9eb60d1cd0 --- /dev/null +++ b/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh @@ -0,0 +1,107 @@ +#!/bin/bash + +if [ -z "$GC_OPTION" ];then + GC_OPTION="" +fi +if [ -z "$USER_OPTION" ];then + USER_OPTION="" +fi + +while getopts "g:j:v" arg; do + case ${arg} in + g) GC_OPTION="$OPTARG" ;; + j) USER_OPTION="$OPTARG" ;; + v) VERBOSE="verbose" ;; + ?) echo "USAGE: $0 [-g g1] [-j xxx] [-v]" && exit 1 ;; + esac +done + +function abs_path() { + SOURCE="${BASH_SOURCE[0]}" + while [ -h "$SOURCE" ]; do + DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + SOURCE="$(readlink "$SOURCE")" + [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" + done + echo "$( cd -P "$( dirname "$SOURCE" )" && pwd )" +} + +BIN=$(abs_path) +TOP="$(cd "$BIN"/../ && pwd)" +CONF="$TOP/conf" +LIB="$TOP/lib" +LOGS="$TOP/logs" +OUTPUT=${LOGS}/hugegraph-pd-stdout.log +PID_FILE="$BIN/pid" + +. "$BIN"/util.sh + +mkdir -p ${LOGS} + +# The maximum and minium heap memory that service can use +MAX_MEM=$((32 * 1024)) +MIN_MEM=$((1 * 512)) +EXPECT_JDK_VERSION=11 + +# Change to $BIN's parent +cd ${TOP} + +# Find Java +if [ "$JAVA_HOME" = "" ]; then + JAVA="java" +else + JAVA="$JAVA_HOME/bin/java" +fi + +# check jdk version +JAVA_VERSION=$($JAVA -version 2>&1 | awk 'NR==1{gsub(/"/,""); print $3}' | awk -F'_' '{print $1}') +if [[ $? 
-ne 0 || $JAVA_VERSION < $EXPECT_JDK_VERSION ]]; then + echo "Please make sure that the JDK is installed and the version >= $EXPECT_JDK_VERSION" >> ${OUTPUT} + exit 1 +fi + +# Set Java options +if [ "$JAVA_OPTIONS" = "" ]; then + XMX=$(calc_xmx $MIN_MEM $MAX_MEM) + if [ $? -ne 0 ]; then + echo "Failed to start HugeGraphPDServer, requires at least ${MIN_MEM}m free memory" \ + >> ${OUTPUT} + exit 1 + fi + JAVA_OPTIONS="-Xms${MIN_MEM}m -Xmx${XMX}m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${LOGS} ${USER_OPTION}" + + # Rolling out detailed GC logs + #JAVA_OPTIONS="${JAVA_OPTIONS} -XX:+UseGCLogFileRotation -XX:GCLogFileSize=10M -XX:NumberOfGCLogFiles=3 \ + # -Xloggc:./logs/gc.log -XX:+PrintHeapAtGC -XX:+PrintGCDetails -XX:+PrintGCDateStamps" +fi + +# Using G1GC as the default garbage collector (Recommended for large memory machines) +case "$GC_OPTION" in + g1) + echo "Using G1GC as the default garbage collector" + JAVA_OPTIONS="${JAVA_OPTIONS} -XX:+UseG1GC -XX:+ParallelRefProcEnabled \ + -XX:InitiatingHeapOccupancyPercent=50 -XX:G1RSetUpdatingPauseTimePercent=5" + ;; + "") ;; + *) + echo "Unrecognized gc option: '$GC_OPTION', only support 'g1' now" >> ${OUTPUT} + exit 1 +esac + +#if [ "${JMX_EXPORT_PORT}" != "" ] && [ ${JMX_EXPORT_PORT} -ne 0 ] ; then +# JAVA_OPTIONS="${JAVA_OPTIONS} -javaagent:${LIB}/jmx_prometheus_javaagent-0.16.1.jar=${JMX_EXPORT_PORT}:${CONF}/jmx_exporter.yml" +#fi +if [ $(ps -ef|grep -v grep| grep java|grep -cE ${CONF}) -ne 0 ]; then + echo "HugeGraphPDServer is already running..." + exit 0 +fi +echo "Starting HugeGraphPDServer..." + +# Turn on security check +exec ${JAVA} ${JAVA_OPTIONS} -jar -Dspring.config.location=${CONF}/application.yml \ + ${LIB}/hugegraph-pd-3.6.5-SNAPSHOT.jar >> ${OUTPUT} 2>&1 & + +PID="$!" +# Write pid to file +echo "$PID" > "$PID_FILE" +echo "[+pid] $PID" diff --git a/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh b/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh new file mode 100644 index 0000000000..eeb709271c --- /dev/null +++ b/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +abs_path() { + SOURCE="${BASH_SOURCE[0]}" + while [ -h "$SOURCE" ]; do + DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + SOURCE="$(readlink "$SOURCE")" + [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" + done + echo "$( cd -P "$( dirname "$SOURCE" )" && pwd )" +} + +BIN=`abs_path` +TOP="$(cd $BIN/../ && pwd)" + +. "$BIN"/util.sh + +PID_FILE=$BIN/pid +SERVER_SHUTDOWN_TIMEOUT_S=30 + +if [ ! -f ${PID_FILE} ]; then + echo "The pid file $PID_FILE doesn't exist" + exit 0 +fi + +PID=`cat $PID_FILE` +kill_process_and_wait "HugeGraphPDServer" "$PID" "$SERVER_SHUTDOWN_TIMEOUT_S" + +if [ $? -eq 0 ]; then + rm "$PID_FILE" +fi + diff --git a/hg-pd-dist/src/assembly/static/bin/util.sh b/hg-pd-dist/src/assembly/static/bin/util.sh new file mode 100644 index 0000000000..188d4bb545 --- /dev/null +++ b/hg-pd-dist/src/assembly/static/bin/util.sh @@ -0,0 +1,355 @@ +#!/bin/bash + +function command_available() { + local cmd=$1 + if [ `command -v $cmd >/dev/null 2>&1` ]; then + return 1 + else + return 0 + fi +} + +# read a property from .properties file +function read_property() { + # file path + file_name=$1 + # replace "." to "\." 
+ property_name=`echo $2 | sed 's/\./\\\./g'` + cat $file_name | sed -n -e "s/^[ ]*//g;/^#/d;s/^$property_name=//p" | tail -1 +} + +function write_property() { + local file=$1 + local key=$2 + local value=$3 + + local os=`uname` + case $os in + # Note: in mac os should use sed -i '' "xxx" to replace string, + # otherwise prompt 'command c expects \ followed by text'. + # See http://www.cnblogs.com/greedy-day/p/5952899.html + Darwin) sed -i '' "s!$key=.*!$key=$value!g" "$file" ;; + *) sed -i "s!$key=.*!$key=$value!g" "$file" ;; + esac +} + +function parse_yaml() { + local file=$1 + local version=$2 + local module=$3 + + cat $file | tr -d '\n {}'| awk -F',+|:' '''{ + pre=""; + for(i=1; i<=NF; ) { + if(match($i, /version/)) { + pre=$i; + i+=1 + } else { + result[pre"-"$i] = $(i+1); + i+=2 + } + } + } END {for(e in result) {print e": "result[e]}}''' \ + | grep "$version-$module" | awk -F':' '{print $2}' | tr -d ' ' && echo +} + +function process_num() { + num=`ps -ef | grep $1 | grep -v grep | wc -l` + return $num +} + +function process_id() { + pid=`ps -ef | grep $1 | grep -v grep | awk '{print $2}'` + return $pid +} + +# check the port of rest server is occupied +function check_port() { + local port=`echo $1 | awk -F':' '{print $3}'` + if ! command_available "lsof"; then + echo "Required lsof but it is unavailable" + exit 1 + fi + lsof -i :$port >/dev/null + if [ $? -eq 0 ]; then + echo "The port $port has already been used" + exit 1 + fi +} + +function crontab_append() { + local job="$1" + crontab -l | grep -F "$job" >/dev/null 2>&1 + if [ $? -eq 0 ]; then + return 1 + fi + (crontab -l ; echo "$job") | crontab - +} + +function crontab_remove() { + local job="$1" + # check exist before remove + crontab -l | grep -F "$job" >/dev/null 2>&1 + if [ $? -eq 1 ]; then + return 0 + fi + + crontab -l | grep -Fv "$job" | crontab - + + # Check exist after remove + crontab -l | grep -F "$job" >/dev/null 2>&1 + if [ $? -eq 0 ]; then + return 1 + else + return 0 + fi +} + +# wait_for_startup friendly_name host port timeout_s +function wait_for_startup() { + local pid="$1" + local server_name="$2" + local server_url="$3" + local timeout_s="$4" + + local now_s=`date '+%s'` + local stop_s=$(( $now_s + $timeout_s )) + + local status + + echo -n "Connecting to $server_name ($server_url)" + while [ $now_s -le $stop_s ]; do + echo -n . + process_status "$server_name" "$pid" >/dev/null + if [ $? 
-eq 1 ]; then + echo "Starting $server_name failed" + return 1 + fi + + status=`curl -o /dev/null -s -k -w %{http_code} $server_url` + if [[ $status -eq 200 || $status -eq 401 ]]; then + echo "OK" + echo "Started [pid $pid]" + return 0 + fi + sleep 2 + now_s=`date '+%s'` + done + + echo "The operation timed out when attempting to connect to $server_url" >&2 + return 1 +} + +function free_memory() { + local free="" + local os=`uname` + if [ "$os" == "Linux" ]; then + local mem_free=`cat /proc/meminfo | grep -w "MemFree" | awk '{print $2}'` + local mem_buffer=`cat /proc/meminfo | grep -w "Buffers" | awk '{print $2}'` + local mem_cached=`cat /proc/meminfo | grep -w "Cached" | awk '{print $2}'` + if [[ "$mem_free" == "" || "$mem_buffer" == "" || "$mem_cached" == "" ]]; then + echo "Failed to get free memory" + exit 1 + fi + free=`expr $mem_free + $mem_buffer + $mem_cached` + free=`expr $free / 1024` + elif [ "$os" == "Darwin" ]; then + local pages_free=`vm_stat | awk '/Pages free/{print $0}' | awk -F'[:.]+' '{print $2}' | tr -d " "` + local pages_inactive=`vm_stat | awk '/Pages inactive/{print $0}' | awk -F'[:.]+' '{print $2}' | tr -d " "` + local pages_available=`expr $pages_free + $pages_inactive` + free=`expr $pages_available \* 4096 / 1024 / 1024` + else + echo "Unsupported operating system $os" + exit 1 + fi + echo $free +} + +function calc_xmx() { + local min_mem=$1 + local max_mem=$2 + # Get machine available memory + local free=`free_memory` + local half_free=$[free/2] + + local xmx=$min_mem + if [[ "$free" -lt "$min_mem" ]]; then + exit 1 + elif [[ "$half_free" -ge "$max_mem" ]]; then + xmx=$max_mem + elif [[ "$half_free" -lt "$min_mem" ]]; then + xmx=$min_mem + else + xmx=$half_free + fi + echo $xmx +} + +function remove_with_prompt() { + local path=$1 + local tips="" + + if [ -d "$path" ]; then + tips="Remove directory '$path' and all sub files [y/n]?" + elif [ -f "$path" ]; then + tips="Remove file '$path' [y/n]?" + else + return 0 + fi + + read -p "$tips " yn + case $yn in + [Yy]* ) rm -rf "$path";; + * ) ;; + esac +} + +function ensure_path_writable() { + local path=$1 + # Ensure input path exist + if [ ! -d "${path}" ]; then + mkdir -p ${path} + fi + # Check for write permission + if [ ! 
-w "${path}" ]; then + echo "No write permission on directory ${path}" + exit 1 + fi +} + +function get_ip() { + local os=`uname` + local loopback="127.0.0.1" + local ip="" + case $os in + Linux) + if command_available "ifconfig"; then + ip=`ifconfig | grep 'inet addr:' | grep -v "$loopback" | cut -d: -f2 | awk '{ print $1}'` + elif command_available "ip"; then + ip=`ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | awk -F"/" '{print $1}'` + else + ip=$loopback + fi + ;; + FreeBSD|OpenBSD|Darwin) + if command_available "ifconfig"; then + ip=`ifconfig | grep -E 'inet.[0-9]' | grep -v "$loopback" | awk '{ print $2}'` + else + ip=$loopback + fi + ;; + SunOS) + if command_available "ifconfig"; then + ip=`ifconfig -a | grep inet | grep -v "$loopback" | awk '{ print $2} '` + else + ip=$loopback + fi + ;; + *) ip=$loopback;; + esac + echo $ip +} + +function download() { + local path=$1 + local link_url=$2 + + if command_available "wget"; then + wget --help | grep -q '\--show-progress' && progress_opt="-q --show-progress" || progress_opt="" + wget ${link_url} -P ${path} $progress_opt + elif command_available "curl"; then + curl ${link_url} -o ${path}/${link_url} + else + echo "Required wget or curl but they are unavailable" + exit 1 + fi +} + +function ensure_package_exist() { + local path=$1 + local dir=$2 + local tar=$3 + local link=$4 + + if [ ! -d ${path}/${dir} ]; then + if [ ! -f ${path}/${tar} ]; then + echo "Downloading the compressed package '${tar}'" + download ${path} ${link} + if [ $? -ne 0 ]; then + echo "Failed to download, please ensure the network is available and link is valid" + exit 1 + fi + echo "[OK] Finished download" + fi + echo "Unzip the compressed package '$tar'" + tar -zxvf ${path}/${tar} -C ${path} >/dev/null 2>&1 + if [ $? -ne 0 ]; then + echo "Failed to unzip, please check the compressed package" + exit 1 + fi + echo "[OK] Finished unzip" + fi +} + +########################################################################### + +function wait_for_shutdown() { + local process_name="$1" + local pid="$2" + local timeout_s="$3" + + local now_s=`date '+%s'` + local stop_s=$(( $now_s + $timeout_s )) + + echo -n "Killing $process_name(pid $pid)" >&2 + while [ $now_s -le $stop_s ]; do + echo -n . + process_status "$process_name" "$pid" >/dev/null + if [ $? -eq 1 ]; then + echo "OK" + return 0 + fi + sleep 2 + now_s=`date '+%s'` + done + echo "$process_name shutdown timeout(exceeded $timeout_s seconds)" >&2 + return 1 +} + +function process_status() { + local process_name="$1" + local pid="$2" + + ps -p "$pid" + if [ $? 
-eq 0 ]; then + echo "$process_name is running with pid $pid" + return 0 + else + echo "The process $process_name does not exist" + return 1 + fi +} + +function kill_process() { + local process_name="$1" + local pid="$2" + + if [ -z "$pid" ]; then + echo "The process $pid does not exist" + return 0 + fi + + case "`uname`" in + CYGWIN*) taskkill /F /PID "$pid" ;; + *) kill "$pid" ;; + esac +} + +function kill_process_and_wait() { + local process_name="$1" + local pid="$2" + local timeout_s="$3" + + kill_process "$process_name" "$pid" + wait_for_shutdown "$process_name" "$pid" "$timeout_s" +} diff --git a/hg-pd-dist/src/assembly/static/conf/application.yml b/hg-pd-dist/src/assembly/static/conf/application.yml new file mode 100644 index 0000000000..94c69e4ff5 --- /dev/null +++ b/hg-pd-dist/src/assembly/static/conf/application.yml @@ -0,0 +1,58 @@ +spring: + application: + name: hugegraph-pd + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +logging: + config: 'file:./conf/log4j2.xml' +license: + verify-path: ./conf/verify-license.json + license-path: ./conf/hugegraph.license +grpc: + port: 8686 + # gRPC service address; change it to the host's actual IPv4 address when deploying. + host: 127.0.0.1 + +server: + # REST service port + port : 8620 + +pd: + # Data storage path + data-path: ./pd_data + # Patrol interval for auto-scaling: periodically checks the partition count on each store and rebalances partitions automatically + patrol-interval: 1800 + # Initial store list (grpc IP:grpc port); stores in this list are activated automatically + initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 +raft: + # Raft service address of this node + address: 127.0.0.1:8610 + # PD cluster peer addresses + peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612 + +store: + # Store offline time, in seconds. Beyond this, the store is considered permanently unavailable and its replicas are reassigned to other machines + max-down-time: 172800 + # Whether to persist store monitoring data + monitor_data_enabled: true + # Monitoring data interval: minute (default), hour, second + # default: 1 min * 1 day = 1440 + monitor_data_interval: 1 minute + # Monitoring data retention, e.g. 1 day; units: day, month, year + monitor_data_retention: 1 day + +partition: + # Default number of replicas per partition + default-shard-count: 3 + # Default maximum shards per machine; initial partition count = store-max-shard-count * store-number / default-shard-count + store-max-shard-count: 12 + diff --git a/hg-pd-dist/src/assembly/static/conf/application.yml.template b/hg-pd-dist/src/assembly/static/conf/application.yml.template new file mode 100644 index 0000000000..c00bf47b32 --- /dev/null +++ b/hg-pd-dist/src/assembly/static/conf/application.yml.template @@ -0,0 +1,54 @@ +spring: + application: + name: hugegraph-pd +logging: + config: file:./conf/log4j2.xml + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +grpc: + port: $GRPC_PORT$ + # gRPC service address, + # Note: change it to the host's actual IPv4 address when deploying. + host: $GRPC_HOST$ + netty-server: + max-inbound-message-size: 100MB + +server: + port : $SERVER_PORT$ + +pd: + # Cluster ID, used to distinguish different PD clusters + + patrol-interval: 2147483647 + data-path: $PD_DATA_PATH$ + +raft: + address: $RAFT_ADDRESS$ + # Raft cluster peers + peers-list: $RAFT_PEERS_LIST$ + # Snapshot generation interval, in seconds + snapshotInterval: 300 + metrics: true +store: + # Store heartbeat timeout, in seconds. Beyond this, the store is considered temporarily unavailable and leaders are transferred to other replicas + keepAlive-timeout: 60 + # Store offline time, in seconds. Beyond this, the store is considered permanently unavailable and its replicas are reassigned to other machines + max-down-time: 1800 +partition: + # Default total number of partitions + default-total-count: 30 + # Default number of replicas per partition + default-shard-count: 3 + +discovery: + # Maximum number of missed heartbeats after a client registers; once exceeded, the previous registration is removed + heartbeat-try-count: 3 diff --git a/hg-pd-dist/src/assembly/static/conf/hugegraph.license b/hg-pd-dist/src/assembly/static/conf/hugegraph.license new file mode 100644 index 0000000000000000000000000000000000000000..bca71af9a1966024e7a99880302f6b592a33ca84 GIT binary patch literal 856 
zcmV-e1E>69hn&I!I)GnSv~^hE{iID$vF0rJw0JnV&Nu&pJ?*HLQ#!nms8=CeFwzT% z)5v&U=s7DO7$r#-RSw|2Vq*`|(2D5<5GKxmIX=E1#UCniwf>oeSOP{p(e{%an?_vT zYc)wIUQsJ<9Ai=YsETz;YFaYQ^&YAJD(|KO1~NKs#B+QiROyg@d5VjOt~PJcGl>9= zPZ!gB)$<%a;*Yw{lDP)9Cj#PNktQ)_SRi&!l=1Y})2$oO3ASGnC|m#T{b9DXGt@^9 zRQlb4GeipvU&m_F1_$wwu0?i5Cz0TTSl`#uc}6(t4&YjvWkFI)*jMO@Me_Le_=M(v zSrA?RjyET9V15A)-p(d1q)ry6yF$rH4#5n`?m|ws9TUw@;$3qiBbOlEoKdy_MT)~V z=N#&y{vn8U2Og2iJ{)W-ZO7$zLHEUMN-l$XRvs_I_2T?X5&UXMi&;q8i8d}FWtd=8y@PLaCFm+wpR#G)aC{`rVrs%8)@u!?}9E$ZnDYo!c5hiA?8Qm}y{K9F) zTlyJ+OL<98%B`lIL|}hc9JSDFm9?7x2CnOjGJE_7%Jp&Bif`p29?ai>C(n0UWicnv zrZH578>dDfFP_O?YlK@|!U^eZflg1)Hs(pq1{WqS2Cflfgsa$(yyyfhqxr2y)xO<- ztE~YE$2dMsybWD-Bi<89DZ@L;S$hw~0DG7-yoR%(%Gon5wDYD`dL4k_U6j*47Z4h4 zwCZyUzNtj|D0Iax|0F9J(~M5|FP&r1ysHQXgy8J7aEa}MA3?yWdO@W6NuOQq zxI&O;%`%{#7kN9?F(hYrclJ8cN^CxZ6s68>v%<^cjA#tlBAKkmyCm&l%`g32m?oed z6hL($sbOP?W%_S#UNEW$*u&~DQD5$(f*}}lGTzkLe#?(+LA~;aL=0#EKm?Ju+ zaOe2lRrWmfBd1Mk57Xciq<;l;cn=*wkW>MbOpHWHg~zj9arkkR#q-;Ch&7kk6E(mR i^Sj|__)(b4Oh-2cCDrT@A%)082}=;$lBJ*eQVTP`m7MAT literal 0 HcmV?d00001 diff --git a/hg-pd-dist/src/assembly/static/conf/log4j2.xml b/hg-pd-dist/src/assembly/static/conf/log4j2.xml new file mode 100644 index 0000000000..275c8467c6 --- /dev/null +++ b/hg-pd-dist/src/assembly/static/conf/log4j2.xml @@ -0,0 +1,117 @@ + + + + + + logs + hugegraph-pd + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hg-pd-dist/src/assembly/static/conf/verify-license.json b/hg-pd-dist/src/assembly/static/conf/verify-license.json new file mode 100644 index 0000000000..868ccbebbb --- /dev/null +++ b/hg-pd-dist/src/assembly/static/conf/verify-license.json @@ -0,0 +1,6 @@ +{ + "subject": "hugegraph-license", + "public_alias": "publiccert", + "store_ticket": "803b6cc3-d144-47e8-948f-ec8b39c8881e", + "publickey_path": "/public-certs.store" +} diff --git a/hg-pd-grpc/pom.xml b/hg-pd-grpc/pom.xml new file mode 100644 index 0000000000..b55790263c --- /dev/null +++ b/hg-pd-grpc/pom.xml @@ -0,0 +1,120 @@ + + + + 4.0.0 + + + org.apache.hugegraph + hugegraph-pd-root + 3.6.5-SNAPSHOT + + hg-pd-grpc + + + + 1.6.0 + 1.39.0 + 3.17.2 + 0.6.1 + + + + + io.grpc + grpc-netty-shaded + ${grpc.version} + + + io.grpc + grpc-protobuf + ${grpc.version} + + + io.grpc + grpc-stub + ${grpc.version} + + + javax.annotation + javax.annotation-api + 1.3.2 + + + + + ${basedir}/src/main/java + + + src/main/resources + + + src/main/proto + + + + + kr.motd.maven + os-maven-plugin + ${os.plugin.version} + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + ${protobuf.plugin.version} + true + + + com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier} + + grpc-java + + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + + + ${project.basedir}/src/main/proto + + + ${project.basedir}/src/main/java + + false + + + + + + generate-sources + + + compile + + compile-custom + + + + + + maven-clean-plugin + + + + src/main/java + + + + + + + initialize + + clean + + + + + + + \ No newline at end of file diff --git a/hg-pd-grpc/src/main/proto/discovery.proto b/hg-pd-grpc/src/main/proto/discovery.proto new file mode 100644 index 0000000000..c76aadbd7c --- /dev/null +++ b/hg-pd-grpc/src/main/proto/discovery.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; +package discovery; +import "pdpb.proto"; + +option java_package = "com.baidu.hugegraph.pd.grpc.discovery"; +option java_multiple_files = 
true; + + +service DiscoveryService { + rpc register(NodeInfo) returns (RegisterInfo); + rpc getNodes(Query) returns (NodeInfos); +// rpc getNodesByLabel(Conditions) returns (NodeInfos); +} + +/* requests */ +message NodeInfo { + string id = 1; + string appName = 2; + string version = 3; + string address = 4; + int64 interval = 5; + map labels = 6; +} +message Query { + string appName = 1; + string version = 2; + map labels = 3; +} +message LeaseInfo { + int64 registrationTs = 1; + int64 lastHeartbeatTs = 2; + int64 serverUpTs = 3; +} +message RegisterInfo { + NodeInfo nodeInfo = 1; + LeaseInfo leaseInfo = 2 ; + RegisterType type = 3 ; + pdpb.ResponseHeader header = 4; +} +enum RegisterType { + Register = 0; + Heartbeat = 1; + Dislodge = 2; +} +//message Condition{ +// string label = 1; +//} +//message Conditions{ +// string label = 1; +// string value = 2; +//} +message NodeInfos{ + repeated NodeInfo info = 1; +} \ No newline at end of file diff --git a/hg-pd-grpc/src/main/proto/kv.proto b/hg-pd-grpc/src/main/proto/kv.proto new file mode 100644 index 0000000000..3cd6d4b0b7 --- /dev/null +++ b/hg-pd-grpc/src/main/proto/kv.proto @@ -0,0 +1,126 @@ +syntax = "proto3"; +package kv; +import "pdpb.proto"; +import "metapb.proto"; + +option java_package = "com.baidu.hugegraph.pd.grpc.kv"; +option java_multiple_files = true; + + +service KvService { + rpc put(Kv) returns (KvResponse); + rpc get(K) returns (KResponse); + rpc delete(K) returns (KvResponse); + rpc deletePrefix(K) returns (KvResponse); + rpc scanPrefix(K) returns (ScanPrefixResponse); + rpc watch(WatchRequest) returns (stream WatchResponse); + rpc watchPrefix(WatchRequest) returns (stream WatchResponse); + rpc lock(LockRequest) returns (LockResponse); + rpc lockWithoutReentrant(LockRequest) returns (LockResponse); + rpc unlock(LockRequest) returns (LockResponse); + rpc keepAlive(LockRequest) returns (LockResponse); + rpc isLocked(LockRequest) returns (LockResponse); + rpc putTTL(TTLRequest) returns (TTLResponse); + rpc keepTTLAlive(TTLRequest) returns (TTLResponse); +} + +/* requests */ +message Kv { + pdpb.RequestHeader header = 1; + string key = 2; + string value = 3; +} +message KvResponse { + pdpb.ResponseHeader header = 1; +} + +message K{ + pdpb.RequestHeader header = 1; + string key = 2; +} + +message KResponse{ + pdpb.ResponseHeader header = 1; + string value = 2; +} + +message ScanPrefixResponse { + pdpb.ResponseHeader header = 1; + map kvs = 2; +} + +message LockRequest{ + pdpb.RequestHeader header = 1; + string key = 2; + int64 ttl = 3; + int64 clientId = 4; +} +message LockResponse{ + pdpb.ResponseHeader header = 1; + string key = 2; + int64 ttl = 3; + int64 clientId = 4; + bool succeed = 5; +} + +message LockAliveResponse{ + pdpb.ResponseHeader header = 1; + int64 clientId = 2; +} + + +message WatchKv { + string key = 1; + string value = 2; +} + +enum WatchType { + Put = 0; + Delete = 1; + Unrecognized = 2; +} + +message WatchEvent { + WatchKv current = 1; + WatchKv prev = 2; + WatchType type = 3; +} + +message WatchResponse { + pdpb.ResponseHeader header = 1; + repeated WatchEvent events= 2; + int64 clientId = 3; + WatchState state = 4; +} + +enum WatchState { + Starting = 0; + Started = 1; + Leader_Changed = 2; + Alive = 3; +} + +message WatchRequest { + pdpb.RequestHeader header = 1; + WatchState state= 2; + string key = 3; + int64 clientId = 4; +} + +message V{ + string value = 1; + int64 ttl = 2; + int64 st =3; +} + +message TTLRequest{ + pdpb.RequestHeader header = 1; + string key = 2; + string value = 3; + int64 ttl 
= 4; +} + +message TTLResponse{ + pdpb.ResponseHeader header = 1; + bool succeed = 2; +} \ No newline at end of file diff --git a/hg-pd-grpc/src/main/proto/metaTask.proto b/hg-pd-grpc/src/main/proto/metaTask.proto new file mode 100644 index 0000000000..47cc9cd5d6 --- /dev/null +++ b/hg-pd-grpc/src/main/proto/metaTask.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; +package metaTask; +import "metapb.proto"; +import "pd_pulse.proto"; +option java_package = "com.baidu.hugegraph.pd.grpc"; + +enum TaskType { + Unknown = 0; + Split_Partition = 1; + Change_Shard = 2; + Move_Partition = 3; + Clean_Partition = 4; + Change_KeyRange = 5; +} + +// A single task record +message Task { + uint64 id = 1; + TaskType type = 2; + TaskState state = 3; + int64 start_timestamp = 4; + metapb.Partition partition = 5; + string message = 6; + // Task state on each shard + repeated ShardTaskState shardState = 7; + ChangeShard changeShard = 9; + SplitPartition splitPartition = 10; + MovePartition movePartition = 11; + CleanPartition cleanPartition = 12; + PartitionKeyRange partitionKeyRange = 13; +} + +enum TaskState{ + Task_Unknown = 0; + Task_Ready = 1; // Task is ready + Task_Doing = 2; // Running + Task_Done = 3; // Done + Task_Exit = 4; // Exited + Task_Stop = 10; + Task_Success = 11; + Task_Failure = 12; +} + +message ShardTaskState{ + uint64 store_id = 1; + TaskState state = 2; +} + diff --git a/hg-pd-grpc/src/main/proto/metapb.proto b/hg-pd-grpc/src/main/proto/metapb.proto new file mode 100644 index 0000000000..54477288b6 --- /dev/null +++ b/hg-pd-grpc/src/main/proto/metapb.proto @@ -0,0 +1,375 @@ +syntax = "proto3"; +package metapb; +option java_package = "com.baidu.hugegraph.pd.grpc"; +import "google/protobuf/any.proto"; + +enum ClusterState{ + // Cluster healthy + Cluster_OK = 0; + // Partition warning: some nodes are faulty, reads and writes are not affected in the short term + Cluster_Warn = 2; + // Partition offline: readable but not writable + Cluster_Offline = 10; + // Partition fault: neither readable nor writable, faulty nodes must be repaired as soon as possible. + Cluster_Fault = 11; + Cluster_Not_Ready = -1; +} +// Cluster status +message ClusterStats{ + ClusterState state = 1; + string message = 2; + uint64 timestamp = 16; +} + +enum StoreState { + Unknown = 0; + // Not activated + Pending = 4; + // Online + Up = 1; + // Offline + Offline = 2; + // Going offline + Exiting = 5; + // Decommissioned + Tombstone = 3; +} + +// Store label for Storage grouping. +message StoreLabel { + string key = 1; + string value = 2; +} + +message Store { + uint64 id = 1; + // Address to handle client requests + string address = 2; + string raft_address = 3; + repeated StoreLabel labels = 4; + // Store software version + string version = 5; + StoreState state = 6; + // The start timestamp of the current store + int64 start_timestamp = 7; + string deploy_path = 8; + // The last heartbeat timestamp of the store. 
+ int64 last_heartbeat = 9; + StoreStats stats = 10; + // Data format version + int32 data_version = 11; + int32 cores = 12; + string data_path = 13; +} + +enum ShardRole { + None = 0; + Leader = 1; + Follower = 2; + // Learner/None -> Learner + Learner = 3; +} + +message Shard { + uint64 store_id = 2; + ShardRole role = 3; +} + +message ShardGroup{ + uint32 id = 1; + uint64 version = 2; + uint64 conf_ver = 3; + repeated Shard shards = 6; + PartitionState state = 10; + string message = 11; +} + +message Graph { + string graph_name = 2; + // Partition count; 0 means invalid, must not exceed the total number of raft groups + int32 partition_count = 3; + // Current working state + PartitionState state = 10; + string message = 11; + GraphState graph_state = 12; +} +// Partition working state +enum PartitionState{ + PState_None = 0; + // + PState_Normal = 1; + // Partition warning: some nodes are faulty, reads and writes are not affected in the short term + PState_Warn = 2; + // Partition offline: readable but not writable + PState_Offline = 10; + // Partition fault: neither readable nor writable, faulty nodes must be repaired as soon as possible. + PState_Fault = 11; +} + +message PartitionV36 { + uint32 id = 1; + string graph_name = 3; + // Partition range [start_key, end_key). + uint64 start_key = 4; + uint64 end_key = 5; + repeated Shard shards = 6; + // Leader term, incremented on each leader change + uint64 version = 7; + // Shards version, incremented on each change + uint64 conf_ver = 8; + // Current working state + PartitionState state = 10; + string message = 11; +} + +message Partition { + uint32 id = 1; + string graph_name = 3; + // Partition range [start_key, end_key). + uint64 start_key = 4; + uint64 end_key = 5; + // The Partition object no longer stores the shard list (query the corresponding shard group instead); version and conf version no longer carry real meaning + // repeated Shard shards = 6; + // Incremented on each key range change + uint64 version = 7; + // Shards version, incremented on each change + // uint64 conf_ver = 8; + // Current working state + PartitionState state = 10; + string message = 11; +} + +message PartitionShard { + metapb.Partition partition = 1; + metapb.Shard leader = 2; + // Offline shards + repeated metapb.Shard offline_shards = 3; +} +// Records the storage location of a partition +message PartitionStore { + uint32 partition_id = 1; + string graph_name = 3; + // Storage location + string store_location = 4; +} + +message PartitionRaft { + uint32 partition_id = 1; + string graph_name = 3; + // Storage location + string raft_location = 4; +} + +message ShardStats{ + uint64 store_id = 2; + ShardRole role = 3; + ShardState state = 4; + // Progress of snapshot installation + uint32 progress = 5; +} +message PartitionStats{ + uint32 id = 1; + // Term of the raft group. + uint64 leader_term = 2; + repeated string graph_name = 3; + metapb.Shard leader = 4; + // Offline shards + repeated metapb.Shard shard = 5; + repeated metapb.Shard learner = 6; + uint64 conf_ver = 7; + // Partition state + PartitionState state = 8; + repeated ShardStats shardStats = 9; + // Approximate partition size + uint64 approximate_size = 10; + // Approximate number of keys in the partition + uint64 approximate_keys = 13; + // heartbeat timestamp + int64 timestamp = 16; +} + +message GraphStats{ + // Graph name + string graph_name = 1; + // Approximate partition size + uint64 approximate_size = 2; + // Approximate number of keys in the partition + uint64 approximate_keys = 3; +// // committed index +// uint64 committed_index = 4; + uint32 partition_id = 5; + ShardRole role = 6; + // Current working state + PartitionState work_state = 8; +} + +message RaftStats { + // partition id + uint32 partition_id = 1; + // committed index + uint64 committed_index = 2; +} + +message TimeInterval { + // The unix timestamp in seconds of the start of this period. + uint64 start_timestamp = 1; + // The unix timestamp in seconds of the end of this period. 
+    uint64 end_timestamp = 2;
+}
+
+message RecordPair {
+    string key = 1;
+    uint64 value = 2;
+}
+
+
+message QueryStats {
+    uint64 GC = 1;
+    uint64 Get = 2;
+    uint64 Scan = 3;
+    uint64 Coprocessor = 4;
+    uint64 Delete = 5;
+    uint64 DeleteRange = 6;
+    uint64 Put = 7;
+}
+
+enum ShardState{
+    SState_None = 0;
+    // Normal
+    SState_Normal = 1;
+    // Installing a snapshot
+    SState_Snapshot = 2;
+    // Offline
+    SState_Offline = 10;
+}
+
+
+message StoreStats {
+    uint64 store_id = 1;
+    // Capacity for the store.
+    uint64 capacity = 2;
+    // Available size for the store.
+    uint64 available = 3;
+    // Total partition count in this store.
+    uint32 partition_count = 4;
+    // Current sending snapshot count.
+    uint32 sending_snap_count = 5;
+    // Current receiving snapshot count.
+    uint32 receiving_snap_count = 6;
+    // When the store is started (unix timestamp in seconds).
+    uint32 start_time = 7;
+    // How many partition is applying snapshot.
+    uint32 applying_snap_count = 8;
+    // If the store is busy
+    bool is_busy = 9;
+    // Actually used space by db
+    uint64 used_size = 10;
+    // Bytes written for the store during this period.
+    uint64 bytes_written = 11;
+    // Keys written for the store during this period.
+    uint64 keys_written = 12;
+    // Bytes read for the store during this period.
+    uint64 bytes_read = 13;
+    // Keys read for the store during this period.
+    uint64 keys_read = 14;
+    // Actually reported time interval
+    TimeInterval interval = 15;
+    // Threads' CPU usages in the store
+    repeated RecordPair cpu_usages = 16;
+    // Threads' read disk I/O rates in the store
+    repeated RecordPair read_io_rates = 17;
+    // Threads' write disk I/O rates in the store
+    repeated RecordPair write_io_rates = 18;
+    // Operations' latencies in the store
+    repeated RecordPair op_latencies = 19;
+    // Store query stats
+    QueryStats query_stats = 21;
+    // graph stats
+    repeated GraphStats graph_stats = 22;
+    // raft stats
+    repeated RaftStats raft_stats = 23;
+    int32 cores = 24;
+    // system metrics
+    repeated RecordPair system_metrics = 25;
+}
+
+// Partition query conditions
+message PartitionQuery{
+    optional uint64 store_id = 1;    // 0 means the query does not filter by store_id
+    optional string graph_name = 2;
+    optional uint32 partition_id = 4;
+}
+
+// PD node information
+message Member {
+    uint64 cluster_id = 1;
+    string raft_url = 3;
+    string grpc_url = 4;
+    string rest_url = 5;
+    string data_path = 6;
+    StoreState state = 7;
+}
+
+// Graph space configuration
+message GraphSpace{
+    string name = 1;
+    // Maximum storage usage
+    uint64 storage_limit = 2;
+    // Used space
+    uint64 used_size = 3;
+    // Last modified time
+    uint64 timestamp = 10;
+}
+
+// PD configuration
+message PDConfig{
+    uint64 version = 1;
+    // Number of partitions; computed dynamically from the store count at initialization and adjusted after splits
+    int32 partition_count = 2;
+    // Number of replicas per partition
+    int32 shard_count = 3;
+    // List of PD cluster peers
+    string peers_list = 4;
+    // Minimum number of stores in the cluster
+    int32 min_store_count = 6;
+    // Maximum number of shards per store
+    int32 max_Shards_Per_Store = 7;
+    // Last modified time
+    uint64 timestamp = 10;
+}
+
+
+
+// Message persistence
+message QueueItem{
+    string item_id=1;
+    string item_class=2;
+    bytes item_content=3;
+    int64 timestamp=10;
+}
+
+message LogRecord{
+    string action = 1;
+    int64 timestamp = 2;
+    map<string, string> labels = 3;
+    google.protobuf.Any object = 4;
+    string message = 5;
+}
+
+message GraphState{
+    GraphMode mode = 1;
+    GraphModeReason reason = 2;
+}
+
+enum GraphMode{
+    ReadWrite = 0;
+    ReadOnly = 1;
+    WriteOnly = 2;
+}
+
+enum GraphModeReason{
+    Empty = 0;        // empty
+    Initiative = 1;   // the state was set proactively
+    Quota = 2;        // the quota limit has been reached
+
+}
\ No newline at end of file
diff --git a/hg-pd-grpc/src/main/proto/pd_common.proto b/hg-pd-grpc/src/main/proto/pd_common.proto
new file mode 100644
index 0000000000..eaab29fcc2
--- /dev/null
+++ b/hg-pd-grpc/src/main/proto/pd_common.proto
@@ -0,0 +1,36 @@
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "com.baidu.hugegraph.pd.grpc.common";
+option java_outer_classname = "HgPdCommonProto";
+
+message RequestHeader {
+    // Cluster ID.
+    uint64 cluster_id = 1;
+    // Sender ID.
+    uint64 sender_id = 2;
+}
+
+message ResponseHeader {
+    // cluster_id is the ID of the cluster which sent the response.
+    uint64 cluster_id = 1;
+    Error error = 2;
+}
+
+enum ErrorType {
+    OK = 0;
+    UNKNOWN = 1;
+    STORE_NON_EXIST = 101;
+    STORE_TOMBSTONE = 103;
+    ALREADY_BOOTSTRAPPED = 4;
+    INCOMPATIBLE_VERSION = 5;
+    PARTITION_NOT_FOUND = 6;
+
+    ETCD_READ_ERROR = 1000;
+    ETCD_WRITE_ERROR = 1001;
+}
+
+message Error {
+    ErrorType type = 1;
+    string message = 2;
+}
\ No newline at end of file
diff --git a/hg-pd-grpc/src/main/proto/pd_pulse.proto b/hg-pd-grpc/src/main/proto/pd_pulse.proto
new file mode 100644
index 0000000000..a5c0b71a5f
--- /dev/null
+++ b/hg-pd-grpc/src/main/proto/pd_pulse.proto
@@ -0,0 +1,144 @@
+syntax = "proto3";
+
+import "metapb.proto";
+import "pd_common.proto";
+
+option java_multiple_files = true;
+option java_package = "com.baidu.hugegraph.pd.grpc.pulse";
+option java_outer_classname = "HgPdPulseProto";
+
+service HgPdPulse {
+    rpc Pulse(stream PulseRequest) returns (stream PulseResponse);
+}
+
+/* requests */
+message PulseRequest {
+    PulseCreateRequest create_request = 1;
+    PulseCancelRequest cancel_request = 2;
+    PulseNoticeRequest notice_request = 3;
+    PulseAckRequest ack_request = 4;
+}
+
+message PulseCreateRequest {
+    PulseType pulse_type = 1;
+}
+
+message PulseCancelRequest {
+    int64 observer_id = 1;
+}
+
+message PulseNoticeRequest {
+    int64 observer_id = 1;
+    oneof request_union {
+        PartitionHeartbeatRequest partition_heartbeat_request = 10;
+    }
+}
+
+message PulseAckRequest {
+    int64 observer_id = 1;
+    int64 notice_id = 2;
+}
+
+// Partition heartbeat. When events such as peer addition/removal or leader change happen on a
+// partition, the leader sends a heartbeat. In the response, PD sends shard additions/removals
+// for the partition back to the leader.
+message PartitionHeartbeatRequest {
+    RequestHeader header = 1;
+    // Leader Peer sending the heartbeat
+    metapb.PartitionStats states = 4;
+}
+
+/* responses */
+message PulseResponse {
+    PulseType pulse_type = 1;
+    int64 observer_id = 2;
+    int32 status = 3;   //0=ok,1=fail
+    int64 notice_id = 4;
+    oneof response_union {
+        PartitionHeartbeatResponse partition_heartbeat_response = 10;
+    }
+}
+
+message PartitionHeartbeatResponse {
+    ResponseHeader header = 1;
+    uint64 id = 3;
+    metapb.Partition partition = 2;
+    ChangeShard change_shard = 4;
+
+    TransferLeader transfer_leader = 5;
+    // Split into multiple partitions; the first entry in SplitPartition is the original partition,
+    // and the entries from the second onward are new partitions
+    SplitPartition split_partition = 6;
+    // The table to run RocksDB compaction on; null means all tables
+    DbCompaction db_compaction = 7;
+    // Migrate the partition's data to the target
+    MovePartition move_partition = 8;
+    // Clean up the graph's data in the partition
+    CleanPartition clean_partition = 9;
+    // Partition key range change
+    PartitionKeyRange key_range = 10;
+}
+
+/* Data model */
+message ChangeShard {
+    repeated metapb.Shard shard = 1;
+    ConfChangeType change_type = 2;
+}
+
+message TransferLeader {
+    metapb.Shard shard = 1;
+}
+
+message SplitPartition {
+    repeated metapb.Partition new_partition = 1;
+}
+
+message DbCompaction {
+    string table_name = 3;
+}
+
+message MovePartition{
+    // The target partition's key range, i.e. the new range after the migration
+    metapb.Partition target_partition = 1;
+    // All data between the partition's key_start and key_end
+    // will be migrated to the target partition
+    uint64 key_start = 2;
+    uint64 key_end = 3;
+}
+
+message CleanPartition {
+    uint64 key_start = 1;
+    uint64 key_end = 2;
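// [Editor's note — illustration, not part of this patch] On the store side these pulse messages
// are plain generated protobuf classes (java_multiple_files = true, package
// com.baidu.hugegraph.pd.grpc.pulse), so a clean-up instruction such as the one described above
// could be built roughly as follows; the key values below are made up for the example:
//
//     CleanPartition clean = CleanPartition.newBuilder()
//             .setKeyStart(0)
//             .setKeyEnd(512)
//             .setCleanType(CleanType.CLEAN_TYPE_EXCLUDE_RANGE) // drop only [0, 512)
//             .setDeletePartition(false)                        // keep the partition itself
//             .build();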
+    CleanType clean_type = 3;
+    bool delete_partition = 4;  // whether to delete the partition
+}
+
+message PartitionKeyRange{
+    uint32 partition_id = 1;
+    uint64 key_start = 2;
+    uint64 key_end = 3;
+}
+
+/* enums */
+enum PulseType {
+    PULSE_TYPE_UNKNOWN = 0;
+    PULSE_TYPE_PARTITION_HEARTBEAT = 1;
+}
+
+enum PulseChangeType {
+    PULSE_CHANGE_TYPE_UNKNOWN = 0;
+    PULSE_CHANGE_TYPE_ADD = 1;
+    PULSE_CHANGE_TYPE_ALTER = 2;
+    PULSE_CHANGE_TYPE_DEL = 3;
+}
+
+enum ConfChangeType {
+    CONF_CHANGE_TYPE_UNKNOWN = 0;
+    CONF_CHANGE_TYPE_ADD_NODE = 1;
+    CONF_CHANGE_TYPE_REMOVE_NODE = 2;
+    CONF_CHANGE_TYPE_ADD_LEARNER_NODE = 3;
+    CONF_CHANGE_TYPE_ADJUST = 4;   // Adjust shards; the leader dynamically adds or removes them according to the new configuration.
+}
+
+enum CleanType {
+    CLEAN_TYPE_KEEP_RANGE = 0;     // keep only this range
+    CLEAN_TYPE_EXCLUDE_RANGE = 1;  // delete this range
+}
\ No newline at end of file
diff --git a/hg-pd-grpc/src/main/proto/pd_watch.proto b/hg-pd-grpc/src/main/proto/pd_watch.proto
new file mode 100644
index 0000000000..0c1dc84e39
--- /dev/null
+++ b/hg-pd-grpc/src/main/proto/pd_watch.proto
@@ -0,0 +1,84 @@
+syntax = "proto3";
+
+import "metapb.proto";
+
+option java_multiple_files = true;
+option java_package = "com.baidu.hugegraph.pd.grpc.watch";
+option java_outer_classname = "HgPdWatchProto";
+
+service HgPdWatch {
+    rpc Watch(stream WatchRequest) returns (stream WatchResponse);
+}
+
+message WatchRequest {
+    WatchCreateRequest create_request = 1;
+    WatchCancelRequest cancel_request = 2;
+}
+
+message WatchCreateRequest {
+    WatchType watch_type = 1;
+}
+
+message WatchCancelRequest {
+    int64 watcher_id = 1;
+}
+
+message WatchResponse {
+    WatchType watch_type = 1;
+    int64 watcher_id = 2;
+    int32 status = 3;   //0=ok,1=fail
+    int64 notice_id = 4;
+    string msg = 5;
+    oneof response_union {
+        WatchPartitionResponse partition_response = 10;
+        WatchNodeResponse node_response = 11;
+        WatchGraphResponse graph_response = 12;
+        WatchShardGroupResponse shard_group_response = 13;
+    }
+}
+
+message WatchPartitionResponse {
+    string graph = 1;
+    int32 partition_id = 2;
+    WatchChangeType change_type = 3;
+}
+
+message WatchNodeResponse {
+    string graph = 1;
+    uint64 node_id = 2;
+    NodeEventType node_event_type = 3;
+}
+
+message WatchGraphResponse {
+    metapb.Graph graph = 1;
+    WatchType type = 2;
+}
+
+message WatchShardGroupResponse {
+    metapb.ShardGroup shard_group = 1;
+    WatchChangeType type = 2;
+    int32 shard_group_id = 3;
+}
+
+enum WatchType {
+    WATCH_TYPE_UNKNOWN = 0;
+    WATCH_TYPE_PARTITION_CHANGE = 1;
+    WATCH_TYPE_STORE_NODE_CHANGE = 2;
+    WATCH_TYPE_GRAPH_CHANGE = 3;
+    WATCH_TYPE_SHARD_GROUP_CHANGE = 4;
+}
+
+enum WatchChangeType {
+    WATCH_CHANGE_TYPE_UNKNOWN = 0;
+    WATCH_CHANGE_TYPE_ADD = 1;
+    WATCH_CHANGE_TYPE_ALTER = 2;
+    WATCH_CHANGE_TYPE_DEL = 3;
+    WATCH_CHANGE_TYPE_SPECIAL1 = 4;
+}
+
+enum NodeEventType {
+    NODE_EVENT_TYPE_UNKNOWN = 0;
+    NODE_EVENT_TYPE_NODE_ONLINE = 1;
+    NODE_EVENT_TYPE_NODE_OFFLINE = 2;
+    NODE_EVENT_TYPE_NODE_RAFT_CHANGE = 3;
+}
\ No newline at end of file
diff --git a/hg-pd-grpc/src/main/proto/pdpb.proto b/hg-pd-grpc/src/main/proto/pdpb.proto
new file mode 100644
index 0000000000..e510c4d782
--- /dev/null
+++ b/hg-pd-grpc/src/main/proto/pdpb.proto
@@ -0,0 +1,564 @@
+syntax = "proto3";
+package pdpb;
+
+import "metapb.proto";
+import "metaTask.proto";
+
+option java_package = "com.baidu.hugegraph.pd.grpc";
+
+service PD {
+    // Register a store. The first registration generates a new store_id;
+    // the store_id uniquely identifies the store.
+    rpc RegisterStore(RegisterStoreRequest) returns (RegisterStoreResponse) {}
+    rpc GetStore(GetStoreRequest) returns (GetStoreResponse) {}
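// [Editor's note — illustration, not part of this patch] Assuming the standard
// protoc-gen-grpc-java output for this file (a PDGrpc stub class plus messages nested in the
// Pdpb outer class), a store or admin tool could call these RPCs roughly like this; the target
// address and key are placeholders:
//
//     ManagedChannel channel = ManagedChannelBuilder
//             .forTarget("127.0.0.1:8686").usePlaintext().build();
//     PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(channel);
//     Pdpb.GetPartitionResponse resp = stub.getPartition(
//             Pdpb.GetPartitionRequest.newBuilder()
//                     .setHeader(Pdpb.RequestHeader.newBuilder().setSenderId(1).build())
//                     .setGraphName("hugegraph")
//                     .setKey(ByteString.copyFromUtf8("some-key"))
//                     .build());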
+    // Modify store state and other information.
+    rpc SetStore(SetStoreRequest) returns (SetStoreResponse) {}
+    // Delete the specified store
+    rpc DelStore(DetStoreRequest) returns (DetStoreResponse) {}
+    rpc GetAllStores(GetAllStoresRequest) returns (GetAllStoresResponse) {}
+    rpc StoreHeartbeat(StoreHeartbeatRequest) returns (StoreHeartbeatResponse) {}
+
+    // Find the owning partition by key
+    rpc GetPartition(GetPartitionRequest) returns (GetPartitionResponse) {}
+
+    // Find the owning partition by hash code
+    rpc GetPartitionByCode(GetPartitionByCodeRequest) returns (GetPartitionResponse) {}
+    // Return the partition by partition ID
+    rpc GetPartitionByID(GetPartitionByIDRequest) returns (GetPartitionResponse) {}
+    rpc ScanPartitions(ScanPartitionsRequest) returns (ScanPartitionsResponse) {}
+    // Update partition information, mainly used to update the partition key range.
+    // Call this interface with caution, otherwise data loss may occur.
+    rpc UpdatePartition(UpdatePartitionRequest) returns (UpdatePartitionResponse) {}
+    // Delete the specified partition
+    rpc DelPartition(DelPartitionRequest) returns (DelPartitionResponse) {}
+    // Query partition information by conditions such as store, graph, etc.
+    rpc QueryPartitions(QueryPartitionsRequest) returns (QueryPartitionsResponse){}
+    // Read graph information
+    rpc GetGraph(GetGraphRequest) returns (GetGraphResponse){}
+    // Modify graph information
+    rpc SetGraph(SetGraphRequest) returns (SetGraphResponse){}
+    rpc DelGraph(DelGraphRequest) returns (DelGraphResponse){}
+    // Globally unique auto-increment ID
+    rpc GetId(GetIdRequest) returns (GetIdResponse){}
+    rpc ResetId(ResetIdRequest) returns (ResetIdResponse){}
+    // List of members of the PD cluster
+    rpc GetMembers(GetMembersRequest) returns (GetMembersResponse) {}
+    rpc GetStoreStatus(GetAllStoresRequest) returns (GetAllStoresResponse) {}
+    rpc GetPDConfig(GetPDConfigRequest) returns (GetPDConfigResponse){}
+    rpc SetPDConfig(SetPDConfigRequest) returns (SetPDConfigResponse){}
+    rpc GetGraphSpace(GetGraphSpaceRequest) returns (GetGraphSpaceResponse){}
+    rpc SetGraphSpace(SetGraphSpaceRequest) returns (SetGraphSpaceResponse){}
+    // Get the cluster health status
+    rpc GetClusterStats(GetClusterStatsRequest) returns (GetClusterStatsResponse){}
+    // Replace the PD cluster's peer list
+    rpc ChangePeerList(ChangePeerListRequest) returns (getChangePeerListResponse) {}
+    // Data split
+    rpc SplitData(SplitDataRequest) returns (SplitDataResponse){}
+
+    rpc SplitGraphData(SplitGraphDataRequest) returns (SplitDataResponse) {}
+    // Data migration
+    rpc MovePartition(MovePartitionRequest) returns (MovePartitionResponse){}
+    // Report the execution results of tasks such as partition splits
+    rpc ReportTask(ReportTaskRequest) returns (ReportTaskResponse){}
+
+    rpc GetPartitionStats(GetPartitionStatsRequest) returns (GetPartitionStatsResponse){}
+    // Balance the number of partition leaders across stores
+    rpc BalanceLeaders(BalanceLeadersRequest) returns (BalanceLeadersResponse){}
+
+    // Replace the license file
+    rpc PutLicense(PutLicenseRequest) returns (PutLicenseResponse){}
+
+    // Tell RocksDB to run a compaction
+    rpc DbCompaction(DbCompactionRequest) returns (DbCompactionResponse){}
+
+    // Merge partitions
+    rpc CombineCluster(CombineClusterRequest) returns (CombineClusterResponse){}
+    // Shrink a single graph
+    rpc CombineGraph(CombineGraphRequest) returns (CombineGraphResponse) {}
+
+    // shard group
+    rpc GetShardGroup(GetShardGroupRequest) returns (GetShardGroupResponse){}
+    rpc UpdateShardGroup(UpdateShardGroupRequest) returns (UpdateShardGroupResponse){}
+    // Delete a shard group
+    rpc DeleteShardGroup(DeleteShardGroupRequest) returns (DeleteShardGroupResponse) {}
+    // Operations and maintenance handling for shard groups
+    rpc UpdateShardGroupOp(ChangeShardRequest) returns (ChangeShardResponse){}
+    // change shard
+    rpc ChangeShard(ChangeShardRequest) returns (ChangeShardResponse) {}
+}
+
+message RequestHeader {
+    // Cluster ID.
+    uint64 cluster_id = 1;
+    // Sender ID.
+    uint64 sender_id = 2;
+}
+
+message ResponseHeader {
+    // cluster_id is the ID of the cluster which sent the response.
+ uint64 cluster_id = 1; + Error error = 2; +} + +enum ErrorType { + OK = 0; + UNKNOWN = 1; + + NOT_LEADER = 100; + STORE_ID_NOT_EXIST = 101; + NO_ACTIVE_STORE = 102; + NOT_FOUND = 103; + PD_UNREACHABLE = 104; + LESS_ACTIVE_STORE = 105; + STORE_HAS_BEEN_REMOVED = 106; + STORE_PROHIBIT_DELETION = 111; + SET_CONFIG_SHARD_COUNT_ERROR = 112; + UPDATE_STORE_STATE_ERROR = 113; + STORE_PROHIBIT_DUPLICATE = 114; + ROCKSDB_READ_ERROR = 1002; + ROCKSDB_WRITE_ERROR = 1003; + ROCKSDB_DEL_ERROR = 1004; + ROCKSDB_SAVE_SNAPSHOT_ERROR = 1005; + ROCKSDB_LOAD_SNAPSHOT_ERROR = 1006; + + // 当前集群状态禁止分裂 + Cluster_State_Forbid_Splitting = 1007; + // 正在分裂中 + Split_Partition_Doing = 1008; + // store上分区数量超过上限 + Too_Many_Partitions_Per_Store = 1009; + // license 错误 + LICENSE_ERROR= 107; + // license 认证错误 + LICENSE_VERIFY_ERROR= 108; + + //分区下线正在进行 + Store_Tombstone_Doing = 1010; + + // 不合法的分裂个数 + Invalid_Split_Partition_Count = 1011; +} + +message Error { + ErrorType type = 1; + string message = 2; +} +message GetStoreRequest { + RequestHeader header = 1; + uint64 store_id = 2; +} + +message GetStoreResponse { + ResponseHeader header = 1; + + metapb.Store store = 2; + metapb.StoreStats stats = 3; +} + +message DetStoreRequest { + RequestHeader header = 1; + uint64 store_id = 2; +} + +message DetStoreResponse { + ResponseHeader header = 1; + metapb.Store store = 2; +} + +message RegisterStoreRequest { + RequestHeader header = 1; + metapb.Store store = 2; +} + + +message RegisterStoreResponse { + ResponseHeader header = 1; + // 初次注册,返回新的store_id + uint64 store_id = 2; +} + +message SetStoreRequest { + RequestHeader header = 1; + metapb.Store store = 2; +} + +message SetStoreResponse { + ResponseHeader header = 1; + // 返回修改后的Store + metapb.Store store = 2; +} + + +// 返回graph_name所在的所有store,如果graph_name为空值,则返回系统所有的store +message GetAllStoresRequest { + RequestHeader header = 1; + string graph_name = 2; + // 是否返回离线的store + bool exclude_offline_stores = 3; +} + +message GetAllStoresResponse { + ResponseHeader header = 1; + + repeated metapb.Store stores = 2; +} + + +message StoreHeartbeatRequest { + RequestHeader header = 1; + + metapb.StoreStats stats = 2; +} + +message StoreHeartbeatResponse { + ResponseHeader header = 1; + string cluster_version = 3; + metapb.ClusterStats clusterStats = 4; +} + +message GetPartitionRequest { + RequestHeader header = 1; + string graph_name = 2; + bytes key = 3; +} + + +message GetPartitionByCodeRequest { + RequestHeader header = 1; + string graph_name = 2; + uint64 code = 3; +} + + +message GetPartitionResponse { + ResponseHeader header = 1; + metapb.Partition partition = 2; + metapb.Shard leader = 3; + // 离线的Shard + repeated metapb.Shard offline_shards = 4; +} + +message GetPartitionByIDRequest { + RequestHeader header = 1; + string graph_name = 2; + uint32 partition_id = 3; +} + +message DelPartitionRequest { + RequestHeader header = 1; + string graph_name = 2; + uint32 partition_id = 3; +} +message DelPartitionResponse { + ResponseHeader header = 1; + metapb.Partition partition = 2; +} + +message UpdatePartitionRequest{ + RequestHeader header = 1; + repeated metapb.Partition partition = 2; +} + +message UpdatePartitionResponse{ + ResponseHeader header = 1; + repeated metapb.Partition partition = 2; +} +// Use GetPartitionResponse as the response of GetPartitionByIDRequest. + +message ScanPartitionsRequest { + RequestHeader header = 1; + string graph_name = 2; + bytes start_key = 3; + bytes end_key = 4; // end_key is +inf when it is empty. 
+} + + + +message ScanPartitionsResponse { + ResponseHeader header = 1; + repeated metapb.PartitionShard partitions = 4; +} + + + +message QueryPartitionsRequest{ + RequestHeader header = 1; + metapb.PartitionQuery query = 2; +} + +message QueryPartitionsResponse { + ResponseHeader header = 1; + repeated metapb.Partition partitions = 4; +} + + + +message GetGraphRequest{ + RequestHeader header = 1; + string graph_name = 2; +} + +message GetGraphResponse{ + ResponseHeader header = 1; + metapb.Graph graph = 2; +} + +message SetGraphRequest{ + RequestHeader header = 1; + metapb.Graph graph = 2; +} + +message SetGraphResponse{ + ResponseHeader header = 1; + metapb.Graph graph = 2; +} + +message DelGraphRequest{ + RequestHeader header = 1; + string graph_name = 2; +} + +message DelGraphResponse{ + ResponseHeader header = 1; + metapb.Graph graph = 2; +} + +message GetIdRequest{ + RequestHeader header = 1; + string key = 2; + int32 delta = 3; +} + +message GetIdResponse{ + ResponseHeader header = 1; + int64 id =2; + int32 delta =3; +} + +message ResetIdRequest{ + RequestHeader header = 1; + string key = 2; +} + +message ResetIdResponse{ + ResponseHeader header = 1; + int32 result = 2; +} + +message GetMembersRequest{ + RequestHeader header = 1; +} + +message GetMembersResponse{ + ResponseHeader header = 1; + repeated metapb.Member members = 2; + metapb.Member leader = 3; +} + +message GetPDConfigRequest{ + RequestHeader header = 1; + uint64 version = 2 ; +} + +message GetPDConfigResponse{ + ResponseHeader header = 1; + metapb.PDConfig pd_config = 2; +} + +message SetPDConfigRequest{ + RequestHeader header = 1; + metapb.PDConfig pd_config = 2; +} + +message SetPDConfigResponse{ + ResponseHeader header = 1; +} + + +message GetGraphSpaceRequest{ + RequestHeader header = 1; + string graph_Space_Name = 2; +} + +message GetGraphSpaceResponse{ + ResponseHeader header = 1; + repeated metapb.GraphSpace graph_space = 2; +} + +message SetGraphSpaceRequest{ + RequestHeader header = 1; + metapb.GraphSpace graph_space = 2; +} + +message SetGraphSpaceResponse{ + ResponseHeader header = 1; +} + +message GetClusterStatsRequest{ + RequestHeader header = 1; +} + +message GetClusterStatsResponse{ + ResponseHeader header = 1; + metapb.ClusterStats cluster = 2; +} +message ChangePeerListRequest{ + RequestHeader header = 1; + string peer_List = 2; +} +message getChangePeerListResponse{ + ResponseHeader header = 1; +} + +enum OperationMode { + Auto = 0; + Expert = 1; +} + +message SplitDataParam{ + // 被分裂的源分区ID + uint32 partition_id = 1; + //目标分区数量 + uint32 count = 2; +} + +message SplitDataRequest{ + RequestHeader header = 1; + //工作模式 + // Auto:自动分裂,每个Store上分区数达到最大值 + // Expert:专家模式,需要指定splitParams + OperationMode mode = 2; + repeated SplitDataParam param = 3; +} + +message SplitGraphDataRequest{ + RequestHeader header = 1; + //工作模式 + string graph_name = 2; + uint32 to_count = 3; +} + +message SplitDataResponse{ + ResponseHeader header = 1; +} + +message MovePartitionParam{ + uint32 partition_id = 1; + uint64 src_store_id = 2; + uint64 dst_store_id = 3; +} + +message MovePartitionRequest{ + RequestHeader header = 1; + //工作模式 + // Auto:自动转移,达到每个Store上分区数量相同 + // Expert:专家模式,需要指定transferParams + OperationMode mode = 2; + repeated MovePartitionParam param = 3; +} + +message MovePartitionResponse{ + ResponseHeader header = 1; +} + +message ReportTaskRequest{ + RequestHeader header = 1; + metaTask.Task task = 2; +} + +message ReportTaskResponse{ + ResponseHeader header = 1; +} + +message GetPartitionStatsRequest{ + 
RequestHeader header = 1; + uint32 partition_id = 2; + // 如果未空,返回所有图的同一分区ID + string graph_name = 4; +} + +message GetPartitionStatsResponse{ + ResponseHeader header = 1; + metapb.PartitionStats partition_stats = 2; +} + +message BalanceLeadersRequest{ + RequestHeader header = 1; + } + +message BalanceLeadersResponse{ + ResponseHeader header = 1; +} + +message PutLicenseRequest{ + RequestHeader header = 1; + bytes content = 2; +} + +message PutLicenseResponse{ + ResponseHeader header = 1; +} + +message DbCompactionRequest{ + RequestHeader header = 1; + string tableName = 2; +} + +message DbCompactionResponse{ + ResponseHeader header = 1; +} + +message CombineClusterRequest { + RequestHeader header = 1; + uint32 toCount = 2; +} + +message CombineClusterResponse { + ResponseHeader header = 1; +} + +message CombineGraphRequest { + RequestHeader header = 1; + string graphName = 2; + uint32 toCount = 3; +} + +message CombineGraphResponse { + ResponseHeader header = 1; +} + +message DeleteShardGroupRequest { + RequestHeader header = 1; + uint32 groupId = 2; +} + +message DeleteShardGroupResponse { + ResponseHeader header = 1; +} + +message GetShardGroupRequest{ + RequestHeader header = 1; + uint32 group_id = 2 ; +} + +message GetShardGroupResponse{ + ResponseHeader header = 1; + metapb.ShardGroup shardGroup = 2; +} + +message UpdateShardGroupRequest{ + RequestHeader header = 1; + metapb.ShardGroup shardGroup = 2; +} + +message UpdateShardGroupResponse{ + ResponseHeader header = 1; +} + +message ChangeShardRequest{ + RequestHeader header = 1; + uint32 groupId = 2; + repeated metapb.Shard shards = 3; +} + +message ChangeShardResponse { + ResponseHeader header = 1; +} diff --git a/hg-pd-service/pom.xml b/hg-pd-service/pom.xml new file mode 100644 index 0000000000..f93bf54f21 --- /dev/null +++ b/hg-pd-service/pom.xml @@ -0,0 +1,121 @@ + + + 4.0.0 + + + org.apache.hugegraph + hugegraph-pd-root + 3.6.5-SNAPSHOT + + + hugegraph-pd + + + 0.5.10 + + + + com.baidu.hugegraph + hg-pd-grpc + + + io.grpc + * + + + + + + com.baidu.hugegraph + hg-pd-core + ${project.version} + + + + io.github.lognet + grpc-spring-boot-starter + 4.5.5 + + + org.springframework.boot + spring-boot-starter-logging + + + + + org.springframework.boot + spring-boot-starter-web + + + + + org.springframework.boot + spring-boot-starter-tomcat + + + org.springframework.boot + spring-boot-starter-logging + + + + + + org.springframework.boot + spring-boot-starter-actuator + + + io.micrometer + micrometer-registry-prometheus + + + org.springframework.boot + spring-boot-starter-jetty + + + org.springframework.boot + spring-boot-starter-log4j2 + + + com.lmax + disruptor + 3.4.1 + + + org.projectlombok + lombok + + + + com.google.protobuf + protobuf-java-util + 3.17.2 + + + com.baidu.hugegraph + hugegraph-common + 1.8.12 + + + + + + + org.springframework.boot + spring-boot-maven-plugin + 2.5.0 + + + + spring-boot + + org.apache.hugegraph.pd.boot.HugePDServer + + + + + + + + \ No newline at end of file diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionScriptFactory.java b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionScriptFactory.java new file mode 100644 index 0000000000..add0cba5b2 --- /dev/null +++ b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionScriptFactory.java @@ -0,0 +1,41 @@ +package com.baidu.hugegraph.pd.upgrade; + +import com.baidu.hugegraph.pd.upgrade.scripts.PartitionMetaUpgrade; +import com.baidu.hugegraph.pd.upgrade.scripts.TaskCleanUpgrade; + +import 
java.util.LinkedList;
+import java.util.List;
+
+public class VersionScriptFactory {
+    private static volatile VersionScriptFactory factory;
+
+    private static List<VersionUpgradeScript> scripts = new LinkedList<>();
+
+    static {
+        registerScript(new PartitionMetaUpgrade());
+        registerScript(new TaskCleanUpgrade());
+    }
+
+    private VersionScriptFactory(){
+
+    }
+
+    public static VersionScriptFactory getInstance(){
+        if (factory == null) {
+            synchronized (VersionScriptFactory.class) {
+                if (factory == null) {
+                    factory = new VersionScriptFactory();
+                }
+            }
+        }
+        return factory;
+    }
+
+    public static void registerScript(VersionUpgradeScript script) {
+        scripts.add(script);
+    }
+
+    public List<VersionUpgradeScript> getScripts() {
+        return scripts;
+    }
+}
diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionUpgradeScript.java b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionUpgradeScript.java
new file mode 100644
index 0000000000..c0851dedad
--- /dev/null
+++ b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionUpgradeScript.java
@@ -0,0 +1,39 @@
+package com.baidu.hugegraph.pd.upgrade;
+
+import com.baidu.hugegraph.pd.config.PDConfig;
+
+public interface VersionUpgradeScript {
+
+    String UNLIMITED_VERSION = "UNLIMITED_VERSION";
+
+    /**
+     * The highest version that needs to run the upgrade instruction
+     * @return the high version
+     */
+    String getHighVersion();
+
+    /**
+     * The lowest version that needs to run the upgrade instruction
+     * @return the low version
+     */
+    String getLowVersion();
+
+    /**
+     * Whether to run when PD has no data version; this usually corresponds to versions before 3.6.2
+     *
+     * @return run when pd has no data version
+     */
+    boolean isRunWithoutDataVersion();
+
+    /**
+     * The script runs just once, ignoring versions
+     * @return run once script
+     */
+    boolean isRunOnce();
+
+    /**
+     * Run the upgrade instruction
+     */
+    void runInstruction(PDConfig config);
+
+}
diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java
new file mode 100644
index 0000000000..703842274a
--- /dev/null
+++ b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java
@@ -0,0 +1,99 @@
+package com.baidu.hugegraph.pd.upgrade.scripts;
+
+import com.baidu.hugegraph.pd.common.PDException;
+import com.baidu.hugegraph.pd.config.PDConfig;
+import com.baidu.hugegraph.pd.grpc.Metapb;
+import com.baidu.hugegraph.pd.meta.MetadataKeyHelper;
+import com.baidu.hugegraph.pd.meta.MetadataRocksDBStore;
+import com.baidu.hugegraph.pd.upgrade.VersionUpgradeScript;
+import lombok.extern.slf4j.Slf4j;
+
+import java.util.HashSet;
+
+@Slf4j
+public class PartitionMetaUpgrade implements VersionUpgradeScript {
+
+    @Override
+    public String getHighVersion() {
+        return "3.6.2";
+    }
+
+    @Override
+    public String getLowVersion() {
+        return UNLIMITED_VERSION;
+    }
+
+    @Override
+    public void runInstruction(PDConfig config) {
+
+        log.info("run PartitionMetaUpgrade script");
+        var dbStore = new MetadataRocksDBStore(config);
+
+        try {
+            var partSet = new HashSet<Integer>();
+            for (var graph : dbStore.scanPrefix(Metapb.Graph.parser(), MetadataKeyHelper.getGraphPrefix())) {
+                var graphPrefix = MetadataKeyHelper.getPartitionPrefix(graph.getGraphName());
+                for (var partition : dbStore.scanPrefix(Metapb.PartitionV36.parser(), graphPrefix)) {
+                    var newPartition = trans(partition);
+                    var partId = partition.getId();
+                    log.info("trans partition structure: from {} to {}", partition, newPartition);
+                    // backup
+                    var key36 =
MetadataKeyHelper.getPartitionV36Key(graph.getGraphName(), partId); + dbStore.put(key36, partition.toByteArray()); + // write new structure + var key = MetadataKeyHelper.getPartitionKey(graph.getGraphName(), partId); + dbStore.put(key, newPartition.toByteArray()); + + // construct shard group + if (! partSet.contains(partId)) { + var shardGroupKey = MetadataKeyHelper.getShardGroupKey(partId); + var shardGroup = dbStore.getOne(Metapb.ShardGroup.parser(), shardGroupKey); + if (shardGroup == null) { + var shardList = partition.getShardsList(); + if (shardList.size() > 0) { + shardGroup = Metapb.ShardGroup.newBuilder() + .setId(partId) + .setVersion(partition.getVersion()) + .setConfVer(0) + .setState(partition.getState()) + .addAllShards(shardList) + .build(); + dbStore.put(shardGroupKey, shardGroup.toByteArray()); + log.info("extract shard group from partition, {}", shardGroup); + } else { + throw new PDException(1000, "trans partition failed, no shard list"); + } + } + partSet.add(partId); + } + + } + } + } catch (Exception e) { + log.error("script: {}, run error : {}", getClass().getName(), e.getMessage()); + } + } + + @Override + public boolean isRunOnce() { + return true; + } + + @Override + public boolean isRunWithoutDataVersion() { + return true; + } + + private Metapb.Partition trans(Metapb.PartitionV36 partition) { + + return Metapb.Partition.newBuilder() + .setId(partition.getId()) + .setGraphName(partition.getGraphName()) + .setStartKey(partition.getStartKey()) + .setEndKey(partition.getEndKey()) + .setVersion(partition.getVersion()) + .setState(partition.getState()) + .setMessage(partition.getMessage()) + .build(); + } +} diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java new file mode 100644 index 0000000000..a870386384 --- /dev/null +++ b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java @@ -0,0 +1,47 @@ +package com.baidu.hugegraph.pd.upgrade.scripts; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; +import com.baidu.hugegraph.pd.meta.MetadataRocksDBStore; +import com.baidu.hugegraph.pd.upgrade.VersionUpgradeScript; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class TaskCleanUpgrade implements VersionUpgradeScript { + @Override + public String getHighVersion() { + return UNLIMITED_VERSION; + } + + @Override + public String getLowVersion() { + return UNLIMITED_VERSION; + } + + @Override + public boolean isRunWithoutDataVersion() { + return true; + } + + @Override + public boolean isRunOnce() { + return true; + } + + @Override + public void runInstruction(PDConfig config) { + log.info("run TaskCleanUpgrade script"); + var dbStore = new MetadataRocksDBStore(config); + + try { + byte[] key = MetadataKeyHelper.getAllSplitTaskPrefix(); + log.info("delete split task:{}", dbStore.removeByPrefix(key)); + byte[] key2 = MetadataKeyHelper.getAllMoveTaskPrefix(); + log.info("delete move task:{}", dbStore.removeByPrefix(key2)); + } catch (PDException e) { + throw new RuntimeException(e); + } + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java new file mode 100644 index 0000000000..03f4b09cca --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java @@ -0,0 +1,25 @@ +package org.apache.hugegraph.pd.boot; + +import com.alipay.remoting.util.StringUtils; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.context.annotation.ComponentScan; + +/** + * PD服务启动类 + */ +@ComponentScan(basePackages={"com.baidu.hugegraph.pd"}) +@SpringBootApplication +public class HugePDServer { + public static void main(String[] args) { + String logPath = System.getProperty("logging.path"); + if (StringUtils.isBlank(logPath)) { + System.setProperty("logging.path", "logs"); + System.setProperty("com.alipay.remoting.client.log.level", "error"); + } + + SpringApplication.run(HugePDServer.class); + System.out.println("Hugegraph-pd started."); + } +} + diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java new file mode 100644 index 0000000000..b75e27656d --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java @@ -0,0 +1,404 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.license; + +import com.baidu.hugegraph.license.ExtraParam; +import com.baidu.hugegraph.license.LicenseVerifyParam; +import com.baidu.hugegraph.license.MachineInfo; +import com.baidu.hugegraph.pd.KvService; +import com.baidu.hugegraph.pd.common.PDRuntimeException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.kv.KvServiceGrpc; +import com.baidu.hugegraph.pd.grpc.kv.TTLRequest; +import com.baidu.hugegraph.pd.grpc.kv.TTLResponse; +import com.baidu.hugegraph.pd.raft.RaftEngine; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.gson.Gson; +import com.google.gson.internal.LinkedTreeMap; +import de.schlichtherle.license.CipherParam; +import de.schlichtherle.license.DefaultCipherParam; +import de.schlichtherle.license.DefaultKeyStoreParam; +import de.schlichtherle.license.DefaultLicenseParam; +import de.schlichtherle.license.KeyStoreParam; +import de.schlichtherle.license.LicenseContent; +import de.schlichtherle.license.LicenseParam; +import io.grpc.CallOptions; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.springframework.stereotype.Service; +import org.springframework.util.Base64Utils; + +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.charset.Charset; +import java.text.SimpleDateFormat; +import java.time.Duration; +import java.time.Instant; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.prefs.Preferences; + +@Service +@Slf4j +public class LicenseVerifierService { + + private PDConfig pdConfig; + private static final Duration CHECK_INTERVAL = Duration.ofMinutes(10); + private volatile Instant lastCheckTime = Instant.now(); + // private final LicenseVerifyParam verifyParam; + private LicenseVerifyManager manager; + private static LicenseContent content; + private static KvService kvService; + private static String contentKey = "contentKey"; + private static Gson mapper = new Gson(); + private final MachineInfo machineInfo; + private static volatile boolean installed = false; + + public LicenseVerifierService(PDConfig pdConfig) { + this.pdConfig = pdConfig; + machineInfo = new MachineInfo(); + kvService = new KvService(pdConfig); + // verifyParam = initLicense(pdConfig); + } + + public LicenseVerifyParam init() { + LicenseVerifyParam verifyParam = null; + if (!installed) { + synchronized (LicenseVerifierService.class) { + if (!installed) { + verifyParam = buildVerifyParam(pdConfig.getVerifyPath()); + log.info("get license param: {}", pdConfig.getVerifyPath()); + if (verifyParam != null) { + LicenseParam licenseParam = this.initLicenseParam(verifyParam); + this.manager = new LicenseVerifyManager(licenseParam); + // this.install("d01e1814cd9edb01a05671bebf3919cc"); + try { + // this.verifyPublicCert(md5); + File licenseFile = new File(pdConfig.getLicensePath()); + if (!licenseFile.exists()) { + log.warn("invalid parameter:license-path"); + return null; + } else { + log.info("get license file....{}", licenseFile.getAbsolutePath()); + } + this.manager.uninstall(); + content = this.manager.install(licenseFile); + ExtraParam param = 
LicenseVerifyManager.getExtraParams(content); + content.setExtra(param); + this.checkIpAndMac(param); + // 获取有效期,并设置过期时间,通知leader,将content保存到... + Date notAfter = content.getNotAfter(); + long ttl = notAfter.getTime() - System.currentTimeMillis(); + final TTLResponse[] info = {null}; + if (!isLeader()) { + while (RaftEngine.getInstance().getLeader() == null) { + this.wait(200); + } + if (RaftEngine.getInstance().getLeader() != null) { + CountDownLatch latch = new CountDownLatch(1); + TTLRequest request = TTLRequest.newBuilder().setKey(contentKey).setValue( + mapper.toJson(content, LicenseContent.class)).setTtl(ttl).build(); + StreamObserver observer = new StreamObserver() { + @Override + public void onNext(TTLResponse value) { + info[0] = value; + latch.countDown(); + } + + @Override + public void onError(Throwable t) { + latch.countDown(); + } + + @Override + public void onCompleted() { + latch.countDown(); + } + }; + redirectToLeader(KvServiceGrpc.getPutTTLMethod(), request, observer); + latch.await(); + Pdpb.Error error = info[0].getHeader().getError(); + if (!error.getType().equals(Pdpb.ErrorType.OK)) { + throw new Exception(error.getMessage()); + } + } else { + log.warn("wait for leader to put the license content......"); + } + + } else { + kvService.put(contentKey, mapper.toJson(content, LicenseContent.class), ttl); + } + installed = true; + log.info("The license is successfully installed, valid for {} - {}", + content.getNotBefore(), notAfter); + } catch (Exception e) { + log.error("Failed to install license", e); + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_ERROR_VALUE, + "Failed to install license, ", e); + } + } + } + } + } + return verifyParam; + } + + // public static LicenseVerifierService instance() { + // if (INSTANCE == null) { + // synchronized (LicenseVerifierService.class) { + // if (INSTANCE == null) { + // INSTANCE = new LicenseVerifierService(); + // } + // } + // } + // return INSTANCE; + // } + + // public void verifyIfNeeded() { + // Instant now = Instant.now(); + // Duration interval = Duration.between(this.lastCheckTime, now); + // if (!interval.minus(CHECK_INTERVAL).isNegative()) { + // this.verify(); + // this.lastCheckTime = now; + // } + // } + + public synchronized void install(String md5) { + + } + SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + + public HashMap getContext() throws Exception { + try { + String value = kvService.get(contentKey); + if (StringUtils.isEmpty(value)) { + throw new Exception("can not find license content from storage"); + } + LicenseContent content = mapper.fromJson(value, LicenseContent.class); + Date notAfter = content.getNotAfter(); + Date notBefore = content.getNotBefore(); + Date issued = content.getIssued(); + // long currentTimeMillis = System.currentTimeMillis(); + // long diff = notAfter - currentTimeMillis; + // boolean expired = diff <= 0; + HashMap result = mapper.fromJson(value, HashMap.class); + result.put("current", formatter.format(new Date())); + result.put("notAfter", formatter.format(notAfter)); + result.put("issued", formatter.format(issued)); + result.put("notBefore", formatter.format(notBefore)); + return result; + } catch (Exception e) { + throw new Exception("can not find license content from storage:" + e.getMessage()); + } + } + + public LicenseContent verify(int cores, int nodeCount) { + try { + String value = kvService.get(contentKey); + if (StringUtils.isEmpty(value)) { + throw new Exception("can not find license content from storage"); + } + LicenseContent content 
= mapper.fromJson(value, LicenseContent.class); + LinkedTreeMap param = (LinkedTreeMap) content.getExtra(); + int licCpus = ((Double) param.get("cpus")).intValue(); + int licNodes = ((Double) param.get("nodes")).intValue(); + if (param != null) { + if (licCpus != -1) { + // licCpus为 -1时,表示不限制cpu核数 + if (cores <= 0 || cores > licCpus) { + String msg = + String.format("无效的cpu核数: %s,授权数: %s", cores, licCpus); + throw new PDRuntimeException( + Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, msg); + } + } + + if (licNodes != -1) { + // licNodes为 -1时,表示不限制服务节点数目 + if (nodeCount > licNodes) { + String msg = String.format("无效的节点个数: %s,授权数: %s", nodeCount, licNodes); + throw new PDRuntimeException( + Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, msg); + } + } + } + return content; + } catch (Exception e) { + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, + "授权信息校验异常," + e.getMessage()); + } + } + + private ManagedChannel channel; + + public boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + + private > void redirectToLeader( + MethodDescriptor method, ReqT req, io.grpc.stub.StreamObserver observer) { + try { + if (channel == null) { + synchronized (this) { + if (channel == null) { + channel = ManagedChannelBuilder + .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()).usePlaintext() + .build(); + } + } + log.info("Grpc get leader address {}", RaftEngine.getInstance().getLeaderGrpcAddress()); + } + + io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), req, + observer); + } catch (Exception e) { + e.printStackTrace(); + } + + } + + // private void verifyPublicCert(String expectMD5) { + // String path = this.verifyParam.publicKeyPath(); + // try (InputStream is = LicenseVerifierService.class.getResourceAsStream(path)) { + // String actualMD5 = DigestUtils.md5Hex(is); + // if (!actualMD5.equals(expectMD5)) { + // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Invalid public cert"); + // } + // } catch (IOException e) { + // log.error("Failed to read public cert", e); + // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Failed to read public cert", e); + // } + // } + + private LicenseParam initLicenseParam(LicenseVerifyParam param) { + Preferences preferences = Preferences.userNodeForPackage(LicenseVerifierService.class); + CipherParam cipherParam = new DefaultCipherParam(param.storePassword()); + KeyStoreParam keyStoreParam = new DefaultKeyStoreParam(LicenseVerifierService.class, + param.publicKeyPath(), param.publicAlias(), + param.storePassword(), null); + return new DefaultLicenseParam(param.subject(), preferences, keyStoreParam, cipherParam); + } + + private static LicenseVerifyParam buildVerifyParam(String path) { + // NOTE: can't use JsonUtil due to it bind tinkerpop jackson + try { + ObjectMapper mapper = new ObjectMapper(); + File licenseParamFile = new File(path); + if (!licenseParamFile.exists()) { + log.warn("failed to get file:{}", path); + return null; + } + return mapper.readValue(licenseParamFile, LicenseVerifyParam.class); + } catch (IOException e) { + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, + String.format("Failed to read json stream to %s", + LicenseVerifyParam.class)); + } + } + + public String getIpAndMac() { + List actualIps = this.machineInfo.getIpAddress(); + String host = pdConfig.getHost(); + String licenseHost = host; + if (!actualIps.contains(host)) { + licenseHost = actualIps.get(0); + } + try { + String mac = 
this.machineInfo.getMacByInetAddress(InetAddress.getByName(licenseHost)); + HashMap ipAndMac = new HashMap<>(); + ipAndMac.put("ip", licenseHost); + ipAndMac.put("mac", mac); + String json = new Gson().toJson(ipAndMac); + String encode = Base64Utils.encodeToString(json.getBytes(Charset.defaultCharset())); + return encode; + } catch (Exception e) { + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_ERROR_VALUE, + String.format("Failed to get ip and mac for %s", + e.getMessage())); + } + } + + private void checkIpAndMac(ExtraParam param) { + String expectIp = param.ip(); + boolean matched = false; + List actualIps = null; + if (StringUtils.isEmpty(expectIp)) { + matched = true; + } else { + actualIps = this.machineInfo.getIpAddress(); + for (String actualIp : actualIps) { + if (actualIp.equalsIgnoreCase(expectIp)) { + matched = true; + break; + } + } + } + if (!matched) { + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, String.format( + "The server's ip '%s' doesn't match the authorized '%s'", actualIps, expectIp)); + } + String expectMac = param.mac(); + if (StringUtils.isEmpty(expectMac)) { + return; + } + // The mac must be not empty here + if (!StringUtils.isEmpty(expectIp)) { + String actualMac; + try { + actualMac = this.machineInfo.getMacByInetAddress(InetAddress.getByName(expectIp)); + } catch (UnknownHostException e) { + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, + String.format("Failed to get mac address for ip '%s'", + expectIp)); + } + String expectFormatMac = expectMac.replaceAll(":", "-"); + String actualFormatMac = actualMac.replaceAll(":", "-"); + if (!actualFormatMac.equalsIgnoreCase(expectFormatMac)) { + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, String.format( + "The server's mac '%s' doesn't match the authorized '%s'", actualMac, expectMac)); + } + } else { + String expectFormatMac = expectMac.replaceAll(":", "-"); + List actualMacs = this.machineInfo.getMacAddress(); + matched = false; + for (String actualMac : actualMacs) { + String actualFormatMac = actualMac.replaceAll(":", "-"); + if (actualFormatMac.equalsIgnoreCase(expectFormatMac)) { + matched = true; + break; + } + } + if (!matched) { + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, String.format( + "The server's macs %s don't match the authorized '%s'", actualMacs, expectMac)); + } + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java new file mode 100644 index 0000000000..ff86fc910e --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java @@ -0,0 +1,77 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.license; + +import java.io.IOException; +import java.util.List; + +import com.baidu.hugegraph.license.CommonLicenseManager; +import com.baidu.hugegraph.license.ExtraParam; +import com.baidu.hugegraph.pd.common.PDRuntimeException; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; + +import de.schlichtherle.license.LicenseContent; +import de.schlichtherle.license.LicenseContentException; +import de.schlichtherle.license.LicenseParam; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class LicenseVerifyManager extends CommonLicenseManager { + + private static final ObjectMapper MAPPER = new ObjectMapper(); + private static final int NO_LIMIT = -1; + + public LicenseVerifyManager(LicenseParam param) { + super(param); + } + + @Override + protected synchronized void validate(LicenseContent content) throws LicenseContentException { + // Call super validate firstly to verify the common license parameters + try { + super.validate(content); + } catch (LicenseContentException e) { + // log.error("Failed to verify license", e); + throw e; + } + // Verify the customized license parameters. + getExtraParams(content); + } + + public static ExtraParam getExtraParams(LicenseContent content) { + List params; + try { + TypeReference> type; + type = new TypeReference>() { + }; + params = MAPPER.readValue((String) content.getExtra(), type); + if (params != null && params.size() > 0) { + return params.get(0); + } + } catch (IOException e) { + log.error("Failed to read extra params", e); + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, + "Failed to read extra params", e); + } + return null; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java new file mode 100644 index 0000000000..0bbba95f1e --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java @@ -0,0 +1,29 @@ +package org.apache.hugegraph.pd.metrics; + +import io.micrometer.core.instrument.MeterRegistry; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +/** + * @author lynn.bond@hotmail.com on 2022/01/05 + */ +@Configuration +public class MetricsConfig { + @Autowired + private PDMetrics metrics; + + @Bean + public MeterRegistryCustomizer metricsCommonTags() { + return (registry) -> registry.config().commonTags("hg", "pd"); + } + + @Bean + public MeterRegistryCustomizer registerMeters() { + return (registry) -> { + metrics.init(registry); + }; + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java new file mode 100644 index 0000000000..17f0dad1d2 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java @@ -0,0 +1,99 @@ +package org.apache.hugegraph.pd.metrics; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.service.PDService; +import io.micrometer.core.instrument.Gauge; 
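// [Editor's note — illustration, not part of this patch] PDMetrics below registers gauges named
// "hg.up", "hg.graphs", "hg.stores" and "hg.partitions" (PREFIX = "hg"), and MetricsConfig adds
// the common tag hg=pd to every meter. Scraped through the Prometheus registry
// (micrometer-registry-prometheus), they would typically surface roughly as:
//
//     hg_graphs{hg="pd",} 3.0
//     hg_stores{hg="pd",} 3.0
//     hg_partitions{hg="pd",graph="hugegraph",} 12.0
//
// The sample values and the graph name above are illustrative only.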
+import io.micrometer.core.instrument.MeterRegistry; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author lynn.bond@hotmail.com on 2022/1/5 + */ +@Component +@Slf4j +public final class PDMetrics { + public final static String PREFIX = "hg"; + private final static AtomicLong graphs = new AtomicLong(0); + private MeterRegistry registry; + + @Autowired + private PDService pdService; + + public synchronized void init(MeterRegistry meterRegistry) { + + if (registry == null) { + registry = meterRegistry; + registerMeters(); + } + + } + + private void registerMeters() { + Gauge.builder(PREFIX + ".up", () -> 1).register(registry); + + Gauge.builder(PREFIX + ".graphs", () -> updateGraphs()) + .description("Number of graphs registered in PD") + .register(registry); + + Gauge.builder(PREFIX + ".stores", () -> updateStores()) + .description("Number of stores registered in PD") + .register(registry); + + } + + private long updateGraphs() { + long buf = getGraphs(); + + if (buf != graphs.get()) { + graphs.set(buf); + registerGraphMetrics(); + } + return buf; + } + + private long updateStores() { + return getStores(); + } + + private long getGraphs() { + return getGraphMetas().size(); + } + + private long getStores(){ + try { + return this.pdService.getStoreNodeService().getStores(null).size(); + } catch (PDException e) { + log.error(e.getMessage(),e); + e.printStackTrace(); + } + return 0; + } + + private List getGraphMetas(){ + try { + return this.pdService.getPartitionService().getGraphs(); + } catch (PDException e) { + log.error(e.getMessage(),e); + } + return Collections.EMPTY_LIST; + } + + private void registerGraphMetrics(){ + this.getGraphMetas().forEach(meta->{ + Gauge.builder(PREFIX + ".partitions",this.pdService.getPartitionService() + ,e-> e.getPartitions(meta.getGraphName()).size()) + .description("Number of partitions assigned to a graph") + .tag("graph",meta.getGraphName()) + .register(this.registry); + + }); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java new file mode 100644 index 0000000000..eb1ca0005e --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java @@ -0,0 +1,55 @@ +package org.apache.hugegraph.pd.model; + +import java.util.Objects; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/1 + */ +public class DemoModel { + private int status; + private String text; + + public DemoModel(int status, String text) { + this.status = status; + this.text = text; + } + + public int getStatus() { + return status; + } + + public DemoModel setStatus(int status) { + this.status = status; + return this; + } + + public String getText() { + return text; + } + + public DemoModel setText(String text) { + this.text = text; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DemoModel that = (DemoModel) o; + return status == that.status && Objects.equals(text, that.text); + } + + @Override + public int hashCode() { + return Objects.hash(status, text); + } + + @Override + public String toString() { + return "HgNodeStatus{" + + "status=" + status + + ", text='" + text + '\'' + + '}'; + } +} diff --git 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java new file mode 100644 index 0000000000..60340beb57 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java @@ -0,0 +1,9 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class GraphRestRequest { + private int partitionCount; + private int shardCount; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java new file mode 100644 index 0000000000..92b48982ea --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java @@ -0,0 +1,8 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class GraphSpaceRestRequest { + private Long storageLimit; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java new file mode 100644 index 0000000000..daa48dffad --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java @@ -0,0 +1,8 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class PeerRestRequest { + private String peerList; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java new file mode 100644 index 0000000000..b7deee61be --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java @@ -0,0 +1,72 @@ +package org.apache.hugegraph.pd.model; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * @author lynn.bond@hotmail.com on 2022/2/14 + */ +public class PromTargetsModel { + private static final String LABEL_METRICS_PATH = "__metrics_path__"; + private static final String LABEL_SCHEME = "__scheme__"; + private static final String LABEL_JOB_NAME = "job"; + private static final String LABEL_CLUSTER = "cluster"; + + private Set targets = new HashSet<>(); + private Map labels = new HashMap<>(); + + public static PromTargetsModel of() { + return new PromTargetsModel(); + } + + private PromTargetsModel() {} + + public Set getTargets() { + return targets; + } + + public Map getLabels() { + return labels; + } + + public PromTargetsModel addTarget(String target) { + if (target == null) return this; + this.targets.add(target); + return this; + } + + public PromTargetsModel setTargets(Set targets) { + if (targets != null) { + this.targets = targets; + } + return this; + } + + public PromTargetsModel setMetricsPath(String path) { + return this.addLabel(LABEL_METRICS_PATH, path); + } + + public PromTargetsModel setScheme(String scheme) { + return this.addLabel(LABEL_SCHEME, scheme); + } + + public PromTargetsModel setClusterId(String clusterId){ + return this.addLabel(LABEL_CLUSTER,clusterId); + } + + public PromTargetsModel addLabel(String label, String value) { + if (label == null || value == null) return this; + this.labels.put(label, value); + return this; + } + + @Override + public String toString() { + return "PromTargetModel{" + + "targets=" + targets + + ", labels=" + labels + + '}'; + } +} diff --git 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java new file mode 100644 index 0000000000..a001578736 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java @@ -0,0 +1,17 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +import java.util.HashMap; + +/** + * @author zhangyingjie + * @date 2022/2/8 + **/ +@Data +public class RegistryQueryRestRequest { + + String appName; + String version; + HashMap labels; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java new file mode 100644 index 0000000000..9682de795a --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java @@ -0,0 +1,20 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +import java.util.HashMap; + +/** + * @author zhangyingjie + * @date 2022/2/8 + **/ +@Data +public class RegistryRestRequest { + + String id; + String appName; + String version; + String address; + String interval; + HashMap labels; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java new file mode 100644 index 0000000000..a09744cff7 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java @@ -0,0 +1,19 @@ +package org.apache.hugegraph.pd.model; + +import com.baidu.hugegraph.pd.grpc.Pdpb; +import lombok.Data; + +import java.io.Serializable; + +/** + * @author zhangyingjie + * @date 2022/2/8 + **/ +@Data +public class RegistryRestResponse { + + Pdpb.ErrorType errorType; + String message; + Serializable data; + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java new file mode 100644 index 0000000000..410a3f528a --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java @@ -0,0 +1,40 @@ +package org.apache.hugegraph.pd.model; + +import com.baidu.hugegraph.pd.grpc.Pdpb; +import lombok.Data; + +import java.io.Serializable; +import java.util.HashMap; + +/** + * @author tianxiaohui + * @date 2022-07-21 + */ +@Data +public class RestApiResponse { + String message; + Object data; + int status; + + public RestApiResponse(Object data, Pdpb.ErrorType status, String message) { + if (data == null){ + data = new HashMap(); + } + this.data = data; + this.status = status.getNumber(); + this.message = message; + } + + public RestApiResponse() { + + } + + public RestApiResponse(Object data, int status, String message){ + if (data == null){ + data = new HashMap(); + } + this.data = data; + this.status = status; + this.message = message; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java new file mode 100644 index 0000000000..30a02d49b7 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java @@ -0,0 +1,8 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class StoreRestRequest { + String storeState; +} diff --git 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java new file mode 100644 index 0000000000..cf0f03ddef --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java @@ -0,0 +1,13 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +/** + * @author zhangyingjie + * @date 2022/3/23 + **/ +@Data +public class TimeRangeRequest { + String startTime; + String endTime; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java new file mode 100644 index 0000000000..b76897ff93 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java @@ -0,0 +1,157 @@ +package org.apache.hugegraph.pd.notice; + +import com.baidu.hugegraph.pd.common.HgAssert; +import lombok.extern.slf4j.Slf4j; + +import java.util.function.Function; +import java.util.function.Supplier; + +/** + * @author lynn.bond@hotmail.com on 2022/2/10 + */ +@Slf4j +public class NoticeBroadcaster { + private long noticeId; + private String durableId; + private Supplier noticeSupplier; + private Supplier durableSupplier; + private Function removeFunction; + private int state; //0=ready; 1=notified; 2=done ack; -1=error + private int counter; + private long timestamp; + + public static NoticeBroadcaster of(Supplier noticeSupplier) { + HgAssert.isArgumentNotNull(noticeSupplier, "noticeSupplier"); + return new NoticeBroadcaster(noticeSupplier); + } + + private NoticeBroadcaster(Supplier noticeSupplier) { + this.noticeSupplier = noticeSupplier; + this.timestamp = System.currentTimeMillis(); + } + + public NoticeBroadcaster setDurableSupplier(Supplier durableSupplier) { + this.durableSupplier = durableSupplier; + return this; + } + + public NoticeBroadcaster setRemoveFunction(Function removeFunction) { + this.removeFunction = removeFunction; + return this; + } + + public NoticeBroadcaster notifying() { + + if (this.state >= 2) { + log.warn("Aborted notifying as ack has done. 
notice: {}", this); + return this; + } + + this.counter++; + + if (this.durableId == null && this.durableSupplier != null) { + try { + this.durableId = this.durableSupplier.get(); + } catch (Throwable t) { + log.error("Failed to invoke durableSupplier, cause by:", t); + } + } + + try { + this.noticeId = this.noticeSupplier.get(); + state = 1; + } catch (Throwable t) { + state = -1; + log.error("Failed to invoke noticeSupplier: {}; cause by: " + this.noticeSupplier.toString(), t); + } + + return this; + } + + public boolean checkAck(long ackNoticeId) { + boolean flag = false; + + if (this.noticeId == ackNoticeId) { + flag = true; + this.state = 2; + } + + if (flag) { + this.doRemoveDurable(); + } + + return flag; + } + + public boolean doRemoveDurable() { + log.info("Removing NoticeBroadcaster is stating, noticeId:{}, durableId: {}" + , this.noticeId, this.durableId); + boolean flag = false; + + if (this.removeFunction == null) { + log.warn("The remove-function hasn't been set."); + return false; + } + + if (this.durableId == null) { + log.warn("The durableId hasn't been set."); + return false; + } + + try { + if (!(flag = this.removeFunction.apply(this.durableId))) { + log.error("Removing NoticeBroadcaster was not complete, noticeId: {}, durableId: {}" + , this.noticeId, this.durableId); + } + } catch (Throwable t) { + log.error("Failed to remove NoticeBroadcaster, noticeId: " + + this.noticeId + ", durableId: " + this.durableId + ". Cause by:", t); + } + + return flag; + } + + public void setDurableId(String durableId) { + + if (HgAssert.isInvalid(durableId)) { + log.warn("Set an invalid durable-id to NoticeBroadcaster."); + } + + this.durableId = durableId; + } + + public long getNoticeId() { + return noticeId; + } + + public int getState() { + return state; + } + + public int getCounter() { + return counter; + } + + public String getDurableId() { + return durableId; + } + + public long getTimestamp() { + return timestamp; + } + + public void setTimestamp(long timestamp) { + this.timestamp = timestamp; + } + + @Override + public String toString() { + return "NoticeBroadcaster{" + + "noticeId=" + noticeId + + ", durableId='" + durableId + '\'' + + ", state=" + state + + ", counter=" + counter + + ", timestamp=" + timestamp + + '}'; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java new file mode 100644 index 0000000000..56e02226b7 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java @@ -0,0 +1,202 @@ +package org.apache.hugegraph.pd.pulse; + +import com.baidu.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import com.baidu.hugegraph.pd.grpc.pulse.PulseResponse; +import com.baidu.hugegraph.pd.grpc.pulse.PulseType; +import com.baidu.hugegraph.pd.util.IdUtil; +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Function; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/9 + */ +@ThreadSafe +@Slf4j +abstract class AbstractObserverSubject { + /* send notice to client */ + private final Map> observerHolder = new HashMap<>(1024); + /* notice from client */ + private final Map listenerHolder = new HashMap<>(1024); + + private final byte[] lock = new byte[0]; + 
private final PulseResponse.Builder builder = PulseResponse.newBuilder(); + private final PulseType pulseType; + + protected AbstractObserverSubject(PulseType pulseType) { + this.pulseType = pulseType; + } + + /** + * Add an observer from remote client + * + * @param observerId + * @param responseObserver + */ + void addObserver(Long observerId, StreamObserver responseObserver) { + synchronized (this.observerHolder) { + + if (this.observerHolder.containsKey(observerId)) { + responseObserver.onError( + new Exception("The observer-id[" + observerId + "] of " + this.pulseType.name() + + " subject has been existing.")); + return; + } + + log.info("Adding a " + this.pulseType + "'s observer, observer-id is [" + observerId + "]."); + this.observerHolder.put(observerId, responseObserver); + } + + } + + /** + * Remove an observer by id + * + * @param observerId + * @param responseObserver + */ + void removeObserver(Long observerId, StreamObserver responseObserver) { + synchronized (this.observerHolder) { + log.info("Removing a " + this.pulseType + "'s observer, observer-id is [" + observerId + "]."); + this.observerHolder.remove(observerId); + } + + responseObserver.onCompleted(); + } + + abstract String toNoticeString(PulseResponse res); + + /** + * + * @param c + * @return notice ID + */ + protected long notifyClient(Consumer c) { + synchronized (lock) { + + if (c == null) { + log.error(this.pulseType.name() + "'s notice was abandoned, caused by: notifyObserver(null)"); + return -1; + } + + try { + c.accept(this.builder.clear()); + } catch (Throwable t) { + log.error(this.pulseType.name() + "'s notice was abandoned, caused by:", t); + return -1; + } + + long noticeId = IdUtil.createMillisId(); + + Iterator>> iter = observerHolder.entrySet().iterator(); + + // long start = System.currentTimeMillis(); + while (iter.hasNext()) { + Map.Entry> entry = iter.next(); + Long observerId = entry.getKey(); + PulseResponse res = this.builder.setObserverId(observerId).setNoticeId(noticeId).build(); + + try { + entry.getValue().onNext(res); + } catch (Throwable e) { + log.error("Failed to send " + this.pulseType.name() + "'s notice[" + toNoticeString(res) + + "] to observer[" + observerId + "].", e); + + // TODO: ? try multi-times? 
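+ // An observer that fails to receive a notice stays registered (iter.remove() below is
+ // commented out), so only the failure is logged and the next notice is attempted again.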
+ // iter.remove(); + log.error("Removed a " + this.pulseType.name() + "'s observer[" + entry.getKey() + + "], because of once failure of sending.", e); + } + + } + + // log.info("notice client: notice id: {}, ts :{}, cost: {}", noticeId, System.currentTimeMillis(), + // (System.currentTimeMillis() - start )/1000); + return noticeId; + } + + } + + protected void notifyError(String message){ + synchronized (lock) { + Iterator>> iter = observerHolder.entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry> entry = iter.next(); + Long observerId = entry.getKey(); + PulseResponse res = this.builder.setObserverId(observerId).build(); + try { + entry.getValue().onError( + Status.PERMISSION_DENIED.withDescription(message).asRuntimeException()); + } catch (Throwable e) { + log.error("Failed to send " + this.pulseType.name() + "'s notice[" + toNoticeString(res) + + "] to observer[" + observerId + "].", e); + + } + } + } + } + + /** + * Add a listener from local server + * + * @param listenerId + * @param listener + */ + void addListener(Long listenerId, PulseListener listener) { + synchronized (this.listenerHolder) { + + if (this.listenerHolder.containsKey(listenerId)) { + listener.onError( + new Exception("The listener-id[" + listenerId + "] of " + this.pulseType.name() + + " subject has been existing.")); + return; + } + + log.info("Adding a " + this.pulseType + "'s listener, listener-id is [" + listenerId + "]."); + this.listenerHolder.put(listenerId, listener); + + } + + } + + /** + * Remove a listener by id + * + * @param listenerId + * @param listener + */ + void removeListener(Long listenerId, PulseListener listener) { + synchronized (this.listenerHolder) { + log.info("Removing a " + this.pulseType + "'s listener, listener-id is [" + listenerId + "]."); + this.observerHolder.remove(listenerId); + } + + listener.onCompleted(); + } + + abstract Function getNoticeHandler(); + + void handleClientNotice(PulseNoticeRequest noticeRequest) { + + Iterator> iter = listenerHolder.entrySet().iterator(); + + while (iter.hasNext()) { + Map.Entry entry = iter.next(); + Long listenerId = entry.getKey(); + try { + entry.getValue().onNext(getNoticeHandler().apply(noticeRequest)); + } catch (Throwable e) { + log.error(e.getMessage(),e); + } + + } + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java new file mode 100644 index 0000000000..3e55177f37 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java @@ -0,0 +1,348 @@ +package org.apache.hugegraph.pd.pulse; + +import com.baidu.hugegraph.pd.common.HgAssert; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.pulse.*; + +import org.apache.hugegraph.pd.notice.NoticeBroadcaster; +import org.apache.hugegraph.pd.util.IdUtil; + +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Parser; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.*; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/8 + * @version 1.1.0 added ack on 2020/02/11 + */ +@Slf4j +@ThreadSafe 
+public class PDPulseSubject { + private final static long NOTICE_EXPIRATION_TIME=30*60*1000; + private final static int RETRYING_PERIOD_SECONDS=60; + private final static Map subjectHolder = new ConcurrentHashMap<>(); + private final static ConcurrentLinkedQueue broadcasterQueue = new ConcurrentLinkedQueue<>(); + private final static ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(1); + + private static Supplier> queueRetrieveFunction = () -> Collections.emptyList(); + private static Function queueDurableFunction = (e) -> true; + private static Function queueRemoveFunction = (e) -> true; + + static { + subjectHolder.put(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name(), new PartitionHeartbeatSubject()); + // add some other type here... + // ... + } + + //Schedule tasks + static { + scheduledExecutor.scheduleAtFixedRate(() -> doSchedule(), 0, RETRYING_PERIOD_SECONDS, TimeUnit.SECONDS); + } + + private static void doSchedule() { + appendQueue(); + expireQueue(); + //retry + broadcasterQueue.forEach(e -> { + + e.notifying();}); + } + + private static void appendQueue(){ + broadcasterQueue.addAll( + getQueueItems() + .parallelStream() + .filter(e -> !broadcasterQueue + .stream() + .anyMatch(b -> e.getItemId().equals(b.getDurableId())) + ).map(e -> createBroadcaster(e)) + .peek(e->log.info("Appending notice: {}",e)) + .filter(e -> e != null) + .collect(Collectors.toList()) + ); + } + + private static void expireQueue(){ + broadcasterQueue.removeIf(e->{ + if(System.currentTimeMillis()-e.getTimestamp()>=NOTICE_EXPIRATION_TIME){ + log.info("Notice was expired, trying to remove, notice: {}",e); + return e.doRemoveDurable(); + }else{ + return false; + } + }); + } + + private static List getQueueItems() { + try { + return queueRetrieveFunction.get(); + } catch (Throwable t) { + log.error("Failed to retrieve queue from queueRetrieveFunction, cause by:", t); + } + + return Collections.emptyList(); + } + + public static void setQueueRetrieveFunction(Supplier> queueRetrieveFunction) { + HgAssert.isArgumentNotNull(queueRetrieveFunction, "queueRetrieveFunction"); + PDPulseSubject.queueRetrieveFunction = queueRetrieveFunction; + } + + public static void setQueueDurableFunction(Function queueDurableFunction) { + HgAssert.isArgumentNotNull(queueDurableFunction, "queueDurableFunction"); + PDPulseSubject.queueDurableFunction = queueDurableFunction; + } + + public static void setQueueRemoveFunction(Function queueRemoveFunction) { + HgAssert.isArgumentNotNull(queueRemoveFunction, "queueRemoveFunction"); + PDPulseSubject.queueRemoveFunction = queueRemoveFunction; + } + + /** + * Add a responseObserver of client + * + * @param responseObserver + * @return + */ + public static StreamObserver addObserver(StreamObserver responseObserver) { + isArgumentNotNull(responseObserver, "responseObserver"); + return new PDPulseStreamObserver(responseObserver); + } + + /** + * Send Notice to pd-client + * + * @param responseBuilder + */ + public static void notifyClient(PartitionHeartbeatResponse.Builder responseBuilder) { + HgAssert.isArgumentNotNull(responseBuilder, "responseBuilder"); + notifyClient(responseBuilder.build()); + } + + private static void notifyClient(PartitionHeartbeatResponse response) { + doBroadcast(createBroadcaster(response)); + } + + private static void doBroadcast(NoticeBroadcaster broadcaster) { + broadcasterQueue.add(broadcaster.notifying()); + } + + private static T getSubject(PulseType pulseType, Class clazz) { + return (T) subjectHolder.get(pulseType.name()); + } + + 
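// Rebuilds a broadcaster from a queue item persisted via queueDurableFunction, so notices
+ // that were never acknowledged can be re-sent by the retry schedule (including after a restart).
+ 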
private static NoticeBroadcaster createBroadcaster(Metapb.QueueItem item) { + PartitionHeartbeatResponse notice=toNotice(item); + if(notice==null)return null; + NoticeBroadcaster res=createBroadcaster(notice); + res.setDurableId(item.getItemId()); + res.setTimestamp(item.getTimestamp()); + return res; + } + + private static NoticeBroadcaster createBroadcaster(PartitionHeartbeatResponse notice) { + return NoticeBroadcaster.of(getNoticeSupplier(notice)) + .setDurableSupplier(getDurableSupplier(notice)) + .setRemoveFunction(getRemoveFunction()); + } + + public static Supplier getNoticeSupplier(PartitionHeartbeatResponse notice) { + // TODO: PartitionHeartbeatSubject.class -> T + return () -> getSubject(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT, PartitionHeartbeatSubject.class) + .notifyClient(notice); + } + + private static Supplier getDurableSupplier(PartitionHeartbeatResponse notice) { + return () -> { + Metapb.QueueItem queueItem = toQueueItem(notice); + String res = null; + + try { + if (queueDurableFunction.apply(queueItem)) { + res = queueItem.getItemId(); + } else { + log.error("Failed to persist queue-item that contained PartitionHeartbeatResponse: {}" + , notice); + } + } catch (Throwable t) { + log.error("Failed to invoke queueDurableFunction, cause by:", t); + } + + return res; + }; + } + + private static Function getRemoveFunction() { + return s -> { + boolean flag = false; + + try { + flag = queueRemoveFunction.apply(s); + } catch (Throwable t) { + log.error("Failed to invoke queueRemoveFunction, cause by:", t); + } + + return flag; + }; + } + + private static Metapb.QueueItem toQueueItem(PartitionHeartbeatResponse notice) { + return Metapb.QueueItem.newBuilder() + .setItemId(IdUtil.createMillisStr()) + .setItemClass(notice.getClass().getTypeName()) + .setItemContent(notice.toByteString()) + .setTimestamp(System.currentTimeMillis()) + .build(); + } + + private static PartitionHeartbeatResponse toNotice(Metapb.QueueItem item) { + Parser parser = PartitionHeartbeatResponse.parser(); + PartitionHeartbeatResponse buf = null; + try { + buf = parser.parseFrom(item.getItemContent()); + } catch (InvalidProtocolBufferException t) { + log.error("Failed to parse queue-item to PartitionHeartbeatResponse, cause by:", t); + } + return buf; + } + + public static void notifyError(String message) { + subjectHolder.forEach((k, v) -> { + v.notifyError(message); + }); + } + + /** + * Adding notice listener, the notice is come from pd-client. + * + * @param listener + */ + public static void listenPartitionHeartbeat(PulseListener listener) { + subjectHolder.get(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name()).addListener(createListenerId(), listener); + } + + private static Long createListenerId() { + // TODO: Maybe some other way... 
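+ // Listener ids currently come from the same millisecond-based generator as observer ids.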
+ return createObserverId(); + } + + private static Long createObserverId() { + return IdUtil.createMillisId(); + } + + /* inner classes below */ + + private static class PDPulseStreamObserver implements StreamObserver { + private final StreamObserver responseObserver; + private AbstractObserverSubject subject; + private Long observerId; + + PDPulseStreamObserver(StreamObserver responseObserver) { + this.responseObserver = responseObserver; + } + + private void cancelObserver() { + + if (this.subject == null) { + this.responseObserver.onError(new Exception("Invoke cancel-observer before create-observer.")); + return; + } + + this.subject.removeObserver(this.observerId, this.responseObserver); + } + + private void addObserver(PulseCreateRequest request) { + if (this.subject != null) { + return; + } + + PulseType pulseType = getPulseType(request); + if (pulseType == null) return; + + this.subject = getSubject(pulseType); + this.observerId = createObserverId(); + + this.subject.addObserver(this.observerId, this.responseObserver); + } + + private void ackNotice(long noticeId,long observerId) { + // log.info("ack noticeId, noticeId: {}, observerId: {}, ts:{}", + // noticeId,observerId, System.currentTimeMillis()); + broadcasterQueue.removeIf(e->e.checkAck(noticeId)); + } + + private PulseType getPulseType(PulseCreateRequest request) { + PulseType pulseType = request.getPulseType(); + + if (pulseType.equals(PulseType.PULSE_TYPE_UNKNOWN)) { + this.responseObserver.onError(new Exception("unknown pulse type.")); + return null; + } + + return pulseType; + } + + private AbstractObserverSubject getSubject(PulseType pulseType) { + AbstractObserverSubject subject = subjectHolder.get(pulseType.name()); + + if (subject == null) { + responseObserver.onError(new Exception("Unsupported pulse-type: " + pulseType.name())); + return null; + } + + return subject; + } + + private void handleNotice(PulseNoticeRequest noticeRequest) { + subject.handleClientNotice(noticeRequest); + } + + @Override + public void onNext(PulseRequest pulseRequest) { + + if (pulseRequest.hasCreateRequest()) { + this.addObserver(pulseRequest.getCreateRequest()); + return; + } + + if (pulseRequest.hasCancelRequest()) { + this.cancelObserver(); + return; + } + + if (pulseRequest.hasNoticeRequest()) { + this.handleNotice(pulseRequest.getNoticeRequest()); + } + + if (pulseRequest.hasAckRequest()) { + this.ackNotice(pulseRequest.getAckRequest().getNoticeId() + ,pulseRequest.getAckRequest().getObserverId()); + return; + } + } + + @Override + public void onError(Throwable throwable) { + this.cancelObserver(); + } + + @Override + public void onCompleted() { + this.cancelObserver(); + } + + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java new file mode 100644 index 0000000000..67fca9c0c2 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java @@ -0,0 +1,39 @@ +package org.apache.hugegraph.pd.pulse; + +import com.baidu.hugegraph.pd.grpc.pulse.*; + +import java.util.function.Function; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/9 + */ +public class PartitionHeartbeatSubject extends AbstractObserverSubject { + + PartitionHeartbeatSubject() { + super(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT); + } + + @Override + String toNoticeString(PulseResponse res) { + return res.getPartitionHeartbeatResponse().toString(); + } + + @Override + 
Function getNoticeHandler() { + return r->r.getPartitionHeartbeatRequest(); + } + + void notifyClient(PartitionHeartbeatResponse.Builder responseBuilder) { + + super.notifyClient(b -> { + b.setPartitionHeartbeatResponse(responseBuilder);; + }); + + } + + long notifyClient(PartitionHeartbeatResponse response) { + return super.notifyClient(b -> { + b.setPartitionHeartbeatResponse(response);; + }); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java new file mode 100644 index 0000000000..fd66f1f977 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java @@ -0,0 +1,25 @@ +package org.apache.hugegraph.pd.pulse; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/9 + */ +public interface PulseListener { + /** + * Invoked on new notice. + * + * @param notice the notice. + */ + void onNext(T notice); + + /** + * Invoked on errors. + * + * @param throwable the error. + */ + void onError(Throwable throwable); + + /** + * Invoked on completion. + */ + void onCompleted(); +} \ No newline at end of file diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java new file mode 100644 index 0000000000..cd552e4471 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java @@ -0,0 +1,164 @@ +package org.apache.hugegraph.pd.rest; + +import com.baidu.hugegraph.pd.common.PDException; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.MessageOrBuilder; +import com.google.protobuf.util.JsonFormat; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class API { + + public static String STATUS_KEY = "status"; + public static String ERROR_KEY = "error"; + public static String QUOTATION = "\""; + public static String COMMA = ","; + public static String COLON = ": "; + public static final String VERSION = "3.6.3"; + public static final String PD = "PD"; + public static final String STORE = "STORE"; + + + public String toJSON(List values, String key) { + + StringBuilder builder = new StringBuilder(); + builder.append("{") + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0").append(COMMA) + .append(QUOTATION).append(key).append(QUOTATION).append(COLON) + .append("[ "); + + if (values != null) { + values.forEach(s -> { + try { + builder.append(JsonFormat.printer().print(s)); + } catch (InvalidProtocolBufferException e) { + e.printStackTrace(); + } + builder.append(","); + }); + builder.deleteCharAt(builder.length() - 1); + } + builder.append("]}"); + return builder.toString(); + } + + public String toJSON(MessageOrBuilder value, String key) { + StringBuilder builder = new StringBuilder(); + builder.append("{") + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0").append(COMMA) + .append(QUOTATION).append(key).append(QUOTATION).append(COLON); + try { + if (value != null) + builder.append(JsonFormat.printer().print(value)); + else + builder.append("{}"); + builder.append("}"); + return builder.toString(); + } catch (InvalidProtocolBufferException e) { + e.printStackTrace(); + return toJSON(e); + } + + } + + public String toJSON(Map> values) { + StringBuilder builder = new StringBuilder(); + 
builder.append("{ "); + for (Map.Entry> entry : values.entrySet()) { + String entryKey = entry.getKey(); + List entryValue = entry.getValue(); + builder.append(QUOTATION).append(entryKey).append(QUOTATION).append(COLON).append("["); + if ((entryValue != null) && !(entryValue.isEmpty())) { + entryValue.forEach(s -> { + try { + if (s == null){ + builder.append("null"); + }else{ + builder.append(JsonFormat.printer().print(s)); + } + } catch (InvalidProtocolBufferException e) { + e.printStackTrace(); + } + builder.append(","); + }); + builder.deleteCharAt(builder.length() - 1); //删除最后一个逗号 + } + builder.append("]").append(COMMA); + } + builder.deleteCharAt(builder.length() - 1); + builder.append("}"); + return builder.toString(); + } + + public String toJSON(PDException exception) { + StringBuilder builder = new StringBuilder(); + builder.append("{") + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append(exception.getErrorCode()).append(COMMA) + .append(QUOTATION).append(ERROR_KEY).append(QUOTATION).append(COLON) + .append(QUOTATION).append(exception.getMessage()).append(QUOTATION); + builder.append("}"); + + return builder.toString(); + } + + public String toJSON(Exception exception) { + StringBuilder builder = new StringBuilder(); + builder.append("{") + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("-1").append(COMMA) + .append(QUOTATION).append(ERROR_KEY).append(QUOTATION).append(COLON) + .append(QUOTATION).append(exception.getMessage()).append(QUOTATION); + builder.append("}"); + + return builder.toString(); + } + + /** + * @param object + * @return + * @author tianxiaohui + */ + public String toJSON(Object object) { + ObjectMapper mapper = new ObjectMapper(); + try { + return mapper.writeValueAsString(object); + } catch (JsonProcessingException e) { + e.printStackTrace(); + return e.getMessage(); + } + } + + public Map okMap(String k, Object v) { + Map map = new HashMap<>(); + map.put(STATUS_KEY, 0); + map.put(k, v); + return map; + } + + public String toJSON(List values, JsonFormat.TypeRegistry registry) { + + StringBuilder builder = new StringBuilder(); + builder.append("{") + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0").append(COMMA) + .append(QUOTATION).append("log").append(QUOTATION).append(COLON) + .append("[ "); + JsonFormat.Printer printer = JsonFormat.printer().usingTypeRegistry(registry); + if (values != null) { + values.forEach(s -> { + try { + builder.append(printer.print(s)); + } catch (InvalidProtocolBufferException e) { + e.printStackTrace(); + } + builder.append(","); + }); + builder.deleteCharAt(builder.length() - 1); + } + builder.append("]}"); + return builder.toString(); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java new file mode 100644 index 0000000000..fca2db50ae --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java @@ -0,0 +1,261 @@ +package org.apache.hugegraph.pd.rest; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; + +import org.apache.hugegraph.pd.model.GraphRestRequest; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +import 
org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.*; + +import javax.servlet.http.HttpServletRequest; +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class GraphAPI extends API { + @Autowired + PDRestService pdRestService; + @Autowired + PDService pdService; + + @GetMapping(value = "/graph/partitionSizeRange", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getPartitionSizeRange() { + try { + int minPartitionSize = 1; + int maxPartitionSize = pdService.getStoreNodeService().getShardGroups().size(); + Map dataMap = new HashMap<>(); + dataMap.put("minPartitionSize", minPartitionSize); + dataMap.put("maxPartitionSize", maxPartitionSize); + return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + } catch (PDException e) { + log.error("PDException:", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + @GetMapping(value = "/graphs", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getGraphs() { + RestApiResponse response = new RestApiResponse(); + try { + List graphs = pdRestService.getGraphs(); + List resultGraphs = new ArrayList<>(); + for (Metapb.Graph graph : graphs) { + if ((graph.getGraphName() != null) && (graph.getGraphName().endsWith("/g"))) { + resultGraphs.add(new GraphStatistics(graph)); + } + } + HashMap dataMap = new HashMap<>(); + dataMap.put("graphs", resultGraphs); + response.setData(dataMap); + response.setStatus(Pdpb.ErrorType.OK.getNumber()); + response.setMessage(Pdpb.ErrorType.OK.name()); + + } catch (PDException e) { + log.error("PDException: ", e); + response.setData(new HashMap()); + response.setStatus(e.getErrorCode()); + response.setMessage(e.getMessage()); + } + return response; + } + + @PostMapping(value = "/graph/**", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String setGraph(@RequestBody GraphRestRequest body, HttpServletRequest request) { + try { + String requestURL = request.getRequestURL().toString(); + final String prefix = "/graph/"; + final int limit = 2; + String graphName = requestURL.split(prefix, limit)[1]; + graphName = URLDecoder.decode(graphName, "utf-8"); + Metapb.Graph curGraph = pdRestService.getGraph(graphName); + Metapb.Graph.Builder builder = Metapb.Graph.newBuilder(curGraph == null ? 
Metapb.Graph.getDefaultInstance() : curGraph); + builder.setGraphName(graphName); + if (body.getPartitionCount() > 0) { + builder.setPartitionCount(body.getPartitionCount()); + } + + Metapb.Graph newGraph = pdRestService.updateGraph(builder.build()); + return toJSON(newGraph, "graph"); + } catch (PDException exception) { + return toJSON(exception); + } catch (Exception e) { + return toJSON(e); + } + } + + + @GetMapping(value = "/graph/**", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getGraph(HttpServletRequest request) throws UnsupportedEncodingException { + RestApiResponse response = new RestApiResponse(); + GraphStatistics statistics = null; + String requestURL = request.getRequestURL().toString(); + final String prefix = "/graph/"; + final int limit = 2; + String graphName = requestURL.split(prefix, limit)[1]; + graphName = URLDecoder.decode(graphName, "utf-8"); + try { + Metapb.Graph graph = pdRestService.getGraph(graphName); + if (graph != null) { + statistics = new GraphStatistics(graph); + response.setData(statistics); + } else { + response.setData(new HashMap()); //没有该图 + } + response.setStatus(Pdpb.ErrorType.OK.getNumber()); + response.setMessage(Pdpb.ErrorType.OK.name()); + } catch (PDException e) { + log.error(e.getMessage()); + response.setData(new HashMap()); + response.setStatus(Pdpb.ErrorType.UNKNOWN.getNumber()); + response.setMessage(e.getMessage()); + } + return response; + } + + @Data + class Shard { + long partitionId; + long storeId; + String state; + String role; + int progress; + + public Shard(Metapb.ShardStats shardStats, long partitionId) { + this.role = String.valueOf(shardStats.getRole()); + this.storeId = shardStats.getStoreId(); + this.state = String.valueOf(shardStats.getState()); + this.partitionId = partitionId; + this.progress = shardStats.getProgress(); + } + + public Shard(Metapb.Shard shard, long partitionId) { + this.role = String.valueOf(shard.getRole()); + this.storeId = shard.getStoreId(); + this.state = Metapb.ShardState.SState_Normal.name(); //gshard的状态默认为normal + this.progress = 0; + this.partitionId = partitionId; + } + + } + + @Data + class Partition { + int partitionId; + String graphName; + String workState; + long startKey; + long endKey; + List shards; + long dataSize; + + public Partition(Metapb.Partition pt, Metapb.PartitionStats partitionStats) { + if (pt != null) { + partitionId = pt.getId(); + startKey = pt.getStartKey(); + endKey = pt.getEndKey(); + workState = String.valueOf(pt.getState()); + graphName = pt.getGraphName(); + final int postfixLength = 2; + graphName = graphName.substring(0, graphName.length() - postfixLength); + if (partitionStats != null) { + List shardStatsList = partitionStats.getShardStatsList(); + List shardsList = new ArrayList<>(); + for (Metapb.ShardStats shardStats : shardStatsList) { + Shard shard = new Shard(shardStats, partitionId); + shardsList.add(shard); + } + this.shards = shardsList; + } else { + List shardsList = new ArrayList<>(); + try { + var shardGroup = pdService.getStoreNodeService().getShardGroup(pt.getId()); + if (shardGroup != null) { + for (Metapb.Shard shard1 : shardGroup.getShardsList()) { + shardsList.add(new Shard(shard1, partitionId)); + } + } else { + log.error("GraphAPI.Partition(), get shard group: {} returns null", pt.getId()); + } + } catch (PDException e){ + log.error("Partition init failed, error: {}", e.getMessage()); + } + this.shards = shardsList; + } + + + } + } + } + + @Data + class GraphStatistics { + //图统计信息 + String graphName; + 
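// Per-graph summary built in the constructor below: key count and data size are summed from
+ // leader shards only, and the two-character suffix ("/g" etc.) is stripped from graphName.
+ 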
long partitionCount; + String state; + List partitions; + long dataSize; + //todo + int nodeCount; + int edgeCount; + long keyCount; + + public GraphStatistics(Metapb.Graph graph) throws PDException { + if (graph == null) { + return; + } + Map partition2DataSize = new HashMap<>(); + graphName = graph.getGraphName(); + partitionCount = graph.getPartitionCount(); + state = String.valueOf(graph.getState()); + // 数据量及key的数量 + List stores = pdRestService.getStores(graphName); + for (Metapb.Store store : stores) { + List graphStatsList = store.getStats().getGraphStatsList(); + for (Metapb.GraphStats graphStats : graphStatsList) { + if ((graphName.equals(graphStats.getGraphName())) + && (Metapb.ShardRole.Leader.equals(graphStats.getRole()))) { + keyCount += graphStats.getApproximateKeys(); + dataSize += graphStats.getApproximateSize(); + partition2DataSize.put(graphStats.getPartitionId(), graphStats.getApproximateSize()); + } + } + } + List resultPartitionList = new ArrayList<>(); + List tmpPartitions = pdRestService.getPartitions(graphName); + if ((tmpPartitions != null) && (!tmpPartitions.isEmpty())) { + // 需要返回的分区信息 + for (Metapb.Partition partition : tmpPartitions) { + Metapb.PartitionStats partitionStats = pdRestService + .getPartitionStats(graphName, partition.getId()); + Partition pt = new Partition(partition, partitionStats); + pt.dataSize = partition2DataSize.getOrDefault(partition.getId(), 0L); + resultPartitionList.add(pt); + } + } + partitions = resultPartitionList; + // 隐去图名后面的 /g /m /s + final int postfixLength = 2; + graphName = graphName.substring(0, graphName.length() - postfixLength); + } + } +} \ No newline at end of file diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java new file mode 100644 index 0000000000..3cbcaee7ac --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java @@ -0,0 +1,75 @@ +package org.apache.hugegraph.pd.rest; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; + +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.model.GraphSpaceRestRequest; + +import lombok.extern.slf4j.Slf4j; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.*; + +import javax.servlet.http.HttpServletRequest; +import java.net.URLDecoder; +import java.util.List; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class GraphSpaceAPI extends API{ + @Autowired + PDRestService pdRestService; + + @GetMapping(value = "/graph-spaces", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getGraphSpaces() { + try { + List graphSpaces = pdRestService.getGraphSpaces(); + return toJSON(graphSpaces, "graph-spaces"); + } catch (PDException e) { + e.printStackTrace(); + return toJSON(e); + } + } + + @PostMapping(value = "/graph-spaces/**", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String setGraphSpace(@RequestBody GraphSpaceRestRequest body, HttpServletRequest request) { + try { + String requestURL = request.getRequestURL().toString(); + String graphSpaceName = requestURL.split("/graph-spaces/", 2)[1]; + graphSpaceName = URLDecoder.decode(graphSpaceName, "utf-8"); + Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder() + .setName(graphSpaceName) + 
.setStorageLimit(body.getStorageLimit()) + .build(); + Metapb.GraphSpace newGraphSpace = pdRestService.setGraphSpace(graphSpace); + return toJSON(newGraphSpace, "graph-spaces"); + } catch (PDException exception) { + return toJSON(exception); + } catch (Exception e) { + return toJSON(e); + } + } + + @GetMapping(value = "/graph-spaces/**", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getGraphSpace(HttpServletRequest request) { + try { + String requestURL = request.getRequestURL().toString(); + String graphSpaceName = requestURL.split("/graph-spaces/", 2)[1]; + graphSpaceName = URLDecoder.decode(graphSpaceName, "utf-8"); + Metapb.GraphSpace graphSpace = pdRestService.getGraphSpace(graphSpaceName); + return toJSON(graphSpace, "graphs-paces"); + } catch (PDException exception) { + return toJSON(exception); + } catch (Exception e) { + return toJSON(e); + } + } + + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java new file mode 100644 index 0000000000..864ef2bba8 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java @@ -0,0 +1,232 @@ +package org.apache.hugegraph.pd.rest; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; + +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; + +import com.baidu.hugegraph.pd.raft.RaftEngine; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +@RestController +@Slf4j +@RequestMapping("/") +public class IndexAPI extends API { + @Autowired + PDService pdService; + @Autowired + PDRestService pdRestService; + @GetMapping(value = "/", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public BriefStatistics index() throws PDException, ExecutionException, InterruptedException { + + BriefStatistics statistics = new BriefStatistics(); + statistics.leader = RaftEngine.getInstance().getLeaderGrpcAddress(); + statistics.state = pdService.getStoreNodeService().getClusterStats().getState().toString(); + statistics.storeSize = pdService.getStoreNodeService().getActiveStores().size(); + statistics.graphSize = pdService.getPartitionService().getGraphs().size(); + statistics.partitionSize = pdService.getStoreNodeService().getShardGroups().size(); + return statistics; + + } + + @Data + class BriefStatistics { + String state; + String leader; + int memberSize; + int storeSize; + int graphSize; + int partitionSize; + } + + @GetMapping(value = "/v1/cluster", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse cluster() throws InterruptedException, ExecutionException { + Statistics statistics = new Statistics(); + try { + statistics.state = String.valueOf(pdService.getStoreNodeService().getClusterStats().getState()); + String leaderGrpcAddress = 
RaftEngine.getInstance().getLeaderGrpcAddress(); + List pdList = new ArrayList<>(); + for (Metapb.Member member : RaftEngine.getInstance().getMembers()) { + Member member1 = new Member(member); + if ((leaderGrpcAddress != null) && (leaderGrpcAddress.equals(member.getGrpcUrl()))) { + member1.role = "Leader"; + statistics.pdLeader = member1; + } else { + member1.role = "Follower"; + } + pdList.add(member1); + } + statistics.pdList = pdList; + statistics.memberSize = pdList.size(); + List stores = new ArrayList<>(); + for (Metapb.Store store : pdService.getStoreNodeService().getStores()) { + stores.add(new Store(store)); + } + statistics.stores = stores; + statistics.storeSize = statistics.stores.size(); + statistics.onlineStoreSize = pdService.getStoreNodeService().getActiveStores().size(); + statistics.offlineStoreSize = statistics.storeSize - statistics.onlineStoreSize; + List graphs = pdRestService.getGraphs(); + // 图的数量,只统计/g + statistics.graphSize = graphs.stream().filter((g) -> (g.getGraphName() != null) + && (g.getGraphName().endsWith("/g"))).count(); + statistics.partitionSize = pdService.getStoreNodeService().getShardGroups().size(); + statistics.shardCount = pdService.getConfigService().getPDConfig().getShardCount(); + for (Metapb.Store store : pdService.getStoreNodeService().getStores()) { + List graphStatsList = store.getStats().getGraphStatsList(); + for (Metapb.GraphStats graphStats : graphStatsList) { + statistics.keyCount += graphStats.getApproximateKeys(); + statistics.dataSize += graphStats.getApproximateSize(); + } + } + // 数据状态:根据图的状态推出数据状态,枚举值越大,问题越严重, 默认为正常状态 + Metapb.PartitionState dataState = Metapb.PartitionState.PState_Normal; + for (Metapb.Graph graph : pdRestService.getGraphs()) { + if (graph.getState() == Metapb.PartitionState.UNRECOGNIZED) { + continue; // 未识别不参与对比,不然会抛出异常 + } + if ((graph.getState() != null) && (graph.getState().getNumber() > dataState.getNumber())) { + dataState = graph.getState(); + } + } + statistics.dataState = dataState.name(); + return new RestApiResponse(statistics, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + } catch (PDException e){ + log.error("PD Exception: ", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + @Data + class Store { + long storeId; + String address; + String raftAddress; + String version; + String state; + long startTimeStamp; + + public Store(Metapb.Store store) { + if (store != null) { + storeId = store.getId(); + address = store.getAddress(); + raftAddress = store.getRaftAddress(); + version = store.getVersion(); + state = String.valueOf(store.getState()); + startTimeStamp = store.getStartTimestamp(); + } + + } + } + + @Data + class Member { + String raftUrl; + String grpcUrl; + String restUrl; + String state; + String dataPath; + String role; + String serviceName; //服务名称,自定义属性 + String serviceVersion; //静态定义 + long startTimeStamp; //进程启动时间 + + public Member(Metapb.Member member) { + if (member != null) { + raftUrl = member.getRaftUrl(); + grpcUrl = member.getGrpcUrl(); + restUrl = member.getRestUrl(); + state = String.valueOf(member.getState()); + dataPath = member.getDataPath(); + serviceName = grpcUrl + "-PD"; + serviceVersion = VERSION; + startTimeStamp = ManagementFactory.getRuntimeMXBean().getStartTime(); + } + } + + public Member() { + + } + } + + @Data + class Statistics { + /** + * 集群状态 + */ + String state; + /** + * 数据状态 + */ + String dataState; + /** + * pd集群成员 + */ + List pdList; + /** + * pd集群的leader + */ + Member pdLeader; + /** + * pd集群的大小 + */ + int memberSize; 
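+ // The remaining fields summarize the cluster: store totals (all/online/offline), graph and
+ // partition counts, the configured shard count, and the aggregate key count / data size.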
+ /** + * stores列表 + */ + List stores; + /** + * store的数量 + */ + int storeSize; + /** + * onlineStore + */ + int onlineStoreSize; + /** + * 离线的store的数量 + */ + int offlineStoreSize; + /** + * 图的数量 + */ + long graphSize; + /** + * 分区的数量 + */ + int partitionSize; + /** + * 分区副本数 + */ + int shardCount; + /** + * key的数量 + */ + long keyCount; + /** + * 数据量 + */ + long dataSize; + + } +} + diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java new file mode 100644 index 0000000000..c0f3fadd3d --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java @@ -0,0 +1,208 @@ +package org.apache.hugegraph.pd.rest; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; + +import org.apache.hugegraph.pd.model.PeerRestRequest; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.service.PDService; + +import com.baidu.hugegraph.pd.raft.RaftEngine; + +import io.grpc.stub.CallStreamObserver; +import io.grpc.stub.StreamObserver; +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.*; + +import javax.servlet.http.HttpServletRequest; +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.*; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class MemberAPI extends API { + //TODO + @Autowired + PDService pdService; + + @GetMapping(value = "/members", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getMembers() throws InterruptedException, ExecutionException { + + String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(); + CallStreamObserverWrap response = new CallStreamObserverWrap<>(); + pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); + List members = new ArrayList<>(); + Member leader = null; + Map stateCountMap = new HashMap<>(); + for (Metapb.Member member : response.get().get(0).getMembersList()) { + String stateKey = member.getState().name(); + stateCountMap.put(stateKey, stateCountMap.getOrDefault(stateKey, 0) + 1); + Member member1 = new Member(member); + if ((leaderGrpcAddress != null) && (leaderGrpcAddress.equals(member.getGrpcUrl()))) { + member1.role = "Leader"; + leader = member1; + } else { + member1.role = "Follower"; + } + members.add(member1); + } + String state = pdService.getStoreNodeService().getClusterStats().getState().toString(); + HashMap resultMap = new HashMap<>(); + resultMap.put("state", state); + resultMap.put("pdList", members); + resultMap.put("pdLeader", leader); + resultMap.put("numOfService", members.size()); + resultMap.put("numOfNormalService", stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0)); + resultMap.put("stateCountMap", stateCountMap); + return new RestApiResponse(resultMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + } + + @PostMapping(value = "/members/change", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String changePeerList(@RequestBody PeerRestRequest body, HttpServletRequest request) { + try { + Pdpb.ChangePeerListRequest rpcRequest = Pdpb.ChangePeerListRequest.newBuilder().setPeerList( + 
body.getPeerList()).build(); + CountDownLatch latch = new CountDownLatch(1); + final Pdpb.ResponseHeader[] responseHeader = {null}; + StreamObserver observer = new StreamObserver() { + @Override + public void onNext(Pdpb.getChangePeerListResponse value) { + responseHeader[0] = value.getHeader(); + } + + @Override + public void onError(Throwable t) { + responseHeader[0] = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setType( + Pdpb.ErrorType.UNKNOWN).setMessage( + t.getMessage()).build()).build(); + latch.countDown(); + } + + @Override + public void onCompleted() { + latch.countDown(); + } + }; + pdService.changePeerList(rpcRequest, observer); + latch.await(); + return toJSON(responseHeader[0], "changeResult"); + } catch (Exception e) { + return toJSON(e); + } + } + + + public static class CallStreamObserverWrap extends CallStreamObserver implements Future> { + CompletableFuture> future = new CompletableFuture<>(); + List values = new ArrayList<>(); + + @Override + public boolean isReady() { + return false; + } + + @Override + public void setOnReadyHandler(Runnable runnable) { + + } + + @Override + public void disableAutoInboundFlowControl() { + + } + + @Override + public void request(int i) { + + } + + @Override + public void setMessageCompression(boolean b) { + + } + + @Override + public void onNext(V v) { + values.add(v); + } + + @Override + public void onError(Throwable throwable) { + future.completeExceptionally(throwable); + } + + @Override + public void onCompleted() { + future.complete(values); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return future.cancel(mayInterruptIfRunning); + } + + @Override + public boolean isCancelled() { + return future.isCancelled(); + } + + @Override + public boolean isDone() { + return future.isDone(); + } + + @Override + public List get() throws InterruptedException, ExecutionException { + return future.get(); + } + + @Override + public List get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + return future.get(timeout, unit); + } + } + + @Data + class Member { + String raftUrl; + String grpcUrl; + String restUrl; + String state; + String dataPath; + String role; + String serviceName; //服务名称,自定义属性 + String serviceVersion; //静态定义 + long startTimeStamp; //启动时间,暂时取进程的启动时间 + + public Member(Metapb.Member member) { + if (member != null) { + raftUrl = member.getRaftUrl(); + grpcUrl = member.getGrpcUrl(); + restUrl = member.getRestUrl(); + state = String.valueOf(member.getState()); + dataPath = member.getDataPath(); + serviceName = grpcUrl + "-PD"; + serviceVersion = "3.6.3"; + startTimeStamp = ManagementFactory.getRuntimeMXBean().getStartTime(); + + } + + } + + public Member() { + + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java new file mode 100644 index 0000000000..7a35593b3b --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java @@ -0,0 +1,435 @@ +package org.apache.hugegraph.pd.rest; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; + +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.model.TimeRangeRequest; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.util.DateUtil; + +import com.google.protobuf.util.JsonFormat; +import 
lombok.Data;
+import lombok.extern.slf4j.Slf4j;
+
+import org.apache.commons.lang.time.DateFormatUtils;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.*;
+
+import java.util.*;
+import java.util.concurrent.ExecutionException;
+
+@RestController
+@Slf4j
+@RequestMapping("/v1")
+public class PartitionAPI extends API {
+    @Autowired
+    PDRestService pdRestService;
+
+    public static final String DEFAULT_DATETIME_FORMAT = "yyyy-MM-dd HH:mm:ss";
+
+    @GetMapping(value = "/highLevelPartitions", produces = MediaType.APPLICATION_JSON_VALUE)
+    public RestApiResponse getHighLevelPartitions() {
+        // graphs stored in each partition
+        Map> partitions2GraphsMap = new HashMap<>();
+        Map resultPartitionsMap = new HashMap<>();
+        // keyCount of each partition, taken from the leader replica only
+        Map partition2KeyCount = new HashMap<>();
+        // dataSize of each partition, taken from the leader replica only
+        Map partition2DataSize = new HashMap<>();
+        List stores;
+        Map storesMap = new HashMap<>();
+        try {
+            stores = pdRestService.getStores("");
+        } catch (PDException e) {
+            log.error("getStores error", e);
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+        for (Metapb.Store store : stores) {
+            storesMap.put(store.getId(), store);
+            List graphStatsList = store.getStats().getGraphStatsList();
+            for (Metapb.GraphStats graphStats : graphStatsList) {
+                // collect the graph info kept on this partition (leader replicas only)
+                if (Metapb.ShardRole.Leader != graphStats.getRole()) {
+                    continue;
+                }
+                // keyCount of the partition (not broken down by graph)
+                partition2KeyCount.put(graphStats.getPartitionId(),
+                                       partition2KeyCount.getOrDefault(graphStats.getPartitionId(),
+                                                                       graphStats.getApproximateKeys()));
+                // dataSize of the partition, accumulated over its graphs
+                partition2DataSize.put(graphStats.getPartitionId(),
+                                       partition2DataSize.getOrDefault(graphStats.getPartitionId(), 0L)
+                                       + graphStats.getApproximateSize());
+                // build the graph info under this partition
+                if (partitions2GraphsMap.get(graphStats.getPartitionId()) == null) {
+                    partitions2GraphsMap.put(graphStats.getPartitionId(), new HashMap<>());
+                }
+                Map partitionGraphsMap = partitions2GraphsMap.get(graphStats.getPartitionId());
+                partitionGraphsMap.put(graphStats.getGraphName(), new GraphStats(graphStats));
+            }
+        }
+        // build all the partition information to be returned
+        List partitionList = pdRestService.getPartitions("");
+        for (Metapb.Partition partition : partitionList) {
+            // fill in startKey and endKey of the graph info inside the partition
+            if (partitions2GraphsMap.get(partition.getId()) != null) {
+                GraphStats graphStats = partitions2GraphsMap.get(partition.getId()).get(partition.getGraphName());
+                if (graphStats != null) {
+                    graphStats.startKey = partition.getStartKey();
+                    graphStats.endKey = partition.getEndKey();
+                }
+            }
+            // build the overall partition info (not per graph)
+            if ((resultPartitionsMap.get(partition.getId()) == null)
+                && (!partition.getGraphName().endsWith("/s"))
+            ) {
+                Metapb.PartitionStats partitionStats;
+                try {
+                    partitionStats = pdRestService.getPartitionStats(partition.getGraphName(), partition.getId());
+                } catch (PDException e) {
+                    log.error("getPartitionStats error", e);
+                    partitionStats = null;
+                }
+                // initialize the partition info
+                HighLevelPartition resultPartition = new HighLevelPartition(partition, partitionStats);
+                resultPartition.keyCount = partition2KeyCount.getOrDefault(resultPartition.partitionId, 0L);
+                resultPartition.dataSize = partition2DataSize.getOrDefault(resultPartition.partitionId, 0L);
+                for (ShardStats shard : resultPartition.shards) {
+                    // assign the replica address and partition id
+                    shard.address = storesMap.get(shard.storeId).getAddress();
+                    shard.partitionId = partition.getId();
+                }
+                if ((partitionStats != null) && (partitionStats.getLeader() != null)) {
+                    long storeId = partitionStats.getLeader().getStoreId(); // storeId of the leader
+                    resultPartition.leaderAddress = storesMap.get(storeId).getAddress(); // address of the leader
+                }
+                resultPartitionsMap.put(partition.getId(), resultPartition);
+            }
+        }
+        // build the graph list returned for each partition: keep only /g graphs, sorted by name
+        for (Map.Entry entry : resultPartitionsMap.entrySet()) {
+            Integer partitionId = entry.getKey();
+            HighLevelPartition currentPartition = resultPartitionsMap.get(partitionId);
+            Map graphsMap = partitions2GraphsMap
+                    .getOrDefault(partitionId, new HashMap<>()); // avoid a NullPointerException below
+            ArrayList graphsList = new ArrayList<>();
+            for (Map.Entry entry1 : graphsMap.entrySet()) {
+                if (!entry1.getKey().endsWith("/g")) {
+                    continue; // keep only /g graphs
+                }
+                String graphName = entry1.getKey();
+                GraphStats tmpGraph = graphsMap.get(graphName);
+                final int postfixLength = 2;
+                tmpGraph.graphName = tmpGraph.graphName.substring(0, tmpGraph.graphName.length() - postfixLength);
+                graphsList.add(tmpGraph);
+            }
+            graphsList.sort((o1, o2) -> o1.graphName.compareTo(o2.graphName));
+            currentPartition.graphs = graphsList;
+        }
+        List resultPartitionList = new ArrayList<>();
+        if (!resultPartitionsMap.isEmpty()) {
+            ArrayList partitionids = new ArrayList<>(resultPartitionsMap.keySet());
+            partitionids.sort((o1, o2) -> o1.intValue() - o2.intValue());
+            for (Integer partitionId : partitionids) {
+                resultPartitionList.add(resultPartitionsMap.get(partitionId));
+            }
+        }
+        HashMap dataMap = new HashMap<>();
+        dataMap.put("partitions", resultPartitionList);
+        return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+    }
+
+    @GetMapping(value = "/partitions", produces = MediaType.APPLICATION_JSON_VALUE)
+    public RestApiResponse getPartitions() {
+        try {
+            List partitions = new ArrayList<>(); // partition objects to return
+            List partitionList = pdRestService.getPartitions("");
+            List stores = pdRestService.getStoreStats(false);
+            // state of each partition's raft node
+            HashMap> raftMap = new HashMap<>();
+
+            HashMap> shardIndexMap = new HashMap<>();
+            String delimiter = "@";
+            for (int i = 0; i < stores.size(); i++) {
+                Metapb.Store store = stores.get(i);
+                Metapb.StoreStats storeStats = store.getStats();
+                HashMap storeRaftStats = new HashMap<>();
+                List raftStatsList = storeStats.getRaftStatsList();
+                for (int j = 0; j < raftStatsList.size(); j++) {
+                    Metapb.RaftStats raftStats = raftStatsList.get(j);
+                    storeRaftStats.put(raftStats.getPartitionId(), raftStats);
+                }
+
+                HashMap partitionShardStats = new HashMap<>();
+                List graphStatsList = storeStats.getGraphStatsList();
+                StringBuilder builder = new StringBuilder();
+                for (int j = 0; j < graphStatsList.size(); j++) {
+                    Metapb.GraphStats graphStats = graphStatsList.get(j);
+                    String graphName = graphStats.getGraphName();
+                    String partitionId = Integer.toString(graphStats.getPartitionId());
+                    builder.append(graphName).append(delimiter).append(partitionId);
+                    partitionShardStats.put(builder.toString(), graphStats);
+                    builder.setLength(0);
+                }
+                raftMap.put(store.getId(), storeRaftStats);
+                shardIndexMap.put(store.getId(), partitionShardStats);
+            }
+
+            for (Metapb.Partition pt : partitionList) {
+                Partition partition = new Partition(pt);
+                String graphName = partition.getGraphName();
+                partition.getShards().sort(Comparator.comparing(Shard::getStoreId));
+                Metapb.PartitionStats partitionStats = pdRestService.getPartitionStats(graphName, pt.getId());
+                Map shardStats = new HashMap<>();
+                if (partitionStats != null) {
+                    String dateTime = DateFormatUtils.format(
+                            partitionStats.getTimestamp(), DEFAULT_DATETIME_FORMAT);
+
partition.setTimestamp(dateTime); + shardStats = getShardStats(partitionStats); + } + + for (Metapb.Shard shard : pdRestService.getShardList(pt.getId())) { + Map finalShardStats = shardStats; + partition.getShards().add(new Shard() {{ + storeId = Long.toString(shard.getStoreId()); + role = shard.getRole(); + address = pdRestService.getStore( + shard.getStoreId()).getAddress(); + if (finalShardStats.containsKey(shard.getStoreId())) { + state = finalShardStats.get(shard.getStoreId()).getState().toString(); + progress = finalShardStats.get(shard.getStoreId()).getProgress(); + role = finalShardStats.get(shard.getStoreId()).getRole(); + } + + + HashMap storeRaftStats = raftMap.get(shard.getStoreId()); + if (storeRaftStats != null) { + Metapb.RaftStats raftStats = storeRaftStats.get(partition.getId()); + if (raftStats != null) { + committedIndex = Long.toString(raftStats.getCommittedIndex()); + } + } + }}); + } + + partition.setPartitionStats(partitionStats); + + partitions.add(partition); + } + partitions.sort(Comparator.comparing(Partition::getGraphName).thenComparing(Partition::getId)); + HashMap dataMap = new HashMap<>(); + dataMap.put("partitions", partitions); + return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + } catch (PDException e) { + log.error("query metric data error", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + @GetMapping(value = "/partitionsAndStats", produces = MediaType.APPLICATION_JSON_VALUE) + public String getPartitionsAndStats() { + //for debug use, return partition && partitionStats + try { + Map> graph2Partitions = new HashMap<>(); + Map> graph2PartitionStats = new HashMap<>(); + for (Metapb.Graph graph : pdRestService.getGraphs()) { + List partitionList = new ArrayList<>(); + List partitionStatsList = new ArrayList<>(); + for (Metapb.Partition partition : pdRestService.getPartitions(graph.getGraphName())) { + Metapb.PartitionStats partitionStats = pdRestService + .getPartitionStats(graph.getGraphName(), partition.getId()); + partitionList.add(partition); + partitionStatsList.add(partitionStats); + } + graph2Partitions.put(graph.getGraphName(), partitionList); + graph2PartitionStats.put(graph.getGraphName(), partitionStatsList); + } + StringBuilder builder = new StringBuilder(); + builder.append("{\"partitions\":").append(toJSON(graph2Partitions)); + builder.append(",\"partitionStats\":").append(toJSON(graph2PartitionStats)).append("}"); + return builder.toString(); + } catch (PDException e) { + log.error("PD exception:" + e); + return toJSON(e); + } + } + + private Map getShardStats(Metapb.PartitionStats partitionStats) { + Map stats = new HashMap<>(); + if (partitionStats.getShardStatsList() != null) + partitionStats.getShardStatsList().forEach(shardStats -> { + stats.put(shardStats.getStoreId(), shardStats); + }); + return stats; + } + + @PostMapping(value = "/partitions/log", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getPartitionLog(@RequestBody TimeRangeRequest request) { + try { + Date dateStart = DateUtil.getDate(request.getStartTime()); + Date dateEnd = DateUtil.getDate(request.getEndTime()); + List changedRecords = pdRestService.getPartitionLog(dateStart.getTime(), + dateEnd.getTime()); + if (changedRecords != null) { + JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry + .newBuilder().add(Pdpb.SplitDataRequest.getDescriptor()).build(); + return toJSON(changedRecords, registry); + } else { + return 
toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "error")); + } + } catch (PDException e) { + return toJSON(e); + } + } + + @GetMapping(value = "/", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public Statistics getStatistics() throws PDException, ExecutionException, InterruptedException { + + Statistics statistics = new Statistics(); + int partitionId = -1; + return statistics; + } + + @Data + class Shard { + String address; + String storeId; + Metapb.ShardRole role; + String state; + int progress; + String committedIndex; + long partitionId; + + } + + @Data + class Partition { + int id; + long version; + String graphName; + long startKey; + long endKey; + + Metapb.PartitionState workState; + List shards; + String timestamp; + + + Partition(Metapb.Partition pt) { + id = pt.getId(); + version = pt.getVersion(); + graphName = pt.getGraphName(); + startKey = pt.getStartKey(); + endKey = pt.getEndKey(); + workState = pt.getState(); + shards = new ArrayList<>(); + } + + public void setPartitionStats(Metapb.PartitionStats stats) { + + } + } + + @Data + class Statistics { + + + } + + @Data + class HighLevelPartition { + int partitionId; + String state; + String leaderAddress; + long keyCount; + long dataSize; + String shardState; + int progress; + long raftTerm; //任期 + List graphs; + List shards; + String failureCause = ""; + + HighLevelPartition(Metapb.Partition partition, Metapb.PartitionStats partitionStats) { + partitionId = partition.getId(); + state = String.valueOf(partition.getState()); + if (partitionStats != null) { + raftTerm = partitionStats.getLeaderTerm(); + } + Metapb.ShardState tmpShardState = Metapb.ShardState.SState_Normal; + if (partitionStats != null) { + shards = new ArrayList<>(); + for (Metapb.ShardStats shardStats : partitionStats.getShardStatsList()) { + if ((shardStats.getState() != Metapb.ShardState.UNRECOGNIZED) + && (shardStats.getState().getNumber() > tmpShardState.getNumber())) { + tmpShardState = shardStats.getState(); + progress = shardStats.getProgress(); + } + shards.add(new ShardStats(shardStats)); + } + } else { + shards = new ArrayList<>(); + try { + for (Metapb.Shard shard : pdRestService.getShardList(partition.getId())) { + shards.add(new ShardStats(shard)); + } + } catch (PDException e){ + log.error("get shard list failed, {}", e.getMessage()); + } + } + // 综合所有副本的状态,给shardState赋值 + shardState = tmpShardState.name(); + } + } + + @Data + class GraphStats { + String graphName; + long keyCount; + long startKey; + long endKey; + long dataSize; + String workState; + long partitionId; + + GraphStats(Metapb.GraphStats graphStats) { + graphName = graphStats.getGraphName(); + keyCount = graphStats.getApproximateKeys(); + workState = graphStats.getWorkState().toString(); + dataSize = graphStats.getApproximateSize(); + partitionId = graphStats.getPartitionId(); + } + } + + @Data + class ShardStats { + long storeId; + String role; + String state; + int progress; + //额外属性 + long partitionId; + String address; + + ShardStats(Metapb.ShardStats shardStats) { + storeId = shardStats.getStoreId(); + role = String.valueOf(shardStats.getRole()); + state = shardStats.getState().toString(); + progress = shardStats.getProgress(); + } + + ShardStats(Metapb.Shard shard) { + //当没有shardStats的初始化方法 + storeId = shard.getStoreId(); + role = String.valueOf(shard.getRole()); + state = Metapb.ShardState.SState_Normal.name(); + progress = 0; + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java 
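A minimal client-side sketch for exercising the read-only /v1/highLevelPartitions endpoint of the PartitionAPI above; the host/port and Java 11's built-in java.net.http.HttpClient are assumptions, not part of this patch. The same pattern applies to /v1/partitions and /v1/partitionsAndStats.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class HighLevelPartitionsExample {
        public static void main(String[] args) throws Exception {
            String pdRest = "http://127.0.0.1:8620"; // placeholder: use the PD rest-server address of your deployment
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create(pdRest + "/v1/highLevelPartitions"))
                    .GET()
                    .build();
            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            // Body is the JSON-serialized RestApiResponse; its data map carries a "partitions" list
            System.out.println(response.statusCode());
            System.out.println(response.body());
        }
    }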
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java new file mode 100644 index 0000000000..b7a1ce2987 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java @@ -0,0 +1,72 @@ +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.model.PromTargetsModel; +import org.apache.hugegraph.pd.service.PromTargetsService; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +/** + * @author lynn.bond@hotmail.com on 2022/2/14 + */ +@RestController +@Slf4j +@RequestMapping("/v1/prom") +public class PromTargetsAPI { + + @Autowired + private PromTargetsService service; + + @GetMapping(value = "/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) + public ResponseEntity> getPromTargets(@PathVariable(value = "appName", required = true) String appName) { + return ResponseEntity.of(Optional.ofNullable(this.service.getTargets(appName))); + } + + @GetMapping(value = "/targets-all", produces = MediaType.APPLICATION_JSON_VALUE) + public ResponseEntity> getPromAllTargets() { + return ResponseEntity.of(Optional.ofNullable(this.service.getAllTargets())); + } + + @GetMapping(value = "/demo/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) + public List getDemoTargets(@PathVariable(value = "appName", required = true) String targetType) { + + PromTargetsModel model =null; + switch (targetType) { + case "node": + model=PromTargetsModel.of() + .addTarget("10.14.139.26:8100") + .addTarget("10.14.139.27:8100") + .addTarget("10.14.139.28:8100") + .setMetricsPath("/metrics") + .setScheme("http"); + break; + case "store": + model=PromTargetsModel.of() + .addTarget("172.20.94.98:8521") + .addTarget("172.20.94.98:8522") + .addTarget("172.20.94.98:8523") + .setMetricsPath("/actuator/prometheus") + .setScheme("http"); + break; + case "pd": + model=PromTargetsModel.of() + .addTarget("172.20.94.98:8620") + .setMetricsPath("/actuator/prometheus"); + + break; + default: + + } + + return Collections.singletonList(model); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java new file mode 100644 index 0000000000..ea93674a50 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java @@ -0,0 +1,179 @@ +package org.apache.hugegraph.pd.rest; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.common.PDRuntimeException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.Pdpb.GetMembersResponse; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.license.LicenseVerifierService; +import org.apache.hugegraph.pd.model.RegistryQueryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestResponse; +import 
org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import javax.servlet.http.HttpServletRequest; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; + +/** + * @author zhangyingjie + * @date 2022/2/8 + **/ +@RestController +@Slf4j +@RequestMapping("/v1") +public class RegistryAPI extends API { + + @Autowired + PDRestService pdRestService; + @Autowired + PDService pdService; + + @PostMapping(value = "/registry", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse register(@RequestBody RegistryRestRequest body, HttpServletRequest request) { + RegistryRestResponse registryResponse = null; + try { + long interval = Long.valueOf(body.getInterval()).longValue(); + NodeInfo info = NodeInfo.newBuilder().setAppName(body.getAppName()).setVersion(body.getVersion()) + .setAddress(body.getAddress()).putAllLabels(body.getLabels()) + .setInterval(interval).build(); + registryResponse = pdRestService.register(info); + } catch (PDException e) { + registryResponse = new RegistryRestResponse(); + registryResponse.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + registryResponse.setMessage(e.getMessage()); + } catch (PDRuntimeException e) { + registryResponse = new RegistryRestResponse(); + registryResponse.setErrorType(Pdpb.ErrorType.LICENSE_VERIFY_ERROR); + registryResponse.setMessage(e.getMessage()); + } + return registryResponse; + } + + @PostMapping(value = "/registryInfo", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse getInfo(@RequestBody RegistryQueryRestRequest body, + HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { + boolean labelNotEmpty = body.getLabels() != null && !body.getLabels().isEmpty(); + Query query = Query.newBuilder() + .setAppName(StringUtils.isEmpty(body.getAppName()) ? "" : body.getAppName()) + .putAllLabels(labelNotEmpty ? body.getLabels() : new HashMap<>()) + .setVersion(StringUtils.isEmpty(body.getVersion()) ? 
"" : body.getVersion()) + .build(); + ArrayList registryResponse = pdRestService.getNodeInfo(query); + response.setErrorType(Pdpb.ErrorType.OK); + response.setData(registryResponse); + } catch (Exception e) { + log.warn(e.getMessage()); + response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + @GetMapping(value = "/allInfo", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse allInfo(HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { + //1.normal registry + Query query = Query.newBuilder().setAppName("").putAllLabels(new HashMap<>()).setVersion("") + .build(); + ArrayList registryResponse = pdRestService.getNodeInfo(query); + //2.pd member + LinkedList pdMembers = getMembers(); + //3.store member + List stores = pdRestService.getStores(""); + LinkedList storeMembers = new LinkedList<>(); + for (Metapb.Store store : stores) { + RegistryRestRequest restRequest = new RegistryRestRequest(); + restRequest.setAddress(store.getAddress()); + restRequest.setVersion(store.getVersion()); + restRequest.setAppName(STORE); + restRequest.setId(String.valueOf(store.getId())); + storeMembers.add(restRequest); + } + response.setErrorType(Pdpb.ErrorType.OK); + HashMap result = new HashMap<>(); + result.put("other", registryResponse); + result.put(PD, pdMembers); + result.put(STORE, storeMembers); + response.setData(result); + } catch (Exception e) { + log.warn(e.getMessage()); + response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + private LinkedList getMembers() throws Exception { + CallStreamObserverWrap response = new CallStreamObserverWrap<>(); + pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); + LinkedList members =new LinkedList<>(); + List membersList = response.get().get(0).getMembersList(); + for (Metapb.Member member : membersList) { + RegistryRestRequest restRequest = new RegistryRestRequest(); + restRequest.setAddress(member.getRestUrl()); + restRequest.setVersion(VERSION); + restRequest.setAppName(PD); + members.add(restRequest); + } + return members; + } + + @GetMapping(value = "/license", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse getLicenseInfo(HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { + response.setErrorType(Pdpb.ErrorType.OK); + LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); + response.setData(licenseVerifierService.getContext()); + } catch (Exception e) { + log.warn(e.getMessage()); + response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + @GetMapping(value = "/license/machineInfo", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse getLicenseMachineInfo(HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { + response.setErrorType(Pdpb.ErrorType.OK); + LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); + response.setData(licenseVerifierService.getIpAndMac()); + } catch (Exception e) { + log.warn(e.getMessage()); + response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + 
response.setMessage(e.getMessage()); + } + return response; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java new file mode 100644 index 0000000000..84a672c5ea --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java @@ -0,0 +1,100 @@ +package org.apache.hugegraph.pd.rest; + + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; + +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class ShardAPI extends API { + @Autowired + PDRestService pdRestService; + @Autowired + PDService pdService; + + @GetMapping(value = "/shards", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getShards() { + + //对shards信息的统计 + try { + List resultShardList = new ArrayList<>(); + List graphs = pdRestService.getGraphs(); + for (Metapb.Graph graph : graphs) { + String graphName = graph.getGraphName(); + List partitions = pdRestService.getPartitions(graphName); + for (Metapb.Partition pt : partitions) { + Metapb.PartitionStats partitionStats = pdRestService.getPartitionStats(graphName, pt.getId()); + if (partitionStats != null) { + List shardStatsList = partitionStats.getShardStatsList(); + for (Metapb.ShardStats shardStats : shardStatsList) { + Shard resultShard = new Shard(); + resultShard.storeId = shardStats.getStoreId(); + resultShard.partitionId = pt.getId(); + resultShard.role = String.valueOf(shardStats.getRole()); + resultShard.state = String.valueOf(shardStats.getState()); + resultShard.graphName = graphName; + resultShard.progress = shardStats.getProgress(); + resultShardList.add(resultShard); + } + } else { + List shardList = new ArrayList<>(); + var shardGroup = pdService.getStoreNodeService().getShardGroup(pt.getId()); + if (shardGroup != null){ + shardList = shardGroup.getShardsList(); + } else { + log.error("ShardAPI.getShards(), get shards of group id: {} returns null.", pt.getId()); + } + + for (Metapb.Shard shard : shardList) { + Shard resultShard = new Shard(); + resultShard.storeId = shard.getStoreId(); + resultShard.partitionId = pt.getId(); + resultShard.role = String.valueOf(shard.getRole()); + resultShard.state = String.valueOf(Metapb.ShardState.SState_Normal); + resultShard.graphName = graphName; + resultShard.progress = 0; + resultShardList.add(resultShard); + } + } + } + } + HashMap dataMap = new HashMap<>(); + dataMap.put("shards", resultShardList); + return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + } catch (PDException e) { + log.error("PDException: ", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + @Data + class Shard { + long storeId; + long partitionId; + String role; + String state; + String 
graphName; + int progress; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java new file mode 100644 index 0000000000..a07b7025ac --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java @@ -0,0 +1,315 @@ +package org.apache.hugegraph.pd.rest; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.model.StoreRestRequest; +import org.apache.hugegraph.pd.model.TimeRangeRequest; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.util.DateUtil; +import com.google.protobuf.util.JsonFormat; +import lombok.Data; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.*; + +import java.util.*; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class StoreAPI extends API { + + @Autowired + PDRestService pdRestService; + + @GetMapping(value = "/stores", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getStores() { + List storeStatsList = new ArrayList<>(); + try { + HashMap dataMap = new HashMap<>(); + Map stateCountMap = new HashMap<>(); + for (Metapb.Store store : pdRestService.getStores("")) { + String stateKey = store.getState().name(); + stateCountMap.put(stateKey, stateCountMap.getOrDefault(stateKey, 0) + 1); + storeStatsList.add(new StoreStatistics(store)); + } + storeStatsList.sort((o1, o2) -> o1.address.compareTo(o2.address)); + dataMap.put("stores", storeStatsList); + dataMap.put("numOfService", storeStatsList.size()); + dataMap.put("numOfNormalService", stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0)); + dataMap.put("stateCountMap", stateCountMap); + return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + } catch (PDException e) { + log.error("PDException", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + // 仅支持通过该接口修改 storeState + @PostMapping(value = "/store/{storeId}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String setStore(@PathVariable long storeId, @RequestBody StoreRestRequest request) { + try { + Metapb.Store lastStore = pdRestService.getStore(storeId); + if (lastStore != null) { + Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore); + Metapb.StoreState storeState = Metapb.StoreState.valueOf(request.getStoreState()); + builder.setState(storeState); + Metapb.Store newStore = pdRestService.updateStore(builder.build()); + return toJSON(newStore, "store"); + } else { + return toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "error")); + } + } catch (PDException e) { + return toJSON(e); + } + } + + @GetMapping(value = "/shardGroups", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getShardGroups() { + try { + return toJSON(pdRestService.getShardGroups(), "shardGroups"); + } catch (PDException e) { + return toJSON(e); + } + } + + /** + * 返回每个store上的leader + * + * @return + */ + @GetMapping(value = "/shardLeaders") + public Map> shardLeaders() throws PDException { + Map> leaders = new HashMap<>(); + try { + + List groups = pdRestService.getShardGroups(); + 
groups.forEach(group -> {
+                group.getShardsList().forEach(shard -> {
+                    if (shard.getRole() == Metapb.ShardRole.Leader) {
+                        try {
+                            String ip = pdRestService.getStore(shard.getStoreId()).getRaftAddress();
+                            if (!leaders.containsKey(ip)) {
+                                leaders.put(ip, new ArrayList<>());
+                            }
+                            leaders.get(ip).add(group.getId());
+                        } catch (PDException e) {
+                            throw new RuntimeException(e);
+                        }
+                    }
+                });
+            });
+        } catch (PDException e) {
+            throw e;
+        }
+        return leaders;
+    }
+
+    @GetMapping(value = "/balanceLeaders")
+    public Map balanceLeaders() throws PDException {
+        return pdRestService.balancePartitionLeader();
+    }
+
+    @DeleteMapping(value = "/store/{storeId}")
+    public String removeStore(@PathVariable(value = "storeId") Long storeId) {
+        try {
+            pdRestService.removeStore(storeId);
+        } catch (PDException e) {
+            // return the error as JSON instead of StackTraceElement[].toString()
+            return toJSON(e);
+        }
+        return "OK";
+    }
+
+    @PostMapping(value = "/store/log", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getStoreLog(@RequestBody TimeRangeRequest request) {
+        try {
+            Date dateStart = DateUtil.getDate(request.getStartTime());
+            Date dateEnd = DateUtil.getDate(request.getEndTime());
+            List changedStore = pdRestService.getStoreStatusLog(dateStart.getTime(),
+                                                                dateEnd.getTime());
+            if (changedStore != null) {
+                JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry
+                        .newBuilder().add(Metapb.Store.getDescriptor()).build();
+                return toJSON(changedStore, registry);
+            } else {
+                return toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "error"));
+            }
+        } catch (PDException e) {
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "store/{storeId}", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getStore(@PathVariable long storeId) {
+        // statistics of a single store
+        Metapb.Store store = null;
+        try {
+            store = pdRestService.getStore(storeId);
+        } catch (PDException e) {
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+        if (store != null) {
+            StoreStatistics resultStoreStats = new StoreStatistics(store);
+            return new RestApiResponse(resultStoreStats, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+        } else {
+            return new RestApiResponse(null, Pdpb.ErrorType.STORE_ID_NOT_EXIST, Pdpb.ErrorType.STORE_ID_NOT_EXIST.name());
+        }
+    }
+
+    @GetMapping(value = "storesAndStats", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public String getStoresAndStats() {
+        // for debug use
+        try {
+            List stores = pdRestService.getStores("");
+            return toJSON(stores, "stores");
+        } catch (PDException e) {
+            log.error("PD exception:" + e);
+            return toJSON(e);
+        }
+    }
+
+    @GetMapping(value = "store_monitor/json/{storeId}", produces = MediaType.APPLICATION_JSON_VALUE)
+    @ResponseBody
+    public RestApiResponse getStoreMonitorData(@PathVariable long storeId) {
+        try {
+            List> result = pdRestService.getMonitorData(storeId);
+            return new RestApiResponse(result, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name());
+        } catch (PDException e) {
+            return new RestApiResponse(null, e.getErrorCode(), e.getMessage());
+        }
+    }
+
+    @GetMapping(value = "store_monitor/{storeId}")
+    @ResponseBody
+    public String getStoreMonitorDataText(@PathVariable long storeId) {
+        try {
+            return pdRestService.getMonitorDataText(storeId);
+        } catch (PDException e) {
+            return "error:" + e.getErrorCode() + e.getMessage();
+        }
+    }
+
+    @Data
+    class Partition {
+        // partition info as seen from this store
+        int partitionId;
+        String graphName;
+        String role; // shard role
+        String workState;
+        long dataSize; 
// 占用的存储空间 + + Partition() { + } + + Partition(Metapb.GraphStats graphStats) { + partitionId = graphStats.getPartitionId(); + graphName = graphStats.getGraphName(); + final int postfixLength = 2; + graphName = graphName.substring(0, graphName.length() - postfixLength); + role = String.valueOf(graphStats.getRole()); + workState = String.valueOf(graphStats.getWorkState()); + dataSize = graphStats.getApproximateSize(); + } + } + + @Data + class StoreStatistics { + //store的统计信息 + long storeId; + String address; + String raftAddress; + String version; + String state; + String deployPath; + String dataPath; // 数据存储路径 + long startTimeStamp; + long registedTimeStamp; // 暂时取第一次心跳时间作为注册时间 + long lastHeartBeat; // 上一次心跳时间 + long capacity; + long available; + int partitionCount; + int graphSize; + long keyCount; + long leaderCount; // shard role = 'Leader'的分区数量 + String serviceName; + String serviceVersion; + long serviceCreatedTimeStamp; // 服务创建时间 + List partitions; + + StoreStatistics(Metapb.Store store) { + if (store != null) { + storeId = store.getId(); + address = store.getAddress(); + raftAddress = store.getRaftAddress(); + state = String.valueOf(store.getState()); + version = store.getVersion(); + deployPath = store.getDeployPath(); + final String prefix = "file:"; + if ((deployPath != null) && (deployPath.startsWith(prefix))){ + // 去掉前缀 + deployPath = deployPath.substring(prefix.length()); + } + if ((deployPath != null) && (deployPath.contains(".jar"))){ + // 去掉jar包之后的信息 + deployPath = deployPath.substring(0, deployPath.indexOf(".jar") + 4); + } + dataPath = store.getDataPath(); + startTimeStamp = store.getStartTimestamp(); + try { + serviceCreatedTimeStamp = pdRestService.getStore(store.getId()) + .getStats().getStartTime(); // 实例时间 + final int base = 1000; + serviceCreatedTimeStamp *= base; // 转化为毫秒 + } catch (PDException e) { + e.printStackTrace(); + serviceCreatedTimeStamp = store.getStartTimestamp(); + } + registedTimeStamp = store.getStartTimestamp(); // 注册时间 + lastHeartBeat = store.getLastHeartbeat(); + capacity = store.getStats().getCapacity(); + available = store.getStats().getAvailable(); + partitionCount = store.getStats().getPartitionCount(); + serviceName = address + "-store"; + serviceVersion = store.getVersion(); + List graphStatsList = store.getStats().getGraphStatsList(); + List partitionStatsList = new ArrayList<>(); // 保存分区信息 + HashSet graphNameSet = new HashSet<>(); // 用于统计图的数量 + HashSet leaderPartitionIds = new HashSet(); // 统计leader的分区数量 + // 构造分区信息(store中存储的图信息) + Map partition2KeyCount = new HashMap<>(); + for (Metapb.GraphStats graphStats : graphStatsList) { + String graphName = graphStats.getGraphName(); + // 图名只保留/g /m /s前面的部分 + final int postfixLength = 2; + graphNameSet.add(graphName.substring(0, graphName.length() - postfixLength)); + if ((graphStats.getGraphName() != null) && (graphStats.getGraphName().endsWith("/g"))) { + Partition pt = new Partition(graphStats); + partitionStatsList.add(pt); + } + // 统计每个分区的keyCount + partition2KeyCount.put(graphStats.getPartitionId(), graphStats.getApproximateKeys()); + if (graphStats.getRole() == Metapb.ShardRole.Leader) { + leaderPartitionIds.add(graphStats.getPartitionId()); + } + } + for (Map.Entry entry : partition2KeyCount.entrySet()) { + keyCount += entry.getValue(); + } + partitions = partitionStatsList; + graphSize = graphNameSet.size(); + leaderCount = leaderPartitionIds.size(); + } + + } + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java 
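A hedged sketch of the only mutation StoreAPI exposes, POST /v1/store/{storeId} to change a store's state. The JSON property name "storeState" is assumed from StoreRestRequest.getStoreState() (the model class is not part of this hunk), and the value must be a Metapb.StoreState enum name; only "Up" is referenced elsewhere in this patch.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class SetStoreStateExample {
        public static void main(String[] args) throws Exception {
            String pdRest = "http://127.0.0.1:8620";  // placeholder PD REST address
            long storeId = 1L;                        // placeholder store id
            String body = "{\"storeState\": \"Up\"}"; // assumed payload shape, see note above
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create(pdRest + "/v1/store/" + storeId))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build();
            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            // setStore() returns toJSON(newStore, "store") on success, toJSON(e) on failure
            System.out.println(response.body());
        }
    }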
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java new file mode 100644 index 0000000000..3f5ddc16c6 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java @@ -0,0 +1,85 @@ +package org.apache.hugegraph.pd.rest; + +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; + +import org.apache.hugegraph.pd.service.PDRestService; + +import lombok.extern.slf4j.Slf4j; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import java.util.List; +import java.util.Map; + +@RestController +@Slf4j +@RequestMapping("/v1/task") +public class TaskAPI extends API { + @Autowired + PDRestService pdRestService; + + @GetMapping(value = "/patrolStores", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String patrolStores() { + try { + List stores = pdRestService.patrolStores(); + return toJSON(stores, "stores"); + } catch (PDException e) { + e.printStackTrace(); + return toJSON(e); + } + } + + @GetMapping(value = "/patrolPartitions", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String patrolPartitions() { + try { + List partitions = pdRestService.patrolPartitions(); + return toJSON(partitions, "partitions"); + } catch (PDException e) { + e.printStackTrace(); + return toJSON(e); + } + } + + @GetMapping(value = "/balancePartitions", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public Map> balancePartitions() { + try { + Map> partitions = pdRestService.balancePartitions(); + return partitions; + } catch (PDException e) { + e.printStackTrace(); + return null; + } + } + + @GetMapping(value = "/splitPartitions", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String splitPartitions() { + try { + List partitions = pdRestService.splitPartitions(); + return toJSON(partitions, "partitions"); + } catch (PDException e) { + e.printStackTrace(); + return toJSON(e); + } + } + @GetMapping(value = "/balanceLeaders") + public Map balanceLeaders() throws PDException { + return pdRestService.balancePartitionLeader(); + } + + @GetMapping(value = "/compact") + public String dbCompaction() throws PDException { + pdRestService.dbCompaction(); + return "compact ok"; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java new file mode 100644 index 0000000000..fe769cd473 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java @@ -0,0 +1,140 @@ +package org.apache.hugegraph.pd.rest; + +import com.baidu.hugegraph.pd.RegistryService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.discovery.Query; +import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import com.baidu.hugegraph.pd.meta.MetadataFactory; +import com.baidu.hugegraph.pd.meta.QueueStore; + +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.watch.PDWatchSubject; + +import 
com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Parser; +import lombok.extern.slf4j.Slf4j; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.*; + +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author lynn.bond@hotmail.com on 2022/2/9 + */ +@RestController +@Slf4j +@RequestMapping("/test") +public class TestAPI { + + @Autowired + private PDConfig pdConfig; + + @GetMapping(value = "/discovery/{appName}", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String discovery(@PathVariable(value = "appName", required = true)String appName){ + RegistryService register =new RegistryService(pdConfig); + // Query query=Query.newBuilder().setAppName("hugegraph").build(); + AtomicLong label = new AtomicLong(); + HashMap labels = new HashMap<>(); + String labelValue = String.valueOf(label.incrementAndGet()); + //labels.put("address",labelValue); + Query query = Query.newBuilder().build(); + // Query query = Query.newBuilder().setAppName("hugegraph").set.build(); + + return register.getNodes(query).toString(); + } + + @GetMapping(value = "/pulse", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String notifyClient() { + PDPulseSubject.notifyClient( + PartitionHeartbeatResponse.newBuilder() + .setPartition(Metapb.Partition.newBuilder() + .setId(8) + .setGraphName("graphName8")) + + .setChangeShard( + ChangeShard.newBuilder() + .setChangeTypeValue(8) + .addShard(Metapb.Shard.newBuilder() + .setRoleValue(8) + .setStoreId(8) + ) + ) + + ); + return "partition"; + } + + @GetMapping(value = "/partition", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String noticePartition() { + PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.ALTER, "graph-test", 99); + return "partition"; + } + + @PutMapping(value = "/queue", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String testPutQueue() { + this.putQueue(); + return "queue"; + } + + public void putQueue(){ + PartitionHeartbeatResponse response=PartitionHeartbeatResponse.newBuilder() + .setPartition(Metapb.Partition.newBuilder() + .setId(9) + .setGraphName("graphName")) + .setChangeShard( + ChangeShard.newBuilder() + .setChangeTypeValue(9) + .addShard(Metapb.Shard.newBuilder() + .setRoleValue(9) + .setStoreId(9) + ) + ).build(); + + Metapb.QueueItem.Builder builder=Metapb.QueueItem.newBuilder() + .setItemId("item-id") + .setItemClass("item-class") + .setItemContent(response.toByteString()); + + + QueueStore store= MetadataFactory.newQueueStore(pdConfig); + + try { + store.addItem(builder.setItemId("item-id-1").build()); + store.addItem(builder.setItemId("item-id-2").build()); + store.addItem(builder.setItemId("item-id-3").build()); + } catch (PDException e) { + e.printStackTrace(); + } + List queue=null; + try { + queue=store.getQueue(); + } catch (PDException e) { + e.printStackTrace(); + } + Parser parser= PartitionHeartbeatResponse.parser(); + + queue.stream().forEach(e->{ + PartitionHeartbeatResponse buf=null; + try { + buf=parser.parseFrom(e.getItemContent()); + } catch (InvalidProtocolBufferException ex) { + ex.printStackTrace(); + } + PDPulseSubject.notifyClient( PartitionHeartbeatResponse.newBuilder(buf)); + }); + + + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java 
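For context on the DiscoveryService below: register() is a unary RPC (its leader redirect uses asyncUnaryCall), nodes registering with appName "hg" must carry a "cores" label, and the heartbeat-timeout multiplier comes from pdConfig.getDiscovery().getHeartbeatOutTimes(). A minimal registration sketch, assuming the standard protoc-generated blocking stub and placeholder addresses and values not taken from this patch:

    import com.baidu.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc;
    import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo;
    import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo;
    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;
    import java.util.Map;

    public class RegisterExample {
        public static void main(String[] args) {
            NodeInfo node = NodeInfo.newBuilder()
                    .setAppName("hg")                   // "hg" triggers the license/core-count check in register()
                    .setVersion("1.0.0")
                    .setAddress("127.0.0.1:8080")       // placeholder service address
                    .putAllLabels(Map.of("cores", "4")) // required for appName "hg", see DiscoveryService.register()
                    .setInterval(10000L)                // heartbeat interval (unit is an assumption)
                    .build();
            ManagedChannel channel = ManagedChannelBuilder
                    .forTarget("127.0.0.1:8686")        // placeholder PD gRPC address
                    .usePlaintext()
                    .build();
            RegisterInfo reply = DiscoveryServiceGrpc.newBlockingStub(channel).register(node);
            System.out.println(reply);
            channel.shutdown();
        }
    }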
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java new file mode 100644 index 0000000000..b8554431d5 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java @@ -0,0 +1,166 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.RegistryService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.common.PDRuntimeException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; +import com.baidu.hugegraph.pd.grpc.discovery.Query; +import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; +import org.apache.hugegraph.pd.license.LicenseVerifierService; +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import com.baidu.hugegraph.pd.raft.RaftEngine; +import com.baidu.hugegraph.pd.raft.RaftStateListener; +import org.apache.hugegraph.pd.watch.PDWatchSubject; +import io.grpc.CallOptions; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.stub.AbstractBlockingStub; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import javax.annotation.PostConstruct; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author zhangyingjie + * @date 2021/12/20 + **/ +@Slf4j +@GRpcService +public class DiscoveryService extends DiscoveryServiceGrpc.DiscoveryServiceImplBase implements RaftStateListener { + + @Autowired + private PDConfig pdConfig; + static final AtomicLong id = new AtomicLong(); + RegistryService register = null; + LicenseVerifierService licenseVerifierService; + + @PostConstruct + public void init() throws PDException { + log.info("PDService init………… {}", pdConfig); + RaftEngine.getInstance().init(pdConfig.getRaft()); + RaftEngine.getInstance().addStateListener(this); + register = new RegistryService(pdConfig); + licenseVerifierService = new LicenseVerifierService(pdConfig); + } + + private Pdpb.ResponseHeader newErrorHeader(PDException e) { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage())).build(); + return header; + } + + private static final String CORES = "cores"; + + @Override + public void register(NodeInfo request, io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(DiscoveryServiceGrpc.getRegisterMethod(), request, observer); + return; + } + int outTimes = pdConfig.getDiscovery().getHeartbeatOutTimes(); + RegisterInfo registerInfo; + try { + if (request.getAppName().equals("hg")) { + Query queryRequest = Query.newBuilder().setAppName(request.getAppName()) + .setVersion(request.getVersion()).build(); + NodeInfos nodes = register.getNodes(queryRequest); + String address = request.getAddress(); + int nodeCount = nodes.getInfoCount() + 1; + for (NodeInfo node : nodes.getInfoList()) { + if (node.getAddress().equals(address)) { + nodeCount = nodes.getInfoCount(); + break; + } + } + Map labelsMap = request.getLabelsMap(); + String coreCount = labelsMap.get(CORES); + if (StringUtils.isEmpty(coreCount)) { + throw new PDException(-1, "core count can not be null"); + } + int core = 
Integer.parseInt(coreCount); + licenseVerifierService.verify(core, nodeCount); + } + register.register(request, outTimes); + String valueId = request.getId(); + registerInfo = RegisterInfo.newBuilder().setNodeInfo(NodeInfo.newBuilder().setId( + "0".equals(valueId) ? String.valueOf(id.incrementAndGet()) : valueId).build()).build(); + + } catch (PDException e) { + registerInfo = RegisterInfo.newBuilder().setHeader(newErrorHeader(e)).build(); + log.debug("registerStore exception: ", e); + } catch (PDRuntimeException ex) { + Pdpb.Error error = Pdpb.Error.newBuilder().setTypeValue(ex.getErrorCode()) + .setMessage(ex.getMessage()).build(); + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build(); + registerInfo = RegisterInfo.newBuilder().setHeader(header).build(); + log.debug("registerStore exception: ", ex); + } catch (Exception e) { + Pdpb.Error error = Pdpb.Error.newBuilder().setTypeValue(Pdpb.ErrorType.UNKNOWN.getNumber()) + .setMessage(e.getMessage()).build(); + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build(); + registerInfo = RegisterInfo.newBuilder().setHeader(header).build(); + } + observer.onNext(registerInfo); + observer.onCompleted(); + } + + public void getNodes(Query request, io.grpc.stub.StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(DiscoveryServiceGrpc.getGetNodesMethod(), request, responseObserver); + return; + } + responseObserver.onNext(register.getNodes(request)); + responseObserver.onCompleted(); + } + + private ManagedChannel channel; + + public boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + + private > void redirectToLeader( + MethodDescriptor method, ReqT req, io.grpc.stub.StreamObserver observer) { + try { + if (channel == null) { + synchronized (this) { + if (channel == null) { + channel = ManagedChannelBuilder + .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()).usePlaintext() + .build(); + } + } + log.info("Grpc get leader address {}", RaftEngine.getInstance().getLeaderGrpcAddress()); + } + + io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), req, + observer); + } catch (Exception e) { + e.printStackTrace(); + } + + } + + @Override + public synchronized void onRaftLeaderChanged() { + channel = null; + if (!isLeader()) { + try { + String message = "lose leader"; + PDPulseSubject.notifyError(message); + PDWatchSubject.notifyError(message); + } catch (Exception e) { + log.error("notifyError error {}", e); + } + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java new file mode 100644 index 0000000000..59a4ffd30c --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java @@ -0,0 +1,525 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.KvService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.kv.K; +import com.baidu.hugegraph.pd.grpc.kv.KResponse; +import com.baidu.hugegraph.pd.grpc.kv.Kv; +import com.baidu.hugegraph.pd.grpc.kv.KvResponse; +import com.baidu.hugegraph.pd.grpc.kv.KvServiceGrpc; +import com.baidu.hugegraph.pd.grpc.kv.LockRequest; +import com.baidu.hugegraph.pd.grpc.kv.LockResponse; +import com.baidu.hugegraph.pd.grpc.kv.ScanPrefixResponse; +import com.baidu.hugegraph.pd.grpc.kv.TTLRequest; +import 
com.baidu.hugegraph.pd.grpc.kv.TTLResponse; +import com.baidu.hugegraph.pd.grpc.kv.WatchKv; +import com.baidu.hugegraph.pd.grpc.kv.WatchRequest; +import com.baidu.hugegraph.pd.grpc.kv.WatchResponse; +import com.baidu.hugegraph.pd.grpc.kv.WatchState; +import com.baidu.hugegraph.pd.grpc.kv.WatchType; +import com.baidu.hugegraph.pd.raft.RaftEngine; +import com.baidu.hugegraph.pd.raft.RaftStateListener; +import org.apache.hugegraph.pd.watch.KvWatchSubject; +import io.grpc.ManagedChannel; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import javax.annotation.PostConstruct; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +/** + * kv存储的核心实现类 + */ +@Slf4j +@GRpcService +public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implements RaftStateListener, ServiceGrpc { + + @Autowired + private PDConfig pdConfig; + KvService kvService; + private ManagedChannel channel = null; + private KvWatchSubject subjects; + private ScheduledExecutorService executor; + + @PostConstruct + public void init() { + RaftEngine.getInstance().init(pdConfig.getRaft()); + RaftEngine.getInstance().addStateListener(this); + kvService = new KvService(pdConfig); + subjects = new KvWatchSubject(pdConfig); + executor = Executors.newScheduledThreadPool(1); + executor.scheduleWithFixedDelay(() -> { + if (isLeader()) { + subjects.keepClientAlive(); + } + }, 0, KvWatchSubject.WATCH_TTL * 1 / 2, TimeUnit.MILLISECONDS); + } + + /** + * 普通的put + * + * @param request + * @param responseObserver + */ + public void put(Kv request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutMethod(), request, responseObserver); + return; + } + KvResponse response; + KvResponse.Builder builder = KvResponse.newBuilder(); + try { + String key = request.getKey(); + String value = request.getValue(); + this.kvService.put(key, value); + WatchKv watchKV = getWatchKv(key, value); + subjects.notifyAllObserver(key, WatchType.Put, new WatchKv[]{watchKV}); + response = builder.setHeader(getResponseHeader()).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + + /** + * 普通的get + * + * @param request + * @param responseObserver + */ + public void get(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getGetMethod(), request, responseObserver); + return; + } + KResponse response; + KResponse.Builder builder = KResponse.newBuilder(); + try { + String value = this.kvService.get(request.getKey()); + builder.setHeader(getResponseHeader()); + if (value != null) builder.setValue(value); + response = builder.build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getGetMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 普通的delete + * + * @param request + * @param 
responseObserver + */ + public void delete(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeleteMethod(), request, responseObserver); + return; + } + KvResponse response; + KvResponse.Builder builder = KvResponse.newBuilder(); + try { + String key = request.getKey(); + Kv deleted = this.kvService.delete(key); + if (deleted.getValue() != null) { + WatchKv watchKV = getWatchKv(deleted.getKey(), deleted.getValue()); + subjects.notifyAllObserver(key, WatchType.Delete, new WatchKv[]{watchKV}); + } + response = builder.setHeader(getResponseHeader()).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeleteMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + + /** + * 按前缀删除 + * + * @param request + * @param responseObserver + */ + public void deletePrefix(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request, responseObserver); + return; + } + KvResponse response; + KvResponse.Builder builder = KvResponse.newBuilder(); + try { + String key = request.getKey(); + List kvs = this.kvService.deleteWithPrefix(key); + WatchKv[] watchKvs = new WatchKv[kvs.size()]; + int i = 0; + for (Kv kv : kvs) { + WatchKv watchKV = getWatchKv(kv.getKey(), kv.getValue()); + watchKvs[i++] = watchKV; + } + subjects.notifyAllObserver(key, WatchType.Delete, watchKvs); + response = builder.setHeader(getResponseHeader()).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 按前缀查询 + * + * @param request + * @param responseObserver + */ + public void scanPrefix(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request, responseObserver); + return; + } + ScanPrefixResponse response; + ScanPrefixResponse.Builder builder = ScanPrefixResponse.newBuilder(); + try { + Map kvs = this.kvService.scanWithPrefix(request.getKey()); + response = builder.setHeader(getResponseHeader()).putAllKvs(kvs).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + AtomicLong count = new AtomicLong(); + + /** + * 获取随机非0字符串做Id + * + * @return + */ + private long getRandomLong() { + + long result; + Random random = new Random(); + while ((result = random.nextLong()) == 0) { + continue; + } + return result; + } + String msg = "node is not leader,it is necessary to redirect to the leader on the client"; + /** + * 普通的watch + * @param request + * @param responseObserver + */ + public void watch(WatchRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + responseObserver.onError(new PDException(-1, msg)); + return; + } + try { + clientWatch(request, responseObserver, false); + } catch (PDException e) { + if (!isLeader()) { + try { + responseObserver.onError(new PDException(-1, msg)); + return; + } catch 
(Exception e1) { + log.error("redirect with error: ", e1); + } + } + responseObserver.onError(e); + } + } + + /** + * 普通的前缀watch + * @param request + * @param responseObserver + */ + public void watchPrefix(WatchRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + responseObserver.onError(new PDException(-1, msg)); + return; + } + try { + clientWatch(request, responseObserver, true); + } catch (PDException e) { + if (!isLeader()) { + try { + responseObserver.onError(new PDException(-1, msg)); + return; + } catch (Exception e1) { + log.error("redirect with error: ", e1); + } + } + responseObserver.onError(e); + } + } + + /** + * 上面两个方法的通用方式 + * @param request + * @param responseObserver + * @param isPrefix + * @throws PDException + */ + private void clientWatch(WatchRequest request, StreamObserver responseObserver, + boolean isPrefix) throws PDException { + try { + String key = request.getKey(); + long clientId = request.getClientId(); + WatchResponse.Builder builder = WatchResponse.newBuilder(); + WatchResponse response; + if (request.getState().equals(WatchState.Starting) && clientId == 0) { + clientId = getRandomLong(); + response = builder.setClientId(clientId).setState(WatchState.Starting).build(); + } else { + response = builder.setState(WatchState.Started).build(); + } + String delimiter = isPrefix ? KvWatchSubject.PREFIX_DELIMITER : KvWatchSubject.KEY_DELIMITER; + subjects.addObserver(key, clientId, responseObserver, delimiter); + synchronized (responseObserver) { + responseObserver.onNext(response); + } + } catch (PDException e) { + if (!isLeader()){ + throw new PDException(-1, msg); + } + throw new PDException(e.getErrorCode(), e); + } + + } + + + /** + * 加锁 + * @param request + * @param responseObserver + */ + public void lock(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockMethod(), request, responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if (clientId == 0) clientId = getRandomLong(); + boolean locked = this.kvService.lock(request.getKey(), request.getTtl(), clientId); + response = builder.setHeader(getResponseHeader()).setSucceed(locked).setClientId(clientId).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockMethod(), request, responseObserver); + return; + } + log.error("lock with error :", e); + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + public void lockWithoutReentrant(LockRequest request, + StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockWithoutReentrantMethod(), request, + responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if (clientId == 0) { + clientId = getRandomLong(); + } + boolean locked = this.kvService.lockWithoutReentrant( + request.getKey(), request.getTtl(), clientId); + response = builder.setHeader(getResponseHeader()).setSucceed(locked) + .setClientId(clientId).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockWithoutReentrantMethod(), request, + responseObserver); + return; + } + log.error("lock with error :", e); + response = 
builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + public void isLocked(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getIsLockedMethod(), request, responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + boolean locked = this.kvService.locked(request.getKey()); + response = builder.setHeader(getResponseHeader()).setSucceed(locked).build(); + } catch (PDException e) { + log.error("lock with error :", e); + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getIsLockedMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 解锁 + * @param request + * @param responseObserver + */ + public void unlock(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getUnlockMethod(), request, responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if (clientId == 0) throw new PDException(-1, "incorrect clientId: 0"); + boolean unlocked = this.kvService.unlock(request.getKey(), clientId); + response = builder.setHeader(getResponseHeader()).setSucceed(unlocked).setClientId(clientId).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getUnlockMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + + /** + * 锁续活 + * @param request + * @param responseObserver + */ + public void keepAlive(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request, responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if (clientId == 0) throw new PDException(-1, "incorrect clientId: 0"); + boolean alive = this.kvService.keepAlive(request.getKey(), clientId); + response = builder.setHeader(getResponseHeader()).setSucceed(alive).setClientId(clientId).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 带超时时间的put + * @param request + * @param responseObserver + */ + public void putTTL(TTLRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutTTLMethod(), request, responseObserver); + return; + } + TTLResponse response; + TTLResponse.Builder builder = TTLResponse.newBuilder(); + try { + this.kvService.put(request.getKey(), request.getValue(), request.getTtl()); + response = builder.setHeader(getResponseHeader()).setSucceed(true).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutTTLMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + 
responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 续活带有超时时间的key + * @param request + * @param responseObserver + */ + public void keepTTLAlive(TTLRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request, responseObserver); + return; + } + TTLResponse response; + TTLResponse.Builder builder = TTLResponse.newBuilder(); + try { + this.kvService.keepAlive(request.getKey()); + response = builder.setHeader(getResponseHeader()).setSucceed(true).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + private WatchKv getWatchKv(String key, String value) { + WatchKv kv = WatchKv.newBuilder().setKey(key).setValue(value).build(); + return kv; + } + + @Override + public void onRaftLeaderChanged() { + subjects.notifyClientChangeLeader(); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java new file mode 100644 index 0000000000..aae14ec789 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java @@ -0,0 +1,101 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.pulse.HgPdPulseGrpc; +import com.baidu.hugegraph.pd.grpc.pulse.PulseRequest; +import com.baidu.hugegraph.pd.grpc.pulse.PulseResponse; +import com.baidu.hugegraph.pd.meta.MetadataFactory; +import com.baidu.hugegraph.pd.meta.QueueStore; +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import com.baidu.hugegraph.pd.raft.RaftEngine; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import java.util.Collections; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/4 + */ + +@Slf4j +@GRpcService +public class PDPulseService extends HgPdPulseGrpc.HgPdPulseImplBase { + + @Autowired + private PDConfig pdConfig; + + private QueueStore queueStore=null; + + public PDPulseService(){ + PDPulseSubject.setQueueRetrieveFunction(()->getQueue()); + PDPulseSubject.setQueueDurableFunction(getQueueDurableFunction()); + PDPulseSubject.setQueueRemoveFunction(getQueueRemoveFunction()); + } + + @Override + public StreamObserver pulse(StreamObserver responseObserver) { + return PDPulseSubject.addObserver(responseObserver); + } + + private static Supplier> queueRetrieveFunction = () -> Collections.emptyList(); + private static Function queueDurableFunction = (e) -> true; + private static Function queueRemoveFunction = (e) -> true; + + + private Function getQueueRemoveFunction(){ + return itemId->{ + try{ + this.getQueueStore().removeItem(itemId); + return true; + }catch (Throwable t){ + log.error("Failed to remove item from store, item-id: "+itemId+", cause by:",t); + } + return false; + }; + } + + private Function getQueueDurableFunction(){ + return item->{ + try{ + this.getQueueStore().addItem(item); + return true; + }catch (Throwable t){ + log.error("Failed to add 
item to store, item: "+item.toString()+", cause by:",t); + } + return false; + }; + } + + private boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + + private List getQueue(){ + + if(!isLeader()){ + return Collections.emptyList(); + } + + try{ + return this.getQueueStore().getQueue(); + }catch (Throwable t){ + log.error("Failed to retrieve queue from QueueStore, cause by:",t); + } + + log.warn("Returned empty queue list."); + return Collections.emptyList(); + } + + private QueueStore getQueueStore(){ + if(this.queueStore==null){ + this.queueStore=MetadataFactory.newQueueStore(pdConfig); + } + return this.queueStore; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java new file mode 100644 index 0000000000..a7c0876fdd --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java @@ -0,0 +1,251 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.ConfigService; +import com.baidu.hugegraph.pd.LogService; +import com.baidu.hugegraph.pd.StoreMonitorDataService; +import com.baidu.hugegraph.pd.TaskScheduleService; +import com.baidu.hugegraph.pd.PartitionService; +import com.baidu.hugegraph.pd.StoreNodeService; +import com.baidu.hugegraph.pd.common.HgAssert; +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; +import com.baidu.hugegraph.pd.grpc.discovery.Query; +import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; +import org.apache.hugegraph.pd.model.RegistryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestResponse; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.InitializingBean; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +@Slf4j +@Service +public class PDRestService implements InitializingBean { + private static final String EMPTY_STRING = ""; + private StoreNodeService storeNodeService; + private PartitionService partitionService; + private TaskScheduleService monitorService; + private ConfigService configService; + private LogService logService; + private StoreMonitorDataService storeMonitorDataService; + @Autowired + PDService pdService; + @Autowired + DiscoveryService discoveryService; + + /** + * 初始化 + * + * @throws Exception + */ + @Override + public void afterPropertiesSet() throws Exception { + storeNodeService = pdService.getStoreNodeService(); + partitionService = pdService.getPartitionService(); + monitorService = pdService.getTaskService(); + configService = pdService.getConfigService(); + logService = pdService.getLogService(); + storeMonitorDataService = pdService.getStoreMonitorDataService(); + HgAssert.isNotNull(storeNodeService, "storeNodeService does not initialize"); + HgAssert.isNotNull(partitionService, "partitionService does not initialize"); + } + + + public List getStores(String graphName) throws PDException { + return storeNodeService.getStores(graphName); + } + + public Metapb.Store getStore(long storeId) throws 
PDException { + return storeNodeService.getStore(storeId); + } + + public List getShardGroups() throws PDException { + return storeNodeService.getShardGroups(); + } + + public Metapb.Store updateStore(Metapb.Store store) throws PDException { + logService.insertLog(LogService.NODE_CHANGE, LogService.REST, store); + return storeNodeService.updateStore(store); + } + + public boolean removeStore(Long storeId) throws PDException { + if (storeId == null) return false; + return 0 != storeNodeService.removeStore(storeId); + } + + public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDException { + return configService.setGraphSpace(graphSpace); + } + + public List getGraphSpaces() throws PDException { + return configService.getGraphSpace(EMPTY_STRING); + } + + public Metapb.GraphSpace getGraphSpace(String graphSpaceName) throws PDException { + return configService.getGraphSpace(graphSpaceName).get(0); + } + + public List getGraphs() throws PDException { + return partitionService.getGraphs(); + } + + public Metapb.Graph getGraph(String graphName) throws PDException { + return partitionService.getGraph(graphName); + } + + public Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { + return partitionService.updateGraph(graph); + } + + public List getPartitions(String graphName) { + return partitionService.getPartitions(graphName); + } + + public List patrolStores() throws PDException { + return monitorService.patrolStores(); + } + + public List patrolPartitions() throws PDException { + return monitorService.patrolPartitions(); + } + + public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws PDException { + return partitionService.getPartitionStats(graphName, partitionId); + } + + public List getPartitionStatus(String graphName) throws PDException { + return partitionService.getPartitionStatus(graphName); + } + + public Map> balancePartitions() throws PDException { + return monitorService.balancePartitionShard(); + } + + public List splitPartitions() throws PDException { + return monitorService.autoSplitPartition(); + } + + public List getStoreStats(boolean isActive) throws PDException { + return storeNodeService.getStoreStatus(isActive); + } + + public List> getMonitorData(long storeId) throws PDException { + return storeMonitorDataService.getStoreMonitorData(storeId); + } + + public String getMonitorDataText(long storeId) throws PDException { + return storeMonitorDataService.getStoreMonitorDataText(storeId); + } + + public RegistryRestResponse register(NodeInfo nodeInfo) throws PDException { + CountDownLatch latch = new CountDownLatch(1); + final RegisterInfo[] info = {null}; + RegistryRestResponse response = new RegistryRestResponse(); + try { + StreamObserver observer = new StreamObserver() { + @Override + public void onNext(RegisterInfo value) { + info[0] = value; + latch.countDown(); + } + + @Override + public void onError(Throwable t) { + latch.countDown(); + } + + @Override + public void onCompleted() { + latch.countDown(); + } + }; + this.discoveryService.register(nodeInfo, observer); + latch.await(); + Pdpb.Error error = info[0].getHeader().getError(); + response.setErrorType(error.getType()); + response.setMessage(error.getMessage()); + } catch (InterruptedException e) { + response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + public ArrayList getNodeInfo(Query request) throws PDException { + CountDownLatch latch = new CountDownLatch(1); + final NodeInfos[] 
info = {null}; + RegistryRestResponse response = new RegistryRestResponse(); + ArrayList registryRestRequests = null; + try { + StreamObserver observer = new StreamObserver() { + @Override + public void onNext(NodeInfos value) { + info[0] = value; + latch.countDown(); + } + + @Override + public void onError(Throwable t) { + latch.countDown(); + } + + @Override + public void onCompleted() { + latch.countDown(); + } + }; + this.discoveryService.getNodes(request, observer); + latch.await(); + List infoList = info[0].getInfoList(); + registryRestRequests = new ArrayList(infoList.size()); + for (int i = 0; i < infoList.size(); i++) { + NodeInfo element = infoList.get(i); + RegistryRestRequest registryRestRequest = new RegistryRestRequest(); + registryRestRequest.setAddress(element.getAddress()); + registryRestRequest.setAppName(element.getAppName()); + registryRestRequest.setVersion(element.getVersion()); + registryRestRequest.setInterval(String.valueOf(element.getInterval())); + HashMap labels = new HashMap<>(); + labels.putAll(element.getLabelsMap()); + registryRestRequest.setLabels(labels); + registryRestRequests.add(registryRestRequest); + } + } catch (InterruptedException e) { + response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return registryRestRequests; + } + + public List getStoreStatusLog(Long start, Long end) throws PDException { + return logService.getLog(LogService.NODE_CHANGE, start, end); + } + + + public List getPartitionLog(Long start, Long end) throws PDException { + return logService.getLog(LogService.PARTITION_CHANGE, start, end); + } + + public Map balancePartitionLeader() throws PDException { + return monitorService.balancePartitionLeader(true); + } + + public void dbCompaction() throws PDException { + monitorService.dbCompaction(""); + } + + public List getShardList(int partitionId) throws PDException { + return storeNodeService.getShardList(partitionId); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java new file mode 100644 index 0000000000..a8bb78050f --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @@ -0,0 +1,1547 @@ +package org.apache.hugegraph.pd.service; + +import com.alipay.sofa.jraft.Status; +import com.baidu.hugegraph.pd.ConfigService; +import com.baidu.hugegraph.pd.IdService; +import com.baidu.hugegraph.pd.LogService; +import com.baidu.hugegraph.pd.PartitionInstructionListener; +import com.baidu.hugegraph.pd.PartitionService; +import com.baidu.hugegraph.pd.PartitionStatusListener; +import com.baidu.hugegraph.pd.ShardGroupStatusListener; +import com.baidu.hugegraph.pd.StoreMonitorDataService; +import com.baidu.hugegraph.pd.StoreNodeService; +import com.baidu.hugegraph.pd.StoreStatusListener; +import com.baidu.hugegraph.pd.TaskScheduleService; +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.PDGrpc; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.Pdpb.PutLicenseRequest; +import com.baidu.hugegraph.pd.grpc.Pdpb.PutLicenseResponse; +import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard; +import com.baidu.hugegraph.pd.grpc.pulse.CleanPartition; +import com.baidu.hugegraph.pd.grpc.pulse.DbCompaction; +import 
com.baidu.hugegraph.pd.grpc.pulse.MovePartition; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition; +import com.baidu.hugegraph.pd.grpc.pulse.TransferLeader; +import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; +import com.baidu.hugegraph.pd.grpc.watch.WatchGraphResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchType; + +import org.apache.hugegraph.pd.license.LicenseVerifierService; +import org.apache.hugegraph.pd.watch.PDWatchSubject; +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.pulse.PulseListener; +import com.baidu.hugegraph.pd.raft.RaftEngine; +import com.baidu.hugegraph.pd.raft.RaftStateListener; +import org.apache.hugegraph.pd.util.grpc.StreamObserverUtil; + +import io.grpc.CallOptions; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +import org.apache.commons.io.FileUtils; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.util.CollectionUtils; + +import javax.annotation.PostConstruct; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@Slf4j +@GRpcService +public class PDService extends PDGrpc.PDImplBase implements RaftStateListener { + + static String TASK_ID_KEY = "task_id"; + @Autowired + private PDConfig pdConfig; + + private StoreNodeService storeNodeService; + private PartitionService partitionService; + private TaskScheduleService taskService; + private IdService idService; + private ConfigService configService; + private LogService logService; + private LicenseVerifierService licenseVerifierService; + + private StoreMonitorDataService storeMonitorDataService; + + private Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build(); + + private Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setTypeValue(errorCode).setMessage(errorMsg)).build(); + return header; + } + + private Pdpb.ResponseHeader newErrorHeader(PDException e) { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage())).build(); + return header; + } + + public StoreNodeService getStoreNodeService() { + return storeNodeService; + } + + public PartitionService getPartitionService() { + return partitionService; + } + + public TaskScheduleService getTaskService() { + return taskService; + } + + public ConfigService getConfigService() { + return configService; + } + + public StoreMonitorDataService getStoreMonitorDataService(){ + return this.storeMonitorDataService; + } + + public LogService getLogService() { + return logService; + } + + public LicenseVerifierService getLicenseVerifierService() { + return licenseVerifierService; + } + + /** + * 初始化 + */ + @PostConstruct + public void init() throws PDException { + log.info("PDService init………… {}", pdConfig); + configService = new 
ConfigService(pdConfig); + + RaftEngine.getInstance().addStateListener(this); + RaftEngine.getInstance().addStateListener(configService); + RaftEngine.getInstance().init(pdConfig.getRaft()); + //pdConfig = configService.loadConfig(); onLeaderChanged中加载 + storeNodeService = new StoreNodeService(pdConfig); + partitionService = new PartitionService(pdConfig, storeNodeService); + taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService); + idService = new IdService(pdConfig); + logService = new LogService(pdConfig); + storeMonitorDataService = new StoreMonitorDataService(pdConfig); + if (licenseVerifierService == null) { + licenseVerifierService = new LicenseVerifierService(pdConfig); + } + RaftEngine.getInstance().addStateListener(partitionService); + pdConfig.setIdService(idService); + + // 接收心跳消息 + PDPulseSubject.listenPartitionHeartbeat(new PulseListener() { + @Override + public void onNext(PartitionHeartbeatRequest request) { + try { + partitionService.partitionHeartbeat(request.getStates()); + } catch (PDException e) { + log.error("PartitionHeartbeatRequest onNext exception", e); + } + } + + @Override + public void onError(Throwable throwable) { + log.error("Received an error notice from pd-client", throwable); + } + + @Override + public void onCompleted() { + log.info("Received an completed notice from pd-client"); + } + }); + + + /** + * 监听分区指令,并转发给Store + */ + partitionService.addInstructionListener(new PartitionInstructionListener() { + private PartitionHeartbeatResponse.Builder getBuilder(Metapb.Partition partition) throws + PDException { + return PartitionHeartbeatResponse.newBuilder().setPartition(partition) + .setId(idService.getId(TASK_ID_KEY, 1)); + } + + @Override + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setChangeShard(changeShard)); + + } + + @Override + public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws + PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setTransferLeader(transferLeader)); + } + + @Override + public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws + PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setSplitPartition(splitPartition)); + + } + @Override + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws + PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setDbCompaction(dbCompaction)); + + } + + @Override + public void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setMovePartition(movePartition)); + } + + @Override + public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setCleanPartition(cleanPartition)); + } + + @Override + public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) + throws PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setKeyRange(partitionKeyRange)); + } + }); + + /** + * 监听分区状态改变消息,并转发给Client + */ + partitionService.addStatusListener(new PartitionStatusListener() { + @Override + public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { + PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.ALTER, + partition.getGraphName(), partition.getId()); + } + 
+ @Override + public void onPartitionRemoved(Metapb.Partition partition) { + PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.DEL, partition.getGraphName(), + partition.getId()); + + } + }); + + storeNodeService.addShardGroupStatusListener(new ShardGroupStatusListener() { + @Override + public void onShardListChanged(Metapb.ShardGroup shardGroup, Metapb.ShardGroup newShardGroup) { + // invoked before change, saved to db and update cache. + if (newShardGroup == null) { + PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.DEL, shardGroup.getId(), + shardGroup); + } else { + PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.ALTER, + shardGroup.getId(), newShardGroup); + } + } + + @Override + public void onShardListOp(Metapb.ShardGroup shardGroup) { + PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.USER_DEFINED, + shardGroup.getId(), shardGroup); + } + }); + + /** + * 监听store状态改变消息,并转发给Client + */ + storeNodeService.addStatusListener(new StoreStatusListener() { + + @Override + public void onStoreStatusChanged(Metapb.Store store, + Metapb.StoreState old, + Metapb.StoreState status) { + NodeEventType type = NodeEventType.NODE_EVENT_TYPE_UNKNOWN; + if (status == Metapb.StoreState.Up) { + type = NodeEventType.NODE_EVENT_TYPE_NODE_ONLINE; + } else if (status == Metapb.StoreState.Offline) { + type = NodeEventType.NODE_EVENT_TYPE_NODE_OFFLINE; + } + PDWatchSubject.notifyNodeChange(type, "", store.getId()); + } + + @Override + public void onGraphChange(Metapb.Graph graph, + Metapb.GraphState stateOld, + Metapb.GraphState stateNew) { + WatchGraphResponse wgr = WatchGraphResponse.newBuilder() + .setGraph(graph) + .build(); + WatchResponse.Builder wr = WatchResponse.newBuilder() + .setGraphResponse(wgr); + PDWatchSubject.notifyChange(WatchType.WATCH_TYPE_GRAPH_CHANGE, + wr); + } + + @Override + public void onStoreRaftChanged(Metapb.Store store) { + PDWatchSubject.notifyNodeChange(NodeEventType.NODE_EVENT_TYPE_NODE_RAFT_CHANGE, "", store.getId()); + } + }); + storeNodeService.init(partitionService); + partitionService.init(); + taskService.init(); + // log.info("init ......."); + // licenseVerifierService.init(); + + // UpgradeService upgradeService = new UpgradeService(pdConfig); + // upgradeService.upgrade(); + } + + /** + *
+     * Register a store. The first registration generates a new store_id; the store_id is the store's unique identifier.
+     * 
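+     * A minimal client-side sketch (assuming an io.grpc.ManagedChannel named channel pointing at the
+     * PD leader and a prepared Metapb.Store named store; these names are illustrative only):
+     * <pre>
+     * PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(channel);
+     * Pdpb.RegisterStoreResponse resp = stub.registerStore(
+     *         Pdpb.RegisterStoreRequest.newBuilder().setStore(store).build());
+     * long storeId = resp.getStoreId();   // id assigned on first registration
+     * </pre>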
+ */ + @Override + public void registerStore(Pdpb.RegisterStoreRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getRegisterStoreMethod(), request, observer); + return; + } + Pdpb.RegisterStoreResponse response = null; + try { + Metapb.Store store = storeNodeService.register(request.getStore()); + response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(okHeader).setStoreId(store.getId()) + .build(); + } catch (PDException e) { + response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("registerStore exception: ", e); + } + // 拉取所有分区信息,并返回 + observer.onNext(response); + observer.onCompleted(); + + } + + /** + * 根据store_id查找store + */ + @Override + public void getStore(Pdpb.GetStoreRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetStoreMethod(), request, observer); + return; + } + Pdpb.GetStoreResponse response = null; + try { + Metapb.Store store = storeNodeService.getStore(request.getStoreId()); + response = Pdpb.GetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); + } catch (PDException e) { + response = Pdpb.GetStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("{} getStore exception: {}", StreamObserverUtil.getRemoteIP(observer), e); + } + + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Modify store information such as its state.
+     * 
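+     * A minimal sketch of bringing a Pending store online (assuming a blocking stub named stub and a
+     * Metapb.Store named pending previously fetched via getStore; names are illustrative):
+     * <pre>
+     * Metapb.Store toUp = Metapb.Store.newBuilder(pending)
+     *         .setState(Metapb.StoreState.Up).build();
+     * Pdpb.SetStoreResponse resp = stub.setStore(
+     *         Pdpb.SetStoreRequest.newBuilder().setStore(toUp).build());
+     * </pre>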
+ */ + public void setStore(Pdpb.SetStoreRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSetStoreMethod(), request, observer); + return; + } + Pdpb.SetStoreResponse response = null; + try { + Metapb.StoreState state = request.getStore().getState(); + Long storeId = request.getStore().getId(); + // 处于Pending状态,才可以上线 + Metapb.Store lastStore = storeNodeService.getStore(request.getStore().getId()); + if (lastStore == null){ + // storeId不存在,抛出异常 + throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d does not exist!", storeId)); + } + if (Metapb.StoreState.Up.equals(state)){ + if (!Metapb.StoreState.Pending.equals(lastStore.getState())){ + throw new PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, + "only stores in Pending state can be set to Up!"); + } + } + if (state.equals(Metapb.StoreState.Offline)) { + Metapb.ClusterStats stats = storeNodeService.getClusterStats(); + if (stats.getState() != Metapb.ClusterState.Cluster_OK) { + Pdpb.ResponseHeader errorHeader = newErrorHeader(-1, + "can not offline node " + + "when cluster state is not normal "); + response = Pdpb.SetStoreResponse.newBuilder().setHeader(errorHeader).build(); + observer.onNext(response); + observer.onCompleted(); + return; + } + } + logService.insertLog(LogService.NODE_CHANGE, LogService.GRPC, request.getStore()); + // 检查失败,状态改为Pending,把错误原因返回去 + if (state.equals(Metapb.StoreState.Up)) { + int cores = 0; + long id = request.getStore().getId(); + List stores = storeNodeService.getStores(); + int nodeCount = 0; + for (Metapb.Store store : stores) { + if (store.getId() == id) { + // 获取之前注册的store中的cores 作为验证参数 + cores = store.getCores(); + } + if (store.getState().equals(Metapb.StoreState.Up)) { + nodeCount++; + } + } + try { + licenseVerifierService.verify(cores, nodeCount); + } catch (Exception e) { + Metapb.Store store = Metapb.Store.newBuilder(request.getStore()) + .setState(Metapb.StoreState.Pending).build(); + storeNodeService.updateStore(store); + throw new PDException(Pdpb.ErrorType.LICENSE_ERROR_VALUE, + "check license with error :" + + e.getMessage() + + ", and changed node state to 'Pending'"); + } + } + Metapb.Store store = request.getStore(); + // 下线之前先判断一下,活跃机器数是否大于最小阈值 + if (state.equals(Metapb.StoreState.Tombstone)) { + List activeStores = storeNodeService.getActiveStores(); + if (lastStore.getState() == Metapb.StoreState.Up + && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + pdConfig.getMinStoreCount()); + } + if (!storeNodeService.checkStoreCanOffline(request.getStore())){ + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "check activeStores or online shardsList size"); + } + if (lastStore.getState() == Metapb.StoreState.Exiting){ + // 如果已经是下线中的状态,则不作进一步处理 + throw new PDException(Pdpb.ErrorType.Store_Tombstone_Doing_VALUE, + "Downline is in progress, do not resubmit"); + } + Map resultMap = taskService.canAllPartitionsMovedOut(lastStore); + if ((boolean) resultMap.get("flag") == true) { + if (resultMap.get("current_store_is_online") != null + && (boolean) resultMap.get("current_store_is_online") == true) { + log.info("updateStore removeActiveStores store {}", store.getId()); + // 将在线的store的状态设置为下线中,等待副本迁移 + store = Metapb.Store.newBuilder(lastStore) + .setState(Metapb.StoreState.Exiting).build(); + // 进行分区迁移操作 + taskService.movePartitions((Map>) resultMap.get("movedPartitions")); + 
}else { + // store已经离线的,不做副本迁移 + // 将状态改为Tombstone + } + }else{ + throw new PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, + "the resources on other stores may be not enough to store " + + "the partitions of current store!"); + } + } + // 替换license 都走grpc + store = storeNodeService.updateStore(store); + response = Pdpb.SetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); + } catch (PDException e) { + response = Pdpb.SetStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("setStore exception: ", e); + } + + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 返回所有的store,exclude_offline_stores=true,返回活跃的stores + */ + @Override + public void getAllStores(Pdpb.GetAllStoresRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetAllStoresMethod(), request, observer); + return; + } + Pdpb.GetAllStoresResponse response = null; + try { + List stores = null; + if (request.getExcludeOfflineStores()) { + stores = storeNodeService.getActiveStores(request.getGraphName()); + } else { + stores = storeNodeService.getStores(request.getGraphName()); + } + response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) + .build(); + } catch (PDException e) { + response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getAllStores exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 处理store心跳 + */ + @Override + public void storeHeartbeat(Pdpb.StoreHeartbeatRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getStoreHeartbeatMethod(), request, observer); + return; + } + + Metapb.StoreStats stats = request.getStats(); + + // save monitor data when monitor data enabled + if (this.pdConfig.getStore().isMonitorDataEnabled()) { + try { + storeMonitorDataService.saveMonitorData(stats); + }catch (PDException e){ + log.error("save status failed, state:{}", stats); + } + // remove system_metrics + stats = Metapb.StoreStats.newBuilder() + .mergeFrom(request.getStats()) + .clearField(Metapb.StoreStats.getDescriptor().findFieldByName("system_metrics")) + .build(); + } + + Pdpb.StoreHeartbeatResponse response = null; + try { + Metapb.ClusterStats clusterStats = storeNodeService.heartBeat(stats); + response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(okHeader) + .setClusterStats(clusterStats).build(); + } catch (PDException e) { + response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("storeHeartbeat exception: ", e); + } catch (Exception e2) { + response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader( + newErrorHeader(Pdpb.ErrorType.UNKNOWN_VALUE, e2.getMessage())).build(); + log.error("storeHeartbeat exception: ", e2); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Find the partition that a key belongs to.
+     * 
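+     * A minimal lookup sketch (assuming a blocking stub named stub; the graph name and key are
+     * illustrative, and ByteString is com.google.protobuf.ByteString):
+     * <pre>
+     * Pdpb.GetPartitionResponse resp = stub.getPartition(
+     *         Pdpb.GetPartitionRequest.newBuilder()
+     *                 .setGraphName("hugegraph/g")
+     *                 .setKey(ByteString.copyFromUtf8("vertex:1"))
+     *                 .build());
+     * var partition = resp.getPartition();   // partition owning the key
+     * var leader = resp.getLeader();         // shard currently acting as leader
+     * </pre>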
+ */ + @Override + public void getPartition(Pdpb.GetPartitionRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionMethod(), request, observer); + return; + } + Pdpb.GetPartitionResponse response = null; + try { + Metapb.PartitionShard partShard = partitionService.getPartitionShard(request.getGraphName(), + request.getKey() + .toByteArray()); + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partShard.getPartition()) + .setLeader(partShard.getLeader()).build(); + } catch (PDException e) { + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getPartition exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Find the partition that a hash code belongs to.
+     * 
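+     * A minimal lookup sketch (assuming a blocking stub named stub and a hash code named code that
+     * the caller has already computed for its key; the graph name is illustrative):
+     * <pre>
+     * Pdpb.GetPartitionResponse resp = stub.getPartitionByCode(
+     *         Pdpb.GetPartitionByCodeRequest.newBuilder()
+     *                 .setGraphName("hugegraph/g")
+     *                 .setCode(code)
+     *                 .build());
+     * </pre>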
+ */ + @Override + public void getPartitionByCode(Pdpb.GetPartitionByCodeRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionByCodeMethod(), request, observer); + return; + } + Pdpb.GetPartitionResponse response = null; + try { + Metapb.PartitionShard partShard = partitionService.getPartitionByCode(request.getGraphName(), + request.getCode()); + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partShard.getPartition()) + .setLeader(partShard.getLeader()).build(); + } catch (PDException e) { + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getPartitionByCode exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + + /** + * 根据partition_id查找partition + */ + @Override + public void getPartitionByID(Pdpb.GetPartitionByIDRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionByIDMethod(), request, observer); + return; + } + Pdpb.GetPartitionResponse response = null; + try { + Metapb.PartitionShard partShard = partitionService.getPartitionShardById(request.getGraphName(), + request.getPartitionId()); + if (partShard == null) { + throw new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, + String.format("partition: %s-%s not found", request.getGraphName(), request.getPartitionId())); + } + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partShard.getPartition()) + .setLeader(partShard.getLeader()).build(); + } catch (PDException e) { + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getPartitionByID exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Update partition information, mainly the partition key range. Call this interface with caution; improper use can cause data loss.
+     * 
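+     * A minimal sketch (assuming a blocking stub named stub and a list named partitions of
+     * Metapb.Partition objects whose key ranges have already been verified; names are illustrative):
+     * <pre>
+     * Pdpb.UpdatePartitionResponse resp = stub.updatePartition(
+     *         Pdpb.UpdatePartitionRequest.newBuilder().addAllPartition(partitions).build());
+     * </pre>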
+ */ + public void updatePartition(Pdpb.UpdatePartitionRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdatePartitionMethod(), request, observer); + return; + } + Pdpb.UpdatePartitionResponse response = null; + try { + partitionService.updatePartition(request.getPartitionList()); + response = Pdpb.UpdatePartitionResponse.newBuilder().setHeader(okHeader).build(); + + } catch (PDException e) { + response = Pdpb.UpdatePartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("update partition exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 根据partition_id查找partition + */ + @Override + public void delPartition(Pdpb.DelPartitionRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDelPartitionMethod(), request, observer); + return; + } + Pdpb.DelPartitionResponse response = null; + try { + Metapb.Partition partition = partitionService.getPartitionById(request.getGraphName(), + request.getPartitionId()); + if (partition != null) { + partitionService.removePartition(request.getGraphName(), (int) request.getPartitionId()); + response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader).setPartition(partition) + .build(); + } else { + response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader).build(); + } + } catch (PDException e) { + response = Pdpb.DelPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("delPartition exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 给定key范围查找所属的partition集合 + */ + @Override + public void scanPartitions(Pdpb.ScanPartitionsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getScanPartitionsMethod(), request, observer); + return; + } + Pdpb.ScanPartitionsResponse response = null; + try { + List partShards = partitionService.scanPartitions(request.getGraphName(), + request.getStartKey() + .toByteArray(), + request.getEndKey() + .toByteArray()); + response = Pdpb.ScanPartitionsResponse.newBuilder().setHeader(okHeader) + .addAllPartitions(partShards).build(); + } catch (PDException e) { + response = Pdpb.ScanPartitionsResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("scanPartitions exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 获得图信息 + */ + public void getGraph(Pdpb.GetGraphRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetGraphMethod(), request, observer); + return; + } + + Pdpb.GetGraphResponse response = null; + String graphName = request.getGraphName(); + try { + Metapb.Graph graph = partitionService.getGraph(graphName); + if (graph != null) { + response = Pdpb.GetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); + } else { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.NOT_FOUND).build()).build(); + response = Pdpb.GetGraphResponse.newBuilder().setHeader(header).build(); + } + } catch (PDException e) { + response = Pdpb.GetGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getGraph exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 修改图信息 + */ + public void setGraph(Pdpb.SetGraphRequest request, + io.grpc.stub.StreamObserver observer) { + if 
(!isLeader()) { + redirectToLeader(PDGrpc.getSetGraphMethod(), request, observer); + return; + } + Pdpb.SetGraphResponse response = null; + Metapb.Graph graph = request.getGraph(); + try { + graph = partitionService.updateGraph(graph); + response = Pdpb.SetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); + } catch (PDException e) { + log.error("setGraph exception: ", e); + response = Pdpb.SetGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 获得图信息 + */ + public void delGraph(Pdpb.DelGraphRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDelGraphMethod(), request, observer); + return; + } + + Pdpb.DelGraphResponse response = null; + String graphName = request.getGraphName(); + try { + Metapb.Graph graph = partitionService.delGraph(graphName); + if (graph != null) + response = Pdpb.DelGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); + } catch (PDException e) { + response = Pdpb.DelGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getGraph exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Query partition information by conditions such as Store and Graph.
+     * 
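+     * A minimal sketch listing the partitions of one graph that have a shard on a given store
+     * (assuming a blocking stub named stub; the graph name and storeId are illustrative):
+     * <pre>
+     * Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder()
+     *         .setGraphName("hugegraph/g")
+     *         .setStoreId(storeId)
+     *         .build();
+     * Pdpb.QueryPartitionsResponse resp = stub.queryPartitions(
+     *         Pdpb.QueryPartitionsRequest.newBuilder().setQuery(query).build());
+     * var partitions = resp.getPartitionsList();
+     * </pre>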
+ */ + public void queryPartitions(Pdpb.QueryPartitionsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getQueryPartitionsMethod(), request, observer); + return; + } + //TODO 临时采用遍历方案,后续使用rocksdb存储时,通过kv索引实现 + Metapb.PartitionQuery query = request.getQuery(); + List partitions = partitionService.getPartitions(query.getGraphName()); + List result = new ArrayList<>(); + if (!CollectionUtils.isEmpty(partitions)) { + for (Metapb.Partition partition : partitions) { + if (query.hasPartitionId() && partition.getId() != query.getPartitionId()) { + continue; + } + if (query.hasGraphName() && !partition.getGraphName().equals(query.getGraphName())) { + continue; + } + long storeId = query.getStoreId(); + if (query.hasStoreId() && query.getStoreId() != 0) { + try { + storeNodeService.getShardGroup(partition.getId()).getShardsList().forEach(shard -> { + if (shard.getStoreId() == storeId) { + result.add(partition); + } + }); + }catch (PDException e){ + log.error("query partitions error, req:{}, error:{}", request, e.getMessage()); + } + } else { + result.add(partition); + } + } + } + Pdpb.QueryPartitionsResponse response = Pdpb.QueryPartitionsResponse.newBuilder() + .addAllPartitions(result).build(); + observer.onNext(response); + observer.onCompleted(); + + } + + @Override + public void getId(Pdpb.GetIdRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetIdMethod(), request, responseObserver); + return; + } + long id = 0L; + try { + id = idService.getId(request.getKey(), request.getDelta()); + } catch (PDException e) { + responseObserver.onError(e); + log.error("getId exception: ", e); + return; + } + Pdpb.GetIdResponse response = Pdpb.GetIdResponse.newBuilder().setId(id).setDelta(request.getDelta()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void resetId(Pdpb.ResetIdRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getResetIdMethod(), request, responseObserver); + return; + } + try { + idService.resetId(request.getKey()); + } catch (PDException e) { + responseObserver.onError(e); + log.error("getId exception: ", e); + return; + } + Pdpb.ResetIdResponse response = Pdpb.ResetIdResponse.newBuilder().setResult(0).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * 获取集群成员信息 + */ + public void getMembers(Pdpb.GetMembersRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetMembersMethod(), request, observer); + return; + } + Pdpb.GetMembersResponse response; + try { + response = Pdpb.GetMembersResponse.newBuilder() + .addAllMembers(RaftEngine.getInstance().getMembers()) + .setLeader(RaftEngine.getInstance().getLocalMember()).build(); + + } catch (Exception e) { + log.error("getMembers exception: ", e); + response = Pdpb.GetMembersResponse.newBuilder().setHeader(newErrorHeader(-1, e.getMessage())) + .build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getStoreStatus(Pdpb.GetAllStoresRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetStoreStatusMethod(), request, observer); + return; + } + Pdpb.GetAllStoresResponse response = null; + try { + List stores = null; + stores = storeNodeService.getStoreStatus(request.getExcludeOfflineStores()); + response = 
Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) + .build(); + } catch (PDException e) { + response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getAllStores exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 读取PD配置 + */ + @Override + public void getPDConfig(Pdpb.GetPDConfigRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPDConfigMethod(), request, observer); + return; + } + Pdpb.GetPDConfigResponse response = null; + try { + Metapb.PDConfig pdConfig = null; + pdConfig = configService.getPDConfig(request.getVersion()); + response = Pdpb.GetPDConfigResponse.newBuilder().setHeader(okHeader).setPdConfig(pdConfig) + .build(); + } catch (PDException e) { + response = Pdpb.GetPDConfigResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 修改PD配置 + */ + @Override + public void setPDConfig(Pdpb.SetPDConfigRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSetPDConfigMethod(), request, observer); + return; + } + Pdpb.SetPDConfigResponse response = null; + try { + if (request.getPdConfig().getShardCount() % 2 != 1){ + // 副本数奇偶校验 + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "shard count must be an odd number!"); + } + if (request.getPdConfig().getShardCount() > + storeNodeService.getActiveStores().size()){ + // 不能大于活跃的store数量 + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "shard count can't be greater than the number of active stores!"); + } + int oldShardCount = configService.getPDConfig().getShardCount(); + int newShardCount = request.getPdConfig().getShardCount(); + if (newShardCount > oldShardCount){ + // 如果副本数增大,则检查store内部的资源是否够用 + if (! isResourceEnough(oldShardCount, newShardCount)) { + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "There is not enough disk space left!"); + } + + if (! 
checkShardCount(newShardCount)) { + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "the cluster can't support so many shard count!"); + } + } + configService.setPDConfig(request.getPdConfig()); + response = Pdpb.SetPDConfigResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + response = Pdpb.SetPDConfigResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 读取图空间配置 + */ + @Override + public void getGraphSpace(Pdpb.GetGraphSpaceRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetGraphSpaceMethod(), request, observer); + return; + } + Pdpb.GetGraphSpaceResponse response = null; + try { + List graphSpaces = null; + graphSpaces = configService.getGraphSpace(request.getGraphSpaceName()); + response = Pdpb.GetGraphSpaceResponse.newBuilder().setHeader(okHeader) + .addAllGraphSpace(graphSpaces).build(); + } catch (PDException e) { + response = Pdpb.GetGraphSpaceResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 修改图空间配置 + */ + @Override + public void setGraphSpace(Pdpb.SetGraphSpaceRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSetGraphSpaceMethod(), request, observer); + return; + } + Pdpb.SetGraphSpaceResponse response = null; + try { + configService.setGraphSpace(request.getGraphSpace()); + response = Pdpb.SetGraphSpaceResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + response = Pdpb.SetGraphSpaceResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Split data.
+     * 
+ */ + public void splitData(Pdpb.SplitDataRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSplitDataMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "splitData", request); + Pdpb.SplitDataResponse response = null; + try { + taskService.splitPartition(request.getMode(), request.getParamList()); + response = Pdpb.SplitDataResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("splitData exception {}", e); + response = Pdpb.SplitDataResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + + } + + @Override + public void splitGraphData(Pdpb.SplitGraphDataRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSplitGraphDataMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "splitGraphData", request); + Pdpb.SplitDataResponse response ; + try { + partitionService.splitPartition(partitionService.getGraph(request.getGraphName()), request.getToCount()); + response = Pdpb.SplitDataResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("splitGraphData exception {}", e); + response = Pdpb.SplitDataResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * 在store之间平衡数据 + */ + public void movePartition(Pdpb.MovePartitionRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getMovePartitionMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "balanceData", request); + Pdpb.MovePartitionResponse response = null; + try { + taskService.patrolPartitions(); + taskService.balancePartitionShard(); + response = Pdpb.MovePartitionResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("transferData exception {}", e); + response = Pdpb.MovePartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Get the cluster health status.
+     * 
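+     * A minimal health-check sketch (assuming a blocking stub named stub):
+     * <pre>
+     * Metapb.ClusterStats stats = stub.getClusterStats(
+     *         Pdpb.GetClusterStatsRequest.newBuilder().build()).getCluster();
+     * boolean healthy = stats.getState() == Metapb.ClusterState.Cluster_OK;
+     * </pre>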
+ */ + public void getClusterStats(Pdpb.GetClusterStatsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetClusterStatsMethod(), request, observer); + return; + } + Pdpb.GetClusterStatsResponse response = null; + response = Pdpb.GetClusterStatsResponse.newBuilder().setHeader(okHeader) + .setCluster(storeNodeService.getClusterStats()).build(); + observer.onNext(response); + observer.onCompleted(); + } + + + /** + *
+     * Report the execution result of tasks such as partition splitting.
+     * 
+ */ + public void reportTask(Pdpb.ReportTaskRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getReportTaskMethod(), request, observer); + return; + } + try { + taskService.reportTask(request.getTask()); + } catch (Exception e) { + log.error("PDService.reportTask {}", e); + } + Pdpb.ReportTaskResponse response = null; + response = Pdpb.ReportTaskResponse.newBuilder().setHeader(okHeader).build(); + observer.onNext(response); + observer.onCompleted(); + } + + /** + * + */ + public void getPartitionStats(Pdpb.GetPartitionStatsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionStatsMethod(), request, observer); + return; + } + Pdpb.GetPartitionStatsResponse response; + // TODO + try { + Metapb.PartitionStats stats = partitionService.getPartitionStats(request.getGraphName(), + request.getPartitionId()); + response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(okHeader) + .setPartitionStats(stats).build(); + } catch (PDException e) { + log.error("getPartitionStats exception {}", e); + response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + + private ManagedChannel channel; + + public boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + + private > void redirectToLeader( + MethodDescriptor method, ReqT req, io.grpc.stub.StreamObserver observer) { + try { + if (channel == null) { + synchronized (this) { + if (channel == null) { + channel = ManagedChannelBuilder + .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()).usePlaintext() + .build(); + } + } + log.info("Grpc get leader address {}", RaftEngine.getInstance().getLeaderGrpcAddress()); + } + + io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), req, + observer); + } catch (Exception e) { + e.printStackTrace(); + } + } + + /** + * 更新peerList + */ + @Override + public void changePeerList(Pdpb.ChangePeerListRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getChangePeerListMethod(), request, observer); + return; + } + Pdpb.getChangePeerListResponse response; + try { + Status status = RaftEngine.getInstance().changePeerList(request.getPeerList()); + Pdpb.ResponseHeader responseHeader = status.isOk() ? 
okHeader : newErrorHeader(status.getCode(), + status.getErrorMsg()); + response = Pdpb.getChangePeerListResponse.newBuilder().setHeader(responseHeader).build(); + + } catch (Exception e) { + log.error("changePeerList exception: ", e); + response = Pdpb.getChangePeerListResponse.newBuilder() + .setHeader(newErrorHeader(-1, e.getMessage())).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public synchronized void onRaftLeaderChanged() { + log.info("onLeaderChanged"); + channel = null; + if (licenseVerifierService == null) { + licenseVerifierService = new LicenseVerifierService(pdConfig); + } + licenseVerifierService.init(); + if (!isLeader()) { + try { + // 关闭Client通知,Client重新向Leader发起连接 + String message = "lose leader"; + PDPulseSubject.notifyError(message); + PDWatchSubject.notifyError(message); + } catch (Exception e) { + + } + } + } + + @Override + public void balanceLeaders(Pdpb.BalanceLeadersRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getBalanceLeadersMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "balanceLeaders", request); + Pdpb.BalanceLeadersResponse response = null; + try { + taskService.balancePartitionLeader(true); + response = Pdpb.BalanceLeadersResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("balance Leaders exception {}", e); + response = Pdpb.BalanceLeadersResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void putLicense(PutLicenseRequest request, StreamObserver responseObserver) { + PutLicenseResponse response = null; + boolean moved = false; + String bakPath = pdConfig.getLicensePath() + "-bak"; + File bakFile = new File(bakPath); + File licenseFile = new File(pdConfig.getLicensePath()); + try { + byte[] content = request.getContent().toByteArray(); + if (licenseFile.exists()) { + if (bakFile.exists()) { + FileUtils.deleteQuietly(bakFile); + } + FileUtils.moveFile(licenseFile, bakFile); + moved = true; + } + FileUtils.writeByteArrayToFile(licenseFile, content, false); + } catch (Exception e) { + log.error("putLicense with error: {}", e); + if (moved) { + try { + FileUtils.moveFile(bakFile, licenseFile); + } catch (IOException ex) { + log.error("failed to restore the license file.{}", ex); + } + } + Pdpb.ResponseHeader header = newErrorHeader(Pdpb.ErrorType.LICENSE_ERROR_VALUE, e.getMessage()); + response = Pdpb.PutLicenseResponse.newBuilder().setHeader(header).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + @Override + public void delStore(Pdpb.DetStoreRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDelStoreMethod(), request, observer); + return; + } + long storeId = request.getStoreId(); + Pdpb.DetStoreResponse response = null; + try { + Metapb.Store store = storeNodeService.getStore(storeId); + if (Metapb.StoreState.Tombstone == store.getState()){ + // 只有已经被下线(Tombstone)的store可以被删除 + storeNodeService.removeStore(storeId); + response = Pdpb.DetStoreResponse.newBuilder() + .setHeader(okHeader) + .setStore(store) + .build(); + }else{ + throw new PDException(Pdpb.ErrorType.STORE_PROHIBIT_DELETION_VALUE, + "the store can't be deleted, please check store state!"); + } + } catch (PDException e) { + log.error("delete store exception: {}", e); + response = Pdpb.DetStoreResponse.newBuilder() + 
+                           .setHeader(newErrorHeader(e)).build();
+        }
+        observer.onNext(response);
+        observer.onCompleted();
+    }
+
+    /**
+     * Check whether the new shard count exceeds the maximum shard count the cluster can support
+     *
+     * @param newShardCount new shard count
+     * @return true if the shard count can be set to the new value, otherwise false
+     */
+    private boolean checkShardCount(int newShardCount) {
+        try {
+            var maxCount = pdConfig.getPartition().getMaxShardsPerStore() * storeNodeService.getActiveStores().size() /
+                           pdConfig.getConfigService().getPartitionCount();
+
+            if (newShardCount > maxCount) {
+                log.error("new shard count: {} exceeds the current cluster max shard count {}", newShardCount, maxCount);
+                return false;
+            }
+        } catch (Exception e) {
+            log.error("checkShardCount: {}", e.getMessage());
+        }
+        return true;
+    }
+
+    /**
+     * Check whether the store resources are sufficient
+     */
+    public boolean isResourceEnough(int oldShardCount, int newShardCount) {
+        // whether the resources of the active stores are sufficient
+        try {
+
+            float expansionRatio = (float) newShardCount / oldShardCount; // expansion factor of the occupied storage space
+            // space currently occupied
+            long currentDataSize = 0L;
+            // space occupied after the data expansion
+            long newDataSize = 0L;
+            // total available space
+            long totalAvailable = 0L;
+            // sum up the storage space currently occupied
+            for (Metapb.Store store : storeNodeService.getStores()) {
+                List<Metapb.GraphStats> graphStatsList = store.getStats().getGraphStatsList();
+                for (Metapb.GraphStats graphStats : graphStatsList) {
+                    currentDataSize += graphStats.getApproximateSize();
+                }
+            }
+            // estimate the storage space occupied after the data expansion
+            newDataSize = (long) Math.ceil(currentDataSize * expansionRatio);
+            // sum up the available space of all active stores
+            List<Metapb.Store> activeStores = storeNodeService.getActiveStores();
+            for (Metapb.Store store : activeStores) {
+                Metapb.StoreStats storeStats = store.getStats();
+                totalAvailable += storeStats.getAvailable();
+            }
+            // assuming partitions are evenly distributed, check whether the resources are sufficient
+            return totalAvailable > newDataSize - currentDataSize;
+        } catch (PDException e) {
+            log.error("isResourceEnough exception: ", e);
+            return false;
+        }
+    }
+
+    /**
+     * Trigger compaction on RocksDB
+ */ + public void dbCompaction(Pdpb.DbCompactionRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDbCompactionMethod(), request, observer); + return; + } + logService.insertLog(LogService.TASK, "dbCompaction", request); + Pdpb.DbCompactionResponse response = null; + try { + log.info("dbCompaction call dbCompaction"); + taskService.dbCompaction(request.getTableName()); + response = Pdpb.DbCompactionResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("dbCompaction exception {}", e); + response = Pdpb.DbCompactionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void combineCluster(Pdpb.CombineClusterRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getCombineClusterMethod(), request, observer); + return; + } + + Pdpb.CombineClusterResponse response ; + + try{ + partitionService.combinePartition(request.getToCount()); + response = Pdpb.CombineClusterResponse.newBuilder().setHeader(okHeader).build(); + }catch (PDException e){ + response = Pdpb.CombineClusterResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void combineGraph(Pdpb.CombineGraphRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getCombineGraphMethod(), request, observer); + return; + } + + Pdpb.CombineGraphResponse response ; + + try{ + partitionService.combineGraphPartition(request.getGraphName(), request.getToCount()); + response = Pdpb.CombineGraphResponse.newBuilder().setHeader(okHeader).build(); + }catch (PDException e){ + response = Pdpb.CombineGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void deleteShardGroup(Pdpb.DeleteShardGroupRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDeleteShardGroupMethod(), request, observer); + return; + } + + Pdpb.DeleteShardGroupResponse response; + + try { + storeNodeService.deleteShardGroup(request.getGroupId()); + response = Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + response = Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + public void getShardGroup(Pdpb.GetShardGroupRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetShardGroupMethod(), request, observer); + return; + } + Pdpb.GetShardGroupResponse response; + // TODO + try { + Metapb.ShardGroup shardGroup = storeNodeService.getShardGroup(request.getGroupId()); + response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(okHeader).setShardGroup(shardGroup).build(); + } catch (PDException e) { + log.error("getPartitionStats exception", e); + response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void updateShardGroup(Pdpb.UpdateShardGroupRequest request, + StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdateShardGroupMethod(), request, responseObserver); + return; + } + Pdpb.UpdateShardGroupResponse response; + var group = request.getShardGroup(); + 
storeNodeService.updateShardGroup(group.getId(), group.getShardsList(), + group.getVersion(), group.getConfVer()); + + response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(okHeader).build(); + + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void updateShardGroupOp(Pdpb.ChangeShardRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdateShardGroupOpMethod(), request, observer); + return; + } + + Pdpb.ChangeShardResponse response; + + try { + storeNodeService.shardGroupOp(request.getGroupId(), request.getShardsList()); + response = Pdpb.ChangeShardResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("changeShard exception, ", e); + response = Pdpb.ChangeShardResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void changeShard(Pdpb.ChangeShardRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getChangeShardMethod(), request, observer); + return; + } + + Pdpb.ChangeShardResponse response; + + try { + partitionService.changeShard(request.getGroupId(), request.getShardsList()); + response = Pdpb.ChangeShardResponse.newBuilder().setHeader(okHeader).build(); + }catch (PDException e) { + log.error("changeShard exception, ", e); + response = Pdpb.ChangeShardResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java new file mode 100644 index 0000000000..2379f47bf7 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java @@ -0,0 +1,22 @@ +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.watch.PDWatchSubject; +import com.baidu.hugegraph.pd.grpc.watch.HgPdWatchGrpc; +import com.baidu.hugegraph.pd.grpc.watch.WatchRequest; +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; +import org.lognet.springboot.grpc.GRpcService; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/4 + */ +@Slf4j +@GRpcService +public class PDWatchService extends HgPdWatchGrpc.HgPdWatchImplBase { + + @Override + public StreamObserver watch(StreamObserver responseObserver) { + return PDWatchSubject.addObserver(responseObserver); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java new file mode 100644 index 0000000000..df641564c1 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java @@ -0,0 +1,234 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.RegistryService; +import com.baidu.hugegraph.pd.common.HgAssert; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; +import com.baidu.hugegraph.pd.grpc.discovery.Query; + +import org.apache.hugegraph.pd.util.HgMapCache; +import 
org.apache.hugegraph.pd.model.PromTargetsModel; +import org.apache.hugegraph.pd.rest.MemberAPI; + +import lombok.extern.slf4j.Slf4j; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.*; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +/** + * @author lynn.bond@hotmail.com on 2022/2/24 + */ +@Service +@Slf4j +public class PromTargetsService { + + @Autowired + private PDConfig pdConfig; + @Autowired + private PDService pdService; + + private RegistryService register; + + private final PromTargetsModel pdModel = PromTargetsModel.of() + .addLabel("__app_name", "pd") + .setScheme("http") + .setMetricsPath("/actuator/prometheus"); + + + private final PromTargetsModel storeModel = PromTargetsModel.of() + .addLabel("__app_name", "store") + .setScheme("http") + .setMetricsPath("/actuator/prometheus"); + + + private HgMapCache> targetsCache = HgMapCache.expiredOf(24 * 60 * 60 * 1000);// expired after 24H. + + private RegistryService getRegister() { + if (this.register == null) { + this.register = new RegistryService(this.pdConfig); + } + return this.register; + } + + public List getAllTargets() { + List res = new LinkedList<>(); + List buf = this.toModels(this.getRegister().getNodes(Query.newBuilder().build())); + + if (buf != null) { + res.addAll(buf); + } + + res.add(getPdTargets()); + res.add(getStoreTargets()); + + return res; + } + + /** + * @param appName + * @return null if it's not existing + */ + public List getTargets(String appName) { + HgAssert.isArgumentNotNull(appName, "appName"); + switch (appName) { + case "pd": + return Collections.singletonList(this.getPdTargets()); + case "store": + return Collections.singletonList(this.getStoreTargets()); + default: + return this.toModels(this.getRegister().getNodes(Query.newBuilder().setAppName(appName).build())); + } + } + + private PromTargetsModel getPdTargets() { + return setTargets(pdModel, () -> this.mergeCache("pd", getPdAddresses())); + } + + private PromTargetsModel getStoreTargets() { + return setTargets(storeModel, () -> this.mergeCache("store", getStoreAddresses())); + } + + private PromTargetsModel setTargets(PromTargetsModel model, Supplier> supplier) { + return model.setTargets(supplier.get()).setClusterId(String.valueOf(pdConfig.getClusterId())); + } + + /* to prevent the failure of connection between pd and store or pd and pd.*/ + //TODO: To add a schedule task to refresh targets, not to retrieve in every time. 
+ private Set mergeCache(String key, Set set) { + Set buf = this.targetsCache.get(key); + + if (buf == null) { + buf = new HashSet<>(); + this.targetsCache.put(key, buf); + } + + if (set != null) { + buf.addAll(set); + } + + return buf; + } + + private List toModels(NodeInfos info) { + if (info == null) { + return null; + } + + List nodes = info.getInfoList(); + if (nodes == null || nodes.isEmpty()) { + return null; + } + + List res = + nodes.stream().map(e -> { + Map labels = e.getLabelsMap(); + + String target = labels.get("target"); + if (HgAssert.isInvalid(target)) return null; + + PromTargetsModel model = PromTargetsModel.of(); + model.addTarget(target); + model.addLabel("__app_name", e.getAppName()); + + labels.forEach((k, v) -> { + k = k.trim(); + switch (k) { + case "metrics": + model.setMetricsPath(v.trim()); + break; + case "scheme": + model.setScheme(v.trim()); + break; + default: + if (k.startsWith("__")) { + model.addLabel(k, v); + } + + } + }); + + + return model; + }) + .filter(e -> e != null) + .collect(Collectors.toList()); + + if (res.isEmpty()) { + return null; + } + return res; + } + + private Set getPdAddresses() { + MemberAPI.CallStreamObserverWrap response = new MemberAPI.CallStreamObserverWrap<>(); + pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); + List members = null; + + try { + members = response.get().get(0).getMembersList(); + } catch (Throwable e) { + log.error("Failed to get all pd members.", e); + } + + Set res = new HashSet<>(); + if (members != null) { + members.stream().forEach(e -> res.add(e.getRestUrl())); + } + + return res; + } + + private Set getStoreAddresses() { + Set res = new HashSet<>(); + List stores = null; + try { + stores = pdService.getStoreNodeService().getStores(); + } catch (PDException e) { + log.error("Failed to get all stores.", e); + } + + if (stores != null) { + stores.stream().forEach(e -> { + String buf = this.getRestAddress(e); + if (buf != null) { + res.add(buf); + } + }); + } + + return res; + } + + //TODO: optimized store registry data, to add host:port of REST server. 
+ private String getRestAddress(Metapb.Store store) { + String address = store.getAddress(); + if (address == null || address.isEmpty()) return null; + try { + Optional port = store.getLabelsList().stream().map( + e -> { + if ("rest.port".equals(e.getKey())) { + return e.getValue(); + } + return null; + }).filter(e -> e != null).findFirst(); + + if (port.isPresent()) { + address = address.substring(0, address.indexOf(':') + 1); + address = address + port.get(); + + } + } catch (Throwable t) { + log.error("Failed to extract the REST address of store, cause by:", t); + } + return address; + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java new file mode 100644 index 0000000000..75033c3317 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java @@ -0,0 +1,79 @@ +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.watch.PDWatchSubject; + +import java.util.concurrent.ConcurrentHashMap; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.pulse.PDPulseSubject; + +import com.baidu.hugegraph.pd.raft.RaftEngine; +import com.baidu.hugegraph.pd.raft.RaftStateListener; +import com.baidu.hugegraph.pd.watch.PDWatchSubject; + +import io.grpc.CallOptions; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; + +/** + * @author zhangyingjie + * @date 2022/6/21 + **/ +public interface ServiceGrpc extends RaftStateListener { + + default Pdpb.ResponseHeader getResponseHeader(PDException e) { + Pdpb.Error error = Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage()).build(); + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build(); + return header; + } + + default Pdpb.ResponseHeader getResponseHeader() { + Pdpb.Error error = Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK).build(); + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build(); + return header; + } + + default boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + + ConcurrentHashMap channels = new ConcurrentHashMap(); + + default void redirectToLeader(ManagedChannel channel, MethodDescriptor method, + ReqT req, io.grpc.stub.StreamObserver observer) { + try { + String address = RaftEngine.getInstance().getLeaderGrpcAddress(); + if ((channel = channels.get(address)) == null) { + synchronized (this) { + if ((channel = channels.get(address)) == null) { + ManagedChannel c = ManagedChannelBuilder.forTarget(address).usePlaintext().build(); + channels.put(address, c); + channel = c; + } + } + } + io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), + req, observer); + } catch (Exception e) { + e.printStackTrace(); + } + + } + + @Override + default void onRaftLeaderChanged() { + synchronized (this){ + if (!isLeader()) { + try { + String message = "lose leader"; + PDPulseSubject.notifyError(message); + PDWatchSubject.notifyError(message); + }catch (Exception e){ + e.printStackTrace(); + } + } + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java new file mode 100644 index 0000000000..4e45e9027b --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java @@ -0,0 +1,88 @@ +package com.baidu.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.KvService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.rest.API; +import com.baidu.hugegraph.pd.upgrade.VersionScriptFactory; +import com.baidu.hugegraph.pd.upgrade.VersionUpgradeScript; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class UpgradeService { + + private static final String VERSION_KEY = "DATA_VERSION"; + + private static final String RUN_LOG_PREFIX = "SCRIPT_RUN_LOG"; + + private PDConfig pdConfig; + + private KvService kvService; + + public UpgradeService (PDConfig pdConfig){ + this.pdConfig = pdConfig; + this.kvService = new KvService(pdConfig); + } + + public void upgrade() throws PDException { + + log.info("upgrade service start"); + VersionScriptFactory factory = VersionScriptFactory.getInstance(); + var dataVersion = getDataVersion(); + log.info("now db data version : {}", dataVersion); + for(VersionUpgradeScript script : factory.getScripts()) { + // 执行过,run once的跳过 + if (isExecuted(script.getClass().getName()) && script.isRunOnce()) { + log.info("Script {} is Executed and is run once", script.getClass().getName()); + continue; + } + + // 判断跳过的条件 + if (dataVersion == null && !script.isRunWithoutDataVersion() || dataVersion != null && + !versionCompare(dataVersion, script.getHighVersion(), script.getLowVersion())) { + log.info("Script {} is did not match version requirements, current data version:{}, current version:{}" + + "script run version({} to {}), run without data version:{}", + script.getClass().getName(), + dataVersion, + API.VERSION, + script.getHighVersion(), + script.getLowVersion(), + script.isRunWithoutDataVersion()); + continue; + } + + script.runInstruction(pdConfig); + logRun(script.getClass().getName()); + } + + writeCurrentDataVersion(); + } + + private boolean isExecuted(String className) throws PDException { + var ret = kvService.get(RUN_LOG_PREFIX + "/" + className); + return ret.length() > 0; + } + + private void logRun(String className) throws PDException { + kvService.put(RUN_LOG_PREFIX + "/" + className, API.VERSION); + } + + private String getDataVersion() throws PDException { + return kvService.get(VERSION_KEY); + } + + private boolean versionCompare(String dataVersion, String high, String low) { + var currentVersion = API.VERSION; + if (!high.equals(VersionUpgradeScript.UNLIMITED_VERSION) && high.compareTo(dataVersion) < 0 + || !low.equals(VersionUpgradeScript.UNLIMITED_VERSION) && low.compareTo(currentVersion) > 0){ + return false; + } + return true; + } + + private void writeCurrentDataVersion() throws PDException { + log.info("update db version to {}", API.VERSION); + kvService.put(VERSION_KEY, API.VERSION); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java new file mode 100644 index 0000000000..6e61d9a252 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java @@ -0,0 +1,58 @@ +package org.apache.hugegraph.pd.util; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import org.apache.commons.lang3.time.DateUtils; + +import java.text.ParseException; +import java.util.Date; + +/** + * @author zhangyingjie + * @date 2022/3/23 + **/ +public class DateUtil { + private static String DATE = 
"yyyy-MM-dd"; + private static String DATETIME = "yyyy-MM-dd HH:mm:ss"; + private static String DATETIME_MM = "yyyy-MM-dd HH:mm"; + private static String DATETIME_SSS = "yyyy-MM-dd HH:mm:ss.SSS"; + private static String TIME = "HH:mm"; + private static String TIME_SS = "HH:mm:ss"; + private static String SYS_DATE = "yyyy/MM/dd"; + private static String SYS_DATETIME = "yyyy/MM/dd HH:mm:ss"; + private static String SYS_DATETIME_MM = "yyyy/MM/dd HH:mm"; + private static String SYS_DATETIME_SSS = "yyyy/MM/dd HH:mm:ss.SSS"; + private static String NONE_DATE = "yyyyMMdd"; + private static String NONE_DATETIME = "yyyyMMddHHmmss"; + private static String NONE_DATETIME_MM = "yyyyMMddHHmm"; + private static String NONE_DATETIME_SSS = "yyyyMMddHHmmssSSS"; + private static String[] PATTERNS =new String[]{ + DATE, + DATETIME, + DATETIME_MM, + DATETIME_SSS, + TIME, + TIME_SS, + SYS_DATE, + SYS_DATETIME, + SYS_DATETIME_MM, + SYS_DATETIME_SSS, + NONE_DATE, + NONE_DATETIME, + NONE_DATETIME_MM, + NONE_DATETIME_SSS + }; + + public static String[] getDefaultPattern(){ + return PATTERNS; + } + + public static Date getDate(String date) throws PDException { + try { + return DateUtils.parseDate(date,getDefaultPattern()); + } catch (ParseException e) { + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); + } + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java new file mode 100644 index 0000000000..7d7b126579 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java @@ -0,0 +1,82 @@ +package org.apache.hugegraph.pd.util; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; + +/** + * @param + * @param + * @author lynn.bond@hotmail.com on 2022/3/10 + */ +public class HgMapCache { + private Map cache = new ConcurrentHashMap(); + private Supplier expiry; + + public static HgMapCache expiredOf(long interval){ + return new HgMapCache(new CycleIntervalPolicy(interval)); + } + + private HgMapCache(Supplier expiredPolicy) { + this.expiry = expiredPolicy; + } + + private boolean isExpired() { + if (expiry != null && expiry.get()) { + cache.clear(); + return true; + } + return false; + } + + public void put(K key, V value) { + if (key == null || value == null) return; + this.cache.put(key, value); + } + + + public V get(K key) { + if (isExpired()) return null; + return this.cache.get(key); + } + + public void removeAll() { + this.cache.clear(); + } + + public boolean remove(K key) { + if (key != null) { + this.cache.remove(key); + return true; + } + return false; + } + + public Map getAll() { + return this.cache; + } + + private static class CycleIntervalPolicy implements Supplier{ + private long expireTime=0; + private long interval=0; + + public CycleIntervalPolicy(long interval){ + this.interval=interval; + init(); + } + private void init(){ + expireTime=System.currentTimeMillis()+interval; + } + + @Override + public Boolean get() { + if(System.currentTimeMillis()>expireTime){ + init(); + return true; + } + return false; + } + + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java new file mode 100644 index 0000000000..844f306f8d --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java @@ -0,0 +1,32 @@ +package org.apache.hugegraph.pd.util; + +import 
lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2022/2/8 + */ +@Slf4j +public final class IdUtil { + private final static byte[] lock = new byte[0]; + + public static String createMillisStr(){ + return String.valueOf(createMillisId()); + } + + /** + * Create millisecond style ID; + * @return + */ + public static Long createMillisId() { + synchronized (lock) { + try { + Thread.sleep(1); + } catch (InterruptedException e) { + log.error("Failed to sleep", e); + } + + return System.currentTimeMillis(); + } + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java new file mode 100644 index 0000000000..e1fc4a3b10 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java @@ -0,0 +1,32 @@ +package org.apache.hugegraph.pd.util.grpc; + +import io.grpc.Grpc; +import io.grpc.ServerCall; +import io.grpc.stub.StreamObserver; + +import java.lang.reflect.Field; + +public class StreamObserverUtil { + + static Object fieldLock = new Object(); + static Field callField; + + public static String getRemoteIP(StreamObserver observer) { + String ip = ""; + try { + if (callField == null) { + synchronized (fieldLock) { + callField = observer.getClass().getDeclaredField("call"); + callField.setAccessible(true); + } + } + ServerCall call = (ServerCall) callField.get(observer); + if (call != null) { + ip = call.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR).toString(); + } + } catch (Exception e) { + + } + return ip; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java new file mode 100644 index 0000000000..fa053db456 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java @@ -0,0 +1,142 @@ +package org.apache.hugegraph.pd.watch; + +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchType; +import com.google.protobuf.util.JsonFormat; +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.function.Consumer; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/5 + */ +@ThreadSafe +@Slf4j +abstract class AbstractWatchSubject { + private final Map> watcherHolder = new HashMap<>(1024); + private final byte[] lock = new byte[0]; + private final WatchResponse.Builder builder = WatchResponse.newBuilder(); + private final WatchType watchType; + + protected AbstractWatchSubject(WatchType watchType) { + this.watchType = watchType; + } + + void addObserver(Long watcherId, StreamObserver responseObserver) { + synchronized (this.watcherHolder) { + + if (this.watcherHolder.containsKey(watcherId)) { + responseObserver.onError( + new Exception("The watcher-id[" + watcherId + "] of " + this.watchType.name() + + " subject has been existing, please unwatch it first")); + return; + } + + log.info("Adding a "+this.watchType+"'s watcher, watcher-id is ["+ watcherId+"]."); + this.watcherHolder.put(watcherId, responseObserver); + } + + } + + void removeObserver(Long watcherId, StreamObserver responseObserver) { + synchronized (this.watcherHolder) { + log.info("Removing a "+this.watchType+"'s watcher, 
watcher-id is ["+ watcherId+"]."); + this.watcherHolder.remove(watcherId); + } + responseObserver.onCompleted(); + } + + abstract String toNoticeString(WatchResponse res); + + public void notifyError(String message){ + synchronized (lock) { + Iterator>> iter = watcherHolder.entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry> entry = iter.next(); + Long watcherId = entry.getKey(); + WatchResponse res = this.builder.setWatcherId(watcherId).build(); + try { + entry.getValue().onError( + Status.PERMISSION_DENIED.withDescription(message).asRuntimeException()); + } catch (Throwable e) { + //log.error("Failed to send " + this.watchType.name() + "'s error message [" + toNoticeString(res) + // + "] to watcher[" + watcherId + "].", e); + + } + } + } + } + + protected void notifyWatcher(WatchResponse.Builder response) { + + Iterator>> iter = watcherHolder + .entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry> entry = iter.next(); + Long watcherId = entry.getKey(); + WatchResponse res = response.setWatcherId(watcherId).build(); + try { + synchronized (lock) { + entry.getValue().onNext(res); + } + } catch (Throwable e) { + try { + String msg = JsonFormat.printer().print(res); + log.error( + "failed to send to watcher [{}] with notice {} for ", + msg, toNoticeString(res), watcherId, e); + } catch (Exception ex) { + + } + } + } + } + + protected void notifyWatcher(Consumer c) { + synchronized (lock) { + + if(c==null){ + log.error(this.watchType.name()+"'s notice was abandoned, caused by: notifyWatcher(null)"); + return; + } + + try{ + c.accept(this.builder.clear()); + }catch (Throwable t){ + log.error(this.watchType.name()+"'s notice was abandoned, caused by:",t ); + return; + } + + Iterator>> iter = watcherHolder.entrySet().iterator(); + + while (iter.hasNext()) { + Map.Entry> entry = iter.next(); + Long watcherId = entry.getKey(); + WatchResponse res = this.builder.setWatcherId(watcherId).build(); + + try { + entry.getValue().onNext(res); + } catch (Throwable e) { + log.error("Failed to send " + this.watchType.name() + "'s notice[" + toNoticeString(res) + + "] to watcher[" + watcherId + "].", e); + + // TODO: ? try multi-times? 
+ iter.remove(); + + log.error("Removed a " + this.watchType.name() + "'s watcher[" + entry.getKey() + + "], because of once failure of sending.", e); + } + + } + + } + + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java new file mode 100644 index 0000000000..88bfbfff1c --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java @@ -0,0 +1,243 @@ +package org.apache.hugegraph.pd.watch; + +import com.baidu.hugegraph.pd.KvService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.kv.WatchEvent; +import com.baidu.hugegraph.pd.grpc.kv.WatchKv; +import com.baidu.hugegraph.pd.grpc.kv.WatchResponse; +import com.baidu.hugegraph.pd.grpc.kv.WatchState; +import com.baidu.hugegraph.pd.grpc.kv.WatchType; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +import java.util.Arrays; +import java.util.LinkedList; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.BiPredicate; + +/** watch订阅、响应处理类 + * @author zhangyingjie + * @date 2022/6/21 + **/ +@Slf4j +public class KvWatchSubject { + + private KvService kvService; + public static final String KEY_DELIMITER = "KW"; + public static final String PREFIX_DELIMITER = "PW"; + public static final String ALL_PREFIX = "W"; + public static final long WATCH_TTL = 20000L; + private static final ConcurrentMap> clients = new ConcurrentHashMap<>(); + + /** + * 会使用以下三组key: + * clients -> W@KW@key@clientId + * rocksdb key1 ->W@KW@key@clientId + * rocksdb key2 ->W@clientId@KW@key@clientId + **/ + public KvWatchSubject(PDConfig pdConfig) { + this.kvService = new KvService(pdConfig); + } + + public String getWatchKey(String key, String watchDelimiter) { + return KvService.getKeyWithoutPrefix(ALL_PREFIX, watchDelimiter, key); + } + + private void addWatchKey(String key, String delimiter, long clientId) throws PDException { + String watchKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + kvService.put(watchKey, "", WATCH_TTL); + String clientFirstKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, key, clientId); + kvService.put(clientFirstKey, "", WATCH_TTL); + } + + private void removeWatchKey(String key, String delimiter, long clientId) throws PDException { + String watchKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + kvService.delete(watchKey); + String clientFirstKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, key); + kvService.deleteWithPrefix(clientFirstKey); + } + + /** + * 增加观察者 + * @param key 观察的key + * @param clientId 客户端标识 + * @param observer + * @param delimiter 观察类型标识符,对前缀监听或者对key的监听可以通过此参数区分 + * @throws PDException + */ + public void addObserver(String key, long clientId, StreamObserver observer, + String delimiter) throws PDException { + String keyWithoutPrefix = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + clients.putIfAbsent(keyWithoutPrefix, observer); + addWatchKey(key, delimiter, clientId); + log.info("client:{},start to watch key:{}", clientId, key); + } + + public void removeObserver(String key, long clientId, String delimiter) throws PDException { + removeWatchKey(key, delimiter, clientId); + String keyWithoutPrefix = 
KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + clients.remove(keyWithoutPrefix); + } + + BiPredicate equal = (kvKey, watchKey) -> kvKey.equals(watchKey); + BiPredicate startWith = (kvKey, watchKey) -> kvKey.startsWith(watchKey); + + /** + * 通知观察者方法,key和prefix都使用此方法,predicate不同 + * @param key + * @param watchType 观察类型,一般是增加和删除 + * @param predicate 判断等于或者是前匹配,用来适配key或prefix观察 + * @param kvs + * @throws PDException + */ + public void notifyObserver(String key, WatchType watchType, BiPredicate predicate, + WatchKv... kvs) throws PDException { + boolean isEqual = predicate.equals(equal); + String watchDelimiter = isEqual ? KEY_DELIMITER : PREFIX_DELIMITER; + String watchKeyPrefix = isEqual ? key : ""; + String storeKey = getWatchKey(watchKeyPrefix, watchDelimiter); + Map map = kvService.scanWithPrefix(storeKey); + String delimiter = String.valueOf(KvService.KV_DELIMITER); + WatchResponse watchResponse; + for (String keyAndClient : map.keySet()) { + String[] values = keyAndClient.split(delimiter); + assert values.length == 4; + String watchKey = values[2]; + String c = values[3]; + long clientId = new Long(c); + LinkedList watchEvents = new LinkedList<>(); + for (WatchKv kv : kvs) { + String kvKey = kv.getKey(); + boolean match = predicate.test(kvKey, watchKey); + if (!match) { + continue; + } + WatchKv watchKv = WatchKv.newBuilder().setKey(kvKey).setValue(kv.getValue()).build(); + WatchEvent event = WatchEvent.newBuilder().setCurrent(watchKv).setType(watchType).build(); + watchEvents.add(event); + } + StreamObserver observer = clients.get(keyAndClient); + watchResponse = WatchResponse.newBuilder().setState(WatchState.Started).setClientId(clientId) + .addAllEvents(watchEvents).build(); + + try { + if (observer != null) { + synchronized (observer) { + // log.info("notifyObserver for clientId:{}", clientId); + observer.onNext(watchResponse); + } + } else { + log.info("cannot find StreamObserver for clientId:{}", clientId); + } + } catch (Exception e) { + log.warn("notifyObserver with error:{}", clientId, e); + } + } + } + + public void notifyAllObserver(String key, WatchType watchType, WatchKv[] kvs) throws PDException { + notifyObserver(key, watchType, equal, kvs); + notifyObserver(key, watchType, startWith, kvs); + } + + /** + * 续活客户端 + * 1.往客户端发一个alive的消息,带重试哈 + * 2.如果有响应,则续活之前保存的那两组key + * 3.如果多次都失败,则删除内存和rocksdb的数据 + */ + public void keepClientAlive() { + WatchResponse testAlive = WatchResponse.newBuilder().setState(WatchState.Alive).build(); + Set>> entries = clients.entrySet(); + Map.Entry>[] array = entries.toArray(new Map.Entry[0]); + Arrays.stream(array).parallel().forEach(entry -> { + StreamObserver value = entry.getValue(); + String key = entry.getKey(); + String delimiter = KvService.getDelimiter(); + String client = key.split(delimiter)[3]; + String clientKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, client); + if (value == null) { + removeClient(null, key, clientKey); + } + boolean done = false; + String removes = client + KvService.KV_DELIMITER; + for (int i = 0; i < 3; i++) { + try { + synchronized (value) { + value.onNext(testAlive); + } + Map clientKeys = kvService.scanWithPrefix(clientKey); + for (Map.Entry keyEntry : clientKeys.entrySet()) { + String entryKey = keyEntry.getKey(); + String aliveKey = entryKey.replaceFirst(removes, ""); + boolean keepAliveKey = kvService.keepAlive(aliveKey); + boolean keepAliveEntry = kvService.keepAlive(entryKey); + // log.info("keep alive client:{},{}:{},{}:{}", client, aliveKey, keepAliveKey, + // entryKey, + // 
keepAliveEntry); + done = true; + } + break; + } catch (Exception e) { + try { + Thread.sleep(100); + } catch (InterruptedException ex) { + log.info("keep alive client {} with error:{}", client, e); + } + } + } + if (!done) { + log.info("remove client {} for no data", client); + removeClient(value, key, clientKey); + } + }); + } + + private void removeClient(StreamObserver value, String key, String clientKey) { + try { + log.info("remove null observer,client:", clientKey); + kvService.deleteWithPrefix(clientKey); + if (value != null) { + synchronized (value) { + value.onCompleted(); + } + } + clients.remove(key); + } catch (PDException e) { + log.error("remove client with error:", e); + } + } + + /** + * 通知客户端leader切换了,重连 + */ + public void notifyClientChangeLeader() { + WatchResponse response = WatchResponse.newBuilder().setState(WatchState.Leader_Changed).build(); + for (Map.Entry> entry : clients.entrySet()) { + StreamObserver value = entry.getValue(); + String key = entry.getKey(); + String client = key.split(KvService.getDelimiter())[3]; + String clientKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, client); + if (value == null) removeClient(null, key, clientKey); + for (int i = 0; i < 3; i++) { + try { + synchronized (value) { + value.onNext(response); + } + removeClient(value, key, clientKey); + break; + } catch (Exception e) { + try { + Thread.sleep(100); + } catch (InterruptedException ex) { + + } + } + } + } + } +} \ No newline at end of file diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java new file mode 100644 index 0000000000..1173f8c1bf --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java @@ -0,0 +1,51 @@ +package org.apache.hugegraph.pd.watch; + +import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchType; + +import javax.annotation.concurrent.ThreadSafe; + +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentValid; + +/** + * The subject of partition change. 
+ * @author lynn.bond@hotmail.com created on 2021/11/26 + */ +@ThreadSafe +final class NodeChangeSubject extends AbstractWatchSubject { + + NodeChangeSubject() { + super(WatchType.WATCH_TYPE_STORE_NODE_CHANGE); + } + + @Override + String toNoticeString(WatchResponse res) { + StringBuilder sb = new StringBuilder(); + return sb.append("graph:").append(res.getNodeResponse().getGraph()) + .append(",") + .append("nodeId:").append(res.getNodeResponse().getNodeId()) + .toString(); + } + + public void notifyWatcher(NodeEventType nodeEventType, String graph, long nodeId) { + isArgumentNotNull(nodeEventType, "nodeEventType"); + + super.notifyWatcher(builder -> { + builder.setNodeResponse( + builder.getNodeResponseBuilder().clear() + .setGraph(graph) + .setNodeId(nodeId) + .setNodeEventType(nodeEventType) + .build() + ); + + }); + } + + public void notifyError(String message){ + super.notifyError(message); + } + +} \ No newline at end of file diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java new file mode 100644 index 0000000000..0328dcafad --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java @@ -0,0 +1,190 @@ +package org.apache.hugegraph.pd.watch; + +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; +import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; +import com.baidu.hugegraph.pd.grpc.watch.WatchCreateRequest; +import com.baidu.hugegraph.pd.grpc.watch.WatchRequest; +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchType; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/4 + */ +@Slf4j +@ThreadSafe +public class PDWatchSubject implements StreamObserver { + public final static Map subjectHolder = new ConcurrentHashMap<>(); + private final static byte[] lock = new byte[0]; + + private final StreamObserver responseObserver; + private AbstractWatchSubject subject; + private Long watcherId; + + static { + subjectHolder.put(WatchType.WATCH_TYPE_PARTITION_CHANGE.name(), new PartitionChangeSubject()); + subjectHolder.put(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name(), new NodeChangeSubject()); + subjectHolder.put(WatchType.WATCH_TYPE_GRAPH_CHANGE.name(), new NodeChangeSubject()); + subjectHolder.put(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name(), new ShardGroupChangeSubject()); + } + + public static StreamObserver addObserver(StreamObserver responseObserver) { + isArgumentNotNull(responseObserver, "responseObserver"); + return new PDWatchSubject(responseObserver); + } + + /** + * Notify partition change + * @param changeType change type + * @param graph name of graph + * @param partitionId id of partition + */ + public static void notifyPartitionChange(ChangeType changeType, String graph, int partitionId) { + ((PartitionChangeSubject) subjectHolder.get(WatchType.WATCH_TYPE_PARTITION_CHANGE.name())) + .notifyWatcher(changeType.getGrpcType(), graph, partitionId); + + } + + public static void notifyShardGroupChange(ChangeType changeType, int groupId, Metapb.ShardGroup group) { + ((ShardGroupChangeSubject) 
subjectHolder.get(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name())) + .notifyWatcher(changeType.getGrpcType(), groupId, group); + } + + /** + * Notify store-node change + * @param changeType change type + * @param graph name of graph + * @param nodeId id of partition + */ + public static void notifyNodeChange(NodeEventType changeType, String graph, long nodeId) { + ((NodeChangeSubject) subjectHolder.get(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name())) + .notifyWatcher(changeType, graph, nodeId); + } + + public static void notifyChange(WatchType type, + WatchResponse.Builder builder) { + subjectHolder.get(type.name()).notifyWatcher(builder); + } + + public static void notifyError(String message){ + subjectHolder.forEach((k, v)->{ + v.notifyError(message); + }); + } + + private PDWatchSubject(StreamObserver responseObserver) { + this.responseObserver = responseObserver; + } + + private static Long createWatcherId() { + synchronized (lock) { + Thread.yield(); + try { + Thread.sleep(1); + } catch (InterruptedException e) { + log.error("Failed to sleep", e); + } + + return System.currentTimeMillis(); + } + + } + + private void cancelWatcher() { + + if (this.subject == null) { + this.responseObserver.onError(new Exception("Invoke cancel-watch before create-watch.")); + return; + } + + this.subject.removeObserver(this.watcherId, this.responseObserver); + } + + + private WatchType getWatchType(WatchCreateRequest request) { + WatchType watchType = request.getWatchType(); + + if (watchType.equals(WatchType.WATCH_TYPE_UNKNOWN)) { + this.responseObserver.onError(new Exception("unknown watch type.")); + return null; + } + + return watchType; + } + + private AbstractWatchSubject getSubject(WatchType watchType) { + AbstractWatchSubject subject = subjectHolder.get(watchType.name()); + + if (subject == null) { + responseObserver.onError(new Exception("Unsupported watch-type: " + watchType.name())); + return null; + } + + return subject; + } + + + private void addWatcher(WatchCreateRequest request) { + if (this.subject != null) { + return; + } + WatchType watchType = getWatchType(request); + if (watchType == null) return; + + this.subject = getSubject(watchType); + this.watcherId = createWatcherId(); + + this.subject.addObserver(this.watcherId, this.responseObserver); + } + + @Override + public void onNext(WatchRequest watchRequest) { + + if (watchRequest.hasCreateRequest()) { + this.addWatcher(watchRequest.getCreateRequest()); + return; + } + + if (watchRequest.hasCancelRequest()) { + this.cancelWatcher(); + } + + } + + @Override + public void onError(Throwable throwable) { + this.cancelWatcher(); + } + + @Override + public void onCompleted() { + this.cancelWatcher(); + } + + public enum ChangeType { + ADD(WatchChangeType.WATCH_CHANGE_TYPE_ADD), + ALTER(WatchChangeType.WATCH_CHANGE_TYPE_ALTER), + DEL(WatchChangeType.WATCH_CHANGE_TYPE_DEL), + + USER_DEFINED (WatchChangeType.WATCH_CHANGE_TYPE_SPECIAL1); + + private final WatchChangeType grpcType; + + ChangeType(WatchChangeType grpcType) { + this.grpcType = grpcType; + } + + public WatchChangeType getGrpcType() { + return this.grpcType; + } + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java new file mode 100644 index 0000000000..1a69612844 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java @@ -0,0 +1,48 @@ +package org.apache.hugegraph.pd.watch; + +import 
com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchType; + +import javax.annotation.concurrent.ThreadSafe; + +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentValid; + +/** + * The subject of partition change. + * @author lynn.bond@hotmail.com created on 2021/11/5 + */ +@ThreadSafe +final class PartitionChangeSubject extends AbstractWatchSubject { + + PartitionChangeSubject() { + super(WatchType.WATCH_TYPE_PARTITION_CHANGE); + } + + @Override + String toNoticeString(WatchResponse res) { + StringBuilder sb = new StringBuilder(); + return sb.append("graph:").append(res.getPartitionResponse().getGraph()) + .append(",") + .append("partitionId:").append(res.getPartitionResponse().getPartitionId()) + .toString(); + } + + public void notifyWatcher(WatchChangeType changeType, String graph, int partitionId) { + isArgumentNotNull(changeType, "changeType"); + isArgumentValid(graph, "graph"); + + super.notifyWatcher(builder -> { + builder.setPartitionResponse( + builder.getPartitionResponseBuilder().clear() + .setGraph(graph) + .setPartitionId(partitionId) + .setChangeType(changeType) + .build() + ); + + }); + } + +} \ No newline at end of file diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java new file mode 100644 index 0000000000..8046636808 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java @@ -0,0 +1,37 @@ +package com.baidu.hugegraph.pd.watch; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchType; + +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; + +public class ShardGroupChangeSubject extends AbstractWatchSubject{ + + protected ShardGroupChangeSubject() { + super(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE); + } + + @Override + String toNoticeString(WatchResponse res) { + StringBuilder sb = new StringBuilder(); + sb.append("shard group:") + .append(res.getShardGroupResponse().getShardGroup().toString().replace("\n", " ")); + return sb.toString(); + } + + public void notifyWatcher(WatchChangeType changeType, int groupId, Metapb.ShardGroup shardGroup) { + isArgumentNotNull(changeType, "changeType"); + + super.notifyWatcher(builder -> { + builder.setShardGroupResponse( + builder.getShardGroupResponseBuilder().clear() + .setShardGroupId(groupId) + .setType(changeType) + .setShardGroup(shardGroup) + .build() + ); + }); + } +} diff --git a/hg-pd-service/src/main/resources/application.yml b/hg-pd-service/src/main/resources/application.yml new file mode 100644 index 0000000000..06306e02fe --- /dev/null +++ b/hg-pd-service/src/main/resources/application.yml @@ -0,0 +1,63 @@ +spring: + application: + name: hugegraph-pd + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +grpc: + port: 8686 + # grpc的服务地址 + host: 127.0.0.1 + netty-server: + max-inbound-message-size: 100MB + +license: + verify-path: 'conf/verify-license.json' + license-path: 'conf/hugegraph.license' + +server: + port : 8620 + +pd: + # 定期检查集群是否健康的时间间隔,单位秒 + patrol-interval: 300 + # 存储路径 + data-path: tmp/pd/8610 + + # 
最少节点数,少于该数字,集群停止入库 + initial-store-count: 1 + # 初始store列表,在列表内的store自动激活 + initial-store-list: 127.0.0.1:8502 + +raft: + # 本机raft服务地址 + address: 127.0.0.1:8610 + # PD集群服务地址 + peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612 + # raft rpc读写超时时间,单位毫秒 + rpc-timeout: 10000 + # 快照生成时间间隔,单位秒 + snapshotInterval: 300 + metrics: true +store: + # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒 + keepAlive-timeout: 300 + # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒 + max-down-time: 86400 +partition: + # 默认每个分区副本数 + default-shard-count: 3 + # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count + store-max-shard-count: 12 + +discovery: + #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 + heartbeat-try-count: 3 \ No newline at end of file diff --git a/hg-pd-service/src/main/resources/banner.txt b/hg-pd-service/src/main/resources/banner.txt new file mode 100644 index 0000000000..27babf0e9a --- /dev/null +++ b/hg-pd-service/src/main/resources/banner.txt @@ -0,0 +1,6 @@ + _ _ _____ _____ _____ + | | | |/ ____| | __ \| __ \ + | |__| | | __ ______| |__) | | | | + | __ | | |_ |______| ___/| | | | + | | | | |__| | | | | |__| | + |_| |_|\_____| |_| |_____/ diff --git a/hg-pd-service/src/main/resources/log4j2.xml b/hg-pd-service/src/main/resources/log4j2.xml new file mode 100644 index 0000000000..a157b6412b --- /dev/null +++ b/hg-pd-service/src/main/resources/log4j2.xml @@ -0,0 +1,122 @@ + + + + + + logs + hugegraph-pd + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hg-pd-service/src/main/resources/private-keys.store b/hg-pd-service/src/main/resources/private-keys.store new file mode 100644 index 0000000000000000000000000000000000000000..258075b0d7b3670bb6eb00094ec3433d2bc00c2f GIT binary patch literal 1299 zcmezO_TO6u1_mY|W&~rdf}+f_#FEtP)JmXG&b)3p0U+IP(8Snnz{kd=&Bn;WsKq46 z$jHjT(!^NqB{sKy!R$S6#XNgs7OV&su=_qcqxkp?v43~6B{+m?W{H%fIxr(lpIcO%zi00)SpGkuEqmE*{o^ib^0(@>%9gr_ zZ%tdAYW94AT&n7WFC8;CZO(k?EM~xGysav4oupQ8*u-0Ip$;AKeXT#&Zq!-!ojvi# z-rHvdK5%|yo;+ob+i$mLD%;g-{uLAkZ@O4I>)~FpIcGyIbEtpYY{7T(<;7PmCl~l= zO3D6Kkx#2O_Sau;8FuixYJagzE^h8h4UbV;CGR~>fEvs74 zg28I*)5N~-O}l5;#pUp;tz8*ZS@NsnPDhQn;!Wo5>T4cl$MXWC1Q=bc5qhQumJAHc zmIh7CrUp$+pBFGQF)}f+>$ndv5xKXxLNHgfZ*Bre!Po94h+#Y1}-i6_?oPEQ#K($QQSO=xR-;uVx z5x@2y{kqHG+v(Y%IoBJl8)QxLKcyvX&z>2ubjkerFIcJtln(jtUU~iB_G8jNtizul zTd_=(pXGP+x64Q64l|jC2uF!cH2PNhd7XCRb*=ZGl$a&wUi4r*&Lqk(Jt3f9RGItC zrb#=S7QJra{tzMa738$<4;Qpvn{9jV^q$o*W;?=nR!&*J`4A~q zeY>ti`adgV7ietQ;we_B{K7{=OnCcwPNSX^%O2W@Rdt8d^lCgsws><*8k3XYB4vqGJxE1 zyzyV*q#aq0>hEosd_&sMZhGjPlq;9YN@}kO-{;=?k|ut1*ThBJ{B<+ktp$bU*d|> z17tY2ytry9XFV@q-A0sj&aBU1pu;4hq0aC)l+AYTv)YsEG&74}kc pnItpQn_JcVxlcHLRjC^3>K}WyL{jo{$^6#e?qSKFL!!&y0|4Xr{n`Kk literal 0 HcmV?d00001 diff --git a/hg-pd-service/src/main/resources/public-certs.store b/hg-pd-service/src/main/resources/public-certs.store new file mode 100644 index 0000000000000000000000000000000000000000..0da6d226993ecbab1650809dd3456e549d3844af GIT binary patch literal 892 zcmezO_TO6u1_mY|W(3n*1*J(jnaRniMI}I?oOwM}-VCe}dZq@J3=GVc22ISS22D(# z7cet1GBL5_WMuakaI>*%wRxPgU}0uuFvu|!FyLcj4rO5zW)IEF&dV>)gNblph_GXb zaAAmWV~8-rMGWM`c?~TMEDX&J%uURUO`^nkjX+!zD3{KzX=2=A(8Q<%3l2sgN1my% zk>PKBaHDLgkY><*uQ_^eo;?31xIM_`y$i!(Is1lffohqqu?|XszawpXBYy2a`gNDV zx6`vjbFMd9H^`dge@aW(o;@>Q>5}>LU$9gQC>`?Oz4H3M?Z>2lScgA9wqltmKg;jt zZss$WDKSgVz39PsoJo{ndO|?Is51ANO_O#uEqdL; z{UJi;E68czA1-LUHrw{z={>7s%yxwDtemob_b;ngO+UkZxNX^+(gP#I`*vN4^nX^! 
zF3{Mp#Z#$nDF-VoJKt-mOZo)tLhG^>D73!c%7W~=4GW5^A^{0|57wf`uIDk zic!V-%#x4$Rs>GyT+w|0Vbs(4Y?Hdv;u@+$RZ|#!tpA<&)M9RIWdOP3c;mmqNjtJ0 z)!*AN`G&Ni-Sp5oDOWC)mDFAnzR$h&Bct7)myUMrIjP6pZvQV}=M#AqesP`pq8HJ| zSIjdCN*|uQxk{16@@>9nY{klh%XBqMw68X%@7l>}J@vRi|LRso{^fBEDgPYiCU7`T zR`kCvI&c2|Q%`i*YVswgy6rF3Z3|eeXdrJO3rx4Nd@N!tB2nV=zQh%$2gq=4d2!WJ z&U#+Jx{WC5oLQg2K!-_0L!IGqD4XrvXSFBSY3%0mV%}4;l}V&lE9|A@GD&8pH@B+! hbDwbhs!}!L3FzK_@~w;Bl-ae + + + + + logs + hugegraph-pd + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hg-pd-test/pom.xml b/hg-pd-test/pom.xml new file mode 100644 index 0000000000..602bdb13d2 --- /dev/null +++ b/hg-pd-test/pom.xml @@ -0,0 +1,350 @@ + + + + hugegraph-pd-root + com.baidu.hugegraph + 3.6.5-SNAPSHOT + + 4.0.0 + + hg-pd-test + + + true + + 2.0.0-RC.3 + + + + + jacoco + + false + + + + + org.jacoco + jacoco-maven-plugin + 0.8.4 + + + **/grpc/**.* + **/config/**.* + + + + + + prepare-agent + + + + + + + + + + + + junit + junit + 4.13.2 + + + com.baidu.hugegraph + hg-store-grpc + ${project.version} + + + com.baidu.hugegraph + hg-store-common + ${project.version} + + + org.projectlombok + lombok + ${lombok.version} + + + org.springframework + spring-context-support + + + org.springframework + spring-test + test + + + org.apache.logging.log4j + log4j-slf4j-impl + ${log4j2.version} + + + + com.baidu.hugegraph + hg-pd-client + ${project.version} + + + + com.google.code.gson + gson + 2.8.9 + + + com.baidu.hugegraph + hg-pd-grpc + ${project.version} + + + commons-io + commons-io + 2.7 + + + com.fasterxml.jackson.core + jackson-databind + 2.13.0 + + + com.fasterxml.jackson.core + jackson-core + 2.13.0 + + + com.fasterxml.jackson.core + jackson-annotations + 2.13.0 + + + + + + + org.springframework.boot + spring-boot-starter-test + + + org.springframework.boot + spring-boot-starter-logging + + + + + com.baidu.hugegraph + hg-pd-clitools + ${project.version} + + + com.baidu.hugegraph + hg-pd-common + ${project.version} + + + + com.baidu.hugegraph + hg-pd-core + ${project.version} + + + com.baidu.hugegraph + hg-pd-dist + ${project.version} + compile + + + com.baidu.hugegraph + hugegraph-pd + ${project.version} + + + + + + org.powermock + powermock-classloading-xstream + ${powermock.version} + + + org.powermock + powermock-module-junit4-rule + ${powermock.version} + + + org.powermock + powermock-api-support + ${powermock.version} + + + org.powermock + powermock-module-junit4 + 2.0.0-RC.3 + compile + + + org.powermock + powermock-api-mockito2 + 2.0.0-RC.3 + compile + + + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20 + + + client-test + + ${basedir}/src/main/java/ + + ${basedir}/target/classes/ + + + **/PDClientSuiteTest.java + + + + + core-test + + ${basedir}/src/main/java/ + + ${basedir}/target/classes/ + + + **/PDCoreSuiteTest.java + + + + + cli-tools-test + + ${basedir}/src/main/java/ + + ${basedir}/target/classes/ + + + **/CliToolsSuiteTest.java + + + + + common-test + + ${basedir}/src/main/java/ + + ${basedir}/target/classes/ + + + **/CommonSuiteTest.java + + + + + service-test + + ${basedir}/src/main/java/ + + ${basedir}/target/classes/ + + + **/ServerSuiteTest.java + + + + + + + + org.jacoco + jacoco-maven-plugin + 0.8.4 + + + pre-test + + prepare-agent + + + + post-test + test + + report-aggregate + + + ${basedir}/target/site/jacoco + + + + + + com/baidu/hugegraph/pd/rest/*.class + 
com/baidu/hugegraph/pd/service/*.class + com/baidu/hugegraph/pd/model/*.class + com/baidu/hugegraph/pd/watch/*.class + com/baidu/hugegraph/pd/pulse/*.class + com/baidu/hugegraph/pd/license/*.class + com/baidu/hugegraph/pd/notice/*.class + com/baidu/hugegraph/pd/util/*.class + com/baidu/hugegraph/pd/metrics/*.class + com/baidu/hugegraph/pd/util/grpc/*.class + com/baidu/hugegraph/pd/boot/*.class + com/baidu/hugegraph/pd/grpc/**/*.class + com/baidu/hugegraph/pd/raft/*.class + + **/RaftKVStore.class + + + + + + + + src/main/resources/ + true + + + + + \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java new file mode 100644 index 0000000000..3e2c205bb3 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java @@ -0,0 +1,30 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.client.PDClient; +import com.baidu.hugegraph.pd.client.PDConfig; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.runner.RunWith; +import org.mockito.runners.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class BaseClientTest { + public static PDClient pdClient; + + public long storeId = 0; + public final String storeAddr = "localhost"; + public final String graphName = "default/hugegraph/g"; + + @BeforeClass + public static void beforeClass() throws Exception { + PDConfig config = PDConfig.of("localhost:8686"); +// PDConfig config = PDConfig.of("10.81.116.77:8986"); + config.setEnableCache(true); + pdClient = PDClient.create(config); + } + + @After + public void teardown() throws Exception { + // pass + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java new file mode 100644 index 0000000000..9b269ca9cb --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java @@ -0,0 +1,62 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.client.DiscoveryClientImpl; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.Query; +import org.junit.Before; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.Consumer; + +public class DiscoveryClientTest { + + private DiscoveryClientImpl client; + + @Before + public void setUp() { + client = getClient("appName", "localhost:8654", new HashMap()); + } + + @Test + public void testGetRegisterNode() { + // Setup + try { + Consumer result = client.getRegisterConsumer(); + final NodeInfo expectedResult = NodeInfo.newBuilder() + .setAppName("appName") + .build(); + + Thread.sleep(3000); + Query query = Query.newBuilder().setAppName("appName") + .setVersion("0.13.0").build(); + + // Run the test + client.getNodeInfos(query); + } catch (InterruptedException e) { + e.printStackTrace(); + } finally { + client.close(); + } + + } + + private DiscoveryClientImpl getClient(String appName, String address, + Map labels) { + DiscoveryClientImpl discoveryClient = null; + try { + discoveryClient = DiscoveryClientImpl.newBuilder().setCenterAddress( + "localhost:8686").setAddress(address).setAppName(appName) + .setDelay(2000) + .setVersion("0.13.0") + .setId("0").setLabels(labels) + .build(); + discoveryClient.scheduleTask(); + } catch (Exception e) { + 
e.printStackTrace(); + } + + return discoveryClient; + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java new file mode 100644 index 0000000000..3cec45e974 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java @@ -0,0 +1,107 @@ +package org.apache.hugegraph.pd.client; + +import com.baidu.hugegraph.pd.client.KvClient; +import com.baidu.hugegraph.pd.client.PDConfig; +import com.baidu.hugegraph.pd.grpc.kv.KResponse; +import com.baidu.hugegraph.pd.grpc.kv.ScanPrefixResponse; +import com.baidu.hugegraph.pd.grpc.kv.WatchEvent; +import com.baidu.hugegraph.pd.grpc.kv.WatchKv; +import com.baidu.hugegraph.pd.grpc.kv.WatchResponse; +import com.baidu.hugegraph.pd.grpc.kv.WatchState; +import com.baidu.hugegraph.pd.grpc.kv.WatchType; +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.AbstractStub; +import org.apache.commons.lang3.StringUtils; +import static org.assertj.core.api.Assertions.assertThat; +import org.junit.Before; +import org.junit.Test; +import static org.mockito.Mockito.mock; + +import java.util.function.Consumer; + +public class KvClientTest extends BaseClientTest { + + private KvClient client; + + @Before + public void setUp() { + client = new KvClient<>(PDConfig.of("localhost:8686")); + } + + @Test + public void testCreateStub() { + // Setup + // Run the test + try{ + final AbstractStub result = client.createStub(); + } catch (Exception e) { + + } finally { + + } + + + // Verify the results + } + + @Test + public void testCreateBlockingStub() { + // Setup + // Run the test + try{ + final AbstractBlockingStub result = client.createBlockingStub(); + } catch (Exception e) { + + } finally { + + } + } + + String key = "key"; + String value = "value"; + + @Test + public void testPutAndGet() throws Exception { + // Run the test + try { + client.put(key, value); + // Run the test + KResponse result = client.get(key); + + // Verify the results + assertThat(result.getValue()).isEqualTo(value); + client.delete(key); + result = client.get(key); + assertThat(StringUtils.isEmpty(result.getValue())); + client.deletePrefix(key); + client.put(key + "1", value); + client.put(key + "2", value); + ScanPrefixResponse response = client.scanPrefix(key); + assertThat(response.getKvsMap().size() == 2); + client.putTTL(key + "3", value, 1000); + client.keepTTLAlive(key + "3"); + final Consumer mockConsumer = mock(Consumer.class); + + // Run the test + client.listen(key + "3", mockConsumer); + client.listenPrefix(key + "4", mockConsumer); + WatchResponse r = WatchResponse.newBuilder().addEvents( + WatchEvent.newBuilder().setCurrent( + WatchKv.newBuilder().setKey(key).setValue("value") + .build()).setType(WatchType.Put).build()) + .setClientId(0L) + .setState(WatchState.Starting) + .build(); + client.getWatchList(r); + client.getWatchMap(r); + client.lock(key, 3000L); + client.isLocked(key); + client.unlock(key); + client.lock(key, 3000L); + client.keepAlive(key); + client.close(); + } catch (Exception e) { + + } + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java new file mode 100644 index 0000000000..f862196c37 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java @@ -0,0 +1,18 @@ +package org.apache.hugegraph.pd.client; + +import lombok.extern.slf4j.Slf4j; +import 
org.junit.runner.RunWith; +import org.junit.runners.Suite; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + PDClientTest.class, + KvClientTest.class, + DiscoveryClientTest.class +}) + +@Slf4j +public class PDClientSuiteTest { + +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java new file mode 100644 index 0000000000..ddedb78c74 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java @@ -0,0 +1,407 @@ +package org.apache.hugegraph.pd.client; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +import com.baidu.hugegraph.pd.client.PDClient; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.MetaTask; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; + +import org.junit.Test; +import org.mockito.Mockito; + +/** + * @author zhengfuquan + * @date 2022/11/28 + **/ +public class PDClientTest extends BaseClientTest { + @Test + public void testDbCompaction() { + System.out.println("testDbCompaction start"); + + try { + pdClient.dbCompaction(""); + pdClient.dbCompaction(); + } catch (PDException e) { + e.printStackTrace(); + } + + System.out.println("pdclienttest testDbCompaction end"); + } + @Test + public void testRegisterStore() { + Metapb.Store store = Metapb.Store.newBuilder().build(); + try { + pdClient.registerStore(store); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testSetGraph(){ + Metapb.Graph graph = Metapb.Graph.newBuilder().setGraphName("test").build(); + try { + pdClient.setGraph(graph); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetGraph() { + try { + pdClient.getGraph("test"); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetStore() { + try { + pdClient.getStore(0L); + } catch (PDException e) { + e.printStackTrace(); + } + } + @Test + public void testUpdateStore() { + Metapb.Store store = Metapb.Store.newBuilder().build(); + try { + pdClient.updateStore(store); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetActiveStores(){ + try { + pdClient.getActiveStores("test"); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetAllStores() { + try { + pdClient.getAllStores("test"); + } catch (PDException e) { + e.printStackTrace(); + } + } + +// @Test +// public void testStoreHeartbeat(){ +// Metapb.StoreStats stats = Metapb.StoreStats.newBuilder().build(); +// try { +// pdClient.storeHeartbeat(stats); +// } catch (PDException e) { +// e.printStackTrace(); +// } +// } + + @Test + public void testKeyToCode(){ + pdClient.keyToCode("test", "test".getBytes(StandardCharsets.UTF_8)); + } + + @Test + public void testScanPartitions() { + try { + pdClient.scanPartitions("test", "1".getBytes(StandardCharsets.UTF_8), + "9".getBytes(StandardCharsets.UTF_8)); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetPartitionsByStore() { + try { + pdClient.getPartitionsByStore(0L); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testQueryPartitions() { + try { + pdClient.queryPartitions(0L, 0); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetPartitions() { + try { + pdClient.getPartitions(0L, "test"); + } catch (PDException e) { 
+ e.printStackTrace(); + } + } + + @Test + public void testUpdatePartitionLeader() { + System.out.println("updatePartitionLeader start"); + + pdClient.updatePartitionLeader("aaa", 0, 0L); + } + + @Test + public void testInvalidPartitionCache() { + pdClient.invalidPartitionCache(); + } + + @Test + public void testInvalidStoreCache() { + pdClient.invalidStoreCache(0L); + } + + @Test + public void testUpdatePartitionCache() { + Metapb.Partition partition = Metapb.Partition.newBuilder().build(); + Metapb.Shard leader = Metapb.Shard.newBuilder().build(); + pdClient.updatePartitionCache(partition, leader); + } + + @Test + public void testGetIdByKey() { + try { + pdClient.getIdByKey("test", 1); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testResetIdByKey() { + try { + pdClient.resetIdByKey("test"); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetGetLeader() { + try { + pdClient.getLeader(); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetMembers() { + try { + pdClient.getMembers(); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetClusterStats() { + try { + pdClient.getClusterStats(); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testAddEventListener() { + PDClient.PDEventListener listener = Mockito.mock(PDClient.PDEventListener.class); + pdClient.addEventListener(listener); + } + + @Test + public void testGetWatchClient() { + pdClient.getWatchClient(); + } + + @Test + public void testGetPulseClient() { + pdClient.getPulseClient(); + } + + @Test + public void testGetStoreStatus() { + try { + pdClient.getStoreStatus(true); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetPartition() { + try { + pdClient.getPartition("test", "test".getBytes(StandardCharsets.UTF_8)); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testSetGraphSpace() { + try { + pdClient.setGraphSpace("test", 1L); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetGraphSpace() { + try { + pdClient.getGraphSpace("test"); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testSetPDConfig() { + try { + pdClient.setPDConfig(0, "", 0, 0L); + } catch (PDException e) { + e.printStackTrace(); + } + Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder().build(); + + try { + pdClient.setPDConfig(pdConfig); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testGetPDConfig() { + try { + pdClient.getPDConfig(0L); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testChangePeerList() { + try { + pdClient.changePeerList(""); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testSplitData() { + try { + Metapb.PDConfig config = pdClient.getPDConfig(); + pdClient.setPDConfig(config.toBuilder() + .setMaxShardsPerStore(12) + .build()); + System.out.println(pdClient.getPDConfig()); + + // 开始分区分裂 + pdClient.splitData(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + @Test + public void testBalancePartition() { + try { + pdClient.balancePartition(); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testMovePartition() { + Pdpb.OperationMode mode = Pdpb.OperationMode.Auto; + List params = new ArrayList<>(1); + try { + pdClient.movePartition(mode, params); + } catch 
(PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testReportTask() { + MetaTask.Task task = MetaTask.Task.newBuilder().build(); + try { + pdClient.reportTask(task); + } catch (PDException e) { + e.printStackTrace(); + } + } + + + @Test + public void testBalanceLeaders() { + try { + pdClient.balanceLeaders(); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testDelStore() { + try { + pdClient.delStore(0L); + } catch (PDException e) { + e.printStackTrace(); + } + } + +// @Test +// public void testgetQuota() { +// try { +// pdClient.getQuota(); +// } catch (PDException e) { +// e.printStackTrace(); +// } +// } + + @Test + public void testUpdatePartition() { + List partitions = new ArrayList<>(1); + try { + pdClient.updatePartition(partitions); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testDelPartition() { + try { + pdClient.delPartition("test", 0); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void testdelGraph() { + try { + pdClient.delGraph("test"); + } catch (PDException e) { + e.printStackTrace(); + } + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java new file mode 100644 index 0000000000..248eba841b --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java @@ -0,0 +1,18 @@ +package org.apache.hugegraph.pd.clitools; + +import org.junit.After; +import org.junit.BeforeClass; + + + +public class BaseCliToolsTest { + @BeforeClass + public static void init() { + + } + + @After + public void teardown() { + // pass + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java new file mode 100644 index 0000000000..6e69aa1e05 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java @@ -0,0 +1,17 @@ +package org.apache.hugegraph.pd.clitools; + +import lombok.extern.slf4j.Slf4j; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + MainTest.class +}) + +@Slf4j +public class CliToolsSuiteTest { + + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java new file mode 100644 index 0000000000..d52dd09e11 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java @@ -0,0 +1,68 @@ +package org.apache.hugegraph.pd.clitools; + +import com.baidu.hugegraph.pd.clitools.Main; +import com.baidu.hugegraph.pd.common.PDException; +import lombok.extern.slf4j.Slf4j; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +@Slf4j +public class MainTest extends BaseCliToolsTest{ + + + @Test + public void getConfig() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad"}); + } + // @Test + public void setBatchTrue() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad= true "}); + } + + // @Test + public void setBatchFalse() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad=false"}); + } + + @Test + public void getConfig2() throws 
PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount"}); + } + // @Test + public void setShardCount1() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount=1"}); + } + + // @Test + public void setShardCount3() throws PDException { + Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount=3"}); + } + + @Test + public void test2(){ + Integer[] a = new Integer[] { 1, 0, 3, 2}; + List aa = Arrays.asList(a); + System.out.printf(test2sup(aa, aa.size(), 0) ? "TRUE" : "FALSE"); + } + public static boolean test2sup (List arrays, int tail, int res) { + System.out.println(String.format("%d %d", tail, res)); + if (tail == 0) { + System.out.println(String.format("a = %d %d", tail, res)); + return false; + } else if (tail == 1) { + System.out.println(String.format("b = %d %d", arrays.get(0), res)); + return (arrays.get(0) == res); + } else if (tail == 2) { + System.out.println(String.format("c = %d %d %d", arrays.get(0), arrays.get(1), res)); + return (arrays.get(0) + arrays.get(1) == Math.abs(res)) || + (Math.abs(arrays.get(0) - arrays.get(1)) == Math.abs(res)); + } else { + return test2sup(arrays, tail - 1, res + arrays.get(tail - 1)) || + test2sup(arrays, tail - 1, res - arrays.get(tail - 1)); + } + } + + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java new file mode 100644 index 0000000000..2b5a9e531b --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java @@ -0,0 +1,16 @@ +package org.apache.hugegraph.pd.common; + +import org.junit.After; +import org.junit.BeforeClass; + +public class BaseCommonTest { + @BeforeClass + public static void init() { + + } + + @After + public void teardown() { + // pass + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java new file mode 100644 index 0000000000..5e2ebb405e --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java @@ -0,0 +1,27 @@ +package org.apache.hugegraph.pd.common; + +import org.apache.hugegraph.pd.service.IdServiceTest; +import org.apache.hugegraph.pd.service.KvServiceTest; + +import lombok.extern.slf4j.Slf4j; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + PartitionUtilsTest.class, + PartitionCacheTest.class, + MetadataKeyHelperTest.class, + KvServiceTest.class, + HgAssertTest.class, + KVPairTest.class, + IdServiceTest.class +}) + +@Slf4j +public class CommonSuiteTest { + + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java new file mode 100644 index 0000000000..ee24075af2 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java @@ -0,0 +1,108 @@ +package org.apache.hugegraph.pd.common; + +import org.junit.Test; + +import java.util.ArrayList; +import java.util.HashMap; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.baidu.hugegraph.pd.common.HgAssert; + +public class HgAssertTest { + + @Test(expected = IllegalArgumentException.class) + public void testIsTrue() { + HgAssert.isTrue(false, ""); + } + + 
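+    // Usage sketch (not from the original change): as testIsTrue above expects,
+    // HgAssert.isTrue throws IllegalArgumentException when the condition is false, e.g.
+    //     HgAssert.isTrue(shardCount > 0, "shardCount must be positive");
+    // the parameter name in this example is hypothetical.
+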
@Test(expected = IllegalArgumentException.class) + public void testIsTrue2() { + HgAssert.isTrue(true, null); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsFalse() { + HgAssert.isFalse(true, ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsFalse2() { + HgAssert.isTrue(false, null); + } + + @Test(expected = IllegalArgumentException.class) + public void isArgumentValid() { + HgAssert.isArgumentValid(new byte[0], ""); + } + + @Test(expected = IllegalArgumentException.class) + public void isArgumentValidStr() { + HgAssert.isArgumentValid("", ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsArgumentNotNull() { + HgAssert.isArgumentNotNull(null, ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIstValid() { + HgAssert.istValid(new byte[0], ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIstValidStr() { + HgAssert.isValid("", ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsNotNull() { + HgAssert.isNotNull(null, ""); + } + + + @Test + public void testIsInvalid() { + assertFalse(HgAssert.isInvalid( "abc", "test")); + assertTrue(HgAssert.isInvalid( "", null)); + } + + @Test + public void testIsInvalidByte() { + assertTrue(HgAssert.isInvalid( new byte[0])); + assertFalse(HgAssert.isInvalid( new byte[1])); + } + + @Test + public void testIsInvalidMap() { + assertTrue(HgAssert.isInvalid(new HashMap())); + assertFalse(HgAssert.isInvalid(new HashMap(){{put(1, 1);}})); + } + + @Test + public void testIsInvalidCollection() { + assertTrue(HgAssert.isInvalid(new ArrayList())); + assertFalse(HgAssert.isInvalid(new ArrayList(){{add(1);}})); + } + + @Test + public void testIsContains() { + assertTrue(HgAssert.isContains(new Object[]{new Integer(1), new Long(2)}, new Long(2))); + assertFalse(HgAssert.isContains(new Object[]{new Integer(1), new Long(2)}, new Long(3))); + } + + @Test + public void testIsContainsT() { + assertTrue(HgAssert.isContains(new ArrayList<>(){{add(1);}}, 1)); + assertFalse(HgAssert.isContains(new ArrayList<>(){{add(1);}}, 2)); + } + + @Test + public void testIsNull() { + assertTrue(HgAssert.isNull(null)); + assertFalse(HgAssert.isNull("abc", "cdf")); + } + +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java new file mode 100644 index 0000000000..bc7bedad12 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java @@ -0,0 +1,58 @@ +package org.apache.hugegraph.pd.common; + +import org.junit.Before; +import org.junit.Test; + +import java.util.Objects; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.baidu.hugegraph.pd.common.KVPair; + +public class KVPairTest { + + KVPair pair; + @Before + public void init(){ + pair = new KVPair<>("key", 1); + } + + @Test + public void testGetKey(){ + assertEquals(pair.getKey(), "key"); + } + + @Test + public void testSetKey(){ + pair.setKey("key2"); + assertEquals(pair.getKey(), "key2"); + } + + @Test + public void testGetValue(){ + assertTrue(Objects.equals(pair.getValue(), 1)); + } + + @Test + public void testSetValue(){ + pair.setValue(2); + assertTrue(Objects.equals(pair.getValue(), 2)); + } + + @Test + public void testToString(){ + + } + + @Test + public void testHashCode(){ + + } + + @Test + public void testEquals(){ + var pair2 = new KVPair<>("key", 1); + 
assertTrue(pair2.equals(pair)); + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java new file mode 100644 index 0000000000..3a03f2104b --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java @@ -0,0 +1,199 @@ +package org.apache.hugegraph.pd.common; + +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; +import static org.assertj.core.api.Assertions.assertThat; +import org.junit.Test; + +public class MetadataKeyHelperTest { + + @Test + public void testGetStoreInfoKey() { + assertThat(MetadataKeyHelper.getStoreInfoKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetActiveStoreKey() { + assertThat(MetadataKeyHelper.getActiveStoreKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetActiveStorePrefix() { + assertThat(MetadataKeyHelper.getActiveStorePrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetStorePrefix() { + assertThat(MetadataKeyHelper.getStorePrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetStoreStatusKey() { + assertThat(MetadataKeyHelper.getStoreStatusKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardGroupKey() { + assertThat(MetadataKeyHelper.getShardGroupKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardGroupPrefix() { + assertThat(MetadataKeyHelper.getShardGroupPrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPartitionKey() { + assertThat(MetadataKeyHelper.getPartitionKey("graphName", 0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPartitionPrefix() { + assertThat(MetadataKeyHelper.getPartitionPrefix("graphName")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardKey() { + assertThat(MetadataKeyHelper.getShardKey(0L, 0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardPrefix() { + assertThat(MetadataKeyHelper.getShardPrefix(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetGraphKey() { + assertThat(MetadataKeyHelper.getGraphKey("graphName")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetGraphPrefix() { + assertThat(MetadataKeyHelper.getGraphPrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPartitionStatusKey() { + assertThat(MetadataKeyHelper.getPartitionStatusKey("graphName", + 0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPartitionStatusPrefixKey() { + assertThat(MetadataKeyHelper.getPartitionStatusPrefixKey( + "graphName")).contains(MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetGraphSpaceKey() { + assertThat(MetadataKeyHelper.getGraphSpaceKey("graphSpace")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPdConfigKey() { + assertThat(MetadataKeyHelper.getPdConfigKey("configKey")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetQueueItemPrefix() { + assertThat(MetadataKeyHelper.getQueueItemPrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetQueueItemKey() { + 
assertThat(MetadataKeyHelper.getQueueItemKey("itemId")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetSpitTaskKey() { + assertThat(MetadataKeyHelper.getSplitTaskKey( "graphName", 0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetSpitTaskPrefix() { + assertThat(MetadataKeyHelper.getSplitTaskPrefix("graph0")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetLogKey() { + // Setup + final Metapb.LogRecord record = Metapb.LogRecord.newBuilder() + .setAction("value") + .setTimestamp(0L) + .build(); + + // Run the test + final byte[] result = MetadataKeyHelper.getLogKey(record); + + // Verify the results + assertThat(result).contains(MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetLogKeyPrefix() { + assertThat(MetadataKeyHelper.getLogKeyPrefix("action", 0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetKVPrefix() { + assertThat(MetadataKeyHelper.getKVPrefix("prefix", "key")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetKVTTLPrefix() { + assertThat(MetadataKeyHelper.getKVTTLPrefix("ttlPrefix", "prefix", + "key")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetKVWatchKeyPrefix1() { + assertThat( + MetadataKeyHelper.getKVWatchKeyPrefix("key", "watchDelimiter", + 0L)).contains( + String.valueOf(MetadataKeyHelper.getDelimiter())); + } + + @Test + public void testGetKVWatchKeyPrefix2() { + assertThat(MetadataKeyHelper.getKVWatchKeyPrefix("key", + "watchDelimiter")).contains( + String.valueOf(MetadataKeyHelper.getDelimiter())); + } + + @Test + public void testGetDelimiter() { + assertThat(MetadataKeyHelper.getDelimiter()).isEqualTo('/'); + } + + @Test + public void testGetStringBuilderHelper() { + try{ + MetadataKeyHelper.getStringBuilderHelper(); + } catch (Exception e) { + + } + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java new file mode 100644 index 0000000000..a3438d743d --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java @@ -0,0 +1,376 @@ +package org.apache.hugegraph.pd.common; + +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PartitionCache; +import com.baidu.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +import java.io.UnsupportedEncodingException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +public class PartitionCacheTest { + + private PartitionCache cache ; + + @Before + public void setup(){ + cache = new PartitionCache(); + } + + @Test + public void testGetPartitionById(){ + var partition = createPartition(0, "graph0", 0, 65535); + cache.updateShardGroup(creteShardGroup(0)); + cache.updatePartition(partition); + var ret = cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + } + + @Test + public void testGetPartitionByKey() throws UnsupportedEncodingException { + var partition = createPartition(0, "graph0", 0, 65535); + cache.updateShardGroup(creteShardGroup(0)); + cache.updatePartition(partition); + var 
ret = cache.getPartitionByKey("graph0", "0".getBytes("utf-8")); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + } + + @Test + public void getPartitionByCode(){ + var partition = createPartition(0, "graph0", 0, 1024); + cache.updateShardGroup(creteShardGroup(0)); + cache.updatePartition(partition); + var ret = cache.getPartitionByCode("graph0", 10); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + assertNull(cache.getPartitionByCode("graph0", 2000)); + } + + @Test + public void testGetPartitions(){ + var partition1 = createPartition(0, "graph0", 0, 1024); + cache.updateShardGroup(creteShardGroup(0)); + cache.updatePartition(partition1); + assertEquals(cache.getPartitions("graph0").size(), 1); + var partition2 = createPartition(1, "graph0", 1024, 2048); + cache.updateShardGroup(creteShardGroup(1)); + cache.updatePartition(partition2); + assertEquals(cache.getPartitions("graph0").size(), 2); + System.out.print(cache.debugCacheByGraphName("graph0")); + } + + @Test + public void testAddPartition(){ + var partition = createPartition(0, "graph0", 0, 65535); + cache.addPartition("graph0", 0, partition); + var ret = cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + assertNotNull(cache.getPartitionByCode("graph0", 2000)); + System.out.print(cache.debugCacheByGraphName("graph0")); + var partition2 = createPartition(0, "graph0", 0, 1024); + cache.addPartition("graph0", 0, partition2); + ret = cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition2); + assertNull(cache.getPartitionByCode("graph0", 2000)); + System.out.print(cache.debugCacheByGraphName("graph0")); + } + + @Test + public void testUpdatePartition(){ + var partition = createPartition(0, "graph0", 0, 65535); + cache.updateShardGroup(creteShardGroup(0)); + cache.addPartition("graph0", 0, partition); + var partition2 = createPartition(0, "graph0", 0, 1024); + cache.updatePartition("graph0", 0, partition2); + var ret = cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition2); + assertNull(cache.getPartitionByCode("graph0", 2000)); + } + + @Test + public void testUpdatePartition2(){ + var partition = createPartition(0, "graph0", 0, 1024); + cache.updateShardGroup(creteShardGroup(0)); + assertTrue(cache.updatePartition(partition)); + assertFalse(cache.updatePartition(partition)); + var ret = cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + assertNull(cache.getPartitionByCode("graph0", 2000)); + } + + @Test + public void testRemovePartition(){ + var partition = createPartition(0, "graph0", 0, 1024); + cache.updateShardGroup(creteShardGroup(0)); + cache.updatePartition(partition); + assertNotNull(cache.getPartitionById("graph0", 0)); + cache.removePartition("graph0", 0); + assertNull(cache.getPartitionById("graph0", 0)); + System.out.print(cache.debugCacheByGraphName("graph0")); + } + + @Test + public void testRange(){ + var partition1 = createPartition(1, "graph0", 0, 3); + var partition2 = createPartition(2, "graph0", 3, 6); + cache.updatePartition(partition1); + cache.updatePartition(partition2); + + var partition3 = createPartition(3, "graph0", 1, 2); + var partition4 = createPartition(4, "graph0", 2, 3); + + cache.updatePartition(partition3); + cache.updatePartition(partition4); + System.out.println(cache.debugCacheByGraphName("graph0")); + var partition6 = createPartition(1, "graph0", 0, 1); + 
cache.updatePartition(partition6);
+
+
+        System.out.println(cache.debugCacheByGraphName("graph0"));
+
+        var partition5 = createPartition(1, "graph0", 0, 3);
+        cache.updatePartition(partition5);
+        System.out.println(cache.debugCacheByGraphName("graph0"));
+    }
+
+    @Test
+    public void testRange2(){
+        var partition1 = createPartition(1, "graph0", 0, 3);
+        var partition2 = createPartition(2, "graph0", 3, 6);
+        cache.updatePartition(partition1);
+        cache.updatePartition(partition2);
+
+        System.out.println(cache.debugCacheByGraphName("graph0"));
+
+        // there is a gap left in the middle
+        var partition3 = createPartition(1, "graph0", 2, 3);
+        cache.updatePartition(partition3);
+
+        System.out.println(cache.debugCacheByGraphName("graph0"));
+
+        var partition5 = createPartition(1, "graph0", 0, 3);
+        cache.updatePartition(partition5);
+        System.out.println(cache.debugCacheByGraphName("graph0"));
+    }
+
+
+    @Test
+    public void testRemovePartitions(){
+        var partition1 = createPartition(0, "graph0", 0, 1024);
+        var partition2 = createPartition(1, "graph0", 1024, 2048);
+        cache.updateShardGroup(creteShardGroup(0));
+        cache.updatePartition(partition1);
+        cache.updateShardGroup(creteShardGroup(1));
+        cache.updatePartition(partition2);
+        assertEquals(cache.getPartitions("graph0").size(), 2);
+        cache.removePartitions();
+        assertEquals(cache.getPartitions("graph0").size(), 0);
+    }
+
+
+
+    @Test
+    public void testRemoveAll(){
+        var partition1 = createPartition(0, "graph0", 0, 1024);
+        var partition2 = createPartition(1, "graph0", 1024, 2048);
+        var partition3 = createPartition(0, "graph1", 0, 2048);
+        cache.updateShardGroup(creteShardGroup(0));
+        cache.updateShardGroup(creteShardGroup(1));
+        cache.updatePartition(partition1);
+        cache.updatePartition(partition2);
+        cache.updatePartition(partition3);
+
+        assertEquals(cache.getPartitions("graph0").size(), 2);
+        assertEquals(cache.getPartitions("graph1").size(), 1);
+        cache.removeAll("graph0");
+        assertEquals(cache.getPartitions("graph0").size(), 0);
+        assertEquals(cache.getPartitions("graph1").size(), 1);
+    }
+
+    @Test
+    public void testUpdateShardGroup(){
+        var shardGroup = createShardGroup();
+        cache.updateShardGroup(shardGroup);
+        assertNotNull(cache.getShardGroup(shardGroup.getId()));
+    }
+
+    @Test
+    public void testGetShardGroup(){
+        var shardGroup = createShardGroup();
+        cache.updateShardGroup(shardGroup);
+        assertTrue(Objects.equals(cache.getShardGroup(shardGroup.getId()), shardGroup));
+    }
+
+    @Test
+    public void testAddStore(){
+        var store = createStore(1);
+        cache.addStore(1L, store);
+        assertEquals(cache.getStoreById(1L), store);
+    }
+
+    @Test
+    public void testGetStoreById(){
+        var store = createStore(1);
+        cache.addStore(1L, store);
+        assertEquals(cache.getStoreById(1L), store);
+    }
+
+    @Test
+    public void testRemoveStore(){
+        var store = createStore(1);
+        cache.addStore(1L, store);
+        assertEquals(cache.getStoreById(1L), store);
+
+        cache.removeStore(1L);
+        assertNull(cache.getStoreById(1L));
+    }
+
+    @Test
+    public void testHasGraph(){
+        var partition = createPartition(0, "graph0", 0, 65535);
+        cache.updateShardGroup(creteShardGroup(0));
+        cache.updatePartition(partition);
+        assertTrue(cache.hasGraph("graph0"));
+        assertFalse(cache.hasGraph("graph1"));
+    }
+
+    @Test
+    public void testUpdateGraph(){
+        var graph = createGraph("graph0", 10);
+        cache.updateGraph(graph);
+        assertEquals(cache.getGraph("graph0"), graph);
+        graph = createGraph("graph0", 12);
+        cache.updateGraph(graph);
+        assertEquals(cache.getGraph("graph0"), graph);
+    }
+
+    @Test
+    public void testGetGraph(){
+        var graph
= createGraph("graph0", 12); + cache.updateGraph(graph); + assertEquals(cache.getGraph("graph0"), graph); + } + + @Test + public void testGetGraphs(){ + var graph1 = createGraph("graph0", 12); + var graph2 = createGraph("graph1", 12); + var graph3 = createGraph("graph2", 12); + cache.updateGraph(graph1); + cache.updateGraph(graph2); + cache.updateGraph(graph3); + assertEquals(cache.getGraphs().size(), 3); + } + + @Test + public void testReset(){ + var graph1 = createGraph("graph0", 12); + var graph2 = createGraph("graph1", 12); + var graph3 = createGraph("graph2", 12); + cache.updateGraph(graph1); + cache.updateGraph(graph2); + cache.updateGraph(graph3); + assertEquals(cache.getGraphs().size(), 3); + cache.reset(); + assertEquals(cache.getGraphs().size(), 0); + } + + @Test + public void testUpdateShardGroupLeader(){ + var shardGroup = createShardGroup(); + cache.updateShardGroup(shardGroup); + + var leader = Metapb.Shard.newBuilder().setStoreId(2).setRole(Metapb.ShardRole.Leader).build(); + cache.updateShardGroupLeader(shardGroup.getId(), leader); + + assertEquals(cache.getLeaderShard(shardGroup.getId()), leader); + } + + private static Metapb.Partition createPartition(int pid, String graphName, long start, long end){ + return Metapb.Partition.newBuilder() + .setId(pid) + .setGraphName(graphName) + .setStartKey(start) + .setEndKey(end) + .setState(Metapb.PartitionState.PState_Normal) + .setVersion(1) + .build(); + } + + private static Metapb.ShardGroup creteShardGroup(int pid) { + return Metapb.ShardGroup.newBuilder() + .addShards( + Metapb.Shard.newBuilder().setStoreId(0).setRole(Metapb.ShardRole.Leader).build() + ) + .setId(pid) + .setVersion(0) + .setConfVer(0) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + } + + private static Metapb.Shard createShard(){ + return Metapb.Shard.newBuilder() + .setStoreId(0) + .setRole(Metapb.ShardRole.Leader) + .build(); + } + + private static Metapb.Store createStore(long storeId){ + return Metapb.Store.newBuilder() + .setId(storeId) + .setAddress("127.0.0.1") + .setCores(4) + .setVersion("1") + .setDataPath("/tmp/junit") + .setDataVersion(1) + .setLastHeartbeat(System.currentTimeMillis()) + .setStartTimestamp(System.currentTimeMillis()) + .setState(Metapb.StoreState.Up) + .setDeployPath("/tmp/junit") + .build(); + } + + private static Metapb.Graph createGraph(String graphName, int partitionCount){ + return Metapb.Graph.newBuilder() + .setGraphName(graphName) + .setPartitionCount(partitionCount) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + } + + private static Metapb.ShardGroup createShardGroup(){ + List shards = new ArrayList<>() ; + for (int i = 0 ; i < 3 ; i ++ ) { + shards.add(Metapb.Shard.newBuilder() + .setStoreId(i) + .setRole( i == 0 ? 
Metapb.ShardRole.Leader : Metapb.ShardRole.Follower) + .build() + ); + } + + return Metapb.ShardGroup.newBuilder() + .setId(1) + .setVersion(1) + .setConfVer(1) + .setState(Metapb.PartitionState.PState_Normal) + .addAllShards(shards) + .build(); + } + +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java new file mode 100644 index 0000000000..958d916191 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java @@ -0,0 +1,18 @@ +package org.apache.hugegraph.pd.common; + +import lombok.extern.slf4j.Slf4j; +import org.junit.Assert; +import org.junit.Test; + +import com.baidu.hugegraph.pd.common.PartitionUtils; + + +@Slf4j +public class PartitionUtilsTest extends BaseCommonTest { + @Test + public void testCalcHashcode() { + byte[] key = new byte[5]; + long code = PartitionUtils.calcHashcode(key); + Assert.assertEquals(code, 31912L); + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java new file mode 100644 index 0000000000..72292625ce --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java @@ -0,0 +1,56 @@ +package org.apache.hugegraph.pd.core; + +import com.baidu.hugegraph.pd.ConfigService; +import com.baidu.hugegraph.pd.config.PDConfig; +import org.apache.commons.io.FileUtils; +import org.junit.After; +import org.junit.BeforeClass; + +import java.io.File; +import java.io.IOException; + + +public class BaseCoreTest { + + static com.baidu.hugegraph.pd.config.PDConfig pdConfig; + @BeforeClass + public static void init() throws Exception { + String path = "tmp/unitTest"; + deleteDirectory(new File(path)); + pdConfig = new com.baidu.hugegraph.pd.config.PDConfig() {{ + this.setClusterId(100); + this.setInitialStoreList("127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502," + + "127.0.0.1:8503,127.0.0.1:8504,127.0.0.1:8505"); + }}; + + pdConfig.setStore(new com.baidu.hugegraph.pd.config.PDConfig().new Store() {{ + this.setMaxDownTime(3600); + this.setKeepAliveTimeout(3600); + }}); + + pdConfig.setPartition(new com.baidu.hugegraph.pd.config.PDConfig().new Partition() {{ + this.setShardCount(3); + this.setMaxShardsPerStore(3); + }}); + pdConfig.setRaft(new com.baidu.hugegraph.pd.config.PDConfig().new Raft(){{ + this.setEnable(false); + }}); + pdConfig.setDiscovery(new PDConfig().new Discovery()); + pdConfig.setDataPath(path); + ConfigService configService = new ConfigService(pdConfig); + pdConfig = configService.loadConfig(); + } + + @After + public void teardown() throws Exception { + // pass + } + + public static void deleteDirectory(File dir) { + try { + FileUtils.deleteDirectory(dir); + } catch (IOException e) { + System.out.println(String.format("Failed to start ....,%s", e.getMessage())); + } + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java new file mode 100644 index 0000000000..a0b72cad6a --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java @@ -0,0 +1,21 @@ +package org.apache.hugegraph.pd.core; + +import org.apache.hugegraph.pd.core.meta.MetadataKeyHelperTest; + +import lombok.extern.slf4j.Slf4j; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + + 
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+        StoreNodeServiceTest.class,
+        MetadataKeyHelperTest.class
+})
+
+@Slf4j
+public class PDCoreSuiteTest {
+
+
+}
\ No newline at end of file
diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java
new file mode 100644
index 0000000000..567f1345da
--- /dev/null
+++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java
@@ -0,0 +1,98 @@
+package org.apache.hugegraph.pd.core;
+
+import com.baidu.hugegraph.pd.PartitionService;
+import com.baidu.hugegraph.pd.StoreNodeService;
+import com.baidu.hugegraph.pd.common.PDException;
+import com.baidu.hugegraph.pd.grpc.Metapb;
+import lombok.extern.slf4j.Slf4j;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+@Slf4j
+public class StoreNodeServiceTest extends BaseCoreTest {
+
+
+    @Test
+    public void testStoreNodeService() throws PDException {
+        Assert.assertEquals(pdConfig.getPartition().getTotalCount(),
+                            pdConfig.getInitialStoreMap().size() * pdConfig.getPartition().getMaxShardsPerStore()
+                            / pdConfig.getPartition().getShardCount());
+        StoreNodeService storeService = new StoreNodeService(pdConfig);
+        storeService.init(new PartitionService(pdConfig, storeService));
+        int count = 6;
+        Metapb.Store[] stores = new Metapb.Store[count];
+        for (int i = 0; i < count; i++) {
+            Metapb.Store store = Metapb.Store.newBuilder()
+                    .setId(0)
+                    .setAddress("127.0.0.1:850" + i)
+                    .setDeployPath("/data")
+                    .addLabels(Metapb.StoreLabel.newBuilder()
+                            .setKey("namespace")
+                            .setValue("default").build())
+                    .build();
+            stores[i] = storeService.register(store);
+            System.out.println("Newly registered store, id = " + stores[i].getId());
+        }
+        Assert.assertEquals(count, storeService.getStores("").size());
+
+        for (Metapb.Store store : stores) {
+            Metapb.StoreStats stats = Metapb.StoreStats.newBuilder()
+                    .setStoreId(store.getId())
+                    .build();
+            storeService.heartBeat(stats);
+        }
+
+        Assert.assertEquals(6, storeService.getActiveStores("").size());
+
+        Metapb.Graph graph = Metapb.Graph.newBuilder()
+                .setGraphName("defaultGH")
+                .setPartitionCount(10)
+                .build();
+        // allocate shards
+        List<Metapb.Shard> shards = storeService.allocShards(graph, 1);
+
+
+        Assert.assertEquals(3, shards.size());
+        // set the leader
+        Assert.assertEquals(pdConfig.getPartition().getTotalCount(), storeService.getShardGroups().size());
+        Metapb.Shard leader = Metapb.Shard.newBuilder(shards.get(0))
+                .setRole(Metapb.ShardRole.Leader).build();
+        shards = new ArrayList<>(shards);
+        shards.set(0, leader);
+        // increase the shard count
+        pdConfig.getPartition().setShardCount(5);
+
+        Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder()
+                .setId(1)
+                .addAllShards(shards).build();
+        shards = storeService.reallocShards(shardGroup);
+
+        Assert.assertEquals(5, shards.size());
+        // decrease the shard count
+        pdConfig.getPartition().setShardCount(3);
+        shards = storeService.reallocShards(shardGroup);
+        Assert.assertEquals(3, shards.size());
+        // the leader is still included; it must not be removed
+        Assert.assertTrue(shards.contains(leader));
+
+        // decrease the shard count again
+        pdConfig.getPartition().setShardCount(1);
+        graph = Metapb.Graph.newBuilder(graph).build();
+        shards = storeService.reallocShards(shardGroup);
+        Assert.assertEquals(1, shards.size());
+        // the leader is still included; it must not be removed
+        Assert.assertTrue(shards.contains(leader));
+
+        for (Metapb.Store store : stores) {
+            storeService.removeStore(store.getId());
+        }
+        Assert.assertEquals(0, storeService.getStores("").size());
+
+
+    }
+
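+    // Illustrative sketch (not from the original change): the first assertion in
+    // testStoreNodeService encodes the partition-count rule used by the PD config,
+    // i.e. initial partition count = store count * max shards per store / shard count.
+    // The helper below only restates that arithmetic; its name is hypothetical.
+    private static int expectedInitialPartitionCount(int storeCount, int maxShardsPerStore,
+                                                     int shardCount) {
+        // e.g. 6 stores * 3 max shards per store / 3 shards per partition = 6 partitions
+        return storeCount * maxShardsPerStore / shardCount;
+    }
+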
+ +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java new file mode 100644 index 0000000000..0909fcf25a --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java @@ -0,0 +1,19 @@ +package org.apache.hugegraph.pd.core.meta; + +import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; +import org.junit.Test; + +import java.util.Arrays; + +import static org.junit.Assert.assertTrue; + +public class MetadataKeyHelperTest { + + @Test + public void testMoveTaskKey(){ + var key = MetadataKeyHelper.getMoveTaskKey("foo", 0, 1); + assertTrue(Arrays.equals(key, "TASK_MOVE/foo/0/1".getBytes())); + var key2 = MetadataKeyHelper.getMoveTaskPrefix("foo"); + assertTrue(Arrays.equals(key2, "TASK_MOVE/foo".getBytes())); + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java new file mode 100644 index 0000000000..df7467163d --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java @@ -0,0 +1,20 @@ +package org.apache.hugegraph.pd.grpc; + +import org.junit.After; +import org.junit.BeforeClass; + + + +public class BaseGrpcTest { + + @BeforeClass + public static void init() { + + } + + @After + public void teardown() { + // pass + } + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java new file mode 100644 index 0000000000..acb4b12509 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java @@ -0,0 +1,15 @@ +package org.apache.hugegraph.pd.grpc; + +import lombok.extern.slf4j.Slf4j; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ +}) + +@Slf4j +public class GrpcSuiteTest { + +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java new file mode 100644 index 0000000000..57433f8f6b --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java @@ -0,0 +1,39 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.config.PDConfig; +import org.apache.commons.io.FileUtils; +import org.junit.After; +import org.junit.BeforeClass; + +import java.net.http.HttpClient; +import java.io.File; + + +public class BaseServerTest { + public static HttpClient client; + public static String pdRestAddr; + @BeforeClass + public static void init() { + client = HttpClient.newHttpClient(); + pdRestAddr = "http://127.0.0.1:8620"; + } + + @After + public void teardown() { + // pass + } + + public static PDConfig getConfig(){ + FileUtils.deleteQuietly(new File("tmp/test/")); + PDConfig pdConfig = new PDConfig() {{ + this.setClusterId(100); + this.setPatrolInterval(1); + this.setRaft(new Raft() {{ + setEnable(false); + }}); + this.setDataPath("tmp/test/"); + }}; + return pdConfig; + } + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java new file mode 100644 index 0000000000..2e91d62ce2 --- /dev/null +++ 
b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java @@ -0,0 +1,92 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.ConfigService; +import com.baidu.hugegraph.pd.IdService; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Metapb; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.List; + +public class ConfigServiceTest { + + private PDConfig config = BaseServerTest.getConfig(); + + private ConfigService service; + + @Before + public void setUp() { + service = new ConfigService(config); + } + + @Test + public void testGetPDConfig() throws Exception { + // Setup + try{ + final Metapb.PDConfig config = Metapb.PDConfig.newBuilder() + .setVersion(0L) + .setPartitionCount(0) + .setShardCount(55) + .setMaxShardsPerStore(0) + .setTimestamp(0L).build(); + service.setPDConfig(config); + // Run the test + Metapb.PDConfig result = service.getPDConfig(0L); + + // Verify the results + Assert.assertTrue(result.getShardCount() == 55); + result = service.getPDConfig(); + Assert.assertTrue(result.getShardCount() == 55); + } catch (Exception e) { + + } finally { + + } + + } + + @Test + public void testGetGraphSpace() throws Exception { + // Setup + Metapb.GraphSpace space = Metapb.GraphSpace.newBuilder() + .setName("gs1") + .setTimestamp(0L).build(); + final List expectedResult = List.of(space); + service.setGraphSpace(space); + // Run the test + final List result = service.getGraphSpace( + "gs1"); + + Assert.assertTrue(result.size() == 1); + } + @Test + public void testUpdatePDConfig() { + try{ + final Metapb.PDConfig mConfig = Metapb.PDConfig.newBuilder() + .setVersion(0L) + .setPartitionCount(0) + .setShardCount(0) + .setMaxShardsPerStore(0) + .setTimestamp(0L) + .build(); + final PDConfig expectedResult = new PDConfig(); + expectedResult.setConfigService(new ConfigService(new PDConfig())); + expectedResult.setIdService(new IdService(new PDConfig())); + expectedResult.setClusterId(0L); + expectedResult.setPatrolInterval(0L); + expectedResult.setDataPath("dataPath"); + expectedResult.setMinStoreCount(0); + expectedResult.setInitialStoreList("initialStoreList"); + expectedResult.setHost("host"); + expectedResult.setVerifyPath("verifyPath"); + expectedResult.setLicensePath("licensePath"); + service.updatePDConfig(mConfig); + } catch (Exception e) { + + } finally { + + } + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java new file mode 100644 index 0000000000..5783cd73fd --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java @@ -0,0 +1,91 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.IdService; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.meta.IdMetaStore; +import org.apache.commons.io.FileUtils; +import org.junit.Assert; +import org.junit.Test; + +import java.io.File; + +public class IdServiceTest { + @Test + public void testCid() { + try{ + PDConfig pdConfig = BaseServerTest.getConfig(); + int max = 0x2000; + IdService idService = new IdService(pdConfig); + for (int i = 0; i < max; i++) { + idService.getCId("test", max); + } + idService.delCId("test", 1); + idService.delCId("test", 0x10); + idService.delCId("test", 0x100); + idService.delCId("test", 0x1000); + + Assert.assertEquals(1, idService.getCId("test", max)); + Assert.assertEquals(0x10, 
idService.getCId("test", max)); + Assert.assertEquals(0x100, idService.getCId("test", max)); + Assert.assertEquals(0x1000, idService.getCId("test", max)); + Assert.assertEquals(-1, idService.getCId("test", max)); + + idService.delCId("test", 1); + idService.delCId("test", 0x10); + idService.delCId("test", 0x100); + idService.delCId("test", 0x1000); + + long cid1 = idService.getCId("test", "name", max); + idService.delCIdDelay("test", "name", cid1); + long cid2 = idService.getCId("test", "name", max); + + Assert.assertEquals(cid1, cid2); + idService.delCIdDelay("test", "name", cid2); + Thread.sleep(5000); + long cid3 = idService.getCId("test", "name", max); + } catch (Exception e) { + + } + // MetadataFactory.closeStore(); + } + + @Test + public void testId() { + try{ + FileUtils.deleteQuietly(new File("tmp/testId/")); + IdMetaStore.CID_DEL_TIMEOUT = 2000; + PDConfig pdConfig = new PDConfig() {{ + this.setClusterId(100); + this.setPatrolInterval(1); + this.setRaft(new Raft() {{ + setEnable(false); + }}); + this.setDataPath("tmp/testId/"); + }}; + IdService idService = new IdService(pdConfig); + long first = idService.getId("abc", 100); + Assert.assertEquals(first, 0L); + long second = idService.getId("abc", 100); + Assert.assertEquals(second, 100L); + idService.resetId("abc"); + first = idService.getId("abc", 100); + Assert.assertEquals(first, 0L); + } catch (Exception e) { + + } + // MetadataFactory.closeStore(); + } + @Test + public void testMember() { + try{ + PDConfig pdConfig = BaseServerTest.getConfig(); + IdService idService = new IdService(pdConfig); + idService.setPdConfig(pdConfig); + PDConfig config = idService.getPdConfig(); + config.getHost(); + } catch (Exception e) { + e.printStackTrace(); + } + // MetadataFactory.closeStore(); + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java new file mode 100644 index 0000000000..fba4e837c4 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java @@ -0,0 +1,43 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.KvService; +import com.baidu.hugegraph.pd.config.PDConfig; +import org.junit.Assert; +import org.junit.Test; + +public class KvServiceTest { + + @Test + public void testKv() { + try { + PDConfig pdConfig = BaseServerTest.getConfig(); + KvService service = new KvService(pdConfig); + String key = "kvTest"; + String kvTest = service.get(key); + Assert.assertEquals(kvTest, ""); + service.put(key, "kvTestValue"); + kvTest = service.get(key); + Assert.assertEquals(kvTest, "kvTestValue"); + service.scanWithPrefix(key); + service.delete(key); + service.put(key, "kvTestValue"); + service.deleteWithPrefix(key); + service.put(key, "kvTestValue", 1000L); + service.keepAlive(key); + } catch (Exception e) { + + } + } + + @Test + public void testMember() { + try { + PDConfig pdConfig = BaseServerTest.getConfig(); + KvService service = new KvService(pdConfig); + service.setPdConfig(pdConfig); + PDConfig config = service.getPdConfig(); + } catch (Exception e) { + e.printStackTrace(); + } + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java new file mode 100644 index 0000000000..b48ba4ef68 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java @@ -0,0 +1,36 @@ +package org.apache.hugegraph.pd.service; + 
+import com.baidu.hugegraph.pd.LogService; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.google.protobuf.Any; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.List; + +public class LogServiceTest { + + private PDConfig mockPdConfig = BaseServerTest.getConfig(); + + private LogService logServiceUnderTest; + + @Before + public void setUp() { + logServiceUnderTest = new LogService(mockPdConfig); + } + + @Test + public void testGetLog() throws Exception { + logServiceUnderTest.insertLog("action", "message", + Any.newBuilder().build()); + + // Run the test + final List result = logServiceUnderTest.getLog( + "action", 0L, System.currentTimeMillis()); + + // Verify the results + Assert.assertEquals(result.size(), 1); + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java new file mode 100644 index 0000000000..9cf617de0f --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java @@ -0,0 +1,113 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.PartitionService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.MetaTask; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.pulse.CleanPartition; +import com.baidu.hugegraph.pd.grpc.pulse.CleanType; + +import org.junit.Before; +import org.junit.Test; + +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class PartitionServiceTest extends PdTestBase { + + private PartitionService service; + + @Before + public void init(){ + service = getPartitionService(); + } + + @Test + public void testCombinePartition() throws PDException { + buildEnv(); + // 0, 1, 2-> 0, 3,4,5->1, 6,7,8 ->2, 9,10, 11-> 3 + service.combinePartition(4); + + var partition = service.getPartitionById("graph0", 0); + assertEquals(0, partition.getStartKey()); + assertEquals(5462, partition.getEndKey()); + + var tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(11, tasks.size()); + + for (MetaTask.Task task : tasks){ + var newTask = task.toBuilder().setState(MetaTask.TaskState.Task_Success).build(); + getTaskService().reportTask(newTask); + } + + tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(0, tasks.size()); + } + + @Test + public void testCombinePartition2() throws PDException { + buildEnv(); + // 0, 1, 2-> 0, 3,4,5->1, 6,7,8 ->2, 9,10, 11-> 3 + service.combinePartition(4); + + var partition = service.getPartitionById("graph0", 0); + assertEquals(0, partition.getStartKey()); + assertEquals(5462, partition.getEndKey()); + + var tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(11, tasks.size()); + + for (MetaTask.Task task : tasks){ + var newTask = task.toBuilder().setState(MetaTask.TaskState.Task_Failure).build(); + getTaskService().reportTask(newTask); + } + + tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(0, tasks.size()); + } + + @Test + public void testHandleCleanTask(){ + MetaTask.Task task = MetaTask.Task.newBuilder() + .setType(MetaTask.TaskType.Clean_Partition) + .setPartition(Metapb.Partition.newBuilder().setGraphName("foo").setId(0).build()) + .setCleanPartition(CleanPartition.newBuilder() + 
.setCleanType(CleanType.CLEAN_TYPE_KEEP_RANGE) + .setDeletePartition(true) + .setKeyStart(0) + .setKeyEnd(10) + .build()) + .build(); + getTaskService().reportTask(task); + } + + private void buildEnv() throws PDException { + var storeInfoMeta = getStoreNodeService().getStoreInfoMeta(); + storeInfoMeta.updateStore(Metapb.Store.newBuilder() + .setId(99) + .setState(Metapb.StoreState.Up) + .build()); + + long lastId = 0; + for (int i = 0; i < 12; i++){ + Metapb.Shard shard = Metapb.Shard.newBuilder() + .setStoreId(99) + .setRole(Metapb.ShardRole.Leader) + .build(); + + Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() + .setId(i) + .setState(Metapb.PartitionState.PState_Normal) + .addAllShards(List.of(shard)) + .build(); + storeInfoMeta.updateShardGroup(shardGroup); + + var partitionShard = service.getPartitionByCode("graph0", lastId); + if (partitionShard != null){ + lastId = partitionShard.getPartition().getEndKey(); + } + } + + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java new file mode 100644 index 0000000000..522ed30585 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java @@ -0,0 +1,194 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.ConfigService; +import com.baidu.hugegraph.pd.IdService; +import com.baidu.hugegraph.pd.PartitionInstructionListener; +import com.baidu.hugegraph.pd.PartitionService; +import com.baidu.hugegraph.pd.PartitionStatusListener; +import com.baidu.hugegraph.pd.StoreMonitorDataService; +import com.baidu.hugegraph.pd.StoreNodeService; +import com.baidu.hugegraph.pd.StoreStatusListener; +import com.baidu.hugegraph.pd.TaskScheduleService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard; +import com.baidu.hugegraph.pd.grpc.pulse.CleanPartition; +import com.baidu.hugegraph.pd.grpc.pulse.DbCompaction; +import com.baidu.hugegraph.pd.grpc.pulse.MovePartition; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition; +import com.baidu.hugegraph.pd.grpc.pulse.TransferLeader; +import com.baidu.hugegraph.pd.raft.RaftEngine; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.File; + +public class PdTestBase { + private static PDConfig pdConfig; + + private static StoreNodeService storeNodeService; + private static PartitionService partitionService; + private static TaskScheduleService taskService; + private static StoreMonitorDataService storeMonitorDataService; + + private static final String DATA_PATH = "/tmp/pd_data"; + + @BeforeClass + public static void initService() throws PDException { + deleteDir(new File(DATA_PATH)); + + PDConfig config = new PDConfig(); + config.setDataPath(DATA_PATH); + config.setMinStoreCount(3); + config.setInitialStoreList("127.0.0.1:8501"); + config.setHost("127.0.0.1"); + config.setVerifyPath(""); + config.setLicensePath(""); + PDConfig.Raft raft = new PDConfig().new Raft(); + raft.setAddress("127.0.0.1:8601"); + raft.setPeersList("127.0.0.1:8601"); + raft.setDataPath(DATA_PATH); + raft.setHost("127.0.0.1"); + raft.setGrpcPort(8688); + raft.setPort(8621); + + config.setRaft(raft); + + config.setStore(new PDConfig().new Store()); + config.setPartition( new PDConfig().new Partition(){{ + setShardCount(1); + 
setTotalCount(12); + setMaxShardsPerStore(12); + }}); + config.setDiscovery(new PDConfig().new Discovery()); + + pdConfig = config; + + var configService = new ConfigService(pdConfig); + configService.loadConfig(); + + var engine = RaftEngine.getInstance(); + engine.addStateListener(configService); + engine.init(pdConfig.getRaft()); + engine.waitingForLeader(5000); + + storeNodeService = new StoreNodeService(pdConfig); + partitionService = new PartitionService(pdConfig, storeNodeService); + taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService); + var idService = new IdService(pdConfig); + storeMonitorDataService = new StoreMonitorDataService(pdConfig); + RaftEngine.getInstance().addStateListener(partitionService); + pdConfig.setIdService(idService); + + + storeNodeService.init(partitionService); + partitionService.init(); + partitionService.addInstructionListener(new PartitionInstructionListener() { + @Override + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException { + + } + + @Override + public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws PDException { + + } + + @Override + public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws PDException { + + } + + @Override + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException { + + } + + @Override + public void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException { + + } + + @Override + public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException { + + } + + @Override + public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) + throws PDException { + + } + }); + + partitionService.addStatusListener(new PartitionStatusListener() { + @Override + public void onPartitionChanged(Metapb.Partition partition, Metapb.Partition newPartition) { + + } + + @Override + public void onPartitionRemoved(Metapb.Partition partition) { + + } + }); + + storeNodeService.addStatusListener(new StoreStatusListener() { + @Override + public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, Metapb.StoreState status) { + + } + + @Override + public void onGraphChange(Metapb.Graph graph, Metapb.GraphState stateOld, Metapb.GraphState stateNew) { + + } + + @Override + public void onStoreRaftChanged(Metapb.Store store) { + + } + }); + + taskService.init(); + } + + @AfterClass + public static void shutdownService(){ + var instance = RaftEngine.getInstance(); + if (instance != null) { + instance.shutDown(); + } + } + + private static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : dir.listFiles()) { + deleteDir(file); + } + } + return dir.delete(); + } + + public static StoreNodeService getStoreNodeService() { + return storeNodeService; + } + + public static PartitionService getPartitionService() { + return partitionService; + } + + public static PDConfig getPdConfig(){ + return pdConfig; + } + + public static TaskScheduleService getTaskService() { + return taskService; + } + + public static StoreMonitorDataService getStoreMonitorDataService() { + return storeMonitorDataService; + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java new file mode 100644 index 0000000000..59981fa452 --- /dev/null +++ 
b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java
@@ -0,0 +1,101 @@
+package org.apache.hugegraph.pd.service;
+
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+
+/**
+ * @author tianxiaohui
+ * @date 20221220
+ **/
+public class RestApiTest extends BaseServerTest {
+    @Test
+    public void testQueryClusterInfo() throws URISyntaxException, IOException, InterruptedException, JSONException {
+        String url = pdRestAddr + "/v1/cluster";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        Assert.assertEquals(0, obj.getInt("status"));
+    }
+
+    @Test
+    public void testQueryClusterMembers() throws URISyntaxException, IOException, InterruptedException, JSONException {
+        String url = pdRestAddr + "/v1/members";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        Assert.assertEquals(0, obj.getInt("status"));
+    }
+
+    @Test
+    public void testQueryStoresInfo() throws URISyntaxException, IOException, InterruptedException, JSONException {
+        String url = pdRestAddr + "/v1/stores";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        Assert.assertEquals(0, obj.getInt("status"));
+    }
+
+    @Test
+    public void testQueryGraphsInfo() throws IOException, InterruptedException, JSONException, URISyntaxException {
+        String url = pdRestAddr + "/v1/graphs";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        Assert.assertEquals(0, obj.getInt("status"));
+    }
+
+    @Test
+    public void testQueryPartitionsInfo() throws IOException, InterruptedException, JSONException, URISyntaxException {
+        String url = pdRestAddr + "/v1/highLevelPartitions";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        Assert.assertEquals(0, obj.getInt("status"));
+    }
+
+    @Test
+    public void testQueryDebugPartitionsInfo() throws URISyntaxException, IOException, InterruptedException {
+        String url = pdRestAddr + "/v1/partitions";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        Assert.assertEquals(200, response.statusCode());
+    }
+
+    @Test
+    public void testQueryShards() throws URISyntaxException, IOException, InterruptedException, JSONException {
+        String url = pdRestAddr + "/v1/shards";
+        HttpRequest request = HttpRequest.newBuilder()
+                                         .uri(new URI(url))
+                                         .GET()
+                                         .build();
+        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
+        JSONObject obj = new JSONObject(response.body());
+        Assert.assertEquals(0, obj.getInt("status"));
+    }
+}
diff --git
a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java new file mode 100644 index 0000000000..a38de53637 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java @@ -0,0 +1,24 @@ +package org.apache.hugegraph.pd.service; + +import lombok.extern.slf4j.Slf4j; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + RestApiTest.class, + ConfigServiceTest.class, + IdServiceTest.class, + KvServiceTest.class, + LogServiceTest.class, + StoreServiceTest.class, + StoreNodeServiceNewTest.class, + StoreMonitorDataServiceTest.class, + TaskScheduleServiceTest.class, + PartitionServiceTest.class +}) + +@Slf4j +public class ServerSuiteTest { +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java new file mode 100644 index 0000000000..f2363f87fd --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java @@ -0,0 +1,63 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.StoreMonitorDataService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public class StoreMonitorDataServiceTest extends PdTestBase{ + + StoreMonitorDataService service; + + @Before + public void init(){ + service = getStoreMonitorDataService(); + var store = getPdConfig().getStore(); + store.setMonitorDataEnabled(true); + store.setMonitorDataInterval("1s"); + getPdConfig().setStore(store); + } + + @Test + public void test() throws InterruptedException, PDException { + long now = System.currentTimeMillis() / 1000; + for (int i = 0; i < 5; i++) { + service.saveMonitorData(genStats()); + now = System.currentTimeMillis() / 1000; + Thread.sleep(1100); + } + assertTrue(service.getLatestStoreMonitorDataTimeStamp(1) == 0 || + service.getLatestStoreMonitorDataTimeStamp(1) == now); + + var data = service.getStoreMonitorData(1); + assertEquals(5, data.size()); + + assertNotNull(service.debugMonitorInfo(List.of(Metapb.RecordPair.newBuilder() + .setKey("key1") + .setValue(1) + .build()))); + + assertNotNull(service.getStoreMonitorDataText(1)); + + + service.removeExpiredMonitorData(1, now + 1); + assertEquals(0, service.getStoreMonitorData(1).size()); + } + + + private Metapb.StoreStats genStats(){ + return Metapb.StoreStats.newBuilder() + .setStoreId(1) + .addSystemMetrics(Metapb.RecordPair.newBuilder().setKey("key1").setValue(1).build()) + .build(); + } + + +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java new file mode 100644 index 0000000000..163c23699a --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java @@ -0,0 +1,46 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.StoreNodeService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +import static 
org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+public class StoreNodeServiceNewTest extends PdTestBase {
+    private StoreNodeService service;
+
+    @Before
+    public void init() {
+        service = getStoreNodeService();
+    }
+
+    @Test
+    public void testGetTaskInfoMeta() {
+        assertNotNull(service.getTaskInfoMeta());
+    }
+
+    @Test
+    public void testGetStoreInfoMeta() {
+        assertNotNull(service.getStoreInfoMeta());
+    }
+
+    @Test
+    public void testRemoveShardGroup() throws PDException {
+        for (int i = 0; i < 12; i++) {
+            Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder()
+                    .setId(i)
+                    .setState(Metapb.PartitionState.PState_Offline)
+                    .build();
+            service.getStoreInfoMeta().updateShardGroup(group);
+        }
+
+        service.deleteShardGroup(11);
+        service.deleteShardGroup(10);
+
+        assertEquals(10, getPdConfig().getConfigService().getPDConfig().getPartitionCount());
+        // restore
+        getPdConfig().getConfigService().setPartitionCount(12);
+    }
+}
diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java
new file mode 100644
index 0000000000..b5e0cd97b7
--- /dev/null
+++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java
@@ -0,0 +1,817 @@
+package org.apache.hugegraph.pd.service;
+
+import com.baidu.hugegraph.pd.ConfigService;
+import com.baidu.hugegraph.pd.IdService;
+import com.baidu.hugegraph.pd.PartitionService;
+import com.baidu.hugegraph.pd.StoreNodeService;
+import com.baidu.hugegraph.pd.StoreStatusListener;
+import com.baidu.hugegraph.pd.config.PDConfig;
+import com.baidu.hugegraph.pd.grpc.MetaTask;
+import com.baidu.hugegraph.pd.grpc.Metapb;
+import static org.assertj.core.api.Assertions.assertThat;
+import org.junit.Before;
+import org.junit.Test;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+
+public class StoreServiceTest {
+
+    private PDConfig config;
+
+    private StoreNodeService service;
+
+    @Before
+    public void setUp() {
+        config = getConfig();
+        service = new StoreNodeService(config);
+    }
+
+    @Test
+    public void testInit() {
+        // Setup
+        PDConfig pdConfig = getConfig();
+        final PDConfig pdConfig1 = getConfig();
+        final PartitionService partitionService =
+                new PartitionService(pdConfig, new StoreNodeService(pdConfig1));
+
+        // Run the test
+        service.init(partitionService);
+
+        // Verify the results
+    }
+
+    private PDConfig getConfig() {
+        PDConfig pdConfig = new PDConfig();
+        pdConfig.setConfigService(new ConfigService(BaseServerTest.getConfig()));
+        pdConfig.setIdService(new IdService(BaseServerTest.getConfig()));
+        pdConfig.setClusterId(0L);
+        pdConfig.setPatrolInterval(0L);
+        pdConfig.setDataPath("dataPath");
+        pdConfig.setMinStoreCount(0);
+        pdConfig.setInitialStoreList("initialStoreList");
+        pdConfig.setHost("host");
+        pdConfig.setVerifyPath("verifyPath");
+        pdConfig.setLicensePath("licensePath");
+        PDConfig.Raft raft = new PDConfig().new Raft();
+        raft.setEnable(false);
+        pdConfig.setRaft(raft);
+        final PDConfig.Partition partition = new PDConfig().new Partition();
+        partition.setTotalCount(0);
+        partition.setShardCount(0);
+        pdConfig.setPartition(partition);
+        pdConfig.setInitialStoreMap(Map.ofEntries(Map.entry("value", "value")));
+        return pdConfig;
+    }
+
+    @Test
+    public void testIsOK() {
+        // Setup
+        // Run the test
+        final boolean result = service.isOK();
+
+        // Verify the results
+
assertThat(result).isTrue(); + } + + @Test + public void testRegister() throws Exception { + // Setup + final Metapb.Store store = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version").setState( + Metapb.StoreState.Unknown).setStartTimestamp(0L) + .setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build(); + final Metapb.Store expectedResult = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress( + "raftAddress") + .addLabels( + Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version") + .setState( + Metapb.StoreState.Unknown) + .setStartTimestamp(0L) + .setDeployPath( + "deployPath") + .setLastHeartbeat(0L) + .setStats( + Metapb.StoreStats + .newBuilder() + .setStoreId( + 0L) + .setPartitionCount( + 0) + .addGraphStats( + Metapb.GraphStats + .newBuilder() + .setGraphName( + "value") + .setApproximateSize( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + .build()) + .setDataVersion(0) + .setCores(0) + .setDataPath("dataPath") + .build(); + + // Configure PDConfig.getInitialStoreMap(...). + final Map stringStringMap = Map.ofEntries( + Map.entry("value", "value")); + + // Run the test + final Metapb.Store result = service.register(store); + } + + @Test + public void testGetStore() throws Exception { + // Setup + try { + Metapb.GraphStats stats = Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole( + Metapb.ShardRole.None) + .build(); + Metapb.StoreStats storeStats = Metapb.StoreStats.newBuilder() + .setStoreId(0L) + .setPartitionCount( + 0) + .addGraphStats( + stats) + .build(); + final Metapb.Store expectedResult = Metapb.Store.newBuilder() + .setId(0L) + .setAddress( + "address") + .setRaftAddress( + "raftAddress") + .addLabels( + Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion( + "version") + .setState( + Metapb.StoreState.Unknown) + .setStartTimestamp( + 0L) + .setDeployPath( + "deployPath") + .setLastHeartbeat( + 0L) + .setStats( + storeStats) + .setDataVersion(0) + .setCores(0) + .setDataPath( + "dataPath") + .build(); + + // Run the test + final Metapb.Store result = service.getStore(0L); + } catch (Exception e) { + + } + } + + @Test + public void testUpdateStore() throws Exception { + // Setup + final Metapb.Store store = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version").setState( + Metapb.StoreState.Unknown).setStartTimestamp(0L) + .setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build(); + final Metapb.Store expectedResult = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress( + "raftAddress") + .addLabels( + Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version") + .setState( + Metapb.StoreState.Unknown) + 
.setStartTimestamp(0L) + .setDeployPath( + "deployPath") + .setLastHeartbeat(0L) + .setStats( + Metapb.StoreStats + .newBuilder() + .setStoreId( + 0L) + .setPartitionCount( + 0) + .addGraphStats( + Metapb.GraphStats + .newBuilder() + .setGraphName( + "value") + .setApproximateSize( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + .build()) + .setDataVersion(0) + .setCores(0) + .setDataPath("dataPath") + .build(); + + // Configure PDConfig.getPartition(...). + final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setTotalCount(0); + partition.setMaxShardsPerStore(0); + partition.setShardCount(0); + + // Run the test + final Metapb.Store result = service.updateStore(store); + } + + @Test + public void testStoreTurnoff() throws Exception { + // Setup + final Metapb.Store store = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version").setState( + Metapb.StoreState.Unknown).setStartTimestamp(0L) + .setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build(); + + // Configure PDConfig.getPartition(...). + final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setTotalCount(0); + partition.setMaxShardsPerStore(0); + partition.setShardCount(0); + + // Run the test + service.storeTurnoff(store); + + // Verify the results + } + + + @Test + public void testGetStores1() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.Store.newBuilder().setId(0L).setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel.newBuilder().build()) + .setVersion("version") + .setState(Metapb.StoreState.Unknown) + .setStartTimestamp(0L).setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build()); + + // Run the test + final List result = service.getStores(); + } + + @Test + public void testGetStores2() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.Store.newBuilder().setId(0L).setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel.newBuilder().build()) + .setVersion("version") + .setState(Metapb.StoreState.Unknown) + .setStartTimestamp(0L).setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build()); + + // Run the test + final List result = service.getStores("graphName"); + } + + + @Test + public void testGetStoreStatus() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.Store.newBuilder().setId(0L).setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel.newBuilder().build()) + .setVersion("version") 
+ .setState(Metapb.StoreState.Unknown) + .setStartTimestamp(0L).setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build()); + + // Run the test + final List result = service.getStoreStatus(false); + + } + + @Test + public void testGetShardGroups() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.ShardGroup.newBuilder().setId(0).addShards( + Metapb.Shard.newBuilder().setStoreId(0L) + .setRole(Metapb.ShardRole.None).build()) + .setState(Metapb.PartitionState.PState_None) + .build()); + + // Run the test + final List result = service.getShardGroups(); + + } + + @Test + public void testGetShardGroup() throws Exception { + // Setup + final Metapb.ShardGroup expectedResult = Metapb.ShardGroup.newBuilder() + .setId(0) + .addShards( + Metapb.Shard + .newBuilder() + .setStoreId( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + .setState( + Metapb.PartitionState.PState_None) + .build(); + + // Run the test + final Metapb.ShardGroup result = service.getShardGroup(0); + + // Verify the results + } + + + @Test + public void testGetShardGroupsByStore() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.ShardGroup.newBuilder().setId(0).addShards( + Metapb.Shard.newBuilder().setStoreId(0L) + .setRole(Metapb.ShardRole.None).build()) + .setState(Metapb.PartitionState.PState_None) + .build()); + + // Run the test + final List result = service.getShardGroupsByStore( + 0L); + } + + @Test + public void testGetActiveStores1() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.Store.newBuilder().setId(0L).setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel.newBuilder().build()) + .setVersion("version") + .setState(Metapb.StoreState.Unknown) + .setStartTimestamp(0L).setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build()); + + // Run the test + final List result = service.getActiveStores("graphName"); + + // Verify the results + } + + @Test + public void testGetActiveStores1ThrowsPDException() { + try { + List stores = service.getActiveStores(); + assertThat(stores.size() == 0); + } catch (Exception e) { + + } + } + + @Test + public void testGetTombStores() throws Exception { + // Setup + final List storeList = List.of( + Metapb.Store.newBuilder().setId(0L).setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel.newBuilder().build()) + .setVersion("version") + .setState(Metapb.StoreState.Tombstone) + .setStartTimestamp(0L).setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build()); + service.register(storeList.get(0)); + + // Run the test + final List result = 
service.getTombStores();
+
+        // Verify the results
+        assertThat(result.size() == 1).isTrue();
+        service.removeStore(result.get(0).getId());
+        List<Metapb.Store> stores = service.getStores();
+        assertThat(stores.size() == 0).isTrue();
+    }
+
+
+    @Test
+    public void testAllocShards() throws Exception {
+        // Setup
+        try {
+            final Metapb.Graph graph = Metapb.Graph.newBuilder()
+                    .setGraphName("graphName")
+                    .setGraphState(Metapb.GraphState.newBuilder()
+                                         .setMode(Metapb.GraphMode.ReadWrite)
+                                         .setReason(Metapb.GraphModeReason.Quota)
+                                         .build())
+                    .build();
+            final List<Metapb.Shard> expectedResult = List.of(
+                    Metapb.Shard.newBuilder().setStoreId(0L)
+                                .setRole(Metapb.ShardRole.None).build());
+
+            // Configure PDConfig.getPartition(...).
+            final PDConfig.Partition partition = new PDConfig().new Partition();
+            partition.setTotalCount(0);
+            partition.setMaxShardsPerStore(0);
+            partition.setShardCount(0);
+
+            // Run the test
+            final List<Metapb.Shard> result = service.allocShards(graph, 0);
+        } catch (Exception e) {
+            // ignored: no store has been registered in this setup, so allocation may fail
+        }
+
+    }
+
+    @Test
+    public void testReallocShards() throws Exception {
+        // Setup
+        try {
+            final Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder()
+                    .setId(0)
+                    .addShards(Metapb.Shard.newBuilder()
+                                           .setStoreId(0L)
+                                           .setRole(Metapb.ShardRole.None)
+                                           .build())
+                    .setState(Metapb.PartitionState.PState_None)
+                    .build();
+            final List<Metapb.Shard> expectedResult = List.of(
+                    Metapb.Shard.newBuilder().setStoreId(0L)
+                                .setRole(Metapb.ShardRole.None).build());
+
+            // Configure PDConfig.getPartition(...).
+ final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setTotalCount(0); + partition.setMaxShardsPerStore(0); + partition.setShardCount(0); + when(config.getPartition()).thenReturn(partition); + + // Run the test + final Metapb.ClusterStats result = service.heartBeat(storeStats); + + // Verify the results + assertThat(result).isEqualTo(expectedResult); + } catch (Exception e) { + + } + } + + + @Test + public void testUpdateClusterStatus1() { + // Setup + final Metapb.ClusterStats expectedResult = Metapb.ClusterStats + .newBuilder().setState(Metapb.ClusterState.Cluster_OK) + .setMessage("message").setTimestamp(0L).build(); + + // Run the test + final Metapb.ClusterStats result = service.updateClusterStatus( + Metapb.ClusterState.Cluster_OK); + } + + @Test + public void testUpdateClusterStatus2() { + // Setup + final Metapb.ClusterStats expectedResult = Metapb.ClusterStats + .newBuilder().setState(Metapb.ClusterState.Cluster_OK) + .setMessage("message").setTimestamp(0L).build(); + + // Run the test + final Metapb.ClusterStats result = service.updateClusterStatus( + Metapb.PartitionState.PState_None); + } + + @Test + public void testCheckStoreStatus() { + // Setup + // Run the test + service.checkStoreStatus(); + + // Verify the results + } + + @Test + public void testAddStatusListener() { + // Setup + final StoreStatusListener mockListener = mock( + StoreStatusListener.class); + + // Run the test + service.addStatusListener(mockListener); + + // Verify the results + } + + @Test + public void testOnStoreStatusChanged() { + // Setup + final Metapb.Store store = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version").setState( + Metapb.StoreState.Unknown).setStartTimestamp(0L) + .setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build(); + + // Verify the results + } + + @Test + public void testOnShardGroupSplit() { + // Setup + final Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() + .setId(0) + .addShards( + Metapb.Shard + .newBuilder() + .setStoreId( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + .setState( + Metapb.PartitionState.PState_None) + .build(); + final List newShardGroups = List.of( + Metapb.ShardGroup.newBuilder().setId(0).addShards( + Metapb.Shard.newBuilder().setStoreId(0L) + .setRole(Metapb.ShardRole.None).build()) + .setState(Metapb.PartitionState.PState_None) + .build()); + final Consumer mockTask = mock(Consumer.class); + + // Verify the results + } + + @Test + public void testCheckStoreCanOffline() { + // Setup + final Metapb.Store currentStore = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress( + "raftAddress") + .addLabels( + Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version") + .setState( + Metapb.StoreState.Unknown) + .setStartTimestamp(0L) + .setDeployPath( + "deployPath") + .setLastHeartbeat(0L) + .setStats( + Metapb.StoreStats + .newBuilder() + .setStoreId( + 0L) + .setPartitionCount( + 0) + .addGraphStats( + Metapb.GraphStats + .newBuilder() + .setGraphName( + "value") + .setApproximateSize( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + 
.build()) + .setDataVersion(0) + .setCores(0) + .setDataPath("dataPath") + .build(); + // Run the test + final boolean result = service.checkStoreCanOffline(currentStore); + + // Verify the results + assertThat(result).isTrue(); + } + + @Test + public void testShardGroupsDbCompaction() throws Exception { + // Setup + // Run the test + try { + service.shardGroupsDbCompaction(0, "tableName"); + } catch (Exception e) { + + } + + // Verify the results + } + + @Test + public void testGetQuota() throws Exception { + // Setup + // Run the test + try { + service.getQuota(); + } catch (Exception e) { + + } + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java new file mode 100644 index 0000000000..b787027cb6 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java @@ -0,0 +1,95 @@ +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.TaskScheduleService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertTrue; + +public class TaskScheduleServiceTest extends PdTestBase{ + + TaskScheduleService service; + + @Before + public void init(){ + service = getTaskService(); + } + + @Test + public void testStoreOffline(){ + + } + + public void testPatrolStores(){ + + } + + public void testPatrolPartitions(){ + + } + + public void testBalancePartitionShard(){ + + } + + @Test + public void testBalancePartitionLeader() throws PDException { + + var list = new ArrayList(); + for (int i = 0; i < 6; i++){ + getStoreNodeService().getStoreInfoMeta().updateShardGroup(genShardGroup(i)); + list.add(genPartition(i)); + } + + getPdConfig().getPartition().setShardCount(3); + + getPartitionService().updatePartition(list); + var rst = service.balancePartitionLeader(true); + assertTrue(rst.size() > 0 ); + // recover + getPdConfig().getPartition().setShardCount(1); + getStoreNodeService().getStoreInfoMeta().removeAll(); + } + + public void testSplitPartition(){ + + } + public void testSplitPartition2(){ + + } + + public void testCanAllPartitionsMovedOut(){ + + } + + private Metapb.ShardGroup genShardGroup(int groupId){ + return Metapb.ShardGroup.newBuilder() + .setId(groupId) + .addAllShards(genShards()) + .build(); + } + + private Metapb.Partition genPartition(int groupId){ + return Metapb.Partition.newBuilder() + .setId(groupId) + .setState(Metapb.PartitionState.PState_Normal) + .setGraphName("graph1") + .setStartKey(groupId * 10) + .setEndKey(groupId * 10 + 10) + .build(); + } + + private List genShards(){ + return List.of(Metapb.Shard.newBuilder().setStoreId(1).setRole(Metapb.ShardRole.Leader).build(), + Metapb.Shard.newBuilder().setStoreId(2).setRole(Metapb.ShardRole.Follower).build(), + Metapb.Shard.newBuilder().setStoreId(3).setRole(Metapb.ShardRole.Follower).build()); + } + +} + + diff --git a/local-release.sh b/local-release.sh new file mode 100755 index 0000000000..9d01cfd546 --- /dev/null +++ b/local-release.sh @@ -0,0 +1,8 @@ +#!/bin/bash +readonly VER=3.6.3 + +mvn -DnewVersion=${VER}-SNAPSHOT -DprocessAllModules=true -DgenerateBackupPoms=false versions:set + +mvn -DremoveSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=true versions:set +mvn --settings ./settings.xml -Dmaven.test.skip=true clean install +mvn 
versions:revert \ No newline at end of file diff --git a/mvnw b/mvnw new file mode 100644 index 0000000000..41c0f0c23d --- /dev/null +++ b/mvnw @@ -0,0 +1,310 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false; +darwin=false; +mingw=false +case "`uname`" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="`/usr/libexec/java_home`" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=`java-config --jre-home` + fi +fi + +if [ -z "$M2_HOME" ] ; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. 
+ + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." 
+ fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + fi + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` + fi + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=`cygpath --path --windows "$javaClass"` + fi + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. 
+MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/mvnw.cmd b/mvnw.cmd new file mode 100644 index 0000000000..86115719e5 --- /dev/null +++ b/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. 
>&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/pom.xml b/pom.xml new file mode 100644 index 0000000000..5ebb8f0809 --- /dev/null +++ b/pom.xml @@ -0,0 +1,234 @@ + + + 4.0.0 + + org.apache.hugegraph + hugegraph-pd-root + 3.6.5-SNAPSHOT + pom + + + org.springframework.boot + spring-boot-starter-parent + 2.5.14 + + + + hg-pd-grpc + hg-pd-client + hg-pd-core + hg-pd-service + hg-pd-common + hg-pd-dist + hg-pd-clitools + hg-pd-test + + + + + 11 + 11 + 2.17.0 + + + + + + org.apache.logging.log4j + log4j-slf4j-impl + 2.17.0 + + + com.baidu.hugegraph + hg-pd-grpc + ${project.version} + + + com.baidu.hugegraph + hg-pd-common + ${project.version} + + + + + + + junit + junit + test + + + + + + Baidu_Local + http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local + + + Baidu_Local_Snapshots + http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local_Snapshots + + + + + + + + + + + + + + + + + + + org.jacoco + jacoco-maven-plugin + 0.8.4 + + + **/grpc/**.* + **/config/**.* + + + + + + prepare-agent + + + + + + + + + + client-test + + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20 + + + client-test + + test + + test + + + + + + + + core-test + + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20 + + + core-test + + test + + test + + + + + + + + cli-tools-test + + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20 + + + cli-tools-test + + test + + test + + + + + + + + common-test + + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20 + + + common-test + + test + + test + + + + + + + + service-test + + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20 + + + service-test + + test + + test + + + + + + + + + + \ No newline at end of file diff --git a/settings.xml b/settings.xml new file mode 100644 index 0000000000..3158de73c3 --- /dev/null +++ b/settings.xml @@ -0,0 +1,113 @@ + + + + + + star-local + superstar + Superstar12345 + + + star-snapshot + superstar + Superstar12345 + + + + + baidu + + + baidu-nexus + http://maven.baidu-int.com/nexus/content/groups/public + + true + + + false + + + + baidu-nexus-snapshot + http://maven.baidu-int.com/nexus/content/groups/public-snapshots + + false + + + false + + + + + + star + http://10.14.139.8:8082/artifactory/star + + true + always + + + true + always + + + + + + Baidu_Local + http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local + + true + + + false + + + + Baidu_Local_Snapshots + http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local_Snapshots + + false + + + true + always + + + + + + baidu-nexus + http://maven.baidu-int.com/nexus/content/groups/public + + true + + + false + + + + baidu-nexus-snapshot + 
http://maven.baidu-int.com/nexus/content/groups/public-snapshots + + false + + + true + + + + + + + + + baidu + + \ No newline at end of file diff --git a/start_pd_server.sh b/start_pd_server.sh new file mode 100644 index 0000000000..3d96072e7a --- /dev/null +++ b/start_pd_server.sh @@ -0,0 +1,38 @@ +function rename() +{ + cfilelist=$(find -maxdepth 1 -type d -printf '%f\n' ) + for cfilename in $cfilelist + do + if [[ $cfilename =~ SNAPSHOT ]] + then + mv $cfilename ${cfilename/-?.?.?-SNAPSHOT/} + fi + done +} + +wget -q -O output.tar.gz $AGILE_PRODUCT_HTTP_URL +tar -zxf output.tar.gz +cd output +rm -rf hugegraph-pd +find . -name "*.tar.gz" -exec tar -zxvf {} \; +rename + + +# start pd +pushd hugegraph-pd +sed -i 's/initial-store-list:.*/initial-store-list: 127.0.0.1:8500\n initial-store-count: 1/' conf/application.yml +sed -i 's/,127.0.0.1:8611,127.0.0.1:8612//' conf/application.yml +bin/start-hugegraph-pd.sh +popd +jps +sleep 10 + + +# start store +pushd hugegraph-store +sed -i 's#local os=`uname`#local os=Linux#g' bin/util.sh +sed -i 's/export LD_PRELOAD/#export LD_PRELOAD/' bin/start-hugegraph-store.sh +bin/start-hugegraph-store.sh +popd +jps +sleep 5 \ No newline at end of file From 08f560f388c2eceb2e198cc0813664917f88aca2 Mon Sep 17 00:00:00 2001 From: imbajin Date: Sat, 6 May 2023 19:21:16 +0800 Subject: [PATCH 02/18] refact: unify LF line separator Change-Id: I3f38685af534468a51b79b7f45d24fdb30a74f34 --- .../apache/hugegraph/pd/meta/QueueStore.java | 68 +-- .../hugegraph/pd/metrics/PDMetrics.java | 198 ++++---- .../hugegraph/pd/model/PromTargetsModel.java | 144 +++--- .../pd/notice/NoticeBroadcaster.java | 314 ++++++------ .../hugegraph/pd/rest/PromTargetsAPI.java | 144 +++--- .../org/apache/hugegraph/pd/rest/TestAPI.java | 280 +++++------ .../pd/service/PromTargetsService.java | 468 +++++++++--------- .../apache/hugegraph/pd/util/HgMapCache.java | 164 +++--- .../org/apache/hugegraph/pd/util/IdUtil.java | 64 +-- settings.xml | 224 ++++----- 10 files changed, 1034 insertions(+), 1034 deletions(-) diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java index b3eb0c9714..6a23615d64 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java @@ -1,34 +1,34 @@ -package org.apache.hugegraph.pd.meta; - -import com.baidu.hugegraph.pd.common.HgAssert; -import com.baidu.hugegraph.pd.common.PDException; - -import org.apache.hugegraph.pd.config.PDConfig; - -import com.baidu.hugegraph.pd.grpc.Metapb; - -import java.util.List; - -/** - * @author lynn.bond@hotmail.com on 2022/2/10 - */ -public class QueueStore extends MetadataRocksDBStore { - QueueStore(PDConfig pdConfig) { - super(pdConfig); - } - - public void addItem(Metapb.QueueItem queueItem) throws PDException { - HgAssert.isArgumentNotNull(queueItem, "queueItem"); - byte[] key = MetadataKeyHelper.getQueueItemKey(queueItem.getItemId()); - put(key, queueItem.toByteString().toByteArray()); - } - - public void removeItem(String itemId) throws PDException { - remove(MetadataKeyHelper.getQueueItemKey(itemId)); - } - - public List getQueue() throws PDException { - byte[] prefix = MetadataKeyHelper.getQueueItemPrefix(); - return scanPrefix(Metapb.QueueItem.parser(), prefix); - } -} +package org.apache.hugegraph.pd.meta; + +import com.baidu.hugegraph.pd.common.HgAssert; +import com.baidu.hugegraph.pd.common.PDException; + +import 
org.apache.hugegraph.pd.config.PDConfig; + +import com.baidu.hugegraph.pd.grpc.Metapb; + +import java.util.List; + +/** + * @author lynn.bond@hotmail.com on 2022/2/10 + */ +public class QueueStore extends MetadataRocksDBStore { + QueueStore(PDConfig pdConfig) { + super(pdConfig); + } + + public void addItem(Metapb.QueueItem queueItem) throws PDException { + HgAssert.isArgumentNotNull(queueItem, "queueItem"); + byte[] key = MetadataKeyHelper.getQueueItemKey(queueItem.getItemId()); + put(key, queueItem.toByteString().toByteArray()); + } + + public void removeItem(String itemId) throws PDException { + remove(MetadataKeyHelper.getQueueItemKey(itemId)); + } + + public List getQueue() throws PDException { + byte[] prefix = MetadataKeyHelper.getQueueItemPrefix(); + return scanPrefix(Metapb.QueueItem.parser(), prefix); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java index 17f0dad1d2..bb67f78cb9 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java @@ -1,99 +1,99 @@ -package org.apache.hugegraph.pd.metrics; - -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import org.apache.hugegraph.pd.service.PDService; -import io.micrometer.core.instrument.Gauge; -import io.micrometer.core.instrument.MeterRegistry; -import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Collections; -import java.util.List; -import java.util.concurrent.atomic.AtomicLong; - -/** - * @author lynn.bond@hotmail.com on 2022/1/5 - */ -@Component -@Slf4j -public final class PDMetrics { - public final static String PREFIX = "hg"; - private final static AtomicLong graphs = new AtomicLong(0); - private MeterRegistry registry; - - @Autowired - private PDService pdService; - - public synchronized void init(MeterRegistry meterRegistry) { - - if (registry == null) { - registry = meterRegistry; - registerMeters(); - } - - } - - private void registerMeters() { - Gauge.builder(PREFIX + ".up", () -> 1).register(registry); - - Gauge.builder(PREFIX + ".graphs", () -> updateGraphs()) - .description("Number of graphs registered in PD") - .register(registry); - - Gauge.builder(PREFIX + ".stores", () -> updateStores()) - .description("Number of stores registered in PD") - .register(registry); - - } - - private long updateGraphs() { - long buf = getGraphs(); - - if (buf != graphs.get()) { - graphs.set(buf); - registerGraphMetrics(); - } - return buf; - } - - private long updateStores() { - return getStores(); - } - - private long getGraphs() { - return getGraphMetas().size(); - } - - private long getStores(){ - try { - return this.pdService.getStoreNodeService().getStores(null).size(); - } catch (PDException e) { - log.error(e.getMessage(),e); - e.printStackTrace(); - } - return 0; - } - - private List getGraphMetas(){ - try { - return this.pdService.getPartitionService().getGraphs(); - } catch (PDException e) { - log.error(e.getMessage(),e); - } - return Collections.EMPTY_LIST; - } - - private void registerGraphMetrics(){ - this.getGraphMetas().forEach(meta->{ - Gauge.builder(PREFIX + ".partitions",this.pdService.getPartitionService() - ,e-> e.getPartitions(meta.getGraphName()).size()) - .description("Number of partitions assigned to a graph") - 
.tag("graph",meta.getGraphName()) - .register(this.registry); - - }); - } - -} +package org.apache.hugegraph.pd.metrics; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.service.PDService; +import io.micrometer.core.instrument.Gauge; +import io.micrometer.core.instrument.MeterRegistry; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author lynn.bond@hotmail.com on 2022/1/5 + */ +@Component +@Slf4j +public final class PDMetrics { + public final static String PREFIX = "hg"; + private final static AtomicLong graphs = new AtomicLong(0); + private MeterRegistry registry; + + @Autowired + private PDService pdService; + + public synchronized void init(MeterRegistry meterRegistry) { + + if (registry == null) { + registry = meterRegistry; + registerMeters(); + } + + } + + private void registerMeters() { + Gauge.builder(PREFIX + ".up", () -> 1).register(registry); + + Gauge.builder(PREFIX + ".graphs", () -> updateGraphs()) + .description("Number of graphs registered in PD") + .register(registry); + + Gauge.builder(PREFIX + ".stores", () -> updateStores()) + .description("Number of stores registered in PD") + .register(registry); + + } + + private long updateGraphs() { + long buf = getGraphs(); + + if (buf != graphs.get()) { + graphs.set(buf); + registerGraphMetrics(); + } + return buf; + } + + private long updateStores() { + return getStores(); + } + + private long getGraphs() { + return getGraphMetas().size(); + } + + private long getStores(){ + try { + return this.pdService.getStoreNodeService().getStores(null).size(); + } catch (PDException e) { + log.error(e.getMessage(),e); + e.printStackTrace(); + } + return 0; + } + + private List getGraphMetas(){ + try { + return this.pdService.getPartitionService().getGraphs(); + } catch (PDException e) { + log.error(e.getMessage(),e); + } + return Collections.EMPTY_LIST; + } + + private void registerGraphMetrics(){ + this.getGraphMetas().forEach(meta->{ + Gauge.builder(PREFIX + ".partitions",this.pdService.getPartitionService() + ,e-> e.getPartitions(meta.getGraphName()).size()) + .description("Number of partitions assigned to a graph") + .tag("graph",meta.getGraphName()) + .register(this.registry); + + }); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java index b7deee61be..12203456fa 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java @@ -1,72 +1,72 @@ -package org.apache.hugegraph.pd.model; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -/** - * @author lynn.bond@hotmail.com on 2022/2/14 - */ -public class PromTargetsModel { - private static final String LABEL_METRICS_PATH = "__metrics_path__"; - private static final String LABEL_SCHEME = "__scheme__"; - private static final String LABEL_JOB_NAME = "job"; - private static final String LABEL_CLUSTER = "cluster"; - - private Set targets = new HashSet<>(); - private Map labels = new HashMap<>(); - - public static PromTargetsModel of() { - return new PromTargetsModel(); - } - - private PromTargetsModel() 
{} - - public Set getTargets() { - return targets; - } - - public Map getLabels() { - return labels; - } - - public PromTargetsModel addTarget(String target) { - if (target == null) return this; - this.targets.add(target); - return this; - } - - public PromTargetsModel setTargets(Set targets) { - if (targets != null) { - this.targets = targets; - } - return this; - } - - public PromTargetsModel setMetricsPath(String path) { - return this.addLabel(LABEL_METRICS_PATH, path); - } - - public PromTargetsModel setScheme(String scheme) { - return this.addLabel(LABEL_SCHEME, scheme); - } - - public PromTargetsModel setClusterId(String clusterId){ - return this.addLabel(LABEL_CLUSTER,clusterId); - } - - public PromTargetsModel addLabel(String label, String value) { - if (label == null || value == null) return this; - this.labels.put(label, value); - return this; - } - - @Override - public String toString() { - return "PromTargetModel{" + - "targets=" + targets + - ", labels=" + labels + - '}'; - } -} +package org.apache.hugegraph.pd.model; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * @author lynn.bond@hotmail.com on 2022/2/14 + */ +public class PromTargetsModel { + private static final String LABEL_METRICS_PATH = "__metrics_path__"; + private static final String LABEL_SCHEME = "__scheme__"; + private static final String LABEL_JOB_NAME = "job"; + private static final String LABEL_CLUSTER = "cluster"; + + private Set targets = new HashSet<>(); + private Map labels = new HashMap<>(); + + public static PromTargetsModel of() { + return new PromTargetsModel(); + } + + private PromTargetsModel() {} + + public Set getTargets() { + return targets; + } + + public Map getLabels() { + return labels; + } + + public PromTargetsModel addTarget(String target) { + if (target == null) return this; + this.targets.add(target); + return this; + } + + public PromTargetsModel setTargets(Set targets) { + if (targets != null) { + this.targets = targets; + } + return this; + } + + public PromTargetsModel setMetricsPath(String path) { + return this.addLabel(LABEL_METRICS_PATH, path); + } + + public PromTargetsModel setScheme(String scheme) { + return this.addLabel(LABEL_SCHEME, scheme); + } + + public PromTargetsModel setClusterId(String clusterId){ + return this.addLabel(LABEL_CLUSTER,clusterId); + } + + public PromTargetsModel addLabel(String label, String value) { + if (label == null || value == null) return this; + this.labels.put(label, value); + return this; + } + + @Override + public String toString() { + return "PromTargetModel{" + + "targets=" + targets + + ", labels=" + labels + + '}'; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java index b76897ff93..20f8daab51 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java @@ -1,157 +1,157 @@ -package org.apache.hugegraph.pd.notice; - -import com.baidu.hugegraph.pd.common.HgAssert; -import lombok.extern.slf4j.Slf4j; - -import java.util.function.Function; -import java.util.function.Supplier; - -/** - * @author lynn.bond@hotmail.com on 2022/2/10 - */ -@Slf4j -public class NoticeBroadcaster { - private long noticeId; - private String durableId; - private Supplier noticeSupplier; - private Supplier durableSupplier; - private Function removeFunction; 
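
As an aside on the PromTargetsModel class above: it is a small builder for Prometheus-style service-discovery entries, where addTarget() collects host:port strings and the set*() helpers fill the well-known __metrics_path__, __scheme__ and cluster labels. A minimal usage sketch follows; the address and cluster id are illustrative values, not taken from this patch:

    PromTargetsModel model = PromTargetsModel.of()
            .addTarget("127.0.0.1:8620")             // hypothetical PD REST address
            .setMetricsPath("/actuator/prometheus")
            .setScheme("http")
            .setClusterId("1");                      // stored under the "cluster" label

    model.getTargets();   // ["127.0.0.1:8620"]
    model.getLabels();    // {__metrics_path__=/actuator/prometheus, __scheme__=http, cluster=1}
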
- private int state; //0=ready; 1=notified; 2=done ack; -1=error - private int counter; - private long timestamp; - - public static NoticeBroadcaster of(Supplier noticeSupplier) { - HgAssert.isArgumentNotNull(noticeSupplier, "noticeSupplier"); - return new NoticeBroadcaster(noticeSupplier); - } - - private NoticeBroadcaster(Supplier noticeSupplier) { - this.noticeSupplier = noticeSupplier; - this.timestamp = System.currentTimeMillis(); - } - - public NoticeBroadcaster setDurableSupplier(Supplier durableSupplier) { - this.durableSupplier = durableSupplier; - return this; - } - - public NoticeBroadcaster setRemoveFunction(Function removeFunction) { - this.removeFunction = removeFunction; - return this; - } - - public NoticeBroadcaster notifying() { - - if (this.state >= 2) { - log.warn("Aborted notifying as ack has done. notice: {}", this); - return this; - } - - this.counter++; - - if (this.durableId == null && this.durableSupplier != null) { - try { - this.durableId = this.durableSupplier.get(); - } catch (Throwable t) { - log.error("Failed to invoke durableSupplier, cause by:", t); - } - } - - try { - this.noticeId = this.noticeSupplier.get(); - state = 1; - } catch (Throwable t) { - state = -1; - log.error("Failed to invoke noticeSupplier: {}; cause by: " + this.noticeSupplier.toString(), t); - } - - return this; - } - - public boolean checkAck(long ackNoticeId) { - boolean flag = false; - - if (this.noticeId == ackNoticeId) { - flag = true; - this.state = 2; - } - - if (flag) { - this.doRemoveDurable(); - } - - return flag; - } - - public boolean doRemoveDurable() { - log.info("Removing NoticeBroadcaster is stating, noticeId:{}, durableId: {}" - , this.noticeId, this.durableId); - boolean flag = false; - - if (this.removeFunction == null) { - log.warn("The remove-function hasn't been set."); - return false; - } - - if (this.durableId == null) { - log.warn("The durableId hasn't been set."); - return false; - } - - try { - if (!(flag = this.removeFunction.apply(this.durableId))) { - log.error("Removing NoticeBroadcaster was not complete, noticeId: {}, durableId: {}" - , this.noticeId, this.durableId); - } - } catch (Throwable t) { - log.error("Failed to remove NoticeBroadcaster, noticeId: " - + this.noticeId + ", durableId: " + this.durableId + ". 
Cause by:", t); - } - - return flag; - } - - public void setDurableId(String durableId) { - - if (HgAssert.isInvalid(durableId)) { - log.warn("Set an invalid durable-id to NoticeBroadcaster."); - } - - this.durableId = durableId; - } - - public long getNoticeId() { - return noticeId; - } - - public int getState() { - return state; - } - - public int getCounter() { - return counter; - } - - public String getDurableId() { - return durableId; - } - - public long getTimestamp() { - return timestamp; - } - - public void setTimestamp(long timestamp) { - this.timestamp = timestamp; - } - - @Override - public String toString() { - return "NoticeBroadcaster{" + - "noticeId=" + noticeId + - ", durableId='" + durableId + '\'' + - ", state=" + state + - ", counter=" + counter + - ", timestamp=" + timestamp + - '}'; - } -} +package org.apache.hugegraph.pd.notice; + +import com.baidu.hugegraph.pd.common.HgAssert; +import lombok.extern.slf4j.Slf4j; + +import java.util.function.Function; +import java.util.function.Supplier; + +/** + * @author lynn.bond@hotmail.com on 2022/2/10 + */ +@Slf4j +public class NoticeBroadcaster { + private long noticeId; + private String durableId; + private Supplier noticeSupplier; + private Supplier durableSupplier; + private Function removeFunction; + private int state; //0=ready; 1=notified; 2=done ack; -1=error + private int counter; + private long timestamp; + + public static NoticeBroadcaster of(Supplier noticeSupplier) { + HgAssert.isArgumentNotNull(noticeSupplier, "noticeSupplier"); + return new NoticeBroadcaster(noticeSupplier); + } + + private NoticeBroadcaster(Supplier noticeSupplier) { + this.noticeSupplier = noticeSupplier; + this.timestamp = System.currentTimeMillis(); + } + + public NoticeBroadcaster setDurableSupplier(Supplier durableSupplier) { + this.durableSupplier = durableSupplier; + return this; + } + + public NoticeBroadcaster setRemoveFunction(Function removeFunction) { + this.removeFunction = removeFunction; + return this; + } + + public NoticeBroadcaster notifying() { + + if (this.state >= 2) { + log.warn("Aborted notifying as ack has done. 
notice: {}", this); + return this; + } + + this.counter++; + + if (this.durableId == null && this.durableSupplier != null) { + try { + this.durableId = this.durableSupplier.get(); + } catch (Throwable t) { + log.error("Failed to invoke durableSupplier, cause by:", t); + } + } + + try { + this.noticeId = this.noticeSupplier.get(); + state = 1; + } catch (Throwable t) { + state = -1; + log.error("Failed to invoke noticeSupplier: {}; cause by: " + this.noticeSupplier.toString(), t); + } + + return this; + } + + public boolean checkAck(long ackNoticeId) { + boolean flag = false; + + if (this.noticeId == ackNoticeId) { + flag = true; + this.state = 2; + } + + if (flag) { + this.doRemoveDurable(); + } + + return flag; + } + + public boolean doRemoveDurable() { + log.info("Removing NoticeBroadcaster is stating, noticeId:{}, durableId: {}" + , this.noticeId, this.durableId); + boolean flag = false; + + if (this.removeFunction == null) { + log.warn("The remove-function hasn't been set."); + return false; + } + + if (this.durableId == null) { + log.warn("The durableId hasn't been set."); + return false; + } + + try { + if (!(flag = this.removeFunction.apply(this.durableId))) { + log.error("Removing NoticeBroadcaster was not complete, noticeId: {}, durableId: {}" + , this.noticeId, this.durableId); + } + } catch (Throwable t) { + log.error("Failed to remove NoticeBroadcaster, noticeId: " + + this.noticeId + ", durableId: " + this.durableId + ". Cause by:", t); + } + + return flag; + } + + public void setDurableId(String durableId) { + + if (HgAssert.isInvalid(durableId)) { + log.warn("Set an invalid durable-id to NoticeBroadcaster."); + } + + this.durableId = durableId; + } + + public long getNoticeId() { + return noticeId; + } + + public int getState() { + return state; + } + + public int getCounter() { + return counter; + } + + public String getDurableId() { + return durableId; + } + + public long getTimestamp() { + return timestamp; + } + + public void setTimestamp(long timestamp) { + this.timestamp = timestamp; + } + + @Override + public String toString() { + return "NoticeBroadcaster{" + + "noticeId=" + noticeId + + ", durableId='" + durableId + '\'' + + ", state=" + state + + ", counter=" + counter + + ", timestamp=" + timestamp + + '}'; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java index b7a1ce2987..a4c9a0c232 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java @@ -1,72 +1,72 @@ -package org.apache.hugegraph.pd.rest; - -import org.apache.hugegraph.pd.model.PromTargetsModel; -import org.apache.hugegraph.pd.service.PromTargetsService; -import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.http.MediaType; -import org.springframework.http.ResponseEntity; -import org.springframework.web.bind.annotation.GetMapping; -import org.springframework.web.bind.annotation.PathVariable; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RestController; - -import java.util.Collections; -import java.util.List; -import java.util.Optional; - -/** - * @author lynn.bond@hotmail.com on 2022/2/14 - */ -@RestController -@Slf4j -@RequestMapping("/v1/prom") -public class PromTargetsAPI { - - @Autowired - private PromTargetsService service; - - 
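
For orientation, the NoticeBroadcaster above drives a notify-until-acked loop: of() takes a supplier that actually pushes the notice and returns its id, setDurableSupplier() optionally persists the notice and returns a durable id, and setRemoveFunction() deletes that durable copy once checkAck() sees the matching notice id. A rough sketch of that lifecycle, using an in-memory map purely as a stand-in for whatever persistence the service really uses (the ids and payload below are made up):

    // imports: java.util.Map, java.util.concurrent.ConcurrentHashMap, java.util.concurrent.atomic.AtomicLong
    AtomicLong noticeSeq = new AtomicLong();
    Map<String, String> durableStore = new ConcurrentHashMap<>();

    NoticeBroadcaster broadcaster = NoticeBroadcaster
            .of(noticeSeq::incrementAndGet)                        // toy "send": just hands back a new notice id
            .setDurableSupplier(() -> {                            // toy "persist": keeps a copy under a durable id
                String id = String.valueOf(System.currentTimeMillis());
                durableStore.put(id, "payload");
                return id;
            })
            .setRemoveFunction(id -> durableStore.remove(id) != null);

    broadcaster.notifying();                                       // state 0 -> 1 (or -1 on failure), counter++
    broadcaster.checkAck(broadcaster.getNoticeId());               // state -> 2, durable copy is removed
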
@GetMapping(value = "/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) - public ResponseEntity> getPromTargets(@PathVariable(value = "appName", required = true) String appName) { - return ResponseEntity.of(Optional.ofNullable(this.service.getTargets(appName))); - } - - @GetMapping(value = "/targets-all", produces = MediaType.APPLICATION_JSON_VALUE) - public ResponseEntity> getPromAllTargets() { - return ResponseEntity.of(Optional.ofNullable(this.service.getAllTargets())); - } - - @GetMapping(value = "/demo/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) - public List getDemoTargets(@PathVariable(value = "appName", required = true) String targetType) { - - PromTargetsModel model =null; - switch (targetType) { - case "node": - model=PromTargetsModel.of() - .addTarget("10.14.139.26:8100") - .addTarget("10.14.139.27:8100") - .addTarget("10.14.139.28:8100") - .setMetricsPath("/metrics") - .setScheme("http"); - break; - case "store": - model=PromTargetsModel.of() - .addTarget("172.20.94.98:8521") - .addTarget("172.20.94.98:8522") - .addTarget("172.20.94.98:8523") - .setMetricsPath("/actuator/prometheus") - .setScheme("http"); - break; - case "pd": - model=PromTargetsModel.of() - .addTarget("172.20.94.98:8620") - .setMetricsPath("/actuator/prometheus"); - - break; - default: - - } - - return Collections.singletonList(model); - } -} +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.model.PromTargetsModel; +import org.apache.hugegraph.pd.service.PromTargetsService; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +/** + * @author lynn.bond@hotmail.com on 2022/2/14 + */ +@RestController +@Slf4j +@RequestMapping("/v1/prom") +public class PromTargetsAPI { + + @Autowired + private PromTargetsService service; + + @GetMapping(value = "/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) + public ResponseEntity> getPromTargets(@PathVariable(value = "appName", required = true) String appName) { + return ResponseEntity.of(Optional.ofNullable(this.service.getTargets(appName))); + } + + @GetMapping(value = "/targets-all", produces = MediaType.APPLICATION_JSON_VALUE) + public ResponseEntity> getPromAllTargets() { + return ResponseEntity.of(Optional.ofNullable(this.service.getAllTargets())); + } + + @GetMapping(value = "/demo/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) + public List getDemoTargets(@PathVariable(value = "appName", required = true) String targetType) { + + PromTargetsModel model =null; + switch (targetType) { + case "node": + model=PromTargetsModel.of() + .addTarget("10.14.139.26:8100") + .addTarget("10.14.139.27:8100") + .addTarget("10.14.139.28:8100") + .setMetricsPath("/metrics") + .setScheme("http"); + break; + case "store": + model=PromTargetsModel.of() + .addTarget("172.20.94.98:8521") + .addTarget("172.20.94.98:8522") + .addTarget("172.20.94.98:8523") + .setMetricsPath("/actuator/prometheus") + .setScheme("http"); + break; + case "pd": + model=PromTargetsModel.of() + .addTarget("172.20.94.98:8620") + 
.setMetricsPath("/actuator/prometheus"); + + break; + default: + + } + + return Collections.singletonList(model); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java index fe769cd473..e933f6c777 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java @@ -1,140 +1,140 @@ -package org.apache.hugegraph.pd.rest; - -import com.baidu.hugegraph.pd.RegistryService; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.discovery.Query; -import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard; -import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; -import com.baidu.hugegraph.pd.meta.MetadataFactory; -import com.baidu.hugegraph.pd.meta.QueueStore; - -import org.apache.hugegraph.pd.pulse.PDPulseSubject; -import org.apache.hugegraph.pd.watch.PDWatchSubject; - -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.Parser; -import lombok.extern.slf4j.Slf4j; - -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.http.MediaType; -import org.springframework.web.bind.annotation.*; - -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.atomic.AtomicLong; - -/** - * @author lynn.bond@hotmail.com on 2022/2/9 - */ -@RestController -@Slf4j -@RequestMapping("/test") -public class TestAPI { - - @Autowired - private PDConfig pdConfig; - - @GetMapping(value = "/discovery/{appName}", produces = MediaType.TEXT_PLAIN_VALUE) - @ResponseBody - public String discovery(@PathVariable(value = "appName", required = true)String appName){ - RegistryService register =new RegistryService(pdConfig); - // Query query=Query.newBuilder().setAppName("hugegraph").build(); - AtomicLong label = new AtomicLong(); - HashMap labels = new HashMap<>(); - String labelValue = String.valueOf(label.incrementAndGet()); - //labels.put("address",labelValue); - Query query = Query.newBuilder().build(); - // Query query = Query.newBuilder().setAppName("hugegraph").set.build(); - - return register.getNodes(query).toString(); - } - - @GetMapping(value = "/pulse", produces = MediaType.TEXT_PLAIN_VALUE) - @ResponseBody - public String notifyClient() { - PDPulseSubject.notifyClient( - PartitionHeartbeatResponse.newBuilder() - .setPartition(Metapb.Partition.newBuilder() - .setId(8) - .setGraphName("graphName8")) - - .setChangeShard( - ChangeShard.newBuilder() - .setChangeTypeValue(8) - .addShard(Metapb.Shard.newBuilder() - .setRoleValue(8) - .setStoreId(8) - ) - ) - - ); - return "partition"; - } - - @GetMapping(value = "/partition", produces = MediaType.TEXT_PLAIN_VALUE) - @ResponseBody - public String noticePartition() { - PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.ALTER, "graph-test", 99); - return "partition"; - } - - @PutMapping(value = "/queue", produces = MediaType.TEXT_PLAIN_VALUE) - @ResponseBody - public String testPutQueue() { - this.putQueue(); - return "queue"; - } - - public void putQueue(){ - PartitionHeartbeatResponse response=PartitionHeartbeatResponse.newBuilder() - .setPartition(Metapb.Partition.newBuilder() - .setId(9) - .setGraphName("graphName")) - .setChangeShard( - ChangeShard.newBuilder() - .setChangeTypeValue(9) - .addShard(Metapb.Shard.newBuilder() - .setRoleValue(9) - 
.setStoreId(9) - ) - ).build(); - - Metapb.QueueItem.Builder builder=Metapb.QueueItem.newBuilder() - .setItemId("item-id") - .setItemClass("item-class") - .setItemContent(response.toByteString()); - - - QueueStore store= MetadataFactory.newQueueStore(pdConfig); - - try { - store.addItem(builder.setItemId("item-id-1").build()); - store.addItem(builder.setItemId("item-id-2").build()); - store.addItem(builder.setItemId("item-id-3").build()); - } catch (PDException e) { - e.printStackTrace(); - } - List queue=null; - try { - queue=store.getQueue(); - } catch (PDException e) { - e.printStackTrace(); - } - Parser parser= PartitionHeartbeatResponse.parser(); - - queue.stream().forEach(e->{ - PartitionHeartbeatResponse buf=null; - try { - buf=parser.parseFrom(e.getItemContent()); - } catch (InvalidProtocolBufferException ex) { - ex.printStackTrace(); - } - PDPulseSubject.notifyClient( PartitionHeartbeatResponse.newBuilder(buf)); - }); - - - - } -} +package org.apache.hugegraph.pd.rest; + +import com.baidu.hugegraph.pd.RegistryService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.discovery.Query; +import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import com.baidu.hugegraph.pd.meta.MetadataFactory; +import com.baidu.hugegraph.pd.meta.QueueStore; + +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.watch.PDWatchSubject; + +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Parser; +import lombok.extern.slf4j.Slf4j; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.*; + +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author lynn.bond@hotmail.com on 2022/2/9 + */ +@RestController +@Slf4j +@RequestMapping("/test") +public class TestAPI { + + @Autowired + private PDConfig pdConfig; + + @GetMapping(value = "/discovery/{appName}", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String discovery(@PathVariable(value = "appName", required = true)String appName){ + RegistryService register =new RegistryService(pdConfig); + // Query query=Query.newBuilder().setAppName("hugegraph").build(); + AtomicLong label = new AtomicLong(); + HashMap labels = new HashMap<>(); + String labelValue = String.valueOf(label.incrementAndGet()); + //labels.put("address",labelValue); + Query query = Query.newBuilder().build(); + // Query query = Query.newBuilder().setAppName("hugegraph").set.build(); + + return register.getNodes(query).toString(); + } + + @GetMapping(value = "/pulse", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String notifyClient() { + PDPulseSubject.notifyClient( + PartitionHeartbeatResponse.newBuilder() + .setPartition(Metapb.Partition.newBuilder() + .setId(8) + .setGraphName("graphName8")) + + .setChangeShard( + ChangeShard.newBuilder() + .setChangeTypeValue(8) + .addShard(Metapb.Shard.newBuilder() + .setRoleValue(8) + .setStoreId(8) + ) + ) + + ); + return "partition"; + } + + @GetMapping(value = "/partition", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String noticePartition() { + PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.ALTER, "graph-test", 99); + return "partition"; + } + + 
@PutMapping(value = "/queue", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String testPutQueue() { + this.putQueue(); + return "queue"; + } + + public void putQueue(){ + PartitionHeartbeatResponse response=PartitionHeartbeatResponse.newBuilder() + .setPartition(Metapb.Partition.newBuilder() + .setId(9) + .setGraphName("graphName")) + .setChangeShard( + ChangeShard.newBuilder() + .setChangeTypeValue(9) + .addShard(Metapb.Shard.newBuilder() + .setRoleValue(9) + .setStoreId(9) + ) + ).build(); + + Metapb.QueueItem.Builder builder=Metapb.QueueItem.newBuilder() + .setItemId("item-id") + .setItemClass("item-class") + .setItemContent(response.toByteString()); + + + QueueStore store= MetadataFactory.newQueueStore(pdConfig); + + try { + store.addItem(builder.setItemId("item-id-1").build()); + store.addItem(builder.setItemId("item-id-2").build()); + store.addItem(builder.setItemId("item-id-3").build()); + } catch (PDException e) { + e.printStackTrace(); + } + List queue=null; + try { + queue=store.getQueue(); + } catch (PDException e) { + e.printStackTrace(); + } + Parser parser= PartitionHeartbeatResponse.parser(); + + queue.stream().forEach(e->{ + PartitionHeartbeatResponse buf=null; + try { + buf=parser.parseFrom(e.getItemContent()); + } catch (InvalidProtocolBufferException ex) { + ex.printStackTrace(); + } + PDPulseSubject.notifyClient( PartitionHeartbeatResponse.newBuilder(buf)); + }); + + + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java index df641564c1..21e8fb28b8 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java @@ -1,234 +1,234 @@ -package org.apache.hugegraph.pd.service; - -import com.baidu.hugegraph.pd.RegistryService; -import com.baidu.hugegraph.pd.common.HgAssert; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; -import com.baidu.hugegraph.pd.grpc.discovery.Query; - -import org.apache.hugegraph.pd.util.HgMapCache; -import org.apache.hugegraph.pd.model.PromTargetsModel; -import org.apache.hugegraph.pd.rest.MemberAPI; - -import lombok.extern.slf4j.Slf4j; - -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Service; - -import java.util.*; -import java.util.function.Supplier; -import java.util.stream.Collectors; - -/** - * @author lynn.bond@hotmail.com on 2022/2/24 - */ -@Service -@Slf4j -public class PromTargetsService { - - @Autowired - private PDConfig pdConfig; - @Autowired - private PDService pdService; - - private RegistryService register; - - private final PromTargetsModel pdModel = PromTargetsModel.of() - .addLabel("__app_name", "pd") - .setScheme("http") - .setMetricsPath("/actuator/prometheus"); - - - private final PromTargetsModel storeModel = PromTargetsModel.of() - .addLabel("__app_name", "store") - .setScheme("http") - .setMetricsPath("/actuator/prometheus"); - - - private HgMapCache> targetsCache = HgMapCache.expiredOf(24 * 60 * 60 * 1000);// expired after 24H. 
- - private RegistryService getRegister() { - if (this.register == null) { - this.register = new RegistryService(this.pdConfig); - } - return this.register; - } - - public List getAllTargets() { - List res = new LinkedList<>(); - List buf = this.toModels(this.getRegister().getNodes(Query.newBuilder().build())); - - if (buf != null) { - res.addAll(buf); - } - - res.add(getPdTargets()); - res.add(getStoreTargets()); - - return res; - } - - /** - * @param appName - * @return null if it's not existing - */ - public List getTargets(String appName) { - HgAssert.isArgumentNotNull(appName, "appName"); - switch (appName) { - case "pd": - return Collections.singletonList(this.getPdTargets()); - case "store": - return Collections.singletonList(this.getStoreTargets()); - default: - return this.toModels(this.getRegister().getNodes(Query.newBuilder().setAppName(appName).build())); - } - } - - private PromTargetsModel getPdTargets() { - return setTargets(pdModel, () -> this.mergeCache("pd", getPdAddresses())); - } - - private PromTargetsModel getStoreTargets() { - return setTargets(storeModel, () -> this.mergeCache("store", getStoreAddresses())); - } - - private PromTargetsModel setTargets(PromTargetsModel model, Supplier> supplier) { - return model.setTargets(supplier.get()).setClusterId(String.valueOf(pdConfig.getClusterId())); - } - - /* to prevent the failure of connection between pd and store or pd and pd.*/ - //TODO: To add a schedule task to refresh targets, not to retrieve in every time. - private Set mergeCache(String key, Set set) { - Set buf = this.targetsCache.get(key); - - if (buf == null) { - buf = new HashSet<>(); - this.targetsCache.put(key, buf); - } - - if (set != null) { - buf.addAll(set); - } - - return buf; - } - - private List toModels(NodeInfos info) { - if (info == null) { - return null; - } - - List nodes = info.getInfoList(); - if (nodes == null || nodes.isEmpty()) { - return null; - } - - List res = - nodes.stream().map(e -> { - Map labels = e.getLabelsMap(); - - String target = labels.get("target"); - if (HgAssert.isInvalid(target)) return null; - - PromTargetsModel model = PromTargetsModel.of(); - model.addTarget(target); - model.addLabel("__app_name", e.getAppName()); - - labels.forEach((k, v) -> { - k = k.trim(); - switch (k) { - case "metrics": - model.setMetricsPath(v.trim()); - break; - case "scheme": - model.setScheme(v.trim()); - break; - default: - if (k.startsWith("__")) { - model.addLabel(k, v); - } - - } - }); - - - return model; - }) - .filter(e -> e != null) - .collect(Collectors.toList()); - - if (res.isEmpty()) { - return null; - } - return res; - } - - private Set getPdAddresses() { - MemberAPI.CallStreamObserverWrap response = new MemberAPI.CallStreamObserverWrap<>(); - pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); - List members = null; - - try { - members = response.get().get(0).getMembersList(); - } catch (Throwable e) { - log.error("Failed to get all pd members.", e); - } - - Set res = new HashSet<>(); - if (members != null) { - members.stream().forEach(e -> res.add(e.getRestUrl())); - } - - return res; - } - - private Set getStoreAddresses() { - Set res = new HashSet<>(); - List stores = null; - try { - stores = pdService.getStoreNodeService().getStores(); - } catch (PDException e) { - log.error("Failed to get all stores.", e); - } - - if (stores != null) { - stores.stream().forEach(e -> { - String buf = this.getRestAddress(e); - if (buf != null) { - res.add(buf); - } - }); - } - - return res; - } - - //TODO: optimized 
store registry data, to add host:port of REST server. - private String getRestAddress(Metapb.Store store) { - String address = store.getAddress(); - if (address == null || address.isEmpty()) return null; - try { - Optional port = store.getLabelsList().stream().map( - e -> { - if ("rest.port".equals(e.getKey())) { - return e.getValue(); - } - return null; - }).filter(e -> e != null).findFirst(); - - if (port.isPresent()) { - address = address.substring(0, address.indexOf(':') + 1); - address = address + port.get(); - - } - } catch (Throwable t) { - log.error("Failed to extract the REST address of store, cause by:", t); - } - return address; - - } -} +package org.apache.hugegraph.pd.service; + +import com.baidu.hugegraph.pd.RegistryService; +import com.baidu.hugegraph.pd.common.HgAssert; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; +import com.baidu.hugegraph.pd.grpc.discovery.Query; + +import org.apache.hugegraph.pd.util.HgMapCache; +import org.apache.hugegraph.pd.model.PromTargetsModel; +import org.apache.hugegraph.pd.rest.MemberAPI; + +import lombok.extern.slf4j.Slf4j; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.*; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +/** + * @author lynn.bond@hotmail.com on 2022/2/24 + */ +@Service +@Slf4j +public class PromTargetsService { + + @Autowired + private PDConfig pdConfig; + @Autowired + private PDService pdService; + + private RegistryService register; + + private final PromTargetsModel pdModel = PromTargetsModel.of() + .addLabel("__app_name", "pd") + .setScheme("http") + .setMetricsPath("/actuator/prometheus"); + + + private final PromTargetsModel storeModel = PromTargetsModel.of() + .addLabel("__app_name", "store") + .setScheme("http") + .setMetricsPath("/actuator/prometheus"); + + + private HgMapCache> targetsCache = HgMapCache.expiredOf(24 * 60 * 60 * 1000);// expired after 24H. 
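
A brief aside on the 24-hour targetsCache above: HgMapCache, which appears further down in this patch, expires by clearing the entire map once its interval has elapsed, so the next get() returns null and mergeCache() starts re-accumulating addresses from scratch. A condensed sketch of that behaviour, with a short interval and String values purely for illustration (type parameters assumed):

    // inside a method declaring `throws InterruptedException`
    HgMapCache<String, String> cache = HgMapCache.expiredOf(1_000);  // 1 s instead of 24 h, for the sketch
    cache.put("pd", "127.0.0.1:8620");
    cache.get("pd");          // "127.0.0.1:8620"
    Thread.sleep(1_500);
    cache.get("pd");          // null: the interval elapsed and the whole cache was cleared
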
+ + private RegistryService getRegister() { + if (this.register == null) { + this.register = new RegistryService(this.pdConfig); + } + return this.register; + } + + public List getAllTargets() { + List res = new LinkedList<>(); + List buf = this.toModels(this.getRegister().getNodes(Query.newBuilder().build())); + + if (buf != null) { + res.addAll(buf); + } + + res.add(getPdTargets()); + res.add(getStoreTargets()); + + return res; + } + + /** + * @param appName + * @return null if it's not existing + */ + public List getTargets(String appName) { + HgAssert.isArgumentNotNull(appName, "appName"); + switch (appName) { + case "pd": + return Collections.singletonList(this.getPdTargets()); + case "store": + return Collections.singletonList(this.getStoreTargets()); + default: + return this.toModels(this.getRegister().getNodes(Query.newBuilder().setAppName(appName).build())); + } + } + + private PromTargetsModel getPdTargets() { + return setTargets(pdModel, () -> this.mergeCache("pd", getPdAddresses())); + } + + private PromTargetsModel getStoreTargets() { + return setTargets(storeModel, () -> this.mergeCache("store", getStoreAddresses())); + } + + private PromTargetsModel setTargets(PromTargetsModel model, Supplier> supplier) { + return model.setTargets(supplier.get()).setClusterId(String.valueOf(pdConfig.getClusterId())); + } + + /* to prevent the failure of connection between pd and store or pd and pd.*/ + //TODO: To add a schedule task to refresh targets, not to retrieve in every time. + private Set mergeCache(String key, Set set) { + Set buf = this.targetsCache.get(key); + + if (buf == null) { + buf = new HashSet<>(); + this.targetsCache.put(key, buf); + } + + if (set != null) { + buf.addAll(set); + } + + return buf; + } + + private List toModels(NodeInfos info) { + if (info == null) { + return null; + } + + List nodes = info.getInfoList(); + if (nodes == null || nodes.isEmpty()) { + return null; + } + + List res = + nodes.stream().map(e -> { + Map labels = e.getLabelsMap(); + + String target = labels.get("target"); + if (HgAssert.isInvalid(target)) return null; + + PromTargetsModel model = PromTargetsModel.of(); + model.addTarget(target); + model.addLabel("__app_name", e.getAppName()); + + labels.forEach((k, v) -> { + k = k.trim(); + switch (k) { + case "metrics": + model.setMetricsPath(v.trim()); + break; + case "scheme": + model.setScheme(v.trim()); + break; + default: + if (k.startsWith("__")) { + model.addLabel(k, v); + } + + } + }); + + + return model; + }) + .filter(e -> e != null) + .collect(Collectors.toList()); + + if (res.isEmpty()) { + return null; + } + return res; + } + + private Set getPdAddresses() { + MemberAPI.CallStreamObserverWrap response = new MemberAPI.CallStreamObserverWrap<>(); + pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); + List members = null; + + try { + members = response.get().get(0).getMembersList(); + } catch (Throwable e) { + log.error("Failed to get all pd members.", e); + } + + Set res = new HashSet<>(); + if (members != null) { + members.stream().forEach(e -> res.add(e.getRestUrl())); + } + + return res; + } + + private Set getStoreAddresses() { + Set res = new HashSet<>(); + List stores = null; + try { + stores = pdService.getStoreNodeService().getStores(); + } catch (PDException e) { + log.error("Failed to get all stores.", e); + } + + if (stores != null) { + stores.stream().forEach(e -> { + String buf = this.getRestAddress(e); + if (buf != null) { + res.add(buf); + } + }); + } + + return res; + } + + //TODO: optimized 
store registry data, to add host:port of REST server. + private String getRestAddress(Metapb.Store store) { + String address = store.getAddress(); + if (address == null || address.isEmpty()) return null; + try { + Optional port = store.getLabelsList().stream().map( + e -> { + if ("rest.port".equals(e.getKey())) { + return e.getValue(); + } + return null; + }).filter(e -> e != null).findFirst(); + + if (port.isPresent()) { + address = address.substring(0, address.indexOf(':') + 1); + address = address + port.get(); + + } + } catch (Throwable t) { + log.error("Failed to extract the REST address of store, cause by:", t); + } + return address; + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java index 7d7b126579..d0cb0e0e25 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java @@ -1,82 +1,82 @@ -package org.apache.hugegraph.pd.util; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Supplier; - -/** - * @param - * @param - * @author lynn.bond@hotmail.com on 2022/3/10 - */ -public class HgMapCache { - private Map cache = new ConcurrentHashMap(); - private Supplier expiry; - - public static HgMapCache expiredOf(long interval){ - return new HgMapCache(new CycleIntervalPolicy(interval)); - } - - private HgMapCache(Supplier expiredPolicy) { - this.expiry = expiredPolicy; - } - - private boolean isExpired() { - if (expiry != null && expiry.get()) { - cache.clear(); - return true; - } - return false; - } - - public void put(K key, V value) { - if (key == null || value == null) return; - this.cache.put(key, value); - } - - - public V get(K key) { - if (isExpired()) return null; - return this.cache.get(key); - } - - public void removeAll() { - this.cache.clear(); - } - - public boolean remove(K key) { - if (key != null) { - this.cache.remove(key); - return true; - } - return false; - } - - public Map getAll() { - return this.cache; - } - - private static class CycleIntervalPolicy implements Supplier{ - private long expireTime=0; - private long interval=0; - - public CycleIntervalPolicy(long interval){ - this.interval=interval; - init(); - } - private void init(){ - expireTime=System.currentTimeMillis()+interval; - } - - @Override - public Boolean get() { - if(System.currentTimeMillis()>expireTime){ - init(); - return true; - } - return false; - } - - } - -} +package org.apache.hugegraph.pd.util; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; + +/** + * @param + * @param + * @author lynn.bond@hotmail.com on 2022/3/10 + */ +public class HgMapCache { + private Map cache = new ConcurrentHashMap(); + private Supplier expiry; + + public static HgMapCache expiredOf(long interval){ + return new HgMapCache(new CycleIntervalPolicy(interval)); + } + + private HgMapCache(Supplier expiredPolicy) { + this.expiry = expiredPolicy; + } + + private boolean isExpired() { + if (expiry != null && expiry.get()) { + cache.clear(); + return true; + } + return false; + } + + public void put(K key, V value) { + if (key == null || value == null) return; + this.cache.put(key, value); + } + + + public V get(K key) { + if (isExpired()) return null; + return this.cache.get(key); + } + + public void removeAll() { + this.cache.clear(); + } + + public boolean remove(K key) { + if (key != null) { + 
this.cache.remove(key); + return true; + } + return false; + } + + public Map getAll() { + return this.cache; + } + + private static class CycleIntervalPolicy implements Supplier{ + private long expireTime=0; + private long interval=0; + + public CycleIntervalPolicy(long interval){ + this.interval=interval; + init(); + } + private void init(){ + expireTime=System.currentTimeMillis()+interval; + } + + @Override + public Boolean get() { + if(System.currentTimeMillis()>expireTime){ + init(); + return true; + } + return false; + } + + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java index 844f306f8d..569702e6c4 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java @@ -1,32 +1,32 @@ -package org.apache.hugegraph.pd.util; - -import lombok.extern.slf4j.Slf4j; - -/** - * @author lynn.bond@hotmail.com on 2022/2/8 - */ -@Slf4j -public final class IdUtil { - private final static byte[] lock = new byte[0]; - - public static String createMillisStr(){ - return String.valueOf(createMillisId()); - } - - /** - * Create millisecond style ID; - * @return - */ - public static Long createMillisId() { - synchronized (lock) { - try { - Thread.sleep(1); - } catch (InterruptedException e) { - log.error("Failed to sleep", e); - } - - return System.currentTimeMillis(); - } - - } -} +package org.apache.hugegraph.pd.util; + +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2022/2/8 + */ +@Slf4j +public final class IdUtil { + private final static byte[] lock = new byte[0]; + + public static String createMillisStr(){ + return String.valueOf(createMillisId()); + } + + /** + * Create millisecond style ID; + * @return + */ + public static Long createMillisId() { + synchronized (lock) { + try { + Thread.sleep(1); + } catch (InterruptedException e) { + log.error("Failed to sleep", e); + } + + return System.currentTimeMillis(); + } + + } +} diff --git a/settings.xml b/settings.xml index 3158de73c3..083a6a0dc4 100644 --- a/settings.xml +++ b/settings.xml @@ -1,113 +1,113 @@ - - - - - - star-local - superstar - Superstar12345 - - - star-snapshot - superstar - Superstar12345 - - - - - baidu - - - baidu-nexus - http://maven.baidu-int.com/nexus/content/groups/public - - true - - - false - - - - baidu-nexus-snapshot - http://maven.baidu-int.com/nexus/content/groups/public-snapshots - - false - - - false - - - - - - star - http://10.14.139.8:8082/artifactory/star - - true - always - - - true - always - - - - - - Baidu_Local - http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local - - true - - - false - - - - Baidu_Local_Snapshots - http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local_Snapshots - - false - - - true - always - - - - - - baidu-nexus - http://maven.baidu-int.com/nexus/content/groups/public - - true - - - false - - - - baidu-nexus-snapshot - http://maven.baidu-int.com/nexus/content/groups/public-snapshots - - false - - - true - - - - - - - - - baidu - + + + + + + star-local + superstar + Superstar12345 + + + star-snapshot + superstar + Superstar12345 + + + + + baidu + + + baidu-nexus + http://maven.baidu-int.com/nexus/content/groups/public + + true + + + false + + + + baidu-nexus-snapshot + http://maven.baidu-int.com/nexus/content/groups/public-snapshots + + false + + + false + + + + + + star + http://10.14.139.8:8082/artifactory/star + + true + always + + + 
true + always + + + + + + Baidu_Local + http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local + + true + + + false + + + + Baidu_Local_Snapshots + http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local_Snapshots + + false + + + true + always + + + + + + baidu-nexus + http://maven.baidu-int.com/nexus/content/groups/public + + true + + + false + + + + baidu-nexus-snapshot + http://maven.baidu-int.com/nexus/content/groups/public-snapshots + + false + + + true + + + + + + + + + baidu + \ No newline at end of file From dd49f3f54bbefc2797395cb4ee95685f257ce682 Mon Sep 17 00:00:00 2001 From: imbajin Date: Sat, 6 May 2023 19:41:10 +0800 Subject: [PATCH 03/18] refact: add header & format in test & service & grpc & dist modules Change-Id: Id2fa1374d7fa85635906a7e75655e7dad56f1807 --- .gitattributes | 1 + hg-pd-dist/pom.xml | 21 +- .../assembly/descriptor/server-assembly.xml | 17 + .../assembly/static/bin/start-hugegraph-pd.sh | 17 + .../assembly/static/bin/stop-hugegraph-pd.sh | 17 + hg-pd-dist/src/assembly/static/bin/util.sh | 19 +- .../src/assembly/static/conf/log4j2.xml | 35 +- hg-pd-grpc/pom.xml | 21 +- hg-pd-grpc/src/main/proto/discovery.proto | 22 +- hg-pd-grpc/src/main/proto/kv.proto | 10 +- hg-pd-grpc/src/main/proto/metapb.proto | 28 +- hg-pd-grpc/src/main/proto/pd_common.proto | 36 +- hg-pd-grpc/src/main/proto/pd_pulse.proto | 140 +++--- hg-pd-grpc/src/main/proto/pd_watch.proto | 82 ++-- hg-pd-grpc/src/main/proto/pdpb.proto | 18 +- hg-pd-service/pom.xml | 21 +- .../pd/upgrade/VersionScriptFactory.java | 29 +- .../pd/upgrade/VersionUpgradeScript.java | 20 + .../upgrade/scripts/PartitionMetaUpgrade.java | 61 ++- .../pd/upgrade/scripts/TaskCleanUpgrade.java | 18 + .../hugegraph/pd/boot/HugePDServer.java | 22 +- .../pd/license/LicenseVerifierService.java | 235 +++++----- .../pd/license/LicenseVerifyManager.java | 28 +- .../hugegraph/pd/metrics/MetricsConfig.java | 20 +- .../hugegraph/pd/metrics/PDMetrics.java | 69 ++- .../apache/hugegraph/pd/model/DemoModel.java | 23 +- .../hugegraph/pd/model/GraphRestRequest.java | 17 + .../pd/model/GraphSpaceRestRequest.java | 17 + .../hugegraph/pd/model/PeerRestRequest.java | 17 + .../hugegraph/pd/model/PromTargetsModel.java | 48 +- .../pd/model/RegistryQueryRestRequest.java | 23 +- .../pd/model/RegistryRestRequest.java | 23 +- .../pd/model/RegistryRestResponse.java | 22 +- .../hugegraph/pd/model/RestApiResponse.java | 29 +- .../hugegraph/pd/model/StoreRestRequest.java | 17 + .../hugegraph/pd/model/TimeRangeRequest.java | 17 + .../pd/notice/NoticeBroadcaster.java | 71 +-- .../pd/pulse/AbstractObserverSubject.java | 85 ++-- .../hugegraph/pd/pulse/PDPulseSubject.java | 143 +++--- .../pd/pulse/PartitionHeartbeatSubject.java | 27 +- .../hugegraph/pd/pulse/PulseListener.java | 19 +- .../org/apache/hugegraph/pd/rest/API.java | 86 ++-- .../apache/hugegraph/pd/rest/GraphAPI.java | 76 ++- .../hugegraph/pd/rest/GraphSpaceAPI.java | 63 ++- .../apache/hugegraph/pd/rest/IndexAPI.java | 77 +-- .../apache/hugegraph/pd/rest/MemberAPI.java | 113 +++-- .../hugegraph/pd/rest/PartitionAPI.java | 124 +++-- .../hugegraph/pd/rest/PromTargetsAPI.java | 64 ++- .../apache/hugegraph/pd/rest/RegistryAPI.java | 75 ++- .../apache/hugegraph/pd/rest/ShardAPI.java | 46 +- .../apache/hugegraph/pd/rest/StoreAPI.java | 84 +++- .../org/apache/hugegraph/pd/rest/TaskAPI.java | 39 +- .../org/apache/hugegraph/pd/rest/TestAPI.java | 140 +++--- .../pd/service/DiscoveryService.java | 77 ++- .../pd/service/KvServiceGrpcImpl.java | 121 +++-- 
.../hugegraph/pd/service/PDPulseService.java | 90 ++-- .../hugegraph/pd/service/PDRestService.java | 56 ++- .../hugegraph/pd/service/PDService.java | 438 +++++++++++------- .../hugegraph/pd/service/PDWatchService.java | 21 +- .../pd/service/PromTargetsService.java | 145 +++--- .../hugegraph/pd/service/ServiceGrpc.java | 46 +- .../hugegraph/pd/service/UpgradeService.java | 44 +- .../apache/hugegraph/pd/util/DateUtil.java | 60 ++- .../apache/hugegraph/pd/util/HgMapCache.java | 46 +- .../org/apache/hugegraph/pd/util/IdUtil.java | 20 +- .../pd/util/grpc/StreamObserverUtil.java | 21 +- .../pd/watch/AbstractWatchSubject.java | 69 ++- .../hugegraph/pd/watch/KvWatchSubject.java | 91 ++-- .../hugegraph/pd/watch/NodeChangeSubject.java | 43 +- .../hugegraph/pd/watch/PDWatchSubject.java | 75 ++- .../pd/watch/PartitionChangeSubject.java | 42 +- .../pd/watch/ShardGroupChangeSubject.java | 38 +- hg-pd-service/src/main/resources/log4j2.xml | 35 +- .../src/test/java/live/PDServer0.java | 31 +- .../src/test/java/live/PDServer1.java | 32 +- .../src/test/java/live/PDServer2.java | 32 +- .../src/test/java/live/PDServer3.java | 32 +- hg-pd-service/src/test/resources/log4j2.xml | 37 +- hg-pd-test/pom.xml | 21 +- .../hugegraph/pd/client/BaseClientTest.java | 25 +- .../pd/client/DiscoveryClientTest.java | 30 +- .../hugegraph/pd/client/KvClientTest.java | 47 +- .../pd/client/PDClientSuiteTest.java | 20 +- .../hugegraph/pd/client/PDClientTest.java | 37 +- .../pd/clitools/BaseCliToolsTest.java | 18 +- .../pd/clitools/CliToolsSuiteTest.java | 20 +- .../hugegraph/pd/clitools/MainTest.java | 70 ++- .../hugegraph/pd/common/BaseCommonTest.java | 17 + .../hugegraph/pd/common/CommonSuiteTest.java | 22 +- .../hugegraph/pd/common/HgAssertTest.java | 51 +- .../hugegraph/pd/common/KVPairTest.java | 42 +- .../pd/common/MetadataKeyHelperTest.java | 27 +- .../pd/common/PartitionCacheTest.java | 241 +++++----- .../pd/common/PartitionUtilsTest.java | 20 +- .../hugegraph/pd/core/BaseCoreTest.java | 43 +- .../hugegraph/pd/core/PDCoreSuiteTest.java | 22 +- .../pd/core/StoreNodeServiceTest.java | 68 ++- .../pd/core/meta/MetadataKeyHelperTest.java | 26 +- .../hugegraph/pd/grpc/BaseGrpcTest.java | 18 +- .../hugegraph/pd/grpc/GrpcSuiteTest.java | 20 +- .../hugegraph/pd/service/BaseServerTest.java | 43 +- .../pd/service/ConfigServiceTest.java | 41 +- .../hugegraph/pd/service/IdServiceTest.java | 133 +++--- .../hugegraph/pd/service/KvServiceTest.java | 22 +- .../hugegraph/pd/service/LogServiceTest.java | 30 +- .../pd/service/PartitionServiceTest.java | 85 ++-- .../hugegraph/pd/service/PdTestBase.java | 66 ++- .../hugegraph/pd/service/RestApiTest.java | 88 ++-- .../hugegraph/pd/service/ServerSuiteTest.java | 20 +- .../service/StoreMonitorDataServiceTest.java | 58 ++- .../pd/service/StoreNodeServiceNewTest.java | 43 +- .../pd/service/StoreServiceTest.java | 39 +- .../pd/service/TaskScheduleServiceTest.java | 80 ++-- 113 files changed, 4077 insertions(+), 1910 deletions(-) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..4fd3cf5c2a --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf diff --git a/hg-pd-dist/pom.xml b/hg-pd-dist/pom.xml index f0c3037c9b..b3089d0800 100644 --- a/hg-pd-dist/pom.xml +++ b/hg-pd-dist/pom.xml @@ -1,6 +1,23 @@ - + + hugegraph-pd-root diff --git a/hg-pd-dist/src/assembly/descriptor/server-assembly.xml b/hg-pd-dist/src/assembly/descriptor/server-assembly.xml index 42c4a79633..a725093386 100644 --- 
a/hg-pd-dist/src/assembly/descriptor/server-assembly.xml +++ b/hg-pd-dist/src/assembly/descriptor/server-assembly.xml @@ -1,3 +1,20 @@ + + distribution false diff --git a/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh b/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh index 9eb60d1cd0..8ccd5fc279 100644 --- a/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh +++ b/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh @@ -1,5 +1,22 @@ #!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + if [ -z "$GC_OPTION" ];then GC_OPTION="" fi diff --git a/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh b/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh index eeb709271c..0183a20af2 100644 --- a/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh +++ b/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh @@ -1,5 +1,22 @@ #!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + abs_path() { SOURCE="${BASH_SOURCE[0]}" while [ -h "$SOURCE" ]; do diff --git a/hg-pd-dist/src/assembly/static/bin/util.sh b/hg-pd-dist/src/assembly/static/bin/util.sh index 188d4bb545..d32871f13f 100644 --- a/hg-pd-dist/src/assembly/static/bin/util.sh +++ b/hg-pd-dist/src/assembly/static/bin/util.sh @@ -1,8 +1,25 @@ #!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + function command_available() { local cmd=$1 - if [ `command -v $cmd >/dev/null 2>&1` ]; then + if [ $(command -v $cmd >/dev/null 2>&1) ]; then return 1 else return 0 diff --git a/hg-pd-dist/src/assembly/static/conf/log4j2.xml b/hg-pd-dist/src/assembly/static/conf/log4j2.xml index 275c8467c6..d68438f89a 100644 --- a/hg-pd-dist/src/assembly/static/conf/log4j2.xml +++ b/hg-pd-dist/src/assembly/static/conf/log4j2.xml @@ -1,5 +1,22 @@ + + @@ -25,7 +42,7 @@ - + @@ -33,8 +50,8 @@ - - + + @@ -52,7 +69,7 @@ - + @@ -60,8 +77,8 @@ - - + + @@ -80,7 +97,7 @@ - + @@ -88,8 +105,8 @@ - - + + diff --git a/hg-pd-grpc/pom.xml b/hg-pd-grpc/pom.xml index b55790263c..41308bacb4 100644 --- a/hg-pd-grpc/pom.xml +++ b/hg-pd-grpc/pom.xml @@ -1,6 +1,23 @@ - + + 4.0.0 diff --git a/hg-pd-grpc/src/main/proto/discovery.proto b/hg-pd-grpc/src/main/proto/discovery.proto index c76aadbd7c..1c716059ba 100644 --- a/hg-pd-grpc/src/main/proto/discovery.proto +++ b/hg-pd-grpc/src/main/proto/discovery.proto @@ -9,7 +9,7 @@ option java_multiple_files = true; service DiscoveryService { rpc register(NodeInfo) returns (RegisterInfo); rpc getNodes(Query) returns (NodeInfos); -// rpc getNodesByLabel(Conditions) returns (NodeInfos); + // rpc getNodesByLabel(Conditions) returns (NodeInfos); } /* requests */ @@ -19,12 +19,12 @@ message NodeInfo { string version = 3; string address = 4; int64 interval = 5; - map labels = 6; + map labels = 6; } message Query { string appName = 1; string version = 2; - map labels = 3; + map labels = 3; } message LeaseInfo { int64 registrationTs = 1; @@ -32,15 +32,15 @@ message LeaseInfo { int64 serverUpTs = 3; } message RegisterInfo { - NodeInfo nodeInfo = 1; - LeaseInfo leaseInfo = 2 ; - RegisterType type = 3 ; - pdpb.ResponseHeader header = 4; + NodeInfo nodeInfo = 1; + LeaseInfo leaseInfo = 2 ; + RegisterType type = 3 ; + pdpb.ResponseHeader header = 4; } enum RegisterType { - Register = 0; - Heartbeat = 1; - Dislodge = 2; + Register = 0; + Heartbeat = 1; + Dislodge = 2; } //message Condition{ // string label = 1; @@ -50,5 +50,5 @@ enum RegisterType { // string value = 2; //} message NodeInfos{ - repeated NodeInfo info = 1; + repeated NodeInfo info = 1; } \ No newline at end of file diff --git a/hg-pd-grpc/src/main/proto/kv.proto b/hg-pd-grpc/src/main/proto/kv.proto index 3cd6d4b0b7..402b356ceb 100644 --- a/hg-pd-grpc/src/main/proto/kv.proto +++ b/hg-pd-grpc/src/main/proto/kv.proto @@ -31,7 +31,7 @@ message Kv { string value = 3; } message KvResponse { - pdpb.ResponseHeader header = 1; + pdpb.ResponseHeader header = 1; } message K{ @@ -46,7 +46,7 @@ message KResponse{ message ScanPrefixResponse { pdpb.ResponseHeader header = 1; - map kvs = 2; + map kvs = 2; } message LockRequest{ @@ -88,7 +88,7 @@ message WatchEvent { message WatchResponse { pdpb.ResponseHeader header = 1; - repeated WatchEvent events= 2; + repeated WatchEvent events = 2; int64 clientId = 3; WatchState state = 4; } @@ -102,7 +102,7 @@ enum WatchState { message WatchRequest { pdpb.RequestHeader header = 1; - WatchState state= 2; + WatchState state = 2; string key = 3; int64 clientId = 4; } @@ -110,7 +110,7 @@ message WatchRequest { message V{ string value = 1; int64 ttl = 2; - int64 st =3; + int64 st = 3; } message TTLRequest{ diff --git a/hg-pd-grpc/src/main/proto/metapb.proto b/hg-pd-grpc/src/main/proto/metapb.proto index 54477288b6..7f11d093b5 100644 --- a/hg-pd-grpc/src/main/proto/metapb.proto +++ b/hg-pd-grpc/src/main/proto/metapb.proto @@ -4,7 +4,7 @@ option java_package = "com.baidu.hugegraph.pd.grpc"; import 
"google/protobuf/any.proto"; enum ClusterState{ - // 集群健康 + // 集群健康 Cluster_OK = 0; // 分区警告,存在部分故障节点,短时间不影响读写 Cluster_Warn = 2; @@ -26,7 +26,7 @@ enum StoreState { // 未激活 Pending = 4; // 在线 - Up = 1; + Up = 1; // 离线 Offline = 2; // 下线中 @@ -63,11 +63,11 @@ message Store { } enum ShardRole { - None = 0; - Leader = 1; - Follower = 2; + None = 0; + Leader = 1; + Follower = 2; // Learner/None -> Learner - Learner = 3; + Learner = 3; } message Shard { @@ -180,7 +180,7 @@ message PartitionStats{ // 分区状态 PartitionState state = 8; repeated ShardStats shardStats = 9; - // 分区近似大小 + // 分区近似大小 uint64 approximate_size = 10; // 分区key的近似数量 uint64 approximate_keys = 13; @@ -195,8 +195,8 @@ message GraphStats{ uint64 approximate_size = 2; // 分区key的近似数量 uint64 approximate_keys = 3; -// // committed index -// uint64 committed_index = 4; + // // committed index + // uint64 committed_index = 4; uint32 partition_id = 5; ShardRole role = 6; // 当前工作状态 @@ -234,7 +234,7 @@ message QueryStats { } enum ShardState{ - SState_None = 0; + SState_None = 0; // 正常 SState_Normal = 1; // 安装快照 @@ -342,10 +342,10 @@ message PDConfig{ //消息持久化 message QueueItem{ - string item_id=1; - string item_class=2; - bytes item_content=3; - int64 timestamp=10; + string item_id = 1; + string item_class = 2; + bytes item_content = 3; + int64 timestamp = 10; } message LogRecord{ diff --git a/hg-pd-grpc/src/main/proto/pd_common.proto b/hg-pd-grpc/src/main/proto/pd_common.proto index eaab29fcc2..c5724e0112 100644 --- a/hg-pd-grpc/src/main/proto/pd_common.proto +++ b/hg-pd-grpc/src/main/proto/pd_common.proto @@ -5,32 +5,32 @@ option java_package = "com.baidu.hugegraph.pd.grpc.common"; option java_outer_classname = "HgPdCommonProto"; message RequestHeader { - // 集群 ID. - uint64 cluster_id = 1; - // 发送者 ID. - uint64 sender_id = 2; + // 集群 ID. + uint64 cluster_id = 1; + // 发送者 ID. + uint64 sender_id = 2; } message ResponseHeader { - // cluster_id is the ID of the cluster which sent the response. - uint64 cluster_id = 1; - Error error = 2; + // cluster_id is the ID of the cluster which sent the response. 
+ uint64 cluster_id = 1; + Error error = 2; } enum ErrorType { - OK = 0; - UNKNOWN = 1; - STORE_NON_EXIST = 101; - STORE_TOMBSTONE = 103; - ALREADY_BOOTSTRAPPED = 4; - INCOMPATIBLE_VERSION = 5; - PARTITION_NOT_FOUND = 6; + OK = 0; + UNKNOWN = 1; + STORE_NON_EXIST = 101; + STORE_TOMBSTONE = 103; + ALREADY_BOOTSTRAPPED = 4; + INCOMPATIBLE_VERSION = 5; + PARTITION_NOT_FOUND = 6; - ETCD_READ_ERROR = 1000; - ETCD_WRITE_ERROR = 1001; + ETCD_READ_ERROR = 1000; + ETCD_WRITE_ERROR = 1001; } message Error { - ErrorType type = 1; - string message = 2; + ErrorType type = 1; + string message = 2; } \ No newline at end of file diff --git a/hg-pd-grpc/src/main/proto/pd_pulse.proto b/hg-pd-grpc/src/main/proto/pd_pulse.proto index a5c0b71a5f..31c8d1e2fa 100644 --- a/hg-pd-grpc/src/main/proto/pd_pulse.proto +++ b/hg-pd-grpc/src/main/proto/pd_pulse.proto @@ -8,137 +8,137 @@ option java_package = "com.baidu.hugegraph.pd.grpc.pulse"; option java_outer_classname = "HgPdPulseProto"; service HgPdPulse { - rpc Pulse(stream PulseRequest) returns (stream PulseResponse); + rpc Pulse(stream PulseRequest) returns (stream PulseResponse); } /* requests */ message PulseRequest { - PulseCreateRequest create_request = 1; - PulseCancelRequest cancel_request = 2; - PulseNoticeRequest notice_request = 3; - PulseAckRequest ack_request = 4; + PulseCreateRequest create_request = 1; + PulseCancelRequest cancel_request = 2; + PulseNoticeRequest notice_request = 3; + PulseAckRequest ack_request = 4; } message PulseCreateRequest { - PulseType pulse_type = 1; + PulseType pulse_type = 1; } message PulseCancelRequest { - int64 observer_id = 1; + int64 observer_id = 1; } message PulseNoticeRequest { - int64 observer_id = 1; - oneof request_union { - PartitionHeartbeatRequest partition_heartbeat_request = 10; - } + int64 observer_id = 1; + oneof request_union { + PartitionHeartbeatRequest partition_heartbeat_request = 10; + } } message PulseAckRequest { - int64 observer_id = 1; - int64 notice_id = 2; + int64 observer_id = 1; + int64 notice_id = 2; } // 分区心跳,分区的peer增减、leader改变等事件发生时,由leader发送心跳。 // 同时pd对分区进行shard增减通过Response发送给leader message PartitionHeartbeatRequest { - RequestHeader header = 1; - // Leader Peer sending the heartbeat - metapb.PartitionStats states = 4; + RequestHeader header = 1; + // Leader Peer sending the heartbeat + metapb.PartitionStats states = 4; } /* responses */ message PulseResponse { - PulseType pulse_type = 1; - int64 observer_id = 2; - int32 status = 3; //0=ok,1=fail - int64 notice_id=4; - oneof response_union { - PartitionHeartbeatResponse partition_heartbeat_response = 10; - } + PulseType pulse_type = 1; + int64 observer_id = 2; + int32 status = 3; //0=ok,1=fail + int64 notice_id = 4; + oneof response_union { + PartitionHeartbeatResponse partition_heartbeat_response = 10; + } } message PartitionHeartbeatResponse { - ResponseHeader header = 1; - uint64 id = 3; - metapb.Partition partition = 2; - ChangeShard change_shard = 4; - - TransferLeader transfer_leader = 5; - // 拆分成多个分区,第一个SplitPartition是原分区,从第二开始是新分区 - SplitPartition split_partition = 6; - // rocksdb compaction 指定的表,null是针对所有 - DbCompaction db_compaction = 7; - // 将partition的数据,迁移到 target - MovePartition move_partition = 8; - // 清理partition的graph的数据 - CleanPartition clean_partition = 9; - // partition key range 变化 - PartitionKeyRange key_range = 10; + ResponseHeader header = 1; + uint64 id = 3; + metapb.Partition partition = 2; + ChangeShard change_shard = 4; + + TransferLeader transfer_leader = 5; + // 拆分成多个分区,第一个SplitPartition是原分区,从第二开始是新分区 + 
SplitPartition split_partition = 6; + // rocksdb compaction 指定的表,null是针对所有 + DbCompaction db_compaction = 7; + // 将partition的数据,迁移到 target + MovePartition move_partition = 8; + // 清理partition的graph的数据 + CleanPartition clean_partition = 9; + // partition key range 变化 + PartitionKeyRange key_range = 10; } /* Date model */ message ChangeShard { - repeated metapb.Shard shard = 1; - ConfChangeType change_type = 2; + repeated metapb.Shard shard = 1; + ConfChangeType change_type = 2; } message TransferLeader { - metapb.Shard shard = 1; + metapb.Shard shard = 1; } message SplitPartition { - repeated metapb.Partition new_partition = 1; + repeated metapb.Partition new_partition = 1; } message DbCompaction { - string table_name = 3; + string table_name = 3; } message MovePartition{ - // target partition的key range为,迁移后的新range - metapb.Partition target_partition = 1; - // partition 的 key start 和 key end的所有数据, - // 会迁移到 target partition 上 - uint64 key_start = 2; - uint64 key_end = 3; + // target partition的key range为,迁移后的新range + metapb.Partition target_partition = 1; + // partition 的 key start 和 key end的所有数据, + // 会迁移到 target partition 上 + uint64 key_start = 2; + uint64 key_end = 3; } message CleanPartition { - uint64 key_start = 1; - uint64 key_end = 2; - CleanType clean_type = 3; - bool delete_partition = 4; //是否删除分区 + uint64 key_start = 1; + uint64 key_end = 2; + CleanType clean_type = 3; + bool delete_partition = 4; //是否删除分区 } message PartitionKeyRange{ - uint32 partition_id = 1; - uint64 key_start = 2; - uint64 key_end = 3; + uint32 partition_id = 1; + uint64 key_start = 2; + uint64 key_end = 3; } /* enums */ enum PulseType { - PULSE_TYPE_UNKNOWN = 0; - PULSE_TYPE_PARTITION_HEARTBEAT = 1; + PULSE_TYPE_UNKNOWN = 0; + PULSE_TYPE_PARTITION_HEARTBEAT = 1; } enum PulseChangeType { - PULSE_CHANGE_TYPE_UNKNOWN = 0; - PULSE_CHANGE_TYPE_ADD = 1; - PULSE_CHANGE_TYPE_ALTER = 2; - PULSE_CHANGE_TYPE_DEL = 3; + PULSE_CHANGE_TYPE_UNKNOWN = 0; + PULSE_CHANGE_TYPE_ADD = 1; + PULSE_CHANGE_TYPE_ALTER = 2; + PULSE_CHANGE_TYPE_DEL = 3; } enum ConfChangeType { - CONF_CHANGE_TYPE_UNKNOWN = 0; - CONF_CHANGE_TYPE_ADD_NODE = 1; - CONF_CHANGE_TYPE_REMOVE_NODE = 2; - CONF_CHANGE_TYPE_ADD_LEARNER_NODE = 3; - CONF_CHANGE_TYPE_ADJUST = 4; // 调整shard,leader根据新的配置动态增减。 + CONF_CHANGE_TYPE_UNKNOWN = 0; + CONF_CHANGE_TYPE_ADD_NODE = 1; + CONF_CHANGE_TYPE_REMOVE_NODE = 2; + CONF_CHANGE_TYPE_ADD_LEARNER_NODE = 3; + CONF_CHANGE_TYPE_ADJUST = 4; // 调整shard,leader根据新的配置动态增减。 } enum CleanType { - CLEAN_TYPE_KEEP_RANGE = 0; // 仅保留这个range - CLEAN_TYPE_EXCLUDE_RANGE = 1; // 删除这个range + CLEAN_TYPE_KEEP_RANGE = 0; // 仅保留这个range + CLEAN_TYPE_EXCLUDE_RANGE = 1; // 删除这个range } \ No newline at end of file diff --git a/hg-pd-grpc/src/main/proto/pd_watch.proto b/hg-pd-grpc/src/main/proto/pd_watch.proto index 0c1dc84e39..38e4af4ed2 100644 --- a/hg-pd-grpc/src/main/proto/pd_watch.proto +++ b/hg-pd-grpc/src/main/proto/pd_watch.proto @@ -7,78 +7,78 @@ option java_package = "com.baidu.hugegraph.pd.grpc.watch"; option java_outer_classname = "HgPdWatchProto"; service HgPdWatch { - rpc Watch(stream WatchRequest) returns (stream WatchResponse); + rpc Watch(stream WatchRequest) returns (stream WatchResponse); } message WatchRequest { - WatchCreateRequest create_request = 1; - WatchCancelRequest cancel_request = 2; + WatchCreateRequest create_request = 1; + WatchCancelRequest cancel_request = 2; } message WatchCreateRequest { - WatchType watch_type = 1; + WatchType watch_type = 1; } message WatchCancelRequest { - int64 watcher_id = 1; + int64 watcher_id = 1; } 
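
The HgPdWatch service defined above is a single bidirectional stream: the client sends a WatchCreateRequest (or later a WatchCancelRequest) on the request stream and receives typed WatchResponse messages back on the same stream. The following is a minimal, illustrative Java sketch of a consumer of that stream; it assumes the protoc-generated classes (HgPdWatchGrpc plus top-level message classes, i.e. java_multiple_files = true as in the sibling protos), an assumed PD gRPC address, and is not the project's own client implementation.

    import com.baidu.hugegraph.pd.grpc.watch.HgPdWatchGrpc;
    import com.baidu.hugegraph.pd.grpc.watch.WatchCreateRequest;
    import com.baidu.hugegraph.pd.grpc.watch.WatchRequest;
    import com.baidu.hugegraph.pd.grpc.watch.WatchResponse;
    import com.baidu.hugegraph.pd.grpc.watch.WatchType;

    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;
    import io.grpc.stub.StreamObserver;

    public class WatchClientSketch {
        public static void main(String[] args) {
            // Hypothetical PD gRPC address; replace with the real pd endpoint.
            ManagedChannel channel = ManagedChannelBuilder
                    .forTarget("127.0.0.1:8686")
                    .usePlaintext()
                    .build();

            // Open the bidirectional stream; all server notices arrive on this observer.
            StreamObserver<WatchRequest> requests =
                    HgPdWatchGrpc.newStub(channel).watch(new StreamObserver<WatchResponse>() {
                        @Override
                        public void onNext(WatchResponse res) {
                            // Dispatch on watch_type / response_union, e.g. partition changes.
                            System.out.println(res.getWatchType() + " notice " + res.getNoticeId());
                        }

                        @Override
                        public void onError(Throwable t) {
                            // A real client would re-subscribe; here we just log.
                            t.printStackTrace();
                        }

                        @Override
                        public void onCompleted() {
                            // Server closed the stream.
                        }
                    });

            // Ask PD to push partition-change events on this stream.
            requests.onNext(WatchRequest.newBuilder()
                    .setCreateRequest(WatchCreateRequest.newBuilder()
                            .setWatchType(WatchType.WATCH_TYPE_PARTITION_CHANGE))
                    .build());
        }
    }
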
message WatchResponse { - WatchType watch_type = 1; - int64 watcher_id = 2; - int32 status = 3; //0=ok,1=fail - int64 notice_id = 4; - string msg = 5; - oneof response_union { - WatchPartitionResponse partition_response = 10; - WatchNodeResponse node_response = 11; - WatchGraphResponse graph_response = 12; - WatchShardGroupResponse shard_group_response = 13; - } + WatchType watch_type = 1; + int64 watcher_id = 2; + int32 status = 3; //0=ok,1=fail + int64 notice_id = 4; + string msg = 5; + oneof response_union { + WatchPartitionResponse partition_response = 10; + WatchNodeResponse node_response = 11; + WatchGraphResponse graph_response = 12; + WatchShardGroupResponse shard_group_response = 13; + } } message WatchPartitionResponse { - string graph = 1; - int32 partition_id = 2; - WatchChangeType change_type = 3; + string graph = 1; + int32 partition_id = 2; + WatchChangeType change_type = 3; } message WatchNodeResponse { - string graph = 1; - uint64 node_id = 2; - NodeEventType node_event_type = 3; + string graph = 1; + uint64 node_id = 2; + NodeEventType node_event_type = 3; } message WatchGraphResponse { - metapb.Graph graph = 1; - WatchType type = 2; + metapb.Graph graph = 1; + WatchType type = 2; } message WatchShardGroupResponse { - metapb.ShardGroup shard_group = 1; - WatchChangeType type = 2; - int32 shard_group_id = 3; + metapb.ShardGroup shard_group = 1; + WatchChangeType type = 2; + int32 shard_group_id = 3; } enum WatchType { - WATCH_TYPE_UNKNOWN = 0; - WATCH_TYPE_PARTITION_CHANGE = 1; - WATCH_TYPE_STORE_NODE_CHANGE = 2; - WATCH_TYPE_GRAPH_CHANGE = 3; - WATCH_TYPE_SHARD_GROUP_CHANGE = 4; + WATCH_TYPE_UNKNOWN = 0; + WATCH_TYPE_PARTITION_CHANGE = 1; + WATCH_TYPE_STORE_NODE_CHANGE = 2; + WATCH_TYPE_GRAPH_CHANGE = 3; + WATCH_TYPE_SHARD_GROUP_CHANGE = 4; } enum WatchChangeType { - WATCH_CHANGE_TYPE_UNKNOWN = 0; - WATCH_CHANGE_TYPE_ADD = 1; - WATCH_CHANGE_TYPE_ALTER = 2; - WATCH_CHANGE_TYPE_DEL = 3; - WATCH_CHANGE_TYPE_SPECIAL1 = 4; + WATCH_CHANGE_TYPE_UNKNOWN = 0; + WATCH_CHANGE_TYPE_ADD = 1; + WATCH_CHANGE_TYPE_ALTER = 2; + WATCH_CHANGE_TYPE_DEL = 3; + WATCH_CHANGE_TYPE_SPECIAL1 = 4; } enum NodeEventType { - NODE_EVENT_TYPE_UNKNOWN = 0; - NODE_EVENT_TYPE_NODE_ONLINE = 1; - NODE_EVENT_TYPE_NODE_OFFLINE = 2; - NODE_EVENT_TYPE_NODE_RAFT_CHANGE = 3; + NODE_EVENT_TYPE_UNKNOWN = 0; + NODE_EVENT_TYPE_NODE_ONLINE = 1; + NODE_EVENT_TYPE_NODE_OFFLINE = 2; + NODE_EVENT_TYPE_NODE_RAFT_CHANGE = 3; } \ No newline at end of file diff --git a/hg-pd-grpc/src/main/proto/pdpb.proto b/hg-pd-grpc/src/main/proto/pdpb.proto index e510c4d782..1535bb2668 100644 --- a/hg-pd-grpc/src/main/proto/pdpb.proto +++ b/hg-pd-grpc/src/main/proto/pdpb.proto @@ -126,9 +126,9 @@ enum ErrorType { // store上分区数量超过上限 Too_Many_Partitions_Per_Store = 1009; // license 错误 - LICENSE_ERROR= 107; + LICENSE_ERROR = 107; // license 认证错误 - LICENSE_VERIFY_ERROR= 108; + LICENSE_VERIFY_ERROR = 108; //分区下线正在进行 Store_Tombstone_Doing = 1010; @@ -329,8 +329,8 @@ message GetIdRequest{ message GetIdResponse{ ResponseHeader header = 1; - int64 id =2; - int32 delta =3; + int64 id = 2; + int32 delta = 3; } message ResetIdRequest{ @@ -409,8 +409,8 @@ message getChangePeerListResponse{ } enum OperationMode { - Auto = 0; - Expert = 1; + Auto = 0; + Expert = 1; } message SplitDataParam{ @@ -481,11 +481,11 @@ message GetPartitionStatsResponse{ } message BalanceLeadersRequest{ - RequestHeader header = 1; - } + RequestHeader header = 1; +} message BalanceLeadersResponse{ - ResponseHeader header = 1; + ResponseHeader header = 1; } message 
PutLicenseRequest{ diff --git a/hg-pd-service/pom.xml b/hg-pd-service/pom.xml index f93bf54f21..75e5c2a66d 100644 --- a/hg-pd-service/pom.xml +++ b/hg-pd-service/pom.xml @@ -1,6 +1,23 @@ - + + 4.0.0 diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionScriptFactory.java b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionScriptFactory.java index add0cba5b2..c35e95a68b 100644 --- a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionScriptFactory.java +++ b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionScriptFactory.java @@ -1,26 +1,43 @@ -package com.baidu.hugegraph.pd.upgrade; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.upgrade.scripts.PartitionMetaUpgrade; -import com.baidu.hugegraph.pd.upgrade.scripts.TaskCleanUpgrade; +package com.baidu.hugegraph.pd.upgrade; import java.util.LinkedList; import java.util.List; +import com.baidu.hugegraph.pd.upgrade.scripts.PartitionMetaUpgrade; +import com.baidu.hugegraph.pd.upgrade.scripts.TaskCleanUpgrade; + public class VersionScriptFactory { private static volatile VersionScriptFactory factory; - private static List scripts = new LinkedList<>(); + private static final List scripts = new LinkedList<>(); static { registerScript(new PartitionMetaUpgrade()); registerScript(new TaskCleanUpgrade()); } - private VersionScriptFactory(){ + private VersionScriptFactory() { } - public static VersionScriptFactory getInstance(){ + public static VersionScriptFactory getInstance() { if (factory == null) { synchronized (VersionScriptFactory.class) { if (factory == null) { diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionUpgradeScript.java b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionUpgradeScript.java index c0851dedad..0e216a0a7e 100644 --- a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionUpgradeScript.java +++ b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionUpgradeScript.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package com.baidu.hugegraph.pd.upgrade; import com.baidu.hugegraph.pd.config.PDConfig; @@ -8,12 +25,14 @@ public interface VersionUpgradeScript { /** * the highest version that need to run upgrade instruction + * * @return high version */ String getHighVersion(); /** * the lowest version that need to run upgrade instruction + * * @return lower version */ String getLowVersion(); @@ -27,6 +46,7 @@ public interface VersionUpgradeScript { /** * the scrip just run once, ignore versions + * * @return run once script */ boolean isRunOnce(); diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java index 703842274a..4c7587d7e7 100644 --- a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java +++ b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java @@ -1,14 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package com.baidu.hugegraph.pd.upgrade.scripts; +import java.util.HashSet; + import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.config.PDConfig; import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; import com.baidu.hugegraph.pd.meta.MetadataRocksDBStore; import com.baidu.hugegraph.pd.upgrade.VersionUpgradeScript; -import lombok.extern.slf4j.Slf4j; -import java.util.HashSet; +import lombok.extern.slf4j.Slf4j; @Slf4j public class PartitionMetaUpgrade implements VersionUpgradeScript { @@ -31,9 +49,11 @@ public void runInstruction(PDConfig config) { try { var partSet = new HashSet(); - for (var graph : dbStore.scanPrefix(Metapb.Graph.parser(), MetadataKeyHelper.getGraphPrefix())) { + for (var graph : dbStore.scanPrefix(Metapb.Graph.parser(), + MetadataKeyHelper.getGraphPrefix())) { var graphPrefix = MetadataKeyHelper.getPartitionPrefix(graph.getGraphName()); - for (var partition : dbStore.scanPrefix(Metapb.PartitionV36.parser(), graphPrefix)) { + for (var partition : dbStore.scanPrefix(Metapb.PartitionV36.parser(), + graphPrefix)) { var newPartition = trans(partition); var partId = partition.getId(); log.info("trans partition structure: from {} to {}", partition, newPartition); @@ -45,23 +65,24 @@ public void runInstruction(PDConfig config) { dbStore.put(key, newPartition.toByteArray()); // construct shard group - if (! 
partSet.contains(partId)) { + if (!partSet.contains(partId)) { var shardGroupKey = MetadataKeyHelper.getShardGroupKey(partId); var shardGroup = dbStore.getOne(Metapb.ShardGroup.parser(), shardGroupKey); if (shardGroup == null) { var shardList = partition.getShardsList(); if (shardList.size() > 0) { shardGroup = Metapb.ShardGroup.newBuilder() - .setId(partId) - .setVersion(partition.getVersion()) - .setConfVer(0) - .setState(partition.getState()) - .addAllShards(shardList) - .build(); + .setId(partId) + .setVersion(partition.getVersion()) + .setConfVer(0) + .setState(partition.getState()) + .addAllShards(shardList) + .build(); dbStore.put(shardGroupKey, shardGroup.toByteArray()); log.info("extract shard group from partition, {}", shardGroup); } else { - throw new PDException(1000, "trans partition failed, no shard list"); + throw new PDException(1000, + "trans partition failed, no shard list"); } } partSet.add(partId); @@ -87,13 +108,13 @@ public boolean isRunWithoutDataVersion() { private Metapb.Partition trans(Metapb.PartitionV36 partition) { return Metapb.Partition.newBuilder() - .setId(partition.getId()) - .setGraphName(partition.getGraphName()) - .setStartKey(partition.getStartKey()) - .setEndKey(partition.getEndKey()) - .setVersion(partition.getVersion()) - .setState(partition.getState()) - .setMessage(partition.getMessage()) - .build(); + .setId(partition.getId()) + .setGraphName(partition.getGraphName()) + .setStartKey(partition.getStartKey()) + .setEndKey(partition.getEndKey()) + .setVersion(partition.getVersion()) + .setState(partition.getState()) + .setMessage(partition.getMessage()) + .build(); } } diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java index a870386384..c3ed6b9ddc 100644 --- a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java +++ b/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package com.baidu.hugegraph.pd.upgrade.scripts; import com.baidu.hugegraph.pd.common.PDException; @@ -5,6 +22,7 @@ import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; import com.baidu.hugegraph.pd.meta.MetadataRocksDBStore; import com.baidu.hugegraph.pd.upgrade.VersionUpgradeScript; + import lombok.extern.slf4j.Slf4j; @Slf4j diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java index 03f4b09cca..e64e99b8d2 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java @@ -1,14 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.boot; -import com.alipay.remoting.util.StringUtils; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.context.annotation.ComponentScan; +import com.alipay.remoting.util.StringUtils; + /** * PD服务启动类 */ -@ComponentScan(basePackages={"com.baidu.hugegraph.pd"}) +@ComponentScan(basePackages = {"com.baidu.hugegraph.pd"}) @SpringBootApplication public class HugePDServer { public static void main(String[] args) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java index b75e27656d..72b9832a43 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java @@ -1,6 +1,4 @@ /* - * Copyright 2017 HugeGraph Authors - * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. 
The ASF @@ -19,6 +17,24 @@ package org.apache.hugegraph.pd.license; +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.charset.Charset; +import java.text.SimpleDateFormat; +import java.time.Duration; +import java.time.Instant; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.prefs.Preferences; + +import org.apache.commons.lang3.StringUtils; +import org.springframework.stereotype.Service; +import org.springframework.util.Base64Utils; + import com.baidu.hugegraph.license.ExtraParam; import com.baidu.hugegraph.license.LicenseVerifyParam; import com.baidu.hugegraph.license.MachineInfo; @@ -33,6 +49,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.gson.Gson; import com.google.gson.internal.LinkedTreeMap; + import de.schlichtherle.license.CipherParam; import de.schlichtherle.license.DefaultCipherParam; import de.schlichtherle.license.DefaultKeyStoreParam; @@ -47,39 +64,44 @@ import io.grpc.stub.AbstractBlockingStub; import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import org.springframework.stereotype.Service; -import org.springframework.util.Base64Utils; - -import java.io.File; -import java.io.IOException; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.charset.Charset; -import java.text.SimpleDateFormat; -import java.time.Duration; -import java.time.Instant; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.prefs.Preferences; @Service @Slf4j public class LicenseVerifierService { - private PDConfig pdConfig; private static final Duration CHECK_INTERVAL = Duration.ofMinutes(10); - private volatile Instant lastCheckTime = Instant.now(); - // private final LicenseVerifyParam verifyParam; - private LicenseVerifyManager manager; private static LicenseContent content; private static KvService kvService; - private static String contentKey = "contentKey"; - private static Gson mapper = new Gson(); - private final MachineInfo machineInfo; + private static final String contentKey = "contentKey"; + private static final Gson mapper = new Gson(); private static volatile boolean installed = false; + private final MachineInfo machineInfo; + SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + private final PDConfig pdConfig; + private final Instant lastCheckTime = Instant.now(); + // private final LicenseVerifyParam verifyParam; + private LicenseVerifyManager manager; + private ManagedChannel channel; + + // public static LicenseVerifierService instance() { + // if (INSTANCE == null) { + // synchronized (LicenseVerifierService.class) { + // if (INSTANCE == null) { + // INSTANCE = new LicenseVerifierService(); + // } + // } + // } + // return INSTANCE; + // } + + // public void verifyIfNeeded() { + // Instant now = Instant.now(); + // Duration interval = Duration.between(this.lastCheckTime, now); + // if (!interval.minus(CHECK_INTERVAL).isNegative()) { + // this.verify(); + // this.lastCheckTime = now; + // } + // } public LicenseVerifierService(PDConfig pdConfig) { this.pdConfig = pdConfig; @@ -88,6 +110,23 @@ public LicenseVerifierService(PDConfig pdConfig) { // verifyParam = initLicense(pdConfig); } + private static LicenseVerifyParam buildVerifyParam(String path) { + // NOTE: can't use JsonUtil 
due to it bind tinkerpop jackson + try { + ObjectMapper mapper = new ObjectMapper(); + File licenseParamFile = new File(path); + if (!licenseParamFile.exists()) { + log.warn("failed to get file:{}", path); + return null; + } + return mapper.readValue(licenseParamFile, LicenseVerifyParam.class); + } catch (IOException e) { + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, + String.format("Failed to read json stream to %s", + LicenseVerifyParam.class)); + } + } + public LicenseVerifyParam init() { LicenseVerifyParam verifyParam = null; if (!installed) { @@ -123,26 +162,31 @@ public LicenseVerifyParam init() { } if (RaftEngine.getInstance().getLeader() != null) { CountDownLatch latch = new CountDownLatch(1); - TTLRequest request = TTLRequest.newBuilder().setKey(contentKey).setValue( - mapper.toJson(content, LicenseContent.class)).setTtl(ttl).build(); - StreamObserver observer = new StreamObserver() { - @Override - public void onNext(TTLResponse value) { - info[0] = value; - latch.countDown(); - } + TTLRequest request = + TTLRequest.newBuilder().setKey(contentKey).setValue( + mapper.toJson(content, + LicenseContent.class)) + .setTtl(ttl).build(); + StreamObserver observer = + new StreamObserver() { + @Override + public void onNext(TTLResponse value) { + info[0] = value; + latch.countDown(); + } - @Override - public void onError(Throwable t) { - latch.countDown(); - } + @Override + public void onError(Throwable t) { + latch.countDown(); + } - @Override - public void onCompleted() { - latch.countDown(); - } - }; - redirectToLeader(KvServiceGrpc.getPutTTLMethod(), request, observer); + @Override + public void onCompleted() { + latch.countDown(); + } + }; + redirectToLeader(KvServiceGrpc.getPutTTLMethod(), request, + observer); latch.await(); Pdpb.Error error = info[0].getHeader().getError(); if (!error.getType().equals(Pdpb.ErrorType.OK)) { @@ -153,7 +197,8 @@ public void onCompleted() { } } else { - kvService.put(contentKey, mapper.toJson(content, LicenseContent.class), ttl); + kvService.put(contentKey, + mapper.toJson(content, LicenseContent.class), ttl); } installed = true; log.info("The license is successfully installed, valid for {} - {}", @@ -170,30 +215,9 @@ public void onCompleted() { return verifyParam; } - // public static LicenseVerifierService instance() { - // if (INSTANCE == null) { - // synchronized (LicenseVerifierService.class) { - // if (INSTANCE == null) { - // INSTANCE = new LicenseVerifierService(); - // } - // } - // } - // return INSTANCE; - // } - - // public void verifyIfNeeded() { - // Instant now = Instant.now(); - // Duration interval = Duration.between(this.lastCheckTime, now); - // if (!interval.minus(CHECK_INTERVAL).isNegative()) { - // this.verify(); - // this.lastCheckTime = now; - // } - // } - public synchronized void install(String md5) { } - SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); public HashMap getContext() throws Exception { try { @@ -243,7 +267,8 @@ public LicenseContent verify(int cores, int nodeCount) { if (licNodes != -1) { // licNodes为 -1时,表示不限制服务节点数目 if (nodeCount > licNodes) { - String msg = String.format("无效的节点个数: %s,授权数: %s", nodeCount, licNodes); + String msg = + String.format("无效的节点个数: %s,授权数: %s", nodeCount, licNodes); throw new PDRuntimeException( Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, msg); } @@ -256,27 +281,44 @@ public LicenseContent verify(int cores, int nodeCount) { } } - private ManagedChannel channel; - public boolean isLeader() { return RaftEngine.getInstance().isLeader(); } + 
// private void verifyPublicCert(String expectMD5) { + // String path = this.verifyParam.publicKeyPath(); + // try (InputStream is = LicenseVerifierService.class.getResourceAsStream(path)) { + // String actualMD5 = DigestUtils.md5Hex(is); + // if (!actualMD5.equals(expectMD5)) { + // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Invalid public + // cert"); + // } + // } catch (IOException e) { + // log.error("Failed to read public cert", e); + // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Failed to read + // public cert", e); + // } + // } + private > void redirectToLeader( - MethodDescriptor method, ReqT req, io.grpc.stub.StreamObserver observer) { + MethodDescriptor method, ReqT req, + io.grpc.stub.StreamObserver observer) { try { if (channel == null) { synchronized (this) { if (channel == null) { channel = ManagedChannelBuilder - .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()).usePlaintext() + .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()) + .usePlaintext() .build(); } } - log.info("Grpc get leader address {}", RaftEngine.getInstance().getLeaderGrpcAddress()); + log.info("Grpc get leader address {}", + RaftEngine.getInstance().getLeaderGrpcAddress()); } - io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), req, + io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), + req, observer); } catch (Exception e) { e.printStackTrace(); @@ -284,45 +326,16 @@ private > void redirectTo } - // private void verifyPublicCert(String expectMD5) { - // String path = this.verifyParam.publicKeyPath(); - // try (InputStream is = LicenseVerifierService.class.getResourceAsStream(path)) { - // String actualMD5 = DigestUtils.md5Hex(is); - // if (!actualMD5.equals(expectMD5)) { - // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Invalid public cert"); - // } - // } catch (IOException e) { - // log.error("Failed to read public cert", e); - // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Failed to read public cert", e); - // } - // } - private LicenseParam initLicenseParam(LicenseVerifyParam param) { Preferences preferences = Preferences.userNodeForPackage(LicenseVerifierService.class); CipherParam cipherParam = new DefaultCipherParam(param.storePassword()); KeyStoreParam keyStoreParam = new DefaultKeyStoreParam(LicenseVerifierService.class, - param.publicKeyPath(), param.publicAlias(), + param.publicKeyPath(), + param.publicAlias(), param.storePassword(), null); return new DefaultLicenseParam(param.subject(), preferences, keyStoreParam, cipherParam); } - private static LicenseVerifyParam buildVerifyParam(String path) { - // NOTE: can't use JsonUtil due to it bind tinkerpop jackson - try { - ObjectMapper mapper = new ObjectMapper(); - File licenseParamFile = new File(path); - if (!licenseParamFile.exists()) { - log.warn("failed to get file:{}", path); - return null; - } - return mapper.readValue(licenseParamFile, LicenseVerifyParam.class); - } catch (IOException e) { - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, - String.format("Failed to read json stream to %s", - LicenseVerifyParam.class)); - } - } - public String getIpAndMac() { List actualIps = this.machineInfo.getIpAddress(); String host = pdConfig.getHost(); @@ -381,8 +394,11 @@ private void checkIpAndMac(ExtraParam param) { String expectFormatMac = expectMac.replaceAll(":", "-"); String actualFormatMac = actualMac.replaceAll(":", "-"); if 
(!actualFormatMac.equalsIgnoreCase(expectFormatMac)) { - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, String.format( - "The server's mac '%s' doesn't match the authorized '%s'", actualMac, expectMac)); + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, + String.format( + "The server's mac '%s' doesn't match the " + + "authorized '%s'", + actualMac, expectMac)); } } else { String expectFormatMac = expectMac.replaceAll(":", "-"); @@ -396,8 +412,11 @@ private void checkIpAndMac(ExtraParam param) { } } if (!matched) { - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, String.format( - "The server's macs %s don't match the authorized '%s'", actualMacs, expectMac)); + throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, + String.format( + "The server's macs %s don't match the " + + "authorized '%s'", + actualMacs, expectMac)); } } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java index ff86fc910e..5f54b2d6b4 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java @@ -1,6 +1,4 @@ /* - * Copyright 2017 HugeGraph Authors - * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF @@ -44,19 +42,6 @@ public LicenseVerifyManager(LicenseParam param) { super(param); } - @Override - protected synchronized void validate(LicenseContent content) throws LicenseContentException { - // Call super validate firstly to verify the common license parameters - try { - super.validate(content); - } catch (LicenseContentException e) { - // log.error("Failed to verify license", e); - throw e; - } - // Verify the customized license parameters. - getExtraParams(content); - } - public static ExtraParam getExtraParams(LicenseContent content) { List params; try { @@ -74,4 +59,17 @@ public static ExtraParam getExtraParams(LicenseContent content) { } return null; } + + @Override + protected synchronized void validate(LicenseContent content) throws LicenseContentException { + // Call super validate firstly to verify the common license parameters + try { + super.validate(content); + } catch (LicenseContentException e) { + // log.error("Failed to verify license", e); + throw e; + } + // Verify the customized license parameters. + getExtraParams(content); + } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java index 0bbba95f1e..2a2ec10750 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java @@ -1,11 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.metrics; -import io.micrometer.core.instrument.MeterRegistry; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; +import io.micrometer.core.instrument.MeterRegistry; + /** * @author lynn.bond@hotmail.com on 2022/01/05 */ diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java index bb67f78cb9..0c8fb55bed 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java @@ -1,17 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.metrics; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hugegraph.pd.service.PDService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Metapb; -import org.apache.hugegraph.pd.service.PDService; + import io.micrometer.core.instrument.Gauge; import io.micrometer.core.instrument.MeterRegistry; import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import java.util.Collections; -import java.util.List; -import java.util.concurrent.atomic.AtomicLong; /** * @author lynn.bond@hotmail.com on 2022/1/5 @@ -39,16 +58,16 @@ private void registerMeters() { Gauge.builder(PREFIX + ".up", () -> 1).register(registry); Gauge.builder(PREFIX + ".graphs", () -> updateGraphs()) - .description("Number of graphs registered in PD") - .register(registry); + .description("Number of graphs registered in PD") + .register(registry); Gauge.builder(PREFIX + ".stores", () -> updateStores()) - .description("Number of stores registered in PD") - .register(registry); + .description("Number of stores registered in PD") + .register(registry); } - private long updateGraphs() { + private long updateGraphs() { long buf = getGraphs(); if (buf != graphs.get()) { @@ -58,40 +77,40 @@ private long updateGraphs() { return buf; } - private long updateStores() { - return getStores(); + private long updateStores() { + return getStores(); } private long getGraphs() { return getGraphMetas().size(); } - private long getStores(){ + private long getStores() { try { return this.pdService.getStoreNodeService().getStores(null).size(); } catch (PDException e) { - log.error(e.getMessage(),e); + log.error(e.getMessage(), e); e.printStackTrace(); } return 0; } - private List getGraphMetas(){ + private List getGraphMetas() { try { return this.pdService.getPartitionService().getGraphs(); } catch (PDException e) { - log.error(e.getMessage(),e); + log.error(e.getMessage(), e); } return Collections.EMPTY_LIST; } - private void registerGraphMetrics(){ - this.getGraphMetas().forEach(meta->{ - Gauge.builder(PREFIX + ".partitions",this.pdService.getPartitionService() - ,e-> e.getPartitions(meta.getGraphName()).size()) - .description("Number of partitions assigned to a graph") - .tag("graph",meta.getGraphName()) - .register(this.registry); + private void registerGraphMetrics() { + this.getGraphMetas().forEach(meta -> { + Gauge.builder(PREFIX + ".partitions", this.pdService.getPartitionService() + , e -> e.getPartitions(meta.getGraphName()).size()) + .description("Number of partitions assigned to a graph") + .tag("graph", meta.getGraphName()) + .register(this.registry); }); } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java index eb1ca0005e..0e366ef3cd 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.model; import java.util.Objects; @@ -48,8 +65,8 @@ public int hashCode() { @Override public String toString() { return "HgNodeStatus{" + - "status=" + status + - ", text='" + text + '\'' + - '}'; + "status=" + status + + ", text='" + text + '\'' + + '}'; } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java index 60340beb57..933f0f6d66 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.model; import lombok.Data; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java index 92b48982ea..86ba328699 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.model; import lombok.Data; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java index daa48dffad..d0f5ce73b3 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.model; import lombok.Data; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java index 12203456fa..091b91c7be 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.model; import java.util.HashMap; @@ -15,18 +32,26 @@ public class PromTargetsModel { private static final String LABEL_CLUSTER = "cluster"; private Set targets = new HashSet<>(); - private Map labels = new HashMap<>(); + private final Map labels = new HashMap<>(); + + private PromTargetsModel() { + } public static PromTargetsModel of() { return new PromTargetsModel(); } - private PromTargetsModel() {} - public Set getTargets() { return targets; } + public PromTargetsModel setTargets(Set targets) { + if (targets != null) { + this.targets = targets; + } + return this; + } + public Map getLabels() { return labels; } @@ -37,13 +62,6 @@ public PromTargetsModel addTarget(String target) { return this; } - public PromTargetsModel setTargets(Set targets) { - if (targets != null) { - this.targets = targets; - } - return this; - } - public PromTargetsModel setMetricsPath(String path) { return this.addLabel(LABEL_METRICS_PATH, path); } @@ -52,8 +70,8 @@ public PromTargetsModel setScheme(String scheme) { return this.addLabel(LABEL_SCHEME, scheme); } - public PromTargetsModel setClusterId(String clusterId){ - return this.addLabel(LABEL_CLUSTER,clusterId); + public PromTargetsModel setClusterId(String clusterId) { + return this.addLabel(LABEL_CLUSTER, clusterId); } public PromTargetsModel addLabel(String label, String value) { @@ -65,8 +83,8 @@ public PromTargetsModel addLabel(String label, String value) { @Override public String toString() { return "PromTargetModel{" + - "targets=" + targets + - ", labels=" + labels + - '}'; + "targets=" + targets + + ", labels=" + labels + + '}'; } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java index a001578736..a6cbe08632 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java @@ -1,9 +1,26 @@ -package org.apache.hugegraph.pd.model; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ -import lombok.Data; +package org.apache.hugegraph.pd.model; import java.util.HashMap; +import lombok.Data; + /** * @author zhangyingjie * @date 2022/2/8 @@ -13,5 +30,5 @@ public class RegistryQueryRestRequest { String appName; String version; - HashMap labels; + HashMap labels; } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java index 9682de795a..fd90ea3df3 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java @@ -1,9 +1,26 @@ -package org.apache.hugegraph.pd.model; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import lombok.Data; +package org.apache.hugegraph.pd.model; import java.util.HashMap; +import lombok.Data; + /** * @author zhangyingjie * @date 2022/2/8 @@ -16,5 +33,5 @@ public class RegistryRestRequest { String version; String address; String interval; - HashMap labels; + HashMap labels; } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java index a09744cff7..0517645fa9 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java @@ -1,9 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.model; +import java.io.Serializable; + import com.baidu.hugegraph.pd.grpc.Pdpb; -import lombok.Data; -import java.io.Serializable; +import lombok.Data; /** * @author zhangyingjie diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java index 410a3f528a..4b25fe023e 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java @@ -1,10 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.model; +import java.util.HashMap; + import com.baidu.hugegraph.pd.grpc.Pdpb; -import lombok.Data; -import java.io.Serializable; -import java.util.HashMap; +import lombok.Data; /** * @author tianxiaohui @@ -17,7 +34,7 @@ public class RestApiResponse { int status; public RestApiResponse(Object data, Pdpb.ErrorType status, String message) { - if (data == null){ + if (data == null) { data = new HashMap(); } this.data = data; @@ -29,8 +46,8 @@ public RestApiResponse() { } - public RestApiResponse(Object data, int status, String message){ - if (data == null){ + public RestApiResponse(Object data, int status, String message) { + if (data == null) { data = new HashMap(); } this.data = data; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java index 30a02d49b7..bcf2b1288f 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.model; import lombok.Data; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java index cf0f03ddef..edad568a8b 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.model; import lombok.Data; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java index 20f8daab51..1d88efd5c7 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java @@ -1,11 +1,29 @@ -package org.apache.hugegraph.pd.notice; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ -import com.baidu.hugegraph.pd.common.HgAssert; -import lombok.extern.slf4j.Slf4j; +package org.apache.hugegraph.pd.notice; import java.util.function.Function; import java.util.function.Supplier; +import com.baidu.hugegraph.pd.common.HgAssert; + +import lombok.extern.slf4j.Slf4j; + /** * @author lynn.bond@hotmail.com on 2022/2/10 */ @@ -13,23 +31,23 @@ public class NoticeBroadcaster { private long noticeId; private String durableId; - private Supplier noticeSupplier; + private final Supplier noticeSupplier; private Supplier durableSupplier; private Function removeFunction; private int state; //0=ready; 1=notified; 2=done ack; -1=error private int counter; private long timestamp; - public static NoticeBroadcaster of(Supplier noticeSupplier) { - HgAssert.isArgumentNotNull(noticeSupplier, "noticeSupplier"); - return new NoticeBroadcaster(noticeSupplier); - } - private NoticeBroadcaster(Supplier noticeSupplier) { this.noticeSupplier = noticeSupplier; this.timestamp = System.currentTimeMillis(); } + public static NoticeBroadcaster of(Supplier noticeSupplier) { + HgAssert.isArgumentNotNull(noticeSupplier, "noticeSupplier"); + return new NoticeBroadcaster(noticeSupplier); + } + public NoticeBroadcaster setDurableSupplier(Supplier durableSupplier) { this.durableSupplier = durableSupplier; return this; @@ -62,7 +80,8 @@ public NoticeBroadcaster notifying() { state = 1; } catch (Throwable t) { state = -1; - log.error("Failed to invoke noticeSupplier: {}; cause by: " + this.noticeSupplier.toString(), t); + log.error("Failed to invoke noticeSupplier: {}; cause by: " + + this.noticeSupplier.toString(), t); } return this; @@ -105,21 +124,12 @@ public boolean doRemoveDurable() { } } catch (Throwable t) { log.error("Failed to remove NoticeBroadcaster, noticeId: " - + this.noticeId + ", durableId: " + this.durableId + ". Cause by:", t); + + this.noticeId + ", durableId: " + this.durableId + ". Cause by:", t); } return flag; } - public void setDurableId(String durableId) { - - if (HgAssert.isInvalid(durableId)) { - log.warn("Set an invalid durable-id to NoticeBroadcaster."); - } - - this.durableId = durableId; - } - public long getNoticeId() { return noticeId; } @@ -136,6 +146,15 @@ public String getDurableId() { return durableId; } + public void setDurableId(String durableId) { + + if (HgAssert.isInvalid(durableId)) { + log.warn("Set an invalid durable-id to NoticeBroadcaster."); + } + + this.durableId = durableId; + } + public long getTimestamp() { return timestamp; } @@ -147,11 +166,11 @@ public void setTimestamp(long timestamp) { @Override public String toString() { return "NoticeBroadcaster{" + - "noticeId=" + noticeId + - ", durableId='" + durableId + '\'' + - ", state=" + state + - ", counter=" + counter + - ", timestamp=" + timestamp + - '}'; + "noticeId=" + noticeId + + ", durableId='" + durableId + '\'' + + ", state=" + state + + ", counter=" + counter + + ", timestamp=" + timestamp + + '}'; } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java index 56e02226b7..e153fc7277 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java @@ -1,20 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.pulse; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Function; + +import javax.annotation.concurrent.ThreadSafe; + import com.baidu.hugegraph.pd.grpc.pulse.PulseNoticeRequest; import com.baidu.hugegraph.pd.grpc.pulse.PulseResponse; import com.baidu.hugegraph.pd.grpc.pulse.PulseType; import com.baidu.hugegraph.pd.util.IdUtil; + import io.grpc.Status; import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -import javax.annotation.concurrent.ThreadSafe; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.function.Consumer; -import java.util.function.Function; - /** * @author lynn.bond@hotmail.com created on 2021/11/9 */ @@ -45,12 +64,14 @@ void addObserver(Long observerId, StreamObserver responseObserver if (this.observerHolder.containsKey(observerId)) { responseObserver.onError( - new Exception("The observer-id[" + observerId + "] of " + this.pulseType.name() + new Exception( + "The observer-id[" + observerId + "] of " + this.pulseType.name() + " subject has been existing.")); return; } - log.info("Adding a " + this.pulseType + "'s observer, observer-id is [" + observerId + "]."); + log.info("Adding a " + this.pulseType + "'s observer, observer-id is [" + observerId + + "]."); this.observerHolder.put(observerId, responseObserver); } @@ -64,7 +85,8 @@ void addObserver(Long observerId, StreamObserver responseObserver */ void removeObserver(Long observerId, StreamObserver responseObserver) { synchronized (this.observerHolder) { - log.info("Removing a " + this.pulseType + "'s observer, observer-id is [" + observerId + "]."); + log.info("Removing a " + this.pulseType + "'s observer, observer-id is [" + observerId + + "]."); this.observerHolder.remove(observerId); } @@ -74,7 +96,6 @@ void removeObserver(Long observerId, StreamObserver responseObser abstract String toNoticeString(PulseResponse res); /** - * * @param c * @return notice ID */ @@ -82,7 +103,8 @@ protected long notifyClient(Consumer c) { synchronized (lock) { if (c == null) { - log.error(this.pulseType.name() + "'s notice was abandoned, caused by: notifyObserver(null)"); + log.error(this.pulseType.name() + + "'s notice was abandoned, caused by: notifyObserver(null)"); return -1; } @@ -95,38 +117,43 @@ protected long notifyClient(Consumer c) { long noticeId = IdUtil.createMillisId(); - Iterator>> iter = observerHolder.entrySet().iterator(); + Iterator>> iter = + observerHolder.entrySet().iterator(); // long start = System.currentTimeMillis(); while (iter.hasNext()) { Map.Entry> entry = iter.next(); Long observerId = entry.getKey(); - PulseResponse res = this.builder.setObserverId(observerId).setNoticeId(noticeId).build(); + PulseResponse res = + 
this.builder.setObserverId(observerId).setNoticeId(noticeId).build(); try { entry.getValue().onNext(res); } catch (Throwable e) { - log.error("Failed to send " + this.pulseType.name() + "'s notice[" + toNoticeString(res) - + "] to observer[" + observerId + "].", e); + log.error("Failed to send " + this.pulseType.name() + "'s notice[" + + toNoticeString(res) + + "] to observer[" + observerId + "].", e); // TODO: ? try multi-times? // iter.remove(); log.error("Removed a " + this.pulseType.name() + "'s observer[" + entry.getKey() - + "], because of once failure of sending.", e); + + "], because of once failure of sending.", e); } } - // log.info("notice client: notice id: {}, ts :{}, cost: {}", noticeId, System.currentTimeMillis(), + // log.info("notice client: notice id: {}, ts :{}, cost: {}", noticeId, System + // .currentTimeMillis(), // (System.currentTimeMillis() - start )/1000); return noticeId; } } - protected void notifyError(String message){ + protected void notifyError(String message) { synchronized (lock) { - Iterator>> iter = observerHolder.entrySet().iterator(); + Iterator>> iter = + observerHolder.entrySet().iterator(); while (iter.hasNext()) { Map.Entry> entry = iter.next(); Long observerId = entry.getKey(); @@ -135,8 +162,9 @@ protected void notifyError(String message){ entry.getValue().onError( Status.PERMISSION_DENIED.withDescription(message).asRuntimeException()); } catch (Throwable e) { - log.error("Failed to send " + this.pulseType.name() + "'s notice[" + toNoticeString(res) - + "] to observer[" + observerId + "].", e); + log.error("Failed to send " + this.pulseType.name() + "'s notice[" + + toNoticeString(res) + + "] to observer[" + observerId + "].", e); } } @@ -154,12 +182,14 @@ void addListener(Long listenerId, PulseListener listener) { if (this.listenerHolder.containsKey(listenerId)) { listener.onError( - new Exception("The listener-id[" + listenerId + "] of " + this.pulseType.name() + new Exception( + "The listener-id[" + listenerId + "] of " + this.pulseType.name() + " subject has been existing.")); return; } - log.info("Adding a " + this.pulseType + "'s listener, listener-id is [" + listenerId + "]."); + log.info("Adding a " + this.pulseType + "'s listener, listener-id is [" + listenerId + + "]."); this.listenerHolder.put(listenerId, listener); } @@ -174,14 +204,15 @@ void addListener(Long listenerId, PulseListener listener) { */ void removeListener(Long listenerId, PulseListener listener) { synchronized (this.listenerHolder) { - log.info("Removing a " + this.pulseType + "'s listener, listener-id is [" + listenerId + "]."); + log.info("Removing a " + this.pulseType + "'s listener, listener-id is [" + listenerId + + "]."); this.observerHolder.remove(listenerId); } listener.onCompleted(); } - abstract Function getNoticeHandler(); + abstract Function getNoticeHandler(); void handleClientNotice(PulseNoticeRequest noticeRequest) { @@ -193,7 +224,7 @@ void handleClientNotice(PulseNoticeRequest noticeRequest) { try { entry.getValue().onNext(getNoticeHandler().apply(noticeRequest)); } catch (Throwable e) { - log.error(e.getMessage(),e); + log.error(e.getMessage(), e); } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java index 3e55177f37..536fdcdcde 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java @@ -1,27 +1,49 @@ -package 
org.apache.hugegraph.pd.pulse; - -import com.baidu.hugegraph.pd.common.HgAssert; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.pulse.*; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import org.apache.hugegraph.pd.notice.NoticeBroadcaster; -import org.apache.hugegraph.pd.util.IdUtil; +package org.apache.hugegraph.pd.pulse; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.Parser; -import io.grpc.stub.StreamObserver; -import lombok.extern.slf4j.Slf4j; +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; -import javax.annotation.concurrent.ThreadSafe; import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.concurrent.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.pd.notice.NoticeBroadcaster; +import org.apache.hugegraph.pd.util.IdUtil; + +import com.baidu.hugegraph.pd.common.HgAssert; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.pulse.*; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Parser; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; /** * @author lynn.bond@hotmail.com created on 2021/11/8 @@ -30,25 +52,31 @@ @Slf4j @ThreadSafe public class PDPulseSubject { - private final static long NOTICE_EXPIRATION_TIME=30*60*1000; - private final static int RETRYING_PERIOD_SECONDS=60; - private final static Map subjectHolder = new ConcurrentHashMap<>(); - private final static ConcurrentLinkedQueue broadcasterQueue = new ConcurrentLinkedQueue<>(); - private final static ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(1); - - private static Supplier> queueRetrieveFunction = () -> Collections.emptyList(); + private final static long NOTICE_EXPIRATION_TIME = 30 * 60 * 1000; + private final static int RETRYING_PERIOD_SECONDS = 60; + private final static Map subjectHolder = + new ConcurrentHashMap<>(); + private final static ConcurrentLinkedQueue broadcasterQueue = + new ConcurrentLinkedQueue<>(); + private final static ScheduledExecutorService scheduledExecutor = + Executors.newScheduledThreadPool(1); + + private static Supplier> queueRetrieveFunction = + () -> Collections.emptyList(); private static Function queueDurableFunction = (e) -> true; private 
static Function queueRemoveFunction = (e) -> true; static { - subjectHolder.put(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name(), new PartitionHeartbeatSubject()); + subjectHolder.put(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name(), + new PartitionHeartbeatSubject()); // add some other type here... // ... } //Schedule tasks static { - scheduledExecutor.scheduleAtFixedRate(() -> doSchedule(), 0, RETRYING_PERIOD_SECONDS, TimeUnit.SECONDS); + scheduledExecutor.scheduleAtFixedRate(() -> doSchedule(), 0, RETRYING_PERIOD_SECONDS, + TimeUnit.SECONDS); } private static void doSchedule() { @@ -57,10 +85,11 @@ private static void doSchedule() { //retry broadcasterQueue.forEach(e -> { - e.notifying();}); + e.notifying(); + }); } - private static void appendQueue(){ + private static void appendQueue() { broadcasterQueue.addAll( getQueueItems() .parallelStream() @@ -68,18 +97,18 @@ private static void appendQueue(){ .stream() .anyMatch(b -> e.getItemId().equals(b.getDurableId())) ).map(e -> createBroadcaster(e)) - .peek(e->log.info("Appending notice: {}",e)) + .peek(e -> log.info("Appending notice: {}", e)) .filter(e -> e != null) .collect(Collectors.toList()) ); } - private static void expireQueue(){ - broadcasterQueue.removeIf(e->{ - if(System.currentTimeMillis()-e.getTimestamp()>=NOTICE_EXPIRATION_TIME){ - log.info("Notice was expired, trying to remove, notice: {}",e); + private static void expireQueue() { + broadcasterQueue.removeIf(e -> { + if (System.currentTimeMillis() - e.getTimestamp() >= NOTICE_EXPIRATION_TIME) { + log.info("Notice was expired, trying to remove, notice: {}", e); return e.doRemoveDurable(); - }else{ + } else { return false; } }); @@ -95,12 +124,14 @@ private static List getQueueItems() { return Collections.emptyList(); } - public static void setQueueRetrieveFunction(Supplier> queueRetrieveFunction) { + public static void setQueueRetrieveFunction( + Supplier> queueRetrieveFunction) { HgAssert.isArgumentNotNull(queueRetrieveFunction, "queueRetrieveFunction"); PDPulseSubject.queueRetrieveFunction = queueRetrieveFunction; } - public static void setQueueDurableFunction(Function queueDurableFunction) { + public static void setQueueDurableFunction( + Function queueDurableFunction) { HgAssert.isArgumentNotNull(queueDurableFunction, "queueDurableFunction"); PDPulseSubject.queueDurableFunction = queueDurableFunction; } @@ -116,7 +147,8 @@ public static void setQueueRemoveFunction(Function queueRemoveF * @param responseObserver * @return */ - public static StreamObserver addObserver(StreamObserver responseObserver) { + public static StreamObserver addObserver( + StreamObserver responseObserver) { isArgumentNotNull(responseObserver, "responseObserver"); return new PDPulseStreamObserver(responseObserver); } @@ -144,9 +176,9 @@ private static T getSubject(PulseType pulseType, Class clazz) { } private static NoticeBroadcaster createBroadcaster(Metapb.QueueItem item) { - PartitionHeartbeatResponse notice=toNotice(item); - if(notice==null)return null; - NoticeBroadcaster res=createBroadcaster(notice); + PartitionHeartbeatResponse notice = toNotice(item); + if (notice == null) return null; + NoticeBroadcaster res = createBroadcaster(notice); res.setDurableId(item.getItemId()); res.setTimestamp(item.getTimestamp()); return res; @@ -154,13 +186,14 @@ private static NoticeBroadcaster createBroadcaster(Metapb.QueueItem item) { private static NoticeBroadcaster createBroadcaster(PartitionHeartbeatResponse notice) { return NoticeBroadcaster.of(getNoticeSupplier(notice)) - 
.setDurableSupplier(getDurableSupplier(notice)) - .setRemoveFunction(getRemoveFunction()); + .setDurableSupplier(getDurableSupplier(notice)) + .setRemoveFunction(getRemoveFunction()); } public static Supplier getNoticeSupplier(PartitionHeartbeatResponse notice) { // TODO: PartitionHeartbeatSubject.class -> T - return () -> getSubject(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT, PartitionHeartbeatSubject.class) + return () -> getSubject(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT, + PartitionHeartbeatSubject.class) .notifyClient(notice); } @@ -173,7 +206,9 @@ private static Supplier getDurableSupplier(PartitionHeartbeatResponse no if (queueDurableFunction.apply(queueItem)) { res = queueItem.getItemId(); } else { - log.error("Failed to persist queue-item that contained PartitionHeartbeatResponse: {}" + log.error( + "Failed to persist queue-item that contained " + + "PartitionHeartbeatResponse: {}" , notice); } } catch (Throwable t) { @@ -200,11 +235,11 @@ private static Function getRemoveFunction() { private static Metapb.QueueItem toQueueItem(PartitionHeartbeatResponse notice) { return Metapb.QueueItem.newBuilder() - .setItemId(IdUtil.createMillisStr()) - .setItemClass(notice.getClass().getTypeName()) - .setItemContent(notice.toByteString()) - .setTimestamp(System.currentTimeMillis()) - .build(); + .setItemId(IdUtil.createMillisStr()) + .setItemClass(notice.getClass().getTypeName()) + .setItemContent(notice.toByteString()) + .setTimestamp(System.currentTimeMillis()) + .build(); } private static PartitionHeartbeatResponse toNotice(Metapb.QueueItem item) { @@ -230,7 +265,8 @@ public static void notifyError(String message) { * @param listener */ public static void listenPartitionHeartbeat(PulseListener listener) { - subjectHolder.get(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name()).addListener(createListenerId(), listener); + subjectHolder.get(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name()) + .addListener(createListenerId(), listener); } private static Long createListenerId() { @@ -256,7 +292,8 @@ private static class PDPulseStreamObserver implements StreamObservere.checkAck(noticeId)); + broadcasterQueue.removeIf(e -> e.checkAck(noticeId)); } private PulseType getPulseType(PulseCreateRequest request) { @@ -298,7 +335,8 @@ private AbstractObserverSubject getSubject(PulseType pulseType) { AbstractObserverSubject subject = subjectHolder.get(pulseType.name()); if (subject == null) { - responseObserver.onError(new Exception("Unsupported pulse-type: " + pulseType.name())); + responseObserver.onError( + new Exception("Unsupported pulse-type: " + pulseType.name())); return null; } @@ -328,8 +366,7 @@ public void onNext(PulseRequest pulseRequest) { if (pulseRequest.hasAckRequest()) { this.ackNotice(pulseRequest.getAckRequest().getNoticeId() - ,pulseRequest.getAckRequest().getObserverId()); - return; + , pulseRequest.getAckRequest().getObserverId()); } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java index 67fca9c0c2..c19685c51b 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java @@ -1,9 +1,26 @@ -package org.apache.hugegraph.pd.pulse; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.grpc.pulse.*; +package org.apache.hugegraph.pd.pulse; import java.util.function.Function; +import com.baidu.hugegraph.pd.grpc.pulse.*; + /** * @author lynn.bond@hotmail.com created on 2021/11/9 */ @@ -20,20 +37,20 @@ String toNoticeString(PulseResponse res) { @Override Function getNoticeHandler() { - return r->r.getPartitionHeartbeatRequest(); + return r -> r.getPartitionHeartbeatRequest(); } void notifyClient(PartitionHeartbeatResponse.Builder responseBuilder) { super.notifyClient(b -> { - b.setPartitionHeartbeatResponse(responseBuilder);; + b.setPartitionHeartbeatResponse(responseBuilder); }); } long notifyClient(PartitionHeartbeatResponse response) { return super.notifyClient(b -> { - b.setPartitionHeartbeatResponse(response);; + b.setPartitionHeartbeatResponse(response); }); } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java index fd66f1f977..dd3758bf86 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java @@ -1,9 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.pulse; /** * @author lynn.bond@hotmail.com created on 2021/11/9 */ -public interface PulseListener { +public interface PulseListener { /** * Invoked on new notice. * diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java index cd552e4471..6d7680a2ee 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java @@ -1,5 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.rest; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import com.baidu.hugegraph.pd.common.PDException; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; @@ -7,29 +28,25 @@ import com.google.protobuf.MessageOrBuilder; import com.google.protobuf.util.JsonFormat; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - public class API { + public static final String VERSION = "3.6.3"; + public static final String PD = "PD"; + public static final String STORE = "STORE"; public static String STATUS_KEY = "status"; public static String ERROR_KEY = "error"; public static String QUOTATION = "\""; public static String COMMA = ","; public static String COLON = ": "; - public static final String VERSION = "3.6.3"; - public static final String PD = "PD"; - public static final String STORE = "STORE"; - public String toJSON(List values, String key) { StringBuilder builder = new StringBuilder(); builder.append("{") - .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0").append(COMMA) - .append(QUOTATION).append(key).append(QUOTATION).append(COLON) - .append("[ "); + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0") + .append(COMMA) + .append(QUOTATION).append(key).append(QUOTATION).append(COLON) + .append("[ "); if (values != null) { values.forEach(s -> { @@ -49,13 +66,15 @@ public String toJSON(List values, String key) { public String toJSON(MessageOrBuilder value, String key) { StringBuilder builder = new StringBuilder(); builder.append("{") - .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0").append(COMMA) - .append(QUOTATION).append(key).append(QUOTATION).append(COLON); + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0") + .append(COMMA) + .append(QUOTATION).append(key).append(QUOTATION).append(COLON); try { - if (value != null) + if (value != null) { builder.append(JsonFormat.printer().print(value)); - else + } else { builder.append("{}"); + } builder.append("}"); return builder.toString(); } catch (InvalidProtocolBufferException e) { @@ -75,9 +94,9 @@ public String toJSON(Map> values) { if ((entryValue != null) && !(entryValue.isEmpty())) { entryValue.forEach(s -> { try { - if (s == null){ + if (s == null) { builder.append("null"); - }else{ + } else { builder.append(JsonFormat.printer().print(s)); } } catch (InvalidProtocolBufferException e) { @@ -97,23 +116,24 @@ public String toJSON(Map> values) { public String toJSON(PDException exception) { StringBuilder builder = new StringBuilder(); builder.append("{") - .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append(exception.getErrorCode()).append(COMMA) - .append(QUOTATION).append(ERROR_KEY).append(QUOTATION).append(COLON) - 
.append(QUOTATION).append(exception.getMessage()).append(QUOTATION); + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON) + .append(exception.getErrorCode()).append(COMMA) + .append(QUOTATION).append(ERROR_KEY).append(QUOTATION).append(COLON) + .append(QUOTATION).append(exception.getMessage()).append(QUOTATION); builder.append("}"); return builder.toString(); } public String toJSON(Exception exception) { - StringBuilder builder = new StringBuilder(); - builder.append("{") - .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("-1").append(COMMA) - .append(QUOTATION).append(ERROR_KEY).append(QUOTATION).append(COLON) - .append(QUOTATION).append(exception.getMessage()).append(QUOTATION); - builder.append("}"); - - return builder.toString(); + String builder = "{" + + QUOTATION + STATUS_KEY + QUOTATION + COLON + "-1" + + COMMA + + QUOTATION + ERROR_KEY + QUOTATION + COLON + + QUOTATION + exception.getMessage() + QUOTATION + + "}"; + + return builder; } /** @@ -138,13 +158,15 @@ public Map okMap(String k, Object v) { return map; } - public String toJSON(List values, JsonFormat.TypeRegistry registry) { + public String toJSON(List values, + JsonFormat.TypeRegistry registry) { StringBuilder builder = new StringBuilder(); builder.append("{") - .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0").append(COMMA) - .append(QUOTATION).append("log").append(QUOTATION).append(COLON) - .append("[ "); + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0") + .append(COMMA) + .append(QUOTATION).append("log").append(QUOTATION).append(COLON) + .append("[ "); JsonFormat.Printer printer = JsonFormat.printer().usingTypeRegistry(registry); if (values != null) { values.forEach(s -> { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java index fca2db50ae..184b65b93b 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java @@ -1,28 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.rest; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.servlet.http.HttpServletRequest; import org.apache.hugegraph.pd.model.GraphRestRequest; import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.service.PDService; - -import lombok.Data; -import lombok.extern.slf4j.Slf4j; - import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; -import org.springframework.web.bind.annotation.*; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; -import javax.servlet.http.HttpServletRequest; -import java.io.UnsupportedEncodingException; -import java.net.URLDecoder; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; @RestController @Slf4j @@ -76,7 +99,8 @@ public RestApiResponse getGraphs() { return response; } - @PostMapping(value = "/graph/**", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @PostMapping(value = "/graph/**", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public String setGraph(@RequestBody GraphRestRequest body, HttpServletRequest request) { try { @@ -84,9 +108,10 @@ public String setGraph(@RequestBody GraphRestRequest body, HttpServletRequest re final String prefix = "/graph/"; final int limit = 2; String graphName = requestURL.split(prefix, limit)[1]; - graphName = URLDecoder.decode(graphName, "utf-8"); + graphName = URLDecoder.decode(graphName, StandardCharsets.UTF_8); Metapb.Graph curGraph = pdRestService.getGraph(graphName); - Metapb.Graph.Builder builder = Metapb.Graph.newBuilder(curGraph == null ? Metapb.Graph.getDefaultInstance() : curGraph); + Metapb.Graph.Builder builder = Metapb.Graph.newBuilder( + curGraph == null ? 
Metapb.Graph.getDefaultInstance() : curGraph); builder.setGraphName(graphName); if (body.getPartitionCount() > 0) { builder.setPartitionCount(body.getPartitionCount()); @@ -104,14 +129,15 @@ public String setGraph(@RequestBody GraphRestRequest body, HttpServletRequest re @GetMapping(value = "/graph/**", produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody - public RestApiResponse getGraph(HttpServletRequest request) throws UnsupportedEncodingException { + public RestApiResponse getGraph(HttpServletRequest request) throws + UnsupportedEncodingException { RestApiResponse response = new RestApiResponse(); GraphStatistics statistics = null; String requestURL = request.getRequestURL().toString(); final String prefix = "/graph/"; final int limit = 2; String graphName = requestURL.split(prefix, limit)[1]; - graphName = URLDecoder.decode(graphName, "utf-8"); + graphName = URLDecoder.decode(graphName, StandardCharsets.UTF_8); try { Metapb.Graph graph = pdRestService.getGraph(graphName); if (graph != null) { @@ -193,9 +219,10 @@ public Partition(Metapb.Partition pt, Metapb.PartitionStats partitionStats) { shardsList.add(new Shard(shard1, partitionId)); } } else { - log.error("GraphAPI.Partition(), get shard group: {} returns null", pt.getId()); + log.error("GraphAPI.Partition(), get shard group: {} returns null", + pt.getId()); } - } catch (PDException e){ + } catch (PDException e) { log.error("Partition init failed, error: {}", e.getMessage()); } this.shards = shardsList; @@ -233,10 +260,11 @@ public GraphStatistics(Metapb.Graph graph) throws PDException { List graphStatsList = store.getStats().getGraphStatsList(); for (Metapb.GraphStats graphStats : graphStatsList) { if ((graphName.equals(graphStats.getGraphName())) - && (Metapb.ShardRole.Leader.equals(graphStats.getRole()))) { + && (Metapb.ShardRole.Leader.equals(graphStats.getRole()))) { keyCount += graphStats.getApproximateKeys(); dataSize += graphStats.getApproximateSize(); - partition2DataSize.put(graphStats.getPartitionId(), graphStats.getApproximateSize()); + partition2DataSize.put(graphStats.getPartitionId(), + graphStats.getApproximateSize()); } } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java index 3cbcaee7ac..8adf3c7583 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java @@ -1,25 +1,48 @@ -package org.apache.hugegraph.pd.rest; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; +package org.apache.hugegraph.pd.rest; -import org.apache.hugegraph.pd.service.PDRestService; -import org.apache.hugegraph.pd.model.GraphSpaceRestRequest; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; +import java.util.List; -import lombok.extern.slf4j.Slf4j; +import javax.servlet.http.HttpServletRequest; +import org.apache.hugegraph.pd.model.GraphSpaceRestRequest; +import org.apache.hugegraph.pd.service.PDRestService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; -import org.springframework.web.bind.annotation.*; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; -import javax.servlet.http.HttpServletRequest; -import java.net.URLDecoder; -import java.util.List; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; + +import lombok.extern.slf4j.Slf4j; @RestController @Slf4j @RequestMapping("/v1") -public class GraphSpaceAPI extends API{ +public class GraphSpaceAPI extends API { @Autowired PDRestService pdRestService; @@ -35,18 +58,20 @@ public String getGraphSpaces() { } } - @PostMapping(value = "/graph-spaces/**", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @PostMapping(value = "/graph-spaces/**", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody - public String setGraphSpace(@RequestBody GraphSpaceRestRequest body, HttpServletRequest request) { + public String setGraphSpace(@RequestBody GraphSpaceRestRequest body, + HttpServletRequest request) { try { String requestURL = request.getRequestURL().toString(); String graphSpaceName = requestURL.split("/graph-spaces/", 2)[1]; - graphSpaceName = URLDecoder.decode(graphSpaceName, "utf-8"); + graphSpaceName = URLDecoder.decode(graphSpaceName, StandardCharsets.UTF_8); Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder() - .setName(graphSpaceName) - .setStorageLimit(body.getStorageLimit()) - .build(); - Metapb.GraphSpace newGraphSpace = pdRestService.setGraphSpace(graphSpace); + .setName(graphSpaceName) + .setStorageLimit(body.getStorageLimit()) + .build(); + Metapb.GraphSpace newGraphSpace = pdRestService.setGraphSpace(graphSpace); return toJSON(newGraphSpace, "graph-spaces"); } catch (PDException exception) { return toJSON(exception); @@ -61,7 +86,7 @@ public String getGraphSpace(HttpServletRequest request) { try { String requestURL = request.getRequestURL().toString(); String graphSpaceName = requestURL.split("/graph-spaces/", 2)[1]; - graphSpaceName = URLDecoder.decode(graphSpaceName, "utf-8"); + graphSpaceName = URLDecoder.decode(graphSpaceName, StandardCharsets.UTF_8); Metapb.GraphSpace graphSpace = pdRestService.getGraphSpace(graphSpaceName); return toJSON(graphSpace, "graphs-paces"); } catch (PDException exception) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java index 864ef2bba8..40b8dfcbeb 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java @@ -1,18 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.rest; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.service.PDService; - -import com.baidu.hugegraph.pd.raft.RaftEngine; - -import lombok.Data; -import lombok.extern.slf4j.Slf4j; - import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.web.bind.annotation.GetMapping; @@ -20,10 +32,13 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import java.lang.management.ManagementFactory; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutionException; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.raft.RaftEngine; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; @RestController @Slf4j @@ -33,6 +48,7 @@ public class IndexAPI extends API { PDService pdService; @Autowired PDRestService pdRestService; + @GetMapping(value = "/", produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public BriefStatistics index() throws PDException, ExecutionException, InterruptedException { @@ -47,27 +63,19 @@ public BriefStatistics index() throws PDException, ExecutionException, Interrupt } - @Data - class BriefStatistics { - String state; - String leader; - int memberSize; - int storeSize; - int graphSize; - int partitionSize; - } - @GetMapping(value = "/v1/cluster", produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public RestApiResponse cluster() throws InterruptedException, ExecutionException { Statistics statistics = new Statistics(); try { - statistics.state = String.valueOf(pdService.getStoreNodeService().getClusterStats().getState()); + statistics.state = + String.valueOf(pdService.getStoreNodeService().getClusterStats().getState()); String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(); List pdList = new ArrayList<>(); for (Metapb.Member member : RaftEngine.getInstance().getMembers()) { Member member1 = new Member(member); - if ((leaderGrpcAddress != null) && (leaderGrpcAddress.equals(member.getGrpcUrl()))) { + if ((leaderGrpcAddress != null) && + 
(leaderGrpcAddress.equals(member.getGrpcUrl()))) { member1.role = "Leader"; statistics.pdLeader = member1; } else { @@ -88,7 +96,9 @@ public RestApiResponse cluster() throws InterruptedException, ExecutionException List graphs = pdRestService.getGraphs(); // 图的数量,只统计/g statistics.graphSize = graphs.stream().filter((g) -> (g.getGraphName() != null) - && (g.getGraphName().endsWith("/g"))).count(); + && + (g.getGraphName().endsWith("/g"))) + .count(); statistics.partitionSize = pdService.getStoreNodeService().getShardGroups().size(); statistics.shardCount = pdService.getConfigService().getPDConfig().getShardCount(); for (Metapb.Store store : pdService.getStoreNodeService().getStores()) { @@ -104,18 +114,29 @@ public RestApiResponse cluster() throws InterruptedException, ExecutionException if (graph.getState() == Metapb.PartitionState.UNRECOGNIZED) { continue; // 未识别不参与对比,不然会抛出异常 } - if ((graph.getState() != null) && (graph.getState().getNumber() > dataState.getNumber())) { + if ((graph.getState() != null) && + (graph.getState().getNumber() > dataState.getNumber())) { dataState = graph.getState(); } } statistics.dataState = dataState.name(); return new RestApiResponse(statistics, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); - } catch (PDException e){ + } catch (PDException e) { log.error("PD Exception: ", e); return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); } } + @Data + class BriefStatistics { + String state; + String leader; + int memberSize; + int storeSize; + int graphSize; + int partitionSize; + } + @Data class Store { long storeId; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java index c0f3fadd3d..1154d6cd7b 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java @@ -1,12 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.rest; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import javax.servlet.http.HttpServletRequest; import org.apache.hugegraph.pd.model.PeerRestRequest; import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.service.PDService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; import com.baidu.hugegraph.pd.raft.RaftEngine; import io.grpc.stub.CallStreamObserver; @@ -14,18 +52,6 @@ import lombok.Data; import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.http.MediaType; -import org.springframework.web.bind.annotation.*; - -import javax.servlet.http.HttpServletRequest; -import java.lang.management.ManagementFactory; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.*; - @RestController @Slf4j @RequestMapping("/v1") @@ -62,39 +88,43 @@ public RestApiResponse getMembers() throws InterruptedException, ExecutionExcept resultMap.put("pdList", members); resultMap.put("pdLeader", leader); resultMap.put("numOfService", members.size()); - resultMap.put("numOfNormalService", stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0)); + resultMap.put("numOfNormalService", + stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0)); resultMap.put("stateCountMap", stateCountMap); return new RestApiResponse(resultMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); } - @PostMapping(value = "/members/change", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @PostMapping(value = "/members/change", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public String changePeerList(@RequestBody PeerRestRequest body, HttpServletRequest request) { try { - Pdpb.ChangePeerListRequest rpcRequest = Pdpb.ChangePeerListRequest.newBuilder().setPeerList( - body.getPeerList()).build(); + Pdpb.ChangePeerListRequest rpcRequest = + Pdpb.ChangePeerListRequest.newBuilder().setPeerList( + body.getPeerList()).build(); CountDownLatch latch = new CountDownLatch(1); final Pdpb.ResponseHeader[] responseHeader = {null}; - StreamObserver observer = new StreamObserver() { - @Override - public void onNext(Pdpb.getChangePeerListResponse value) { - responseHeader[0] = value.getHeader(); - } - - @Override - public void onError(Throwable t) { - responseHeader[0] = Pdpb.ResponseHeader.newBuilder().setError( - Pdpb.Error.newBuilder().setType( - 
Pdpb.ErrorType.UNKNOWN).setMessage( - t.getMessage()).build()).build(); - latch.countDown(); - } - - @Override - public void onCompleted() { - latch.countDown(); - } - }; + StreamObserver observer = + new StreamObserver() { + @Override + public void onNext(Pdpb.getChangePeerListResponse value) { + responseHeader[0] = value.getHeader(); + } + + @Override + public void onError(Throwable t) { + responseHeader[0] = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setType( + Pdpb.ErrorType.UNKNOWN).setMessage( + t.getMessage()).build()).build(); + latch.countDown(); + } + + @Override + public void onCompleted() { + latch.countDown(); + } + }; pdService.changePeerList(rpcRequest, observer); latch.await(); return toJSON(responseHeader[0], "changeResult"); @@ -104,7 +134,8 @@ public void onCompleted() { } - public static class CallStreamObserverWrap extends CallStreamObserver implements Future> { + public static class CallStreamObserverWrap extends CallStreamObserver implements + Future> { CompletableFuture> future = new CompletableFuture<>(); List values = new ArrayList<>(); @@ -169,7 +200,9 @@ public List get() throws InterruptedException, ExecutionException { } @Override - public List get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + public List get(long timeout, TimeUnit unit) throws InterruptedException, + ExecutionException, + TimeoutException { return future.get(timeout, unit); } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java index 7a35593b3b..8ccf0774bc 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java @@ -1,35 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
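
The changePeerList handler above bridges an asynchronous gRPC-style callback to a synchronous REST response with a CountDownLatch and a one-element result array, and CallStreamObserverWrap does the same job with a CompletableFuture. For reference, a minimal standalone sketch of that bridging pattern; the StreamObserver interface and the async call below are simplified stand-ins, not the project's actual gRPC stubs.

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeUnit;

    public class AsyncToSyncSketch {

        // Simplified stand-in for io.grpc.stub.StreamObserver.
        interface StreamObserver<T> {
            void onNext(T value);
            void onError(Throwable t);
            void onCompleted();
        }

        // Simulates an async service call that reports its result via a callback.
        static void changePeerListAsync(String peerList, StreamObserver<String> observer) {
            CompletableFuture.runAsync(() -> {
                observer.onNext("changed to: " + peerList);
                observer.onCompleted();
            });
        }

        // Blocks the caller until the callback delivers a result or fails.
        static String changePeerListBlocking(String peerList, long timeout, TimeUnit unit)
                throws Exception {
            CompletableFuture<String> future = new CompletableFuture<>();
            changePeerListAsync(peerList, new StreamObserver<String>() {
                @Override
                public void onNext(String value) {
                    future.complete(value);
                }

                @Override
                public void onError(Throwable t) {
                    future.completeExceptionally(t);
                }

                @Override
                public void onCompleted() {
                    // No-op: the single response was already delivered via onNext().
                }
            });
            return future.get(timeout, unit);
        }

        public static void main(String[] args) throws Exception {
            System.out.println(changePeerListBlocking("127.0.0.1:8610", 5, TimeUnit.SECONDS));
        }
    }
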
+ */ + package org.apache.hugegraph.pd.rest; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import org.apache.commons.lang.time.DateFormatUtils; import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.model.TimeRangeRequest; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.util.DateUtil; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; import com.google.protobuf.util.JsonFormat; + import lombok.Data; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang.time.DateFormatUtils; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.http.MediaType; -import org.springframework.web.bind.annotation.*; - -import java.util.*; -import java.util.concurrent.ExecutionException; - @RestController @Slf4j @RequestMapping("/v1") public class PartitionAPI extends API { + public static final String DEFAULT_DATETIME_FORMAT = "yyyy-MM-dd HH:mm:ss"; @Autowired PDRestService pdRestService; - public static final String DEFAULT_DATETIME_FORMAT = "yyyy-MM-dd HH:mm:ss"; - @GetMapping(value = "/highLevelPartitions", produces = MediaType.APPLICATION_JSON_VALUE) public RestApiResponse getHighLevelPartitions() { // 分区下多个图的信息 @@ -57,17 +82,20 @@ public RestApiResponse getHighLevelPartitions() { } // 计算分区的keyCount(不区分图) partition2KeyCount.put(graphStats.getPartitionId(), - partition2KeyCount.getOrDefault(graphStats.getPartitionId(), - graphStats.getApproximateKeys())); + partition2KeyCount.getOrDefault(graphStats.getPartitionId(), + graphStats.getApproximateKeys())); // 计算分区的dataSize, 通过累加图的大小实现 partition2DataSize.put(graphStats.getPartitionId(), - partition2DataSize.getOrDefault(graphStats.getPartitionId(), 0L) - + graphStats.getApproximateSize()); + partition2DataSize.getOrDefault(graphStats.getPartitionId(), + 0L) + + graphStats.getApproximateSize()); // 构造分区下的图信息 if (partitions2GraphsMap.get(graphStats.getPartitionId()) == null) { - partitions2GraphsMap.put(graphStats.getPartitionId(), new HashMap()); + partitions2GraphsMap.put(graphStats.getPartitionId(), + new HashMap()); } - Map partitionGraphsMap = partitions2GraphsMap.get(graphStats.getPartitionId()); + Map partitionGraphsMap = + partitions2GraphsMap.get(graphStats.getPartitionId()); partitionGraphsMap.put(graphStats.getGraphName(), new GraphStats(graphStats)); } } @@ -76,7 +104,8 @@ public RestApiResponse getHighLevelPartitions() { for (Metapb.Partition partition : partitionList) { // 补充分区内图信息的startKey, endKey if (partitions2GraphsMap.get(partition.getId()) != null) { - GraphStats graphStats = partitions2GraphsMap.get(partition.getId()).get(partition.getGraphName()); + GraphStats graphStats = + 
partitions2GraphsMap.get(partition.getId()).get(partition.getGraphName()); if (graphStats != null) { graphStats.startKey = partition.getStartKey(); graphStats.endKey = partition.getEndKey(); @@ -84,19 +113,23 @@ public RestApiResponse getHighLevelPartitions() { } // 构造分区整体信息(不区分图) if ((resultPartitionsMap.get(partition.getId()) == null) - && (!partition.getGraphName().endsWith("/s")) - ) { + && (!partition.getGraphName().endsWith("/s")) + ) { Metapb.PartitionStats partitionStats; try { - partitionStats = pdRestService.getPartitionStats(partition.getGraphName(), partition.getId()); + partitionStats = pdRestService.getPartitionStats(partition.getGraphName(), + partition.getId()); } catch (PDException e) { log.error("getPartitionStats error", e); partitionStats = null; } // 初始化分区信息 - HighLevelPartition resultPartition = new HighLevelPartition(partition, partitionStats); - resultPartition.keyCount = partition2KeyCount.getOrDefault(resultPartition.partitionId, 0L); - resultPartition.dataSize = partition2DataSize.getOrDefault(resultPartition.partitionId, 0L); + HighLevelPartition resultPartition = + new HighLevelPartition(partition, partitionStats); + resultPartition.keyCount = + partition2KeyCount.getOrDefault(resultPartition.partitionId, 0L); + resultPartition.dataSize = + partition2DataSize.getOrDefault(resultPartition.partitionId, 0L); for (ShardStats shard : resultPartition.shards) { // 对副本的地址,分区信息赋值 shard.address = storesMap.get(shard.storeId).getAddress(); @@ -104,7 +137,8 @@ public RestApiResponse getHighLevelPartitions() { } if ((partitionStats != null) && (partitionStats.getLeader() != null)) { long storeId = partitionStats.getLeader().getStoreId(); // 获取leader的storeId - resultPartition.leaderAddress = storesMap.get(storeId).getAddress(); // 获取leader的address + resultPartition.leaderAddress = + storesMap.get(storeId).getAddress(); // 获取leader的address } resultPartitionsMap.put(partition.getId(), resultPartition); } @@ -123,7 +157,8 @@ public RestApiResponse getHighLevelPartitions() { String graphName = entry1.getKey(); GraphStats tmpGraph = graphsMap.get(graphName); final int postfixLength = 2; - tmpGraph.graphName = tmpGraph.graphName.substring(0, tmpGraph.graphName.length() - postfixLength); + tmpGraph.graphName = tmpGraph.graphName.substring(0, tmpGraph.graphName.length() - + postfixLength); graphsList.add(tmpGraph); } graphsList.sort((o1, o2) -> o1.graphName.compareTo(o2.graphName)); @@ -182,7 +217,8 @@ public RestApiResponse getPartitions() { Partition partition = new Partition(pt); String graphName = partition.getGraphName(); partition.getShards().sort(Comparator.comparing(Shard::getStoreId)); - Metapb.PartitionStats partitionStats = pdRestService.getPartitionStats(graphName, pt.getId()); + Metapb.PartitionStats partitionStats = + pdRestService.getPartitionStats(graphName, pt.getId()); Map shardStats = new HashMap<>(); if (partitionStats != null) { String dateTime = DateFormatUtils.format( @@ -205,7 +241,8 @@ public RestApiResponse getPartitions() { } - HashMap storeRaftStats = raftMap.get(shard.getStoreId()); + HashMap storeRaftStats = + raftMap.get(shard.getStoreId()); if (storeRaftStats != null) { Metapb.RaftStats raftStats = storeRaftStats.get(partition.getId()); if (raftStats != null) { @@ -219,7 +256,8 @@ public RestApiResponse getPartitions() { partitions.add(partition); } - partitions.sort(Comparator.comparing(Partition::getGraphName).thenComparing(Partition::getId)); + partitions.sort( + Comparator.comparing(Partition::getGraphName).thenComparing(Partition::getId)); 
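
The getHighLevelPartitions code above accumulates per-partition data sizes with a getOrDefault-then-put pair. An equivalent, slightly terser expression of the same accumulation is Map.merge; a small self-contained sketch follows, where the partition ids and sizes are made-up sample values rather than anything taken from the service.

    import java.util.HashMap;
    import java.util.Map;

    public class PartitionSizeAggregation {

        public static void main(String[] args) {
            // (partitionId, approximateSize) pairs as they might arrive per graph.
            long[][] samples = { {1, 100}, {2, 40}, {1, 25}, {3, 7}, {2, 10} };

            // Style used above: read with a default, then put back the sum.
            Map<Long, Long> viaGetOrDefault = new HashMap<>();
            for (long[] s : samples) {
                viaGetOrDefault.put(s[0], viaGetOrDefault.getOrDefault(s[0], 0L) + s[1]);
            }

            // Equivalent accumulation using Map.merge.
            Map<Long, Long> viaMerge = new HashMap<>();
            for (long[] s : samples) {
                viaMerge.merge(s[0], s[1], Long::sum);
            }

            System.out.println(viaGetOrDefault); // {1=125, 2=50, 3=7}
            System.out.println(viaMerge);        // same result
        }
    }
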
HashMap dataMap = new HashMap<>(); dataMap.put("partitions", partitions); return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); @@ -238,7 +276,8 @@ public String getPartitionsAndStats() { for (Metapb.Graph graph : pdRestService.getGraphs()) { List partitionList = new ArrayList<>(); List partitionStatsList = new ArrayList<>(); - for (Metapb.Partition partition : pdRestService.getPartitions(graph.getGraphName())) { + for (Metapb.Partition partition : pdRestService.getPartitions( + graph.getGraphName())) { Metapb.PartitionStats partitionStats = pdRestService .getPartitionStats(graph.getGraphName(), partition.getId()); partitionList.add(partition); @@ -247,10 +286,9 @@ public String getPartitionsAndStats() { graph2Partitions.put(graph.getGraphName(), partitionList); graph2PartitionStats.put(graph.getGraphName(), partitionStatsList); } - StringBuilder builder = new StringBuilder(); - builder.append("{\"partitions\":").append(toJSON(graph2Partitions)); - builder.append(",\"partitionStats\":").append(toJSON(graph2PartitionStats)).append("}"); - return builder.toString(); + String builder = "{\"partitions\":" + toJSON(graph2Partitions) + + ",\"partitionStats\":" + toJSON(graph2PartitionStats) + "}"; + return builder; } catch (PDException e) { log.error("PD exception:" + e); return toJSON(e); @@ -259,22 +297,24 @@ public String getPartitionsAndStats() { private Map getShardStats(Metapb.PartitionStats partitionStats) { Map stats = new HashMap<>(); - if (partitionStats.getShardStatsList() != null) + if (partitionStats.getShardStatsList() != null) { partitionStats.getShardStatsList().forEach(shardStats -> { stats.put(shardStats.getStoreId(), shardStats); }); + } return stats; } @PostMapping(value = "/partitions/log", consumes = MediaType.APPLICATION_JSON_VALUE, - produces = MediaType.APPLICATION_JSON_VALUE) + produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public String getPartitionLog(@RequestBody TimeRangeRequest request) { try { Date dateStart = DateUtil.getDate(request.getStartTime()); Date dateEnd = DateUtil.getDate(request.getEndTime()); - List changedRecords = pdRestService.getPartitionLog(dateStart.getTime(), - dateEnd.getTime()); + List changedRecords = + pdRestService.getPartitionLog(dateStart.getTime(), + dateEnd.getTime()); if (changedRecords != null) { JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry .newBuilder().add(Pdpb.SplitDataRequest.getDescriptor()).build(); @@ -367,7 +407,7 @@ class HighLevelPartition { shards = new ArrayList<>(); for (Metapb.ShardStats shardStats : partitionStats.getShardStatsList()) { if ((shardStats.getState() != Metapb.ShardState.UNRECOGNIZED) - && (shardStats.getState().getNumber() > tmpShardState.getNumber())) { + && (shardStats.getState().getNumber() > tmpShardState.getNumber())) { tmpShardState = shardStats.getState(); progress = shardStats.getProgress(); } @@ -379,7 +419,7 @@ class HighLevelPartition { for (Metapb.Shard shard : pdRestService.getShardList(partition.getId())) { shards.add(new ShardStats(shard)); } - } catch (PDException e){ + } catch (PDException e) { log.error("get shard list failed, {}", e.getMessage()); } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java index a4c9a0c232..8c94cb4a5f 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java @@ -1,8 +1,28 @@ 
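
getPartitionLog above registers Pdpb.SplitDataRequest in a JsonFormat.TypeRegistry so that log entries whose payload is a packed protobuf Any can be rendered as JSON. A minimal sketch of that registry mechanism using only protobuf's bundled StringValue wrapper type (it assumes protobuf-java-util on the classpath; the wrapped text is arbitrary sample data):

    import com.google.protobuf.Any;
    import com.google.protobuf.InvalidProtocolBufferException;
    import com.google.protobuf.StringValue;
    import com.google.protobuf.util.JsonFormat;

    public class TypeRegistrySketch {

        public static void main(String[] args) throws InvalidProtocolBufferException {
            // A message packed into google.protobuf.Any, as log payloads often are.
            Any payload = Any.pack(
                    StringValue.newBuilder().setValue("split partition 12 into 4 ranges").build());

            // Without registering StringValue, printing the Any fails with "Cannot find type";
            // the registry tells JsonFormat how to unpack and render it.
            JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry.newBuilder()
                                                                      .add(StringValue.getDescriptor())
                                                                      .build();

            String json = JsonFormat.printer()
                                    .usingTypeRegistry(registry)
                                    .print(payload);
            System.out.println(json);
        }
    }
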
+/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.rest; +import java.util.Collections; +import java.util.List; +import java.util.Optional; + import org.apache.hugegraph.pd.model.PromTargetsModel; import org.apache.hugegraph.pd.service.PromTargetsService; -import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.http.ResponseEntity; @@ -11,9 +31,7 @@ import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; -import java.util.Collections; -import java.util.List; -import java.util.Optional; +import lombok.extern.slf4j.Slf4j; /** * @author lynn.bond@hotmail.com on 2022/2/14 @@ -27,7 +45,8 @@ public class PromTargetsAPI { private PromTargetsService service; @GetMapping(value = "/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) - public ResponseEntity> getPromTargets(@PathVariable(value = "appName", required = true) String appName) { + public ResponseEntity> getPromTargets( + @PathVariable(value = "appName", required = true) String appName) { return ResponseEntity.of(Optional.ofNullable(this.service.getTargets(appName))); } @@ -37,30 +56,31 @@ public ResponseEntity> getPromAllTargets() { } @GetMapping(value = "/demo/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) - public List getDemoTargets(@PathVariable(value = "appName", required = true) String targetType) { + public List getDemoTargets( + @PathVariable(value = "appName", required = true) String targetType) { - PromTargetsModel model =null; + PromTargetsModel model = null; switch (targetType) { case "node": - model=PromTargetsModel.of() - .addTarget("10.14.139.26:8100") - .addTarget("10.14.139.27:8100") - .addTarget("10.14.139.28:8100") - .setMetricsPath("/metrics") - .setScheme("http"); + model = PromTargetsModel.of() + .addTarget("10.14.139.26:8100") + .addTarget("10.14.139.27:8100") + .addTarget("10.14.139.28:8100") + .setMetricsPath("/metrics") + .setScheme("http"); break; case "store": - model=PromTargetsModel.of() - .addTarget("172.20.94.98:8521") - .addTarget("172.20.94.98:8522") - .addTarget("172.20.94.98:8523") - .setMetricsPath("/actuator/prometheus") - .setScheme("http"); + model = PromTargetsModel.of() + .addTarget("172.20.94.98:8521") + .addTarget("172.20.94.98:8522") + .addTarget("172.20.94.98:8523") + .setMetricsPath("/actuator/prometheus") + .setScheme("http"); break; case "pd": - model=PromTargetsModel.of() - .addTarget("172.20.94.98:8620") - .setMetricsPath("/actuator/prometheus"); + model = PromTargetsModel.of() + .addTarget("172.20.94.98:8620") + .setMetricsPath("/actuator/prometheus"); break; default: diff --git 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java index ea93674a50..64e5eb4d5f 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java @@ -1,12 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.rest; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.common.PDRuntimeException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.grpc.Pdpb.GetMembersResponse; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; -import com.baidu.hugegraph.pd.grpc.discovery.Query; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; + +import javax.servlet.http.HttpServletRequest; + +import org.apache.commons.lang3.StringUtils; import org.apache.hugegraph.pd.license.LicenseVerifierService; import org.apache.hugegraph.pd.model.RegistryQueryRestRequest; import org.apache.hugegraph.pd.model.RegistryRestRequest; @@ -14,8 +33,6 @@ import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.service.PDService; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.web.bind.annotation.GetMapping; @@ -25,12 +42,15 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import javax.servlet.http.HttpServletRequest; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.common.PDRuntimeException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.Pdpb.GetMembersResponse; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.Query; + +import lombok.extern.slf4j.Slf4j; /** * @author zhangyingjie @@ -46,13 +66,16 @@ public class RegistryAPI extends API { @Autowired PDService pdService; - @PostMapping(value = "/registry", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @PostMapping(value = "/registry", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) 
@ResponseBody - public RegistryRestResponse register(@RequestBody RegistryRestRequest body, HttpServletRequest request) { + public RegistryRestResponse register(@RequestBody RegistryRestRequest body, + HttpServletRequest request) { RegistryRestResponse registryResponse = null; try { long interval = Long.valueOf(body.getInterval()).longValue(); - NodeInfo info = NodeInfo.newBuilder().setAppName(body.getAppName()).setVersion(body.getVersion()) + NodeInfo info = NodeInfo.newBuilder().setAppName(body.getAppName()) + .setVersion(body.getVersion()) .setAddress(body.getAddress()).putAllLabels(body.getLabels()) .setInterval(interval).build(); registryResponse = pdRestService.register(info); @@ -68,7 +91,8 @@ public RegistryRestResponse register(@RequestBody RegistryRestRequest body, Http return registryResponse; } - @PostMapping(value = "/registryInfo", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @PostMapping(value = "/registryInfo", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public RegistryRestResponse getInfo(@RequestBody RegistryQueryRestRequest body, HttpServletRequest request) { @@ -76,9 +100,11 @@ public RegistryRestResponse getInfo(@RequestBody RegistryQueryRestRequest body, try { boolean labelNotEmpty = body.getLabels() != null && !body.getLabels().isEmpty(); Query query = Query.newBuilder() - .setAppName(StringUtils.isEmpty(body.getAppName()) ? "" : body.getAppName()) + .setAppName(StringUtils.isEmpty(body.getAppName()) ? "" : + body.getAppName()) .putAllLabels(labelNotEmpty ? body.getLabels() : new HashMap<>()) - .setVersion(StringUtils.isEmpty(body.getVersion()) ? "" : body.getVersion()) + .setVersion(StringUtils.isEmpty(body.getVersion()) ? "" : + body.getVersion()) .build(); ArrayList registryResponse = pdRestService.getNodeInfo(query); response.setErrorType(Pdpb.ErrorType.OK); @@ -98,8 +124,9 @@ public RegistryRestResponse allInfo(HttpServletRequest request) { RegistryRestResponse response = new RegistryRestResponse(); try { //1.normal registry - Query query = Query.newBuilder().setAppName("").putAllLabels(new HashMap<>()).setVersion("") - .build(); + Query query = + Query.newBuilder().setAppName("").putAllLabels(new HashMap<>()).setVersion("") + .build(); ArrayList registryResponse = pdRestService.getNodeInfo(query); //2.pd member LinkedList pdMembers = getMembers(); @@ -131,7 +158,7 @@ public RegistryRestResponse allInfo(HttpServletRequest request) { private LinkedList getMembers() throws Exception { CallStreamObserverWrap response = new CallStreamObserverWrap<>(); pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); - LinkedList members =new LinkedList<>(); + LinkedList members = new LinkedList<>(); List membersList = response.get().get(0).getMembersList(); for (Metapb.Member member : membersList) { RegistryRestRequest restRequest = new RegistryRestRequest(); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java index 84a672c5ea..0a17a3f3f4 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java @@ -1,17 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.rest; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.service.PDService; - -import lombok.Data; -import lombok.extern.slf4j.Slf4j; - import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.web.bind.annotation.GetMapping; @@ -19,9 +32,12 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; @RestController @Slf4j @@ -44,7 +60,8 @@ public RestApiResponse getShards() { String graphName = graph.getGraphName(); List partitions = pdRestService.getPartitions(graphName); for (Metapb.Partition pt : partitions) { - Metapb.PartitionStats partitionStats = pdRestService.getPartitionStats(graphName, pt.getId()); + Metapb.PartitionStats partitionStats = + pdRestService.getPartitionStats(graphName, pt.getId()); if (partitionStats != null) { List shardStatsList = partitionStats.getShardStatsList(); for (Metapb.ShardStats shardStats : shardStatsList) { @@ -60,10 +77,13 @@ public RestApiResponse getShards() { } else { List shardList = new ArrayList<>(); var shardGroup = pdService.getStoreNodeService().getShardGroup(pt.getId()); - if (shardGroup != null){ + if (shardGroup != null) { shardList = shardGroup.getShardsList(); } else { - log.error("ShardAPI.getShards(), get shards of group id: {} returns null.", pt.getId()); + log.error( + "ShardAPI.getShards(), get shards of group id: {} returns " + + "null.", + pt.getId()); } for (Metapb.Shard shard : shardList) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java index a07b7025ac..9f812c7a15 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java @@ -1,21 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.rest; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.model.StoreRestRequest; import org.apache.hugegraph.pd.model.TimeRangeRequest; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.util.DateUtil; -import com.google.protobuf.util.JsonFormat; -import lombok.Data; -import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; -import org.springframework.web.bind.annotation.*; +import org.springframework.web.bind.annotation.DeleteMapping; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; -import java.util.*; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.google.protobuf.util.JsonFormat; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; @RestController @Slf4j @@ -40,7 +71,8 @@ public RestApiResponse getStores() { storeStatsList.sort((o1, o2) -> o1.address.compareTo(o2.address)); dataMap.put("stores", storeStatsList); dataMap.put("numOfService", storeStatsList.size()); - dataMap.put("numOfNormalService", stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0)); + dataMap.put("numOfNormalService", + stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0)); dataMap.put("stateCountMap", stateCountMap); return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); } catch (PDException e) { @@ -50,7 +82,8 @@ public RestApiResponse getStores() { } // 仅支持通过该接口修改 storeState - @PostMapping(value = "/store/{storeId}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @PostMapping(value = "/store/{storeId}", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public String setStore(@PathVariable long storeId, @RequestBody StoreRestRequest request) { try { @@ -95,8 +128,9 @@ public Map> shardLeaders() throws PDException { if (shard.getRole() == Metapb.ShardRole.Leader) { try { String ip = pdRestService.getStore(shard.getStoreId()).getRaftAddress(); - if (!leaders.containsKey(ip)) + if (!leaders.containsKey(ip)) { leaders.put(ip, new ArrayList<>()); + } leaders.get(ip).add(group.getId()); } catch (PDException e) { throw new RuntimeException(e); @@ -125,14 +159,16 @@ public String removeStore(@PathVariable(value = 
"storeId") Long storeId) { return "OK"; } - @PostMapping(value = "/store/log", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @PostMapping(value = "/store/log", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public String getStoreLog(@RequestBody TimeRangeRequest request) { try { Date dateStart = DateUtil.getDate(request.getStartTime()); Date dateEnd = DateUtil.getDate(request.getEndTime()); - List changedStore = pdRestService.getStoreStatusLog(dateStart.getTime(), - dateEnd.getTime()); + List changedStore = + pdRestService.getStoreStatusLog(dateStart.getTime(), + dateEnd.getTime()); if (changedStore != null) { JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry .newBuilder().add(Metapb.Store.getDescriptor()).build(); @@ -158,9 +194,11 @@ public RestApiResponse getStore(@PathVariable long storeId) { } if (store != null) { StoreStatistics resultStoreStats = resultStoreStats = new StoreStatistics(store); - return new RestApiResponse(resultStoreStats, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + return new RestApiResponse(resultStoreStats, Pdpb.ErrorType.OK, + Pdpb.ErrorType.OK.name()); } else { - return new RestApiResponse(null, Pdpb.ErrorType.STORE_ID_NOT_EXIST, Pdpb.ErrorType.STORE_ID_NOT_EXIST.name()); + return new RestApiResponse(null, Pdpb.ErrorType.STORE_ID_NOT_EXIST, + Pdpb.ErrorType.STORE_ID_NOT_EXIST.name()); } } @@ -181,7 +219,7 @@ public String getStoresAndStats() { @ResponseBody public RestApiResponse getStoreMonitorData(@PathVariable long storeId) { try { - List> result = pdRestService.getMonitorData(storeId); + List> result = pdRestService.getMonitorData(storeId); return new RestApiResponse(result, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); } catch (PDException e) { return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); @@ -254,11 +292,11 @@ class StoreStatistics { version = store.getVersion(); deployPath = store.getDeployPath(); final String prefix = "file:"; - if ((deployPath != null) && (deployPath.startsWith(prefix))){ + if ((deployPath != null) && (deployPath.startsWith(prefix))) { // 去掉前缀 deployPath = deployPath.substring(prefix.length()); } - if ((deployPath != null) && (deployPath.contains(".jar"))){ + if ((deployPath != null) && (deployPath.contains(".jar"))) { // 去掉jar包之后的信息 deployPath = deployPath.substring(0, deployPath.indexOf(".jar") + 4); } @@ -266,7 +304,7 @@ class StoreStatistics { startTimeStamp = store.getStartTimestamp(); try { serviceCreatedTimeStamp = pdRestService.getStore(store.getId()) - .getStats().getStartTime(); // 实例时间 + .getStats().getStartTime(); // 实例时间 final int base = 1000; serviceCreatedTimeStamp *= base; // 转化为毫秒 } catch (PDException e) { @@ -291,12 +329,14 @@ class StoreStatistics { // 图名只保留/g /m /s前面的部分 final int postfixLength = 2; graphNameSet.add(graphName.substring(0, graphName.length() - postfixLength)); - if ((graphStats.getGraphName() != null) && (graphStats.getGraphName().endsWith("/g"))) { + if ((graphStats.getGraphName() != null) && + (graphStats.getGraphName().endsWith("/g"))) { Partition pt = new Partition(graphStats); partitionStatsList.add(pt); } // 统计每个分区的keyCount - partition2KeyCount.put(graphStats.getPartitionId(), graphStats.getApproximateKeys()); + partition2KeyCount.put(graphStats.getPartitionId(), + graphStats.getApproximateKeys()); if (graphStats.getRole() == Metapb.ShardRole.Leader) { leaderPartitionIds.add(graphStats.getPartitionId()); } diff --git 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java index 3f5ddc16c6..3ff7117600 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java @@ -1,13 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.rest; -import com.baidu.hugegraph.pd.common.KVPair; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; +import java.util.List; +import java.util.Map; import org.apache.hugegraph.pd.service.PDRestService; - -import lombok.extern.slf4j.Slf4j; - import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.web.bind.annotation.GetMapping; @@ -15,8 +28,11 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import java.util.List; -import java.util.Map; +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; + +import lombok.extern.slf4j.Slf4j; @RestController @Slf4j @@ -51,7 +67,7 @@ public String patrolPartitions() { @GetMapping(value = "/balancePartitions", produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody - public Map> balancePartitions() { + public Map> balancePartitions() { try { Map> partitions = pdRestService.balancePartitions(); return partitions; @@ -63,15 +79,16 @@ public Map> balancePartitions() { @GetMapping(value = "/splitPartitions", produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody - public String splitPartitions() { + public String splitPartitions() { try { - List partitions = pdRestService.splitPartitions(); + List partitions = pdRestService.splitPartitions(); return toJSON(partitions, "partitions"); } catch (PDException e) { e.printStackTrace(); return toJSON(e); } } + @GetMapping(value = "/balanceLeaders") public Map balanceLeaders() throws PDException { return pdRestService.balancePartitionLeader(); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java index e933f6c777..856e8149a4 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java @@ -1,5 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.rest; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.watch.PDWatchSubject; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PutMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + import com.baidu.hugegraph.pd.RegistryService; import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.config.PDConfig; @@ -9,21 +41,10 @@ import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; import com.baidu.hugegraph.pd.meta.MetadataFactory; import com.baidu.hugegraph.pd.meta.QueueStore; - -import org.apache.hugegraph.pd.pulse.PDPulseSubject; -import org.apache.hugegraph.pd.watch.PDWatchSubject; - import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Parser; -import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.http.MediaType; -import org.springframework.web.bind.annotation.*; - -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.atomic.AtomicLong; +import lombok.extern.slf4j.Slf4j; /** * @author lynn.bond@hotmail.com on 2022/2/9 @@ -38,15 +59,15 @@ public class TestAPI { @GetMapping(value = "/discovery/{appName}", produces = MediaType.TEXT_PLAIN_VALUE) @ResponseBody - public String discovery(@PathVariable(value = "appName", required = true)String appName){ - RegistryService register =new RegistryService(pdConfig); - // Query query=Query.newBuilder().setAppName("hugegraph").build(); + public String discovery(@PathVariable(value = "appName", required = true) String appName) { + RegistryService register = new RegistryService(pdConfig); + // Query query=Query.newBuilder().setAppName("hugegraph").build(); AtomicLong label = new AtomicLong(); HashMap labels = new HashMap<>(); String labelValue = String.valueOf(label.incrementAndGet()); //labels.put("address",labelValue); Query query = Query.newBuilder().build(); - // Query query = Query.newBuilder().setAppName("hugegraph").set.build(); + // Query query = Query.newBuilder().setAppName("hugegraph").set.build(); return register.getNodes(query).toString(); } @@ -56,18 +77,18 @@ public String discovery(@PathVariable(value = "appName", required = true)String public String notifyClient() { PDPulseSubject.notifyClient( PartitionHeartbeatResponse.newBuilder() - .setPartition(Metapb.Partition.newBuilder() - .setId(8) - .setGraphName("graphName8")) - - .setChangeShard( - 
ChangeShard.newBuilder() - .setChangeTypeValue(8) - .addShard(Metapb.Shard.newBuilder() - .setRoleValue(8) - .setStoreId(8) - ) - ) + .setPartition(Metapb.Partition.newBuilder() + .setId(8) + .setGraphName("graphName8")) + + .setChangeShard( + ChangeShard.newBuilder() + .setChangeTypeValue(8) + .addShard(Metapb.Shard.newBuilder() + .setRoleValue(8) + .setStoreId(8) + ) + ) ); return "partition"; @@ -87,27 +108,33 @@ public String testPutQueue() { return "queue"; } - public void putQueue(){ - PartitionHeartbeatResponse response=PartitionHeartbeatResponse.newBuilder() - .setPartition(Metapb.Partition.newBuilder() - .setId(9) - .setGraphName("graphName")) - .setChangeShard( - ChangeShard.newBuilder() - .setChangeTypeValue(9) - .addShard(Metapb.Shard.newBuilder() - .setRoleValue(9) - .setStoreId(9) - ) - ).build(); - - Metapb.QueueItem.Builder builder=Metapb.QueueItem.newBuilder() - .setItemId("item-id") - .setItemClass("item-class") - .setItemContent(response.toByteString()); - - - QueueStore store= MetadataFactory.newQueueStore(pdConfig); + public void putQueue() { + PartitionHeartbeatResponse response = PartitionHeartbeatResponse.newBuilder() + .setPartition( + Metapb.Partition.newBuilder() + .setId(9) + .setGraphName( + "graphName")) + .setChangeShard( + ChangeShard.newBuilder() + .setChangeTypeValue( + 9) + .addShard( + Metapb.Shard.newBuilder() + .setRoleValue( + 9) + .setStoreId( + 9) + ) + ).build(); + + Metapb.QueueItem.Builder builder = Metapb.QueueItem.newBuilder() + .setItemId("item-id") + .setItemClass("item-class") + .setItemContent(response.toByteString()); + + + QueueStore store = MetadataFactory.newQueueStore(pdConfig); try { store.addItem(builder.setItemId("item-id-1").build()); @@ -116,25 +143,24 @@ public void putQueue(){ } catch (PDException e) { e.printStackTrace(); } - List queue=null; + List queue = null; try { - queue=store.getQueue(); + queue = store.getQueue(); } catch (PDException e) { e.printStackTrace(); } - Parser parser= PartitionHeartbeatResponse.parser(); + Parser parser = PartitionHeartbeatResponse.parser(); - queue.stream().forEach(e->{ - PartitionHeartbeatResponse buf=null; + queue.stream().forEach(e -> { + PartitionHeartbeatResponse buf = null; try { - buf=parser.parseFrom(e.getItemContent()); + buf = parser.parseFrom(e.getItemContent()); } catch (InvalidProtocolBufferException ex) { ex.printStackTrace(); } - PDPulseSubject.notifyClient( PartitionHeartbeatResponse.newBuilder(buf)); + PDPulseSubject.notifyClient(PartitionHeartbeatResponse.newBuilder(buf)); }); - } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java index b8554431d5..f279188ba8 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java @@ -1,5 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.service; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import javax.annotation.PostConstruct; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.license.LicenseVerifierService; +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.watch.PDWatchSubject; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + import com.baidu.hugegraph.pd.RegistryService; import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.common.PDRuntimeException; @@ -10,24 +39,15 @@ import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; import com.baidu.hugegraph.pd.grpc.discovery.Query; import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; -import org.apache.hugegraph.pd.license.LicenseVerifierService; -import org.apache.hugegraph.pd.pulse.PDPulseSubject; import com.baidu.hugegraph.pd.raft.RaftEngine; import com.baidu.hugegraph.pd.raft.RaftStateListener; -import org.apache.hugegraph.pd.watch.PDWatchSubject; + import io.grpc.CallOptions; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; import io.grpc.MethodDescriptor; import io.grpc.stub.AbstractBlockingStub; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; -import org.lognet.springboot.grpc.GRpcService; -import org.springframework.beans.factory.annotation.Autowired; - -import javax.annotation.PostConstruct; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; /** * @author zhangyingjie @@ -35,13 +55,16 @@ **/ @Slf4j @GRpcService -public class DiscoveryService extends DiscoveryServiceGrpc.DiscoveryServiceImplBase implements RaftStateListener { +public class DiscoveryService extends DiscoveryServiceGrpc.DiscoveryServiceImplBase implements + RaftStateListener { - @Autowired - private PDConfig pdConfig; static final AtomicLong id = new AtomicLong(); + private static final String CORES = "cores"; RegistryService register = null; LicenseVerifierService licenseVerifierService; + @Autowired + private PDConfig pdConfig; + private ManagedChannel channel; @PostConstruct public void init() throws PDException { @@ -54,12 +77,11 @@ public void init() throws PDException { private Pdpb.ResponseHeader newErrorHeader(PDException e) { Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( - Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage())).build(); + Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage())) + .build(); return header; } - private static final String CORES = "cores"; - @Override public void register(NodeInfo request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -92,7 +114,9 @@ public void register(NodeInfo request, io.grpc.stub.StreamObserver register.register(request, outTimes); String valueId = request.getId(); registerInfo = RegisterInfo.newBuilder().setNodeInfo(NodeInfo.newBuilder().setId( - "0".equals(valueId) ? 
String.valueOf(id.incrementAndGet()) : valueId).build()).build(); + "0".equals(valueId) ? + String.valueOf(id.incrementAndGet()) : valueId).build()) + .build(); } catch (PDException e) { registerInfo = RegisterInfo.newBuilder().setHeader(newErrorHeader(e)).build(); @@ -104,8 +128,9 @@ public void register(NodeInfo request, io.grpc.stub.StreamObserver registerInfo = RegisterInfo.newBuilder().setHeader(header).build(); log.debug("registerStore exception: ", ex); } catch (Exception e) { - Pdpb.Error error = Pdpb.Error.newBuilder().setTypeValue(Pdpb.ErrorType.UNKNOWN.getNumber()) - .setMessage(e.getMessage()).build(); + Pdpb.Error error = + Pdpb.Error.newBuilder().setTypeValue(Pdpb.ErrorType.UNKNOWN.getNumber()) + .setMessage(e.getMessage()).build(); Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build(); registerInfo = RegisterInfo.newBuilder().setHeader(header).build(); } @@ -122,27 +147,29 @@ public void getNodes(Query request, io.grpc.stub.StreamObserver respo responseObserver.onCompleted(); } - private ManagedChannel channel; - public boolean isLeader() { return RaftEngine.getInstance().isLeader(); } private > void redirectToLeader( - MethodDescriptor method, ReqT req, io.grpc.stub.StreamObserver observer) { + MethodDescriptor method, ReqT req, + io.grpc.stub.StreamObserver observer) { try { if (channel == null) { synchronized (this) { if (channel == null) { channel = ManagedChannelBuilder - .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()).usePlaintext() + .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()) + .usePlaintext() .build(); } } - log.info("Grpc get leader address {}", RaftEngine.getInstance().getLeaderGrpcAddress()); + log.info("Grpc get leader address {}", + RaftEngine.getInstance().getLeaderGrpcAddress()); } - io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), req, + io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), + req, observer); } catch (Exception e) { e.printStackTrace(); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java index 59a4ffd30c..7b4952e906 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java @@ -1,5 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
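
redirectToLeader above creates its leader channel lazily, with a null check outside and inside a synchronized block before building the ManagedChannel. A generic, self-contained sketch of that double-checked locking idiom follows; the ExpensiveClient class is a made-up placeholder for anything costly to construct, and the classic form of the idiom relies on the cached field being volatile.

    public class LazyClientHolder {

        // Made-up placeholder for something costly to build, e.g. a gRPC channel.
        static class ExpensiveClient {
            private final String target;

            ExpensiveClient(String target) {
                this.target = target;
            }

            String call(String request) {
                return "sent '" + request + "' to " + target;
            }
        }

        // volatile is what makes double-checked locking safe: it guarantees
        // other threads observe a fully constructed instance.
        private volatile ExpensiveClient client;
        private final String target;

        LazyClientHolder(String target) {
            this.target = target;
        }

        ExpensiveClient get() {
            ExpensiveClient local = client;          // first (unsynchronized) check
            if (local == null) {
                synchronized (this) {
                    local = client;                  // second check under the lock
                    if (local == null) {
                        local = new ExpensiveClient(target);
                        client = local;
                    }
                }
            }
            return local;
        }

        public static void main(String[] args) {
            LazyClientHolder holder = new LazyClientHolder("10.0.0.1:8686");
            System.out.println(holder.get().call("getMembers"));
            // Subsequent calls reuse the same instance.
            System.out.println(holder.get() == holder.get());
        }
    }
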
+ */ + package org.apache.hugegraph.pd.service; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import javax.annotation.PostConstruct; + +import org.apache.hugegraph.pd.watch.KvWatchSubject; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + import com.baidu.hugegraph.pd.KvService; import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.config.PDConfig; @@ -20,33 +51,25 @@ import com.baidu.hugegraph.pd.grpc.kv.WatchType; import com.baidu.hugegraph.pd.raft.RaftEngine; import com.baidu.hugegraph.pd.raft.RaftStateListener; -import org.apache.hugegraph.pd.watch.KvWatchSubject; + import io.grpc.ManagedChannel; import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -import org.lognet.springboot.grpc.GRpcService; -import org.springframework.beans.factory.annotation.Autowired; - -import javax.annotation.PostConstruct; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; /** * kv存储的核心实现类 */ @Slf4j @GRpcService -public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implements RaftStateListener, ServiceGrpc { +public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implements RaftStateListener, + ServiceGrpc { + KvService kvService; + AtomicLong count = new AtomicLong(); + String msg = "node is not leader,it is necessary to redirect to the leader on the client"; @Autowired private PDConfig pdConfig; - KvService kvService; - private ManagedChannel channel = null; + private final ManagedChannel channel = null; private KvWatchSubject subjects; private ScheduledExecutorService executor; @@ -61,7 +84,7 @@ public void init() { if (isLeader()) { subjects.keepClientAlive(); } - }, 0, KvWatchSubject.WATCH_TTL * 1 / 2, TimeUnit.MILLISECONDS); + }, 0, KvWatchSubject.WATCH_TTL / 2, TimeUnit.MILLISECONDS); } /** @@ -95,7 +118,6 @@ public void put(Kv request, StreamObserver responseObserver) { responseObserver.onCompleted(); } - /** * 普通的get * @@ -148,7 +170,8 @@ public void delete(K request, StreamObserver responseObserver) { response = builder.setHeader(getResponseHeader()).build(); } catch (PDException e) { if (!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getDeleteMethod(), request, responseObserver); + redirectToLeader(channel, KvServiceGrpc.getDeleteMethod(), request, + responseObserver); return; } response = builder.setHeader(getResponseHeader(e)).build(); @@ -157,7 +180,6 @@ public void delete(K request, StreamObserver responseObserver) { responseObserver.onCompleted(); } - /** * 按前缀删除 * @@ -166,7 +188,8 @@ public void delete(K request, StreamObserver responseObserver) { */ public void deletePrefix(K request, StreamObserver responseObserver) { if (!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request, responseObserver); + redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request, + responseObserver); return; } KvResponse response; @@ -184,7 +207,8 @@ public void deletePrefix(K request, StreamObserver responseObserver) response = builder.setHeader(getResponseHeader()).build(); } catch (PDException e) { if 
(!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request, responseObserver); + redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request, + responseObserver); return; } response = builder.setHeader(getResponseHeader(e)).build(); @@ -201,7 +225,8 @@ public void deletePrefix(K request, StreamObserver responseObserver) */ public void scanPrefix(K request, StreamObserver responseObserver) { if (!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request, responseObserver); + redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request, + responseObserver); return; } ScanPrefixResponse response; @@ -211,7 +236,8 @@ public void scanPrefix(K request, StreamObserver responseObs response = builder.setHeader(getResponseHeader()).putAllKvs(kvs).build(); } catch (PDException e) { if (!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request, responseObserver); + redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request, + responseObserver); return; } response = builder.setHeader(getResponseHeader(e)).build(); @@ -220,8 +246,6 @@ public void scanPrefix(K request, StreamObserver responseObs responseObserver.onCompleted(); } - AtomicLong count = new AtomicLong(); - /** * 获取随机非0字符串做Id * @@ -236,9 +260,10 @@ private long getRandomLong() { } return result; } - String msg = "node is not leader,it is necessary to redirect to the leader on the client"; + /** * 普通的watch + * * @param request * @param responseObserver */ @@ -264,6 +289,7 @@ public void watch(WatchRequest request, StreamObserver responseOb /** * 普通的前缀watch + * * @param request * @param responseObserver */ @@ -289,6 +315,7 @@ public void watchPrefix(WatchRequest request, StreamObserver resp /** * 上面两个方法的通用方式 + * * @param request * @param responseObserver * @param isPrefix @@ -307,13 +334,14 @@ private void clientWatch(WatchRequest request, StreamObserver res } else { response = builder.setState(WatchState.Started).build(); } - String delimiter = isPrefix ? KvWatchSubject.PREFIX_DELIMITER : KvWatchSubject.KEY_DELIMITER; + String delimiter = + isPrefix ? 
KvWatchSubject.PREFIX_DELIMITER : KvWatchSubject.KEY_DELIMITER; subjects.addObserver(key, clientId, responseObserver, delimiter); synchronized (responseObserver) { responseObserver.onNext(response); } } catch (PDException e) { - if (!isLeader()){ + if (!isLeader()) { throw new PDException(-1, msg); } throw new PDException(e.getErrorCode(), e); @@ -324,6 +352,7 @@ private void clientWatch(WatchRequest request, StreamObserver res /** * 加锁 + * * @param request * @param responseObserver */ @@ -338,7 +367,9 @@ public void lock(LockRequest request, StreamObserver responseObser long clientId = request.getClientId(); if (clientId == 0) clientId = getRandomLong(); boolean locked = this.kvService.lock(request.getKey(), request.getTtl(), clientId); - response = builder.setHeader(getResponseHeader()).setSucceed(locked).setClientId(clientId).build(); + response = + builder.setHeader(getResponseHeader()).setSucceed(locked).setClientId(clientId) + .build(); } catch (PDException e) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getLockMethod(), request, responseObserver); @@ -395,7 +426,8 @@ public void isLocked(LockRequest request, StreamObserver responseO } catch (PDException e) { log.error("lock with error :", e); if (!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getIsLockedMethod(), request, responseObserver); + redirectToLeader(channel, KvServiceGrpc.getIsLockedMethod(), request, + responseObserver); return; } response = builder.setHeader(getResponseHeader(e)).build(); @@ -406,6 +438,7 @@ public void isLocked(LockRequest request, StreamObserver responseO /** * 解锁 + * * @param request * @param responseObserver */ @@ -420,10 +453,12 @@ public void unlock(LockRequest request, StreamObserver responseObs long clientId = request.getClientId(); if (clientId == 0) throw new PDException(-1, "incorrect clientId: 0"); boolean unlocked = this.kvService.unlock(request.getKey(), clientId); - response = builder.setHeader(getResponseHeader()).setSucceed(unlocked).setClientId(clientId).build(); + response = builder.setHeader(getResponseHeader()).setSucceed(unlocked) + .setClientId(clientId).build(); } catch (PDException e) { if (!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getUnlockMethod(), request, responseObserver); + redirectToLeader(channel, KvServiceGrpc.getUnlockMethod(), request, + responseObserver); return; } response = builder.setHeader(getResponseHeader(e)).build(); @@ -435,12 +470,14 @@ public void unlock(LockRequest request, StreamObserver responseObs /** * 锁续活 + * * @param request * @param responseObserver */ public void keepAlive(LockRequest request, StreamObserver responseObserver) { if (!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request, responseObserver); + redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request, + responseObserver); return; } LockResponse response; @@ -449,10 +486,13 @@ public void keepAlive(LockRequest request, StreamObserver response long clientId = request.getClientId(); if (clientId == 0) throw new PDException(-1, "incorrect clientId: 0"); boolean alive = this.kvService.keepAlive(request.getKey(), clientId); - response = builder.setHeader(getResponseHeader()).setSucceed(alive).setClientId(clientId).build(); + response = + builder.setHeader(getResponseHeader()).setSucceed(alive).setClientId(clientId) + .build(); } catch (PDException e) { if (!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request, responseObserver); + redirectToLeader(channel, 
KvServiceGrpc.getKeepAliveMethod(), request, + responseObserver); return; } response = builder.setHeader(getResponseHeader(e)).build(); @@ -463,6 +503,7 @@ public void keepAlive(LockRequest request, StreamObserver response /** * 带超时时间的put + * * @param request * @param responseObserver */ @@ -478,7 +519,8 @@ public void putTTL(TTLRequest request, StreamObserver responseObser response = builder.setHeader(getResponseHeader()).setSucceed(true).build(); } catch (PDException e) { if (!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getPutTTLMethod(), request, responseObserver); + redirectToLeader(channel, KvServiceGrpc.getPutTTLMethod(), request, + responseObserver); return; } response = builder.setHeader(getResponseHeader(e)).build(); @@ -489,12 +531,14 @@ public void putTTL(TTLRequest request, StreamObserver responseObser /** * 续活带有超时时间的key + * * @param request * @param responseObserver */ public void keepTTLAlive(TTLRequest request, StreamObserver responseObserver) { if (!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request, responseObserver); + redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request, + responseObserver); return; } TTLResponse response; @@ -504,7 +548,8 @@ public void keepTTLAlive(TTLRequest request, StreamObserver respons response = builder.setHeader(getResponseHeader()).setSucceed(true).build(); } catch (PDException e) { if (!isLeader()) { - redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request, responseObserver); + redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request, + responseObserver); return; } response = builder.setHeader(getResponseHeader(e)).build(); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java index aae14ec789..674f70c30e 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java @@ -1,5 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.service; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + import com.baidu.hugegraph.pd.config.PDConfig; import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.grpc.pulse.HgPdPulseGrpc; @@ -7,17 +33,10 @@ import com.baidu.hugegraph.pd.grpc.pulse.PulseResponse; import com.baidu.hugegraph.pd.meta.MetadataFactory; import com.baidu.hugegraph.pd.meta.QueueStore; -import org.apache.hugegraph.pd.pulse.PDPulseSubject; import com.baidu.hugegraph.pd.raft.RaftEngine; + import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -import org.lognet.springboot.grpc.GRpcService; -import org.springframework.beans.factory.annotation.Autowired; - -import java.util.Collections; -import java.util.List; -import java.util.function.Function; -import java.util.function.Supplier; /** * @author lynn.bond@hotmail.com created on 2021/11/4 @@ -27,13 +46,15 @@ @GRpcService public class PDPulseService extends HgPdPulseGrpc.HgPdPulseImplBase { + private static final Supplier> queueRetrieveFunction = + () -> Collections.emptyList(); + private static final Function queueDurableFunction = (e) -> true; + private static final Function queueRemoveFunction = (e) -> true; @Autowired private PDConfig pdConfig; - - private QueueStore queueStore=null; - - public PDPulseService(){ - PDPulseSubject.setQueueRetrieveFunction(()->getQueue()); + private QueueStore queueStore = null; + public PDPulseService() { + PDPulseSubject.setQueueRetrieveFunction(() -> getQueue()); PDPulseSubject.setQueueDurableFunction(getQueueDurableFunction()); PDPulseSubject.setQueueRemoveFunction(getQueueRemoveFunction()); } @@ -43,30 +64,27 @@ public StreamObserver pulse(StreamObserver response return PDPulseSubject.addObserver(responseObserver); } - private static Supplier> queueRetrieveFunction = () -> Collections.emptyList(); - private static Function queueDurableFunction = (e) -> true; - private static Function queueRemoveFunction = (e) -> true; - - - private Function getQueueRemoveFunction(){ - return itemId->{ - try{ + private Function getQueueRemoveFunction() { + return itemId -> { + try { this.getQueueStore().removeItem(itemId); return true; - }catch (Throwable t){ - log.error("Failed to remove item from store, item-id: "+itemId+", cause by:",t); + } catch (Throwable t) { + log.error("Failed to remove item from store, item-id: " + itemId + ", cause by:", + t); } return false; }; } - private Function getQueueDurableFunction(){ - return item->{ - try{ + private Function getQueueDurableFunction() { + return item -> { + try { this.getQueueStore().addItem(item); return true; - }catch (Throwable t){ - log.error("Failed to add item to store, item: "+item.toString()+", cause by:",t); + } catch (Throwable t) { + log.error("Failed to add item to store, item: " + item.toString() + ", cause by:", + t); } return false; }; @@ -76,25 +94,25 @@ private boolean isLeader() { return RaftEngine.getInstance().isLeader(); } - private List getQueue(){ + private List getQueue() { - if(!isLeader()){ + if (!isLeader()) { return Collections.emptyList(); } - try{ + try { return this.getQueueStore().getQueue(); - }catch (Throwable t){ - log.error("Failed to retrieve queue from QueueStore, cause by:",t); + } catch (Throwable t) { + log.error("Failed to retrieve queue 
from QueueStore, cause by:", t); } log.warn("Returned empty queue list."); return Collections.emptyList(); } - private QueueStore getQueueStore(){ - if(this.queueStore==null){ - this.queueStore=MetadataFactory.newQueueStore(pdConfig); + private QueueStore getQueueStore() { + if (this.queueStore == null) { + this.queueStore = MetadataFactory.newQueueStore(pdConfig); } return this.queueStore; } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java index a7c0876fdd..bc08d8f29d 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java @@ -1,11 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.service; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import org.apache.hugegraph.pd.model.RegistryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestResponse; +import org.springframework.beans.factory.InitializingBean; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + import com.baidu.hugegraph.pd.ConfigService; import com.baidu.hugegraph.pd.LogService; -import com.baidu.hugegraph.pd.StoreMonitorDataService; -import com.baidu.hugegraph.pd.TaskScheduleService; import com.baidu.hugegraph.pd.PartitionService; +import com.baidu.hugegraph.pd.StoreMonitorDataService; import com.baidu.hugegraph.pd.StoreNodeService; +import com.baidu.hugegraph.pd.TaskScheduleService; import com.baidu.hugegraph.pd.common.HgAssert; import com.baidu.hugegraph.pd.common.KVPair; import com.baidu.hugegraph.pd.common.PDException; @@ -15,34 +44,24 @@ import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; import com.baidu.hugegraph.pd.grpc.discovery.Query; import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; -import org.apache.hugegraph.pd.model.RegistryRestRequest; -import org.apache.hugegraph.pd.model.RegistryRestResponse; + import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.InitializingBean; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Service; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; @Slf4j @Service public class PDRestService implements InitializingBean { private static final String EMPTY_STRING = ""; + @Autowired + PDService pdService; + @Autowired + DiscoveryService discoveryService; private 
StoreNodeService storeNodeService; private PartitionService partitionService; private TaskScheduleService monitorService; private ConfigService configService; private LogService logService; private StoreMonitorDataService storeMonitorDataService; - @Autowired - PDService pdService; - @Autowired - DiscoveryService discoveryService; /** * 初始化 @@ -120,7 +139,8 @@ public List patrolPartitions() throws PDException { return monitorService.patrolPartitions(); } - public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws PDException { + public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws + PDException { return partitionService.getPartitionStats(graphName, partitionId); } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java index a8bb78050f..cc9f0a01e5 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @@ -1,5 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.service; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import javax.annotation.PostConstruct; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.license.LicenseVerifierService; +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.pulse.PulseListener; +import org.apache.hugegraph.pd.util.grpc.StreamObserverUtil; +import org.apache.hugegraph.pd.watch.PDWatchSubject; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.util.CollectionUtils; + import com.alipay.sofa.jraft.Status; import com.baidu.hugegraph.pd.ConfigService; import com.baidu.hugegraph.pd.IdService; @@ -33,14 +68,8 @@ import com.baidu.hugegraph.pd.grpc.watch.WatchGraphResponse; import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; import com.baidu.hugegraph.pd.grpc.watch.WatchType; - -import org.apache.hugegraph.pd.license.LicenseVerifierService; -import org.apache.hugegraph.pd.watch.PDWatchSubject; -import org.apache.hugegraph.pd.pulse.PDPulseSubject; -import org.apache.hugegraph.pd.pulse.PulseListener; import com.baidu.hugegraph.pd.raft.RaftEngine; import com.baidu.hugegraph.pd.raft.RaftStateListener; -import org.apache.hugegraph.pd.util.grpc.StreamObserverUtil; import io.grpc.CallOptions; import io.grpc.ManagedChannel; @@ -50,18 +79,6 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.io.FileUtils; -import org.lognet.springboot.grpc.GRpcService; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.util.CollectionUtils; - -import javax.annotation.PostConstruct; -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - @Slf4j @GRpcService public class PDService extends PDGrpc.PDImplBase implements RaftStateListener { @@ -80,8 +97,9 @@ public class PDService extends PDGrpc.PDImplBase implements RaftStateListener { private StoreMonitorDataService storeMonitorDataService; - private Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( + private final Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build(); + private ManagedChannel channel; private Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) { Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( @@ -91,7 +109,8 @@ private Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) { private Pdpb.ResponseHeader newErrorHeader(PDException e) { Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( - Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage())).build(); + Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage())) + .build(); return header; } @@ -111,7 +130,7 @@ public ConfigService getConfigService() { return configService; } - public StoreMonitorDataService getStoreMonitorDataService(){ + public StoreMonitorDataService getStoreMonitorDataService() { return this.storeMonitorDataService; } @@ -181,42 +200,52 @@ private PartitionHeartbeatResponse.Builder getBuilder(Metapb.Partition partition } @Override - public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException { + public void changeShard(Metapb.Partition 
partition, ChangeShard changeShard) throws + PDException { PDPulseSubject.notifyClient(getBuilder(partition).setChangeShard(changeShard)); } @Override - public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws - PDException { - PDPulseSubject.notifyClient(getBuilder(partition).setTransferLeader(transferLeader)); + public void transferLeader(Metapb.Partition partition, + TransferLeader transferLeader) throws + PDException { + PDPulseSubject.notifyClient( + getBuilder(partition).setTransferLeader(transferLeader)); } @Override - public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws - PDException { - PDPulseSubject.notifyClient(getBuilder(partition).setSplitPartition(splitPartition)); + public void splitPartition(Metapb.Partition partition, + SplitPartition splitPartition) throws + PDException { + PDPulseSubject.notifyClient( + getBuilder(partition).setSplitPartition(splitPartition)); } + @Override public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws - PDException { + PDException { PDPulseSubject.notifyClient(getBuilder(partition).setDbCompaction(dbCompaction)); } @Override - public void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException { + public void movePartition(Metapb.Partition partition, + MovePartition movePartition) throws PDException { PDPulseSubject.notifyClient(getBuilder(partition).setMovePartition(movePartition)); } @Override - public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException { - PDPulseSubject.notifyClient(getBuilder(partition).setCleanPartition(cleanPartition)); + public void cleanPartition(Metapb.Partition partition, + CleanPartition cleanPartition) throws PDException { + PDPulseSubject.notifyClient( + getBuilder(partition).setCleanPartition(cleanPartition)); } @Override - public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) + public void changePartitionKeyRange(Metapb.Partition partition, + PartitionKeyRange partitionKeyRange) throws PDException { PDPulseSubject.notifyClient(getBuilder(partition).setKeyRange(partitionKeyRange)); } @@ -234,7 +263,8 @@ public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) @Override public void onPartitionRemoved(Metapb.Partition partition) { - PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.DEL, partition.getGraphName(), + PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.DEL, + partition.getGraphName(), partition.getId()); } @@ -242,21 +272,23 @@ public void onPartitionRemoved(Metapb.Partition partition) { storeNodeService.addShardGroupStatusListener(new ShardGroupStatusListener() { @Override - public void onShardListChanged(Metapb.ShardGroup shardGroup, Metapb.ShardGroup newShardGroup) { + public void onShardListChanged(Metapb.ShardGroup shardGroup, + Metapb.ShardGroup newShardGroup) { // invoked before change, saved to db and update cache. 
if (newShardGroup == null) { - PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.DEL, shardGroup.getId(), - shardGroup); + PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.DEL, + shardGroup.getId(), + shardGroup); } else { PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.ALTER, - shardGroup.getId(), newShardGroup); + shardGroup.getId(), newShardGroup); } } @Override public void onShardListOp(Metapb.ShardGroup shardGroup) { PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.USER_DEFINED, - shardGroup.getId(), shardGroup); + shardGroup.getId(), shardGroup); } }); @@ -293,7 +325,8 @@ public void onGraphChange(Metapb.Graph graph, @Override public void onStoreRaftChanged(Metapb.Store store) { - PDWatchSubject.notifyNodeChange(NodeEventType.NODE_EVENT_TYPE_NODE_RAFT_CHANGE, "", store.getId()); + PDWatchSubject.notifyNodeChange(NodeEventType.NODE_EVENT_TYPE_NODE_RAFT_CHANGE, "", + store.getId()); } }); storeNodeService.init(partitionService); @@ -321,7 +354,8 @@ public void registerStore(Pdpb.RegisterStoreRequest request, Pdpb.RegisterStoreResponse response = null; try { Metapb.Store store = storeNodeService.register(request.getStore()); - response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(okHeader).setStoreId(store.getId()) + response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(okHeader) + .setStoreId(store.getId()) .build(); } catch (PDException e) { response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); @@ -346,7 +380,8 @@ public void getStore(Pdpb.GetStoreRequest request, Pdpb.GetStoreResponse response = null; try { Metapb.Store store = storeNodeService.getStore(request.getStoreId()); - response = Pdpb.GetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); + response = + Pdpb.GetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); } catch (PDException e) { response = Pdpb.GetStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); log.error("{} getStore exception: {}", StreamObserverUtil.getRemoteIP(observer), e); @@ -361,7 +396,8 @@ public void getStore(Pdpb.GetStoreRequest request, * 修改Store状态等信息. 
* */ - public void setStore(Pdpb.SetStoreRequest request, StreamObserver observer) { + public void setStore(Pdpb.SetStoreRequest request, + StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getSetStoreMethod(), request, observer); return; @@ -372,15 +408,15 @@ public void setStore(Pdpb.SetStoreRequest request, StreamObserver activeStores = storeNodeService.getActiveStores(); if (lastStore.getState() == Metapb.StoreState.Up - && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { + && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, - "The number of active stores is less then " + pdConfig.getMinStoreCount()); + "The number of active stores is less then " + + pdConfig.getMinStoreCount()); } - if (!storeNodeService.checkStoreCanOffline(request.getStore())){ + if (!storeNodeService.checkStoreCanOffline(request.getStore())) { throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, - "check activeStores or online shardsList size"); + "check activeStores or online shardsList size"); } - if (lastStore.getState() == Metapb.StoreState.Exiting){ + if (lastStore.getState() == Metapb.StoreState.Exiting) { // 如果已经是下线中的状态,则不作进一步处理 throw new PDException(Pdpb.ErrorType.Store_Tombstone_Doing_VALUE, - "Downline is in progress, do not resubmit"); + "Downline is in progress, do not resubmit"); } Map resultMap = taskService.canAllPartitionsMovedOut(lastStore); - if ((boolean) resultMap.get("flag") == true) { + if ((boolean) resultMap.get("flag")) { if (resultMap.get("current_store_is_online") != null - && (boolean) resultMap.get("current_store_is_online") == true) { + && (boolean) resultMap.get("current_store_is_online")) { log.info("updateStore removeActiveStores store {}", store.getId()); // 将在线的store的状态设置为下线中,等待副本迁移 store = Metapb.Store.newBuilder(lastStore) - .setState(Metapb.StoreState.Exiting).build(); + .setState(Metapb.StoreState.Exiting).build(); // 进行分区迁移操作 - taskService.movePartitions((Map>) resultMap.get("movedPartitions")); - }else { + taskService.movePartitions((Map>) resultMap.get( + "movedPartitions")); + } else { // store已经离线的,不做副本迁移 // 将状态改为Tombstone } - }else{ + } else { throw new PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, - "the resources on other stores may be not enough to store " + - "the partitions of current store!"); + "the resources on other stores may be not enough to " + + "store " + + "the partitions of current store!"); } } // 替换license 都走grpc store = storeNodeService.updateStore(store); - response = Pdpb.SetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); + response = + Pdpb.SetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); } catch (PDException e) { response = Pdpb.SetStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); log.error("setStore exception: ", e); @@ -491,8 +533,9 @@ public void getAllStores(Pdpb.GetAllStoresRequest request, } else { stores = storeNodeService.getStores(request.getGraphName()); } - response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) - .build(); + response = + Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) + .build(); } catch (PDException e) { response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(newErrorHeader(e)).build(); log.error("getAllStores exception: ", e); @@ -518,14 +561,15 @@ public void storeHeartbeat(Pdpb.StoreHeartbeatRequest request, if (this.pdConfig.getStore().isMonitorDataEnabled()) { 
try { storeMonitorDataService.saveMonitorData(stats); - }catch (PDException e){ + } catch (PDException e) { log.error("save status failed, state:{}", stats); } // remove system_metrics stats = Metapb.StoreStats.newBuilder() - .mergeFrom(request.getStats()) - .clearField(Metapb.StoreStats.getDescriptor().findFieldByName("system_metrics")) - .build(); + .mergeFrom(request.getStats()) + .clearField(Metapb.StoreStats.getDescriptor().findFieldByName( + "system_metrics")) + .build(); } Pdpb.StoreHeartbeatResponse response = null; @@ -534,7 +578,8 @@ public void storeHeartbeat(Pdpb.StoreHeartbeatRequest request, response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(okHeader) .setClusterStats(clusterStats).build(); } catch (PDException e) { - response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = + Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(newErrorHeader(e)).build(); log.error("storeHeartbeat exception: ", e); } catch (Exception e2) { response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader( @@ -559,9 +604,10 @@ public void getPartition(Pdpb.GetPartitionRequest request, } Pdpb.GetPartitionResponse response = null; try { - Metapb.PartitionShard partShard = partitionService.getPartitionShard(request.getGraphName(), - request.getKey() - .toByteArray()); + Metapb.PartitionShard partShard = + partitionService.getPartitionShard(request.getGraphName(), + request.getKey() + .toByteArray()); response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) .setPartition(partShard.getPartition()) .setLeader(partShard.getLeader()).build(); @@ -587,8 +633,9 @@ public void getPartitionByCode(Pdpb.GetPartitionByCodeRequest request, } Pdpb.GetPartitionResponse response = null; try { - Metapb.PartitionShard partShard = partitionService.getPartitionByCode(request.getGraphName(), - request.getCode()); + Metapb.PartitionShard partShard = + partitionService.getPartitionByCode(request.getGraphName(), + request.getCode()); response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) .setPartition(partShard.getPartition()) .setLeader(partShard.getLeader()).build(); @@ -600,7 +647,6 @@ public void getPartitionByCode(Pdpb.GetPartitionByCodeRequest request, observer.onCompleted(); } - /** * 根据partition_id查找partition */ @@ -613,11 +659,14 @@ public void getPartitionByID(Pdpb.GetPartitionByIDRequest request, } Pdpb.GetPartitionResponse response = null; try { - Metapb.PartitionShard partShard = partitionService.getPartitionShardById(request.getGraphName(), - request.getPartitionId()); + Metapb.PartitionShard partShard = + partitionService.getPartitionShardById(request.getGraphName(), + request.getPartitionId()); if (partShard == null) { throw new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, - String.format("partition: %s-%s not found", request.getGraphName(), request.getPartitionId())); + String.format("partition: %s-%s not found", + request.getGraphName(), + request.getPartitionId())); } response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) .setPartition(partShard.getPartition()) @@ -647,7 +696,8 @@ public void updatePartition(Pdpb.UpdatePartitionRequest request, response = Pdpb.UpdatePartitionResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { - response = Pdpb.UpdatePartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = + Pdpb.UpdatePartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); log.error("update partition exception: ", e); } observer.onNext(response); 
@@ -669,8 +719,10 @@ public void delPartition(Pdpb.DelPartitionRequest request, Metapb.Partition partition = partitionService.getPartitionById(request.getGraphName(), request.getPartitionId()); if (partition != null) { - partitionService.removePartition(request.getGraphName(), (int) request.getPartitionId()); - response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader).setPartition(partition) + partitionService.removePartition(request.getGraphName(), + (int) request.getPartitionId()); + response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partition) .build(); } else { response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader).build(); @@ -695,15 +747,17 @@ public void scanPartitions(Pdpb.ScanPartitionsRequest request, } Pdpb.ScanPartitionsResponse response = null; try { - List partShards = partitionService.scanPartitions(request.getGraphName(), - request.getStartKey() - .toByteArray(), - request.getEndKey() - .toByteArray()); + List partShards = + partitionService.scanPartitions(request.getGraphName(), + request.getStartKey() + .toByteArray(), + request.getEndKey() + .toByteArray()); response = Pdpb.ScanPartitionsResponse.newBuilder().setHeader(okHeader) .addAllPartitions(partShards).build(); } catch (PDException e) { - response = Pdpb.ScanPartitionsResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = + Pdpb.ScanPartitionsResponse.newBuilder().setHeader(newErrorHeader(e)).build(); log.error("scanPartitions exception: ", e); } observer.onNext(response); @@ -725,7 +779,8 @@ public void getGraph(Pdpb.GetGraphRequest request, try { Metapb.Graph graph = partitionService.getGraph(graphName); if (graph != null) { - response = Pdpb.GetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); + response = Pdpb.GetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph) + .build(); } else { Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.NOT_FOUND).build()).build(); @@ -752,7 +807,8 @@ public void setGraph(Pdpb.SetGraphRequest request, Metapb.Graph graph = request.getGraph(); try { graph = partitionService.updateGraph(graph); - response = Pdpb.SetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); + response = + Pdpb.SetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); } catch (PDException e) { log.error("setGraph exception: ", e); response = Pdpb.SetGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); @@ -775,8 +831,10 @@ public void delGraph(Pdpb.DelGraphRequest request, String graphName = request.getGraphName(); try { Metapb.Graph graph = partitionService.delGraph(graphName); - if (graph != null) - response = Pdpb.DelGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); + if (graph != null) { + response = Pdpb.DelGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph) + .build(); + } } catch (PDException e) { response = Pdpb.DelGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); log.error("getGraph exception: ", e); @@ -805,19 +863,22 @@ public void queryPartitions(Pdpb.QueryPartitionsRequest request, if (query.hasPartitionId() && partition.getId() != query.getPartitionId()) { continue; } - if (query.hasGraphName() && !partition.getGraphName().equals(query.getGraphName())) { + if (query.hasGraphName() && + !partition.getGraphName().equals(query.getGraphName())) { continue; } long storeId = query.getStoreId(); if (query.hasStoreId() && 
query.getStoreId() != 0) { try { - storeNodeService.getShardGroup(partition.getId()).getShardsList().forEach(shard -> { - if (shard.getStoreId() == storeId) { - result.add(partition); - } - }); - }catch (PDException e){ - log.error("query partitions error, req:{}, error:{}", request, e.getMessage()); + storeNodeService.getShardGroup(partition.getId()).getShardsList() + .forEach(shard -> { + if (shard.getStoreId() == storeId) { + result.add(partition); + } + }); + } catch (PDException e) { + log.error("query partitions error, req:{}, error:{}", request, + e.getMessage()); } } else { result.add(partition); @@ -825,14 +886,16 @@ public void queryPartitions(Pdpb.QueryPartitionsRequest request, } } Pdpb.QueryPartitionsResponse response = Pdpb.QueryPartitionsResponse.newBuilder() - .addAllPartitions(result).build(); + .addAllPartitions( + result).build(); observer.onNext(response); observer.onCompleted(); } @Override - public void getId(Pdpb.GetIdRequest request, StreamObserver responseObserver) { + public void getId(Pdpb.GetIdRequest request, + StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(PDGrpc.getGetIdMethod(), request, responseObserver); return; @@ -845,14 +908,16 @@ public void getId(Pdpb.GetIdRequest request, StreamObserver log.error("getId exception: ", e); return; } - Pdpb.GetIdResponse response = Pdpb.GetIdResponse.newBuilder().setId(id).setDelta(request.getDelta()) - .build(); + Pdpb.GetIdResponse response = + Pdpb.GetIdResponse.newBuilder().setId(id).setDelta(request.getDelta()) + .build(); responseObserver.onNext(response); responseObserver.onCompleted(); } @Override - public void resetId(Pdpb.ResetIdRequest request, StreamObserver responseObserver) { + public void resetId(Pdpb.ResetIdRequest request, + StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(PDGrpc.getResetIdMethod(), request, responseObserver); return; @@ -882,11 +947,13 @@ public void getMembers(Pdpb.GetMembersRequest request, try { response = Pdpb.GetMembersResponse.newBuilder() .addAllMembers(RaftEngine.getInstance().getMembers()) - .setLeader(RaftEngine.getInstance().getLocalMember()).build(); + .setLeader(RaftEngine.getInstance().getLocalMember()) + .build(); } catch (Exception e) { log.error("getMembers exception: ", e); - response = Pdpb.GetMembersResponse.newBuilder().setHeader(newErrorHeader(-1, e.getMessage())) + response = Pdpb.GetMembersResponse.newBuilder() + .setHeader(newErrorHeader(-1, e.getMessage())) .build(); } observer.onNext(response); @@ -904,8 +971,9 @@ public void getStoreStatus(Pdpb.GetAllStoresRequest request, try { List stores = null; stores = storeNodeService.getStoreStatus(request.getExcludeOfflineStores()); - response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) - .build(); + response = + Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) + .build(); } catch (PDException e) { response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(newErrorHeader(e)).build(); log.error("getAllStores exception: ", e); @@ -928,8 +996,9 @@ public void getPDConfig(Pdpb.GetPDConfigRequest request, try { Metapb.PDConfig pdConfig = null; pdConfig = configService.getPDConfig(request.getVersion()); - response = Pdpb.GetPDConfigResponse.newBuilder().setHeader(okHeader).setPdConfig(pdConfig) - .build(); + response = + Pdpb.GetPDConfigResponse.newBuilder().setHeader(okHeader).setPdConfig(pdConfig) + .build(); } catch (PDException e) { response = 
Pdpb.GetPDConfigResponse.newBuilder().setHeader(newErrorHeader(e)).build(); } @@ -949,29 +1018,30 @@ public void setPDConfig(Pdpb.SetPDConfigRequest request, } Pdpb.SetPDConfigResponse response = null; try { - if (request.getPdConfig().getShardCount() % 2 != 1){ + if (request.getPdConfig().getShardCount() % 2 != 1) { // 副本数奇偶校验 throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, - "shard count must be an odd number!"); + "shard count must be an odd number!"); } if (request.getPdConfig().getShardCount() > - storeNodeService.getActiveStores().size()){ + storeNodeService.getActiveStores().size()) { // 不能大于活跃的store数量 throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, - "shard count can't be greater than the number of active stores!"); + "shard count can't be greater than the number of active " + + "stores!"); } int oldShardCount = configService.getPDConfig().getShardCount(); int newShardCount = request.getPdConfig().getShardCount(); - if (newShardCount > oldShardCount){ + if (newShardCount > oldShardCount) { // 如果副本数增大,则检查store内部的资源是否够用 - if (! isResourceEnough(oldShardCount, newShardCount)) { + if (!isResourceEnough(oldShardCount, newShardCount)) { throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, - "There is not enough disk space left!"); + "There is not enough disk space left!"); } - if (! checkShardCount(newShardCount)) { + if (!checkShardCount(newShardCount)) { throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, - "the cluster can't support so many shard count!"); + "the cluster can't support so many shard count!"); } } configService.setPDConfig(request.getPdConfig()); @@ -1032,7 +1102,8 @@ public void setGraphSpace(Pdpb.SetGraphSpaceRequest request, * 数据分裂 * */ - public void splitData(Pdpb.SplitDataRequest request, StreamObserver observer) { + public void splitData(Pdpb.SplitDataRequest request, + StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getSplitDataMethod(), request, observer); return; @@ -1052,15 +1123,17 @@ public void splitData(Pdpb.SplitDataRequest request, StreamObserver observer) { + public void splitGraphData(Pdpb.SplitGraphDataRequest request, + StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getSplitGraphDataMethod(), request, observer); return; } logService.insertLog(LogService.PARTITION_CHANGE, "splitGraphData", request); - Pdpb.SplitDataResponse response ; + Pdpb.SplitDataResponse response; try { - partitionService.splitPartition(partitionService.getGraph(request.getGraphName()), request.getToCount()); + partitionService.splitPartition(partitionService.getGraph(request.getGraphName()), + request.getToCount()); response = Pdpb.SplitDataResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { log.error("splitGraphData exception {}", e); @@ -1074,7 +1147,7 @@ public void splitGraphData(Pdpb.SplitGraphDataRequest request, StreamObserver observer) { + StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getMovePartitionMethod(), request, observer); return; @@ -1106,12 +1179,12 @@ public void getClusterStats(Pdpb.GetClusterStatsRequest request, } Pdpb.GetClusterStatsResponse response = null; response = Pdpb.GetClusterStatsResponse.newBuilder().setHeader(okHeader) - .setCluster(storeNodeService.getClusterStats()).build(); + .setCluster(storeNodeService.getClusterStats()) + .build(); observer.onNext(response); observer.onCompleted(); } - /** *
      * Report the execution results of tasks such as partition splits
@@ -1152,35 +1225,37 @@ public void getPartitionStats(Pdpb.GetPartitionStatsRequest request,
                                                      .setPartitionStats(stats).build();
         } catch (PDException e) {
             log.error("getPartitionStats exception {}", e);
-            response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(newErrorHeader(e))
+                                                     .build();
         }
 
         observer.onNext(response);
         observer.onCompleted();
     }
 
-
-    private ManagedChannel channel;
-
     public boolean isLeader() {
         return RaftEngine.getInstance().isLeader();
     }
 
     private > void redirectToLeader(
-            MethodDescriptor method, ReqT req, io.grpc.stub.StreamObserver observer) {
+            MethodDescriptor method, ReqT req,
+            io.grpc.stub.StreamObserver observer) {
         try {
             if (channel == null) {
                 synchronized (this) {
                     if (channel == null) {
                         channel = ManagedChannelBuilder
-                                .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()).usePlaintext()
+                                .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress())
+                                .usePlaintext()
                                 .build();
                     }
                 }
-                log.info("Grpc get leader address {}", RaftEngine.getInstance().getLeaderGrpcAddress());
+                log.info("Grpc get leader address {}",
+                         RaftEngine.getInstance().getLeaderGrpcAddress());
             }
 
-            io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), req,
+            io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT),
+                                                    req,
                                                     observer);
         } catch (Exception e) {
             e.printStackTrace();
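
The hunk above only re-wraps redirectToLeader(), but the pattern it reformats is the heart of request forwarding in PD: a node that is not the raft leader lazily opens one shared gRPC channel to the leader's address and replays the unary call, piping the leader's reply straight back to the caller's observer. The sketch below is illustrative only; the class name LeaderForwarder and the leaderAddress field are hypothetical, and it uses a volatile field where the patched code double-checks a plain field.

```java
import io.grpc.CallOptions;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.MethodDescriptor;
import io.grpc.stub.ClientCalls;
import io.grpc.stub.StreamObserver;

// Illustrative sketch, not the patched class: forward a unary call to the leader.
public class LeaderForwarder {

    private final String leaderAddress;      // hypothetical, e.g. "10.0.0.1:8686"
    private volatile ManagedChannel channel;  // lazily created, shared by all calls

    public LeaderForwarder(String leaderAddress) {
        this.leaderAddress = leaderAddress;
    }

    public <ReqT, RespT> void forward(MethodDescriptor<ReqT, RespT> method,
                                      ReqT request,
                                      StreamObserver<RespT> observer) {
        if (channel == null) {
            synchronized (this) {
                if (channel == null) {
                    // One plaintext channel to the current leader is enough;
                    // gRPC multiplexes concurrent calls over it.
                    channel = ManagedChannelBuilder.forTarget(leaderAddress)
                                                   .usePlaintext()
                                                   .build();
                }
            }
        }
        // Re-issue the same method against the leader and let the leader's
        // response flow back through the original observer.
        ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT),
                                   request, observer);
    }
}
```

Note that in the lines shown the cached channel is never invalidated, so a leadership change would presumably require rebuilding it.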
@@ -1200,14 +1275,17 @@ public void changePeerList(Pdpb.ChangePeerListRequest request,
         Pdpb.getChangePeerListResponse response;
         try {
             Status status = RaftEngine.getInstance().changePeerList(request.getPeerList());
-            Pdpb.ResponseHeader responseHeader = status.isOk() ? okHeader : newErrorHeader(status.getCode(),
-                                                                                           status.getErrorMsg());
-            response = Pdpb.getChangePeerListResponse.newBuilder().setHeader(responseHeader).build();
+            Pdpb.ResponseHeader responseHeader =
+                    status.isOk() ? okHeader : newErrorHeader(status.getCode(),
+                                                              status.getErrorMsg());
+            response =
+                    Pdpb.getChangePeerListResponse.newBuilder().setHeader(responseHeader).build();
 
         } catch (Exception e) {
             log.error("changePeerList exception: ", e);
             response = Pdpb.getChangePeerListResponse.newBuilder()
-                                                     .setHeader(newErrorHeader(-1, e.getMessage())).build();
+                                                     .setHeader(newErrorHeader(-1, e.getMessage()))
+                                                     .build();
         }
         observer.onNext(response);
         observer.onCompleted();
@@ -1247,14 +1325,16 @@ public void balanceLeaders(Pdpb.BalanceLeadersRequest request,
             response = Pdpb.BalanceLeadersResponse.newBuilder().setHeader(okHeader).build();
         } catch (PDException e) {
             log.error("balance Leaders exception {}", e);
-            response = Pdpb.BalanceLeadersResponse.newBuilder().setHeader(newErrorHeader(e)).build();
+            response =
+                    Pdpb.BalanceLeadersResponse.newBuilder().setHeader(newErrorHeader(e)).build();
         }
         observer.onNext(response);
         observer.onCompleted();
     }
 
     @Override
-    public void putLicense(PutLicenseRequest request, StreamObserver responseObserver) {
+    public void putLicense(PutLicenseRequest request,
+                           StreamObserver responseObserver) {
         PutLicenseResponse response = null;
         boolean moved = false;
         String bakPath = pdConfig.getLicensePath() + "-bak";
@@ -1279,12 +1359,14 @@ public void putLicense(PutLicenseRequest request, StreamObserver observer) {
@@ -1296,42 +1378,44 @@ public void delStore(Pdpb.DetStoreRequest request,
         Pdpb.DetStoreResponse response = null;
         try {
             Metapb.Store store = storeNodeService.getStore(storeId);
-            if (Metapb.StoreState.Tombstone == store.getState()){
+            if (Metapb.StoreState.Tombstone == store.getState()) {
                // Only stores that have already been taken offline (Tombstone) can be deleted
                 storeNodeService.removeStore(storeId);
                 response = Pdpb.DetStoreResponse.newBuilder()
-                        .setHeader(okHeader)
-                        .setStore(store)
-                        .build();
-            }else{
+                                                .setHeader(okHeader)
+                                                .setStore(store)
+                                                .build();
+            } else {
                 throw new PDException(Pdpb.ErrorType.STORE_PROHIBIT_DELETION_VALUE,
-                        "the store can't be deleted, please check store state!");
+                                      "the store can't be deleted, please check store state!");
             }
         } catch (PDException e) {
             log.error("delete store exception: {}", e);
             response = Pdpb.DetStoreResponse.newBuilder()
-                    .setHeader(newErrorHeader(e)).build();
+                                            .setHeader(newErrorHeader(e)).build();
         }
         observer.onNext(response);
         observer.onCompleted();
     }
 
     /**
-     *  check the shard whether exceed the cluster's max shard group count
+     * check whether the new shard count exceeds the cluster's maximum supported shard count
      *
      * @param newShardCount new shard count
      * @return true if can be set to new shard count, otherwise false
      */
     private boolean checkShardCount(int newShardCount) {
         try {
-            var maxCount = pdConfig.getPartition().getMaxShardsPerStore() * storeNodeService.getActiveStores().size() /
-                    pdConfig.getConfigService().getPartitionCount();
+            var maxCount = pdConfig.getPartition().getMaxShardsPerStore() *
+                           storeNodeService.getActiveStores().size() /
+                           pdConfig.getConfigService().getPartitionCount();
 
             if (newShardCount > maxCount) {
-                log.error("new shard count :{} exceed current cluster max shard count {}", newShardCount, maxCount);
+                log.error("new shard count :{} exceed current cluster max shard count {}",
+                          newShardCount, maxCount);
                 return false;
             }
-        }catch (Exception e) {
+        } catch (Exception e) {
             log.error("checkShardCount: {}", e.getMessage());
         }
         return true;
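
checkShardCount() above encodes a simple capacity bound: with activeStores live stores, at most maxShardsPerStore shards per store, and partitionCount partitions, the cluster can carry roughly activeStores * maxShardsPerStore / partitionCount replicas per partition, so a requested shard count above that bound is rejected. A minimal standalone sketch of the same bound follows; the class, method, and parameter names are hypothetical, not the patched API.

```java
// Illustrative sketch of the capacity bound used by checkShardCount().
public final class ShardCountBound {

    // e.g. 3 stores * 12 shards-per-store / 12 partitions => at most 3 replicas
    public static boolean canSupport(int newShardCount, int activeStores,
                                     int maxShardsPerStore, int partitionCount) {
        int maxCount = maxShardsPerStore * activeStores / partitionCount;
        return newShardCount <= maxCount;
    }

    private ShardCountBound() {
    }
}
```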
@@ -1362,16 +1446,12 @@ public boolean isResourceEnough(int oldShardCount, int newShardCount) {
             newDataSize = (long) Math.ceil(currentDataSize * expansionRatio);
            // Sum up the available space of all active stores
             List activeStores = storeNodeService.getActiveStores();
-            for (Metapb.Store store : activeStores){
+            for (Metapb.Store store : activeStores) {
                 Metapb.StoreStats storeStats = store.getStats();
                 totalAvaible += storeStats.getAvailable();
             }
            // Check whether resources are sufficient assuming partitions are evenly distributed
-            if (totalAvaible > newDataSize - currentDataSize){
-                return true;
-            }else{
-                return false;
-            }
+            return totalAvaible > newDataSize - currentDataSize;
         } catch (PDException e) {
             e.printStackTrace();
             return false;
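
After the cleanup in this hunk, isResourceEnough() reduces to a single comparison: estimate the post-expansion data size from the replica-count ratio and require the summed free space of the active stores to cover the increase. A minimal sketch of that comparison is below; the helper and parameter names are hypothetical, and it assumes the expansion ratio is simply newShardCount / oldShardCount (the ratio's actual computation sits outside the lines shown here).

```java
// Illustrative sketch of the disk-space check behind isResourceEnough().
public final class ResourceCheck {

    public static boolean isEnough(long currentDataSize, long totalAvailable,
                                   int oldShardCount, int newShardCount) {
        // Assumption: growing from oldShardCount to newShardCount replicas
        // scales the stored data linearly.
        double expansionRatio = (double) newShardCount / oldShardCount;
        long newDataSize = (long) Math.ceil(currentDataSize * expansionRatio);
        // The extra data must fit into the free space left on active stores.
        return totalAvailable > newDataSize - currentDataSize;
    }

    private ResourceCheck() {
    }
}
```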
@@ -1383,7 +1463,8 @@ public boolean isResourceEnough(int oldShardCount, int newShardCount) {
     * Perform compaction on rocksdb
      * 
*/ - public void dbCompaction(Pdpb.DbCompactionRequest request, StreamObserver observer) { + public void dbCompaction(Pdpb.DbCompactionRequest request, + StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getDbCompactionMethod(), request, observer); return; @@ -1410,13 +1491,14 @@ public void combineCluster(Pdpb.CombineClusterRequest request, return; } - Pdpb.CombineClusterResponse response ; + Pdpb.CombineClusterResponse response; - try{ + try { partitionService.combinePartition(request.getToCount()); response = Pdpb.CombineClusterResponse.newBuilder().setHeader(okHeader).build(); - }catch (PDException e){ - response = Pdpb.CombineClusterResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } catch (PDException e) { + response = + Pdpb.CombineClusterResponse.newBuilder().setHeader(newErrorHeader(e)).build(); } observer.onNext(response); @@ -1431,12 +1513,12 @@ public void combineGraph(Pdpb.CombineGraphRequest request, return; } - Pdpb.CombineGraphResponse response ; + Pdpb.CombineGraphResponse response; - try{ + try { partitionService.combineGraphPartition(request.getGraphName(), request.getToCount()); response = Pdpb.CombineGraphResponse.newBuilder().setHeader(okHeader).build(); - }catch (PDException e){ + } catch (PDException e) { response = Pdpb.CombineGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); } @@ -1458,14 +1540,15 @@ public void deleteShardGroup(Pdpb.DeleteShardGroupRequest request, storeNodeService.deleteShardGroup(request.getGroupId()); response = Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { - response = Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = + Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); } observer.onNext(response); observer.onCompleted(); } public void getShardGroup(Pdpb.GetShardGroupRequest request, - io.grpc.stub.StreamObserver observer) { + io.grpc.stub.StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getGetShardGroupMethod(), request, observer); return; @@ -1474,7 +1557,8 @@ public void getShardGroup(Pdpb.GetShardGroupRequest request, // TODO try { Metapb.ShardGroup shardGroup = storeNodeService.getShardGroup(request.getGroupId()); - response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(okHeader).setShardGroup(shardGroup).build(); + response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(okHeader) + .setShardGroup(shardGroup).build(); } catch (PDException e) { log.error("getPartitionStats exception", e); response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); @@ -1494,7 +1578,7 @@ public void updateShardGroup(Pdpb.UpdateShardGroupRequest request, Pdpb.UpdateShardGroupResponse response; var group = request.getShardGroup(); storeNodeService.updateShardGroup(group.getId(), group.getShardsList(), - group.getVersion(), group.getConfVer()); + group.getVersion(), group.getConfVer()); response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(okHeader).build(); @@ -1503,7 +1587,8 @@ public void updateShardGroup(Pdpb.UpdateShardGroupRequest request, } @Override - public void updateShardGroupOp(Pdpb.ChangeShardRequest request, StreamObserver observer) { + public void updateShardGroupOp(Pdpb.ChangeShardRequest request, + StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getUpdateShardGroupOpMethod(), request, observer); return; @@ -1524,7 +1609,8 @@ public void updateShardGroupOp(Pdpb.ChangeShardRequest 
request, StreamObserver

observer) { + public void changeShard(Pdpb.ChangeShardRequest request, + StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getChangeShardMethod(), request, observer); return; @@ -1535,7 +1621,7 @@ public void changeShard(Pdpb.ChangeShardRequest request, StreamObserver> targetsCache = HgMapCache.expiredOf(24 * 60 * 60 * 1000);// expired after 24H. + private final HgMapCache> targetsCache = + HgMapCache.expiredOf(24 * 60 * 60 * 1000);// expired after 24H. private RegistryService getRegister() { if (this.register == null) { @@ -60,7 +79,8 @@ private RegistryService getRegister() { public List getAllTargets() { List res = new LinkedList<>(); - List buf = this.toModels(this.getRegister().getNodes(Query.newBuilder().build())); + List buf = + this.toModels(this.getRegister().getNodes(Query.newBuilder().build())); if (buf != null) { res.addAll(buf); @@ -84,7 +104,8 @@ public List getTargets(String appName) { case "store": return Collections.singletonList(this.getStoreTargets()); default: - return this.toModels(this.getRegister().getNodes(Query.newBuilder().setAppName(appName).build())); + return this.toModels(this.getRegister() + .getNodes(Query.newBuilder().setAppName(appName).build())); } } @@ -97,7 +118,8 @@ private PromTargetsModel getStoreTargets() { } private PromTargetsModel setTargets(PromTargetsModel model, Supplier> supplier) { - return model.setTargets(supplier.get()).setClusterId(String.valueOf(pdConfig.getClusterId())); + return model.setTargets(supplier.get()) + .setClusterId(String.valueOf(pdConfig.getClusterId())); } /* to prevent the failure of connection between pd and store or pd and pd.*/ @@ -129,37 +151,37 @@ private List toModels(NodeInfos info) { List res = nodes.stream().map(e -> { - Map labels = e.getLabelsMap(); - - String target = labels.get("target"); - if (HgAssert.isInvalid(target)) return null; - - PromTargetsModel model = PromTargetsModel.of(); - model.addTarget(target); - model.addLabel("__app_name", e.getAppName()); - - labels.forEach((k, v) -> { - k = k.trim(); - switch (k) { - case "metrics": - model.setMetricsPath(v.trim()); - break; - case "scheme": - model.setScheme(v.trim()); - break; - default: - if (k.startsWith("__")) { - model.addLabel(k, v); - } - - } - }); - - - return model; - }) - .filter(e -> e != null) - .collect(Collectors.toList()); + Map labels = e.getLabelsMap(); + + String target = labels.get("target"); + if (HgAssert.isInvalid(target)) return null; + + PromTargetsModel model = PromTargetsModel.of(); + model.addTarget(target); + model.addLabel("__app_name", e.getAppName()); + + labels.forEach((k, v) -> { + k = k.trim(); + switch (k) { + case "metrics": + model.setMetricsPath(v.trim()); + break; + case "scheme": + model.setScheme(v.trim()); + break; + default: + if (k.startsWith("__")) { + model.addLabel(k, v); + } + + } + }); + + + return model; + }) + .filter(e -> e != null) + .collect(Collectors.toList()); if (res.isEmpty()) { return null; @@ -168,7 +190,8 @@ private List toModels(NodeInfos info) { } private Set getPdAddresses() { - MemberAPI.CallStreamObserverWrap response = new MemberAPI.CallStreamObserverWrap<>(); + MemberAPI.CallStreamObserverWrap response = + new MemberAPI.CallStreamObserverWrap<>(); pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); List members = null; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java index 75033c3317..084728b857 100644 --- 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java @@ -1,16 +1,31 @@ -package org.apache.hugegraph.pd.service; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import org.apache.hugegraph.pd.watch.PDWatchSubject; +package org.apache.hugegraph.pd.service; import java.util.concurrent.ConcurrentHashMap; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Pdpb; import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.watch.PDWatchSubject; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Pdpb; import com.baidu.hugegraph.pd.raft.RaftEngine; import com.baidu.hugegraph.pd.raft.RaftStateListener; -import com.baidu.hugegraph.pd.watch.PDWatchSubject; import io.grpc.CallOptions; import io.grpc.ManagedChannel; @@ -23,8 +38,12 @@ **/ public interface ServiceGrpc extends RaftStateListener { + ConcurrentHashMap channels = new ConcurrentHashMap(); + default Pdpb.ResponseHeader getResponseHeader(PDException e) { - Pdpb.Error error = Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage()).build(); + Pdpb.Error error = + Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage()) + .build(); Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(error).build(); return header; } @@ -39,16 +58,17 @@ default boolean isLeader() { return RaftEngine.getInstance().isLeader(); } - ConcurrentHashMap channels = new ConcurrentHashMap(); - - default void redirectToLeader(ManagedChannel channel, MethodDescriptor method, - ReqT req, io.grpc.stub.StreamObserver observer) { + default void redirectToLeader(ManagedChannel channel, + MethodDescriptor method, + ReqT req, + io.grpc.stub.StreamObserver observer) { try { String address = RaftEngine.getInstance().getLeaderGrpcAddress(); if ((channel = channels.get(address)) == null) { synchronized (this) { if ((channel = channels.get(address)) == null) { - ManagedChannel c = ManagedChannelBuilder.forTarget(address).usePlaintext().build(); + ManagedChannel c = + ManagedChannelBuilder.forTarget(address).usePlaintext().build(); channels.put(address, c); channel = c; } @@ -64,13 +84,13 @@ default void redirectToLeader(ManagedChannel channel, MethodDescri @Override default void onRaftLeaderChanged() { - synchronized (this){ + synchronized (this) { if (!isLeader()) { try { String message = "lose leader"; PDPulseSubject.notifyError(message); PDWatchSubject.notifyError(message); - }catch (Exception e){ + } catch (Exception e) { e.printStackTrace(); } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java index 4e45e9027b..f996c8fe64 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package com.baidu.hugegraph.pd.service; import com.baidu.hugegraph.pd.KvService; @@ -6,6 +23,7 @@ import com.baidu.hugegraph.pd.rest.API; import com.baidu.hugegraph.pd.upgrade.VersionScriptFactory; import com.baidu.hugegraph.pd.upgrade.VersionUpgradeScript; + import lombok.extern.slf4j.Slf4j; @Slf4j @@ -15,11 +33,11 @@ public class UpgradeService { private static final String RUN_LOG_PREFIX = "SCRIPT_RUN_LOG"; - private PDConfig pdConfig; + private final PDConfig pdConfig; - private KvService kvService; + private final KvService kvService; - public UpgradeService (PDConfig pdConfig){ + public UpgradeService(PDConfig pdConfig) { this.pdConfig = pdConfig; this.kvService = new KvService(pdConfig); } @@ -30,7 +48,7 @@ public void upgrade() throws PDException { VersionScriptFactory factory = VersionScriptFactory.getInstance(); var dataVersion = getDataVersion(); log.info("now db data version : {}", dataVersion); - for(VersionUpgradeScript script : factory.getScripts()) { + for (VersionUpgradeScript script : factory.getScripts()) { // 执行过,run once的跳过 if (isExecuted(script.getClass().getName()) && script.isRunOnce()) { log.info("Script {} is Executed and is run once", script.getClass().getName()); @@ -39,8 +57,13 @@ public void upgrade() throws PDException { // 判断跳过的条件 if (dataVersion == null && !script.isRunWithoutDataVersion() || dataVersion != null && - !versionCompare(dataVersion, script.getHighVersion(), script.getLowVersion())) { - log.info("Script {} is did not match version requirements, current data version:{}, current version:{}" + !versionCompare( + dataVersion, + script.getHighVersion(), + script.getLowVersion())) { + log.info( + "Script {} is did not match version requirements, current data " + + "version:{}, current version:{}" + "script run version({} to {}), run without data version:{}", script.getClass().getName(), dataVersion, @@ -73,11 +96,10 @@ private String getDataVersion() throws PDException { private boolean versionCompare(String dataVersion, String high, String low) { var currentVersion = API.VERSION; - if (!high.equals(VersionUpgradeScript.UNLIMITED_VERSION) && high.compareTo(dataVersion) < 0 - || !low.equals(VersionUpgradeScript.UNLIMITED_VERSION) && low.compareTo(currentVersion) > 0){ - return false; - } - return true; + return (high.equals(VersionUpgradeScript.UNLIMITED_VERSION) || + high.compareTo(dataVersion) >= 0) + && 
(low.equals(VersionUpgradeScript.UNLIMITED_VERSION) || + low.compareTo(currentVersion) <= 0); } private void writeCurrentDataVersion() throws PDException { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java index 6e61d9a252..709b65a6d0 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java @@ -1,32 +1,50 @@ -package org.apache.hugegraph.pd.util; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import org.apache.commons.lang3.time.DateUtils; +package org.apache.hugegraph.pd.util; import java.text.ParseException; import java.util.Date; +import org.apache.commons.lang3.time.DateUtils; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Pdpb; + /** * @author zhangyingjie * @date 2022/3/23 **/ public class DateUtil { - private static String DATE = "yyyy-MM-dd"; - private static String DATETIME = "yyyy-MM-dd HH:mm:ss"; - private static String DATETIME_MM = "yyyy-MM-dd HH:mm"; - private static String DATETIME_SSS = "yyyy-MM-dd HH:mm:ss.SSS"; - private static String TIME = "HH:mm"; - private static String TIME_SS = "HH:mm:ss"; - private static String SYS_DATE = "yyyy/MM/dd"; - private static String SYS_DATETIME = "yyyy/MM/dd HH:mm:ss"; - private static String SYS_DATETIME_MM = "yyyy/MM/dd HH:mm"; - private static String SYS_DATETIME_SSS = "yyyy/MM/dd HH:mm:ss.SSS"; - private static String NONE_DATE = "yyyyMMdd"; - private static String NONE_DATETIME = "yyyyMMddHHmmss"; - private static String NONE_DATETIME_MM = "yyyyMMddHHmm"; - private static String NONE_DATETIME_SSS = "yyyyMMddHHmmssSSS"; - private static String[] PATTERNS =new String[]{ + private static final String DATE = "yyyy-MM-dd"; + private static final String DATETIME = "yyyy-MM-dd HH:mm:ss"; + private static final String DATETIME_MM = "yyyy-MM-dd HH:mm"; + private static final String DATETIME_SSS = "yyyy-MM-dd HH:mm:ss.SSS"; + private static final String TIME = "HH:mm"; + private static final String TIME_SS = "HH:mm:ss"; + private static final String SYS_DATE = "yyyy/MM/dd"; + private static final String SYS_DATETIME = "yyyy/MM/dd HH:mm:ss"; + private static final String SYS_DATETIME_MM = "yyyy/MM/dd HH:mm"; + private static final String SYS_DATETIME_SSS = "yyyy/MM/dd HH:mm:ss.SSS"; + private static final String NONE_DATE = "yyyyMMdd"; + private static final String NONE_DATETIME = "yyyyMMddHHmmss"; + private static final String NONE_DATETIME_MM = "yyyyMMddHHmm"; + private static final String NONE_DATETIME_SSS = "yyyyMMddHHmmssSSS"; + private static 
final String[] PATTERNS = new String[]{ DATE, DATETIME, DATETIME_MM, @@ -43,13 +61,13 @@ public class DateUtil { NONE_DATETIME_SSS }; - public static String[] getDefaultPattern(){ + public static String[] getDefaultPattern() { return PATTERNS; } public static Date getDate(String date) throws PDException { try { - return DateUtils.parseDate(date,getDefaultPattern()); + return DateUtils.parseDate(date, getDefaultPattern()); } catch (ParseException e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java index d0cb0e0e25..a905ca3665 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.util; import java.util.Map; @@ -10,17 +27,17 @@ * @author lynn.bond@hotmail.com on 2022/3/10 */ public class HgMapCache { - private Map cache = new ConcurrentHashMap(); - private Supplier expiry; - - public static HgMapCache expiredOf(long interval){ - return new HgMapCache(new CycleIntervalPolicy(interval)); - } + private final Map cache = new ConcurrentHashMap(); + private final Supplier expiry; private HgMapCache(Supplier expiredPolicy) { this.expiry = expiredPolicy; } + public static HgMapCache expiredOf(long interval) { + return new HgMapCache(new CycleIntervalPolicy(interval)); + } + private boolean isExpired() { if (expiry != null && expiry.get()) { cache.clear(); @@ -56,21 +73,22 @@ public Map getAll() { return this.cache; } - private static class CycleIntervalPolicy implements Supplier{ - private long expireTime=0; - private long interval=0; + private static class CycleIntervalPolicy implements Supplier { + private long expireTime = 0; + private long interval = 0; - public CycleIntervalPolicy(long interval){ - this.interval=interval; + public CycleIntervalPolicy(long interval) { + this.interval = interval; init(); } - private void init(){ - expireTime=System.currentTimeMillis()+interval; + + private void init() { + expireTime = System.currentTimeMillis() + interval; } @Override public Boolean get() { - if(System.currentTimeMillis()>expireTime){ + if (System.currentTimeMillis() > expireTime) { init(); return true; } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java index 569702e6c4..b92056d735 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java @@ -1,3 +1,20 @@ +/* + 
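A note on the HgMapCache hunks above: the cache is a ConcurrentHashMap guarded by a Supplier<Boolean> expiry policy, and CycleIntervalPolicy answers true once per interval, at which point the whole map is cleared (the metrics code earlier builds it with expiredOf(24 * 60 * 60 * 1000), i.e. a roughly daily flush). The following is a minimal, self-contained sketch of that pattern, not the project's class; the name ExpiringMapCacheSketch and its get/put surface are illustrative only.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Supplier;

    // Sketch of an interval-expiring map cache (illustrative, hypothetical names).
    public class ExpiringMapCacheSketch<K, V> {
        private final Map<K, V> cache = new ConcurrentHashMap<>();
        private final Supplier<Boolean> expiry;

        private ExpiringMapCacheSketch(Supplier<Boolean> expiry) {
            this.expiry = expiry;
        }

        // Factory mirroring the expiredOf(interval) idea: signal "expired" once per intervalMs.
        public static <K, V> ExpiringMapCacheSketch<K, V> expiredOf(long intervalMs) {
            long[] deadline = {System.currentTimeMillis() + intervalMs};
            return new ExpiringMapCacheSketch<>(() -> {
                if (System.currentTimeMillis() > deadline[0]) {
                    deadline[0] = System.currentTimeMillis() + intervalMs;
                    return true;   // expired: caller clears the whole cache
                }
                return false;
            });
        }

        private void checkExpired() {
            if (expiry != null && expiry.get()) {
                cache.clear();
            }
        }

        public void put(K key, V value) {
            checkExpired();
            cache.put(key, value);
        }

        public V get(K key) {
            checkExpired();
            return cache.get(key);
        }
    }

Clearing the whole map on expiry keeps the policy cheap: no per-entry timestamps are tracked, which is acceptable when the cached data (here, Prometheus scrape targets) can simply be rebuilt on the next request.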
* Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.util; import lombok.extern.slf4j.Slf4j; @@ -9,12 +26,13 @@ public final class IdUtil { private final static byte[] lock = new byte[0]; - public static String createMillisStr(){ + public static String createMillisStr() { return String.valueOf(createMillisId()); } /** * Create millisecond style ID; + * * @return */ public static Long createMillisId() { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java index e1fc4a3b10..c3893cc3ae 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java @@ -1,11 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.util.grpc; +import java.lang.reflect.Field; + import io.grpc.Grpc; import io.grpc.ServerCall; import io.grpc.stub.StreamObserver; -import java.lang.reflect.Field; - public class StreamObserverUtil { static Object fieldLock = new Object(); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java index fa053db456..06a7d521a6 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java @@ -1,18 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.watch; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.function.Consumer; + +import javax.annotation.concurrent.ThreadSafe; + import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; import com.baidu.hugegraph.pd.grpc.watch.WatchType; import com.google.protobuf.util.JsonFormat; + import io.grpc.Status; import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -import javax.annotation.concurrent.ThreadSafe; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.function.Consumer; - /** * @author lynn.bond@hotmail.com created on 2021/11/5 */ @@ -33,12 +52,14 @@ void addObserver(Long watcherId, StreamObserver responseObserver) if (this.watcherHolder.containsKey(watcherId)) { responseObserver.onError( - new Exception("The watcher-id[" + watcherId + "] of " + this.watchType.name() + new Exception( + "The watcher-id[" + watcherId + "] of " + this.watchType.name() + " subject has been existing, please unwatch it first")); return; } - log.info("Adding a "+this.watchType+"'s watcher, watcher-id is ["+ watcherId+"]."); + log.info("Adding a " + this.watchType + "'s watcher, watcher-id is [" + watcherId + + "]."); this.watcherHolder.put(watcherId, responseObserver); } @@ -46,7 +67,8 @@ void addObserver(Long watcherId, StreamObserver responseObserver) void removeObserver(Long watcherId, StreamObserver responseObserver) { synchronized (this.watcherHolder) { - log.info("Removing a "+this.watchType+"'s watcher, watcher-id is ["+ watcherId+"]."); + log.info("Removing a " + this.watchType + "'s watcher, watcher-id is [" + watcherId + + "]."); this.watcherHolder.remove(watcherId); } responseObserver.onCompleted(); @@ -54,9 +76,10 @@ void removeObserver(Long watcherId, StreamObserver responseObserv abstract String toNoticeString(WatchResponse res); - public void notifyError(String message){ + public void notifyError(String message) { synchronized (lock) { - Iterator>> iter = watcherHolder.entrySet().iterator(); + Iterator>> iter = + watcherHolder.entrySet().iterator(); while (iter.hasNext()) { Map.Entry> entry = iter.next(); Long watcherId = entry.getKey(); @@ -65,7 +88,8 @@ public void notifyError(String message){ entry.getValue().onError( Status.PERMISSION_DENIED.withDescription(message).asRuntimeException()); } catch (Throwable e) { - //log.error("Failed to send " + this.watchType.name() + "'s error message [" + toNoticeString(res) + //log.error("Failed to send " + this.watchType.name() + "'s error message [" + // + toNoticeString(res) // + "] to watcher[" + watcherId + "].", e); } @@ -101,19 +125,21 @@ protected void notifyWatcher(WatchResponse.Builder response) { protected void notifyWatcher(Consumer c) { synchronized (lock) { - if(c==null){ - log.error(this.watchType.name()+"'s notice was abandoned, caused by: notifyWatcher(null)"); + if (c == null) { + log.error(this.watchType.name() + + "'s notice was abandoned, caused by: notifyWatcher(null)"); return; } - try{ + try { c.accept(this.builder.clear()); - }catch (Throwable t){ - 
log.error(this.watchType.name()+"'s notice was abandoned, caused by:",t ); + } catch (Throwable t) { + log.error(this.watchType.name() + "'s notice was abandoned, caused by:", t); return; } - Iterator>> iter = watcherHolder.entrySet().iterator(); + Iterator>> iter = + watcherHolder.entrySet().iterator(); while (iter.hasNext()) { Map.Entry> entry = iter.next(); @@ -123,14 +149,15 @@ protected void notifyWatcher(Consumer c) { try { entry.getValue().onNext(res); } catch (Throwable e) { - log.error("Failed to send " + this.watchType.name() + "'s notice[" + toNoticeString(res) - + "] to watcher[" + watcherId + "].", e); + log.error("Failed to send " + this.watchType.name() + "'s notice[" + + toNoticeString(res) + + "] to watcher[" + watcherId + "].", e); // TODO: ? try multi-times? iter.remove(); log.error("Removed a " + this.watchType.name() + "'s watcher[" + entry.getKey() - + "], because of once failure of sending.", e); + + "], because of once failure of sending.", e); } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java index 88bfbfff1c..0aeac9c140 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java @@ -1,5 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
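For the AbstractWatchSubject hunks above: notifyWatcher builds one WatchResponse per notice and pushes it to every registered StreamObserver, and a watcher whose onNext throws is removed on the spot (the "once failure of sending" log message). Below is a hedged, standalone sketch of that broadcast-and-evict loop; it substitutes a plain Consumer<String> for the gRPC StreamObserver and uses hypothetical names.

    import java.util.Iterator;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Consumer;

    // Sketch: broadcast a notice to all watchers, evicting any watcher that fails once.
    public class WatchBroadcastSketch {
        private final Map<Long, Consumer<String>> watchers = new ConcurrentHashMap<>();

        public void addWatcher(long watcherId, Consumer<String> watcher) {
            watchers.put(watcherId, watcher);
        }

        public synchronized void notifyWatchers(String notice) {
            Iterator<Map.Entry<Long, Consumer<String>>> iter = watchers.entrySet().iterator();
            while (iter.hasNext()) {
                Map.Entry<Long, Consumer<String>> entry = iter.next();
                try {
                    entry.getValue().accept(notice);
                } catch (RuntimeException e) {
                    // Mirror the patch's behaviour: one failed send removes the watcher.
                    iter.remove();
                }
            }
        }
    }

In the real class the loop additionally synchronizes on a dedicated lock object and reuses a WatchResponse.Builder that is cleared per notice; the sketch keeps only the evict-on-first-failure shape.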
+ */ + package org.apache.hugegraph.pd.watch; +import java.util.Arrays; +import java.util.LinkedList; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.BiPredicate; + import com.baidu.hugegraph.pd.KvService; import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.config.PDConfig; @@ -8,30 +33,28 @@ import com.baidu.hugegraph.pd.grpc.kv.WatchResponse; import com.baidu.hugegraph.pd.grpc.kv.WatchState; import com.baidu.hugegraph.pd.grpc.kv.WatchType; + import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -import java.util.Arrays; -import java.util.LinkedList; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.function.BiPredicate; - -/** watch订阅、响应处理类 +/** + * watch订阅、响应处理类 + * * @author zhangyingjie * @date 2022/6/21 **/ @Slf4j public class KvWatchSubject { - private KvService kvService; public static final String KEY_DELIMITER = "KW"; public static final String PREFIX_DELIMITER = "PW"; public static final String ALL_PREFIX = "W"; public static final long WATCH_TTL = 20000L; - private static final ConcurrentMap> clients = new ConcurrentHashMap<>(); + private static final ConcurrentMap> clients = + new ConcurrentHashMap<>(); + BiPredicate equal = (kvKey, watchKey) -> kvKey.equals(watchKey); + BiPredicate startWith = (kvKey, watchKey) -> kvKey.startsWith(watchKey); + private final KvService kvService; /** * 会使用以下三组key: @@ -50,7 +73,8 @@ public String getWatchKey(String key, String watchDelimiter) { private void addWatchKey(String key, String delimiter, long clientId) throws PDException { String watchKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); kvService.put(watchKey, "", WATCH_TTL); - String clientFirstKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, key, clientId); + String clientFirstKey = + KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, key, clientId); kvService.put(clientFirstKey, "", WATCH_TTL); } @@ -63,15 +87,17 @@ private void removeWatchKey(String key, String delimiter, long clientId) throws /** * 增加观察者 - * @param key 观察的key - * @param clientId 客户端标识 + * + * @param key 观察的key + * @param clientId 客户端标识 * @param observer * @param delimiter 观察类型标识符,对前缀监听或者对key的监听可以通过此参数区分 * @throws PDException */ public void addObserver(String key, long clientId, StreamObserver observer, String delimiter) throws PDException { - String keyWithoutPrefix = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + String keyWithoutPrefix = + KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); clients.putIfAbsent(keyWithoutPrefix, observer); addWatchKey(key, delimiter, clientId); log.info("client:{},start to watch key:{}", clientId, key); @@ -79,22 +105,22 @@ public void addObserver(String key, long clientId, StreamObserver public void removeObserver(String key, long clientId, String delimiter) throws PDException { removeWatchKey(key, delimiter, clientId); - String keyWithoutPrefix = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + String keyWithoutPrefix = + KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); clients.remove(keyWithoutPrefix); } - BiPredicate equal = (kvKey, watchKey) -> kvKey.equals(watchKey); - BiPredicate startWith = (kvKey, watchKey) -> kvKey.startsWith(watchKey); - /** * 
通知观察者方法,key和prefix都使用此方法,predicate不同 + * * @param key * @param watchType 观察类型,一般是增加和删除 * @param predicate 判断等于或者是前匹配,用来适配key或prefix观察 * @param kvs * @throws PDException */ - public void notifyObserver(String key, WatchType watchType, BiPredicate predicate, + public void notifyObserver(String key, WatchType watchType, + BiPredicate predicate, WatchKv... kvs) throws PDException { boolean isEqual = predicate.equals(equal); String watchDelimiter = isEqual ? KEY_DELIMITER : PREFIX_DELIMITER; @@ -108,7 +134,7 @@ public void notifyObserver(String key, WatchType watchType, BiPredicate watchEvents = new LinkedList<>(); for (WatchKv kv : kvs) { String kvKey = kv.getKey(); @@ -116,13 +142,16 @@ public void notifyObserver(String key, WatchType watchType, BiPredicate observer = clients.get(keyAndClient); - watchResponse = WatchResponse.newBuilder().setState(WatchState.Started).setClientId(clientId) - .addAllEvents(watchEvents).build(); + watchResponse = + WatchResponse.newBuilder().setState(WatchState.Started).setClientId(clientId) + .addAllEvents(watchEvents).build(); try { if (observer != null) { @@ -139,7 +168,8 @@ public void notifyObserver(String key, WatchType watchType, BiPredicate>> entries = clients.entrySet(); - Map.Entry>[] array = entries.toArray(new Map.Entry[0]); + Map.Entry>[] array = + entries.toArray(new Map.Entry[0]); Arrays.stream(array).parallel().forEach(entry -> { StreamObserver value = entry.getValue(); String key = entry.getKey(); @@ -176,7 +207,8 @@ public void keepClientAlive() { String aliveKey = entryKey.replaceFirst(removes, ""); boolean keepAliveKey = kvService.keepAlive(aliveKey); boolean keepAliveEntry = kvService.keepAlive(entryKey); - // log.info("keep alive client:{},{}:{},{}:{}", client, aliveKey, keepAliveKey, + // log.info("keep alive client:{},{}:{},{}:{}", client, aliveKey, + // keepAliveKey, // entryKey, // keepAliveEntry); done = true; @@ -216,7 +248,8 @@ private void removeClient(StreamObserver value, String key, Strin * 通知客户端leader切换了,重连 */ public void notifyClientChangeLeader() { - WatchResponse response = WatchResponse.newBuilder().setState(WatchState.Leader_Changed).build(); + WatchResponse response = + WatchResponse.newBuilder().setState(WatchState.Leader_Changed).build(); for (Map.Entry> entry : clients.entrySet()) { StreamObserver value = entry.getValue(); String key = entry.getKey(); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java index 1173f8c1bf..610baad59b 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java @@ -1,16 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.watch; -import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; -import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; -import com.baidu.hugegraph.pd.grpc.watch.WatchType; +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; import javax.annotation.concurrent.ThreadSafe; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentValid; +import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchType; /** * The subject of partition change. + * * @author lynn.bond@hotmail.com created on 2021/11/26 */ @ThreadSafe @@ -24,9 +41,9 @@ final class NodeChangeSubject extends AbstractWatchSubject { String toNoticeString(WatchResponse res) { StringBuilder sb = new StringBuilder(); return sb.append("graph:").append(res.getNodeResponse().getGraph()) - .append(",") - .append("nodeId:").append(res.getNodeResponse().getNodeId()) - .toString(); + .append(",") + .append("nodeId:").append(res.getNodeResponse().getNodeId()) + .toString(); } public void notifyWatcher(NodeEventType nodeEventType, String graph, long nodeId) { @@ -35,16 +52,16 @@ public void notifyWatcher(NodeEventType nodeEventType, String graph, long nodeId super.notifyWatcher(builder -> { builder.setNodeResponse( builder.getNodeResponseBuilder().clear() - .setGraph(graph) - .setNodeId(nodeId) - .setNodeEventType(nodeEventType) - .build() + .setGraph(graph) + .setNodeId(nodeId) + .setNodeEventType(nodeEventType) + .build() ); }); } - public void notifyError(String message){ + public void notifyError(String message) { super.notifyError(message); } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java index 0328dcafad..6915050189 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java @@ -1,7 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
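On the KvWatchSubject hunks above: a single notifyObserver path serves both exact-key watches and prefix watches; the caller selects either the equal or the startWith BiPredicate, and the watch-key delimiter (KEY_DELIMITER vs PREFIX_DELIMITER) is derived from which predicate was passed. A small illustrative sketch of that predicate-driven matching follows; the class and method names are hypothetical and the key layout is simplified.

    import java.util.Arrays;
    import java.util.List;
    import java.util.function.BiPredicate;
    import java.util.stream.Collectors;

    // Sketch: one matching path that handles both exact-key and prefix watches.
    public class KvWatchMatchSketch {
        static final BiPredicate<String, String> EQUAL = String::equals;
        static final BiPredicate<String, String> START_WITH = String::startsWith;

        // Returns the changed keys that a watcher registered on `watchKey` should see.
        static List<String> matches(List<String> changedKeys, String watchKey,
                                    BiPredicate<String, String> predicate) {
            return changedKeys.stream()
                              .filter(changed -> predicate.test(changed, watchKey))
                              .collect(Collectors.toList());
        }

        public static void main(String[] args) {
            List<String> changed = Arrays.asList("graph/p1", "graph/p2", "store/s1");
            // Exact-key watcher only sees its own key; prefix watcher sees everything under it.
            System.out.println(matches(changed, "graph/p1", EQUAL));      // [graph/p1]
            System.out.println(matches(changed, "graph/", START_WITH));   // [graph/p1, graph/p2]
        }
    }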
+ */ + package org.apache.hugegraph.pd.watch; import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import javax.annotation.concurrent.ThreadSafe; + import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; @@ -9,13 +31,10 @@ import com.baidu.hugegraph.pd.grpc.watch.WatchRequest; import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; import com.baidu.hugegraph.pd.grpc.watch.WatchType; + import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -import javax.annotation.concurrent.ThreadSafe; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - /** * @author lynn.bond@hotmail.com created on 2021/11/4 */ @@ -25,26 +44,34 @@ public class PDWatchSubject implements StreamObserver { public final static Map subjectHolder = new ConcurrentHashMap<>(); private final static byte[] lock = new byte[0]; + static { + subjectHolder.put(WatchType.WATCH_TYPE_PARTITION_CHANGE.name(), + new PartitionChangeSubject()); + subjectHolder.put(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name(), new NodeChangeSubject()); + subjectHolder.put(WatchType.WATCH_TYPE_GRAPH_CHANGE.name(), new NodeChangeSubject()); + subjectHolder.put(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name(), + new com.baidu.hugegraph.pd.watch.ShardGroupChangeSubject()); + } + private final StreamObserver responseObserver; private AbstractWatchSubject subject; private Long watcherId; - static { - subjectHolder.put(WatchType.WATCH_TYPE_PARTITION_CHANGE.name(), new PartitionChangeSubject()); - subjectHolder.put(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name(), new NodeChangeSubject()); - subjectHolder.put(WatchType.WATCH_TYPE_GRAPH_CHANGE.name(), new NodeChangeSubject()); - subjectHolder.put(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name(), new ShardGroupChangeSubject()); + private PDWatchSubject(StreamObserver responseObserver) { + this.responseObserver = responseObserver; } - public static StreamObserver addObserver(StreamObserver responseObserver) { + public static StreamObserver addObserver( + StreamObserver responseObserver) { isArgumentNotNull(responseObserver, "responseObserver"); return new PDWatchSubject(responseObserver); } /** * Notify partition change - * @param changeType change type - * @param graph name of graph + * + * @param changeType change type + * @param graph name of graph * @param partitionId id of partition */ public static void notifyPartitionChange(ChangeType changeType, String graph, int partitionId) { @@ -53,16 +80,19 @@ public static void notifyPartitionChange(ChangeType changeType, String graph, in } - public static void notifyShardGroupChange(ChangeType changeType, int groupId, Metapb.ShardGroup group) { - ((ShardGroupChangeSubject) subjectHolder.get(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name())) + public static void notifyShardGroupChange(ChangeType changeType, int groupId, + Metapb.ShardGroup group) { + ((com.baidu.hugegraph.pd.watch.ShardGroupChangeSubject) subjectHolder.get( + WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name())) .notifyWatcher(changeType.getGrpcType(), groupId, group); } /** * Notify store-node change + * * @param changeType change type - * @param graph name of graph - * @param nodeId id of partition + * @param graph name of graph + * @param nodeId id of partition */ public static void notifyNodeChange(NodeEventType changeType, String graph, long nodeId) { ((NodeChangeSubject) 
subjectHolder.get(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name())) @@ -74,15 +104,11 @@ public static void notifyChange(WatchType type, subjectHolder.get(type.name()).notifyWatcher(builder); } - public static void notifyError(String message){ - subjectHolder.forEach((k, v)->{ + public static void notifyError(String message) { + subjectHolder.forEach((k, v) -> { v.notifyError(message); }); } - - private PDWatchSubject(StreamObserver responseObserver) { - this.responseObserver = responseObserver; - } private static Long createWatcherId() { synchronized (lock) { @@ -101,7 +127,8 @@ private static Long createWatcherId() { private void cancelWatcher() { if (this.subject == null) { - this.responseObserver.onError(new Exception("Invoke cancel-watch before create-watch.")); + this.responseObserver.onError( + new Exception("Invoke cancel-watch before create-watch.")); return; } @@ -174,7 +201,7 @@ public enum ChangeType { ALTER(WatchChangeType.WATCH_CHANGE_TYPE_ALTER), DEL(WatchChangeType.WATCH_CHANGE_TYPE_DEL), - USER_DEFINED (WatchChangeType.WATCH_CHANGE_TYPE_SPECIAL1); + USER_DEFINED(WatchChangeType.WATCH_CHANGE_TYPE_SPECIAL1); private final WatchChangeType grpcType; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java index 1a69612844..556d6b20ba 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java @@ -1,16 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.watch; -import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; -import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; -import com.baidu.hugegraph.pd.grpc.watch.WatchType; +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentValid; import javax.annotation.concurrent.ThreadSafe; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentValid; +import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import com.baidu.hugegraph.pd.grpc.watch.WatchType; /** * The subject of partition change. 
+ * * @author lynn.bond@hotmail.com created on 2021/11/5 */ @ThreadSafe @@ -24,9 +42,9 @@ final class PartitionChangeSubject extends AbstractWatchSubject { String toNoticeString(WatchResponse res) { StringBuilder sb = new StringBuilder(); return sb.append("graph:").append(res.getPartitionResponse().getGraph()) - .append(",") - .append("partitionId:").append(res.getPartitionResponse().getPartitionId()) - .toString(); + .append(",") + .append("partitionId:").append(res.getPartitionResponse().getPartitionId()) + .toString(); } public void notifyWatcher(WatchChangeType changeType, String graph, int partitionId) { @@ -36,10 +54,10 @@ public void notifyWatcher(WatchChangeType changeType, String graph, int partitio super.notifyWatcher(builder -> { builder.setPartitionResponse( builder.getPartitionResponseBuilder().clear() - .setGraph(graph) - .setPartitionId(partitionId) - .setChangeType(changeType) - .build() + .setGraph(graph) + .setPartitionId(partitionId) + .setChangeType(changeType) + .build() ); }); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java index 8046636808..f0c9643b72 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java @@ -1,13 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package com.baidu.hugegraph.pd.watch; +import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; + +import org.apache.hugegraph.pd.watch.AbstractWatchSubject; + import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; import com.baidu.hugegraph.pd.grpc.watch.WatchType; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; - -public class ShardGroupChangeSubject extends AbstractWatchSubject{ +public class ShardGroupChangeSubject extends AbstractWatchSubject { protected ShardGroupChangeSubject() { super(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE); @@ -17,20 +36,21 @@ protected ShardGroupChangeSubject() { String toNoticeString(WatchResponse res) { StringBuilder sb = new StringBuilder(); sb.append("shard group:") - .append(res.getShardGroupResponse().getShardGroup().toString().replace("\n", " ")); + .append(res.getShardGroupResponse().getShardGroup().toString().replace("\n", " ")); return sb.toString(); } - public void notifyWatcher(WatchChangeType changeType, int groupId, Metapb.ShardGroup shardGroup) { + public void notifyWatcher(WatchChangeType changeType, int groupId, + Metapb.ShardGroup shardGroup) { isArgumentNotNull(changeType, "changeType"); super.notifyWatcher(builder -> { builder.setShardGroupResponse( builder.getShardGroupResponseBuilder().clear() - .setShardGroupId(groupId) - .setType(changeType) - .setShardGroup(shardGroup) - .build() + .setShardGroupId(groupId) + .setType(changeType) + .setShardGroup(shardGroup) + .build() ); }); } diff --git a/hg-pd-service/src/main/resources/log4j2.xml b/hg-pd-service/src/main/resources/log4j2.xml index a157b6412b..125b8ea9f7 100644 --- a/hg-pd-service/src/main/resources/log4j2.xml +++ b/hg-pd-service/src/main/resources/log4j2.xml @@ -1,5 +1,22 @@ + + @@ -25,7 +42,7 @@ - + @@ -33,8 +50,8 @@ - - + + @@ -52,7 +69,7 @@ - + @@ -60,8 +77,8 @@ - - + + @@ -80,7 +97,7 @@ - + @@ -88,8 +105,8 @@ - - + + diff --git a/hg-pd-service/src/test/java/live/PDServer0.java b/hg-pd-service/src/test/java/live/PDServer0.java index 536e38fe9e..1ca4e30b08 100644 --- a/hg-pd-service/src/test/java/live/PDServer0.java +++ b/hg-pd-service/src/test/java/live/PDServer0.java @@ -1,14 +1,30 @@ -package live; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
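Regarding the PDWatchSubject / PartitionChangeSubject / ShardGroupChangeSubject hunks above: the static block seeds a map from WatchType name to the matching subject, and the notify* helpers look the subject up by type before broadcasting to its watchers. A minimal sketch of that registry-and-dispatch shape is below; Subject, the enum constants and the println bodies are stand-ins, not the project's types.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Sketch: register one subject per watch type, then dispatch notifications by type name.
    public class WatchSubjectRegistrySketch {

        interface Subject {
            void notifyWatchers(String payload);
        }

        enum WatchType { PARTITION_CHANGE, STORE_NODE_CHANGE, SHARD_GROUP_CHANGE }

        static final Map<String, Subject> SUBJECTS = new ConcurrentHashMap<>();

        static {
            SUBJECTS.put(WatchType.PARTITION_CHANGE.name(),
                         payload -> System.out.println("partition: " + payload));
            SUBJECTS.put(WatchType.STORE_NODE_CHANGE.name(),
                         payload -> System.out.println("node: " + payload));
            SUBJECTS.put(WatchType.SHARD_GROUP_CHANGE.name(),
                         payload -> System.out.println("shard group: " + payload));
        }

        static void notifyChange(WatchType type, String payload) {
            SUBJECTS.get(type.name()).notifyWatchers(payload);
        }

        public static void main(String[] args) {
            notifyChange(WatchType.PARTITION_CHANGE, "graph:g, partitionId:1");
        }
    }

Keyed dispatch keeps the gRPC entry point (PDWatchSubject) unaware of the concrete subject classes; adding a new watch type only requires registering one more subject in the static block.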
+ */ -import org.apache.hugegraph.pd.boot.HugePDServer; -import org.apache.commons.io.FileUtils; -import org.springframework.boot.SpringApplication; +package live; import java.io.File; import java.io.IOException; +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.boot.HugePDServer; +import org.springframework.boot.SpringApplication; + /** - * * @author zhangyingjie * @date 2022/1/9 **/ @@ -20,7 +36,8 @@ public class PDServer0 { public static void main(String[] args) { //deleteDirectory(new File(DATA_PATH)); - SpringApplication.run(HugePDServer.class, String.format("--spring.profiles.active=%s", SERVER_NAME)); + SpringApplication.run(HugePDServer.class, + String.format("--spring.profiles.active=%s", SERVER_NAME)); System.out.println(SERVER_NAME + " started."); } @@ -28,7 +45,7 @@ public static void deleteDirectory(File dir) { try { FileUtils.deleteDirectory(dir); } catch (IOException e) { - System.out.println(String.format("Failed to start ....,%s", e.getMessage())); + System.out.printf("Failed to start ....,%s%n", e.getMessage()); } } diff --git a/hg-pd-service/src/test/java/live/PDServer1.java b/hg-pd-service/src/test/java/live/PDServer1.java index 987533228d..c9219e8bc2 100644 --- a/hg-pd-service/src/test/java/live/PDServer1.java +++ b/hg-pd-service/src/test/java/live/PDServer1.java @@ -1,12 +1,29 @@ -package live; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ -import org.apache.hugegraph.pd.boot.HugePDServer; -import org.apache.commons.io.FileUtils; -import org.springframework.boot.SpringApplication; +package live; import java.io.File; import java.io.IOException; +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.boot.HugePDServer; +import org.springframework.boot.SpringApplication; + /** * @author zhangyingjie * @date 2022/1/9 @@ -17,8 +34,9 @@ public class PDServer1 { static String DATA_PATH = "tmp/8686"; public static void main(String[] args) { - deleteDirectory(new File(DATA_PATH)); - SpringApplication.run(HugePDServer.class, String.format("--spring.profiles.active=%s", SERVER_NAME)); + deleteDirectory(new File(DATA_PATH)); + SpringApplication.run(HugePDServer.class, + String.format("--spring.profiles.active=%s", SERVER_NAME)); System.out.println(SERVER_NAME + " started."); } @@ -26,7 +44,7 @@ public static void deleteDirectory(File dir) { try { FileUtils.deleteDirectory(dir); } catch (IOException e) { - System.out.println(String.format("Failed to start ....,%s", e.getMessage())); + System.out.printf("Failed to start ....,%s%n", e.getMessage()); } } diff --git a/hg-pd-service/src/test/java/live/PDServer2.java b/hg-pd-service/src/test/java/live/PDServer2.java index 904fa5b4cd..fb9c9bf024 100644 --- a/hg-pd-service/src/test/java/live/PDServer2.java +++ b/hg-pd-service/src/test/java/live/PDServer2.java @@ -1,12 +1,29 @@ -package live; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ -import org.apache.hugegraph.pd.boot.HugePDServer; -import org.apache.commons.io.FileUtils; -import org.springframework.boot.SpringApplication; +package live; import java.io.File; import java.io.IOException; +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.boot.HugePDServer; +import org.springframework.boot.SpringApplication; + /** * @author zhangyingjie * @date 2022/1/9 @@ -17,8 +34,9 @@ public class PDServer2 { static String DATA_PATH = "tmp/8687"; public static void main(String[] args) { - // deleteDirectory(new File(DATA_PATH)); - SpringApplication.run(HugePDServer.class, String.format("--spring.profiles.active=%s", SERVER_NAME)); + // deleteDirectory(new File(DATA_PATH)); + SpringApplication.run(HugePDServer.class, + String.format("--spring.profiles.active=%s", SERVER_NAME)); System.out.println(SERVER_NAME + " started."); } @@ -26,7 +44,7 @@ public static void deleteDirectory(File dir) { try { FileUtils.deleteDirectory(dir); } catch (IOException e) { - System.out.println(String.format("Failed to start ....,%s", e.getMessage())); + System.out.printf("Failed to start ....,%s%n", e.getMessage()); } } diff --git a/hg-pd-service/src/test/java/live/PDServer3.java b/hg-pd-service/src/test/java/live/PDServer3.java index 7927496599..8807eaeccd 100644 --- a/hg-pd-service/src/test/java/live/PDServer3.java +++ b/hg-pd-service/src/test/java/live/PDServer3.java @@ -1,12 +1,29 @@ -package live; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ -import org.apache.hugegraph.pd.boot.HugePDServer; -import org.apache.commons.io.FileUtils; -import org.springframework.boot.SpringApplication; +package live; import java.io.File; import java.io.IOException; +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.boot.HugePDServer; +import org.springframework.boot.SpringApplication; + /** * @author zhangyingjie * @date 2022/1/9 @@ -17,8 +34,9 @@ public class PDServer3 { static String DATA_PATH = "tmp/8688"; public static void main(String[] args) { - // deleteDirectory(new File(DATA_PATH)); - SpringApplication.run(HugePDServer.class, String.format("--spring.profiles.active=%s", SERVER_NAME)); + // deleteDirectory(new File(DATA_PATH)); + SpringApplication.run(HugePDServer.class, + String.format("--spring.profiles.active=%s", SERVER_NAME)); System.out.println(SERVER_NAME + " started."); } @@ -26,7 +44,7 @@ public static void deleteDirectory(File dir) { try { FileUtils.deleteDirectory(dir); } catch (IOException e) { - System.out.println(String.format("Failed to start ....,%s", e.getMessage())); + System.out.printf("Failed to start ....,%s%n", e.getMessage()); } } diff --git a/hg-pd-service/src/test/resources/log4j2.xml b/hg-pd-service/src/test/resources/log4j2.xml index a157b6412b..68c8326973 100644 --- a/hg-pd-service/src/test/resources/log4j2.xml +++ b/hg-pd-service/src/test/resources/log4j2.xml @@ -1,6 +1,23 @@ - + + + logs @@ -25,7 +42,7 @@ - + @@ -33,8 +50,8 @@ - - + + @@ -52,7 +69,7 @@ - + @@ -60,8 +77,8 @@ - - + + @@ -80,7 +97,7 @@ - + @@ -88,8 +105,8 @@ - - + + diff --git a/hg-pd-test/pom.xml b/hg-pd-test/pom.xml index 602bdb13d2..0ac87fa932 100644 --- a/hg-pd-test/pom.xml +++ b/hg-pd-test/pom.xml @@ -1,6 +1,23 @@ - + + hugegraph-pd-root diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java index 3e2c205bb3..8a2a4fbc98 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java @@ -1,19 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.client; -import com.baidu.hugegraph.pd.client.PDClient; -import com.baidu.hugegraph.pd.client.PDConfig; import org.junit.After; import org.junit.BeforeClass; import org.junit.runner.RunWith; import org.mockito.runners.MockitoJUnitRunner; +import com.baidu.hugegraph.pd.client.PDClient; +import com.baidu.hugegraph.pd.client.PDConfig; + @RunWith(MockitoJUnitRunner.class) public class BaseClientTest { public static PDClient pdClient; - - public long storeId = 0; public final String storeAddr = "localhost"; public final String graphName = "default/hugegraph/g"; + public long storeId = 0; @BeforeClass public static void beforeClass() throws Exception { diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java index 9b269ca9cb..925f8ada22 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java @@ -1,15 +1,33 @@ -package org.apache.hugegraph.pd.client; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.client.DiscoveryClientImpl; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; -import com.baidu.hugegraph.pd.grpc.discovery.Query; -import org.junit.Before; -import org.junit.Test; +package org.apache.hugegraph.pd.client; import java.util.HashMap; import java.util.Map; import java.util.function.Consumer; +import org.junit.Before; +import org.junit.Test; + +import com.baidu.hugegraph.pd.client.DiscoveryClientImpl; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.Query; + public class DiscoveryClientTest { private DiscoveryClientImpl client; diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java index 3cec45e974..09dce3f6d0 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java @@ -1,5 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.client; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + +import java.util.function.Consumer; + +import org.apache.commons.lang3.StringUtils; +import org.junit.Before; +import org.junit.Test; + import com.baidu.hugegraph.pd.client.KvClient; import com.baidu.hugegraph.pd.client.PDConfig; import com.baidu.hugegraph.pd.grpc.kv.KResponse; @@ -9,18 +35,14 @@ import com.baidu.hugegraph.pd.grpc.kv.WatchResponse; import com.baidu.hugegraph.pd.grpc.kv.WatchState; import com.baidu.hugegraph.pd.grpc.kv.WatchType; + import io.grpc.stub.AbstractBlockingStub; import io.grpc.stub.AbstractStub; -import org.apache.commons.lang3.StringUtils; -import static org.assertj.core.api.Assertions.assertThat; -import org.junit.Before; -import org.junit.Test; -import static org.mockito.Mockito.mock; - -import java.util.function.Consumer; public class KvClientTest extends BaseClientTest { + String key = "key"; + String value = "value"; private KvClient client; @Before @@ -32,12 +54,10 @@ public void setUp() { public void testCreateStub() { // Setup // Run the test - try{ + try { final AbstractStub result = client.createStub(); } catch (Exception e) { - } finally { - } @@ -48,18 +68,13 @@ public void testCreateStub() { public void testCreateBlockingStub() { // Setup // Run the test - try{ + try { final AbstractBlockingStub result = client.createBlockingStub(); } catch (Exception e) { - } finally { - } } - String key = "key"; - String value = "value"; - @Test public void testPutAndGet() throws Exception { // Run the test diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java index f862196c37..f969c218a1 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java @@ -1,9 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.client; -import lombok.extern.slf4j.Slf4j; import org.junit.runner.RunWith; import org.junit.runners.Suite; +import lombok.extern.slf4j.Slf4j; + @RunWith(Suite.class) @Suite.SuiteClasses({ diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java index ddedb78c74..c067258a82 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java @@ -1,18 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.client; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; +import org.junit.Test; +import org.mockito.Mockito; + import com.baidu.hugegraph.pd.client.PDClient; import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.MetaTask; import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.grpc.Pdpb; -import org.junit.Test; -import org.mockito.Mockito; - /** * @author zhengfuquan * @date 2022/11/28 @@ -31,6 +48,7 @@ public void testDbCompaction() { System.out.println("pdclienttest testDbCompaction end"); } + @Test public void testRegisterStore() { Metapb.Store store = Metapb.Store.newBuilder().build(); @@ -42,7 +60,7 @@ public void testRegisterStore() { } @Test - public void testSetGraph(){ + public void testSetGraph() { Metapb.Graph graph = Metapb.Graph.newBuilder().setGraphName("test").build(); try { pdClient.setGraph(graph); @@ -68,6 +86,7 @@ public void testGetStore() { e.printStackTrace(); } } + @Test public void testUpdateStore() { Metapb.Store store = Metapb.Store.newBuilder().build(); @@ -79,7 +98,7 @@ public void testUpdateStore() { } @Test - public void testGetActiveStores(){ + public void testGetActiveStores() { try { pdClient.getActiveStores("test"); } catch (PDException e) { @@ -107,7 +126,7 @@ public void testGetAllStores() { // } @Test - public void testKeyToCode(){ + public void testKeyToCode() { pdClient.keyToCode("test", "test".getBytes(StandardCharsets.UTF_8)); } @@ -115,7 +134,7 @@ public void testKeyToCode(){ public void testScanPartitions() { try { pdClient.scanPartitions("test", "1".getBytes(StandardCharsets.UTF_8), - "9".getBytes(StandardCharsets.UTF_8)); + "9".getBytes(StandardCharsets.UTF_8)); } catch (PDException e) { e.printStackTrace(); } @@ -308,8 +327,8 @@ public void testSplitData() { try { Metapb.PDConfig config = pdClient.getPDConfig(); pdClient.setPDConfig(config.toBuilder() - .setMaxShardsPerStore(12) - .build()); + .setMaxShardsPerStore(12) + .build()); System.out.println(pdClient.getPDConfig()); // start partition split diff --git 
a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java index 248eba841b..b00049cc95 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java @@ -1,10 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.clitools; import org.junit.After; import org.junit.BeforeClass; - public class BaseCliToolsTest { @BeforeClass public static void init() { diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java index 6e69aa1e05..89b125a5f3 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java @@ -1,9 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.clitools; -import lombok.extern.slf4j.Slf4j; import org.junit.runner.RunWith; import org.junit.runners.Suite; +import lombok.extern.slf4j.Slf4j; + @RunWith(Suite.class) @Suite.SuiteClasses({ diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java index d52dd09e11..f72fe65c60 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java @@ -1,21 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.clitools; +import java.util.Arrays; +import java.util.List; + +import org.junit.Test; + import com.baidu.hugegraph.pd.clitools.Main; import com.baidu.hugegraph.pd.common.PDException; -import lombok.extern.slf4j.Slf4j; -import org.junit.Test; -import java.util.Arrays; -import java.util.List; +import lombok.extern.slf4j.Slf4j; @Slf4j -public class MainTest extends BaseCliToolsTest{ +public class MainTest extends BaseCliToolsTest { + public static boolean test2sup(List arrays, int tail, int res) { + System.out.printf("%d %d%n", tail, res); + if (tail == 0) { + System.out.printf("a = %d %d%n", tail, res); + return false; + } else if (tail == 1) { + System.out.printf("b = %d %d%n", arrays.get(0), res); + return (arrays.get(0) == res); + } else if (tail == 2) { + System.out.printf("c = %d %d %d%n", arrays.get(0), arrays.get(1), res); + return (arrays.get(0) + arrays.get(1) == Math.abs(res)) || + (Math.abs(arrays.get(0) - arrays.get(1)) == Math.abs(res)); + } else { + return test2sup(arrays, tail - 1, res + arrays.get(tail - 1)) || + test2sup(arrays, tail - 1, res - arrays.get(tail - 1)); + } + } + @Test public void getConfig() throws PDException { Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad"}); } + // @Test public void setBatchTrue() throws PDException { Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad= true "}); @@ -30,6 +68,7 @@ public void setBatchFalse() throws PDException { public void getConfig2() throws PDException { Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount"}); } + // @Test public void setShardCount1() throws PDException { Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount=1"}); @@ -41,28 +80,11 @@ public void setShardCount3() throws PDException { } @Test - public void test2(){ - Integer[] a = new Integer[] { 1, 0, 3, 2}; + public void test2() { + Integer[] a = new Integer[]{1, 0, 3, 2}; List aa = Arrays.asList(a); System.out.printf(test2sup(aa, aa.size(), 0) ? 
"TRUE" : "FALSE"); } - public static boolean test2sup (List arrays, int tail, int res) { - System.out.println(String.format("%d %d", tail, res)); - if (tail == 0) { - System.out.println(String.format("a = %d %d", tail, res)); - return false; - } else if (tail == 1) { - System.out.println(String.format("b = %d %d", arrays.get(0), res)); - return (arrays.get(0) == res); - } else if (tail == 2) { - System.out.println(String.format("c = %d %d %d", arrays.get(0), arrays.get(1), res)); - return (arrays.get(0) + arrays.get(1) == Math.abs(res)) || - (Math.abs(arrays.get(0) - arrays.get(1)) == Math.abs(res)); - } else { - return test2sup(arrays, tail - 1, res + arrays.get(tail - 1)) || - test2sup(arrays, tail - 1, res - arrays.get(tail - 1)); - } - } } \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java index 2b5a9e531b..97052ffd3a 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.common; import org.junit.After; diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java index 5e2ebb405e..0430520dc9 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java @@ -1,13 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.common; import org.apache.hugegraph.pd.service.IdServiceTest; import org.apache.hugegraph.pd.service.KvServiceTest; - -import lombok.extern.slf4j.Slf4j; - import org.junit.runner.RunWith; import org.junit.runners.Suite; +import lombok.extern.slf4j.Slf4j; + @RunWith(Suite.class) @Suite.SuiteClasses({ diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java index ee24075af2..62ac894395 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java @@ -1,12 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.common; -import org.junit.Test; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.util.ArrayList; import java.util.HashMap; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import org.junit.Test; import com.baidu.hugegraph.pd.common.HgAssert; @@ -65,38 +82,46 @@ public void testIsNotNull() { @Test public void testIsInvalid() { - assertFalse(HgAssert.isInvalid( "abc", "test")); - assertTrue(HgAssert.isInvalid( "", null)); + assertFalse(HgAssert.isInvalid("abc", "test")); + assertTrue(HgAssert.isInvalid("", null)); } @Test public void testIsInvalidByte() { - assertTrue(HgAssert.isInvalid( new byte[0])); - assertFalse(HgAssert.isInvalid( new byte[1])); + assertTrue(HgAssert.isInvalid(new byte[0])); + assertFalse(HgAssert.isInvalid(new byte[1])); } @Test public void testIsInvalidMap() { assertTrue(HgAssert.isInvalid(new HashMap())); - assertFalse(HgAssert.isInvalid(new HashMap(){{put(1, 1);}})); + assertFalse(HgAssert.isInvalid(new HashMap() {{ + put(1, 1); + }})); } @Test public void testIsInvalidCollection() { assertTrue(HgAssert.isInvalid(new ArrayList())); - assertFalse(HgAssert.isInvalid(new ArrayList(){{add(1);}})); + assertFalse(HgAssert.isInvalid(new ArrayList() {{ + add(1); + }})); } @Test public void testIsContains() { - assertTrue(HgAssert.isContains(new Object[]{new Integer(1), new Long(2)}, new Long(2))); - assertFalse(HgAssert.isContains(new Object[]{new Integer(1), new Long(2)}, new Long(3))); + assertTrue(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)}, Long.valueOf(2))); + assertFalse(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)}, Long.valueOf(3))); } @Test public void testIsContainsT() { - assertTrue(HgAssert.isContains(new ArrayList<>(){{add(1);}}, 1)); - assertFalse(HgAssert.isContains(new ArrayList<>(){{add(1);}}, 2)); + assertTrue(HgAssert.isContains(new ArrayList<>() {{ + add(1); + }}, 1)); + 
assertFalse(HgAssert.isContains(new ArrayList<>() {{ + add(1); + }}, 2)); } @Test diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java index bc7bedad12..240d015f22 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java @@ -1,57 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.common; -import org.junit.Before; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.util.Objects; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import org.junit.Before; +import org.junit.Test; import com.baidu.hugegraph.pd.common.KVPair; public class KVPairTest { KVPair pair; + @Before - public void init(){ + public void init() { pair = new KVPair<>("key", 1); } @Test - public void testGetKey(){ + public void testGetKey() { assertEquals(pair.getKey(), "key"); } @Test - public void testSetKey(){ + public void testSetKey() { pair.setKey("key2"); assertEquals(pair.getKey(), "key2"); } @Test - public void testGetValue(){ + public void testGetValue() { assertTrue(Objects.equals(pair.getValue(), 1)); } @Test - public void testSetValue(){ + public void testSetValue() { pair.setValue(2); assertTrue(Objects.equals(pair.getValue(), 2)); } @Test - public void testToString(){ + public void testToString() { } @Test - public void testHashCode(){ + public void testHashCode() { } @Test - public void testEquals(){ + public void testEquals() { var pair2 = new KVPair<>("key", 1); assertTrue(pair2.equals(pair)); } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java index 3a03f2104b..577b7d1762 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java @@ -1,10 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.common; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; import static org.assertj.core.api.Assertions.assertThat; + import org.junit.Test; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; + public class MetadataKeyHelperTest { @Test @@ -124,7 +143,7 @@ public void testGetQueueItemKey() { @Test public void testGetSpitTaskKey() { - assertThat(MetadataKeyHelper.getSplitTaskKey( "graphName", 0)).contains( + assertThat(MetadataKeyHelper.getSplitTaskKey("graphName", 0)).contains( MetadataKeyHelper.getDelimiter()); } @@ -190,7 +209,7 @@ public void testGetDelimiter() { @Test public void testGetStringBuilderHelper() { - try{ + try { MetadataKeyHelper.getStringBuilderHelper(); } catch (Exception e) { diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java index a3438d743d..1fde63b3b3 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java @@ -1,33 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.common; -import com.baidu.hugegraph.pd.common.KVPair; -import com.baidu.hugegraph.pd.common.PartitionCache; -import com.baidu.hugegraph.pd.grpc.Metapb; -import org.junit.Before; -import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Objects; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import org.junit.Before; +import org.junit.Test; + +import com.baidu.hugegraph.pd.common.PartitionCache; +import com.baidu.hugegraph.pd.grpc.Metapb; public class PartitionCacheTest { - private PartitionCache cache ; + private PartitionCache cache; + + private static Metapb.Partition createPartition(int pid, String graphName, long start, + long end) { + return Metapb.Partition.newBuilder() + .setId(pid) + .setGraphName(graphName) + .setStartKey(start) + .setEndKey(end) + .setState(Metapb.PartitionState.PState_Normal) + .setVersion(1) + .build(); + } + + private static Metapb.ShardGroup creteShardGroup(int pid) { + return Metapb.ShardGroup.newBuilder() + .addShards( + Metapb.Shard.newBuilder().setStoreId(0) + .setRole(Metapb.ShardRole.Leader).build() + ) + .setId(pid) + .setVersion(0) + .setConfVer(0) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + } + + private static Metapb.Shard createShard() { + return Metapb.Shard.newBuilder() + .setStoreId(0) + .setRole(Metapb.ShardRole.Leader) + .build(); + } + + private static Metapb.Store createStore(long storeId) { + return Metapb.Store.newBuilder() + .setId(storeId) + .setAddress("127.0.0.1") + .setCores(4) + .setVersion("1") + .setDataPath("/tmp/junit") + .setDataVersion(1) + .setLastHeartbeat(System.currentTimeMillis()) + .setStartTimestamp(System.currentTimeMillis()) + .setState(Metapb.StoreState.Up) + .setDeployPath("/tmp/junit") + .build(); + } + + private static Metapb.Graph createGraph(String graphName, int partitionCount) { + return Metapb.Graph.newBuilder() + .setGraphName(graphName) + .setPartitionCount(partitionCount) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + } + + private static Metapb.ShardGroup createShardGroup() { + List shards = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + shards.add(Metapb.Shard.newBuilder() + .setStoreId(i) + .setRole(i == 0 ? 
Metapb.ShardRole.Leader : + Metapb.ShardRole.Follower) + .build() + ); + } + + return Metapb.ShardGroup.newBuilder() + .setId(1) + .setVersion(1) + .setConfVer(1) + .setState(Metapb.PartitionState.PState_Normal) + .addAllShards(shards) + .build(); + } @Before - public void setup(){ + public void setup() { cache = new PartitionCache(); } @Test - public void testGetPartitionById(){ + public void testGetPartitionById() { var partition = createPartition(0, "graph0", 0, 65535); cache.updateShardGroup(creteShardGroup(0)); cache.updatePartition(partition); @@ -41,13 +134,13 @@ public void testGetPartitionByKey() throws UnsupportedEncodingException { var partition = createPartition(0, "graph0", 0, 65535); cache.updateShardGroup(creteShardGroup(0)); cache.updatePartition(partition); - var ret = cache.getPartitionByKey("graph0", "0".getBytes("utf-8")); + var ret = cache.getPartitionByKey("graph0", "0".getBytes(StandardCharsets.UTF_8)); assertNotNull(ret); assertEquals(ret.getKey(), partition); } @Test - public void getPartitionByCode(){ + public void getPartitionByCode() { var partition = createPartition(0, "graph0", 0, 1024); cache.updateShardGroup(creteShardGroup(0)); cache.updatePartition(partition); @@ -58,7 +151,7 @@ public void getPartitionByCode(){ } @Test - public void testGetPartitions(){ + public void testGetPartitions() { var partition1 = createPartition(0, "graph0", 0, 1024); cache.updateShardGroup(creteShardGroup(0)); cache.updatePartition(partition1); @@ -71,7 +164,7 @@ public void testGetPartitions(){ } @Test - public void testAddPartition(){ + public void testAddPartition() { var partition = createPartition(0, "graph0", 0, 65535); cache.addPartition("graph0", 0, partition); var ret = cache.getPartitionById("graph0", 0); @@ -89,7 +182,7 @@ public void testAddPartition(){ } @Test - public void testUpdatePartition(){ + public void testUpdatePartition() { var partition = createPartition(0, "graph0", 0, 65535); cache.updateShardGroup(creteShardGroup(0)); cache.addPartition("graph0", 0, partition); @@ -102,7 +195,7 @@ public void testUpdatePartition(){ } @Test - public void testUpdatePartition2(){ + public void testUpdatePartition2() { var partition = createPartition(0, "graph0", 0, 1024); cache.updateShardGroup(creteShardGroup(0)); assertTrue(cache.updatePartition(partition)); @@ -114,7 +207,7 @@ public void testUpdatePartition2(){ } @Test - public void testRemovePartition(){ + public void testRemovePartition() { var partition = createPartition(0, "graph0", 0, 1024); cache.updateShardGroup(creteShardGroup(0)); cache.updatePartition(partition); @@ -125,7 +218,7 @@ public void testRemovePartition(){ } @Test - public void testRange(){ + public void testRange() { var partition1 = createPartition(1, "graph0", 0, 3); var partition2 = createPartition(2, "graph0", 3, 6); cache.updatePartition(partition1); @@ -149,7 +242,7 @@ public void testRange(){ } @Test - public void testRange2(){ + public void testRange2() { var partition1 = createPartition(1, "graph0", 0, 3); var partition2 = createPartition(2, "graph0", 3, 6); cache.updatePartition(partition1); @@ -168,9 +261,8 @@ public void testRange2(){ System.out.println(cache.debugCacheByGraphName("graph0")); } - @Test - public void testRemovePartitions(){ + public void testRemovePartitions() { var partition1 = createPartition(0, "graph0", 0, 1024); var partition2 = createPartition(1, "graph0", 1024, 2048); cache.updateShardGroup(creteShardGroup(0)); @@ -182,10 +274,8 @@ public void testRemovePartitions(){ 
assertEquals(cache.getPartitions("graph0").size(), 0); } - - @Test - public void testRemoveAll(){ + public void testRemoveAll() { var partition1 = createPartition(0, "graph0", 0, 1024); var partition2 = createPartition(1, "graph0", 1024, 2048); var partition3 = createPartition(0, "graph1", 0, 2048); @@ -203,35 +293,35 @@ public void testRemoveAll(){ } @Test - public void testUpdateShardGroup(){ + public void testUpdateShardGroup() { var shardGroup = createShardGroup(); cache.updateShardGroup(shardGroup); assertNotNull(cache.getShardGroup(shardGroup.getId())); } @Test - public void testGetShardGroup(){ + public void testGetShardGroup() { var shardGroup = createShardGroup(); cache.updateShardGroup(shardGroup); assertTrue(Objects.equals(cache.getShardGroup(shardGroup.getId()), shardGroup)); } @Test - public void testAddStore(){ + public void testAddStore() { var store = createStore(1); cache.addStore(1L, store); assertEquals(cache.getStoreById(1L), store); } @Test - public void testGetStoreById(){ + public void testGetStoreById() { var store = createStore(1); cache.addStore(1L, store); assertEquals(cache.getStoreById(1L), store); } @Test - public void testRemoveStore(){ + public void testRemoveStore() { var store = createStore(1); cache.addStore(1L, store); assertEquals(cache.getStoreById(1L), store); @@ -241,7 +331,7 @@ public void testRemoveStore(){ } @Test - public void testHasGraph(){ + public void testHasGraph() { var partition = createPartition(0, "graph0", 0, 65535); cache.updateShardGroup(creteShardGroup(0)); cache.updatePartition(partition); @@ -250,7 +340,7 @@ public void testHasGraph(){ } @Test - public void testUpdateGraph(){ + public void testUpdateGraph() { var graph = createGraph("graph0", 10); cache.updateGraph(graph); assertEquals(cache.getGraph("graph0"), graph); @@ -260,14 +350,14 @@ public void testUpdateGraph(){ } @Test - public void testGetGraph(){ + public void testGetGraph() { var graph = createGraph("graph0", 12); cache.updateGraph(graph); assertEquals(cache.getGraph("graph0"), graph); } @Test - public void testGetGraphs(){ + public void testGetGraphs() { var graph1 = createGraph("graph0", 12); var graph2 = createGraph("graph1", 12); var graph3 = createGraph("graph2", 12); @@ -278,7 +368,7 @@ public void testGetGraphs(){ } @Test - public void testReset(){ + public void testReset() { var graph1 = createGraph("graph0", 12); var graph2 = createGraph("graph1", 12); var graph3 = createGraph("graph2", 12); @@ -291,86 +381,15 @@ public void testReset(){ } @Test - public void testUpdateShardGroupLeader(){ + public void testUpdateShardGroupLeader() { var shardGroup = createShardGroup(); cache.updateShardGroup(shardGroup); - var leader = Metapb.Shard.newBuilder().setStoreId(2).setRole(Metapb.ShardRole.Leader).build(); + var leader = + Metapb.Shard.newBuilder().setStoreId(2).setRole(Metapb.ShardRole.Leader).build(); cache.updateShardGroupLeader(shardGroup.getId(), leader); assertEquals(cache.getLeaderShard(shardGroup.getId()), leader); } - private static Metapb.Partition createPartition(int pid, String graphName, long start, long end){ - return Metapb.Partition.newBuilder() - .setId(pid) - .setGraphName(graphName) - .setStartKey(start) - .setEndKey(end) - .setState(Metapb.PartitionState.PState_Normal) - .setVersion(1) - .build(); - } - - private static Metapb.ShardGroup creteShardGroup(int pid) { - return Metapb.ShardGroup.newBuilder() - .addShards( - Metapb.Shard.newBuilder().setStoreId(0).setRole(Metapb.ShardRole.Leader).build() - ) - .setId(pid) - .setVersion(0) - 
.setConfVer(0) - .setState(Metapb.PartitionState.PState_Normal) - .build(); - } - - private static Metapb.Shard createShard(){ - return Metapb.Shard.newBuilder() - .setStoreId(0) - .setRole(Metapb.ShardRole.Leader) - .build(); - } - - private static Metapb.Store createStore(long storeId){ - return Metapb.Store.newBuilder() - .setId(storeId) - .setAddress("127.0.0.1") - .setCores(4) - .setVersion("1") - .setDataPath("/tmp/junit") - .setDataVersion(1) - .setLastHeartbeat(System.currentTimeMillis()) - .setStartTimestamp(System.currentTimeMillis()) - .setState(Metapb.StoreState.Up) - .setDeployPath("/tmp/junit") - .build(); - } - - private static Metapb.Graph createGraph(String graphName, int partitionCount){ - return Metapb.Graph.newBuilder() - .setGraphName(graphName) - .setPartitionCount(partitionCount) - .setState(Metapb.PartitionState.PState_Normal) - .build(); - } - - private static Metapb.ShardGroup createShardGroup(){ - List shards = new ArrayList<>() ; - for (int i = 0 ; i < 3 ; i ++ ) { - shards.add(Metapb.Shard.newBuilder() - .setStoreId(i) - .setRole( i == 0 ? Metapb.ShardRole.Leader : Metapb.ShardRole.Follower) - .build() - ); - } - - return Metapb.ShardGroup.newBuilder() - .setId(1) - .setVersion(1) - .setConfVer(1) - .setState(Metapb.PartitionState.PState_Normal) - .addAllShards(shards) - .build(); - } - } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java index 958d916191..6d6802ab22 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java @@ -1,11 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.common; -import lombok.extern.slf4j.Slf4j; import org.junit.Assert; import org.junit.Test; import com.baidu.hugegraph.pd.common.PartitionUtils; +import lombok.extern.slf4j.Slf4j; + @Slf4j public class PartitionUtilsTest extends BaseCommonTest { diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java index 72292625ce..9dc8dfb0c0 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java @@ -1,18 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.core; -import com.baidu.hugegraph.pd.ConfigService; -import com.baidu.hugegraph.pd.config.PDConfig; +import java.io.File; +import java.io.IOException; + import org.apache.commons.io.FileUtils; import org.junit.After; import org.junit.BeforeClass; -import java.io.File; -import java.io.IOException; +import com.baidu.hugegraph.pd.ConfigService; +import com.baidu.hugegraph.pd.config.PDConfig; public class BaseCoreTest { static com.baidu.hugegraph.pd.config.PDConfig pdConfig; + @BeforeClass public static void init() throws Exception { String path = "tmp/unitTest"; @@ -20,7 +39,7 @@ public static void init() throws Exception { pdConfig = new com.baidu.hugegraph.pd.config.PDConfig() {{ this.setClusterId(100); this.setInitialStoreList("127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502," + - "127.0.0.1:8503,127.0.0.1:8504,127.0.0.1:8505"); + "127.0.0.1:8503,127.0.0.1:8504,127.0.0.1:8505"); }}; pdConfig.setStore(new com.baidu.hugegraph.pd.config.PDConfig().new Store() {{ @@ -32,7 +51,7 @@ public static void init() throws Exception { this.setShardCount(3); this.setMaxShardsPerStore(3); }}); - pdConfig.setRaft(new com.baidu.hugegraph.pd.config.PDConfig().new Raft(){{ + pdConfig.setRaft(new com.baidu.hugegraph.pd.config.PDConfig().new Raft() {{ this.setEnable(false); }}); pdConfig.setDiscovery(new PDConfig().new Discovery()); @@ -41,16 +60,16 @@ public static void init() throws Exception { pdConfig = configService.loadConfig(); } - @After - public void teardown() throws Exception { - // pass - } - public static void deleteDirectory(File dir) { try { FileUtils.deleteDirectory(dir); } catch (IOException e) { - System.out.println(String.format("Failed to start ....,%s", e.getMessage())); + System.out.printf("Failed to start ....,%s%n", e.getMessage()); } } + + @After + public void teardown() throws Exception { + // pass + } } \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java index a0b72cad6a..fc4e5c9ae0 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java @@ -1,12 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.core; import org.apache.hugegraph.pd.core.meta.MetadataKeyHelperTest; - -import lombok.extern.slf4j.Slf4j; - import org.junit.runner.RunWith; import org.junit.runners.Suite; +import lombok.extern.slf4j.Slf4j; + @RunWith(Suite.class) @Suite.SuiteClasses({ diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java index 567f1345da..94bb05fdbc 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java @@ -1,15 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.core; +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; + import com.baidu.hugegraph.pd.PartitionService; import com.baidu.hugegraph.pd.StoreNodeService; import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Metapb; -import lombok.extern.slf4j.Slf4j; -import org.junit.Assert; -import org.junit.Test; -import java.util.ArrayList; -import java.util.List; +import lombok.extern.slf4j.Slf4j; @Slf4j public class StoreNodeServiceTest extends BaseCoreTest { @@ -18,21 +37,23 @@ public class StoreNodeServiceTest extends BaseCoreTest { @Test public void testStoreNodeService() throws PDException { Assert.assertEquals(pdConfig.getPartition().getTotalCount(), - pdConfig.getInitialStoreMap().size() * pdConfig.getPartition().getMaxShardsPerStore() - / pdConfig.getPartition().getShardCount()); + pdConfig.getInitialStoreMap().size() * + pdConfig.getPartition().getMaxShardsPerStore() + / pdConfig.getPartition().getShardCount()); StoreNodeService storeService = new StoreNodeService(pdConfig); storeService.init(new PartitionService(pdConfig, storeService)); int count = 6; Metapb.Store[] stores = new Metapb.Store[count]; for (int i = 0; i < count; i++) { Metapb.Store store = Metapb.Store.newBuilder() - .setId(0) - .setAddress("127.0.0.1:850" + i) - .setDeployPath("/data") - .addLabels(Metapb.StoreLabel.newBuilder() - .setKey("namespace") - .setValue("default").build()) - .build(); + .setId(0) + .setAddress("127.0.0.1:850" + i) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default") + .build()) + .build(); stores[i] = storeService.register(store); System.out.println("Newly registered store, id = " + stores[i].getId()); } @@ -40,34 +61,35 @@ public void testStoreNodeService() throws PDException { for (Metapb.Store store : stores) { Metapb.StoreStats stats = Metapb.StoreStats.newBuilder() - .setStoreId(store.getId()) - .build(); + .setStoreId(store.getId()) + .build(); storeService.heartBeat(stats); } Assert.assertEquals(6, storeService.getActiveStores("").size()); Metapb.Graph graph = Metapb.Graph.newBuilder() - .setGraphName("defaultGH") - .setPartitionCount(10) - .build(); + .setGraphName("defaultGH") + .setPartitionCount(10) + .build(); // allocate shards List shards = storeService.allocShards(graph, 1); Assert.assertEquals(3, shards.size()); // set the leader - Assert.assertEquals(pdConfig.getPartition().getTotalCount(), storeService.getShardGroups().size()); + Assert.assertEquals(pdConfig.getPartition().getTotalCount(), + storeService.getShardGroups().size()); Metapb.Shard leader = Metapb.Shard.newBuilder(shards.get(0)) - .setRole(Metapb.ShardRole.Leader).build(); + .setRole(Metapb.ShardRole.Leader).build(); shards = new ArrayList<>(shards); shards.set(0, leader); // add shards pdConfig.getPartition().setShardCount(5); Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() - .setId(1) - .addAllShards(shards).build(); + .setId(1) + .addAllShards(shards).build(); shards = storeService.reallocShards(shardGroup); Assert.assertEquals(5, shards.size()); diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java index 0909fcf25a..4c64e355ee 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java @@ 
-1,16 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.core.meta; -import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; -import org.junit.Test; +import static org.junit.Assert.assertTrue; import java.util.Arrays; -import static org.junit.Assert.assertTrue; +import org.junit.Test; + +import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; public class MetadataKeyHelperTest { @Test - public void testMoveTaskKey(){ + public void testMoveTaskKey() { var key = MetadataKeyHelper.getMoveTaskKey("foo", 0, 1); assertTrue(Arrays.equals(key, "TASK_MOVE/foo/0/1".getBytes())); var key2 = MetadataKeyHelper.getMoveTaskPrefix("foo"); diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java index df7467163d..917a3f74fd 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java @@ -1,10 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.grpc; import org.junit.After; import org.junit.BeforeClass; - public class BaseGrpcTest { @BeforeClass diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java index acb4b12509..4806f3ed02 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java @@ -1,9 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.grpc; -import lombok.extern.slf4j.Slf4j; import org.junit.runner.RunWith; import org.junit.runners.Suite; +import lombok.extern.slf4j.Slf4j; + @RunWith(Suite.class) @Suite.SuiteClasses({ diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java index 57433f8f6b..05526caf97 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java @@ -1,29 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.service; -import com.baidu.hugegraph.pd.config.PDConfig; +import java.io.File; +import java.net.http.HttpClient; + import org.apache.commons.io.FileUtils; import org.junit.After; import org.junit.BeforeClass; -import java.net.http.HttpClient; -import java.io.File; +import com.baidu.hugegraph.pd.config.PDConfig; public class BaseServerTest { public static HttpClient client; public static String pdRestAddr; - @BeforeClass - public static void init() { - client = HttpClient.newHttpClient(); - pdRestAddr = "http://127.0.0.1:8620"; - } - @After - public void teardown() { - // pass + @BeforeClass + public static void init() { + client = HttpClient.newHttpClient(); + pdRestAddr = "http://127.0.0.1:8620"; } - public static PDConfig getConfig(){ + public static PDConfig getConfig() { FileUtils.deleteQuietly(new File("tmp/test/")); PDConfig pdConfig = new PDConfig() {{ this.setClusterId(100); @@ -36,4 +50,9 @@ public static PDConfig getConfig(){ return pdConfig; } + @After + public void teardown() { + // pass + } + } \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java index 2e91d62ce2..f5ebdb5b0c 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java @@ -1,18 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.service; -import com.baidu.hugegraph.pd.ConfigService; -import com.baidu.hugegraph.pd.IdService; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.Metapb; +import java.util.List; + import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import java.util.List; +import com.baidu.hugegraph.pd.ConfigService; +import com.baidu.hugegraph.pd.IdService; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Metapb; public class ConfigServiceTest { - private PDConfig config = BaseServerTest.getConfig(); + private final PDConfig config = BaseServerTest.getConfig(); private ConfigService service; @@ -24,7 +42,7 @@ public void setUp() { @Test public void testGetPDConfig() throws Exception { // Setup - try{ + try { final Metapb.PDConfig config = Metapb.PDConfig.newBuilder() .setVersion(0L) .setPartitionCount(0) @@ -41,8 +59,6 @@ public void testGetPDConfig() throws Exception { Assert.assertTrue(result.getShardCount() == 55); } catch (Exception e) { - } finally { - } } @@ -51,7 +67,7 @@ public void testGetPDConfig() throws Exception { public void testGetGraphSpace() throws Exception { // Setup Metapb.GraphSpace space = Metapb.GraphSpace.newBuilder() - .setName("gs1") + .setName("gs1") .setTimestamp(0L).build(); final List expectedResult = List.of(space); service.setGraphSpace(space); @@ -61,9 +77,10 @@ public void testGetGraphSpace() throws Exception { Assert.assertTrue(result.size() == 1); } + @Test public void testUpdatePDConfig() { - try{ + try { final Metapb.PDConfig mConfig = Metapb.PDConfig.newBuilder() .setVersion(0L) .setPartitionCount(0) @@ -85,8 +102,6 @@ public void testUpdatePDConfig() { service.updatePDConfig(mConfig); } catch (Exception e) { - } finally { - } } } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java index 5783cd73fd..6555d0742c 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java @@ -1,83 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.service; -import com.baidu.hugegraph.pd.IdService; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.meta.IdMetaStore; +import java.io.File; + import org.apache.commons.io.FileUtils; import org.junit.Assert; import org.junit.Test; -import java.io.File; +import com.baidu.hugegraph.pd.IdService; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.meta.IdMetaStore; public class IdServiceTest { - @Test + @Test public void testCid() { - try{ - PDConfig pdConfig = BaseServerTest.getConfig(); - int max = 0x2000; - IdService idService = new IdService(pdConfig); - for (int i = 0; i < max; i++) { - idService.getCId("test", max); - } - idService.delCId("test", 1); - idService.delCId("test", 0x10); - idService.delCId("test", 0x100); - idService.delCId("test", 0x1000); + try { + PDConfig pdConfig = BaseServerTest.getConfig(); + int max = 0x2000; + IdService idService = new IdService(pdConfig); + for (int i = 0; i < max; i++) { + idService.getCId("test", max); + } + idService.delCId("test", 1); + idService.delCId("test", 0x10); + idService.delCId("test", 0x100); + idService.delCId("test", 0x1000); - Assert.assertEquals(1, idService.getCId("test", max)); - Assert.assertEquals(0x10, idService.getCId("test", max)); - Assert.assertEquals(0x100, idService.getCId("test", max)); - Assert.assertEquals(0x1000, idService.getCId("test", max)); - Assert.assertEquals(-1, idService.getCId("test", max)); + Assert.assertEquals(1, idService.getCId("test", max)); + Assert.assertEquals(0x10, idService.getCId("test", max)); + Assert.assertEquals(0x100, idService.getCId("test", max)); + Assert.assertEquals(0x1000, idService.getCId("test", max)); + Assert.assertEquals(-1, idService.getCId("test", max)); - idService.delCId("test", 1); - idService.delCId("test", 0x10); - idService.delCId("test", 0x100); - idService.delCId("test", 0x1000); + idService.delCId("test", 1); + idService.delCId("test", 0x10); + idService.delCId("test", 0x100); + idService.delCId("test", 0x1000); - long cid1 = idService.getCId("test", "name", max); - idService.delCIdDelay("test", "name", cid1); - long cid2 = idService.getCId("test", "name", max); + long cid1 = idService.getCId("test", "name", max); + idService.delCIdDelay("test", "name", cid1); + long cid2 = idService.getCId("test", "name", max); - Assert.assertEquals(cid1, cid2); - idService.delCIdDelay("test", "name", cid2); - Thread.sleep(5000); - long cid3 = idService.getCId("test", "name", max); - } catch (Exception e) { + Assert.assertEquals(cid1, cid2); + idService.delCIdDelay("test", "name", cid2); + Thread.sleep(5000); + long cid3 = idService.getCId("test", "name", max); + } catch (Exception e) { - } - // MetadataFactory.closeStore(); + } + // MetadataFactory.closeStore(); } @Test public void testId() { - try{ - FileUtils.deleteQuietly(new File("tmp/testId/")); - IdMetaStore.CID_DEL_TIMEOUT = 2000; - PDConfig pdConfig = new PDConfig() {{ - this.setClusterId(100); - this.setPatrolInterval(1); - this.setRaft(new Raft() {{ - setEnable(false); - }}); - this.setDataPath("tmp/testId/"); - 
}}; - IdService idService = new IdService(pdConfig); - long first = idService.getId("abc", 100); - Assert.assertEquals(first, 0L); - long second = idService.getId("abc", 100); - Assert.assertEquals(second, 100L); - idService.resetId("abc"); - first = idService.getId("abc", 100); - Assert.assertEquals(first, 0L); - } catch (Exception e) { + try { + FileUtils.deleteQuietly(new File("tmp/testId/")); + IdMetaStore.CID_DEL_TIMEOUT = 2000; + PDConfig pdConfig = new PDConfig() {{ + this.setClusterId(100); + this.setPatrolInterval(1); + this.setRaft(new Raft() {{ + setEnable(false); + }}); + this.setDataPath("tmp/testId/"); + }}; + IdService idService = new IdService(pdConfig); + long first = idService.getId("abc", 100); + Assert.assertEquals(first, 0L); + long second = idService.getId("abc", 100); + Assert.assertEquals(second, 100L); + idService.resetId("abc"); + first = idService.getId("abc", 100); + Assert.assertEquals(first, 0L); + } catch (Exception ignored) { - } + } // MetadataFactory.closeStore(); } + @Test public void testMember() { - try{ + try { PDConfig pdConfig = BaseServerTest.getConfig(); IdService idService = new IdService(pdConfig); idService.setPdConfig(pdConfig); diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java index fba4e837c4..826d3a41c9 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java @@ -1,10 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.service; -import com.baidu.hugegraph.pd.KvService; -import com.baidu.hugegraph.pd.config.PDConfig; import org.junit.Assert; import org.junit.Test; +import com.baidu.hugegraph.pd.KvService; +import com.baidu.hugegraph.pd.config.PDConfig; + public class KvServiceTest { @Test diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java index b48ba4ef68..667217b4cd 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java @@ -1,18 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.service; -import com.baidu.hugegraph.pd.LogService; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.google.protobuf.Any; +import java.util.List; + import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import java.util.List; +import com.baidu.hugegraph.pd.LogService; +import com.baidu.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.google.protobuf.Any; public class LogServiceTest { - private PDConfig mockPdConfig = BaseServerTest.getConfig(); + private final PDConfig mockPdConfig = BaseServerTest.getConfig(); private LogService logServiceUnderTest; diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java index 9cf617de0f..1dd6370acb 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java @@ -1,5 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.service; +import static org.junit.Assert.assertEquals; + +import java.util.List; + +import org.junit.Before; +import org.junit.Test; + import com.baidu.hugegraph.pd.PartitionService; import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.MetaTask; @@ -7,19 +31,12 @@ import com.baidu.hugegraph.pd.grpc.pulse.CleanPartition; import com.baidu.hugegraph.pd.grpc.pulse.CleanType; -import org.junit.Before; -import org.junit.Test; - -import java.util.List; - -import static org.junit.Assert.assertEquals; - public class PartitionServiceTest extends PdTestBase { private PartitionService service; @Before - public void init(){ + public void init() { service = getPartitionService(); } @@ -36,7 +53,7 @@ public void testCombinePartition() throws PDException { var tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); assertEquals(11, tasks.size()); - for (MetaTask.Task task : tasks){ + for (MetaTask.Task task : tasks) { var newTask = task.toBuilder().setState(MetaTask.TaskState.Task_Success).build(); getTaskService().reportTask(newTask); } @@ -58,7 +75,7 @@ public void testCombinePartition2() throws PDException { var tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); assertEquals(11, tasks.size()); - for (MetaTask.Task task : tasks){ + for (MetaTask.Task task : tasks) { var newTask = task.toBuilder().setState(MetaTask.TaskState.Task_Failure).build(); getTaskService().reportTask(newTask); } @@ -68,43 +85,47 @@ public void testCombinePartition2() throws PDException { } @Test - public void testHandleCleanTask(){ + public void testHandleCleanTask() { MetaTask.Task task = MetaTask.Task.newBuilder() - .setType(MetaTask.TaskType.Clean_Partition) - .setPartition(Metapb.Partition.newBuilder().setGraphName("foo").setId(0).build()) - .setCleanPartition(CleanPartition.newBuilder() - .setCleanType(CleanType.CLEAN_TYPE_KEEP_RANGE) - .setDeletePartition(true) - .setKeyStart(0) - .setKeyEnd(10) - .build()) - .build(); + .setType(MetaTask.TaskType.Clean_Partition) + .setPartition( + Metapb.Partition.newBuilder().setGraphName("foo") + .setId(0).build()) + .setCleanPartition(CleanPartition.newBuilder() + .setCleanType( + CleanType.CLEAN_TYPE_KEEP_RANGE) + .setDeletePartition(true) + .setKeyStart(0) + .setKeyEnd(10) + .build()) + .build(); getTaskService().reportTask(task); } private void buildEnv() throws PDException { var storeInfoMeta = getStoreNodeService().getStoreInfoMeta(); storeInfoMeta.updateStore(Metapb.Store.newBuilder() - .setId(99) - .setState(Metapb.StoreState.Up) - .build()); + .setId(99) + .setState(Metapb.StoreState.Up) + .build()); long lastId = 0; - for (int i = 0; i < 12; i++){ + for (int i = 0; i < 12; i++) { Metapb.Shard shard = Metapb.Shard.newBuilder() - .setStoreId(99) - .setRole(Metapb.ShardRole.Leader) - .build(); + .setStoreId(99) + .setRole(Metapb.ShardRole.Leader) + .build(); Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() - .setId(i) - .setState(Metapb.PartitionState.PState_Normal) - .addAllShards(List.of(shard)) - .build(); + .setId(i) + .setState( + Metapb.PartitionState.PState_Normal) + .addAllShards(List.of(shard)) + .build(); storeInfoMeta.updateShardGroup(shardGroup); var partitionShard = service.getPartitionByCode("graph0", lastId); - if (partitionShard != null){ + if (partitionShard != null) { lastId = partitionShard.getPartition().getEndKey(); } } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java 
b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java index 522ed30585..deb48a7c24 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java @@ -1,5 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.service; +import java.io.File; + +import org.junit.AfterClass; +import org.junit.BeforeClass; + import com.baidu.hugegraph.pd.ConfigService; import com.baidu.hugegraph.pd.IdService; import com.baidu.hugegraph.pd.PartitionInstructionListener; @@ -20,21 +42,15 @@ import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition; import com.baidu.hugegraph.pd.grpc.pulse.TransferLeader; import com.baidu.hugegraph.pd.raft.RaftEngine; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.io.File; public class PdTestBase { + private static final String DATA_PATH = "/tmp/pd_data"; private static PDConfig pdConfig; - private static StoreNodeService storeNodeService; private static PartitionService partitionService; private static TaskScheduleService taskService; private static StoreMonitorDataService storeMonitorDataService; - private static final String DATA_PATH = "/tmp/pd_data"; - @BeforeClass public static void initService() throws PDException { deleteDir(new File(DATA_PATH)); @@ -57,7 +73,7 @@ public static void initService() throws PDException { config.setRaft(raft); config.setStore(new PDConfig().new Store()); - config.setPartition( new PDConfig().new Partition(){{ + config.setPartition(new PDConfig().new Partition() {{ setShardCount(1); setTotalCount(12); setMaxShardsPerStore(12); @@ -87,37 +103,44 @@ public static void initService() throws PDException { partitionService.init(); partitionService.addInstructionListener(new PartitionInstructionListener() { @Override - public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException { + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws + PDException { } @Override - public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws PDException { + public void transferLeader(Metapb.Partition partition, + TransferLeader transferLeader) throws PDException { } @Override - public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws PDException { + public void splitPartition(Metapb.Partition partition, + SplitPartition splitPartition) throws PDException { } @Override - public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException { + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws + PDException { } @Override - public void 
movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException { + public void movePartition(Metapb.Partition partition, + MovePartition movePartition) throws PDException { } @Override - public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException { + public void cleanPartition(Metapb.Partition partition, + CleanPartition cleanPartition) throws PDException { } @Override - public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) + public void changePartitionKeyRange(Metapb.Partition partition, + PartitionKeyRange partitionKeyRange) throws PDException { } @@ -125,7 +148,8 @@ public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRang partitionService.addStatusListener(new PartitionStatusListener() { @Override - public void onPartitionChanged(Metapb.Partition partition, Metapb.Partition newPartition) { + public void onPartitionChanged(Metapb.Partition partition, + Metapb.Partition newPartition) { } @@ -137,12 +161,14 @@ public void onPartitionRemoved(Metapb.Partition partition) { storeNodeService.addStatusListener(new StoreStatusListener() { @Override - public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, Metapb.StoreState status) { + public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, + Metapb.StoreState status) { } @Override - public void onGraphChange(Metapb.Graph graph, Metapb.GraphState stateOld, Metapb.GraphState stateNew) { + public void onGraphChange(Metapb.Graph graph, Metapb.GraphState stateOld, + Metapb.GraphState stateNew) { } @@ -156,7 +182,7 @@ public void onStoreRaftChanged(Metapb.Store store) { } @AfterClass - public static void shutdownService(){ + public static void shutdownService() { var instance = RaftEngine.getInstance(); if (instance != null) { instance.shutDown(); @@ -180,7 +206,7 @@ public static PartitionService getPartitionService() { return partitionService; } - public static PDConfig getPdConfig(){ + public static PDConfig getPdConfig() { return pdConfig; } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java index 59981fa452..4719c22f8a 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java @@ -1,8 +1,21 @@ -package org.apache.hugegraph.pd.service; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ -import org.json.JSONException; -import org.json.JSONObject; -import org.junit.Test; +package org.apache.hugegraph.pd.service; import java.io.IOException; import java.net.URI; @@ -10,89 +23,100 @@ import java.net.http.HttpRequest; import java.net.http.HttpResponse; +import org.json.JSONException; +import org.json.JSONObject; +import org.junit.Test; + /** * @author tianxiaohui * @date 20221220 **/ public class RestApiTest extends BaseServerTest { @Test - public void testQueryClusterInfo() throws URISyntaxException, IOException, InterruptedException, JSONException { + public void testQueryClusterInfo() throws URISyntaxException, IOException, InterruptedException, + JSONException { String url = pdRestAddr + "/v1/cluster"; HttpRequest request = HttpRequest.newBuilder() - .uri(new URI(url)) - .GET() - .build(); + .uri(new URI(url)) + .GET() + .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); JSONObject obj = new JSONObject(response.body()); assert obj.getInt("status") == 0; } @Test - public void testQueryClusterMembers() throws URISyntaxException, IOException, InterruptedException, JSONException { + public void testQueryClusterMembers() throws URISyntaxException, IOException, + InterruptedException, JSONException { String url = pdRestAddr + "/v1/members"; HttpRequest request = HttpRequest.newBuilder() - .uri(new URI(url)) - .GET() - .build(); + .uri(new URI(url)) + .GET() + .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); JSONObject obj = new JSONObject(response.body()); assert obj.getInt("status") == 0; } @Test - public void testQueryStoresInfo() throws URISyntaxException, IOException, InterruptedException, JSONException { + public void testQueryStoresInfo() throws URISyntaxException, IOException, InterruptedException, + JSONException { String url = pdRestAddr + "/v1/stores"; HttpRequest request = HttpRequest.newBuilder() - .uri(new URI(url)) - .GET() - .build(); + .uri(new URI(url)) + .GET() + .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); JSONObject obj = new JSONObject(response.body()); assert obj.getInt("status") == 0; } @Test - public void testQueryGraphsInfo() throws IOException, InterruptedException, JSONException, URISyntaxException { + public void testQueryGraphsInfo() throws IOException, InterruptedException, JSONException, + URISyntaxException { String url = pdRestAddr + "/v1/graphs"; HttpRequest request = HttpRequest.newBuilder() - .uri(new URI(url)) - .GET() - .build(); + .uri(new URI(url)) + .GET() + .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); JSONObject obj = new JSONObject(response.body()); assert obj.getInt("status") == 0; } @Test - public void testQueryPartitionsInfo() throws IOException, InterruptedException, JSONException, URISyntaxException { + public void testQueryPartitionsInfo() throws IOException, InterruptedException, JSONException, + URISyntaxException { String url = pdRestAddr + "/v1/highLevelPartitions"; HttpRequest request = HttpRequest.newBuilder() - .uri(new URI(url)) - .GET() - .build(); + .uri(new URI(url)) + .GET() + .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); JSONObject obj = new JSONObject(response.body()); assert obj.getInt("status") == 0; } @Test - public void testQueryDebugPartitionsInfo() throws URISyntaxException, IOException, InterruptedException { + public void testQueryDebugPartitionsInfo() throws 
URISyntaxException, IOException, + InterruptedException { String url = pdRestAddr + "/v1/partitions"; HttpRequest request = HttpRequest.newBuilder() - .uri(new URI(url)) - .GET() - .build(); + .uri(new URI(url)) + .GET() + .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); assert response.statusCode() == 200; } @Test - public void testQueryShards() throws URISyntaxException, IOException, InterruptedException, JSONException { + public void testQueryShards() throws URISyntaxException, IOException, InterruptedException, + JSONException { String url = pdRestAddr + "/v1/shards"; HttpRequest request = HttpRequest.newBuilder() - .uri(new URI(url)) - .GET() - .build(); + .uri(new URI(url)) + .GET() + .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); JSONObject obj = new JSONObject(response.body()); assert obj.getInt("status") == 0; diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java index a38de53637..26ac056143 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java @@ -1,9 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.service; -import lombok.extern.slf4j.Slf4j; import org.junit.runner.RunWith; import org.junit.runners.Suite; +import lombok.extern.slf4j.Slf4j; + @RunWith(Suite.class) @Suite.SuiteClasses({ diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java index f2363f87fd..347de349cc 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java @@ -1,23 +1,41 @@ -package org.apache.hugegraph.pd.service; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.StoreMonitorDataService; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import org.junit.Before; -import org.junit.Test; - -import java.util.List; +package org.apache.hugegraph.pd.service; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -public class StoreMonitorDataServiceTest extends PdTestBase{ +import java.util.List; + +import org.junit.Before; +import org.junit.Test; + +import com.baidu.hugegraph.pd.StoreMonitorDataService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; + +public class StoreMonitorDataServiceTest extends PdTestBase { StoreMonitorDataService service; @Before - public void init(){ + public void init() { service = getStoreMonitorDataService(); var store = getPdConfig().getStore(); store.setMonitorDataEnabled(true); @@ -30,19 +48,19 @@ public void test() throws InterruptedException, PDException { long now = System.currentTimeMillis() / 1000; for (int i = 0; i < 5; i++) { service.saveMonitorData(genStats()); - now = System.currentTimeMillis() / 1000; + now = System.currentTimeMillis() / 1000; Thread.sleep(1100); } assertTrue(service.getLatestStoreMonitorDataTimeStamp(1) == 0 || - service.getLatestStoreMonitorDataTimeStamp(1) == now); + service.getLatestStoreMonitorDataTimeStamp(1) == now); var data = service.getStoreMonitorData(1); assertEquals(5, data.size()); assertNotNull(service.debugMonitorInfo(List.of(Metapb.RecordPair.newBuilder() - .setKey("key1") - .setValue(1) - .build()))); + .setKey("key1") + .setValue(1) + .build()))); assertNotNull(service.getStoreMonitorDataText(1)); @@ -52,11 +70,13 @@ public void test() throws InterruptedException, PDException { } - private Metapb.StoreStats genStats(){ + private Metapb.StoreStats genStats() { return Metapb.StoreStats.newBuilder() - .setStoreId(1) - .addSystemMetrics(Metapb.RecordPair.newBuilder().setKey("key1").setValue(1).build()) - .build(); + .setStoreId(1) + .addSystemMetrics( + Metapb.RecordPair.newBuilder().setKey("key1").setValue(1) + .build()) + .build(); } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java index 163c23699a..f877b20bdb 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java @@ -1,28 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.service; -import com.baidu.hugegraph.pd.StoreNodeService; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + import org.junit.Before; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; +import com.baidu.hugegraph.pd.StoreNodeService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; -public class StoreNodeServiceNewTest extends PdTestBase{ +public class StoreNodeServiceNewTest extends PdTestBase { private StoreNodeService service; @Before - public void init(){ + public void init() { service = getStoreNodeService(); } @Test - public void testGetTaskInfoMeta(){ + public void testGetTaskInfoMeta() { assertNotNull(service.getTaskInfoMeta()); } - public void testGetStoreInfoMeta(){ + public void testGetStoreInfoMeta() { assertNotNull(service.getStoreInfoMeta()); } @@ -30,9 +48,10 @@ public void testGetStoreInfoMeta(){ public void testRemoveShardGroup() throws PDException { for (int i = 0; i < 12; i++) { Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder() - .setId(i) - .setState(Metapb.PartitionState.PState_Offline) - .build(); + .setId(i) + .setState( + Metapb.PartitionState.PState_Offline) + .build(); service.getStoreInfoMeta().updateShardGroup(group); } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java index b5e0cd97b7..40351d6819 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java @@ -1,5 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.service; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; + +import org.junit.Before; +import org.junit.Test; + import com.baidu.hugegraph.pd.ConfigService; import com.baidu.hugegraph.pd.IdService; import com.baidu.hugegraph.pd.PartitionService; @@ -8,15 +36,6 @@ import com.baidu.hugegraph.pd.config.PDConfig; import com.baidu.hugegraph.pd.grpc.MetaTask; import com.baidu.hugegraph.pd.grpc.Metapb; -import static org.assertj.core.api.Assertions.assertThat; -import org.junit.Before; -import org.junit.Test; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.util.List; -import java.util.Map; -import java.util.function.Consumer; public class StoreServiceTest { @@ -589,8 +608,6 @@ public void testUpdateShardGroup() { service.updateShardGroup(0, shards, 0, 0); } catch (Exception e) { - } finally { - } } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java index b787027cb6..9bff87cd1e 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java @@ -1,39 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.service; -import com.baidu.hugegraph.pd.TaskScheduleService; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import org.junit.Before; -import org.junit.Test; +import static org.junit.Assert.assertTrue; import java.util.ArrayList; import java.util.List; -import static org.junit.Assert.assertTrue; +import org.junit.Before; +import org.junit.Test; -public class TaskScheduleServiceTest extends PdTestBase{ +import com.baidu.hugegraph.pd.TaskScheduleService; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; + +public class TaskScheduleServiceTest extends PdTestBase { TaskScheduleService service; @Before - public void init(){ + public void init() { service = getTaskService(); } @Test - public void testStoreOffline(){ + public void testStoreOffline() { } - public void testPatrolStores(){ + public void testPatrolStores() { } - public void testPatrolPartitions(){ + public void testPatrolPartitions() { } - public void testBalancePartitionShard(){ + public void testBalancePartitionShard() { } @@ -41,7 +59,7 @@ public void testBalancePartitionShard(){ public void testBalancePartitionLeader() throws PDException { var list = new ArrayList(); - for (int i = 0; i < 6; i++){ + for (int i = 0; i < 6; i++) { getStoreNodeService().getStoreInfoMeta().updateShardGroup(genShardGroup(i)); list.add(genPartition(i)); } @@ -50,42 +68,44 @@ public void testBalancePartitionLeader() throws PDException { getPartitionService().updatePartition(list); var rst = service.balancePartitionLeader(true); - assertTrue(rst.size() > 0 ); + assertTrue(rst.size() > 0); // recover getPdConfig().getPartition().setShardCount(1); getStoreNodeService().getStoreInfoMeta().removeAll(); } - public void testSplitPartition(){ + public void testSplitPartition() { } - public void testSplitPartition2(){ + + public void testSplitPartition2() { } - public void testCanAllPartitionsMovedOut(){ + public void testCanAllPartitionsMovedOut() { } - private Metapb.ShardGroup genShardGroup(int groupId){ + private Metapb.ShardGroup genShardGroup(int groupId) { return Metapb.ShardGroup.newBuilder() - .setId(groupId) - .addAllShards(genShards()) - .build(); + .setId(groupId) + .addAllShards(genShards()) + .build(); } - private Metapb.Partition genPartition(int groupId){ + private Metapb.Partition genPartition(int groupId) { return Metapb.Partition.newBuilder() - .setId(groupId) - .setState(Metapb.PartitionState.PState_Normal) - .setGraphName("graph1") - .setStartKey(groupId * 10) - .setEndKey(groupId * 10 + 10) - .build(); + .setId(groupId) + .setState(Metapb.PartitionState.PState_Normal) + .setGraphName("graph1") + .setStartKey(groupId * 10) + .setEndKey(groupId * 10 + 10) + .build(); } - private List genShards(){ - return List.of(Metapb.Shard.newBuilder().setStoreId(1).setRole(Metapb.ShardRole.Leader).build(), + private List genShards() { + return List.of( + Metapb.Shard.newBuilder().setStoreId(1).setRole(Metapb.ShardRole.Leader).build(), Metapb.Shard.newBuilder().setStoreId(2).setRole(Metapb.ShardRole.Follower).build(), Metapb.Shard.newBuilder().setStoreId(3).setRole(Metapb.ShardRole.Follower).build()); } From 61ecb66e8fa9a6e253d13a62d5fa444d7d2864f1 Mon Sep 17 00:00:00 2001 From: imbajin Date: Sat, 6 May 2023 19:44:31 +0800 Subject: [PATCH 04/18] refact: add header/format in core & common & clitools & client modules Change-Id: Id8ec0a0d754f646c59f7251158dd89de669b2016 --- hg-pd-client/pom.xml | 21 +- 
.../hugegraph/pd/client/AbstractClient.java | 96 ++- .../pd/client/AbstractClientStubProxy.java | 40 +- .../hugegraph/pd/client/Discoverable.java | 20 +- .../hugegraph/pd/client/DiscoveryClient.java | 47 +- .../pd/client/DiscoveryClientImpl.java | 39 +- .../apache/hugegraph/pd/client/KvClient.java | 91 ++- .../hugegraph/pd/client/LicenseClient.java | 24 +- .../apache/hugegraph/pd/client/PDClient.java | 575 ++++++++------ .../apache/hugegraph/pd/client/PDConfig.java | 26 +- .../apache/hugegraph/pd/client/PDPulse.java | 63 +- .../hugegraph/pd/client/PDPulseImpl.java | 44 +- .../apache/hugegraph/pd/client/PDWatch.java | 52 +- .../hugegraph/pd/client/PDWatchImpl.java | 56 +- .../hugegraph/pd/pulse/PartitionNotice.java | 31 +- .../hugegraph/pd/pulse/PulseServerNotice.java | 18 + .../apache/hugegraph/pd/watch/NodeEvent.java | 80 +- .../apache/hugegraph/pd/watch/PDWatcher.java | 17 + .../hugegraph/pd/watch/PartitionEvent.java | 72 +- .../apache/hugegraph/pd/watch/WatchType.java | 23 +- .../hugegraph/pd/PartitionCacheTest.java | 42 +- .../hugegraph/pd/StoreRegisterTest.java | 60 +- .../pd/client/DiscoveryClientImplTest.java | 109 +-- .../pd/client/LicenseClientImplTest.java | 36 +- .../hugegraph/pd/client/PDPulseTest.java | 41 +- .../hugegraph/pd/client/PDWatchTest.java | 54 +- .../pd/client/test/HgPDTestUtil.java | 17 + hg-pd-clitools/pom.xml | 21 +- .../apache/hugegraph/pd/clitools/Main.java | 43 +- .../hugegraph/pd/clitools/MainTest.java | 65 +- hg-pd-common/pom.xml | 21 +- .../apache/hugegraph/pd/common/HgAssert.java | 26 +- .../apache/hugegraph/pd/common/KVPair.java | 55 +- .../hugegraph/pd/common/PDException.java | 19 +- .../pd/common/PDRuntimeException.java | 17 + .../hugegraph/pd/common/PartitionCache.java | 120 +-- .../hugegraph/pd/common/PartitionUtils.java | 25 +- hg-pd-core/pom.xml | 21 +- .../apache/hugegraph/pd/ConfigService.java | 58 +- .../org/apache/hugegraph/pd/IdService.java | 37 +- .../org/apache/hugegraph/pd/KvService.java | 105 +-- .../org/apache/hugegraph/pd/LogService.java | 30 +- .../pd/PartitionInstructionListener.java | 31 +- .../apache/hugegraph/pd/PartitionService.java | 710 ++++++++++-------- .../hugegraph/pd/PartitionStatusListener.java | 18 + .../apache/hugegraph/pd/RegistryService.java | 26 +- .../pd/ShardGroupStatusListener.java | 17 + .../hugegraph/pd/StoreMonitorDataService.java | 145 ++-- .../apache/hugegraph/pd/StoreNodeService.java | 383 ++++++---- .../hugegraph/pd/StoreStatusListener.java | 20 +- .../hugegraph/pd/TaskScheduleService.java | 340 +++++---- .../apache/hugegraph/pd/config/PDConfig.java | 112 +-- .../hugegraph/pd/meta/ConfigMetaStore.java | 34 +- .../hugegraph/pd/meta/DiscoveryMetaStore.java | 59 +- .../apache/hugegraph/pd/meta/IdMetaStore.java | 117 +-- .../org/apache/hugegraph/pd/meta/LogMeta.java | 28 +- .../hugegraph/pd/meta/MetadataFactory.java | 38 +- .../hugegraph/pd/meta/MetadataKeyHelper.java | 181 +++-- .../pd/meta/MetadataRocksDBStore.java | 46 +- .../hugegraph/pd/meta/MetadataStoreBase.java | 60 +- .../hugegraph/pd/meta/PartitionMeta.java | 110 +-- .../apache/hugegraph/pd/meta/QueueStore.java | 24 +- .../hugegraph/pd/meta/StoreInfoMeta.java | 79 +- .../hugegraph/pd/meta/TaskInfoMeta.java | 60 +- .../pd/raft/FutureClosureAdapter.java | 27 +- .../apache/hugegraph/pd/raft/KVOperation.java | 57 +- .../hugegraph/pd/raft/KVStoreClosure.java | 17 + .../apache/hugegraph/pd/raft/RaftEngine.java | 94 ++- .../hugegraph/pd/raft/RaftRpcClient.java | 37 +- .../hugegraph/pd/raft/RaftRpcProcessor.java | 111 +-- 
.../hugegraph/pd/raft/RaftStateListener.java | 17 + .../hugegraph/pd/raft/RaftStateMachine.java | 92 ++- .../hugegraph/pd/raft/RaftTaskHandler.java | 17 + .../apache/hugegraph/pd/raft/ZipUtils.java | 55 +- .../pd/store/BaseKVStoreClosure.java | 18 + .../apache/hugegraph/pd/store/HgKVStore.java | 26 +- .../hugegraph/pd/store/HgKVStoreImpl.java | 75 +- .../org/apache/hugegraph/pd/store/KV.java | 34 +- .../hugegraph/pd/store/RaftKVStore.java | 88 ++- .../hugegraph/pd/MonitorServiceTest.java | 51 +- .../hugegraph/pd/PartitionServiceTest.java | 27 +- .../hugegraph/pd/StoreNodeServiceTest.java | 308 ++++---- .../org/apache/hugegraph/pd/UnitTestBase.java | 17 + .../pd/common/PartitionUtilsTest.java | 20 +- .../hugegraph/pd/store/HgKVStoreImplTest.java | 40 +- hg-pd-core/src/test/resources/log4j2.xml | 35 +- 86 files changed, 4156 insertions(+), 2202 deletions(-) diff --git a/hg-pd-client/pom.xml b/hg-pd-client/pom.xml index f11d5069aa..124830a7a9 100644 --- a/hg-pd-client/pom.xml +++ b/hg-pd-client/pom.xml @@ -1,6 +1,23 @@ - + + 4.0.0 diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java index 46ab774d04..cd83f16fcb 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.client; import java.io.Closeable; @@ -37,6 +54,8 @@ @Slf4j public abstract class AbstractClient implements Closeable { + public static Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build(); protected final Pdpb.RequestHeader header; protected final AbstractClientStubProxy stubProxy; protected final PDConfig config; @@ -50,36 +69,47 @@ protected AbstractClient(PDConfig config) { this.config = config; } + public static Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setTypeValue(errorCode).setMessage(errorMsg)).build(); + return header; + } + + protected static void handleErrors(Pdpb.ResponseHeader header) throws PDException { + if (header.hasError() && header.getError().getType() != Pdpb.ErrorType.OK) { + throw new PDException(header.getError().getTypeValue(), + String.format("PD request error, error code = %d, msg = %s", + header.getError().getTypeValue(), + header.getError().getMessage())); + } + } + private AbstractBlockingStub getBlockingStub() throws PDException { if (stubProxy.getBlockingStub() == null) { synchronized (this) { if (stubProxy.getBlockingStub() == null) { String host = resetStub(); - if (host.isEmpty()) throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, - "PD unreachable, pd.peers=" + config.getServerHost()); + if (host.isEmpty()) { + throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, + "PD unreachable, pd.peers=" + + config.getServerHost()); + } } } } - ; return stubProxy.getBlockingStub(); } - public static Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( - Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build(); - - public static Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) { - Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( - Pdpb.Error.newBuilder().setTypeValue(errorCode).setMessage(errorMsg)).build(); - return header; - } - private AbstractStub getStub() throws PDException { if (stubProxy.getStub() == null) { synchronized (this) { if (stubProxy.getStub() == null) { String host = resetStub(); - if (host.isEmpty()) throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, - "PD unreachable, pd.peers=" + config.getServerHost()); + if (host.isEmpty()) { + throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, + "PD unreachable, pd.peers=" + + config.getServerHost()); + } } } } @@ -99,7 +129,8 @@ private String resetStub() { .withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS); try { - GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder().setHeader(header).build(); + GetMembersRequest request = + Pdpb.GetMembersRequest.newBuilder().setHeader(header).build(); GetMembersResponse members = blockingStub.getMembers(request); Metapb.Member leader = members.getLeader(); leaderHost = leader.getGrpcUrl(); @@ -126,7 +157,9 @@ protected > RespT blockin MethodDescriptor method, ReqT req, int retry) throws PDException { AbstractBlockingStub stub = getBlockingStub(); try { - RespT resp = ClientCalls.blockingUnaryCall(stub.getChannel(), method, stub.getCallOptions(), req); + RespT resp = + ClientCalls.blockingUnaryCall(stub.getChannel(), method, stub.getCallOptions(), + req); return resp; } catch (Exception e) { log.error(method.getFullMethodName() + " exception, {}", e.getMessage()); @@ -150,15 +183,17 @@ private 
AbstractBlockingStub getConcurrentBlockingStub(String address) { return stub; } Channel ch = ManagedChannelBuilder.forTarget(address).usePlaintext().build(); - PDBlockingStub blockingStub = PDGrpc.newBlockingStub(ch).withDeadlineAfter(config.getGrpcTimeOut(), - TimeUnit.MILLISECONDS); + PDBlockingStub blockingStub = + PDGrpc.newBlockingStub(ch).withDeadlineAfter(config.getGrpcTimeOut(), + TimeUnit.MILLISECONDS); stubs.put(address, blockingStub); return blockingStub; } protected KVPair concurrentBlockingUnaryCall( - MethodDescriptor method, ReqT req, Predicate predicate) throws PDException { + MethodDescriptor method, ReqT req, Predicate predicate) throws + PDException { LinkedList hostList = this.stubProxy.getHostList(); if (this.stubs == null) { synchronized (this) { @@ -169,7 +204,9 @@ protected KVPair concurrentBlockingUnaryCall( } Stream respTStream = hostList.parallelStream().map((address) -> { AbstractBlockingStub stub = getConcurrentBlockingStub(address); - RespT resp = ClientCalls.blockingUnaryCall(stub.getChannel(), method, stub.getCallOptions(), req); + RespT resp = + ClientCalls.blockingUnaryCall(stub.getChannel(), method, stub.getCallOptions(), + req); return resp; }); KVPair pair; @@ -187,8 +224,9 @@ protected KVPair concurrentBlockingUnaryCall( } protected void streamingCall(MethodDescriptor method, ReqT request, - StreamObserver responseObserver, int retry) throws - PDException { + StreamObserver responseObserver, + int retry) throws + PDException { AbstractStub stub = getStub(); try { ClientCall call = stub.getChannel().newCall(method, stub.getCallOptions()); @@ -207,16 +245,6 @@ protected void streamingCall(MethodDescriptor method, } } - - protected static void handleErrors(Pdpb.ResponseHeader header) throws PDException { - if (header.hasError() && header.getError().getType() != Pdpb.ErrorType.OK) - throw new PDException(header.getError().getTypeValue(), - String.format("PD request error, error code = %d, msg = %s", - header.getError().getTypeValue(), - header.getError().getMessage())); - } - - @Override public void close() { closeChannel(channel); @@ -230,12 +258,12 @@ public void close() { private void closeChannel(ManagedChannel channel) { try { - while (channel != null && !channel.shutdownNow().awaitTermination(100, TimeUnit.MILLISECONDS)) { + while (channel != null && + !channel.shutdownNow().awaitTermination(100, TimeUnit.MILLISECONDS)) { continue; } } catch (Exception e) { log.info("Close channel with error : {}.", e); - } finally { } } } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java index 4fd5678407..e4c5cf63ee 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java @@ -1,10 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.client; +import java.util.LinkedList; + import io.grpc.stub.AbstractBlockingStub; import io.grpc.stub.AbstractStub; -import java.util.LinkedList; - /** * @author zhangyingjie * @date 2022/6/20 @@ -13,31 +30,30 @@ public class AbstractClientStubProxy { private AbstractBlockingStub blockingStub; private AbstractStub stub; - - public LinkedList getHostList() { - return hostList; - } - - private LinkedList hostList = new LinkedList<>(); + private final LinkedList hostList = new LinkedList<>(); public AbstractClientStubProxy(String[] hosts) { for (String host : hosts) if (!host.isEmpty()) hostList.offer(host); } + public LinkedList getHostList() { + return hostList; + } + public String nextHost() { String host = hostList.poll(); hostList.offer(host); //移到尾部 return host; } - public void setBlockingStub(AbstractBlockingStub stub) { - this.blockingStub = stub; - } - public AbstractBlockingStub getBlockingStub() { return this.blockingStub; } + public void setBlockingStub(AbstractBlockingStub stub) { + this.blockingStub = stub; + } + public String getHost() { return hostList.peek(); } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java index fc769e34f8..c2daa5c55a 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java @@ -1,10 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.client; import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; import com.baidu.hugegraph.pd.grpc.discovery.Query; -import java.util.Map; - /** * @author zhangyingjie * @date 2021/12/20 @@ -14,5 +29,6 @@ public interface Discoverable { NodeInfos getNodeInfos(Query query); void scheduleTask(); + void cancelTask(); } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java index 929cec19a0..3b0d2d88b3 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java @@ -1,14 +1,21 @@ -package org.apache.hugegraph.pd.client; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; -import com.baidu.hugegraph.pd.grpc.discovery.Query; -import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; -import io.grpc.ManagedChannel; -import io.grpc.ManagedChannelBuilder; -import lombok.extern.slf4j.Slf4j; +package org.apache.hugegraph.pd.client; import java.io.Closeable; import java.util.LinkedList; @@ -20,6 +27,17 @@ import java.util.function.Consumer; import java.util.function.Function; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; +import com.baidu.hugegraph.pd.grpc.discovery.Query; +import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; + +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import lombok.extern.slf4j.Slf4j; + /** * @author zhangyingjie * @date 2021/12/20 @@ -28,11 +46,11 @@ public abstract class DiscoveryClient implements Closeable, Discoverable { protected int period; //心跳周期 - private Timer timer = new Timer("serverHeartbeat", true); - private volatile int currentIndex; // 当前在用pd地址位置 LinkedList pdAddresses = new LinkedList<>(); ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(); - private volatile AtomicBoolean requireResetStub = new AtomicBoolean(false); + private final Timer timer = new Timer("serverHeartbeat", true); + private volatile int currentIndex; // 当前在用pd地址位置 + private final AtomicBoolean requireResetStub = new AtomicBoolean(false); private int maxTime = 6; private ManagedChannel channel = null; private DiscoveryServiceGrpc.DiscoveryServiceBlockingStub registerStub; @@ -66,8 +84,9 @@ private R tryWithTimes(Function 
function, V v) { ex = e; } } - if (ex != null) + if (ex != null) { log.error("Try discovery method with error: {}", ex.getMessage()); + } return null; } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java index e496991992..0b1e1d3898 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java @@ -1,25 +1,42 @@ -package org.apache.hugegraph.pd.client; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; -import com.baidu.hugegraph.pd.grpc.discovery.RegisterType; +package org.apache.hugegraph.pd.client; import java.util.Map; import java.util.function.Consumer; +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; +import com.baidu.hugegraph.pd.grpc.discovery.RegisterType; + /** * @author zhangyingjie * @date 2021/12/20 **/ public class DiscoveryClientImpl extends DiscoveryClient { - private volatile String id ; - private RegisterType type; // 心跳类型,备用 - private String version; - private String appName; - private int times; // 心跳过期次数,备用 - private String address; - private Map labels; - private Consumer registerConsumer; + private final String id; + private final RegisterType type; // 心跳类型,备用 + private final String version; + private final String appName; + private final int times; // 心跳过期次数,备用 + private final String address; + private final Map labels; + private final Consumer registerConsumer; private DiscoveryClientImpl(Builder builder) { diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java index 29cfb88f37..908a49130d 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
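DiscoveryClient.tryWithTimes() applies a registration call up to maxTime times and logs only the last failure. A stand-alone sketch of that retry-with-limit pattern (the method and variable names here are placeholders, not the class's actual fields):

// Stand-alone illustration of the same retry-with-limit pattern (requires
// java.util.function.Function).
static <V, R> R retry(Function<V, R> fn, V arg, int maxTime) {
    Exception last = null;
    for (int i = 0; i < maxTime; i++) {
        try {
            return fn.apply(arg);
        } catch (Exception e) {
            last = e;            // remember only the most recent failure
        }
    }
    if (last != null) {
        System.err.println("Try discovery method with error: " + last.getMessage());
    }
    return null;
}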
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.client; import java.io.Closeable; @@ -39,8 +56,8 @@ @Slf4j public class KvClient extends AbstractClient implements Closeable { - private AtomicLong clientId = new AtomicLong(0); - private Semaphore semaphore = new Semaphore(1); + private final AtomicLong clientId = new AtomicLong(0); + private final Semaphore semaphore = new Semaphore(1); public KvClient(PDConfig pdConfig) { super(pdConfig); @@ -103,7 +120,8 @@ public TTLResponse keepTTLAlive(String key) throws PDException { } public TTLResponse putTTL(String key, String value, long ttl) throws PDException { - TTLRequest request = TTLRequest.newBuilder().setKey(key).setValue(value).setTtl(ttl).build(); + TTLRequest request = + TTLRequest.newBuilder().setKey(key).setValue(value).setTtl(ttl).build(); TTLResponse response = blockingUnaryCall(KvServiceGrpc.getPutTTLMethod(), request); handleErrors(response.getHeader()); return response; @@ -115,30 +133,6 @@ private void onEvent(WatchResponse value, Consumer consumer) { if (value.getEventsCount() != 0) consumer.accept((T) value); } - BiConsumer listenWrapper = (key, consumer) -> { - try { - listen(key, consumer); - } catch (PDException e) { - try { - log.warn("start listen with warning:", e); - Thread.sleep(1000); - } catch (InterruptedException ex) { - } - } - }; - - BiConsumer prefixListenWrapper = (key, consumer) -> { - try { - listenPrefix(key, consumer); - } catch (PDException e) { - try { - log.warn("start listenPrefix with warning:", e); - Thread.sleep(1000); - } catch (InterruptedException ex) { - } - } - }; - private StreamObserver getObserver(String key, Consumer consumer, BiConsumer listenWrapper) { return new StreamObserver() { @@ -177,19 +171,40 @@ public void onCompleted() { } }; - } + } BiConsumer listenWrapper = (key, consumer) -> { + try { + listen(key, consumer); + } catch (PDException e) { + try { + log.warn("start listen with warning:", e); + Thread.sleep(1000); + } catch (InterruptedException ex) { + } + } + }; public void listen(String key, Consumer consumer) throws PDException { StreamObserver observer = getObserver(key, consumer, listenWrapper); acquire(); WatchRequest k = WatchRequest.newBuilder().setClientId(clientId.get()).setKey(key).build(); streamingCall(KvServiceGrpc.getWatchMethod(), k, observer, 1); - } + } BiConsumer prefixListenWrapper = (key, consumer) -> { + try { + listenPrefix(key, consumer); + } catch (PDException e) { + try { + log.warn("start listenPrefix with warning:", e); + Thread.sleep(1000); + } catch (InterruptedException ex) { + } + } + }; public void listenPrefix(String prefix, Consumer consumer) throws PDException { StreamObserver observer = getObserver(prefix, consumer, prefixListenWrapper); acquire(); - WatchRequest k = WatchRequest.newBuilder().setClientId(clientId.get()).setKey(prefix).build(); + WatchRequest k = + WatchRequest.newBuilder().setClientId(clientId.get()).setKey(prefix).build(); streamingCall(KvServiceGrpc.getWatchPrefixMethod(), k, observer, 1); } @@ -206,7 +221,6 @@ private void acquire() { } } - public List getWatchList(T response) { List values = new LinkedList<>(); List eventsList = response.getEventsList(); @@ -220,7 +234,6 @@ public List getWatchList(T response) { return values; } - public Map getWatchMap(T response) { Map values = new HashMap<>(); List eventsList = response.getEventsList(); @@ -236,10 +249,10 @@ public Map getWatchMap(T response) { return values; } - public 
LockResponse lock(String key, long ttl) throws PDException { acquire(); - LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl).build(); + LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl) + .build(); LockResponse response = blockingUnaryCall(KvServiceGrpc.getLockMethod(), k); handleErrors(response.getHeader()); clientId.compareAndSet(0L, response.getClientId()); @@ -249,7 +262,8 @@ public LockResponse lock(String key, long ttl) throws PDException { public LockResponse lockWithoutReentrant(String key, long ttl) throws PDException { acquire(); - LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl).build(); + LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl) + .build(); LockResponse response = blockingUnaryCall(KvServiceGrpc.getLockWithoutReentrantMethod(), k); handleErrors(response.getHeader()); clientId.compareAndSet(0L, response.getClientId()); @@ -264,7 +278,6 @@ public LockResponse isLocked(String key) throws PDException { return response; } - public LockResponse unlock(String key) throws PDException { assert clientId.get() != 0; LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build(); @@ -275,7 +288,6 @@ public LockResponse unlock(String key) throws PDException { return response; } - public LockResponse keepAlive(String key) throws PDException { assert clientId.get() != 0; LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build(); @@ -290,4 +302,9 @@ public LockResponse keepAlive(String key) throws PDException { public void close() { super.close(); } + + + + + } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java index 9adcd01dac..ce20c0aa01 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java @@ -1,9 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
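Taken together, the KvClient hunks above cover TTL puts, prefix watches and the PD-backed distributed lock. A hedged usage sketch, assuming the class is parameterised with the kv WatchResponse type (the generics were stripped from this hunk) and using a made-up PD address, key names and TTLs; imports from this module (KvClient, PDConfig, LockResponse, WatchResponse, PDException) are omitted:

static void kvExample() throws PDException {
    KvClient<WatchResponse> kv = new KvClient<>(PDConfig.of("localhost:8686", 10_000));

    kv.putTTL("config/graph-a", "v1", 30_000);        // value expires after 30 seconds
    kv.listenPrefix("config/", response ->            // push notifications for the prefix
            System.out.println(kv.getWatchMap(response)));

    LockResponse lock = kv.lock("tasks/compaction", 30_000);   // re-entrant lock, 30s TTL
    try {
        kv.keepAlive("tasks/compaction");             // renew the lock while still working
    } finally {
        kv.unlock("tasks/compaction");
    }
}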
+ */ + package org.apache.hugegraph.pd.client; import com.baidu.hugegraph.pd.common.KVPair; import com.baidu.hugegraph.pd.grpc.PDGrpc; import com.baidu.hugegraph.pd.grpc.Pdpb; import com.google.protobuf.ByteString; + import io.grpc.stub.AbstractBlockingStub; import io.grpc.stub.AbstractStub; import lombok.extern.slf4j.Slf4j; @@ -31,7 +49,8 @@ protected AbstractBlockingStub createBlockingStub() { public Pdpb.PutLicenseResponse putLicense(byte[] content) { Pdpb.PutLicenseRequest request = Pdpb.PutLicenseRequest.newBuilder() - .setContent(ByteString.copyFrom(content)) + .setContent( + ByteString.copyFrom(content)) .build(); try { KVPair pair = concurrentBlockingUnaryCall( @@ -47,7 +66,8 @@ public Pdpb.PutLicenseResponse putLicense(byte[] content) { } catch (Exception e) { e.printStackTrace(); log.debug("put license with error:{} ", e); - Pdpb.ResponseHeader rh = newErrorHeader(Pdpb.ErrorType.LICENSE_ERROR_VALUE, e.getMessage()); + Pdpb.ResponseHeader rh = + newErrorHeader(Pdpb.ErrorType.LICENSE_ERROR_VALUE, e.getMessage()); return Pdpb.PutLicenseResponse.newBuilder().setHeader(rh).build(); } } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java index accabbb63e..a3ec984ff6 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.client; import java.util.ArrayList; @@ -29,6 +46,7 @@ /** * PD客户端实现类 + * * @author yanjinbing */ @Slf4j @@ -36,64 +54,53 @@ public class PDClient { private final PDConfig config; private final Pdpb.RequestHeader header; private final PartitionCache cache; + private final StubProxy stubProxy; + private final List eventListeners; private PDWatch.Watcher partitionWatcher; private PDWatch.Watcher storeWatcher; private PDWatch.Watcher graphWatcher; private PDWatch.Watcher shardGroupWatcher; private PDPulse pdPulse; - private final StubProxy stubProxy; - private final List eventListeners; - - public interface PDEventListener { - void onStoreChanged(NodeEvent event); - void onPartitionChanged(PartitionEvent event); - void onGraphChanged(WatchResponse event); - default void onShardGroupChanged(WatchResponse event) {}; - } - - static class StubProxy { - private volatile PDGrpc.PDBlockingStub stub; - private LinkedList hostList = new LinkedList<>(); - public StubProxy(String[] hosts) { - for (String host : hosts) if (!host.isEmpty()) hostList.offer(host); - } + private PDClient(PDConfig config) { + this.config = config; + this.header = Pdpb.RequestHeader.getDefaultInstance(); + this.cache = new PartitionCache(); + this.stubProxy = new StubProxy(config.getServerHost().split(",")); + this.eventListeners = new CopyOnWriteArrayList<>(); - public String nextHost() { - String host = hostList.poll(); - hostList.offer(host); //移到尾部 - return host; - } - public void set(PDGrpc.PDBlockingStub stub){ this.stub = stub;} - public PDGrpc.PDBlockingStub get(){return this.stub;} - public String getHost(){ return hostList.peek();} - public int getHostCount(){ return hostList.size();} } + /** * 创建PDClient对象,并初始化stub + * * @param config * @return */ - public static PDClient create(PDConfig config){ + public static PDClient create(PDConfig config) { PDClient client = new PDClient(config); return client; } - private PDClient(PDConfig config) { - this.config = config; - this.header = Pdpb.RequestHeader.getDefaultInstance(); - this.cache = new PartitionCache(); - this.stubProxy = new StubProxy(config.getServerHost().split(",")); - this.eventListeners = new CopyOnWriteArrayList<>(); - + private static void handleResponseError(Pdpb.ResponseHeader header) throws + PDException { + if (header.hasError() && header.getError() + .getType() != Pdpb.ErrorType.OK) { + throw new PDException(header.getError().getTypeValue(), + String.format( + "PD request error, error code = %d, msg = %s", + header.getError().getTypeValue(), + header.getError().getMessage())); + } } - private synchronized void newBlockingStub() throws PDException { - if ( stubProxy.get() != null ) return; + private synchronized void newBlockingStub() throws PDException { + if (stubProxy.get() != null) return; String host = newLeaderStub(); - if ( host.isEmpty()) + if (host.isEmpty()) { throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, - "PD unreachable, pd.peers=" + config.getServerHost()); + "PD unreachable, pd.peers=" + config.getServerHost()); + } PDWatch pdWatch = new PDWatchImpl(host); if (config.isEnableCache()) { log.info("PDClient enable cache, init PDWatch object"); @@ -102,7 +109,8 @@ private synchronized void newBlockingStub() throws PDException { @Override public void onNext(PartitionEvent response) { // log.info("PDClient receive partition event {}-{} {}", - // response.getGraph(), response.getPartitionId(), response.getChangeType()); + // response.getGraph(), response.getPartitionId(), response + // 
.getChangeType()); invalidPartitionCache(response.getGraph(), response.getPartitionId()); if (response.getChangeType() == PartitionEvent.ChangeType.DEL) { @@ -128,7 +136,8 @@ public void onCompleted() { storeWatcher = pdWatch.watchNode(new PDWatch.Listener() { @Override public void onNext(NodeEvent response) { - log.info("PDClient receive store event {} {}", response.getEventType(), Long.toHexString(response.getNodeId())); + log.info("PDClient receive store event {} {}", response.getEventType(), + Long.toHexString(response.getNodeId())); invalidStoreCache(response.getNodeId()); eventListeners.forEach(listener -> { listener.onStoreChanged(response); @@ -170,18 +179,20 @@ public void onCompleted() { @Override public void onNext(WatchResponse response) { var shardResponse = response.getShardGroupResponse(); - log.info("PDClient receive shard group event: raft {}-{}", shardResponse.getShardGroupId(), - shardResponse.getType()); + log.info("PDClient receive shard group event: raft {}-{}", + shardResponse.getShardGroupId(), + shardResponse.getType()); if (config.isEnableCache()) { switch (shardResponse.getType()) { case WATCH_CHANGE_TYPE_DEL: cache.deleteShardGroup(shardResponse.getShardGroupId()); break; case WATCH_CHANGE_TYPE_ALTER: - cache.updateShardGroup(response.getShardGroupResponse().getShardGroup()); + cache.updateShardGroup( + response.getShardGroupResponse().getShardGroup()); break; default: - break; + break; } } eventListeners.forEach(listener -> listener.onShardGroupChanged(response)); @@ -216,7 +227,7 @@ private synchronized void closeStub(boolean closeWatcher) { graphWatcher = null; } - if (shardGroupWatcher != null){ + if (shardGroupWatcher != null) { shardGroupWatcher.close(); shardGroupWatcher = null; } @@ -233,18 +244,18 @@ private PDGrpc.PDBlockingStub getStub() throws PDException { TimeUnit.MILLISECONDS); } - private String newLeaderStub() { String leaderHost = ""; for (int i = 0; i < stubProxy.getHostCount(); i++) { String host = stubProxy.nextHost(); ManagedChannel channel = ManagedChannelBuilder.forTarget(host).usePlaintext().build(); PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(channel) - .withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS); + .withDeadlineAfter(config.getGrpcTimeOut(), + TimeUnit.MILLISECONDS); try { Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() - .setHeader(header) - .build(); + .setHeader(header) + .build(); Metapb.Member leader = stub.getMembers(request).getLeader(); if (!leader.getGrpcUrl().equalsIgnoreCase(host)) { leaderHost = leader.getGrpcUrl(); @@ -252,7 +263,8 @@ private String newLeaderStub() { channel.awaitTermination(10000, TimeUnit.MILLISECONDS); channel = ManagedChannelBuilder.forTarget(leaderHost).usePlaintext().build(); stubProxy.set(PDGrpc.newBlockingStub(channel) - .withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS)); + .withDeadlineAfter(config.getGrpcTimeOut(), + TimeUnit.MILLISECONDS)); } else { stubProxy.set(stub); leaderHost = host; @@ -261,20 +273,22 @@ private String newLeaderStub() { break; } catch (Exception e) { log.error("PDClient connect to {} exception {}, {}", host, e.getMessage(), - e.getCause() != null ? e.getCause().getMessage() : ""); + e.getCause() != null ? 
e.getCause().getMessage() : ""); } } return leaderHost; } + /** * Store注册,返回storeID,初次注册会返回新ID + * * @param store * @return */ public long registerStore(Metapb.Store store) throws PDException { Pdpb.RegisterStoreRequest request = Pdpb.RegisterStoreRequest.newBuilder() - .setHeader(header) - .setStore(store).build(); + .setHeader(header) + .setStore(store).build(); Pdpb.RegisterStoreResponse response = blockingUnaryCall(PDGrpc.getRegisterStoreMethod(), request); @@ -284,6 +298,7 @@ public long registerStore(Metapb.Store store) throws PDException { /** * 根据storeId返回Store对象 + * * @param storeId * @return * @throws PDException @@ -292,36 +307,41 @@ public Metapb.Store getStore(long storeId) throws PDException { Metapb.Store store = cache.getStoreById(storeId); if (store == null) { Pdpb.GetStoreRequest request = Pdpb.GetStoreRequest.newBuilder() - .setHeader(header) - .setStoreId(storeId).build(); + .setHeader(header) + .setStoreId(storeId).build(); Pdpb.GetStoreResponse response = getStub().getStore(request); handleResponseError(response.getHeader()); store = response.getStore(); - if (config.isEnableCache()) + if (config.isEnableCache()) { cache.addStore(storeId, store); + } } return store; } /** * 更新Store信息,包括上下线等 + * * @param store * @return */ public Metapb.Store updateStore(Metapb.Store store) throws PDException { Pdpb.SetStoreRequest request = Pdpb.SetStoreRequest.newBuilder() - .setHeader(header) - .setStore(store).build(); + .setHeader(header) + .setStore(store).build(); Pdpb.SetStoreResponse response = getStub().setStore(request); handleResponseError(response.getHeader()); store = response.getStore(); - if (config.isEnableCache()) + if (config.isEnableCache()) { cache.addStore(store.getId(), store); + } return store; } + /** * 返回活跃的Store + * * @param graphName * @return */ @@ -353,15 +373,16 @@ public List getActiveStores() throws PDException { /** * 返回活跃的Store + * * @param graphName * @return */ public List getAllStores(String graphName) throws PDException { Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() - .setHeader(header) - .setGraphName(graphName) - .setExcludeOfflineStores(false) - .build(); + .setHeader(header) + .setGraphName(graphName) + .setExcludeOfflineStores(false) + .build(); Pdpb.GetAllStoresResponse response = getStub().getAllStores(request); handleResponseError(response.getHeader()); return response.getStoresList(); @@ -370,13 +391,14 @@ public List getAllStores(String graphName) throws PDException { /** * Store心跳,定期调用,保持在线状态 + * * @param stats * @throws PDException */ public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDException { Pdpb.StoreHeartbeatRequest request = Pdpb.StoreHeartbeatRequest.newBuilder() - .setHeader(header) - .setStats(stats).build(); + .setHeader(header) + .setStats(stats).build(); Pdpb.StoreHeartbeatResponse response = getStub().storeHeartbeat(request); handleResponseError(response.getHeader()); return response.getClusterStats(); @@ -384,19 +406,22 @@ public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDExce /** * 查询Key所属分区信息 + * * @param graphName * @param key * @return * @throws PDException */ - public KVPair getPartition(String graphName, byte[] key) throws PDException { + public KVPair getPartition(String graphName, byte[] key) throws + PDException { // 先查cache,cache没有命中,在调用PD KVPair partShard = cache.getPartitionByKey(graphName, key); - if ( partShard == null ){ + if (partShard == null) { Pdpb.GetPartitionRequest request = Pdpb.GetPartitionRequest.newBuilder() - 
.setHeader(header) - .setGraphName(graphName) - .setKey(ByteString.copyFrom(key)).build(); + .setHeader(header) + .setGraphName(graphName) + .setKey(ByteString.copyFrom( + key)).build(); Pdpb.GetPartitionResponse response = blockingUnaryCall(PDGrpc.getGetPartitionMethod(), request); handleResponseError(response.getHeader()); @@ -410,20 +435,27 @@ public KVPair getPartition(String graphName, byt /** * 根据hashcode查询所属分区信息 + * * @param graphName * @param hashCode * @return * @throws PDException */ - public KVPair getPartitionByCode(String graphName, long hashCode) + public KVPair getPartitionByCode(String graphName, + long hashCode) throws PDException { // 先查cache,cache没有命中,在调用PD - KVPair partShard = cache.getPartitionByCode(graphName, hashCode); - if ( partShard == null ){ + KVPair partShard = + cache.getPartitionByCode(graphName, hashCode); + if (partShard == null) { Pdpb.GetPartitionByCodeRequest request = Pdpb.GetPartitionByCodeRequest.newBuilder() - .setHeader(header) - .setGraphName(graphName) - .setCode(hashCode).build(); + .setHeader( + header) + .setGraphName( + graphName) + .setCode( + hashCode) + .build(); Pdpb.GetPartitionResponse response = blockingUnaryCall(PDGrpc.getGetPartitionByCodeMethod(), request); handleResponseError(response.getHeader()); @@ -436,29 +468,33 @@ public KVPair getPartitionByCode(String graphNam return partShard; } - - /** * 获取Key的哈希值 */ - public int keyToCode(String graphName, byte[] key){ + public int keyToCode(String graphName, byte[] key) { return PartitionUtils.calcHashcode(key); } /** * 根据分区id返回分区信息, RPC请求 + * * @param graphName * @param partId * @return * @throws PDException */ - public KVPair getPartitionById(String graphName, int partId) throws PDException { - KVPair partShard = cache.getPartitionById(graphName, partId); + public KVPair getPartitionById(String graphName, + int partId) throws PDException { + KVPair partShard = + cache.getPartitionById(graphName, partId); if (partShard == null) { Pdpb.GetPartitionByIDRequest request = Pdpb.GetPartitionByIDRequest.newBuilder() - .setHeader(header) - .setGraphName(graphName) - .setPartitionId(partId).build(); + .setHeader(header) + .setGraphName( + graphName) + .setPartitionId( + partId) + .build(); Pdpb.GetPartitionResponse response = blockingUnaryCall(PDGrpc.getGetPartitionByIDMethod(), request); handleResponseError(response.getHeader()); @@ -475,10 +511,11 @@ public Metapb.ShardGroup getShardGroup(int partId) throws PDException { Metapb.ShardGroup group = cache.getShardGroup(partId); if (group == null) { Pdpb.GetShardGroupRequest request = Pdpb.GetShardGroupRequest.newBuilder() - .setHeader(header) - .setGroupId(partId) - .build(); - Pdpb.GetShardGroupResponse response = blockingUnaryCall(PDGrpc.getGetShardGroupMethod(), request); + .setHeader(header) + .setGroupId(partId) + .build(); + Pdpb.GetShardGroupResponse response = + blockingUnaryCall(PDGrpc.getGetShardGroupMethod(), request); handleResponseError(response.getHeader()); group = response.getShardGroup(); if (config.isEnableCache()) { @@ -490,10 +527,12 @@ public Metapb.ShardGroup getShardGroup(int partId) throws PDException { public void updateShardGroup(Metapb.ShardGroup shardGroup) throws PDException { Pdpb.UpdateShardGroupRequest request = Pdpb.UpdateShardGroupRequest.newBuilder() - .setHeader(header) - .setShardGroup(shardGroup) - .build(); - Pdpb.UpdateShardGroupResponse response = blockingUnaryCall(PDGrpc.getUpdateShardGroupMethod(), request); + .setHeader(header) + .setShardGroup( + shardGroup) + .build(); + Pdpb.UpdateShardGroupResponse 
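The PDClient methods above combine into the usual store-registration and key-routing flow. A sketch under stated assumptions: the PD address, graph name and key are placeholders, the Store/StoreStats builders are left empty because their fields are outside this patch, and the KVPair type arguments follow the stripped generics (imports for Metapb, KVPair, StandardCharsets and PDException omitted):

static void pdClientExample() throws PDException {
    PDClient pd = PDClient.create(PDConfig.of("localhost:8686", 10_000).setEnableCache(true));

    Metapb.Store store = Metapb.Store.newBuilder().build();    // real callers fill in address etc.
    long storeId = pd.registerStore(store);                    // first registration returns a new id
    pd.storeHeartbeat(Metapb.StoreStats.newBuilder().build()); // keep the store marked online

    byte[] key = "v:alice".getBytes(StandardCharsets.UTF_8);
    KVPair<Metapb.Partition, Metapb.Shard> partShard = pd.getPartition("hugegraph/g", key);
    int code = pd.keyToCode("hugegraph/g", key);
    partShard = pd.getPartitionByCode("hugegraph/g", code);    // same lookup, by hash code
    Metapb.ShardGroup group = pd.getShardGroup(partShard.getKey().getId());
}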
response = + blockingUnaryCall(PDGrpc.getUpdateShardGroupMethod(), request); handleResponseError(response.getHeader()); if (config.isEnableCache()) { @@ -503,39 +542,47 @@ public void updateShardGroup(Metapb.ShardGroup shardGroup) throws PDException { /** * 返回startKey和endKey跨越的所有分区信息 + * * @param graphName * @param startKey * @param endKey * @return * @throws PDException */ - public List> scanPartitions(String graphName, byte[] startKey, byte[] endKey) throws PDException { + public List> scanPartitions(String graphName, + byte[] startKey, + byte[] endKey) throws + PDException { List> partitions = new ArrayList<>(); KVPair startPartShard = getPartition(graphName, startKey); KVPair endPartShard = getPartition(graphName, endKey); - if (startPartShard == null || endPartShard == null) + if (startPartShard == null || endPartShard == null) { return null; + } partitions.add(startPartShard); while (startPartShard.getKey().getEndKey() < endPartShard.getKey().getEndKey() - && startPartShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE /*排除最后一个分区*/) { + && startPartShard.getKey().getEndKey() < + PartitionUtils.MAX_VALUE /*排除最后一个分区*/) { startPartShard = getPartitionByCode(graphName, startPartShard.getKey().getEndKey()); partitions.add(startPartShard); } return partitions; } + /** * 根据条件查询分区信息 + * * @return * @throws PDException */ public List getPartitionsByStore(long storeId) throws PDException { Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder() - .setStoreId(storeId) - .build(); + .setStoreId(storeId) + .build(); Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder() - .setQuery(query).build(); + .setQuery(query).build(); Pdpb.QueryPartitionsResponse response = blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request); @@ -545,17 +592,19 @@ public List getPartitionsByStore(long storeId) throws PDExcept /** * 查找指定store上的指定partitionId + * * @return * @throws PDException */ - public List queryPartitions(long storeId, int partitionId) throws PDException { + public List queryPartitions(long storeId, int partitionId) throws + PDException { Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder() - .setStoreId(storeId) - .setPartitionId(partitionId) - .build(); + .setStoreId(storeId) + .setPartitionId(partitionId) + .build(); Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder() - .setQuery(query).build(); + .setQuery(query).build(); Pdpb.QueryPartitionsResponse response = blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request); @@ -580,8 +629,8 @@ public List getPartitions(long storeId, String graphName) thro public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException { Pdpb.SetGraphRequest request = Pdpb.SetGraphRequest.newBuilder() - .setGraph(graph) - .build(); + .setGraph(graph) + .build(); Pdpb.SetGraphResponse response = blockingUnaryCall(PDGrpc.getSetGraphMethod(), request); @@ -591,8 +640,8 @@ public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException { public Metapb.Graph getGraph(String graphName) throws PDException { Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder() - .setGraphName(graphName) - .build(); + .setGraphName(graphName) + .build(); Pdpb.GetGraphResponse response = blockingUnaryCall(PDGrpc.getGetGraphMethod(), request); @@ -613,8 +662,8 @@ public Metapb.Graph getGraphWithOutException(String graphName) throws public Metapb.Graph delGraph(String graphName) throws PDException { Pdpb.DelGraphRequest request = Pdpb.DelGraphRequest.newBuilder() - .setGraphName(graphName) - 
.build(); + .setGraphName(graphName) + .build(); Pdpb.DelGraphResponse response = blockingUnaryCall(PDGrpc.getDelGraphMethod(), request); @@ -622,11 +671,13 @@ public Metapb.Graph delGraph(String graphName) throws PDException { return response.getGraph(); } - public List updatePartition(List partitions) throws PDException { + public List updatePartition(List partitions) throws + PDException { Pdpb.UpdatePartitionRequest request = Pdpb.UpdatePartitionRequest.newBuilder() - .addAllPartition(partitions) - .build(); + .addAllPartition( + partitions) + .build(); Pdpb.UpdatePartitionResponse response = blockingUnaryCall(PDGrpc.getUpdatePartitionMethod(), request); handleResponseError(response.getHeader()); @@ -634,47 +685,50 @@ public List updatePartition(List partitions) return response.getPartitionList(); } + public Metapb.Partition delPartition(String graphName, int partitionId) throws PDException { Pdpb.DelPartitionRequest request = Pdpb.DelPartitionRequest.newBuilder() - .setGraphName(graphName) - .setPartitionId(partitionId) - .build(); + .setGraphName(graphName) + .setPartitionId(partitionId) + .build(); Pdpb.DelPartitionResponse response = blockingUnaryCall(PDGrpc.getDelPartitionMethod(), request); handleResponseError(response.getHeader()); - invalidPartitionCache(graphName ,partitionId); + invalidPartitionCache(graphName, partitionId); return response.getPartition(); } + /** * 删除分区缓存 */ public void invalidPartitionCache(String graphName, int partitionId) { // 检查是否存在缓存 - if (null != cache.getPartitionById(graphName, partitionId)) + if (null != cache.getPartitionById(graphName, partitionId)) { cache.removePartition(graphName, partitionId); + } } - /** * 删除分区缓存 */ public void invalidPartitionCache() { // 检查是否存在缓存 - cache.removePartitions(); + cache.removePartitions(); } + /** * 删除分区缓存 */ - public void invalidStoreCache(long storeId){ - cache.removeStore(storeId); + public void invalidStoreCache(long storeId) { + cache.removeStore(storeId); } /** * Hugegraph server 调用,Leader发生改变,更新缓存 */ - public void updatePartitionLeader(String graphName, int partId, long leaderStoreId){ + public void updatePartitionLeader(String graphName, int partId, long leaderStoreId) { KVPair partShard = null; try { partShard = this.getPartitionById(graphName, partId); @@ -684,16 +738,16 @@ public void updatePartitionLeader(String graphName, int partId, long leaderStore Metapb.Shard shard = null; List shards = new ArrayList<>(); - for(Metapb.Shard s : shardGroup.getShardsList()){ + for (Metapb.Shard s : shardGroup.getShardsList()) { if (s.getStoreId() == leaderStoreId) { shard = s; shards.add(Metapb.Shard.newBuilder(s) - .setStoreId(s.getStoreId()) - .setRole(Metapb.ShardRole.Leader).build()); - } else{ + .setStoreId(s.getStoreId()) + .setRole(Metapb.ShardRole.Leader).build()); + } else { shards.add(Metapb.Shard.newBuilder(s) - .setStoreId(s.getStoreId()) - .setRole(Metapb.ShardRole.Follower).build()); + .setStoreId(s.getStoreId()) + .setRole(Metapb.ShardRole.Follower).build()); } } @@ -711,22 +765,22 @@ public void updatePartitionLeader(String graphName, int partId, long leaderStore /** * Hugegraph-store调用,更新缓存 + * * @param partition */ - public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader){ + public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader) { if (config.isEnableCache()) { cache.updatePartition(partition.getGraphName(), partition.getId(), partition); cache.updateShardGroupLeader(partition.getId(), leader); } } - public Pdpb.GetIdResponse getIdByKey(String 
key, int delta) throws PDException { Pdpb.GetIdRequest request = Pdpb.GetIdRequest.newBuilder() - .setHeader(header) - .setKey(key) - .setDelta(delta) - .build(); + .setHeader(header) + .setKey(key) + .setDelta(delta) + .build(); Pdpb.GetIdResponse response = blockingUnaryCall(PDGrpc.getGetIdMethod(), request); handleResponseError(response.getHeader()); return response; @@ -734,9 +788,9 @@ public Pdpb.GetIdResponse getIdByKey(String key, int delta) throws PDException { public Pdpb.ResetIdResponse resetIdByKey(String key) throws PDException { Pdpb.ResetIdRequest request = Pdpb.ResetIdRequest.newBuilder() - .setHeader(header) - .setKey(key) - .build(); + .setHeader(header) + .setKey(key) + .build(); Pdpb.ResetIdResponse response = blockingUnaryCall(PDGrpc.getResetIdMethod(), request); handleResponseError(response.getHeader()); return response; @@ -744,29 +798,30 @@ public Pdpb.ResetIdResponse resetIdByKey(String key) throws PDException { public Metapb.Member getLeader() throws PDException { Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() - .setHeader(header) - .build(); + .setHeader(header) + .build(); Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request); handleResponseError(response.getHeader()); return response.getLeader(); } - public Pdpb.GetMembersResponse getMembers() throws PDException { + public Pdpb.GetMembersResponse getMembers() throws PDException { Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() .setHeader(header) .build(); Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request); handleResponseError(response.getHeader()); - return response; + return response; } public Metapb.ClusterStats getClusterStats() throws PDException { Pdpb.GetClusterStatsRequest request = Pdpb.GetClusterStatsRequest.newBuilder() - .setHeader(header) - .build(); - Pdpb.GetClusterStatsResponse response = blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request); + .setHeader(header) + .build(); + Pdpb.GetClusterStatsResponse response = + blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request); handleResponseError(response.getHeader()); - return response.getCluster(); + return response.getCluster(); } private > RespT @@ -775,10 +830,12 @@ public Metapb.ClusterStats getClusterStats() throws PDException { } private > RespT - blockingUnaryCall(MethodDescriptor method, ReqT req, int retry) throws PDException { + blockingUnaryCall(MethodDescriptor method, ReqT req, int retry) throws + PDException { io.grpc.stub.AbstractBlockingStub stub = (AbstractBlockingStub) getStub(); try { - RespT resp = io.grpc.stub.ClientCalls.blockingUnaryCall(stub.getChannel(), method, stub.getCallOptions(), req); + RespT resp = io.grpc.stub.ClientCalls.blockingUnaryCall(stub.getChannel(), method, + stub.getCallOptions(), req); return resp; } catch (Exception e) { log.error(method.getFullMethodName() + " exception, {}", e.getMessage()); @@ -795,47 +852,38 @@ public Metapb.ClusterStats getClusterStats() throws PDException { return null; } - private static void handleResponseError(Pdpb.ResponseHeader header) throws - PDException { - if (header.hasError() && header.getError() - .getType() != Pdpb.ErrorType.OK) { - throw new PDException(header.getError().getTypeValue(), - String.format( - "PD request error, error code = %d, msg = %s", - header.getError().getTypeValue(), - header.getError().getMessage())); - } - } - - public void addEventListener(PDEventListener listener){ + public void addEventListener(PDEventListener 
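getIdByKey() reserves a block of ids on the server, while getLeader() and getClusterStats() expose cluster state. A brief sketch; the id key and batch size are invented, and getGrpcUrl() is the accessor the surrounding code itself uses on Member:

static void idAndMembers(PDClient pd) throws PDException {
    Pdpb.GetIdResponse ids = pd.getIdByKey("vertex-id", 1000);   // reserve a batch of 1000 ids
    Metapb.Member leader = pd.getLeader();
    System.out.println("PD leader at " + leader.getGrpcUrl());
    Metapb.ClusterStats stats = pd.getClusterStats();
}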
listener) { eventListeners.add(listener); } - public PDWatch getWatchClient(){ - return new PDWatchImpl(stubProxy.getHost()); + public PDWatch getWatchClient() { + return new PDWatchImpl(stubProxy.getHost()); } - public PDPulse getPulseClient(){ + public PDPulse getPulseClient() { return this.pdPulse; } /** * 返回Store状态信息 - * */ public List getStoreStatus(boolean offlineExcluded) throws PDException { Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() .setHeader(header) - .setExcludeOfflineStores(offlineExcluded) + .setExcludeOfflineStores( + offlineExcluded) .build(); Pdpb.GetAllStoresResponse response = getStub().getStoreStatus(request); handleResponseError(response.getHeader()); List stores = response.getStoresList(); return stores; } - public void setGraphSpace(String graphSpaceName,long storageLimit) throws PDException { + + public void setGraphSpace(String graphSpaceName, long storageLimit) throws PDException { Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder().setName(graphSpaceName) - .setStorageLimit(storageLimit).setTimestamp(System.currentTimeMillis()).build(); + .setStorageLimit(storageLimit) + .setTimestamp(System.currentTimeMillis()) + .build(); Pdpb.SetGraphSpaceRequest request = Pdpb.SetGraphSpaceRequest.newBuilder() .setHeader(header) .setGraphSpace(graphSpace) @@ -859,37 +907,39 @@ public List getGraphSpace(String graphSpaceName) throws return graphSpaceList; } - public void setPDConfig(int partitionCount, String peerList, int shardCount, long version) throws PDException { + public void setPDConfig(int partitionCount, String peerList, int shardCount, + long version) throws PDException { Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder().setPartitionCount(partitionCount) .setPeersList(peerList).setShardCount(shardCount) - .setVersion(version).setTimestamp(System.currentTimeMillis()) + .setVersion(version) + .setTimestamp(System.currentTimeMillis()) .build(); Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder() - .setHeader(header) - .setPdConfig(pdConfig) - .build(); - Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); - handleResponseError(response.getHeader()); - } - - public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException { - Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder() - .setHeader(header) - .setPdConfig(pdConfig) - .build(); + .setHeader(header) + .setPdConfig(pdConfig) + .build(); Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); handleResponseError(response.getHeader()); } public Metapb.PDConfig getPDConfig() throws PDException { Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder() - .setHeader(header) - .build(); + .setHeader(header) + .build(); Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request); handleResponseError(response.getHeader()); return response.getPdConfig(); } + public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException { + Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder() + .setHeader(header) + .setPdConfig(pdConfig) + .build(); + Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); + handleResponseError(response.getHeader()); + } + public Metapb.PDConfig getPDConfig(long version) throws PDException { Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder().setHeader( header).setVersion(version).build(); @@ -902,92 +952,103 @@ public void changePeerList(String peerList) throws PDException { Pdpb.ChangePeerListRequest request = 
Pdpb.ChangePeerListRequest.newBuilder() .setPeerList(peerList) .setHeader(header).build(); - Pdpb.getChangePeerListResponse response = blockingUnaryCall(PDGrpc.getChangePeerListMethod(), request); + Pdpb.getChangePeerListResponse response = + blockingUnaryCall(PDGrpc.getChangePeerListMethod(), request); handleResponseError(response.getHeader()); } /** * 工作模式 - * Auto:自动分裂,每个Store上分区数达到最大值 + * Auto:自动分裂,每个Store上分区数达到最大值 * * @throws PDException */ public void splitData() throws PDException { Pdpb.SplitDataRequest request = Pdpb.SplitDataRequest.newBuilder() - .setHeader(header) - .setMode(Pdpb.OperationMode.Auto).build(); + .setHeader(header) + .setMode(Pdpb.OperationMode.Auto) + .build(); Pdpb.SplitDataResponse response = getStub().splitData(request); handleResponseError(response.getHeader()); } /** - * 工作模式 - * Auto:自动分裂,每个Store上分区数达到最大值 - * Expert:专家模式,需要指定splitParams + * 工作模式 + * Auto:自动分裂,每个Store上分区数达到最大值 + * Expert:专家模式,需要指定splitParams + * * @param mode * @param params * @throws PDException */ - public void splitData(Pdpb.OperationMode mode, List params) throws PDException { + public void splitData(Pdpb.OperationMode mode, List params) throws + PDException { Pdpb.SplitDataRequest request = Pdpb.SplitDataRequest.newBuilder() - .setHeader(header) - .setMode(mode) - .addAllParam(params).build();; + .setHeader(header) + .setMode(mode) + .addAllParam(params).build(); Pdpb.SplitDataResponse response = getStub().splitData(request); handleResponseError(response.getHeader()); } public void splitGraphData(String graphName, int toCount) throws PDException { Pdpb.SplitGraphDataRequest request = Pdpb.SplitGraphDataRequest.newBuilder() - .setHeader(header) - .setGraphName(graphName) - .setToCount(toCount) - .build(); + .setHeader(header) + .setGraphName(graphName) + .setToCount(toCount) + .build(); Pdpb.SplitDataResponse response = getStub().splitGraphData(request); handleResponseError(response.getHeader()); } /** - * 自动转移,达到每个Store上分区数量相同 + * 自动转移,达到每个Store上分区数量相同 + * * @throws PDException */ public void balancePartition() throws PDException { Pdpb.MovePartitionRequest request = Pdpb.MovePartitionRequest.newBuilder() - .setHeader(header) - .setMode(Pdpb.OperationMode.Auto).build(); + .setHeader(header) + .setMode( + Pdpb.OperationMode.Auto) + .build(); Pdpb.MovePartitionResponse response = getStub().movePartition(request); handleResponseError(response.getHeader()); } /** - * //工作模式 - * // Auto:自动转移,达到每个Store上分区数量相同 - * // Expert:专家模式,需要指定transferParams + * //工作模式 + * // Auto:自动转移,达到每个Store上分区数量相同 + * // Expert:专家模式,需要指定transferParams + * * @param mode * @param params * @throws PDException */ - public void movePartition(Pdpb.OperationMode mode, List params) throws PDException { + public void movePartition(Pdpb.OperationMode mode, List params) throws + PDException { Pdpb.MovePartitionRequest request = Pdpb.MovePartitionRequest.newBuilder() - .setHeader(header) - .setMode(mode) - .addAllParam(params).build(); + .setHeader(header) + .setMode(mode) + .addAllParam(params).build(); Pdpb.MovePartitionResponse response = getStub().movePartition(request); handleResponseError(response.getHeader()); } - public void reportTask(MetaTask.Task task)throws PDException { + + public void reportTask(MetaTask.Task task) throws PDException { Pdpb.ReportTaskRequest request = Pdpb.ReportTaskRequest.newBuilder() - .setHeader(header) - .setTask(task).build(); + .setHeader(header) + .setTask(task).build(); Pdpb.ReportTaskResponse response = getStub().reportTask(request); handleResponseError(response.getHeader()); } - public 
Metapb.PartitionStats getPartitionsStats(String graph, int partId)throws PDException { + public Metapb.PartitionStats getPartitionsStats(String graph, int partId) throws PDException { Pdpb.GetPartitionStatsRequest request = Pdpb.GetPartitionStatsRequest.newBuilder() - .setHeader(header) - .setGraphName(graph) - .setPartitionId(partId).build(); + .setHeader(header) + .setGraphName(graph) + .setPartitionId(partId) + .build(); Pdpb.GetPartitionStatsResponse response = getStub().getPartitionStats(request); handleResponseError(response.getHeader()); return response.getPartitionStats(); @@ -998,8 +1059,8 @@ public Metapb.PartitionStats getPartitionsStats(String graph, int partId)throws */ public void balanceLeaders() throws PDException { Pdpb.BalanceLeadersRequest request = Pdpb.BalanceLeadersRequest.newBuilder() - .setHeader(header) - .build(); + .setHeader(header) + .build(); Pdpb.BalanceLeadersResponse response = getStub().balanceLeaders(request); handleResponseError(response.getHeader()); } @@ -1009,9 +1070,9 @@ public void balanceLeaders() throws PDException { */ public Metapb.Store delStore(long storeId) throws PDException { Pdpb.DetStoreRequest request = Pdpb.DetStoreRequest.newBuilder() - .setHeader(header) - .setStoreId(storeId) - .build(); + .setHeader(header) + .setStoreId(storeId) + .build(); Pdpb.DetStoreResponse response = getStub().delStore(request); handleResponseError(response.getHeader()); return response.getStore(); @@ -1019,6 +1080,7 @@ public Metapb.Store delStore(long storeId) throws PDException { /** * 对rocksdb整体进行compaction + * * @throws PDException */ public void dbCompaction() throws PDException { @@ -1031,7 +1093,8 @@ public void dbCompaction() throws PDException { } /** - * 对rocksdb指定表进行compaction + * 对rocksdb指定表进行compaction + * * @param tableName * @throws PDException */ @@ -1051,7 +1114,7 @@ public void dbCompaction(String tableName) throws PDException { * @param toCount 缩容到分区的个数 * @throws PDException */ - public void combineCluster(int toCount) throws PDException{ + public void combineCluster(int toCount) throws PDException { Pdpb.CombineClusterRequest request = Pdpb.CombineClusterRequest .newBuilder() .setHeader(header) @@ -1063,11 +1126,12 @@ public void combineCluster(int toCount) throws PDException{ /** * 将单图缩容到 toCount个 - * @param graphName graph name - * @param toCount target count + * + * @param graphName graph name + * @param toCount target count * @throws PDException */ - public void combineGraph(String graphName, int toCount) throws PDException{ + public void combineGraph(String graphName, int toCount) throws PDException { Pdpb.CombineGraphRequest request = Pdpb.CombineGraphRequest .newBuilder() .setHeader(header) @@ -1090,30 +1154,32 @@ public void deleteShardGroup(int groupId) throws PDException { /** * 用于 store的 shard list重建 + * * @param groupId shard group id * @param shards shard list,delete when shards size is 0 */ public void updateShardGroupOp(int groupId, List shards) throws PDException { Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder() - .setHeader(header) - .setGroupId(groupId) - .addAllShards(shards) - .build(); + .setHeader(header) + .setGroupId(groupId) + .addAllShards(shards) + .build(); Pdpb.ChangeShardResponse response = getStub().updateShardGroupOp(request); handleResponseError(response.getHeader()); } /** * invoke fireChangeShard command + * * @param groupId shard group id - * @param shards shard list + * @param shards shard list */ public void changeShard(int groupId, List shards) throws PDException { 
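The maintenance entry points above (splitData, balancePartition, balanceLeaders, dbCompaction) all have parameterless Auto forms; a sketch with a placeholder PD address:

static void maintenance() throws PDException {
    PDClient pd = PDClient.create(PDConfig.of("localhost:8686", 10_000));
    pd.splitData();          // Auto mode: automatic data split
    pd.balancePartition();   // Auto mode: rebalance partition counts across stores
    pd.balanceLeaders();     // rebalance raft leaders across stores
    pd.dbCompaction();       // full RocksDB compaction
}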
Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder() - .setHeader(header) - .setGroupId(groupId) - .addAllShards(shards) - .build(); + .setHeader(header) + .setGroupId(groupId) + .addAllShards(shards) + .build(); Pdpb.ChangeShardResponse response = getStub().changeShard(request); handleResponseError(response.getHeader()); } @@ -1121,4 +1187,47 @@ public void changeShard(int groupId, List shards) throws PDExcepti public PartitionCache getCache() { return cache; } + + public interface PDEventListener { + void onStoreChanged(NodeEvent event); + + void onPartitionChanged(PartitionEvent event); + + void onGraphChanged(WatchResponse event); + + default void onShardGroupChanged(WatchResponse event) { + } + + } + + static class StubProxy { + private volatile PDGrpc.PDBlockingStub stub; + private final LinkedList hostList = new LinkedList<>(); + + public StubProxy(String[] hosts) { + for (String host : hosts) if (!host.isEmpty()) hostList.offer(host); + } + + public String nextHost() { + String host = hostList.poll(); + hostList.offer(host); //移到尾部 + return host; + } + + public void set(PDGrpc.PDBlockingStub stub) { + this.stub = stub; + } + + public PDGrpc.PDBlockingStub get() { + return this.stub; + } + + public String getHost() { + return hostList.peek(); + } + + public int getHostCount() { + return hostList.size(); + } + } } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java index 3b72ee89ad..64558436ea 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
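With PDEventListener now a nested interface of PDClient, callers can subscribe to the same store/partition/graph events the client uses to invalidate its cache. An illustrative registration whose handlers only log (real callers would refresh their own routing state):

static void listenForEvents(PDClient pd) {
    pd.addEventListener(new PDClient.PDEventListener() {
        @Override
        public void onStoreChanged(NodeEvent event) {
            System.out.println("store " + Long.toHexString(event.getNodeId())
                               + " event: " + event.getEventType());
        }

        @Override
        public void onPartitionChanged(PartitionEvent event) {
            System.out.println("partition " + event.getGraph() + "/"
                               + event.getPartitionId() + " changed");
        }

        @Override
        public void onGraphChanged(WatchResponse event) {
            // graph metadata changed; onShardGroupChanged keeps its default no-op
        }
    });
}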
+ */ + package org.apache.hugegraph.pd.client; public final class PDConfig { @@ -29,11 +46,14 @@ public static PDConfig of(String serverHost, long timeOut) { config.grpcTimeOut = timeOut; return config; } + public String getServerHost() { return serverHost; } - public long getGrpcTimeOut(){ return grpcTimeOut; } + public long getGrpcTimeOut() { + return grpcTimeOut; + } @Deprecated public PDConfig setEnablePDNotify(boolean enablePDNotify) { @@ -56,7 +76,7 @@ public PDConfig setEnableCache(boolean enableCache) { @Override public String toString() { return "PDConfig{" + - "serverHost='" + serverHost + '\'' + - '}'; + "serverHost='" + serverHost + '\'' + + '}'; } } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java index 63dd780131..ca01327cb2 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java @@ -1,39 +1,56 @@ -package org.apache.hugegraph.pd.client; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ -import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; -import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; -import org.apache.hugegraph.pd.pulse.PulseServerNotice; +package org.apache.hugegraph.pd.client; import java.io.Closeable; import java.util.function.Consumer; +import org.apache.hugegraph.pd.pulse.PulseServerNotice; + +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; + /** * Bidirectional communication interface of pd-client and pd-server + * * @author lynn.bond@hotmail.com created on 2021/11/9 */ public interface PDPulse { - /** - * - * @param listener - * @return - */ - Notifier connectPartition(Listener listener); - /*** inner static methods ***/ static Listener listener(Consumer onNext) { - return listener(onNext, t -> {}, () -> {}); + return listener(onNext, t -> { + }, () -> { + }); } static Listener listener(Consumer onNext, Consumer onError) { - return listener(onNext, onError, () -> {}); + return listener(onNext, onError, () -> { + }); } static Listener listener(Consumer onNext, Runnable onCompleted) { - return listener(onNext, t -> {}, onCompleted); + return listener(onNext, t -> { + }, onCompleted); } - static Listener listener(Consumer onNext, Consumer onError, Runnable onCompleted) { + static Listener listener(Consumer onNext, Consumer onError, + Runnable onCompleted) { return new Listener() { @Override public void onNext(T response) { @@ -57,6 +74,13 @@ public void onCompleted() { }; } + /** + * @param listener + * @return + */ + Notifier connectPartition( + Listener listener); + /** * Interface of pulse. */ @@ -67,13 +91,15 @@ interface Listener { * @param response the response. */ @Deprecated - default void onNext(T response){}; + default void onNext(T response) { + } /** * Invoked on new events. + * * @param notice a wrapper of response */ - default void onNotice(PulseServerNotice notice){ + default void onNotice(PulseServerNotice notice) { notice.ack(); } @@ -93,6 +119,7 @@ default void onNotice(PulseServerNotice notice){ /** * Interface of notifier that can send notice to server. + * * @param */ interface Notifier extends Closeable { @@ -104,12 +131,14 @@ interface Notifier extends Closeable { /** * Send notice to pd-server. + * * @return */ void notifyServer(T t); /** * Send an error report to pd-server. + * * @param error */ void crash(String error); diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java index 91ba2687fe..4dd00c0d27 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
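PDPulse is the bidirectional heartbeat channel: the Listener receives server instructions, the Notifier sends requests and acks back. A hedged sketch of connecting the partition channel, assuming the request/response type parameters implied by this file's imports; the empty heartbeat request is a placeholder:

static void pulseExample(PDClient pd) throws Exception {
    PDPulse pulse = pd.getPulseClient();
    PDPulse.Notifier<PartitionHeartbeatRequest> notifier = pulse.connectPartition(
            PDPulse.listener(
                    response -> System.out.println("partition instruction: " + response),
                    error -> System.err.println("pulse failed: " + error)));
    notifier.notifyServer(PartitionHeartbeatRequest.newBuilder().build());
    notifier.close();   // Notifier extends Closeable
}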
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.client; import java.util.concurrent.ExecutorService; @@ -14,6 +31,7 @@ import com.baidu.hugegraph.pd.grpc.pulse.PulseType; import com.baidu.hugegraph.pd.pulse.PartitionNotice; import com.google.common.util.concurrent.ThreadFactoryBuilder; + import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; import io.grpc.stub.StreamObserver; @@ -27,12 +45,13 @@ final class PDPulseImpl implements PDPulse { private final HgPdPulseGrpc.HgPdPulseStub stub; - private ExecutorService threadPool ; + private final ExecutorService threadPool; // TODO: support several servers. public PDPulseImpl(String pdServerAddress) { this.stub = HgPdPulseGrpc.newStub(getChannel(pdServerAddress)); - var namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("ack-notice-pool-%d").build(); + var namedThreadFactory = + new ThreadFactoryBuilder().setNameFormat("ack-notice-pool-%d").build(); threadPool = Executors.newSingleThreadExecutor(namedThreadFactory); } @@ -41,13 +60,15 @@ private ManagedChannel getChannel(String target) { } @Override - public Notifier connectPartition(Listener listener) { + public Notifier connectPartition( + Listener listener) { return new PartitionHeartbeat(listener); } /*** PartitionHeartbeat's implement ***/ private class PartitionHeartbeat extends - AbstractConnector { + AbstractConnector { private long observerId = -1; PartitionHeartbeat(Listener listener) { @@ -84,7 +105,8 @@ public void onNext(PulseResponse pulseResponse) { } - private abstract class AbstractConnector implements Notifier, StreamObserver { + private abstract class AbstractConnector implements Notifier, + StreamObserver { Listener listener; StreamObserver reqStream; PulseType pulseType; @@ -99,7 +121,7 @@ private AbstractConnector(Listener listener, PulseType pulseType) { void init() { PulseCreateRequest.Builder builder = PulseCreateRequest.newBuilder() - .setPulseType(this.pulseType); + .setPulseType(this.pulseType); this.reqStream = PDPulseImpl.this.stub.pulse(this); this.reqStream.onNext(reqBuilder.clear().setCreateRequest(builder).build()); @@ -137,10 +159,12 @@ protected void ackNotice(long noticeId, long observerId) { threadPool.execute(() -> { // log.info("send ack: {}, ts: {}", noticeId, System.currentTimeMillis()); this.reqStream.onNext(reqBuilder.clear() - .setAckRequest( - this.ackBuilder.clear().setNoticeId(noticeId) - .setObserverId(observerId).build() - ).build() + .setAckRequest( + this.ackBuilder.clear() + .setNoticeId(noticeId) + .setObserverId(observerId) + .build() + ).build() ); }); } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java index 0662ebe52b..3dfb7de658 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java @@ -1,11 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.client; -import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; +import java.io.Closeable; +import java.util.function.Consumer; + import org.apache.hugegraph.pd.watch.NodeEvent; import org.apache.hugegraph.pd.watch.PartitionEvent; -import java.io.Closeable; -import java.util.function.Consumer; +import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; /** * @author lynn.bond@hotmail.com created on 2021/11/4 @@ -30,19 +48,6 @@ public interface PDWatch { //PDWatcher watchNode(String graph, Listener listener); - /** - * - * @param listener - * @return - */ - Watcher watchPartition(Listener listener); - - Watcher watchNode(Listener listener); - - Watcher watchGraph(Listener listener); - - Watcher watchShardGroup(Listener listener); - /*** inner static methods ***/ static Listener listener(Consumer onNext) { return listener(onNext, t -> { @@ -60,7 +65,8 @@ static Listener listener(Consumer onNext, Runnable onCompleted) { }, onCompleted); } - static Listener listener(Consumer onNext, Consumer onError, Runnable onCompleted) { + static Listener listener(Consumer onNext, Consumer onError, + Runnable onCompleted) { return new Listener() { @Override public void onNext(T response) { @@ -79,6 +85,18 @@ public void onCompleted() { }; } + /** + * @param listener + * @return + */ + Watcher watchPartition(Listener listener); + + Watcher watchNode(Listener listener); + + Watcher watchGraph(Listener listener); + + Watcher watchShardGroup(Listener listener); + /** * Interface of Watcher. diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java index bdbed212c2..e9ca7757cf 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java @@ -1,5 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.client; +import java.util.function.Supplier; + import com.baidu.hugegraph.pd.grpc.watch.HgPdWatchGrpc; import com.baidu.hugegraph.pd.grpc.watch.WatchCreateRequest; import com.baidu.hugegraph.pd.grpc.watch.WatchNodeResponse; @@ -9,12 +28,11 @@ import com.baidu.hugegraph.pd.grpc.watch.WatchType; import com.baidu.hugegraph.pd.watch.NodeEvent; import com.baidu.hugegraph.pd.watch.PartitionEvent; + import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; import io.grpc.stub.StreamObserver; -import java.util.function.Supplier; - /** * @author lynn.bond@hotmail.com created on 2021/11/4 */ @@ -71,7 +89,7 @@ private GraphWatcher(Listener listener) { .newBuilder() .setWatchType(WatchType.WATCH_TYPE_GRAPH_CHANGE) .build() - ); + ); } @Override @@ -84,10 +102,10 @@ private class ShardGroupWatcher extends AbstractWatcher { private ShardGroupWatcher(Listener listener) { super(listener, - () -> WatchCreateRequest - .newBuilder() - .setWatchType(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE) - .build() + () -> WatchCreateRequest + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE) + .build() ); } @@ -101,10 +119,10 @@ private class PartitionWatcher extends AbstractWatcher { private PartitionWatcher(Listener listener) { super(listener, - () -> WatchCreateRequest - .newBuilder() - .setWatchType(WatchType.WATCH_TYPE_PARTITION_CHANGE) - .build() + () -> WatchCreateRequest + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_PARTITION_CHANGE) + .build() ); } @@ -112,7 +130,8 @@ private PartitionWatcher(Listener listener) { public void onNext(WatchResponse watchResponse) { WatchPartitionResponse res = watchResponse.getPartitionResponse(); PartitionEvent event = new PartitionEvent(res.getGraph(), res.getPartitionId(), - PartitionEvent.ChangeType.grpcTypeOf(res.getChangeType())); + PartitionEvent.ChangeType.grpcTypeOf( + res.getChangeType())); this.listener.onNext(event); } } @@ -120,10 +139,10 @@ public void onNext(WatchResponse watchResponse) { private class NodeWatcher extends AbstractWatcher { private NodeWatcher(Listener listener) { super(listener, - () -> WatchCreateRequest - .newBuilder() - .setWatchType(WatchType.WATCH_TYPE_STORE_NODE_CHANGE) - .build() + () -> WatchCreateRequest + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_STORE_NODE_CHANGE) + .build() ); } @@ -131,7 +150,7 @@ private NodeWatcher(Listener listener) { public void onNext(WatchResponse watchResponse) { WatchNodeResponse res = watchResponse.getNodeResponse(); NodeEvent event = new NodeEvent(res.getGraph(), res.getNodeId(), - NodeEvent.EventType.grpcTypeOf(res.getNodeEventType())); + NodeEvent.EventType.grpcTypeOf(res.getNodeEventType())); this.listener.onNext(event); } } @@ -141,7 +160,8 @@ private abstract class AbstractWatcher implements Watcher, StreamObserver reqStream; Supplier requestSupplier; - private AbstractWatcher(Listener listener, Supplier requestSupplier) { + private AbstractWatcher(Listener listener, + Supplier requestSupplier) { this.listener = listener; this.requestSupplier = requestSupplier; this.init(); diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java index 2837eefd3a..e7226b8982 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java @@ -1,19 +1,36 @@ -package org.apache.hugegraph.pd.pulse; +/* + * Licensed to the 
Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import java.util.function.Consumer; +package org.apache.hugegraph.pd.pulse; import java.util.function.Consumer; import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; + /** * @author lynn.bond@hotmail.com created on 2022/2/13 */ -public class PartitionNotice implements PulseServerNotice{ - private long noticeId; - private Consumer ackConsumer; - private PartitionHeartbeatResponse content; +public class PartitionNotice implements PulseServerNotice { + private final long noticeId; + private final Consumer ackConsumer; + private final PartitionHeartbeatResponse content; - public PartitionNotice(long noticeId, Consumer ackConsumer, PartitionHeartbeatResponse content) { + public PartitionNotice(long noticeId, Consumer ackConsumer, + PartitionHeartbeatResponse content) { this.noticeId = noticeId; this.ackConsumer = ackConsumer; this.content = content; diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java index 03d298f4c8..468553714f 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.pulse; /** @@ -13,6 +30,7 @@ public interface PulseServerNotice { /** * Return a response object of gRPC stream. 
+ * * @return */ T getContent(); diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java index dae8726388..508fa12efc 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java @@ -1,21 +1,38 @@ -package org.apache.hugegraph.pd.watch; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; +package org.apache.hugegraph.pd.watch; import java.util.Objects; +import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; + /** * @author lynn.bond@hotmail.com created on 2021/11/4 */ public class NodeEvent { - private String graph; - private long nodeId; - private EventType eventType; + private final String graph; + private final long nodeId; + private final EventType eventType; public NodeEvent(String graph, long nodeId, EventType eventType) { - this.graph=graph; - this.nodeId=nodeId; - this.eventType=eventType; + this.graph = graph; + this.nodeId = nodeId; + this.eventType = eventType; } public String getGraph() { @@ -30,6 +47,30 @@ public EventType getEventType() { return eventType; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + NodeEvent nodeEvent = (NodeEvent) o; + return nodeId == nodeEvent.nodeId && Objects.equals(graph, + nodeEvent.graph) && + eventType == nodeEvent.eventType; + } + + @Override + public int hashCode() { + return Objects.hash(graph, nodeId, eventType); + } + + @Override + public String toString() { + return "NodeEvent{" + + "graph='" + graph + '\'' + + ", nodeId=" + nodeId + + ", eventType=" + eventType + + '}'; + } + public enum EventType { UNKNOWN, NODE_ONLINE, @@ -52,27 +93,4 @@ public static EventType grpcTypeOf(NodeEventType grpcType) { } } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - NodeEvent nodeEvent = (NodeEvent) o; - return nodeId == nodeEvent.nodeId && Objects.equals(graph, - nodeEvent.graph) && eventType == nodeEvent.eventType; - } - - @Override - public int hashCode() { - return Objects.hash(graph, nodeId, eventType); - } - - @Override - public String toString() { - return "NodeEvent{" + - "graph='" + graph + '\'' + - ", nodeId=" + nodeId + - ", eventType=" + eventType + - '}'; - } } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java index 18a34e8a45..8a2a406904 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java +++ 
b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.watch; /** diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java index 04adb403a2..6241ac3e6f 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java @@ -1,16 +1,33 @@ -package org.apache.hugegraph.pd.watch; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ -import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; +package org.apache.hugegraph.pd.watch; import java.util.Objects; +import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; + /** * @author lynn.bond@hotmail.com created on 2021/11/4 */ public class PartitionEvent { - private String graph; - private int partitionId; - private ChangeType changeType; + private final String graph; + private final int partitionId; + private final ChangeType changeType; public PartitionEvent(String graph, int partitionId, ChangeType changeType) { this.graph = graph; @@ -30,6 +47,29 @@ public ChangeType getChangeType() { return this.changeType; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PartitionEvent that = (PartitionEvent) o; + return partitionId == that.partitionId && Objects.equals(graph, that.graph) && + changeType == that.changeType; + } + + @Override + public int hashCode() { + return Objects.hash(graph, partitionId, changeType); + } + + @Override + public String toString() { + return "PartitionEvent{" + + "graph='" + graph + '\'' + + ", partitionId=" + partitionId + + ", changeType=" + changeType + + '}'; + } + public enum ChangeType { UNKNOWN, ADD, @@ -49,26 +89,4 @@ public static ChangeType grpcTypeOf(WatchChangeType grpcType) { } } } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - PartitionEvent that = (PartitionEvent) o; - return partitionId == that.partitionId && Objects.equals(graph, that.graph) && changeType == that.changeType; - } - - @Override - public int hashCode() { - return Objects.hash(graph, partitionId, changeType); - } - - @Override - public String toString() { - return "PartitionEvent{" + - "graph='" + graph + '\'' + - ", partitionId=" + partitionId + - ", changeType=" + changeType + - '}'; - } } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java index cafd4dd895..493f93c4eb 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.watch; /** @@ -7,10 +24,10 @@ enum WatchType { PARTITION_CHANGE(10); - private int value; + private final int value; - private WatchType(int value){ - this.value=value; + WatchType(int value) { + this.value = value; } } diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java index bb32ddd155..56040389c7 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd; import java.util.ArrayList; @@ -17,17 +34,18 @@ public class PartitionCacheTest { // @Test - public void test(){ + public void test() { PartitionCache cache = new PartitionCache(); - for(int i = 0; i< 10; i++) { - KVPair partShards = new KVPair<>(Metapb.Partition.newBuilder() - .setStartKey(i*10) - .setEndKey((i+1)*10) - .build(),null); + for (int i = 0; i < 10; i++) { + KVPair partShards = + new KVPair<>(Metapb.Partition.newBuilder() + .setStartKey(i * 10) + .setEndKey((i + 1) * 10) + .build(), null); cache.addPartition("aa", i, partShards.getKey()); } - for(int i = 0; i<100; i++){ + for (int i = 0; i < 100; i++) { KVPair partShards = cache.getPartitionByCode("aa", i); System.out.println(" " + i + " " + partShards.getKey().getStartKey()); } @@ -35,7 +53,7 @@ public void test(){ // @Test - public void test1(){ + public void test1() { Map> keyToPartIdCache = new HashMap<>(); // graphName + PartitionID组成key Map> partitionCache = new HashMap<>(); @@ -46,17 +64,19 @@ public void test1(){ keyToPartIdCache.put("a", TreeRangeMap.create()); keyToPartIdCache.get("a") - .put(Range.closedOpen(1L, 2L), 1); + .put(Range.closedOpen(1L, 2L), 1); allStoresCache.put("a", new ArrayList<>()); allStoresCache.get("a").add(Metapb.Store.newBuilder().setId(34).build()); - Map> keyToPartIdCache2 = cloneKeyToPartIdCache(keyToPartIdCache); + Map> keyToPartIdCache2 = + cloneKeyToPartIdCache(keyToPartIdCache); System.out.println(keyToPartIdCache2.size()); } - public Map> cloneKeyToPartIdCache(Map> cache) { + public Map> cloneKeyToPartIdCache( + Map> cache) { Map> cacheClone = new HashMap<>(); cache.forEach((k1, v1) -> { cacheClone.put(k1, TreeRangeMap.create()); diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java index 9797314775..34e0013853 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java @@ -1,27 +1,43 @@ +/* + * Licensed to the 
Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd; +import java.nio.charset.StandardCharsets; +import java.util.List; + import org.apache.hugegraph.pd.client.PDClient; import org.apache.hugegraph.pd.client.PDConfig; import org.apache.hugegraph.pd.client.PDPulse; +import org.apache.hugegraph.pd.pulse.PulseServerNotice; +import org.junit.Assert; +import org.junit.BeforeClass; + import com.baidu.hugegraph.pd.common.KVPair; import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; -import org.apache.hugegraph.pd.pulse.PulseServerNotice; -import org.junit.Assert; -import org.junit.BeforeClass; -// import org.junit.Test; - -import java.nio.charset.StandardCharsets; -import java.util.List; public class StoreRegisterTest { private static PDClient pdClient; - - private long storeId = 0; private final String storeAddr = "localhost"; private final String graphName = "default/hugegraph/g"; + private long storeId = 0; @BeforeClass public static void beforeClass() throws Exception { @@ -48,6 +64,7 @@ public void testGetStore() throws PDException { Assert.assertTrue(store.getAddress().equals(storeAddr)); System.out.println(store); } + // @Test public void testGetActiveStores() throws PDException { testRegisterStore(); @@ -63,8 +80,8 @@ public void testGetActiveStores() throws PDException { public void testStoreHeartbeat() throws PDException { testRegisterStore(); Metapb.StoreStats stats = Metapb.StoreStats.newBuilder() - .setStoreId(storeId) - .build(); + .setStoreId(storeId) + .build(); pdClient.storeHeartbeat(stats); List stores = pdClient.getActiveStores(graphName); boolean exist = false; @@ -78,7 +95,6 @@ public void testStoreHeartbeat() throws PDException { } - // @Test public void testPartitionHeartbeat() throws InterruptedException, PDException { testRegisterStore(); @@ -106,14 +122,20 @@ public void onCompleted() { } }); - KVPair partShard = pdClient.getPartition("test", "1".getBytes(StandardCharsets.UTF_8)); + KVPair partShard = + pdClient.getPartition("test", "1".getBytes(StandardCharsets.UTF_8)); notifier.notifyServer(PartitionHeartbeatRequest.newBuilder() - .setStates( - Metapb.PartitionStats.newBuilder() - .addGraphName("test") - .setId(partShard.getKey().getId()) - .setLeader(Metapb.Shard.newBuilder() - .setStoreId(1).build()))); + .setStates( + Metapb.PartitionStats.newBuilder() + .addGraphName( + "test") + .setId(partShard.getKey() + .getId()) + .setLeader( + Metapb.Shard.newBuilder() + .setStoreId( + 1) + .build()))); Thread.sleep(10000); diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java 
b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java index 6abdb39929..692bfa2164 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java @@ -1,10 +1,21 @@ -package org.apache.hugegraph.pd.client; - -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; -import com.baidu.hugegraph.pd.grpc.discovery.Query; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import org.junit.Assert; -// import org.junit.Test; +package org.apache.hugegraph.pd.client; import java.util.HashMap; import java.util.Map; @@ -12,12 +23,18 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; +import org.junit.Assert; + +import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; +import com.baidu.hugegraph.pd.grpc.discovery.Query; + /** * @author zhangyingjie * @date 2021/12/21 **/ public class DiscoveryClientImplTest { + private static final AtomicLong label = new AtomicLong(); String address = "localhost:80"; int delay = 1000; int wait = delay * 3 + 500; @@ -27,32 +44,32 @@ public void registerStore() throws InterruptedException { HashMap labels = new HashMap<>(); - labels.put("metrics","/actuator/prometheus"); - labels.put("target","10.81.116.77:8520"); - labels.put("scheme","http"); - labels.put("__relabeling","http"); - labels.put("no_relabeling","http"); - getClient("store", "address1",labels); - - labels.put("metrics","/actuator/prometheus"); - labels.put("target","10.81.116.78:8520"); - labels.put("scheme","http"); - getClient("store", "address2",labels); - - labels.put("metrics","/actuator/prometheus"); - labels.put("target","10.81.116.79:8520"); - labels.put("scheme","http"); - getClient("store", "address3",labels); - - labels.put("metrics","/actuator/prometheus"); - labels.put("target","10.81.116.78:8620"); - labels.put("scheme","http"); - getClient("pd", "address1",labels); - - labels.put("metrics","/graph/metrics"); - labels.put("target","10.37.1.1:9200"); - labels.put("scheme","https"); - getClient("hugegraph", "address1",labels); + labels.put("metrics", "/actuator/prometheus"); + labels.put("target", "10.81.116.77:8520"); + labels.put("scheme", "http"); + labels.put("__relabeling", "http"); + labels.put("no_relabeling", "http"); + getClient("store", "address1", labels); + + labels.put("metrics", "/actuator/prometheus"); + labels.put("target", "10.81.116.78:8520"); + labels.put("scheme", "http"); + getClient("store", "address2", labels); + + labels.put("metrics", "/actuator/prometheus"); + labels.put("target", "10.81.116.79:8520"); + labels.put("scheme", "http"); + getClient("store", "address3", labels); + + labels.put("metrics", 
"/actuator/prometheus"); + labels.put("target", "10.81.116.78:8620"); + labels.put("scheme", "http"); + getClient("pd", "address1", labels); + + labels.put("metrics", "/graph/metrics"); + labels.put("target", "10.37.1.1:9200"); + labels.put("scheme", "https"); + getClient("hugegraph", "address1", labels); } // @Test @@ -64,7 +81,7 @@ public void testNodes() throws InterruptedException { // @Test public void testMultiNode() throws InterruptedException { for (int i = 0; i < 2; i++) { - register("app" + String.valueOf(i), address + i); + register("app" + i, address + i); } } @@ -87,22 +104,22 @@ public void testParallelMultiNode() throws InterruptedException { }).start(); } latch.await(); - Assert.assertTrue(exceptions.size() == 0); + Assert.assertEquals(0, exceptions.size()); } - private static AtomicLong label = new AtomicLong(); + private void register(String appName, String address) throws InterruptedException { HashMap labels = new HashMap<>(); String labelValue = String.valueOf(label.incrementAndGet()); - labels.put("address",labelValue); - labels.put("address1",labelValue); + labels.put("address", labelValue); + labels.put("address1", labelValue); Query query = Query.newBuilder().setAppName( appName).setVersion("0.13.0").putAllLabels(labels).build(); DiscoveryClientImpl discoveryClient = getClient(appName, address, labels); Thread.sleep(10000); NodeInfos nodeInfos1 = discoveryClient.getNodeInfos(query); Assert.assertTrue(nodeInfos1.getInfoCount() == 1); - DiscoveryClientImpl discoveryClient1 = getClient(appName, address + 0,labels); + DiscoveryClientImpl discoveryClient1 = getClient(appName, address + 0, labels); Thread.sleep(10000); Assert.assertTrue( discoveryClient.getNodeInfos(query).getInfoCount() == 2); @@ -120,15 +137,15 @@ private void register(String appName, String address) throws InterruptedExceptio discoveryClient1.close(); } - private DiscoveryClientImpl getClient(String appName, String address,Map labels) { + private DiscoveryClientImpl getClient(String appName, String address, Map labels) { DiscoveryClientImpl discoveryClient = null; - try{ - discoveryClient = DiscoveryClientImpl.newBuilder().setCenterAddress( - "localhost:8687,localhost:8686,localhost:8688").setAddress(address).setAppName( - appName).setDelay(delay).setVersion("0.13.0").setId( - "0").setLabels(labels).build(); - discoveryClient.scheduleTask(); - } catch(Exception e){ + try { + discoveryClient = DiscoveryClientImpl.newBuilder().setCenterAddress( + "localhost:8687,localhost:8686,localhost:8688").setAddress(address).setAppName( + appName).setDelay(delay).setVersion("0.13.0").setId( + "0").setLabels(labels).build(); + discoveryClient.scheduleTask(); + } catch (Exception e) { e.printStackTrace(); } diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java index 53fd34c563..34d403caac 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java @@ -1,18 +1,36 @@ -package org.apache.hugegraph.pd.client; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.grpc.kv.KResponse; -import com.baidu.hugegraph.pd.grpc.kv.KvResponse; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.io.FileUtils; -// import org.junit.Test; -import org.yaml.snakeyaml.Yaml; +package org.apache.hugegraph.pd.client; import java.io.File; import java.util.Iterator; import java.util.Map; import java.util.Properties; +import org.apache.commons.io.FileUtils; +import org.yaml.snakeyaml.Yaml; + +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.kv.KResponse; +import com.baidu.hugegraph.pd.grpc.kv.KvResponse; + +import lombok.extern.slf4j.Slf4j; + /** * @author zhangyingjie * @date 2021/12/21 @@ -53,6 +71,7 @@ public void getKv() { log.error("put license with error: {}", e); } } + // @Test public void putKv() { PDConfig pdConfig = PDConfig.of("10.14.139.70:8688"); @@ -67,6 +86,7 @@ public void putKv() { log.error("put license with error: {}", e); } } + // @Test public void putKvLocal() { PDConfig pdConfig = PDConfig.of("localhost:8686"); diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java index a2cabe7ad4..3833ef9f9b 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java @@ -1,14 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.client; -import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import org.apache.hugegraph.pd.client.test.HgPDTestUtil; import org.apache.hugegraph.pd.pulse.PulseServerNotice; import org.junit.BeforeClass; -// import org.junit.Test; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; +import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; /** * @author lynn.bond@hotmail.com created on 2021/11/8 @@ -16,9 +32,9 @@ public class PDPulseTest { private static PDClient pdClient; - private long storeId = 0; - private String storeAddress = "localhost"; - private String graphName = "graph1"; + private final long storeId = 0; + private final String storeAddress = "localhost"; + private final String graphName = "graph1"; @BeforeClass public static void beforeClass() throws Exception { @@ -34,9 +50,12 @@ public void listen() { PDPulse pulse = pdClient.getPulseClient(); CountDownLatch latch = new CountDownLatch(60); - PDPulse.Notifier notifier1 = pulse.connectPartition(new PulseListener(latch, "listener1")); - PDPulse.Notifier notifier2 = pulse.connectPartition(new PulseListener(latch, "listener2")); - PDPulse.Notifier notifier3 = pulse.connectPartition(new PulseListener(latch, "listener3")); + PDPulse.Notifier notifier1 = + pulse.connectPartition(new PulseListener(latch, "listener1")); + PDPulse.Notifier notifier2 = + pulse.connectPartition(new PulseListener(latch, "listener2")); + PDPulse.Notifier notifier3 = + pulse.connectPartition(new PulseListener(latch, "listener3")); try { latch.await(120, TimeUnit.SECONDS); @@ -60,7 +79,7 @@ public void listen() { private class PulseListener implements PDPulse.Listener { CountDownLatch latch = new CountDownLatch(10); - private String listenerName; + private final String listenerName; private PulseListener(CountDownLatch latch, String listenerName) { this.latch = latch; diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java index 7d7026f146..6f5f448121 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java @@ -1,13 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.client; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + import org.apache.hugegraph.pd.client.test.HgPDTestUtil; import org.apache.hugegraph.pd.watch.NodeEvent; - import org.junit.BeforeClass; -// import org.junit.Test; - -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; /** * @author lynn.bond@hotmail.com created on 2021/11/8 @@ -16,9 +31,9 @@ public class PDWatchTest { private static PDClient pdClient; - private long storeId = 0; - private String storeAddr = "localhost"; - private String graphName = "graph1"; + private final long storeId = 0; + private final String storeAddr = "localhost"; + private final String graphName = "graph1"; @BeforeClass public static void beforeClass() throws Exception { @@ -26,15 +41,16 @@ public static void beforeClass() throws Exception { } // @Test - public void watch(){ - PDWatch watch=pdClient.getWatchClient(); + public void watch() { + PDWatch watch = pdClient.getWatchClient(); CountDownLatch latch = new CountDownLatch(10); - PDWatch.Watcher watcher1=watch.watchPartition(new WatchListener(latch,"watcher1")); - PDWatch.Watcher watcher2=watch.watchPartition(new WatchListener(latch,"watcher2")); - PDWatch.Watcher watcher3=watch.watchPartition(new WatchListener(latch,"watcher3")); + PDWatch.Watcher watcher1 = watch.watchPartition(new WatchListener(latch, "watcher1")); + PDWatch.Watcher watcher2 = watch.watchPartition(new WatchListener(latch, "watcher2")); + PDWatch.Watcher watcher3 = watch.watchPartition(new WatchListener(latch, "watcher3")); - PDWatch.Watcher nodeWatcher1=watch.watchNode(new WatchListener(latch,"nodeWatcher1")); + PDWatch.Watcher nodeWatcher1 = + watch.watchNode(new WatchListener(latch, "nodeWatcher1")); try { latch.await(15, TimeUnit.SECONDS); @@ -46,13 +62,13 @@ public void watch(){ watcher3.close(); } - private class WatchListener implements PDWatch.Listener{ + private class WatchListener implements PDWatch.Listener { CountDownLatch latch = new CountDownLatch(10); - private String watcherName; + private final String watcherName; - private WatchListener(CountDownLatch latch,String watcherName){ - this.latch=latch; - this.watcherName=watcherName; + private WatchListener(CountDownLatch latch, String watcherName) { + this.latch = latch; + this.watcherName = watcherName; } @Override diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java index f82535e210..51152b74f4 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.client.test; import java.nio.ByteBuffer; diff --git a/hg-pd-clitools/pom.xml b/hg-pd-clitools/pom.xml index 2419d07b7e..524ae5affc 100644 --- a/hg-pd-clitools/pom.xml +++ b/hg-pd-clitools/pom.xml @@ -1,6 +1,23 @@ - + + hugegraph-pd-root diff --git a/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java b/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java index 1e6c6cd651..cb05a8db74 100644 --- a/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java +++ b/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.clitools; import com.baidu.hugegraph.pd.client.PDClient; @@ -10,9 +27,9 @@ public class Main { public static void main(String[] args) throws PDException { - if ( args.length < 3){ + if (args.length < 3) { String error = " usage: pd-address config key[=value] \n key list: " + - "\n\tenableBatchLoad"; + "\n\tenableBatchLoad"; System.out.println(error); System.exit(0); } @@ -21,9 +38,8 @@ public static void main(String[] args) throws PDException { String param = args[2]; System.out.println(pd + " " + cmd + " " + param); System.out.println("Result: \n"); - switch (cmd){ - case "config": - doConfig(pd, param); + if (cmd.equals("config")) { + doConfig(pd, param); } } @@ -32,13 +48,14 @@ public static void doConfig(String pd, String param) throws PDException { String[] pair = param.split("="); String key = pair[0].trim(); Object value = null; - if ( pair.length > 1) + if (pair.length > 1) { value = pair[1].trim(); - if ( value == null){ + } + if (value == null) { Metapb.PDConfig pdConfig = pdClient.getPDConfig(); - switch (key){ + switch (key) { case "enableBatchLoad": - // value = pdConfig.getEnableBatchLoad(); + // value = pdConfig.getEnableBatchLoad(); break; case "shardCount": value = pdConfig.getShardCount(); @@ -46,13 +63,13 @@ public static void doConfig(String pd, String param) throws PDException { } System.out.println("Get config " + key + "=" + value); - }else{ + } else { Metapb.PDConfig.Builder builder = Metapb.PDConfig.newBuilder(); - switch (key){ + switch (key) { case "enableBatchLoad": - // builder.setEnableBatchLoad(Boolean.valueOf((String)value)); + // builder.setEnableBatchLoad(Boolean.valueOf((String)value)); case "shardCount": - builder.setShardCount(Integer.valueOf((String)value)); + builder.setShardCount(Integer.valueOf((String) value)); } pdClient.setPDConfig(builder.build()); System.out.println("Set config " + key + "=" + value); diff --git 
a/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java b/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java index 30d0efb0b6..65f21ed91e 100644 --- a/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java +++ b/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java @@ -1,16 +1,51 @@ -package org.apache.hugegraph.pd.clitools; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.common.PDException; -// import org.junit.Test; +package org.apache.hugegraph.pd.clitools; import java.util.Arrays; import java.util.List; +import com.baidu.hugegraph.pd.common.PDException; + public class MainTest { + public static boolean test2sup(List arrays, int tail, int res) { + System.out.printf("%d %d%n", tail, res); + if (tail == 0) { + System.out.printf("a = %d %d%n", tail, res); + return false; + } else if (tail == 1) { + System.out.printf("b = %d %d%n", arrays.get(0), res); + return (arrays.get(0) == res); + } else if (tail == 2) { + System.out.printf("c = %d %d %d%n", arrays.get(0), arrays.get(1), res); + return (arrays.get(0) + arrays.get(1) == Math.abs(res)) || + (Math.abs(arrays.get(0) - arrays.get(1)) == Math.abs(res)); + } else { + return test2sup(arrays, tail - 1, res + arrays.get(tail - 1)) || + test2sup(arrays, tail - 1, res - arrays.get(tail - 1)); + } + } + // @Test public void getConfig() throws PDException { Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad"}); } + // @Test public void setBatchTrue() throws PDException { Main.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad= true "}); @@ -25,6 +60,7 @@ public void setBatchFalse() throws PDException { public void getConfig2() throws PDException { Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount"}); } + // @Test public void setShardCount1() throws PDException { Main.main(new String[]{"127.0.0.1:8686", "config", "shardCount=1"}); @@ -36,26 +72,9 @@ public void setShardCount3() throws PDException { } // @Test - public void test2(){ - Integer[] a = new Integer[] { 1, 0, 3, 2}; + public void test2() { + Integer[] a = new Integer[]{1, 0, 3, 2}; List aa = Arrays.asList(a); - System.out.printf(test2sup(aa, aa.size(),0)?"TRUE":"FALSE"); - } - public static boolean test2sup (List arrays, int tail, int res) { - System.out.println(String.format("%d %d", tail, res)); - if (tail == 0) { - System.out.println(String.format("a = %d %d", tail, res)); - return false; - } else if(tail == 1) { - System.out.println(String.format("b = %d %d", arrays.get(0), res)); - return (arrays.get(0) == res); - } else if(tail == 2) { - System.out.println(String.format("c = %d %d %d", arrays.get(0), arrays.get(1), res)); - return (arrays.get(0) + arrays.get(1) == 
Math.abs(res)) || - (Math.abs(arrays.get(0) - arrays.get(1)) == Math.abs(res)); - } else { - return test2sup(arrays, tail - 1, res + arrays.get(tail - 1)) || - test2sup(arrays, tail - 1, res - arrays.get(tail - 1)); - } + System.out.printf(test2sup(aa, aa.size(), 0) ? "TRUE" : "FALSE"); } } diff --git a/hg-pd-common/pom.xml b/hg-pd-common/pom.xml index 89c8d3dca8..715cec30c0 100644 --- a/hg-pd-common/pom.xml +++ b/hg-pd-common/pom.xml @@ -1,6 +1,23 @@ - + + 4.0.0 diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java index f964329278..0d94758631 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.common; import java.util.Collection; @@ -63,18 +80,15 @@ public static boolean isInvalid(String... strs) { } public static boolean isInvalid(byte[] bytes) { - if (bytes == null || bytes.length == 0) return true; - return false; + return bytes == null || bytes.length == 0; } public static boolean isInvalid(Map map) { - if (map == null || map.isEmpty()) return true; - return false; + return map == null || map.isEmpty(); } public static boolean isInvalid(Collection list) { - if (list == null || list.isEmpty()) return true; - return false; + return list == null || list.isEmpty(); } public static boolean isContains(Collection list, T item) { diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java index 641fa38e97..9af56464ed 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.common; import java.io.Serializable; @@ -9,6 +26,21 @@ public class KVPair implements Serializable { * Key of this Pair. */ private K key; + /** + * Value of this this Pair. + */ + private V value; + + /** + * Creates a new pair + * + * @param key The key for this pair + * @param value The value to use for this pair + */ + public KVPair(K key, V value) { + this.key = key; + this.value = value; + } /** * Gets the key for this pair. @@ -19,15 +51,10 @@ public K getKey() { return key; } - public void setKey(K key){ + public void setKey(K key) { this.key = key; } - /** - * Value of this this Pair. - */ - private V value; - /** * Gets the value for this pair. * @@ -37,18 +64,7 @@ public V getValue() { return value; } - - public void setValue(V value){ - this.value = value; - } - /** - * Creates a new pair - * - * @param key The key for this pair - * @param value The value to use for this pair - */ - public KVPair(K key, V value) { - this.key = key; + public void setValue(V value) { this.value = value; } @@ -105,8 +121,7 @@ public boolean equals(Object o) { if (o instanceof KVPair) { KVPair pair = (KVPair) o; if (!Objects.equals(key, pair.key)) return false; - if (!Objects.equals(value, pair.value)) return false; - return true; + return Objects.equals(value, pair.value); } return false; } diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java index 77015c5c21..629989bc23 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java @@ -1,6 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.common; -public class PDException extends Exception{ +public class PDException extends Exception { private int errorCode = 0; public PDException(int error) { diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java index 1be1dea183..b0bfe2c0ae 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.common; /** diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java index 6f1968f604..a0cc9bc025 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java @@ -1,9 +1,21 @@ -package org.apache.hugegraph.pd.common; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.google.common.collect.Range; -import com.google.common.collect.RangeMap; -import com.google.common.collect.TreeRangeMap; +package org.apache.hugegraph.pd.common; import java.util.ArrayList; import java.util.HashMap; @@ -15,6 +27,11 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import com.baidu.hugegraph.pd.grpc.Metapb; +import com.google.common.collect.Range; +import com.google.common.collect.RangeMap; +import com.google.common.collect.TreeRangeMap; + public class PartitionCache { // 每张图一个缓存 @@ -28,7 +45,7 @@ public class PartitionCache { private volatile Map graphCache; // 读写锁对象 - private ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); Lock writeLock = readWriteLock.writeLock(); public PartitionCache() { @@ -86,7 +103,7 @@ public KVPair getPartitionByCode(String graphNam public List getPartitions(String graphName) { List partitions = new ArrayList<>(); // partitionCache key: graph name + partition id - partitionCache.forEach((k,v) -> { + partitionCache.forEach((k, v) -> { if (k.startsWith(graphName)) { partitions.add(v); } @@ -118,13 +135,14 @@ public boolean addPartition(String graphName, int partId, Metapb.Partition parti // 当确认老的 start, end 都是自己的时候,才可以删除老的. 
(即还没覆盖) var graphRange = tmpKeyToPartIdCache.get(graphName); if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && - Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { + Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); } } tmpKeyToPartIdCache.get(graphName) - .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); + .put(Range.closedOpen(partition.getStartKey(), + partition.getEndKey()), partId); partitionCache = tmpPartitionCache; keyToPartIdCache = tmpKeyToPartIdCache; return true; @@ -151,14 +169,15 @@ public void updatePartition(String graphName, int partId, Metapb.Partition parti if (old != null) { var graphRange = tmpKeyToPartIdCache.get(graphName); if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && - Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { + Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); } } tmpKeyToPartIdCache.get(graphName) - .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); + .put(Range.closedOpen(partition.getStartKey(), + partition.getEndKey()), partId); partitionCache = tmpPartitionCache; keyToPartIdCache = tmpKeyToPartIdCache; } finally { @@ -185,19 +204,21 @@ public void removePartition(String graphName, int partId) { try { Map> tmpKeyToPartIdCache = cloneKeyToPartIdCache(); Map tmpPartitionCache = clonePartitionCache(); - Metapb.Partition partition = tmpPartitionCache.remove(makePartitionKey(graphName, partId)); + Metapb.Partition partition = + tmpPartitionCache.remove(makePartitionKey(graphName, partId)); if (partition != null) { var graphRange = tmpKeyToPartIdCache.get(graphName); if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && - Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { + Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); } } partitionCache = tmpPartitionCache; keyToPartIdCache = tmpKeyToPartIdCache; - // log.info("PartitionCache.removePartition : (after){}", debugCacheByGraphName(graphName)); + // log.info("PartitionCache.removePartition : (after){}", debugCacheByGraphName + // (graphName)); } finally { writeLock.unlock(); } @@ -205,6 +226,7 @@ public void removePartition(String graphName, int partId) { /** * remove partition id of graph name + * * @param graphName * @param id */ @@ -227,6 +249,7 @@ public void removePartitions() { /** * remove partition cache of graphName + * * @param graphName */ public void removeAll(String graphName) { @@ -236,8 +259,8 @@ public void removeAll(String graphName) { Map tmpPartitionCache = clonePartitionCache(); var itr = tmpPartitionCache.entrySet().iterator(); while (itr.hasNext()) { - var entry = itr.next(); - if (entry.getKey().startsWith(graphName)) { + var entry = itr.next(); + if (entry.getKey().startsWith(graphName)) { itr.remove(); } } @@ -253,22 +276,20 @@ private String makePartitionKey(String graphName, int partId) { return graphName + "/" + partId; } - public boolean updateShardGroup(Metapb.ShardGroup shardGroup){ + public boolean updateShardGroup(Metapb.ShardGroup shardGroup) { Metapb.ShardGroup oldShardGroup = shardGroupCache.get(shardGroup.getId()); - if 
(oldShardGroup != null && oldShardGroup.equals(shardGroup)){ + if (oldShardGroup != null && oldShardGroup.equals(shardGroup)) { return false; } shardGroupCache.put(shardGroup.getId(), shardGroup); return true; } - public void deleteShardGroup(int shardGroupId){ - if (shardGroupCache.containsKey(shardGroupId)) { - shardGroupCache.remove(shardGroupId); - } + public void deleteShardGroup(int shardGroupId) { + shardGroupCache.remove(shardGroupId); } - public Metapb.ShardGroup getShardGroup(int groupId){ + public Metapb.ShardGroup getShardGroup(int groupId) { return shardGroupCache.get(groupId); } @@ -342,7 +363,7 @@ public void reset() { } } - public void clear(){ + public void clear() { reset(); } @@ -350,7 +371,7 @@ public String debugCacheByGraphName(String graphName) { StringBuilder builder = new StringBuilder(); builder.append("Graph:").append(graphName).append(", cache info: range info: {"); var rangeMap = keyToPartIdCache.get(graphName); - builder.append( rangeMap == null ? "" : rangeMap).append("}"); + builder.append(rangeMap == null ? "" : rangeMap).append("}"); if (rangeMap != null) { builder.append(", partition info : {"); @@ -359,8 +380,8 @@ public String debugCacheByGraphName(String graphName) { builder.append("[part_id:").append(v); if (partition != null) { builder.append(", start_key:").append(partition.getStartKey()) - .append(", end_key:").append(partition.getEndKey()) - .append(", state:").append(partition.getState().name()); + .append(", end_key:").append(partition.getEndKey()) + .append(", state:").append(partition.getState().name()); } builder.append("], "); }); @@ -368,16 +389,16 @@ public String debugCacheByGraphName(String graphName) { } builder.append(", graph info:{"); - var graph = graphCache.get(graphName); + var graph = graphCache.get(graphName); if (graph != null) { builder.append("partition_count:").append(graph.getPartitionCount()) - .append(", state:").append(graph.getState().name()); + .append(", state:").append(graph.getState().name()); } builder.append("}]"); return builder.toString(); } - public Metapb.Shard getLeaderShard(int partitionId){ + public Metapb.Shard getLeaderShard(int partitionId) { var shardGroup = shardGroupCache.get(partitionId); if (shardGroup != null) { for (Metapb.Shard shard : shardGroup.getShardsList()) { @@ -390,18 +411,19 @@ public Metapb.Shard getLeaderShard(int partitionId){ return null; } - public void updateShardGroupLeader(int partitionId, Metapb.Shard leader){ + public void updateShardGroupLeader(int partitionId, Metapb.Shard leader) { if (shardGroupCache.containsKey(partitionId) && leader != null) { - if (! Objects.equals(getLeaderShard(partitionId), leader)) { + if (!Objects.equals(getLeaderShard(partitionId), leader)) { var shardGroup = shardGroupCache.get(partitionId); var builder = Metapb.ShardGroup.newBuilder(shardGroup).clearShards(); for (var shard : shardGroup.getShardsList()) { builder.addShards( Metapb.Shard.newBuilder() - .setStoreId(shard.getStoreId()) - .setRole(shard.getStoreId() == leader.getStoreId() ? - Metapb.ShardRole.Leader : Metapb.ShardRole.Follower) - .build() + .setStoreId(shard.getStoreId()) + .setRole(shard.getStoreId() == leader.getStoreId() ? 
+ Metapb.ShardRole.Leader : + Metapb.ShardRole.Follower) + .build() ); } shardGroupCache.put(partitionId, builder.build()); @@ -409,22 +431,22 @@ public void updateShardGroupLeader(int partitionId, Metapb.Shard leader){ } } - public String debugShardGroup(){ + public String debugShardGroup() { StringBuilder builder = new StringBuilder(); builder.append("shard group cache:{"); - shardGroupCache.forEach((partitionId,shardGroup) ->{ + shardGroupCache.forEach((partitionId, shardGroup) -> { builder.append(partitionId).append("::{") - .append("version:").append(shardGroup.getVersion()) - .append(", conf_version:").append(shardGroup.getConfVer()) - .append(", state:").append(shardGroup.getState().name()) - .append(", shards:["); - - for (var shard : shardGroup.getShardsList()) { - builder.append("{store_id:").append(shard.getStoreId()) - .append(", role:").append(shard.getRole().name()) - .append("},"); - } - builder.append("], "); + .append("version:").append(shardGroup.getVersion()) + .append(", conf_version:").append(shardGroup.getConfVer()) + .append(", state:").append(shardGroup.getState().name()) + .append(", shards:["); + + for (var shard : shardGroup.getShardsList()) { + builder.append("{store_id:").append(shard.getStoreId()) + .append(", role:").append(shard.getRole().name()) + .append("},"); + } + builder.append("], "); }); builder.append("}"); return builder.toString(); diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java index b94225387c..869a686933 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java @@ -1,8 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
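The PartitionCache changes above keep two views in sync: a partition map keyed by "graph/partId" and a Guava RangeMap keyed by Range.closedOpen(startKey, endKey). The sketch below uses made-up key ranges to show the same bookkeeping pattern, including the guard that only drops the old range when both its start key and end key - 1 still resolve to the partition being replaced; it is an illustration, not the cache class itself.

import com.google.common.collect.Range;
import com.google.common.collect.RangeMap;
import com.google.common.collect.TreeRangeMap;

public class RangeMapSketch {
    public static void main(String[] args) {
        RangeMap<Long, Integer> keyToPartId = TreeRangeMap.create();

        // Map key range [0, 1024) to partition 0 and [1024, 2048) to partition 1,
        // mirroring how keyToPartIdCache is keyed by Range.closedOpen(start, end).
        keyToPartId.put(Range.closedOpen(0L, 1024L), 0);
        keyToPartId.put(Range.closedOpen(1024L, 2048L), 1);

        System.out.println(keyToPartId.get(100L));    // 0
        System.out.println(keyToPartId.get(1024L));   // 1 (end key is exclusive)

        // Splitting partition 0: the old range is removed only while both its start key
        // and (end key - 1) still map to the partition being replaced, then new ranges go in.
        if (keyToPartId.get(0L).equals(0) && keyToPartId.get(1023L).equals(0)) {
            keyToPartId.remove(keyToPartId.getEntry(0L).getKey());
        }
        keyToPartId.put(Range.closedOpen(0L, 512L), 0);
        keyToPartId.put(Range.closedOpen(512L, 1024L), 2);
        System.out.println(keyToPartId);
    }
}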
+ */ + package org.apache.hugegraph.pd.common; public class PartitionUtils { - public static final int MAX_VALUE = 0xffff; + public static final int MAX_VALUE = 0xffff; /** * 计算key的hashcode @@ -13,16 +30,18 @@ public class PartitionUtils { public static int calcHashcode(byte[] key) { final int p = 16777619; int hash = (int) 2166136261L; - for (byte element : key) + for (byte element : key) { hash = (hash ^ element) * p; + } hash += hash << 13; hash ^= hash >> 7; hash += hash << 3; hash ^= hash >> 17; hash += hash << 5; hash = hash & PartitionUtils.MAX_VALUE; - if ( hash == PartitionUtils.MAX_VALUE ) + if (hash == PartitionUtils.MAX_VALUE) { hash = PartitionUtils.MAX_VALUE - 1; + } return hash; } } diff --git a/hg-pd-core/pom.xml b/hg-pd-core/pom.xml index 904326841a..55ed09e1a6 100644 --- a/hg-pd-core/pom.xml +++ b/hg-pd-core/pom.xml @@ -1,6 +1,23 @@ - + + 4.0.0 diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java index 2a9f8af475..58b32ce171 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java @@ -1,25 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
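The calcHashcode() hunk above clamps every key into the code space [0, MAX_VALUE). The sketch below shows how such a code can then be routed to a partition, using the same rounding-up and division arithmetic that PartitionService.newPartition() applies later in this patch; the key bytes and the partition count of 12 are illustrative values, and deriving the partition size from MAX_VALUE is an assumption about that arithmetic.

import java.nio.charset.StandardCharsets;

import org.apache.hugegraph.pd.common.PartitionUtils;

public class HashRoutingSketch {
    public static void main(String[] args) {
        byte[] key = "vertex:12345".getBytes(StandardCharsets.UTF_8);

        // calcHashcode() returns a value in [0, PartitionUtils.MAX_VALUE), i.e. 0..0xfffe.
        int code = PartitionUtils.calcHashcode(key);

        // Hypothetical partition count; each partition owns a contiguous slice of the code space.
        int partitionCount = 12;
        int partitionSize = PartitionUtils.MAX_VALUE / partitionCount;
        if (PartitionUtils.MAX_VALUE % partitionCount != 0) {
            partitionSize++;
        }
        int partitionId = (int) (code / partitionSize);
        long startKey = (long) partitionSize * partitionId;
        long endKey = (long) partitionSize * (partitionId + 1);

        System.out.printf("code=%d -> partition %d, range [%d, %d)%n",
                          code, partitionId, startKey, endKey);
    }
}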
+ */ + package org.apache.hugegraph.pd; -import com.baidu.hugegraph.pd.common.PDException; +import java.util.List; import org.apache.hugegraph.pd.config.PDConfig; import org.apache.hugegraph.pd.meta.ConfigMetaStore; import org.apache.hugegraph.pd.meta.MetadataFactory; import org.apache.hugegraph.pd.raft.RaftStateListener; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Metapb; import lombok.extern.slf4j.Slf4j; -import java.util.List; - @Slf4j public class ConfigService implements RaftStateListener { private PDConfig pdConfig; - private ConfigMetaStore meta; + private final ConfigMetaStore meta; - public ConfigService(PDConfig config){ + public ConfigService(PDConfig config) { this.pdConfig = config; config.setConfigService(this); meta = MetadataFactory.newConfigMeta(config); @@ -29,6 +45,7 @@ public ConfigService(PDConfig config){ public Metapb.PDConfig getPDConfig(long version) throws PDException { return this.meta.getPdConfig(version); } + public Metapb.PDConfig getPDConfig() throws PDException { return this.meta.getPdConfig(0); } @@ -36,9 +53,9 @@ public Metapb.PDConfig getPDConfig() throws PDException { public Metapb.PDConfig setPDConfig(Metapb.PDConfig mConfig) throws PDException { Metapb.PDConfig oldCfg = getPDConfig(); Metapb.PDConfig.Builder builder = oldCfg.toBuilder().mergeFrom(mConfig) - .setVersion(oldCfg.getVersion() + 1) - .setTimestamp(System.currentTimeMillis()); - mConfig = this.meta.setPdConfig(builder.build()); + .setVersion(oldCfg.getVersion() + 1) + .setTimestamp(System.currentTimeMillis()); + mConfig = this.meta.setPdConfig(builder.build()); log.info("PDConfig has been modified, new PDConfig is {}", mConfig); updatePDConfig(mConfig); return mConfig; @@ -50,26 +67,27 @@ public List getGraphSpace(String graphSpaceName) throws PDExc public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDException { return this.meta.setGraphSpace(graphSpace.toBuilder() - .setTimestamp(System.currentTimeMillis()) - .build()); + .setTimestamp(System.currentTimeMillis()) + .build()); } /** * 从存储中读取配置项,并覆盖全局的PDConfig对象 - + * * @return */ public PDConfig loadConfig() { try { Metapb.PDConfig mConfig = this.meta.getPdConfig(0); - if ( mConfig == null ){ + if (mConfig == null) { mConfig = Metapb.PDConfig.newBuilder() - .setPartitionCount(pdConfig.getInitialPartitionCount()) - .setShardCount(pdConfig.getPartition().getShardCount()) - .setVersion(1) - .setTimestamp(System.currentTimeMillis()) - .setMaxShardsPerStore(pdConfig.getPartition().getMaxShardsPerStore()) - .build(); + .setPartitionCount(pdConfig.getInitialPartitionCount()) + .setShardCount(pdConfig.getPartition().getShardCount()) + .setVersion(1) + .setTimestamp(System.currentTimeMillis()) + .setMaxShardsPerStore( + pdConfig.getPartition().getMaxShardsPerStore()) + .build(); this.meta.setPdConfig(mConfig); } pdConfig = updatePDConfig(mConfig); @@ -79,7 +97,7 @@ public PDConfig loadConfig() { return pdConfig; } - public synchronized PDConfig updatePDConfig(Metapb.PDConfig mConfig){ + public synchronized PDConfig updatePDConfig(Metapb.PDConfig mConfig) { log.info("update pd config: mConfig:{}", mConfig); pdConfig.getPartition().setShardCount(mConfig.getShardCount()); pdConfig.getPartition().setTotalCount(mConfig.getPartitionCount()); @@ -87,7 +105,7 @@ public synchronized PDConfig updatePDConfig(Metapb.PDConfig mConfig){ return pdConfig; } - public synchronized PDConfig setPartitionCount(int count){ + public synchronized PDConfig setPartitionCount(int count) { Metapb.PDConfig 
mConfig = null; try { mConfig = getPDConfig(); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java index ba642d28bc..407d6c48de 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd; import org.apache.hugegraph.pd.config.PDConfig; @@ -8,6 +25,14 @@ public class IdService { + private PDConfig pdConfig; + private final IdMetaStore meta; + + public IdService(PDConfig config) { + this.pdConfig = config; + meta = MetadataFactory.newHugeServerMeta(config); + } + public PDConfig getPdConfig() { return pdConfig; } @@ -16,14 +41,6 @@ public void setPdConfig(PDConfig pdConfig) { this.pdConfig = pdConfig; } - private PDConfig pdConfig; - private IdMetaStore meta; - - public IdService(PDConfig config) { - this.pdConfig = config; - meta = MetadataFactory.newHugeServerMeta(config); - } - public long getId(String key, int delta) throws PDException { return meta.getId(key, delta); } @@ -34,6 +51,7 @@ public void resetId(String key) throws PDException { /** * 获取自增循环不重复id, 达到上限后从0开始自增.自动跳过正在使用的cid + * * @param key * @param max * @return @@ -42,12 +60,14 @@ public void resetId(String key) throws PDException { public long getCId(String key, long max) throws PDException { return meta.getCId(key, max); } + public long getCId(String key, String name, long max) throws PDException { return meta.getCId(key, name, max); } /** * 删除一个自增循环id + * * @param key * @param cid * @return @@ -56,6 +76,7 @@ public long getCId(String key, String name, long max) throws PDException { public long delCId(String key, long cid) throws PDException { return meta.delCId(key, cid); } + public long delCIdDelay(String key, String name, long cid) throws PDException { return meta.delCIdDelay(key, name, cid); } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java index fa73b29bd8..91f5225e0d 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java @@ -1,26 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
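The getCId() javadoc above describes a cyclic, non-repeating allocator: ids auto-increment, wrap back to 0 once max is reached, and skip ids that are still in use. The toy in-memory allocator below only illustrates that contract; it is not the RocksDB-backed IdMetaStore logic, and the class name is invented.

import java.util.HashSet;
import java.util.Set;

public class CyclicIdSketch {

    private final Set<Long> inUse = new HashSet<>();
    private long next = 0;

    // Illustrative only: hand out ids in [0, max), wrapping to 0 at the upper bound and
    // skipping ids that are still in use, as the IdService.getCId() javadoc describes.
    public synchronized long getCId(long max) {
        for (long tried = 0; tried < max; tried++) {
            long candidate = next;
            next = (next + 1) % max;
            if (inUse.add(candidate)) {
                return candidate;
            }
        }
        return -1; // every id in [0, max) is occupied
    }

    public synchronized void delCId(long cid) {
        inUse.remove(cid);
    }

    public static void main(String[] args) {
        CyclicIdSketch ids = new CyclicIdSketch();
        System.out.println(ids.getCId(3)); // 0
        System.out.println(ids.getCId(3)); // 1
        ids.delCId(0);
        System.out.println(ids.getCId(3)); // 2
        System.out.println(ids.getCId(3)); // 0 (wrapped around, 0 was released)
    }
}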
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd; -import com.baidu.hugegraph.pd.common.PDException; +import java.nio.charset.Charset; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; import org.apache.hugegraph.pd.config.PDConfig; import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; import org.apache.hugegraph.pd.store.KV; +import org.springframework.stereotype.Service; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.kv.Kv; import com.baidu.hugegraph.pd.grpc.kv.V; - -import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; - import com.google.protobuf.InvalidProtocolBufferException; -import lombok.extern.slf4j.Slf4j; - -import org.springframework.stereotype.Service; -import java.nio.charset.Charset; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; +import lombok.extern.slf4j.Slf4j; /** * @author zhangyingjie @@ -30,21 +44,47 @@ public class KvService { - private PDConfig pdConfig; - private MetadataRocksDBStore meta; + public static final char KV_DELIMITER = '@'; // TODO 主前缀之后,增加类名做区分 private static final String TTL_PREFIX = "T"; private static final String KV_PREFIX = "K"; private static final String LOCK_PREFIX = "L"; - public static final char KV_DELIMITER = '@'; private static final String KV_PREFIX_DELIMITER = KV_PREFIX + KV_DELIMITER; private static final byte[] EMPTY_VALUE = new byte[0]; + private PDConfig pdConfig; + private final MetadataRocksDBStore meta; public KvService(PDConfig config) { this.pdConfig = config; meta = new MetadataRocksDBStore(config); } + public static String getKey(Object... keys) { + StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper(); + builder.append(KV_PREFIX).append(KV_DELIMITER); + for (Object key : keys) { + builder.append(key == null ? "" : key).append(KV_DELIMITER); + } + return builder.substring(0, builder.length() - 1); + } + + public static byte[] getKeyBytes(Object... keys) { + String key = getKey(keys); + return key.getBytes(Charset.defaultCharset()); + } + + public static String getKeyWithoutPrefix(Object... keys) { + StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper(); + for (Object key : keys) { + builder.append(key == null ? 
"" : key).append(KV_DELIMITER); + } + return builder.substring(0, builder.length() - 1); + } + + public static String getDelimiter() { + return String.valueOf(KV_DELIMITER); + } + public PDConfig getPdConfig() { return pdConfig; } @@ -153,7 +193,7 @@ public List deleteWithPrefix(String key) throws PDException { * @return Records * @throws PDException */ - public Map scanRange(String keyStart, String keyEnd) throws PDException{ + public Map scanRange(String keyStart, String keyEnd) throws PDException { List list = meta.scanRange(getStoreKey(keyStart), getStoreKey(keyEnd)); Map map = new HashMap<>(); for (KV kv : list) { @@ -182,11 +222,7 @@ public Map scanWithPrefix(String key) throws PDException { public boolean locked(String key) throws PDException { String lockKey = KvService.getKeyWithoutPrefix(KvService.LOCK_PREFIX, key); Map allLock = scanWithPrefix(lockKey); - if (allLock == null || allLock.size() == 0) { - return false; - } else { - return true; - } + return allLock != null && allLock.size() != 0; } private boolean owned(String key, long clientId) throws PDException { @@ -223,7 +259,6 @@ public boolean lockWithoutReentrant(String key, long ttl, } } - public boolean unlock(String key, long clientId) throws PDException { synchronized (KvService.class) { if (!owned(key, clientId)) return false; @@ -266,30 +301,4 @@ public void clearTTLData() { } } - public static String getKey(Object... keys) { - StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper(); - builder.append(KV_PREFIX).append(KV_DELIMITER); - for (Object key : keys) { - builder.append(key == null ? "" : key).append(KV_DELIMITER); - } - return builder.substring(0, builder.length() - 1); - } - - public static byte[] getKeyBytes(Object... keys) { - String key = getKey(keys); - return key.getBytes(Charset.defaultCharset()); - } - - public static String getKeyWithoutPrefix(Object... keys) { - StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper(); - for (Object key : keys) { - builder.append(key == null ? "" : key).append(KV_DELIMITER); - } - return builder.substring(0, builder.length() - 1); - } - - public static String getDelimiter() { - return String.valueOf(KV_DELIMITER); - } - } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java index 7336a583a5..520b6ad4ea 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java @@ -1,19 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd; -import com.baidu.hugegraph.pd.common.PDException; +import java.util.List; import org.apache.hugegraph.pd.config.PDConfig; import org.apache.hugegraph.pd.meta.LogMeta; import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.springframework.stereotype.Service; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Metapb; import com.google.protobuf.Any; import com.google.protobuf.GeneratedMessageV3; -import lombok.extern.slf4j.Slf4j; -import org.springframework.stereotype.Service; - -import java.util.List; +import lombok.extern.slf4j.Slf4j; /** * @author zhangyingjie @@ -23,14 +39,12 @@ @Service public class LogService { - private LogMeta logMeta; - public static final String GRPC = "GRPC"; public static final String REST = "REST"; public static final String TASK = "TASK"; - public static final String NODE_CHANGE = "NODE_CHANGE"; public static final String PARTITION_CHANGE = "PARTITION_CHANGE"; + private final LogMeta logMeta; public LogService(PDConfig pdConfig) { logMeta = MetadataFactory.newLogMeta(pdConfig); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java index 3051f0ecab..893a7e9956 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd; import com.baidu.hugegraph.pd.common.PDException; @@ -10,24 +27,26 @@ import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition; import com.baidu.hugegraph.pd.grpc.pulse.TransferLeader; -import java.util.List; - /** * 分区命令监听 */ public interface PartitionInstructionListener { void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException; - void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws PDException; + void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws + PDException; - void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws PDException; + void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws + PDException; void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException; void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException; - void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException; + void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws + PDException; - void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) throws PDException; + void changePartitionKeyRange(Metapb.Partition partition, + PartitionKeyRange partitionKeyRange) throws PDException; } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java index d8df4afe9c..9b65783cbe 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java @@ -1,15 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
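For anyone wiring into these callbacks: an implementation registered via PartitionService.addInstructionListener() only needs to cover the methods reformatted above. The logging stub below is a sketch; the class name is invented, and the CleanPartition/PartitionKeyRange import paths are assumed to sit in the same pulse package as the other message types shown in this hunk.

import org.apache.hugegraph.pd.PartitionInstructionListener;

import com.baidu.hugegraph.pd.common.PDException;
import com.baidu.hugegraph.pd.grpc.Metapb;
import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard;
import com.baidu.hugegraph.pd.grpc.pulse.CleanPartition;
import com.baidu.hugegraph.pd.grpc.pulse.DbCompaction;
import com.baidu.hugegraph.pd.grpc.pulse.MovePartition;
import com.baidu.hugegraph.pd.grpc.pulse.PartitionKeyRange;
import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition;
import com.baidu.hugegraph.pd.grpc.pulse.TransferLeader;

public class LoggingInstructionListener implements PartitionInstructionListener {

    @Override
    public void changeShard(Metapb.Partition partition, ChangeShard changeShard)
            throws PDException {
        System.out.println("changeShard on partition " + partition.getId());
    }

    @Override
    public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader)
            throws PDException {
        System.out.println("transferLeader on partition " + partition.getId());
    }

    @Override
    public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition)
            throws PDException {
        System.out.println("splitPartition on partition " + partition.getId());
    }

    @Override
    public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction)
            throws PDException {
        System.out.println("dbCompaction on partition " + partition.getId());
    }

    @Override
    public void movePartition(Metapb.Partition partition, MovePartition movePartition)
            throws PDException {
        System.out.println("movePartition on partition " + partition.getId());
    }

    @Override
    public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition)
            throws PDException {
        System.out.println("cleanPartition on partition " + partition.getId());
    }

    @Override
    public void changePartitionKeyRange(Metapb.Partition partition,
                                        PartitionKeyRange partitionKeyRange)
            throws PDException {
        System.out.println("changePartitionKeyRange on partition " + partition.getId());
    }
}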
+ */ + package org.apache.hugegraph.pd; -import com.baidu.hugegraph.pd.common.KVPair; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.common.PartitionUtils; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import org.apache.commons.lang3.StringUtils; import org.apache.hugegraph.pd.config.PDConfig; import org.apache.hugegraph.pd.meta.MetadataFactory; import org.apache.hugegraph.pd.meta.PartitionMeta; import org.apache.hugegraph.pd.meta.TaskInfoMeta; import org.apache.hugegraph.pd.raft.RaftStateListener; +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.common.PartitionUtils; import com.baidu.hugegraph.pd.grpc.MetaTask; import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.grpc.Pdpb; @@ -25,18 +52,6 @@ import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.stream.Collectors; - /** * 分区管理 */ @@ -44,30 +59,31 @@ public class PartitionService implements RaftStateListener { private final long Partition_Version_Skip = 0x0F; - private PartitionMeta partitionMeta; private final StoreNodeService storeService; - - private PDConfig pdConfig; + private final PartitionMeta partitionMeta; + private final PDConfig pdConfig; // 分区命令监听 - private List instructionListeners; + private final List instructionListeners; // 分区状态监听 - private List statusListeners; + private final List statusListeners; public PartitionService(PDConfig config, StoreNodeService storeService) { this.pdConfig = config; this.storeService = storeService; partitionMeta = MetadataFactory.newPartitionMeta(config); - instructionListeners = Collections.synchronizedList(new ArrayList()); + instructionListeners = + Collections.synchronizedList(new ArrayList()); statusListeners = Collections.synchronizedList(new ArrayList()); } - public void init() throws PDException{ + public void init() throws PDException { partitionMeta.init(); storeService.addStatusListener(new StoreStatusListener() { @Override - public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, Metapb.StoreState status) { - if ( status == Metapb.StoreState.Tombstone){ + public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, + Metapb.StoreState status) { + if (status == Metapb.StoreState.Tombstone) { // Store被停机,通知所有该store所有分区,迁移数据 storeOffline(store); } @@ -94,7 +110,8 @@ public void onStoreRaftChanged(Metapb.Store store) { * @param key * @return */ - public Metapb.PartitionShard getPartitionShard(String graphName, byte[] key) throws PDException { + public Metapb.PartitionShard getPartitionShard(String graphName, byte[] key) throws + PDException { long code = PartitionUtils.calcHashcode(key); return getPartitionByCode(graphName, code); } @@ -106,8 +123,9 @@ public Metapb.PartitionShard getPartitionShard(String graphName, byte[] key) thr * @param code * @return */ - public Metapb.PartitionShard getPartitionByCode(String graphName, long code) throws PDException { - if ( code < 0 || code >= PartitionUtils.MAX_VALUE) { + public Metapb.PartitionShard 
getPartitionByCode(String graphName, long code) throws + PDException { + if (code < 0 || code >= PartitionUtils.MAX_VALUE) { throw new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "code error"); } // 根据Code查找分区id,如果没有找到,创建新的分区 @@ -122,11 +140,14 @@ public Metapb.PartitionShard getPartitionByCode(String graphName, long code) thr } Metapb.PartitionShard partShard = Metapb.PartitionShard.newBuilder() - .setPartition(partition) - .setLeader(storeService.getLeader(partition, 0)) - .build(); - log.debug("{} Partition get code = {}, partition id = {}, start = {}, end = {}, leader = {}", - graphName, (code), partition.getId(), partition.getStartKey(), partition.getEndKey(), partShard.getLeader()); + .setPartition(partition) + .setLeader(storeService.getLeader( + partition, 0)) + .build(); + log.debug( + "{} Partition get code = {}, partition id = {}, start = {}, end = {}, leader = {}", + graphName, (code), partition.getId(), partition.getStartKey(), + partition.getEndKey(), partShard.getLeader()); return partShard; } @@ -139,17 +160,19 @@ public Metapb.PartitionShard getPartitionByCode(String graphName, long code) thr * @return * @throws PDException */ - public Metapb.PartitionShard getPartitionShardById(String graphName, int partId) throws PDException { + public Metapb.PartitionShard getPartitionShardById(String graphName, int partId) throws + PDException { Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partId); if (partition == null) { return null; } Metapb.PartitionShard partShard = Metapb.PartitionShard.newBuilder() - .setPartition(partition) - // 此处需要返回正确的leader,暂时默认取第一个 - .setLeader(storeService.getLeader(partition, 0)) - .build(); + .setPartition(partition) + // 此处需要返回正确的leader,暂时默认取第一个 + .setLeader(storeService.getLeader( + partition, 0)) + .build(); return partShard; } @@ -169,8 +192,8 @@ public List getPartitions() { return partitionMeta.getPartitions(); } - public List getPartitions(String graphName){ - if ( StringUtils.isAllEmpty(graphName)) { + public List getPartitions(String graphName) { + if (StringUtils.isAllEmpty(graphName)) { return partitionMeta.getPartitions(); } return partitionMeta.getPartitions(graphName); @@ -178,6 +201,7 @@ public List getPartitions(String graphName){ /** * 查找在store上的所有分区 + * * @param store * @return */ @@ -187,7 +211,7 @@ public List getPartitionByStore(Metapb.Store store) throws PDE getPartitions(graph.getGraphName()).forEach(partition -> { try { storeService.getShardGroup(partition.getId()).getShardsList().forEach(shard -> { - if (shard.getStoreId() == store.getId()){ + if (shard.getStoreId() == store.getId()) { partitions.add(partition); } }); @@ -196,7 +220,7 @@ public List getPartitionByStore(Metapb.Store store) throws PDE } }); }); - return partitions; + return partitions; } /** @@ -213,9 +237,9 @@ private Metapb.Partition newPartition(String graphName, long code) throws PDExce partitionSize++; } - int partitionId = (int) (code / partitionSize); - long startKey = (long) partitionSize * partitionId; - long endKey = (long) partitionSize * (partitionId + 1); + int partitionId = (int) (code / partitionSize); + long startKey = (long) partitionSize * partitionId; + long endKey = (long) partitionSize * (partitionId + 1); // 检查本地 Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partitionId); @@ -224,13 +248,13 @@ private Metapb.Partition newPartition(String graphName, long code) throws PDExce // 分配store partition = Metapb.Partition.newBuilder() - .setId(partitionId) - .setVersion(0) - 
.setState(Metapb.PartitionState.PState_Normal) - .setStartKey(startKey) - .setEndKey(endKey) - .setGraphName(graphName) - .build(); + .setId(partitionId) + .setVersion(0) + .setState(Metapb.PartitionState.PState_Normal) + .setStartKey(startKey) + .setEndKey(endKey) + .setGraphName(graphName) + .build(); log.info("Create newPartition {}", partition); } @@ -261,7 +285,8 @@ protected int getPartitionId(String graphName, byte[] key) throws PDException { * @param startKey * @param endKey */ - public List scanPartitions(String graphName, byte[] startKey, byte[] endKey) + public List scanPartitions(String graphName, byte[] startKey, + byte[] endKey) throws PDException { int startPartId = getPartitionId(graphName, startKey); int endPartId = getPartitionId(graphName, endKey); @@ -271,10 +296,10 @@ public List scanPartitions(String graphName, byte[] start Metapb.Partition partition = partitionMeta.getPartitionById(graphName, id); partShards.add( Metapb.PartitionShard.newBuilder() - .setPartition(partition) - // 此处需要返回正确的leader,暂时默认取第一个 - .setLeader(storeService.getLeader(partition, 0)) - .build() + .setPartition(partition) + // 此处需要返回正确的leader,暂时默认取第一个 + .setLeader(storeService.getLeader(partition, 0)) + .build() ); } return partShards; @@ -291,27 +316,31 @@ public synchronized long updatePartition(List partitions) thro /** * 更新分区以及图的状态 + * * @param graph * @param partId * @param state * @throws PDException */ - public synchronized void updatePartitionState(String graph, int partId, Metapb.PartitionState state) throws PDException { + public synchronized void updatePartitionState(String graph, int partId, + Metapb.PartitionState state) throws PDException { Metapb.Partition partition = getPartitionById(graph, partId); - - if ( partition.getState() != state) { + + if (partition.getState() != state) { Metapb.Partition newPartition = partitionMeta.updatePartition(partition.toBuilder() - .setState(state).build()); + .setState(state) + .build()); onPartitionChanged(partition, newPartition); } } - public synchronized void updateGraphState(String graphName, Metapb.PartitionState state) throws PDException { + public synchronized void updateGraphState(String graphName, Metapb.PartitionState state) throws + PDException { Metapb.Graph graph = getGraph(graphName); if (graph != null) { partitionMeta.updateGraph(graph.toBuilder() - .setState(state).build()); + .setState(state).build()); } } @@ -333,21 +362,22 @@ public synchronized long removePartition(String graphName, int partId) throws PD updateGraphState(partition.getGraphName(), state); state = Metapb.PartitionState.PState_Normal; - for(Metapb.ShardGroup group : storeService.getShardGroups()){ - if ( group.getState().getNumber() > state.getNumber()) { + for (Metapb.ShardGroup group : storeService.getShardGroups()) { + if (group.getState().getNumber() > state.getNumber()) { state = group.getState(); } } storeService.updateClusterStatus(state); - }catch ( PDException e){ + } catch (PDException e) { log.error("onPartitionChanged", e); } return ret; } - public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws PDException { + public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws + PDException { return partitionMeta.getPartitionStats(graphName, partitionId); } @@ -391,7 +421,8 @@ public synchronized Metapb.Graph updateGraph(Metapb.Graph graph) throws PDExcept Metapb.Graph lastGraph = partitionMeta.getAndCreateGraph(graph.getGraphName()); log.info("updateGraph graph: {}, last: {}", graph, lastGraph); 
- int partCount = (graph.getGraphName().endsWith("/s") || graph.getGraphName().endsWith("/m")) ? + int partCount = + (graph.getGraphName().endsWith("/s") || graph.getGraphName().endsWith("/m")) ? 1 : pdConfig.getPartition().getTotalCount(); // set the partition count to specified if legal. @@ -400,19 +431,20 @@ public synchronized Metapb.Graph updateGraph(Metapb.Graph graph) throws PDExcept } if (partCount == 0) { - throw new PDException(10010 ,"update graph error, partition count = 0"); + throw new PDException(10010, "update graph error, partition count = 0"); } graph = lastGraph.toBuilder() - .mergeFrom(graph) - .setPartitionCount(partCount) - .build(); + .mergeFrom(graph) + .setPartitionCount(partCount) + .build(); partitionMeta.updateGraph(graph); // 分区数发生改变 if (lastGraph.getPartitionCount() != graph.getPartitionCount()) { log.info("updateGraph graph: {}, partition count changed from {} to {}", - graph.getGraphName(), lastGraph.getPartitionCount(), graph.getPartitionCount()); + graph.getGraphName(), lastGraph.getPartitionCount(), + graph.getPartitionCount()); // TODO 修改图的分区数,需要进行数据迁移。 } return graph; @@ -438,12 +470,13 @@ public Map> getCommittedIndexStats() throws PDException /** * 存储被下线,迁移分区数据 + * * @param store */ public void storeOffline(Metapb.Store store) { try { log.info("storeOffline store id: {}, address: {}, state: {}", - store.getId(), store.getAddress(), store.getState()); + store.getId(), store.getAddress(), store.getState()); List partitions = getPartitionByStore(store); var partIds = new HashSet(); for (Metapb.Partition p : partitions) { @@ -464,7 +497,7 @@ public void storeOffline(Metapb.Store store) { public synchronized void shardOffline(Metapb.Partition partition, long storeId) { try { log.info("shardOffline Partition {} - {} shardOffline store : {}", - partition.getGraphName(), partition.getId(), storeId); + partition.getGraphName(), partition.getId(), storeId); // partition = getPartitionById(partition.getGraphName(), partition.getId()); // Metapb.Partition.Builder builder = Metapb.Partition.newBuilder(partition); // builder.clearShards(); @@ -481,13 +514,15 @@ public synchronized void shardOffline(Metapb.Partition partition, long storeId) } } - private boolean isShardListEquals(List list1, List list2){ + private boolean isShardListEquals(List list1, List list2) { if (list1 == list2) { return true; - }else if (list1 != null && list2 != null) { + } else if (list1 != null && list2 != null) { - var s1 = list1.stream().map(Metapb.Shard::getStoreId).sorted(Long::compare).collect(Collectors.toList()); - var s2 = list2.stream().map(Metapb.Shard::getStoreId).sorted(Long::compare).collect(Collectors.toList()); + var s1 = list1.stream().map(Metapb.Shard::getStoreId).sorted(Long::compare) + .collect(Collectors.toList()); + var s2 = list2.stream().map(Metapb.Shard::getStoreId).sorted(Long::compare) + .collect(Collectors.toList()); if (s1.size() == s2.size()) { for (int i = 0; i < s1.size(); i++) { @@ -504,11 +539,13 @@ private boolean isShardListEquals(List list1, List l /** * 重新分配shard + * * @param graph * @param partition * @throws PDException */ - public void reallocPartitionShards(Metapb.Graph graph, Metapb.Partition partition) throws PDException { + public void reallocPartitionShards(Metapb.Graph graph, Metapb.Partition partition) throws + PDException { if (partition == null) { return; } @@ -528,19 +565,23 @@ public void reallocPartitionShards(Metapb.Graph graph, Metapb.Partition partitio } } - public synchronized void reallocPartitionShards(String graphName, int 
partitionId) throws PDException { + public synchronized void reallocPartitionShards(String graphName, int partitionId) throws + PDException { reallocPartitionShards(partitionMeta.getGraph(graphName), - partitionMeta.getPartitionById(graphName, partitionId)); + partitionMeta.getPartitionById(graphName, partitionId)); } /** * 迁移分区副本 */ - public synchronized void movePartitionsShard(Integer partitionId, long fromStore, long toStore) { + public synchronized void movePartitionsShard(Integer partitionId, long fromStore, + long toStore) { try { - log.info("movePartitionsShard partitionId {} from store {} to store {}", partitionId, fromStore, toStore); + log.info("movePartitionsShard partitionId {} from store {} to store {}", partitionId, + fromStore, toStore); for (Metapb.Graph graph : getGraphs()) { - Metapb.Partition partition = this.getPartitionById(graph.getGraphName(), partitionId); + Metapb.Partition partition = + this.getPartitionById(graph.getGraphName(), partitionId); if (partition == null) { continue; } @@ -553,7 +594,8 @@ public synchronized void movePartitionsShard(Integer partitionId, long fromStore } }); - shards.add(Metapb.Shard.newBuilder().setStoreId(toStore).setRole(Metapb.ShardRole.Follower).build()); + shards.add(Metapb.Shard.newBuilder().setStoreId(toStore) + .setRole(Metapb.ShardRole.Follower).build()); // storeService.updateShardGroup(partitionId, shards, -1, -1); // storeService.onShardGroupStatusChanged(shardGroup, newShardGroup); @@ -568,13 +610,15 @@ public synchronized void movePartitionsShard(Integer partitionId, long fromStore /** * 把集群中所有的分区,拆成split + * * @param splits 拆分分区 */ - public synchronized void splitPartition(List> splits) throws PDException { + public synchronized void splitPartition(List> splits) throws + PDException { var tasks = new HashMap>>(); - for (var pair : splits){ - for (var partition : getPartitionById(pair.getKey())){ + for (var pair : splits) { + for (var partition : getPartitionById(pair.getKey())) { if (!tasks.containsKey(partition.getGraphName())) { tasks.put(partition.getGraphName(), new ArrayList<>()); } @@ -589,38 +633,42 @@ public synchronized void splitPartition(List> splits) t /** * 分区分裂, 把一个图拆分到N 个 - * @param graph graph + * + * @param graph graph * @param toCount target count * @throws PDException */ - public synchronized void splitPartition(Metapb.Graph graph, int toCount) throws PDException{ + public synchronized void splitPartition(Metapb.Graph graph, int toCount) throws PDException { var partitionCount = getPartitions(graph.getGraphName()).size(); var maxShardsPerStore = pdConfig.getPartition().getMaxShardsPerStore(); var shardCount = pdConfig.getPartition().getShardCount(); - if ( shardCount * toCount > storeService.getActiveStores().size() * maxShardsPerStore){ + if (shardCount * toCount > storeService.getActiveStores().size() * maxShardsPerStore) { throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, - "can't satisfy target shard group count, reached the upper limit of the cluster"); + "can't satisfy target shard group count, reached the upper " + + "limit of the cluster"); } if (toCount % partitionCount != 0 || toCount <= partitionCount) { throw new PDException(Pdpb.ErrorType.Invalid_Split_Partition_Count_VALUE, - "invalid split partition count, make sure to count is N time of current partition count"); + "invalid split partition count, make sure to count is N time of" + + " current partition count"); } // 由于是整数倍数,扩充因子为 toCount / current count var splitCount = toCount / partitionCount; var list = new 
ArrayList>(); - for (int i = 0 ; i < partitionCount; i ++) { + for (int i = 0; i < partitionCount; i++) { list.add(new KVPair<>(i, splitCount)); } splitPartition(graph, list); } - private synchronized void splitPartition(Metapb.Graph graph, List> splits) + private synchronized void splitPartition(Metapb.Graph graph, + List> splits) throws PDException { var taskInfoMeta = storeService.getTaskInfoMeta(); if (taskInfoMeta.scanSplitTask(graph.getGraphName()).size() > 0) { @@ -634,7 +682,8 @@ private synchronized void splitPartition(Metapb.Graph graph, List newPartitions = new ArrayList<>(); // 第一个分区也就是原分区 newPartitions.add(partition.toBuilder() - .setStartKey(partition.getStartKey()) - .setEndKey(partition.getStartKey() + splitLen) - .setId(partition.getId()) - .setState(Metapb.PartitionState.PState_Offline) - .build()); + .setStartKey(partition.getStartKey()) + .setEndKey(partition.getStartKey() + splitLen) + .setId(partition.getId()) + .setState(Metapb.PartitionState.PState_Offline) + .build()); int idx = 0; for (; idx < splitCount - 2; idx++) { newPartitions.add(partition.toBuilder() - .setStartKey(newPartitions.get(idx).getEndKey()) - .setEndKey(newPartitions.get(idx).getEndKey() + splitLen) - .setId(i) - .setState(Metapb.PartitionState.PState_Offline) - .build()); + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(newPartitions.get(idx).getEndKey() + + splitLen) + .setId(i) + .setState(Metapb.PartitionState.PState_Offline) + .build()); i += 1; } newPartitions.add(partition.toBuilder() - .setStartKey(newPartitions.get(idx).getEndKey()) - .setEndKey(partition.getEndKey()) - .setId(i) - .setState(Metapb.PartitionState.PState_Offline) - .build()); + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(partition.getEndKey()) + .setId(i) + .setState(Metapb.PartitionState.PState_Offline) + .build()); i += 1; // try to save new partitions, and repair shard group - for (int j = 0; j < newPartitions.size(); j ++) { + for (int j = 0; j < newPartitions.size(); j++) { var newPartition = newPartitions.get(j); if (j != 0) { @@ -680,8 +730,8 @@ private synchronized void splitPartition(Metapb.Graph graph, List 0) { - fireTransferLeader(partitions.get(0), TransferLeader.newBuilder().setShard(shard).build()); + fireTransferLeader(partitions.get(0), + TransferLeader.newBuilder().setShard(shard).build()); } // for (Metapb.Graph graph : getGraphs()) { // Metapb.Partition partition = this.getPartitionById(graph.getGraphName(), partId); // if (partition != null) { -// fireTransferLeader(partition, TransferLeader.newBuilder().setShard(shard).build()); +// fireTransferLeader(partition, TransferLeader.newBuilder().setShard(shard) +// .build()); // } // } } catch (PDException e) { @@ -740,9 +793,9 @@ public void transferLeader(Integer partId, Metapb.Shard shard) { public void combinePartition(int toCount) throws PDException { int shardsTotalCount = getShardGroupCount(); - for (var graph : getGraphs()){ + for (var graph : getGraphs()) { // 对所有大于toCount分区的图,都进行缩容 - if (graph.getPartitionCount() > toCount){ + if (graph.getPartitionCount() > toCount) { combineGraphPartition(graph, toCount, shardsTotalCount); } } @@ -752,7 +805,7 @@ public void combinePartition(int toCount) throws PDException { * 针对单个图,进行分区合并 * * @param graphName the name of the graph - * @param toCount the target partition count + * @param toCount the target partition count * @throws PDException when query errors */ @@ -763,35 +816,41 @@ public void combineGraphPartition(String graphName, int toCount) throws PDExcept /** * 
单图合并的内部实现 * - * @param graph the name of the graph - * @param toCount the target partition count + * @param graph the name of the graph + * @param toCount the target partition count * @param shardCount the shard count of the clusters * @throws PDException when query errors */ private synchronized void combineGraphPartition(Metapb.Graph graph, int toCount, int shardCount) throws PDException { - if (graph == null){ - throw new PDException(1, "Graph not exists, try to use full graph name, like /DEFAULT/GRAPH_NAME/g"); + if (graph == null) { + throw new PDException(1, + "Graph not exists, try to use full graph name, like " + + "/DEFAULT/GRAPH_NAME/g"); } log.info("Combine graph {} partition, from {}, to {}, with shard count:{}", - graph.getGraphName(), graph.getPartitionCount(), toCount, shardCount); - - if (! checkTargetCount(graph.getPartitionCount(), toCount, shardCount)) { - log.error("Combine partition, illegal toCount:{}, graph:{}", toCount, graph.getGraphName()); - throw new PDException(2, "illegal partition toCount, should between 1 ~ shard group count and " + - " can be dived by shard group count"); + graph.getGraphName(), graph.getPartitionCount(), toCount, shardCount); + + if (!checkTargetCount(graph.getPartitionCount(), toCount, shardCount)) { + log.error("Combine partition, illegal toCount:{}, graph:{}", toCount, + graph.getGraphName()); + throw new PDException(2, + "illegal partition toCount, should between 1 ~ shard group " + + "count and " + + " can be dived by shard group count"); } var taskInfoMeta = storeService.getTaskInfoMeta(); - if (taskInfoMeta.scanMoveTask(graph.getGraphName()).size() > 0 ) { + if (taskInfoMeta.scanMoveTask(graph.getGraphName()).size() > 0) { throw new PDException(3, "Graph Combine process exists"); } // 按照 key start 排序,合并后的key range 是连续的 var partitions = getPartitions(graph.getGraphName()).stream() - .sorted(Comparator.comparing(Metapb.Partition::getStartKey)) - .collect(Collectors.toList()); + .sorted(Comparator.comparing( + Metapb.Partition::getStartKey)) + .collect(Collectors.toList()); // 分区编号不一定是连续的 var sortPartitions = getPartitions(graph.getGraphName()) @@ -803,16 +862,16 @@ private synchronized void combineGraphPartition(Metapb.Graph graph, int toCount, // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 共12个分区, 合并成4个 // 方案:0,1,2 => 0, 3,4,5 -> 1, 6,7,8 ->2, 9,10,11 -> 3 // 保证分区的连续性. - for (int i = 0; i < toCount; i ++ ){ + for (int i = 0; i < toCount; i++) { var startKey = partitions.get(i * groupSize).getStartKey(); var endKey = partitions.get(i * groupSize + groupSize - 1).getEndKey(); // compose the key range // the start key and end key should be changed if combine success. 
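The combine path in this hunk validates the target count and then folds consecutive partitions together (the "0,1,2 => 0, 3,4,5 -> 1, ..." scheme in the comment above). Below is a standalone sketch of that arithmetic, mirroring checkTargetCount() and the groupSize loop; the counts are illustrative and the planning class is invented.

import java.util.ArrayList;
import java.util.List;

public class CombinePlanSketch {

    // Same constraint as PartitionService.checkTargetCount(): the target count must lie in
    // [1, fromCount), divide fromCount exactly, and stay below the shard group count.
    static boolean checkTargetCount(int fromCount, int toCount, int shardCount) {
        return toCount >= 1 && toCount < fromCount && fromCount % toCount == 0 &&
               toCount < shardCount;
    }

    public static void main(String[] args) {
        int fromCount = 12, toCount = 4, shardCount = 12;
        System.out.println(checkTargetCount(fromCount, toCount, shardCount)); // true

        // Grouping scheme from the comment above: 0,1,2 -> 0; 3,4,5 -> 1; 6,7,8 -> 2; 9,10,11 -> 3
        int groupSize = fromCount / toCount;
        List<String> plan = new ArrayList<>();
        for (int i = 0; i < toCount; i++) {
            int first = i * groupSize;
            int last = i * groupSize + groupSize - 1;
            plan.add(first + ".." + last + " -> " + i);
        }
        System.out.println(plan);
    }
}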
var targetPartition = Metapb.Partition.newBuilder(sortPartitions.get(i)) - .setStartKey(startKey) - .setEndKey(endKey) - .build(); + .setStartKey(startKey) + .setEndKey(endKey) + .build(); for (int j = 0; j < groupSize; j++) { var partition = partitions.get(i * groupSize + j); @@ -821,21 +880,23 @@ private synchronized void combineGraphPartition(Metapb.Graph graph, int toCount, continue; } - log.info("combine partition of graph :{}, from part id {} to {}", partition.getGraphName(), - partition.getId(), targetPartition.getId()); + log.info("combine partition of graph :{}, from part id {} to {}", + partition.getGraphName(), + partition.getId(), targetPartition.getId()); MovePartition movePartition = MovePartition.newBuilder() - .setTargetPartition(targetPartition) - .setKeyStart(partition.getStartKey()) - .setKeyEnd(partition.getEndKey()) - .build(); + .setTargetPartition(targetPartition) + .setKeyStart(partition.getStartKey()) + .setKeyEnd(partition.getEndKey()) + .build(); taskInfoMeta.addMovePartitionTask(partition, movePartition); // source 下线 - updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Offline); + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Offline); fireMovePartition(partition, movePartition); } // target 下线 updatePartitionState(targetPartition.getGraphName(), targetPartition.getId(), - Metapb.PartitionState.PState_Offline); + Metapb.PartitionState.PState_Offline); } storeService.updateClusterStatus(Metapb.ClusterState.Cluster_Offline); @@ -843,12 +904,14 @@ private synchronized void combineGraphPartition(Metapb.Graph graph, int toCount, /** * 通过 storeService 获取 raft group 总数 + * * @return the count of raft groups */ private int getShardGroupCount() { try { - return Optional.ofNullable(storeService.getShardGroups()).orElseGet(ArrayList::new).size(); - }catch (PDException e){ + return Optional.ofNullable(storeService.getShardGroups()).orElseGet(ArrayList::new) + .size(); + } catch (PDException e) { log.error("get shard group failed, error: {}", e); } return 0; @@ -858,21 +921,19 @@ private int getShardGroupCount() { * 判断图分区是否能够从from合并到to个 * * @param fromCount 现在的分区数 - * @param toCount 目标分区数 + * @param toCount 目标分区数 * @return true when available , or otherwise */ - private boolean checkTargetCount(int fromCount, int toCount, int shardCount){ + private boolean checkTargetCount(int fromCount, int toCount, int shardCount) { // 要介于 1 ~ N 中间,而且可以整除 - if (toCount < 1 || toCount >= fromCount || fromCount % toCount != 0 || toCount >= shardCount) { - return false; - } - - return true; + return toCount >= 1 && toCount < fromCount && fromCount % toCount == 0 && + toCount < shardCount; } /** * 处理分区心跳, 记录Leader信息 * 检查term和version,比较是否是最新的消息 + * * @param stats */ public void partitionHeartbeat(Metapb.PartitionStats stats) throws PDException { @@ -882,9 +943,11 @@ public void partitionHeartbeat(Metapb.PartitionStats stats) throws PDException { // (shard group 由pd控制, 在分裂等操作后,可能出现短暂不一致的情况,以pd为准) // store控制shard leader if (shardGroup != null && - (shardGroup.getVersion() < stats.getLeaderTerm() || shardGroup.getConfVer() < stats.getConfVer())) { + (shardGroup.getVersion() < stats.getLeaderTerm() || + shardGroup.getConfVer() < stats.getConfVer())) { storeService.updateShardGroup(stats.getId(), - stats.getShardList(), stats.getLeaderTerm(), stats.getConfVer()); + stats.getShardList(), stats.getLeaderTerm(), + stats.getConfVer()); } List partitions = getPartitionById(stats.getId()); @@ -894,11 +957,12 @@ 
public void partitionHeartbeat(Metapb.PartitionStats stats) throws PDException { } // 统计信息 partitionMeta.updatePartitionStats(stats.toBuilder() - .setTimestamp(System.currentTimeMillis()).build()); + .setTimestamp(System.currentTimeMillis()).build()); } /** * 检查shard状态,离线shard影响到分区状态 + * * @param stats */ private void checkShardState(Metapb.Partition partition, Metapb.PartitionStats stats) { @@ -906,44 +970,52 @@ private void checkShardState(Metapb.Partition partition, Metapb.PartitionStats s try { int offCount = 0; for (Metapb.ShardStats shard : stats.getShardStatsList()) { - if (shard.getState() == Metapb.ShardState.SState_Offline) + if (shard.getState() == Metapb.ShardState.SState_Offline) { offCount++; + } } if (partition.getState() != Metapb.PartitionState.PState_Offline) { if (offCount == 0) { - updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Normal); + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Normal); } else if (offCount * 2 < stats.getShardStatsCount()) { - updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Warn); - } else - updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Warn); + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Warn); + } else { + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Warn); + } } } catch (Exception e) { log.error("Partition {}-{} checkShardState exception {}", - partition.getGraphName(), partition.getId(), e); + partition.getGraphName(), partition.getId(), e); } } - - public void addInstructionListener(PartitionInstructionListener event){ + public void addInstructionListener(PartitionInstructionListener event) { instructionListeners.add(event); } - public void addStatusListener(PartitionStatusListener listener){ + public void addStatusListener(PartitionStatusListener listener) { statusListeners.add(listener); } /** * 发起改变shard命令 + * * @param changeType */ - protected void fireChangeShard(Metapb.Partition partition, List shards, ConfChangeType changeType) { - log.info("fireChangeShard partition: {}-{}, changeType:{} {}", partition.getGraphName(), partition.getId(), changeType, shards); + protected void fireChangeShard(Metapb.Partition partition, List shards, + ConfChangeType changeType) { + log.info("fireChangeShard partition: {}-{}, changeType:{} {}", partition.getGraphName(), + partition.getId(), changeType, shards); instructionListeners.forEach(cmd -> { try { cmd.changeShard(partition, ChangeShard.newBuilder() - .addAllShard(shards).setChangeType(changeType).build()); - }catch (Exception e){ + .addAllShard(shards).setChangeType(changeType) + .build()); + } catch (Exception e) { log.error("fireChangeShard", e); } }); @@ -959,15 +1031,16 @@ public void changeShard(int groupId, List shards) throws PDExcepti /** * 发送分区分裂消息 + * * @param partition */ protected void fireSplitPartition(Metapb.Partition partition, SplitPartition splitPartition) { log.info("fireSplitPartition partition: {}-{}, split :{}", - partition.getGraphName(), partition.getId(), splitPartition); + partition.getGraphName(), partition.getId(), splitPartition); instructionListeners.forEach(cmd -> { try { cmd.splitPartition(partition, splitPartition); - }catch (Exception e){ + } catch (Exception e) { log.error("fireSplitPartition", e); } }); @@ -978,11 +1051,11 @@ protected void 
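Note on checkShardState above: it derives a partition state from the count of offline shards; zero offline shards keeps the partition Normal, and as written both remaining branches degrade it to Warn. A compact sketch of that mapping, using local enums instead of the Metapb types (names are stand-ins):

    import java.util.List;

    public class ShardStateCheck {
        enum ShardState { UP, OFFLINE }
        enum PartitionState { NORMAL, WARN }

        // Mirrors checkShardState: zero offline shards -> NORMAL,
        // any offline shard -> WARN (the minority/majority branches both map to WARN).
        static PartitionState derive(List<ShardState> shards) {
            long off = shards.stream().filter(s -> s == ShardState.OFFLINE).count();
            return off == 0 ? PartitionState.NORMAL : PartitionState.WARN;
        }

        public static void main(String[] args) {
            System.out.println(derive(List.of(ShardState.UP, ShardState.UP)));      // NORMAL
            System.out.println(derive(List.of(ShardState.UP, ShardState.OFFLINE))); // WARN
        }
    }
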
fireSplitPartition(Metapb.Partition partition, SplitPartition spl */ protected void fireTransferLeader(Metapb.Partition partition, TransferLeader transferLeader) { log.info("fireTransferLeader partition: {}-{}, leader :{}", - partition.getGraphName(), partition.getId(), transferLeader); + partition.getGraphName(), partition.getId(), transferLeader); instructionListeners.forEach(cmd -> { try { cmd.transferLeader(partition, transferLeader); - }catch (Exception e){ + } catch (Exception e) { log.error("fireSplitPartition", e); } }); @@ -990,17 +1063,18 @@ protected void fireTransferLeader(Metapb.Partition partition, TransferLeader tra /** * 发送分区移动数据的消息 - * @param partition 原分区 + * + * @param partition 原分区 * @param movePartition 目标分区,包含 key range */ protected void fireMovePartition(Metapb.Partition partition, MovePartition movePartition) { log.info("fireMovePartition partition: {} -> {}", - partition, movePartition); + partition, movePartition); instructionListeners.forEach(cmd -> { try { cmd.movePartition(partition, movePartition); - }catch (Exception e){ + } catch (Exception e) { log.error("fireMovePartition", e); } }); @@ -1008,25 +1082,26 @@ protected void fireMovePartition(Metapb.Partition partition, MovePartition moveP protected void fireCleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) { log.info("fireCleanPartition partition: {} -> just keep : {}->{}", - partition.getId(), cleanPartition.getKeyStart(), cleanPartition.getKeyEnd()); + partition.getId(), cleanPartition.getKeyStart(), cleanPartition.getKeyEnd()); instructionListeners.forEach(cmd -> { try { cmd.cleanPartition(partition, cleanPartition); - }catch (Exception e){ + } catch (Exception e) { log.error("cleanPartition", e); } }); } - protected void fireChangePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) { + protected void fireChangePartitionKeyRange(Metapb.Partition partition, + PartitionKeyRange partitionKeyRange) { log.info("fireChangePartitionKeyRange partition: {}-{} -> key range {}", - partition.getGraphName(), partition.getId(), partitionKeyRange); + partition.getGraphName(), partition.getId(), partitionKeyRange); instructionListeners.forEach(cmd -> { try { cmd.changePartitionKeyRange(partition, partitionKeyRange); - }catch (Exception e){ + } catch (Exception e) { log.error("cleanPartition", e); } }); @@ -1034,6 +1109,7 @@ protected void fireChangePartitionKeyRange(Metapb.Partition partition, Partition /** * 处理图迁移任务 + * * @param task */ public synchronized void handleMoveTask(MetaTask.Task task) throws PDException { @@ -1042,24 +1118,31 @@ public synchronized void handleMoveTask(MetaTask.Task task) throws PDException { var movePartition = task.getMovePartition(); MetaTask.Task pdMetaTask = taskInfoMeta.getMovePartitionTask(partition.getGraphName(), - movePartition.getTargetPartition().getId(), - partition.getId()); + movePartition.getTargetPartition() + .getId(), + partition.getId()); - log.info("report move task, graph:{}, pid : {}->{}, state: {}", task.getPartition().getGraphName(), - task.getPartition().getId(), task.getMovePartition().getTargetPartition().getId(), task.getState()); + log.info("report move task, graph:{}, pid : {}->{}, state: {}", + task.getPartition().getGraphName(), + task.getPartition().getId(), task.getMovePartition().getTargetPartition().getId(), + task.getState()); // 已经被处理(前面有failed) if (pdMetaTask != null) { - var newTask = pdMetaTask.toBuilder().setState(task.getState()).build(); - taskInfoMeta.updateMovePartitionTask(newTask); + var 
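Note on the fire* methods above: they all follow the same dispatch pattern, iterating the registered instruction listeners and isolating failures per listener so one broken callback cannot block the rest. A generic sketch of that pattern (the listener type and class names here are illustrative, not the actual PD API):

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;
    import java.util.function.Consumer;

    public class InstructionDispatcher<T> {
        // Hypothetical listener list; the real service keeps typed PartitionInstructionListeners.
        private final List<Consumer<T>> listeners = new CopyOnWriteArrayList<>();

        public void addListener(Consumer<T> listener) {
            listeners.add(listener);
        }

        // Fan the instruction out; a failing listener is logged and skipped,
        // mirroring the try/catch around each cmd.xxx(...) call in the diff.
        public void fire(T instruction) {
            for (Consumer<T> listener : listeners) {
                try {
                    listener.accept(instruction);
                } catch (Exception e) {
                    System.err.println("listener failed: " + e);
                }
            }
        }

        public static void main(String[] args) {
            InstructionDispatcher<String> d = new InstructionDispatcher<>();
            d.addListener(msg -> System.out.println("store-1 got " + msg));
            d.addListener(msg -> { throw new IllegalStateException("boom"); });
            d.addListener(msg -> System.out.println("store-2 got " + msg));
            d.fire("movePartition 7 -> 3"); // store-1 and store-2 still receive it
        }
    }
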
newTask = pdMetaTask.toBuilder().setState(task.getState()).build(); + taskInfoMeta.updateMovePartitionTask(newTask); List subTasks = taskInfoMeta.scanMoveTask(partition.getGraphName()); - var finished = subTasks.stream().allMatch(t -> - t.getState() == MetaTask.TaskState.Task_Success || t.getState() == MetaTask.TaskState.Task_Failure); + var finished = subTasks.stream().allMatch(t -> + t.getState() == + MetaTask.TaskState.Task_Success || + t.getState() == + MetaTask.TaskState.Task_Failure); if (finished) { - var allSuccess = subTasks.stream().allMatch(t -> t.getState() == MetaTask.TaskState.Task_Success); + var allSuccess = subTasks.stream().allMatch( + t -> t.getState() == MetaTask.TaskState.Task_Success); if (allSuccess) { log.info("graph:{} combine task all success!", partition.getGraphName()); handleMoveTaskAllSuccess(subTasks, partition.getGraphName(), taskInfoMeta); @@ -1073,71 +1156,77 @@ public synchronized void handleMoveTask(MetaTask.Task task) throws PDException { /** * 当所有的迁移子任务成功: - * 1. 发送清理source分区指令 - * 2. 设置target上线, 更新key range, 更新 graph partition count - * 3. 删除move task,任务结束 + * 1. 发送清理source分区指令 + * 2. 设置target上线, 更新key range, 更新 graph partition count + * 3. 删除move task,任务结束 * - * @param subTasks all move sub tasks - * @param graphName graph name + * @param subTasks all move sub tasks + * @param graphName graph name * @param taskInfoMeta task info meta * @throws PDException returns if write db failed */ private void handleMoveTaskAllSuccess(List subTasks, String graphName, - TaskInfoMeta taskInfoMeta) throws PDException { + TaskInfoMeta taskInfoMeta) throws PDException { var targetPartitionIds = new HashSet(); var targetPartitions = new ArrayList(); - var deleteFlags = subTasks.stream().map(task -> task.getMovePartition().getTargetPartition().getId()) - .collect(Collectors.toSet()); + var deleteFlags = + subTasks.stream().map(task -> task.getMovePartition().getTargetPartition().getId()) + .collect(Collectors.toSet()); for (MetaTask.Task subTask : subTasks) { var source = subTask.getPartition(); var targetPartition = subTask.getMovePartition().getTargetPartition(); // 是否处理过 - if (! targetPartitionIds.contains(targetPartition.getId())) { + if (!targetPartitionIds.contains(targetPartition.getId())) { // 更新range var old = getPartitionById(targetPartition.getGraphName(), targetPartition.getId()); var newPartition = Metapb.Partition.newBuilder(old) - .setStartKey(targetPartition.getStartKey()) - .setEndKey(targetPartition.getEndKey()) - .setState(Metapb.PartitionState.PState_Normal) - .build(); + .setStartKey(targetPartition.getStartKey()) + .setEndKey(targetPartition.getEndKey()) + .setState(Metapb.PartitionState.PState_Normal) + .build(); // 在 key range之前更新,避免store没有分区的问题, 需要到pd查询 updatePartition(List.of(newPartition)); targetPartitions.add(newPartition); // 发送key range 变更消息 PartitionKeyRange partitionKeyRange = PartitionKeyRange.newBuilder() - .setPartitionId(old.getId()) - .setKeyStart(targetPartition.getStartKey()) - .setKeyEnd(targetPartition.getEndKey()) - .build(); + .setPartitionId(old.getId()) + .setKeyStart( + targetPartition.getStartKey()) + .setKeyEnd( + targetPartition.getEndKey()) + .build(); // 通知store - fireChangePartitionKeyRange(old.toBuilder().setState(Metapb.PartitionState.PState_Normal).build(), + fireChangePartitionKeyRange( + old.toBuilder().setState(Metapb.PartitionState.PState_Normal).build(), partitionKeyRange); // 将 target 设置为上线. 
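Note on the sub-task bookkeeping above: handleMoveTask only acts once every sub-task of the graph has reported a terminal state; when all finished it takes the all-success path, otherwise the rollback path. The aggregation can be sketched independently of the MetaTask protobufs (TaskState and the return convention below are stand-ins):

    import java.util.List;

    public class MoveTaskAggregation {
        enum TaskState { DOING, SUCCESS, FAILURE }

        // Returns: 0 = still running, 1 = all success, -1 = finished with failures.
        static int aggregate(List<TaskState> subTasks) {
            boolean finished = subTasks.stream()
                    .allMatch(t -> t == TaskState.SUCCESS || t == TaskState.FAILURE);
            if (!finished) {
                return 0; // wait for the remaining reports
            }
            boolean allSuccess = subTasks.stream().allMatch(t -> t == TaskState.SUCCESS);
            return allSuccess ? 1 : -1; // 1 -> all-success handling, -1 -> rollback handling
        }

        public static void main(String[] args) {
            System.out.println(aggregate(List.of(TaskState.SUCCESS, TaskState.DOING)));   // 0
            System.out.println(aggregate(List.of(TaskState.SUCCESS, TaskState.SUCCESS))); // 1
            System.out.println(aggregate(List.of(TaskState.SUCCESS, TaskState.FAILURE))); // -1
        }
    }
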
source 理论上可能被删掉,所以不处理 updatePartitionState(newPartition.getGraphName(), newPartition.getId(), - Metapb.PartitionState.PState_Normal); + Metapb.PartitionState.PState_Normal); targetPartitionIds.add(targetPartition.getId()); } CleanPartition cleanPartition = CleanPartition.newBuilder() - .setKeyStart(source.getStartKey()) - .setKeyEnd(source.getEndKey()) - .setCleanType(CleanType.CLEAN_TYPE_EXCLUDE_RANGE) - // target 的 partition只需要清理数据,不需要删除分区 - .setDeletePartition(!deleteFlags.contains(source.getId())) - .build(); + .setKeyStart(source.getStartKey()) + .setKeyEnd(source.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_EXCLUDE_RANGE) + // target 的 partition只需要清理数据,不需要删除分区 + .setDeletePartition(!deleteFlags.contains( + source.getId())) + .build(); log.info("pd clean data: {}-{}, key range:{}-{}, type:{}, delete partition:{}", - source.getGraphName(), - source.getId(), - cleanPartition.getKeyStart(), - cleanPartition.getKeyEnd(), - CleanType.CLEAN_TYPE_EXCLUDE_RANGE, - cleanPartition.getDeletePartition()); + source.getGraphName(), + source.getId(), + cleanPartition.getKeyStart(), + cleanPartition.getKeyEnd(), + CleanType.CLEAN_TYPE_EXCLUDE_RANGE, + cleanPartition.getDeletePartition()); // 清理掉被移动分区的数据 fireCleanPartition(source, cleanPartition); @@ -1149,7 +1238,8 @@ private void handleMoveTaskAllSuccess(List subTasks, String graph // 更新target 分区状态,source 可能被删掉,所以不处理 targetPartitions.forEach(p -> { try { - updatePartitionState(p.getGraphName(), p.getId(), Metapb.PartitionState.PState_Normal); + updatePartitionState(p.getGraphName(), p.getId(), + Metapb.PartitionState.PState_Normal); } catch (PDException e) { throw new RuntimeException(e); } @@ -1159,8 +1249,8 @@ private void handleMoveTaskAllSuccess(List subTasks, String graph // 更新graph partition count var graph = getGraph(graphName).toBuilder() - .setPartitionCount(targetPartitionIds.size()) - .build(); + .setPartitionCount(targetPartitionIds.size()) + .build(); updateGraph(graph); // 事务完成 @@ -1169,22 +1259,24 @@ private void handleMoveTaskAllSuccess(List subTasks, String graph /** * 如果缩容任务有失败的,回滚合并操作 - * 1. 清理原来的target 分区,将迁移过来的数据再删掉 - * 2. 将source/target 分区设置为上线 - * 3. 删除task,任务结束 + * 1. 清理原来的target 分区,将迁移过来的数据再删掉 + * 2. 将source/target 分区设置为上线 + * 3. 
删除task,任务结束 * - * @param graphName graph name + * @param graphName graph name * @param taskInfoMeta task info meta * @throws PDException return if write to db failed */ - private void handleMoveTaskIfFailed(String graphName, TaskInfoMeta taskInfoMeta) throws PDException { + private void handleMoveTaskIfFailed(String graphName, TaskInfoMeta taskInfoMeta) throws + PDException { // 发送清理target分区的任务, 回滚target分区 var targetPartitionIds = new HashSet(); - for (var metaTask : taskInfoMeta.scanMoveTask(graphName)){ + for (var metaTask : taskInfoMeta.scanMoveTask(graphName)) { var source = metaTask.getPartition(); // 设置 source 为上线 - updatePartitionState(source.getGraphName(), source.getId(), Metapb.PartitionState.PState_Normal); + updatePartitionState(source.getGraphName(), source.getId(), + Metapb.PartitionState.PState_Normal); var movedPartition = metaTask.getMovePartition().getTargetPartition(); if (targetPartitionIds.contains(movedPartition.getId())) { @@ -1194,17 +1286,19 @@ private void handleMoveTaskIfFailed(String graphName, TaskInfoMeta taskInfoMeta) var targetPartition = getPartitionById(graphName, movedPartition.getId()); CleanPartition cleanPartition = CleanPartition.newBuilder() - .setKeyStart(targetPartition.getStartKey()) - .setKeyEnd(targetPartition.getEndKey()) - .setCleanType(CleanType.CLEAN_TYPE_KEEP_RANGE) - .setDeletePartition(false) - .build(); + .setKeyStart( + targetPartition.getStartKey()) + .setKeyEnd(targetPartition.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_KEEP_RANGE) + .setDeletePartition(false) + .build(); fireCleanPartition(targetPartition, cleanPartition); targetPartitionIds.add(targetPartition.getId()); // 设置target 上线 updatePartitionState(targetPartition.getGraphName(), targetPartition.getId(), - Metapb.PartitionState.PState_Normal); + Metapb.PartitionState.PState_Normal); } // 清理掉任务列表 taskInfoMeta.removeMoveTaskPrefix(graphName); @@ -1212,15 +1306,16 @@ private void handleMoveTaskIfFailed(String graphName, TaskInfoMeta taskInfoMeta) /** * 处理clean task + * * @param task clean task */ - public void handleCleanPartitionTask(MetaTask.Task task){ + public void handleCleanPartitionTask(MetaTask.Task task) { log.info("clean task {} -{}, key range:{}~{}, report: {}", - task.getPartition().getGraphName(), - task.getPartition().getId(), - task.getCleanPartition().getKeyStart(), - task.getCleanPartition().getKeyEnd(), - task.getState() + task.getPartition().getGraphName(), + task.getPartition().getId(), + task.getCleanPartition().getKeyStart(), + task.getCleanPartition().getKeyEnd(), + task.getState() ); // 如果失败重试? 
@@ -1231,10 +1326,12 @@ public synchronized void handleSplitTask(MetaTask.Task task) throws PDException var taskInfoMeta = storeService.getTaskInfoMeta(); var partition = task.getPartition(); - MetaTask.Task pdMetaTask = taskInfoMeta.getSplitTask(partition.getGraphName(), partition.getId()); + MetaTask.Task pdMetaTask = + taskInfoMeta.getSplitTask(partition.getGraphName(), partition.getId()); - log.info("report split task, graph:{}, pid : {}, state: {}", task.getPartition().getGraphName(), - task.getPartition().getId(), task.getState()); + log.info("report split task, graph:{}, pid : {}, state: {}", + task.getPartition().getGraphName(), + task.getPartition().getId(), task.getState()); if (pdMetaTask != null) { var newTask = pdMetaTask.toBuilder().setState(task.getState()).build(); @@ -1242,11 +1339,15 @@ public synchronized void handleSplitTask(MetaTask.Task task) throws PDException List subTasks = taskInfoMeta.scanSplitTask(partition.getGraphName()); - var finished = subTasks.stream().allMatch(t -> - t.getState() == MetaTask.TaskState.Task_Success || t.getState() == MetaTask.TaskState.Task_Failure); + var finished = subTasks.stream().allMatch(t -> + t.getState() == + MetaTask.TaskState.Task_Success || + t.getState() == + MetaTask.TaskState.Task_Failure); if (finished) { - var allSuccess = subTasks.stream().allMatch(t -> t.getState() == MetaTask.TaskState.Task_Success); + var allSuccess = subTasks.stream().allMatch( + t -> t.getState() == MetaTask.TaskState.Task_Success); if (allSuccess) { log.info("graph:{} split task all success!", partition.getGraphName()); handleSplitTaskAllSuccess(subTasks, partition.getGraphName(), taskInfoMeta); @@ -1257,46 +1358,51 @@ public synchronized void handleSplitTask(MetaTask.Task task) throws PDException } } - private void handleSplitTaskAllSuccess(List subTasks, String graphName, TaskInfoMeta taskInfoMeta) + private void handleSplitTaskAllSuccess(List subTasks, String graphName, + TaskInfoMeta taskInfoMeta) throws PDException { int addedPartitions = 0; var partitions = new ArrayList(); - for (MetaTask.Task subTask: subTasks) { + for (MetaTask.Task subTask : subTasks) { var source = subTask.getPartition(); var newPartition = subTask.getSplitPartition().getNewPartitionList().get(0); // 发送key range 变更消息 PartitionKeyRange partitionKeyRange = PartitionKeyRange.newBuilder() - .setPartitionId(source.getId()) - .setKeyStart(newPartition.getStartKey()) - .setKeyEnd(newPartition.getEndKey()) - .build(); + .setPartitionId(source.getId()) + .setKeyStart( + newPartition.getStartKey()) + .setKeyEnd( + newPartition.getEndKey()) + .build(); // 通知store fireChangePartitionKeyRange(source, partitionKeyRange); // 将 target 设置为上线. 
source 理论上可能被删掉,所以不处理 CleanPartition cleanPartition = CleanPartition.newBuilder() - .setKeyStart(newPartition.getStartKey()) - .setKeyEnd(newPartition.getEndKey()) - .setCleanType(CleanType.CLEAN_TYPE_KEEP_RANGE) - // target 的 partition只需要清理数据,不需要删除分区 - .setDeletePartition(false) - .build(); + .setKeyStart(newPartition.getStartKey()) + .setKeyEnd(newPartition.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_KEEP_RANGE) + // target 的 partition只需要清理数据,不需要删除分区 + .setDeletePartition(false) + .build(); log.info("pd clean data: {}-{}, key range:{}-{}, type:{}, delete partition:{}", - source.getGraphName(), - source.getId(), - cleanPartition.getKeyStart(), - cleanPartition.getKeyEnd(), - CleanType.CLEAN_TYPE_EXCLUDE_RANGE, - cleanPartition.getDeletePartition()); + source.getGraphName(), + source.getId(), + cleanPartition.getKeyStart(), + cleanPartition.getKeyEnd(), + CleanType.CLEAN_TYPE_EXCLUDE_RANGE, + cleanPartition.getDeletePartition()); fireCleanPartition(source, cleanPartition); // 更新partition state for (var sp : subTask.getSplitPartition().getNewPartitionList()) { - partitions.add(sp.toBuilder().setState(Metapb.PartitionState.PState_Normal).build()); + partitions.add( + sp.toBuilder().setState(Metapb.PartitionState.PState_Normal).build()); } addedPartitions += subTask.getSplitPartition().getNewPartitionCount() - 1; @@ -1308,40 +1414,45 @@ private void handleSplitTaskAllSuccess(List subTasks, String grap var graph = getGraph(graphName); // set partition count - if (pdConfig.getConfigService().getPartitionCount() != storeService.getShardGroups().size()) { + if (pdConfig.getConfigService().getPartitionCount() != + storeService.getShardGroups().size()) { pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size()); - log.info("set the partition count of config server to {}", storeService.getShardGroups().size()); + log.info("set the partition count of config server to {}", + storeService.getShardGroups().size()); } // 更新graph partition count var newGraph = graph.toBuilder() - .setPartitionCount(graph.getPartitionCount() + addedPartitions) - .build(); + .setPartitionCount(graph.getPartitionCount() + addedPartitions) + .build(); updateGraph(newGraph); // 事务完成 taskInfoMeta.removeSplitTaskPrefix(graphName); } - private void handleSplitTaskIfFailed(List subTasks, String graphName, TaskInfoMeta taskInfoMeta) + private void handleSplitTaskIfFailed(List subTasks, String graphName, + TaskInfoMeta taskInfoMeta) throws PDException { - for (var metaTask : subTasks){ + for (var metaTask : subTasks) { var splitPartitions = metaTask.getSplitPartition().getNewPartitionList(); - for (int i = 1; i < splitPartitions.size(); i ++) { + for (int i = 1; i < splitPartitions.size(); i++) { var split = splitPartitions.get(i); CleanPartition cleanPartition = CleanPartition.newBuilder() - .setKeyStart(split.getStartKey()) - .setKeyEnd(split.getEndKey()) - .setCleanType(CleanType.CLEAN_TYPE_EXCLUDE_RANGE) - .setDeletePartition(true) - .build(); + .setKeyStart(split.getStartKey()) + .setKeyEnd(split.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_EXCLUDE_RANGE) + .setDeletePartition(true) + .build(); fireCleanPartition(split, cleanPartition); } // set partition state normal var partition = metaTask.getPartition(); - updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Normal); + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Normal); } // 清理掉任务列表 taskInfoMeta.removeSplitTaskPrefix(graphName); @@ 
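Note on the rollback path above: handleSplitTaskIfFailed issues a clean-and-delete for every new partition except the first entry of the new-partition list and then puts the source partition back to Normal. The selection of which split pieces to discard reduces to the following (sample ids are hypothetical):

    import java.util.List;

    public class SplitRollback {
        // Given the new partitions produced by a split, index 0 is kept;
        // everything from index 1 on is cleaned and deleted during rollback.
        static List<Integer> partitionsToDiscard(List<Integer> newPartitionIds) {
            return newPartitionIds.subList(1, newPartitionIds.size());
        }

        public static void main(String[] args) {
            // Hypothetical split producing partitions 7, 19, 20: only 19 and 20 are rolled back.
            System.out.println(partitionsToDiscard(List.of(7, 19, 20))); // [19, 20]
        }
    }
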
-1353,7 +1464,7 @@ private void handleSplitTaskIfFailed(List subTasks, String graphN */ protected void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { log.info("onPartitionChanged partition: {}", partition); - if (old != null && old.getState() != partition.getState()){ + if (old != null && old.getState() != partition.getState()) { // 状态改变,重置图的状态 Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; for (Metapb.Partition pt : partitionMeta.getPartitions(partition.getGraphName())) { @@ -1363,7 +1474,7 @@ protected void onPartitionChanged(Metapb.Partition old, Metapb.Partition partiti } try { updateGraphState(partition.getGraphName(), state); - }catch ( PDException e){ + } catch (PDException e) { log.error("onPartitionChanged", e); } @@ -1380,6 +1491,7 @@ protected void onPartitionRemoved(Metapb.Partition partition) { e.onPartitionRemoved(partition); }); } + /** * PD的leader发生改变,需要重新加载数据 */ @@ -1400,22 +1512,25 @@ public void onRaftLeaderChanged() { * @param partId * @param state */ - public void onPartitionStateChanged(String graph, int partId, Metapb.PartitionState state) throws PDException { + public void onPartitionStateChanged(String graph, int partId, + Metapb.PartitionState state) throws PDException { updatePartitionState(graph, partId, state); } /** * Shard状态发生改变,需要传播到分区、图、集群 + * * @param graph * @param partId * @param state */ - public void onShardStateChanged(String graph, int partId, Metapb.PartitionState state){ + public void onShardStateChanged(String graph, int partId, Metapb.PartitionState state) { } /** * 发送rocksdb compaction 消息 + * * @param partId * @param tableName */ @@ -1423,15 +1538,16 @@ public void fireDbCompaction(int partId, String tableName) { try { for (Metapb.Graph graph : getGraphs()) { - Metapb.Partition partition = partitionMeta.getPartitionById(graph.getGraphName(), partId); + Metapb.Partition partition = + partitionMeta.getPartitionById(graph.getGraphName(), partId); DbCompaction dbCompaction = DbCompaction.newBuilder() - .setTableName(tableName) - .build(); + .setTableName(tableName) + .build(); instructionListeners.forEach(cmd -> { try { cmd.dbCompaction(partition, dbCompaction); - }catch (Exception e){ + } catch (Exception e) { log.error("firedbCompaction", e); } }); @@ -1442,7 +1558,7 @@ public void fireDbCompaction(int partId, String tableName) { } - public void updateShardGroupCache(Metapb.ShardGroup group){ - partitionMeta.getPartitionCache().updateShardGroup(group); + public void updateShardGroupCache(Metapb.ShardGroup group) { + partitionMeta.getPartitionCache().updateShardGroup(group); } } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java index ad851ca546..f5aeaec89e 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
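Note on onPartitionChanged above: the graph state is recomputed as the most severe state among that graph's partitions, where the protobuf state numbers order severity. A reduced sketch of that aggregation with an ordinal stand-in for the numeric comparison:

    import java.util.List;

    public class GraphStateAggregation {
        // Ordinal order stands in for the protobuf state numbers: later = more severe.
        enum PartitionState { NORMAL, WARN, OFFLINE }

        // The graph takes the worst state reported by any of its partitions.
        static PartitionState graphState(List<PartitionState> partitionStates) {
            PartitionState worst = PartitionState.NORMAL;
            for (PartitionState s : partitionStates) {
                if (s.ordinal() > worst.ordinal()) {
                    worst = s;
                }
            }
            return worst;
        }

        public static void main(String[] args) {
            System.out.println(graphState(List.of(PartitionState.NORMAL,
                                                  PartitionState.WARN))); // WARN
        }
    }
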
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd; import com.baidu.hugegraph.pd.grpc.Metapb; @@ -7,5 +24,6 @@ */ public interface PartitionStatusListener { void onPartitionChanged(Metapb.Partition partition, Metapb.Partition newPartition); + void onPartitionRemoved(Metapb.Partition partition); } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java index 20d764b307..4a599ae4d1 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd; import org.apache.hugegraph.pd.config.PDConfig; @@ -14,10 +31,10 @@ * @date 2022/1/14 **/ public class RegistryService { - private PDConfig pdConfig; - private DiscoveryMetaStore meta; + private final PDConfig pdConfig; + private final DiscoveryMetaStore meta; - public RegistryService(PDConfig config){ + public RegistryService(PDConfig config) { this.pdConfig = config; meta = MetadataFactory.newDiscoveryMeta(config); } @@ -25,7 +42,8 @@ public RegistryService(PDConfig config){ public void register(NodeInfo nodeInfo, int outTimes) throws PDException { meta.register(nodeInfo, outTimes); } + public NodeInfos getNodes(Query query) { - return meta.getNodes(query); + return meta.getNodes(query); } } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java index dd254df32a..86b952cc94 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package com.baidu.hugegraph.pd; import com.baidu.hugegraph.pd.grpc.Metapb; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java index be2b146501..77dfb913be 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java @@ -1,15 +1,21 @@ -package org.apache.hugegraph.pd; - -import com.baidu.hugegraph.pd.common.PDException; - -import org.apache.hugegraph.pd.config.PDConfig; -import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.grpc.Metapb; - -import lombok.extern.slf4j.Slf4j; - -import org.springframework.stereotype.Service; +package org.apache.hugegraph.pd; import java.time.Instant; import java.time.LocalDateTime; @@ -22,21 +28,27 @@ import java.util.Map; import java.util.stream.Collectors; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.springframework.stereotype.Service; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Metapb; + +import lombok.extern.slf4j.Slf4j; + @Slf4j @Service public class StoreMonitorDataService { - private PDConfig pdConfig; - - private KvService kvService; - private static final String MONITOR_DATA_PREFIX = "SMD"; - + private final PDConfig pdConfig; + private final KvService kvService; /** * the last timestamp of the store monitor data, * used for determine the gap of store's heartbeat. */ - private Map lastStoreStateTimestamp; + private final Map lastStoreStateTimestamp; public StoreMonitorDataService(PDConfig pdConfig) { @@ -47,6 +59,7 @@ public StoreMonitorDataService(PDConfig pdConfig) { /** * save the store stats + * * @param storeStats */ public void saveMonitorData(Metapb.StoreStats storeStats) throws PDException { @@ -54,7 +67,7 @@ public void saveMonitorData(Metapb.StoreStats storeStats) throws PDException { /** * load the latest store timestamp when start up or alter leader */ - if (! 
lastStoreStateTimestamp.containsKey(storeId)){ + if (!lastStoreStateTimestamp.containsKey(storeId)) { long lastTimestamp = getLatestStoreMonitorDataTimeStamp(storeId); log.debug("store id : {}, last timestamp :{}", storeId, lastTimestamp); lastStoreStateTimestamp.put(storeId, lastTimestamp); @@ -64,30 +77,33 @@ public void saveMonitorData(Metapb.StoreStats storeStats) throws PDException { long interval = this.pdConfig.getStore().getMonitorInterval(); // exceed the interval - if (current - lastStoreStateTimestamp.getOrDefault(storeId, 0L) >= interval){ + if (current - lastStoreStateTimestamp.getOrDefault(storeId, 0L) >= interval) { saveMonitorDataToDb(storeStats, current); - log.debug("store id: {}, system info:{}", storeId, debugMonitorInfo(storeStats.getSystemMetricsList())); + log.debug("store id: {}, system info:{}", storeId, + debugMonitorInfo(storeStats.getSystemMetricsList())); lastStoreStateTimestamp.put(storeId, current); } } /** * save the snapshot of store status + * * @param storeStats store status - * @param ts, timestamp + * @param ts, timestamp * @return store status * @throws PDException */ private void saveMonitorDataToDb(Metapb.StoreStats storeStats, long ts) throws PDException { String key = getMonitorDataKey(storeStats.getStoreId(), ts); - log.debug("store id: {}, save monitor data info, ts:{}, my key:{}", storeStats.getStoreId(), ts, key); + log.debug("store id: {}, save monitor data info, ts:{}, my key:{}", storeStats.getStoreId(), + ts, key); kvService.put(key, extractMetricsFromStoreStatus(storeStats)); } - public String debugMonitorInfo(List systemInfo){ + public String debugMonitorInfo(List systemInfo) { StringBuilder sb = new StringBuilder(); sb.append("["); - for(Metapb.RecordPair pair : systemInfo){ + for (Metapb.RecordPair pair : systemInfo) { sb.append(pair.getKey()); sb.append(":"); sb.append(pair.getValue()); @@ -99,45 +115,51 @@ public String debugMonitorInfo(List systemInfo){ /** * get the historical monitor data by store id, by range(start, end) + * * @param storeId store id - * @param start range start - * @param end range end + * @param start range start + * @param end range end * @return list of store stats */ - public Map getStoreMonitorData(long storeId, long start, long end) throws PDException { + public Map getStoreMonitorData(long storeId, long start, long end) throws + PDException { log.debug("get monitor data, store id:{}, start{}, end:{}", - storeId, - getMonitorDataKey(storeId, start), - getMonitorDataKey(storeId, end)); - return kvService.scanRange(getMonitorDataKey(storeId, start), getMonitorDataKey(storeId, end)); + storeId, + getMonitorDataKey(storeId, start), + getMonitorDataKey(storeId, end)); + return kvService.scanRange(getMonitorDataKey(storeId, start), + getMonitorDataKey(storeId, end)); } /** * for api service + * * @param storeId * @return * @throws PDException */ - public List> getStoreMonitorData(long storeId) throws PDException { - List> result = new LinkedList<>(); + public List> getStoreMonitorData(long storeId) throws PDException { + List> result = new LinkedList<>(); long current = System.currentTimeMillis() / 1000; long start = current - this.pdConfig.getStore().getRetentionPeriod(); try { - for(Map.Entry entry : getStoreMonitorData(storeId, start, current).entrySet()){ - String[] arr = entry.getKey().split(String.valueOf(MetadataKeyHelper.getDelimiter())); + for (Map.Entry entry : getStoreMonitorData(storeId, start, + current).entrySet()) { + String[] arr = + 
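Note on saveMonitorData above: a store's stats are persisted only when the configured monitor interval has elapsed since the last persisted sample for that store, tracked per store id. Stripped of the KV store, the throttle looks roughly like this (class and parameter names are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    public class MonitorThrottle {
        private final long intervalSeconds;                       // e.g. the store monitor interval
        private final Map<Long, Long> lastSaved = new HashMap<>(); // storeId -> last saved ts (s)

        MonitorThrottle(long intervalSeconds) {
            this.intervalSeconds = intervalSeconds;
        }

        // Returns true when this sample should be written, and records its timestamp.
        synchronized boolean shouldSave(long storeId, long nowSeconds) {
            long last = lastSaved.getOrDefault(storeId, 0L);
            if (nowSeconds - last >= intervalSeconds) {
                lastSaved.put(storeId, nowSeconds);
                return true;
            }
            return false;
        }

        public static void main(String[] args) {
            MonitorThrottle t = new MonitorThrottle(60);
            System.out.println(t.shouldSave(1L, 1_000)); // true, first sample
            System.out.println(t.shouldSave(1L, 1_030)); // false, only 30s elapsed
            System.out.println(t.shouldSave(1L, 1_061)); // true, interval reached
        }
    }
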
entry.getKey().split(String.valueOf(MetadataKeyHelper.getDelimiter())); Map map = new HashMap(); long timestamp = Long.parseLong(arr[arr.length - 1]); map.put("ts", timestamp); - for (String pair : entry.getValue().split(",")){ + for (String pair : entry.getValue().split(",")) { String[] p = pair.split(":"); - if (p.length == 2){ + if (p.length == 2) { map.put(p[0], Long.parseLong(p[1])); } } result.add(map); } - result.sort((o1,o2)-> o1.get("ts").compareTo(o2.get("ts"))); + result.sort((o1, o2) -> o1.get("ts").compareTo(o2.get("ts"))); } catch (PDException e) { log.error(e.getMessage()); } @@ -146,6 +168,7 @@ public List> getStoreMonitorData(long storeId) throws PDExcepti /** * for api service, export txt + * * @param storeId * @return * @throws PDException @@ -161,17 +184,18 @@ public String getStoreMonitorDataText(long storeId) throws PDException { // construct columns, ts + sorted keys columns.add("ts"); columns.addAll(lastRow.keySet().stream() - .filter(x -> !"ts".equals(x)) - .sorted() - .collect(Collectors.toList())); + .filter(x -> !"ts".equals(x)) + .sorted() + .collect(Collectors.toList())); sb.append(String.join(",", columns).replace("\"", "")).append("\r\n"); - for (Map row : result){ - for ( String key : columns){ + for (Map row : result) { + for (String key : columns) { // ts + , + ... - if ("ts".equals(key)){ + if ("ts".equals(key)) { // format ts - sb.append(dtf.format(LocalDateTime.ofInstant(Instant.ofEpochSecond(row.get(key)), - ZoneId.systemDefault()))); + sb.append(dtf.format( + LocalDateTime.ofInstant(Instant.ofEpochSecond(row.get(key)), + ZoneId.systemDefault()))); continue; } else { sb.append(",").append(row.getOrDefault(key, 0L)); @@ -185,15 +209,16 @@ public String getStoreMonitorDataText(long storeId) throws PDException { /** * remove the monitor data of the store that before till(not include) + * * @param storeId store id - * @param till expire time + * @param till expire time * @return affect rows */ public int removeExpiredMonitorData(long storeId, long till) throws PDException { String keyStart = getMonitorDataKey(storeId, 1); String keyEnd = getMonitorDataKey(storeId, till); int records = 0; - for (String key : kvService.scanRange(keyStart, keyEnd).keySet()){ + for (String key : kvService.scanRange(keyStart, keyEnd).keySet()) { kvService.delete(key); log.debug("remove monitor data, key: {}", key); records += 1; @@ -203,17 +228,18 @@ public int removeExpiredMonitorData(long storeId, long till) throws PDException /** * get the latest timestamp of the store monitor data + * * @param storeId * @return timestamp(by seconds) */ - public long getLatestStoreMonitorDataTimeStamp(long storeId){ + public long getLatestStoreMonitorDataTimeStamp(long storeId) { long maxId = 0L; long current = System.currentTimeMillis() / 1000; long start = current - this.pdConfig.getStore().getMonitorInterval(); String keyStart = getMonitorDataKey(storeId, start); String keyEnd = getMonitorDataKey(storeId, current); try { - for(String key : kvService.scanRange(keyStart, keyEnd).keySet()){ + for (String key : kvService.scanRange(keyStart, keyEnd).keySet()) { String[] arr = key.split(String.valueOf(MetadataKeyHelper.getDelimiter())); maxId = Math.max(maxId, Long.parseLong(arr[arr.length - 1])); } @@ -222,19 +248,18 @@ public long getLatestStoreMonitorDataTimeStamp(long storeId){ return maxId; } - private String getMonitorDataKey(long storeId, long ts){ - StringBuilder builder = new StringBuilder(); - builder.append(MONITOR_DATA_PREFIX) - .append(MetadataKeyHelper.getDelimiter()) - 
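Note on the API helpers above: each stored row is turned back into a map by taking the timestamp from the last delimiter-separated segment of the key and splitting the value into comma-separated name:value pairs; rows are then sorted by ts and rendered as CSV with ts first and the remaining columns sorted. A small parsing sketch under those assumptions (the '/' delimiter and quote stripping here are simplifications for illustration):

    import java.util.HashMap;
    import java.util.Map;

    public class MonitorRowParser {
        // Parses a key like "SMD/42/1683360000" and a value like "\"cpu\":37,\"mem_used\":512"
        // into {ts=1683360000, cpu=37, mem_used=512}.
        static Map<String, Long> parseRow(String key, String value, char delimiter) {
            Map<String, Long> row = new HashMap<>();
            String[] keyParts = key.split(String.valueOf(delimiter));
            row.put("ts", Long.parseLong(keyParts[keyParts.length - 1]));
            for (String pair : value.split(",")) {
                String[] kv = pair.split(":");
                if (kv.length == 2) {
                    row.put(kv[0].replace("\"", ""), Long.parseLong(kv[1]));
                }
            }
            return row;
        }

        public static void main(String[] args) {
            System.out.println(parseRow("SMD/42/1683360000", "\"cpu\":37,\"mem_used\":512", '/'));
        }
    }
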
.append(storeId) - .append(MetadataKeyHelper.getDelimiter()) - .append(ts); - return builder.toString(); + private String getMonitorDataKey(long storeId, long ts) { + String builder = MONITOR_DATA_PREFIX + + MetadataKeyHelper.getDelimiter() + + storeId + + MetadataKeyHelper.getDelimiter() + + ts; + return builder; } - private String extractMetricsFromStoreStatus(Metapb.StoreStats storeStats){ + private String extractMetricsFromStoreStatus(Metapb.StoreStats storeStats) { List list = new ArrayList<>(); - for(Metapb.RecordPair pair : storeStats.getSystemMetricsList()){ + for (Metapb.RecordPair pair : storeStats.getSystemMetricsList()) { list.add("\"" + pair.getKey() + "\":" + pair.getValue()); } return String.join(",", list); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java index d2eb8b49be..1ff96126be 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
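Note on getMonitorDataKey above: the key concatenates the fixed prefix, the store id, and the timestamp with the metadata delimiter, which is what makes the time-range scans over a single store straightforward. A sketch of the key layout (the '/' delimiter is shown purely for illustration; the real delimiter comes from MetadataKeyHelper):

    public class MonitorDataKey {
        private static final String MONITOR_DATA_PREFIX = "SMD";
        private static final char DELIMITER = '/'; // stand-in for the metadata delimiter

        static String monitorDataKey(long storeId, long tsSeconds) {
            return MONITOR_DATA_PREFIX + DELIMITER + storeId + DELIMITER + tsSeconds;
        }

        public static void main(String[] args) {
            // A time-range scan uses two keys with the same store id and different timestamps.
            System.out.println(monitorDataKey(42L, 1_683_360_000L)); // SMD/42/1683360000
            System.out.println(monitorDataKey(42L, 1_683_363_600L)); // SMD/42/1683363600
        }
    }
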
+ */ + package org.apache.hugegraph.pd; import java.util.ArrayList; @@ -37,54 +54,65 @@ @Slf4j public class StoreNodeService { + private static final Long STORE_HEART_BEAT_INTERVAL = 30000L; + private static final String graphSpaceConfPrefix = "HUGEGRAPH/hg/GRAPHSPACE/CONF/"; // Store状态监听 - private List statusListeners; - private List shardGroupStatusListeners; - + private final List statusListeners; + private final List shardGroupStatusListeners; private PartitionService partitionService; - private StoreInfoMeta storeInfoMeta; - private TaskInfoMeta taskInfoMeta; - private Random random = new Random(System.currentTimeMillis()); + private final StoreInfoMeta storeInfoMeta; + private final TaskInfoMeta taskInfoMeta; + private final Random random = new Random(System.currentTimeMillis()); private Metapb.ClusterStats clusterStats; - private KvService kvService; - private ConfigService configService; - private PDConfig pdConfig; - - private static final Long STORE_HEART_BEAT_INTERVAL = 30000L; + private final KvService kvService; + private final ConfigService configService; + private final PDConfig pdConfig; + private final Runnable quotaChecker = () -> { + try { + getQuota(); + } catch (Exception e) { + log.error( + "obtaining and sending graph space quota information with error: ", + e); + } + }; - public StoreNodeService(PDConfig config){ + public StoreNodeService(PDConfig config) { this.pdConfig = config; storeInfoMeta = MetadataFactory.newStoreInfoMeta(pdConfig); taskInfoMeta = MetadataFactory.newTaskInfoMeta(pdConfig); shardGroupStatusListeners = Collections.synchronizedList(new ArrayList<>()); statusListeners = Collections.synchronizedList(new ArrayList()); clusterStats = Metapb.ClusterStats.newBuilder() - .setState(Metapb.ClusterState.Cluster_Not_Ready) - .setTimestamp(System.currentTimeMillis()) - .build(); + .setState(Metapb.ClusterState.Cluster_Not_Ready) + .setTimestamp(System.currentTimeMillis()) + .build(); kvService = new KvService(pdConfig); configService = new ConfigService(pdConfig); } - public void init(PartitionService partitionService){ + public void init(PartitionService partitionService) { this.partitionService = partitionService; partitionService.addStatusListener(new PartitionStatusListener() { @Override public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { - if (old != null && old.getState() != partition.getState()){ + if (old != null && old.getState() != partition.getState()) { // 状态改变,重置集群状态 try { - List partitions = partitionService.getPartitionById(partition.getId()); + List partitions = + partitionService.getPartitionById(partition.getId()); Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; - for(Metapb.Partition pt : partitions){ - if ( pt.getState().getNumber() > state.getNumber()) + for (Metapb.Partition pt : partitions) { + if (pt.getState().getNumber() > state.getNumber()) { state = pt.getState(); + } } updateShardGroupState(partition.getId(), state); - for(Metapb.ShardGroup group : getShardGroups()){ - if ( group.getState().getNumber() > state.getNumber()) + for (Metapb.ShardGroup group : getShardGroups()) { + if (group.getState().getNumber() > state.getNumber()) { state = group.getState(); + } } updateClusterStatus(state); } catch (PDException e) { @@ -102,14 +130,17 @@ public void onPartitionRemoved(Metapb.Partition partition) { /** * 集群是否准备就绪 + * * @return */ - public boolean isOK(){ - return this.clusterStats.getState().getNumber() < Metapb.ClusterState.Cluster_Offline.getNumber(); + public boolean isOK() { + 
return this.clusterStats.getState().getNumber() < + Metapb.ClusterState.Cluster_Offline.getNumber(); } /** * Store注册,记录Store的ip地址,首次注册需要生成store_ID + * * @param store */ public Metapb.Store register(Metapb.Store store) throws PDException { @@ -119,52 +150,55 @@ public Metapb.Store register(Metapb.Store store) throws PDException { } if (!storeInfoMeta.storeExists(store.getId())) { - log.error("Store id {} does not belong to this PD, address = {}", store.getId(), store.getAddress()); + log.error("Store id {} does not belong to this PD, address = {}", store.getId(), + store.getAddress()); // storeId不存在,抛出异常 throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, - String.format("Store id %d doest not exist.", store.getId())); + String.format("Store id %d doest not exist.", store.getId())); } // 如果store状态为Tombstone拒绝注册。 Metapb.Store lastStore = storeInfoMeta.getStore(store.getId()); if (lastStore.getState() == Metapb.StoreState.Tombstone) { log.error("Store id {} has been removed, Please reinitialize, address = {}", - store.getId(), store.getAddress()); + store.getId(), store.getAddress()); // storeId不存在,抛出异常 throw new PDException(Pdpb.ErrorType.STORE_HAS_BEEN_REMOVED_VALUE, - String.format("Store id %d has been removed. %s", store.getId(), store.getAddress())); + String.format("Store id %d has been removed. %s", store.getId(), + store.getAddress())); } // offline或者up,或者在初始激活列表中,自动上线 Metapb.StoreState storeState = lastStore.getState(); if (storeState == Metapb.StoreState.Offline || storeState == Metapb.StoreState.Up - || inInitialStoreList(store)){ + || inInitialStoreList(store)) { storeState = Metapb.StoreState.Up; - } - else { + } else { storeState = Metapb.StoreState.Pending; } store = Metapb.Store.newBuilder(lastStore) - .setAddress(store.getAddress()) - .setRaftAddress(store.getRaftAddress()) - .setDataVersion(store.getDataVersion()) - .setDeployPath(store.getDeployPath()) - .setVersion(store.getVersion()) - .setDataPath(store.getDataPath()) - .setState(storeState).setCores(store.getCores()) - .clearLabels().addAllLabels(store.getLabelsList()) - .setLastHeartbeat(System.currentTimeMillis()).build(); + .setAddress(store.getAddress()) + .setRaftAddress(store.getRaftAddress()) + .setDataVersion(store.getDataVersion()) + .setDeployPath(store.getDeployPath()) + .setVersion(store.getVersion()) + .setDataPath(store.getDataPath()) + .setState(storeState).setCores(store.getCores()) + .clearLabels().addAllLabels(store.getLabelsList()) + .setLastHeartbeat(System.currentTimeMillis()).build(); long current = System.currentTimeMillis(); boolean raftChanged = false; // 上线状态的Raft Address 发生了变更 - if (!Objects.equals(lastStore.getRaftAddress(), store.getRaftAddress()) && storeState == Metapb.StoreState.Up) { + if (!Objects.equals(lastStore.getRaftAddress(), store.getRaftAddress()) && + storeState == Metapb.StoreState.Up) { // 时间间隔太短,而且raft有变更,则认为是无效的store - if (current - lastStore.getLastHeartbeat() < STORE_HEART_BEAT_INTERVAL * 0.8){ + if (current - lastStore.getLastHeartbeat() < STORE_HEART_BEAT_INTERVAL * 0.8) { throw new PDException(Pdpb.ErrorType.STORE_PROHIBIT_DUPLICATE_VALUE, - String.format("Store id %d may be duplicate. addr: %s", store.getId(), store.getAddress())); - } else if(current - lastStore.getLastHeartbeat() > STORE_HEART_BEAT_INTERVAL * 1.2 ) { + String.format("Store id %d may be duplicate. 
addr: %s", + store.getId(), store.getAddress())); + } else if (current - lastStore.getLastHeartbeat() > STORE_HEART_BEAT_INTERVAL * 1.2) { // 认为发生了变更 raftChanged = true; } else { @@ -191,66 +225,73 @@ public Metapb.Store register(Metapb.Store store) throws PDException { return store; } - private boolean inInitialStoreList(Metapb.Store store){ + private boolean inInitialStoreList(Metapb.Store store) { return this.pdConfig.getInitialStoreMap().containsKey(store.getAddress()); } /** * 产生一个新的store对象 + * * @param store * @return * @throws PDException */ private synchronized Metapb.Store newStoreNode(Metapb.Store store) throws PDException { long id = random.nextLong() & Long.MAX_VALUE; - while( id == 0 || storeInfoMeta.storeExists(id) ) + while (id == 0 || storeInfoMeta.storeExists(id)) { id = random.nextLong() & Long.MAX_VALUE; + } store = Metapb.Store.newBuilder(store) - .setId(id) - .setState(Metapb.StoreState.Pending) - .setStartTimestamp(System.currentTimeMillis()).build(); + .setId(id) + .setState(Metapb.StoreState.Pending) + .setStartTimestamp(System.currentTimeMillis()).build(); storeInfoMeta.updateStore(store); return store; } /** * 根据store_id返回Store信息 + * * @param id * @return * @throws PDException */ public Metapb.Store getStore(long id) throws PDException { Metapb.Store store = storeInfoMeta.getStore(id); - if ( store == null ) + if (store == null) { throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, - String.format("Store id %x doest not exist.", id)); + String.format("Store id %x doest not exist.", id)); + } return store; } /** * 更新Store信息,检测Store状态的变化,通知到Hugestore - * */ public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDException { - log.info("updateStore storeId: {}, address: {}, state: {}", store.getId(), store.getAddress(), store.getState()); + log.info("updateStore storeId: {}, address: {}, state: {}", store.getId(), + store.getAddress(), store.getState()); Metapb.Store lastStore = storeInfoMeta.getStore(store.getId()); if (lastStore == null) return null; - Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore).clearLabels().clearStats(); + Metapb.Store.Builder builder = + Metapb.Store.newBuilder(lastStore).clearLabels().clearStats(); store = builder.mergeFrom(store).build(); if (store.getState() == Metapb.StoreState.Tombstone) { List activeStores = getStores(); if (lastStore.getState() == Metapb.StoreState.Up - && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { + && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, - "The number of active stores is less then " + pdConfig.getMinStoreCount()); + "The number of active stores is less then " + + pdConfig.getMinStoreCount()); } } storeInfoMeta.updateStore(store); - if (store.getState() != Metapb.StoreState.Unknown && store.getState() != lastStore.getState()){ + if (store.getState() != Metapb.StoreState.Unknown && + store.getState() != lastStore.getState()) { // 如果希望将store下线 if (store.getState() == Metapb.StoreState.Exiting) { - if (lastStore.getState() == Metapb.StoreState.Exiting){ + if (lastStore.getState() == Metapb.StoreState.Exiting) { //如果已经是下线中的状态,则不作进一步处理 return lastStore; } @@ -285,6 +326,7 @@ public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDExcept /** * store被关机,重新分配shardGroup的shard + * * @param store * @throws PDException */ @@ -294,8 +336,9 @@ public synchronized void storeTurnoff(Metapb.Store store) throws PDException { Metapb.ShardGroup.Builder builder = 
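Note on the registration flow above: register() decides the new store state from the previously recorded one; a Tombstone store is rejected outright, a store that was Up or Offline (or that appears in the initial store list) comes back as Up, and anything else is parked as Pending. That decision table can be sketched as follows (enum and method names are stand-ins for the Metapb types):

    public class StoreRegisterState {
        enum StoreState { UNKNOWN, PENDING, UP, OFFLINE, EXITING, TOMBSTONE }

        // Mirrors the state transition on registration; Tombstone stores are rejected.
        static StoreState onRegister(StoreState lastState, boolean inInitialStoreList) {
            if (lastState == StoreState.TOMBSTONE) {
                throw new IllegalStateException("store has been removed, please reinitialize");
            }
            if (lastState == StoreState.OFFLINE || lastState == StoreState.UP || inInitialStoreList) {
                return StoreState.UP;  // comes back online automatically
            }
            return StoreState.PENDING; // waits for activation
        }

        public static void main(String[] args) {
            System.out.println(onRegister(StoreState.OFFLINE, false)); // UP
            System.out.println(onRegister(StoreState.UNKNOWN, false)); // PENDING
            System.out.println(onRegister(StoreState.UNKNOWN, true));  // UP, listed in initial stores
        }
    }
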
Metapb.ShardGroup.newBuilder(group); builder.clearShards(); group.getShardsList().forEach(shard -> { - if (shard.getStoreId() != store.getId()) + if (shard.getStoreId() != store.getId()) { builder.addShards(shard); + } }); reallocShards(builder.build()); } @@ -303,17 +346,18 @@ public synchronized void storeTurnoff(Metapb.Store store) throws PDException { /** * 根据图名返回stores信息,如果graphName为空,返回所有store信息 + * * @throws PDException */ - public List getStores() throws PDException{ + public List getStores() throws PDException { return storeInfoMeta.getStores(null); } - public List getStores(String graphName) throws PDException{ + public List getStores(String graphName) throws PDException { return storeInfoMeta.getStores(graphName); } - public List getStoreStatus(boolean isActive) throws PDException{ + public List getStoreStatus(boolean isActive) throws PDException { return storeInfoMeta.getStoreStatus(isActive); } @@ -327,7 +371,7 @@ public Metapb.ShardGroup getShardGroup(int groupId) throws PDException { public List getShardList(int groupId) throws PDException { var shardGroup = getShardGroup(groupId); - if (shardGroup != null){ + if (shardGroup != null) { return shardGroup.getShardsList(); } return new ArrayList<>(); @@ -337,8 +381,9 @@ public List getShardGroupsByStore(long storeId) throws PDExce List shardGroups = new ArrayList<>(); storeInfoMeta.getShardGroups().forEach(shardGroup -> { shardGroup.getShardsList().forEach(shard -> { - if (shard.getStoreId() == storeId) + if (shard.getStoreId() == storeId) { shardGroups.add(shardGroup); + } }); }); return shardGroups; @@ -346,6 +391,7 @@ public List getShardGroupsByStore(long storeId) throws PDExce /** * 返回活跃的store + * * @param graphName * @return * @throws PDException @@ -353,6 +399,7 @@ public List getShardGroupsByStore(long storeId) throws PDExce public List getActiveStores(String graphName) throws PDException { return storeInfoMeta.getActiveStores(graphName); } + public List getActiveStores() throws PDException { return storeInfoMeta.getActiveStores(); } @@ -367,15 +414,16 @@ public List getTombStores() throws PDException { return stores; } - public long removeStore(Long storeId) throws PDException { return storeInfoMeta.removeStore(storeId); } + /** * 给partition分配store,根据图的配置,决定分配几个peer * 分配完所有的shards,保存ShardGroup对象(store不变动,只执行一次) */ - public synchronized List allocShards(Metapb.Graph graph, int partId) throws PDException { + public synchronized List allocShards(Metapb.Graph graph, int partId) throws + PDException { // 多图共用raft分组,因此分配shard只依赖partitionId. 
// 图根据数据大小可以设置分区的数量,但总数不能超过raft分组数量 if (storeInfoMeta.getShardGroup(partId) == null) { @@ -384,12 +432,14 @@ public synchronized List allocShards(Metapb.Graph graph, int partI List stores = storeInfoMeta.getActiveStores(); if (stores.size() == 0) { - throw new PDException(Pdpb.ErrorType.NO_ACTIVE_STORE_VALUE, "There is no any online store"); + throw new PDException(Pdpb.ErrorType.NO_ACTIVE_STORE_VALUE, + "There is no any online store"); } if (stores.size() < pdConfig.getMinStoreCount()) { throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, - "The number of active stores is less then " + pdConfig.getMinStoreCount()); + "The number of active stores is less then " + + pdConfig.getMinStoreCount()); } int shardCount = pdConfig.getPartition().getShardCount(); @@ -402,21 +452,25 @@ public synchronized List allocShards(Metapb.Graph graph, int partI } // 一次创建完所有的ShardGroup,保证初始的groupID有序,方便人工阅读 - for (int groupId = 0; groupId < pdConfig.getConfigService().getPartitionCount(); groupId++) { + for (int groupId = 0; groupId < pdConfig.getConfigService().getPartitionCount(); + groupId++) { int storeIdx = groupId % stores.size(); //store分配规则,简化为取模 List shards = new ArrayList<>(); for (int i = 0; i < shardCount; i++) { - Metapb.Shard shard = Metapb.Shard.newBuilder().setStoreId(stores.get(storeIdx).getId()) - .setRole(i == 0 ? Metapb.ShardRole.Leader : Metapb.ShardRole.Follower) // - .build(); + Metapb.Shard shard = + Metapb.Shard.newBuilder().setStoreId(stores.get(storeIdx).getId()) + .setRole(i == 0 ? Metapb.ShardRole.Leader : + Metapb.ShardRole.Follower) // + .build(); shards.add(shard); storeIdx = (storeIdx + 1) >= stores.size() ? 0 : ++storeIdx; // 顺序选择 } Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder() - .setId(groupId) - .setState(Metapb.PartitionState.PState_Normal) - .addAllShards(shards).build(); + .setId(groupId) + .setState( + Metapb.PartitionState.PState_Normal) + .addAllShards(shards).build(); // new group storeInfoMeta.updateShardGroup(group); @@ -432,16 +486,19 @@ public synchronized List allocShards(Metapb.Graph graph, int partI * 根据graph的shard_count,重新分配shard * 发送变更change shard指令 */ - public synchronized List reallocShards(Metapb.ShardGroup shardGroup) throws PDException { + public synchronized List reallocShards(Metapb.ShardGroup shardGroup) throws + PDException { List stores = storeInfoMeta.getActiveStores(); if (stores.size() == 0) { - throw new PDException(Pdpb.ErrorType.NO_ACTIVE_STORE_VALUE, "There is no any online store"); + throw new PDException(Pdpb.ErrorType.NO_ACTIVE_STORE_VALUE, + "There is no any online store"); } if (stores.size() < pdConfig.getMinStoreCount()) { throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, - "The number of active stores is less then " + pdConfig.getMinStoreCount()); + "The number of active stores is less then " + + pdConfig.getMinStoreCount()); } int shardCount = pdConfig.getPartition().getShardCount(); @@ -458,14 +515,14 @@ public synchronized List reallocShards(Metapb.ShardGroup shardGrou if (shardCount > shards.size()) { // 需要增加shard log.info("reallocShards ShardGroup {}, add shards from {} to {}", - shardGroup.getId(), shards.size(), shardCount); + shardGroup.getId(), shards.size(), shardCount); int storeIdx = (int) shardGroup.getId() % stores.size(); //store分配规则,简化为取模 for (int addCount = shardCount - shards.size(); addCount > 0; ) { // 检查是否已经存在 if (!isStoreInShards(shards, stores.get(storeIdx).getId())) { Metapb.Shard shard = Metapb.Shard.newBuilder() - .setStoreId(stores.get(storeIdx).getId()) - .build(); + 
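Note on allocShards above: shard groups are spread over the active stores by taking the group id modulo the store count as the starting store, marking the first shard of each group as leader and advancing through the stores in order for the followers. A self-contained sketch of that placement (store ids and the Shard record are illustrative):

    import java.util.ArrayList;
    import java.util.List;

    public class ShardPlacement {
        record Shard(long storeId, boolean leader) {}

        // groupId picks the starting store (groupId % stores), the first shard leads,
        // and the remaining shards follow on the next stores in round-robin order.
        static List<Shard> allocShards(int groupId, List<Long> storeIds, int shardCount) {
            List<Shard> shards = new ArrayList<>();
            int storeIdx = groupId % storeIds.size();
            for (int i = 0; i < shardCount; i++) {
                shards.add(new Shard(storeIds.get(storeIdx), i == 0));
                storeIdx = (storeIdx + 1) % storeIds.size();
            }
            return shards;
        }

        public static void main(String[] args) {
            // Hypothetical store ids; group 1 with 3 replicas starts on the second store.
            System.out.println(allocShards(1, List.of(101L, 102L, 103L, 104L), 3));
        }
    }
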
.setStoreId(stores.get(storeIdx).getId()) + .build(); shards.add(shard); addCount--; } @@ -474,7 +531,7 @@ public synchronized List reallocShards(Metapb.ShardGroup shardGrou } else if (shardCount < shards.size()) { // 需要减shard log.info("reallocShards ShardGroup {}, remove shards from {} to {}", - shardGroup.getId(), shards.size(), shardCount); + shardGroup.getId(), shards.size(), shardCount); int subCount = shards.size() - shardCount; Iterator iterator = shards.iterator(); @@ -489,8 +546,8 @@ public synchronized List reallocShards(Metapb.ShardGroup shardGrou } Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder(shardGroup) - .clearShards() - .addAllShards(shards).build(); + .clearShards() + .addAllShards(shards).build(); storeInfoMeta.updateShardGroup(group); partitionService.updateShardGroupCache(group); // change shard group @@ -499,7 +556,8 @@ public synchronized List reallocShards(Metapb.ShardGroup shardGrou var partitions = partitionService.getPartitionById(shardGroup.getId()); if (partitions.size() > 0) { // send one message, change shard is regardless with partition/graph - partitionService.fireChangeShard(partitions.get(0), shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); + partitionService.fireChangeShard(partitions.get(0), shards, + ConfChangeType.CONF_CHANGE_TYPE_ADJUST); } log.info("reallocShards ShardGroup {}, shards: {}", group.getId(), group.getShardsList()); @@ -512,12 +570,13 @@ public synchronized List reallocShards(Metapb.ShardGroup shardGrou * @param groups list of (partition id, count) * @return total groups */ - public synchronized int splitShardGroups(List> groups) throws PDException { + public synchronized int splitShardGroups(List> groups) throws + PDException { int sum = groups.stream().map(pair -> pair.getValue()).reduce(0, Integer::sum); // shard group 太大 - if (sum > getActiveStores().size() * pdConfig.getPartition().getMaxShardsPerStore()){ + if (sum > getActiveStores().size() * pdConfig.getPartition().getMaxShardsPerStore()) { throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, - "can't satisfy target shard group count"); + "can't satisfy target shard group count"); } partitionService.splitPartition(groups); @@ -527,15 +586,17 @@ public synchronized int splitShardGroups(List> groups) /** * 分配shard group,为分裂做准备 + * * @param groups * @return true * @throws PDException */ - private boolean isStoreInShards(List shards, long storeId){ + private boolean isStoreInShards(List shards, long storeId) { AtomicBoolean exist = new AtomicBoolean(false); - shards.forEach(s->{ - if ( s.getStoreId() == storeId ) + shards.forEach(s -> { + if (s.getStoreId() == storeId) { exist.set(true); + } }); return exist.get(); } @@ -544,9 +605,9 @@ private boolean isStoreInShards(List shards, long storeId){ * update shard group and cache. * send shard group change message. 
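The allocShards hunk above creates every ShardGroup up front and places replicas with a simple modulo rule: group i starts at store i % stores.size(), the first replica becomes the Leader, and the index wraps around for the remaining followers. A self-contained sketch of that placement rule with plain Java types (the record names and the numbers in main() are illustrative, not the project defaults):

    import java.util.ArrayList;
    import java.util.List;

    public class ShardPlacementSketch {

        enum Role { LEADER, FOLLOWER }

        record Shard(long storeId, Role role) {}
        record ShardGroup(int id, List<Shard> shards) {}

        // Modulo/round-robin placement as in allocShards: group i starts at store (i % storeCount).
        static List<ShardGroup> place(List<Long> storeIds, int partitionCount, int shardCount) {
            List<ShardGroup> groups = new ArrayList<>();
            for (int groupId = 0; groupId < partitionCount; groupId++) {
                int storeIdx = groupId % storeIds.size();
                List<Shard> shards = new ArrayList<>();
                for (int i = 0; i < shardCount; i++) {
                    // The first shard of each group is the initial Leader, the rest are Followers.
                    Role role = (i == 0) ? Role.LEADER : Role.FOLLOWER;
                    shards.add(new Shard(storeIds.get(storeIdx), role));
                    // Advance sequentially and wrap, mirroring the "(storeIdx + 1) >= size ? 0 : ++storeIdx" step.
                    storeIdx = (storeIdx + 1) % storeIds.size();
                }
                groups.add(new ShardGroup(groupId, shards));
            }
            return groups;
        }

        public static void main(String[] args) {
            // 3 stores, 6 groups, 3 replicas each: leaders rotate over store 1, 2, 3, 1, 2, 3.
            place(List.of(1L, 2L, 3L), 6, 3).forEach(System.out::println);
        }
    }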
* - * @param groupId : shard group - * @param shards : shard lists - * @param version: term version, ignored if less than 0 + * @param groupId : shard group + * @param shards : shard lists + * @param version: term version, ignored if less than 0 * @param confVersion : conf version, ignored if less than 0 * @return */ @@ -560,15 +621,15 @@ public synchronized Metapb.ShardGroup updateShardGroup(int groupId, List= 0){ + if (version >= 0) { builder.setVersion(version); } - if (confVersion >= 0){ + if (confVersion >= 0) { builder.setConfVer(confVersion); } - var newGroup = builder.clearShards() .addAllShards(shards) .build(); + var newGroup = builder.clearShards().addAllShards(shards).build(); storeInfoMeta.updateShardGroup(newGroup); partitionService.updateShardGroupCache(newGroup); @@ -583,8 +644,9 @@ public synchronized Metapb.ShardGroup updateShardGroup(int groupId, List shards) throws PDException { @@ -608,6 +670,7 @@ public void shardGroupOp(int groupId, List shards) throws PDExcept /** * 删除 shard group + * * @param groupId shard group id */ public synchronized void deleteShardGroup(int groupId) throws PDException { @@ -622,60 +685,66 @@ public synchronized void deleteShardGroup(int groupId) throws PDException { var shardGroups = getShardGroups(); if (shardGroups != null) { var count1 = pdConfig.getConfigService().getPDConfig().getPartitionCount(); - var maxGroupId = getShardGroups().stream().map(Metapb.ShardGroup::getId).max(Integer::compareTo); + var maxGroupId = + getShardGroups().stream().map(Metapb.ShardGroup::getId).max(Integer::compareTo); if (maxGroupId.get() < count1) { pdConfig.getConfigService().setPartitionCount(maxGroupId.get() + 1); } } } - public synchronized void updateShardGroupState(int groupId, Metapb.PartitionState state) throws PDException { + public synchronized void updateShardGroupState(int groupId, Metapb.PartitionState state) throws + PDException { Metapb.ShardGroup shardGroup = storeInfoMeta.getShardGroup(groupId) - .toBuilder() - .setState(state).build(); + .toBuilder() + .setState(state).build(); storeInfoMeta.updateShardGroup(shardGroup); partitionService.updateShardGroupCache(shardGroup); } /** * 接收Store的心跳 + * * @param storeStats * @throws PDException */ public Metapb.ClusterStats heartBeat(Metapb.StoreStats storeStats) throws PDException { this.storeInfoMeta.updateStoreStats(storeStats); Metapb.Store lastStore = this.getStore(storeStats.getStoreId()); - if (lastStore == null){ + if (lastStore == null) { //store不存在 throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, - String.format("Store id %d does not exist.", storeStats.getStoreId())); + String.format("Store id %d does not exist.", + storeStats.getStoreId())); } - if (lastStore.getState() == Metapb.StoreState.Tombstone){ + if (lastStore.getState() == Metapb.StoreState.Tombstone) { throw new PDException(Pdpb.ErrorType.STORE_HAS_BEEN_REMOVED_VALUE, - String.format("Store id %d is useless since it's state is Tombstone", - storeStats.getStoreId())); + String.format( + "Store id %d is useless since it's state is Tombstone", + storeStats.getStoreId())); } Metapb.Store nowStore; // 如果正在做store下线操作 - if (lastStore.getState() == Metapb.StoreState.Exiting){ + if (lastStore.getState() == Metapb.StoreState.Exiting) { List activeStores = this.getActiveStores(); Map storeMap = new HashMap<>(); activeStores.forEach(store -> { storeMap.put(store.getId(), store); }); // 下线的store的分区为0,说明已经迁移完毕,可以下线,如果非0,则迁移还在进行,需要等待 - if (storeStats.getPartitionCount() > 0 && storeMap.containsKey(storeStats.getStoreId())){ + if 
(storeStats.getPartitionCount() > 0 && + storeMap.containsKey(storeStats.getStoreId())) { nowStore = Metapb.Store.newBuilder(lastStore) - .setStats(storeStats) - .setLastHeartbeat(System.currentTimeMillis()) - .setState(Metapb.StoreState.Exiting).build(); + .setStats(storeStats) + .setLastHeartbeat(System.currentTimeMillis()) + .setState(Metapb.StoreState.Exiting).build(); this.storeInfoMeta.updateStore(nowStore); return this.clusterStats; - }else { + } else { nowStore = Metapb.Store.newBuilder(lastStore) - .setStats(storeStats) - .setLastHeartbeat(System.currentTimeMillis()) - .setState(Metapb.StoreState.Tombstone).build(); + .setStats(storeStats) + .setLastHeartbeat(System.currentTimeMillis()) + .setState(Metapb.StoreState.Tombstone).build(); this.storeInfoMeta.updateStore(nowStore); storeInfoMeta.removeActiveStore(nowStore); return this.clusterStats; @@ -684,19 +753,20 @@ public Metapb.ClusterStats heartBeat(Metapb.StoreStats storeStats) throws PDExce if (lastStore.getState() == Metapb.StoreState.Pending) { nowStore = Metapb.Store.newBuilder(lastStore) - .setStats(storeStats) - .setLastHeartbeat(System.currentTimeMillis()) - .setState(Metapb.StoreState.Pending).build(); + .setStats(storeStats) + .setLastHeartbeat(System.currentTimeMillis()) + .setState(Metapb.StoreState.Pending).build(); this.storeInfoMeta.updateStore(nowStore); return this.clusterStats; } else { if (lastStore.getState() == Metapb.StoreState.Offline) { - this.updateStore(Metapb.Store.newBuilder(lastStore).setState(Metapb.StoreState.Up).build()); + this.updateStore( + Metapb.Store.newBuilder(lastStore).setState(Metapb.StoreState.Up).build()); } nowStore = Metapb.Store.newBuilder(lastStore) - .setState(Metapb.StoreState.Up) - .setStats(storeStats) - .setLastHeartbeat(System.currentTimeMillis()).build(); + .setState(Metapb.StoreState.Up) + .setStats(storeStats) + .setLastHeartbeat(System.currentTimeMillis()).build(); this.storeInfoMeta.updateStore(nowStore); this.storeInfoMeta.keepStoreAlive(nowStore); this.checkStoreStatus(); @@ -704,14 +774,14 @@ public Metapb.ClusterStats heartBeat(Metapb.StoreStats storeStats) throws PDExce } } - public synchronized Metapb.ClusterStats updateClusterStatus(Metapb.ClusterState state){ + public synchronized Metapb.ClusterStats updateClusterStatus(Metapb.ClusterState state) { this.clusterStats = clusterStats.toBuilder().setState(state).build(); return this.clusterStats; } - public Metapb.ClusterStats updateClusterStatus(Metapb.PartitionState state){ + public Metapb.ClusterStats updateClusterStatus(Metapb.PartitionState state) { Metapb.ClusterState cstate = Metapb.ClusterState.Cluster_OK; - switch (state){ + switch (state) { case PState_Normal: cstate = Metapb.ClusterState.Cluster_OK; break; @@ -727,6 +797,7 @@ public Metapb.ClusterStats updateClusterStatus(Metapb.PartitionState state){ } return updateClusterStatus(cstate); } + public Metapb.ClusterStats getClusterStats() { return this.clusterStats; } @@ -738,13 +809,15 @@ public Metapb.ClusterStats getClusterStats() { */ public synchronized void checkStoreStatus() { Metapb.ClusterStats.Builder builder = Metapb.ClusterStats.newBuilder() - .setState(Metapb.ClusterState.Cluster_OK); + .setState( + Metapb.ClusterState.Cluster_OK); try { List activeStores = this.getActiveStores(); if (activeStores.size() < pdConfig.getMinStoreCount()) { builder.setState(Metapb.ClusterState.Cluster_Not_Ready); builder.setMessage("The number of active stores is " + activeStores.size() - + ", less than pd.initial-store-count:" + pdConfig.getMinStoreCount()); + + 
", less than pd.initial-store-count:" + + pdConfig.getMinStoreCount()); } Map storeMap = new HashMap<>(); activeStores.forEach(store -> { @@ -760,7 +833,8 @@ public synchronized void checkStoreStatus() { } if (count * 2 < group.getShardsList().size()) { builder.setState(Metapb.ClusterState.Cluster_Not_Ready); - builder.setMessage("Less than half of active shard, partitionId is " + group.getId()); + builder.setMessage( + "Less than half of active shard, partitionId is " + group.getId()); break; } } @@ -770,39 +844,44 @@ public synchronized void checkStoreStatus() { log.error("StoreNodeService updateClusterStatus exception {}", e); } this.clusterStats = builder.setTimestamp(System.currentTimeMillis()).build(); - if (this.clusterStats.getState() != Metapb.ClusterState.Cluster_OK) + if (this.clusterStats.getState() != Metapb.ClusterState.Cluster_OK) { log.error("The cluster is not ready, {}", this.clusterStats); + } } - public void addStatusListener(StoreStatusListener listener){ + + public void addStatusListener(StoreStatusListener listener) { statusListeners.add(listener); } protected void onStoreRaftAddressChanged(Metapb.Store store) { - log.info("onStoreRaftAddressChanged storeId = {}, new raft addr:", store.getId(), store.getRaftAddress()); + log.info("onStoreRaftAddressChanged storeId = {}, new raft addr:", store.getId(), + store.getRaftAddress()); statusListeners.forEach(e -> { e.onStoreRaftChanged(store); }); } - public void addShardGroupStatusListener(ShardGroupStatusListener listener){ + public void addShardGroupStatusListener(ShardGroupStatusListener listener) { shardGroupStatusListeners.add(listener); } - protected void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, Metapb.StoreState stats){ + protected void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, + Metapb.StoreState stats) { log.info("onStoreStatusChanged storeId = {} from {} to {}", store.getId(), old, stats); - statusListeners.forEach(e->{ + statusListeners.forEach(e -> { e.onStoreStatusChanged(store, old, stats); }); } - protected void onShardGroupStatusChanged(Metapb.ShardGroup group, Metapb.ShardGroup newGroup){ - log.info("onShardGroupStatusChanged, groupId: {}, from {} to {}", group.getId(), group, newGroup); - shardGroupStatusListeners.forEach( e -> e.onShardListChanged(group, newGroup)); + protected void onShardGroupStatusChanged(Metapb.ShardGroup group, Metapb.ShardGroup newGroup) { + log.info("onShardGroupStatusChanged, groupId: {}, from {} to {}", group.getId(), group, + newGroup); + shardGroupStatusListeners.forEach(e -> e.onShardListChanged(group, newGroup)); } protected void onShardGroupOp(Metapb.ShardGroup shardGroup) { log.info("onShardGroupOp, group id: {}, shard group:{}", shardGroup.getId(), shardGroup); - shardGroupStatusListeners.forEach( e -> e.onShardListOp(shardGroup)); + shardGroupStatusListeners.forEach(e -> e.onShardListOp(shardGroup)); } /** @@ -846,19 +925,19 @@ public boolean checkStoreCanOffline(Metapb.Store currentStore) { /** * 对store上的对rocksdb进行compaction + * * @param groupId * @param tableName * @return */ - public synchronized void shardGroupsDbCompaction(int groupId, String tableName) throws PDException { + public synchronized void shardGroupsDbCompaction(int groupId, String tableName) throws + PDException { // 通知所有的store,对rocksdb进行compaction partitionService.fireDbCompaction(groupId, tableName); // TODO 异常怎么处理? 
} - private static String graphSpaceConfPrefix ="HUGEGRAPH/hg/GRAPHSPACE/CONF/"; - public Map getQuota() throws PDException { List graphs = partitionService.getGraphs(); String delimiter = String.valueOf(MetadataKeyHelper.DELIMITER); @@ -949,22 +1028,11 @@ public Map getQuota() throws PDException { return limits; } - public Runnable getQuotaChecker() { return quotaChecker; } - private Runnable quotaChecker = () -> { - try { - getQuota(); - } catch (Exception e) { - log.error( - "obtaining and sending graph space quota information with error: ", - e); - } - }; - - public TaskInfoMeta getTaskInfoMeta(){ + public TaskInfoMeta getTaskInfoMeta() { return taskInfoMeta; } @@ -974,20 +1042,21 @@ public StoreInfoMeta getStoreInfoMeta() { /** * 获得分区的Leader + * * @param partition * @param initIdx * @return */ - public Metapb.Shard getLeader(Metapb.Partition partition, int initIdx){ + public Metapb.Shard getLeader(Metapb.Partition partition, int initIdx) { Metapb.Shard leader = null; try { var shardGroup = this.getShardGroup(partition.getId()); - for(Metapb.Shard shard : shardGroup.getShardsList()){ + for (Metapb.Shard shard : shardGroup.getShardsList()) { if (shard.getRole() == Metapb.ShardRole.Leader) { leader = shard; } } - }catch (Exception e){ + } catch (Exception e) { log.error("get leader error: group id:{}, error:", partition.getId(), e.getMessage()); } return leader; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java index c4981a0c8e..ec7c2eabf6 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd; import com.baidu.hugegraph.pd.grpc.Metapb; @@ -8,6 +25,7 @@ void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, Metapb.StoreState status); void onGraphChange(Metapb.Graph graph, Metapb.GraphState stateOld, - Metapb.GraphState stateNew) ; + Metapb.GraphState stateNew); + void onStoreRaftChanged(Metapb.Store store); } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java index a8aab4d2e6..06c9f8f1d1 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd; import java.util.ArrayList; @@ -15,18 +32,16 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import com.baidu.hugegraph.pd.common.KVPair; -import com.baidu.hugegraph.pd.common.PDException; - import org.apache.hugegraph.pd.config.PDConfig; import org.apache.hugegraph.pd.meta.TaskInfoMeta; import org.apache.hugegraph.pd.raft.RaftEngine; +import com.baidu.hugegraph.pd.common.KVPair; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.MetaTask; import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.meta.TaskInfoMeta; -import com.baidu.hugegraph.pd.raft.RaftEngine; + import lombok.extern.slf4j.Slf4j; @@ -39,34 +54,31 @@ */ @Slf4j public class TaskScheduleService { + private static final String BALANCE_SHARD_KEY = "BALANCE_SHARD_KEY"; private final long TurnOffAndBalanceInterval = 30 * 60 * 1000; //机器下线30后才能进行动态平衡 - private final long BalanceLeaderInterval = 30 * 1000; // leader平衡时间间隔 private final PDConfig pdConfig; - private StoreNodeService storeService; - private PartitionService partitionService; - private ScheduledExecutorService executor; - private TaskInfoMeta taskInfoMeta; - private StoreMonitorDataService storeMonitorDataService; - private KvService kvService; - private LogService logService; + private final long clusterStartTime; // + private final StoreNodeService storeService; + private final PartitionService partitionService; + private final ScheduledExecutorService executor; + private final TaskInfoMeta taskInfoMeta; + private final StoreMonitorDataService storeMonitorDataService; + private final KvService kvService; + private final LogService logService; private long lastStoreTurnoffTime = 0; private long lastBalanceLeaderTime = 0; - private final long clusterStartTime; // - - private static final String BALANCE_SHARD_KEY = "BALANCE_SHARD_KEY"; - // 先按照value排序,再按照key排序 - private Comparator> kvPairComparatorAsc = (o1, o2) -> { - if (o1.getValue() == o2.getValue()){ + private final Comparator> kvPairComparatorAsc = (o1, o2) -> { + if (o1.getValue() == o2.getValue()) { return o1.getKey().compareTo(o2.getKey()); } return o1.getValue().compareTo(o2.getValue()); }; // 先按照value排序(倒序),再按照key排序(升序) - private Comparator> kvPairComparatorDesc = (o1, o2) -> { - if (o1.getValue() == o2.getValue()){ + private final Comparator> kvPairComparatorDesc = (o1, o2) -> { + if (o1.getValue() == o2.getValue()) { return o2.getKey().compareTo(o1.getKey()); } return o2.getValue().compareTo(o1.getValue()); @@ -117,14 +129,17 @@ public void init() { }, 2, 30, TimeUnit.SECONDS); // clean expired monitor data each 10 minutes, delay 3min. 
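The two comparators above order (storeId, count) pairs by value first and fall back to the key so ties break deterministically (ascending for the sorted store list, descending for the priority queue of targets). A rough equivalent using Comparator combinators; Pair is a simplified stand-in for the project's KVPair, and the descending variant here breaks ties on the key ascending, which may differ slightly from the original:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class StableOrderingSketch {

        // Stand-in for KVPair<Long, Integer> (storeId -> shard count), illustrative only.
        record Pair(long storeId, int count) {}

        public static void main(String[] args) {
            List<Pair> pairs = new ArrayList<>(List.of(
                    new Pair(3L, 5), new Pair(1L, 5), new Pair(2L, 2)));

            // Ascending by count, ties broken by storeId (the sorted store list).
            Comparator<Pair> asc = Comparator.comparingInt(Pair::count)
                                             .thenComparingLong(Pair::storeId);
            // Descending by count (the target-count priority queue), ties on storeId.
            Comparator<Pair> desc = Comparator.comparingInt(Pair::count).reversed()
                                              .thenComparingLong(Pair::storeId);

            pairs.sort(asc);
            System.out.println(pairs);  // counts 2, 5, 5 with ties broken by storeId
            pairs.sort(desc);
            System.out.println(pairs);  // counts 5, 5, 2
        }
    }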
- if (isLeader() && this.pdConfig.getStore().isMonitorDataEnabled()){ - executor.scheduleAtFixedRate(()->{ - Long expTill = System.currentTimeMillis() / 1000 - this.pdConfig.getStore().getRetentionPeriod(); - log.debug("monitor data keys before " + expTill + " will be deleted") ; + if (isLeader() && this.pdConfig.getStore().isMonitorDataEnabled()) { + executor.scheduleAtFixedRate(() -> { + Long expTill = System.currentTimeMillis() / 1000 - + this.pdConfig.getStore().getRetentionPeriod(); + log.debug("monitor data keys before " + expTill + " will be deleted"); int records = 0; try { - for (Metapb.Store store : storeService.getStores()){ - int cnt = this.storeMonitorDataService.removeExpiredMonitorData(store.getId(), expTill); + for (Metapb.Store store : storeService.getStores()) { + int cnt = + this.storeMonitorDataService.removeExpiredMonitorData(store.getId(), + expTill); log.debug("store id :{}, records:{}", store.getId(), cnt); records += cnt; } @@ -137,12 +152,14 @@ public void init() { storeService.addStatusListener(new StoreStatusListener() { @Override - public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, Metapb.StoreState status) { - if ( status == Metapb.StoreState.Tombstone ) + public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, + Metapb.StoreState status) { + if (status == Metapb.StoreState.Tombstone) { lastStoreTurnoffTime = System.currentTimeMillis(); + } - if ( status == Metapb.StoreState.Up) { - executor.schedule(()->{ + if (status == Metapb.StoreState.Up) { + executor.schedule(() -> { try { //store 上线后延时1分钟进行leader平衡 balancePartitionLeader(false); } catch (PDException e) { @@ -170,7 +187,11 @@ public void onStoreRaftChanged(Metapb.Store store) { public void shutDown() { executor.shutdownNow(); } - private boolean isLeader(){ return RaftEngine.getInstance().isLeader();} + + private boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + /** * 巡查所有的store,检查是否在线,存储空间是否充足 */ @@ -181,30 +202,32 @@ public List patrolStores() throws PDException { // 检查store在线状态 List stores = storeService.getStores(""); Map activeStores = storeService.getActiveStores("") - .stream().collect(Collectors.toMap(Metapb.Store::getId, t -> t)); + .stream().collect( + Collectors.toMap(Metapb.Store::getId, t -> t)); for (Metapb.Store store : stores) { Metapb.Store changeStore = null; if ((store.getState() == Metapb.StoreState.Up - || store.getState() == Metapb.StoreState.Unknown) - && !activeStores.containsKey(store.getId())) { + || store.getState() == Metapb.StoreState.Unknown) + && !activeStores.containsKey(store.getId())) { // 不在线,修改状态为离线 changeStore = Metapb.Store.newBuilder(store) - .setState(Metapb.StoreState.Offline) - .build(); - - } else if ((store.getState() == Metapb.StoreState.Exiting && !activeStores.containsKey(store.getId())) || - (store.getState() == Metapb.StoreState.Offline && - (System.currentTimeMillis() - store.getLastHeartbeat() > - pdConfig.getStore().getMaxDownTime() * 1000) && - (System.currentTimeMillis() - clusterStartTime > - pdConfig.getStore().getMaxDownTime() * 1000))) { + .setState(Metapb.StoreState.Offline) + .build(); + + } else if ((store.getState() == Metapb.StoreState.Exiting && + !activeStores.containsKey(store.getId())) || + (store.getState() == Metapb.StoreState.Offline && + (System.currentTimeMillis() - store.getLastHeartbeat() > + pdConfig.getStore().getMaxDownTime() * 1000) && + (System.currentTimeMillis() - clusterStartTime > + pdConfig.getStore().getMaxDownTime() * 1000))) { //手工修改为下线或者离线达到时长 // 
修改状态为关机, 增加 checkStoreCanOffline 检测 if (storeService.checkStoreCanOffline(store)) { changeStore = Metapb.Store.newBuilder(store) - .setState(Metapb.StoreState.Tombstone).build(); + .setState(Metapb.StoreState.Tombstone).build(); this.logService.insertLog(LogService.NODE_CHANGE, - LogService.TASK, changeStore); + LogService.TASK, changeStore); log.info("patrolStores store {} Offline", changeStore.getId()); } } @@ -226,8 +249,8 @@ public List patrolPartitions() throws PDException { } // 副本数不一致,重新分配副本 - for(Metapb.ShardGroup group : storeService.getShardGroups()){ - if ( group.getShardsCount() != pdConfig.getPartition().getShardCount()){ + for (Metapb.ShardGroup group : storeService.getShardGroups()) { + if (group.getShardsCount() != pdConfig.getPartition().getShardCount()) { storeService.reallocShards(group); // 避免后面的 balance partition shard 马上执行. kvService.put(BALANCE_SHARD_KEY, "DOING", 180 * 1000); @@ -260,14 +283,15 @@ public List patrolPartitions() throws PDException { * 在Store之间平衡分区的数量 * 机器转为UP半小时后才能进行动态平衡 */ - public synchronized Map> balancePartitionShard() throws PDException { + public synchronized Map> balancePartitionShard() throws + PDException { log.info("balancePartitions starting, isleader:{}", isLeader()); if (!isLeader()) { return null; } - if ( System.currentTimeMillis() - lastStoreTurnoffTime < TurnOffAndBalanceInterval) { + if (System.currentTimeMillis() - lastStoreTurnoffTime < TurnOffAndBalanceInterval) { return null;//机器下线半小时后才能进行动态平衡 } @@ -280,11 +304,12 @@ public synchronized Map> balancePartitionShard() thr // 避免频繁调用. (当改变副本数,需要调整shard list,此时又需要平衡分区)会发送重复的指令。造成结果不可预料。 // 严重会删除掉分区. - if (Objects.equals(kvService.get(BALANCE_SHARD_KEY),"DOING")) { + if (Objects.equals(kvService.get(BALANCE_SHARD_KEY), "DOING")) { return null; } - int totalShards = pdConfig.getConfigService().getPartitionCount() * pdConfig.getPartition().getShardCount(); + int totalShards = pdConfig.getConfigService().getPartitionCount() * + pdConfig.getPartition().getShardCount(); int averageCount = totalShards / activeStores; int remainder = totalShards % activeStores; @@ -303,7 +328,7 @@ public synchronized Map> balancePartitionShard() thr Long storeId = shard.getStoreId(); // 判断每个shard为leaner或者状态非正常状态 if (shard.getRole() == Metapb.ShardRole.Learner - || partition.getState() != Metapb.PartitionState.PState_Normal){ + || partition.getState() != Metapb.PartitionState.PState_Normal) { isLeaner.set(true); } if (partitionMap.containsKey(storeId)) { @@ -311,11 +336,12 @@ public synchronized Map> balancePartitionShard() thr } }); } catch (PDException e) { - log.error("get partition {} shard list error:{}.", partition.getId(), e.getMessage()); + log.error("get partition {} shard list error:{}.", partition.getId(), + e.getMessage()); } }); - if (isLeaner.get()){ + if (isLeaner.get()) { log.warn("balancePartitionShard is doing, skip this balancePartitionShard task"); return null; } @@ -329,7 +355,9 @@ public synchronized Map> balancePartitionShard() thr sortedList.sort(((o1, o2) -> o2.getValue().compareTo(o1.getValue()))); // 最大堆 PriorityQueue> maxHeap = new PriorityQueue<>(sortedList.size(), - (o1, o2) -> o2.getValue().compareTo(o1.getValue())); + (o1, o2) -> o2.getValue() + .compareTo( + o1.getValue())); // 各个副本的 committedIndex Map> committedIndexMap = partitionService.getCommittedIndexStats(); @@ -347,13 +375,15 @@ public synchronized Map> balancePartitionShard() thr // 移除多余的shard, 添加源StoreID. 
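balancePartitionShard first derives a per-store target from totalShards = partitionCount * shardCount, averageCount = totalShards / activeStores and the remainder. How the leftover shards are assigned is not visible in this hunk; the sketch below simply gives the first `remainder` stores one extra so the totals add up (that assignment is an assumption of the sketch):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ShardTargetSketch {

        // Split totalShards across the active stores: each store gets the average,
        // and the first `remainder` stores get one extra (assumption of this sketch).
        static Map<Long, Integer> targets(long[] storeIds, int partitionCount, int shardCount) {
            int totalShards = partitionCount * shardCount;
            int average = totalShards / storeIds.length;
            int remainder = totalShards % storeIds.length;
            Map<Long, Integer> targets = new LinkedHashMap<>();
            for (int i = 0; i < storeIds.length; i++) {
                targets.put(storeIds[i], average + (i < remainder ? 1 : 0));
            }
            return targets;
        }

        public static void main(String[] args) {
            // 12 partitions * 3 replicas = 36 shards over 5 stores -> targets 8, 7, 7, 7, 7.
            System.out.println(targets(new long[]{1, 2, 3, 4, 5}, 12, 3));
        }
    }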
非Leader,并且该分区唯一 if (shards.size() > targetCount) { int movedCount = shards.size() - targetCount; - log.info("balancePartitionShard storeId {}, shardsSize {}, targetCount {}, moveCount {}", + log.info( + "balancePartitionShard storeId {}, shardsSize {}, targetCount {}, " + + "moveCount {}", storeId, shards.size(), targetCount, movedCount); for (Iterator iterator = shards.keySet().iterator(); movedCount > 0 && iterator.hasNext(); ) { Integer id = iterator.next(); - if ( !movedPartitions.containsKey(id)) { + if (!movedPartitions.containsKey(id)) { log.info("store {}, shard of partition {} can be moved", storeId, id); movedPartitions.put(id, new KVPair<>(storeId, 0L)); movedCount--; @@ -361,20 +391,25 @@ public synchronized Map> balancePartitionShard() thr } } else if (shards.size() < targetCount) { int addCount = targetCount - shards.size(); - log.info("balancePartitionShard storeId {}, shardsSize {}, targetCount {}, addCount {}", + log.info( + "balancePartitionShard storeId {}, shardsSize {}, targetCount {}, " + + "addCount {}", storeId, shards.size(), targetCount, addCount); maxHeap.add(new KVPair<>(storeId, addCount)); } } - if (movedPartitions.size() == 0){ - log.warn("movedPartitions is empty, totalShards:{} averageCount:{} remainder:{} sortedList:{}", + if (movedPartitions.size() == 0) { + log.warn( + "movedPartitions is empty, totalShards:{} averageCount:{} remainder:{} " + + "sortedList:{}", totalShards, averageCount, remainder, sortedList); } - Iterator>> moveIterator = movedPartitions.entrySet().iterator(); + Iterator>> moveIterator = + movedPartitions.entrySet().iterator(); while (moveIterator.hasNext()) { - if(maxHeap.size() == 0) { + if (maxHeap.size() == 0) { break; } Map.Entry> moveEntry = moveIterator.next(); @@ -386,16 +421,19 @@ public synchronized Map> balancePartitionShard() thr KVPair pair = maxHeap.poll(); long destStoreId = pair.getKey(); boolean destContains = false; - if (partitionMap.containsKey(destStoreId)) + if (partitionMap.containsKey(destStoreId)) { destContains = partitionMap.get(destStoreId).containsKey(partitionId); + } // 如果目的store已经包含了该partition,则取一下store - if(!destContains) { + if (!destContains) { moveEntry.getValue().setValue(pair.getKey()); - log.info("balancePartitionShard will move partition {} from store {} to store {}", + log.info( + "balancePartitionShard will move partition {} from store {} to store " + + "{}", moveEntry.getKey(), moveEntry.getValue().getKey(), moveEntry.getValue().getValue()); - if(pair.getValue() > 1) { + if (pair.getValue() > 1) { pair.setValue(pair.getValue() - 1); tmpList.add(pair); } @@ -412,10 +450,11 @@ public synchronized Map> balancePartitionShard() thr movedPartitions.forEach((partId, storePair) -> { // 源和目标storeID都不为0 if (storePair.getKey() > 0 && storePair.getValue() > 0) { - partitionService.movePartitionsShard(partId, storePair.getKey(), storePair.getValue()); - }else { + partitionService.movePartitionsShard(partId, storePair.getKey(), + storePair.getValue()); + } else { log.warn("balancePartitionShard key or value is zero, partId:{} storePair:{}", - partId, storePair); + partId, storePair); } }); return movedPartitions; @@ -424,14 +463,16 @@ public synchronized Map> balancePartitionShard() thr /** * 在Store之间平衡分区的Leader的数量 */ - public synchronized Map balancePartitionLeader(boolean immediately) throws PDException { + public synchronized Map balancePartitionLeader(boolean immediately) throws + PDException { Map results = new HashMap<>(); if (!isLeader()) { return results; } - if (!immediately && 
System.currentTimeMillis() - lastBalanceLeaderTime < BalanceLeaderInterval) { + if (!immediately && + System.currentTimeMillis() - lastBalanceLeaderTime < BalanceLeaderInterval) { return results; } lastBalanceLeaderTime = System.currentTimeMillis(); @@ -445,7 +486,7 @@ public synchronized Map balancePartitionLeader(boolean immediatel } // 数据迁移的时候,退出 - if (Objects.equals(kvService.get(BALANCE_SHARD_KEY),"DOING")) { + if (Objects.equals(kvService.get(BALANCE_SHARD_KEY), "DOING")) { throw new PDException(1001, "balance shard is processing, please try later!"); } @@ -458,46 +499,53 @@ public synchronized Map balancePartitionLeader(boolean immediatel shardGroups.forEach(group -> { group.getShardsList().forEach(shard -> { storeShardCount.put(shard.getStoreId(), - storeShardCount.getOrDefault(shard.getStoreId(), 0) + 1); + storeShardCount.getOrDefault(shard.getStoreId(), 0) + 1); }); }); - log.info("balancePartitionLeader, shard group size: {}, by store: {}", shardGroups.size(), storeShardCount); + log.info("balancePartitionLeader, shard group size: {}, by store: {}", shardGroups.size(), + storeShardCount); // 按照 target count, store id稳定排序 - PriorityQueue> targetCount = new PriorityQueue<>(kvPairComparatorDesc); + PriorityQueue> targetCount = + new PriorityQueue<>(kvPairComparatorDesc); var sortedGroups = storeShardCount.entrySet().stream() - .map(entry -> new KVPair<>(entry.getKey(), entry.getValue())) - .sorted(kvPairComparatorAsc) - .collect(Collectors.toList()); + .map(entry -> new KVPair<>(entry.getKey(), + entry.getValue())) + .sorted(kvPairComparatorAsc) + .collect(Collectors.toList()); int sum = 0; for (int i = 0; i < sortedGroups.size() - 1; i++) { // at least one - int v = Math.max(sortedGroups.get(i).getValue() / pdConfig.getPartition().getShardCount(), 1); + int v = Math.max( + sortedGroups.get(i).getValue() / pdConfig.getPartition().getShardCount(), 1); targetCount.add(new KVPair<>(sortedGroups.get(i).getKey(), v)); sum += v; } // 最后一个, 除不尽的情况,保证总数正确 - targetCount.add(new KVPair<>(sortedGroups.get(sortedGroups.size() - 1).getKey(), shardGroups.size() - sum)); + targetCount.add(new KVPair<>(sortedGroups.get(sortedGroups.size() - 1).getKey(), + shardGroups.size() - sum)); log.info("target count: {}", targetCount); - for (var group : shardGroups){ + for (var group : shardGroups) { var map = group.getShardsList().stream() - .collect(Collectors.toMap(Metapb.Shard::getStoreId, shard -> shard)); + .collect(Collectors.toMap(Metapb.Shard::getStoreId, shard -> shard)); var tmpList = new ArrayList>(); // store比较多的情况,可能不包含对应的store id. 
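balancePartitionLeader (above) derives how many leaders each store should own: every store, taken in ascending order of hosted shard count, is targeted at max(shardsOnStore / shardCount, 1), and the last store absorbs shardGroups.size() - sum so the targets add up to the number of shard groups. A standalone sketch of that computation with simplified types (the numbers in main() are illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    public class LeaderTargetSketch {

        record Target(long storeId, int leaders) {}

        // storeShardCount: storeId -> number of shards hosted; shardGroupCount: total raft groups.
        static List<Target> leaderTargets(Map<Long, Integer> storeShardCount,
                                          int shardGroupCount, int shardCount) {
            var sorted = new ArrayList<>(storeShardCount.entrySet());
            sorted.sort(Map.Entry.comparingByValue());        // ascending by hosted shard count

            List<Target> targets = new ArrayList<>();
            int sum = 0;
            for (int i = 0; i < sorted.size() - 1; i++) {
                int v = Math.max(sorted.get(i).getValue() / shardCount, 1);  // at least one leader
                targets.add(new Target(sorted.get(i).getKey(), v));
                sum += v;
            }
            // Last store picks up the rounding difference so the totals match shardGroupCount.
            targets.add(new Target(sorted.get(sorted.size() - 1).getKey(), shardGroupCount - sum));
            return targets;
        }

        public static void main(String[] args) {
            // 12 shard groups, 3 replicas each, spread over 3 stores hosting 12 shards apiece.
            System.out.println(leaderTargets(Map.of(1L, 12, 2L, 12, 3L, 12), 12, 3));  // 4 each
        }
    }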
则先将不符合的store保存到临时列表,直到找到一个合适的store - while (!targetCount.isEmpty()){ + while (!targetCount.isEmpty()) { var pair = targetCount.poll(); var storeId = pair.getKey(); - if (map.containsKey(storeId)){ + if (map.containsKey(storeId)) { if (map.get(storeId).getRole() != Metapb.ShardRole.Leader) { - log.info("shard group{}, store id:{}, set to leader", group.getId(), storeId); + log.info("shard group{}, store id:{}, set to leader", group.getId(), + storeId); partitionService.transferLeader(group.getId(), map.get(storeId)); results.put(group.getId(), storeId); - }else{ - log.info("shard group {}, store id :{}, is leader, no need change", group.getId(), storeId); + } else { + log.info("shard group {}, store id :{}, is leader, no need change", + group.getId(), storeId); } if (pair.getValue() > 1) { @@ -507,7 +555,7 @@ public synchronized Map balancePartitionLeader(boolean immediatel } // 找到了,则处理完成 break; - }else{ + } else { tmpList.add(pair); } } @@ -521,10 +569,10 @@ public synchronized Map balancePartitionLeader(boolean immediatel private long getMaxIndexGap(Map> committedIndexMap, int partitionId) { long maxGap = Long.MAX_VALUE; if (committedIndexMap == null || !committedIndexMap.containsKey(partitionId)) { - return maxGap; + return maxGap; } Map shardMap = committedIndexMap.get(partitionId); - if(shardMap == null || shardMap.size() == 0) { + if (shardMap == null || shardMap.size() == 0) { return maxGap; } List sortedList = new ArrayList<>(); @@ -540,6 +588,7 @@ private long getMaxIndexGap(Map> committedIndexMap, int /** * 执行分区分裂,分为自动分裂和手工分裂 + * * @return * @throws PDException */ @@ -551,8 +600,8 @@ public List splitPartition( } var list = params.stream() - .map(param -> new KVPair<>(param.getPartitionId(), param.getCount())) - .collect(Collectors.toList()); + .map(param -> new KVPair<>(param.getPartitionId(), param.getCount())) + .collect(Collectors.toList()); storeService.splitShardGroups(list); return null; @@ -569,24 +618,30 @@ public List autoSplitPartition() throws PDException { if (!isLeader()) return null; if (Metapb.ClusterState.Cluster_OK != storeService.getClusterStats().getState()) { - if (Metapb.ClusterState.Cluster_Offline == storeService.getClusterStats().getState()) - throw new PDException(Pdpb.ErrorType.Split_Partition_Doing_VALUE, "The data is splitting"); - else + if (Metapb.ClusterState.Cluster_Offline == storeService.getClusterStats().getState()) { + throw new PDException(Pdpb.ErrorType.Split_Partition_Doing_VALUE, + "The data is splitting"); + } else { throw new PDException(Pdpb.ErrorType.Cluster_State_Forbid_Splitting_VALUE, - "The current state of the cluster prohibits splitting data"); + "The current state of the cluster prohibits splitting data"); + } } - //For TEST - // pdConfig.getPartition().setMaxShardsPerStore(pdConfig.getPartition().getMaxShardsPerStore()*2); + //For TEST + // pdConfig.getPartition().setMaxShardsPerStore(pdConfig.getPartition() + // .getMaxShardsPerStore()*2); // 计算集群能能支持的最大split count - int splitCount = pdConfig.getPartition().getMaxShardsPerStore() * storeService.getActiveStores().size() / - (storeService.getShardGroups().size() * pdConfig.getPartition().getShardCount()); + int splitCount = pdConfig.getPartition().getMaxShardsPerStore() * + storeService.getActiveStores().size() / + (storeService.getShardGroups().size() * + pdConfig.getPartition().getShardCount()); if (splitCount < 2) { throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, - "Too many partitions per store, partition.store-max-shard-count = " - + 
pdConfig.getPartition().getMaxShardsPerStore()); + "Too many partitions per store, partition.store-max-shard-count" + + " = " + + pdConfig.getPartition().getMaxShardsPerStore()); } // 每store未达最大分区数,进行分裂 @@ -595,11 +650,12 @@ public List autoSplitPartition() throws PDException { // 设置集群状态为下线 storeService.updateClusterStatus(Metapb.ClusterState.Cluster_Offline); // 修改默认分区数量 - // pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size() * splitCount); + // pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size() * + // splitCount); var list = storeService.getShardGroups().stream() - .map(shardGroup -> new KVPair<>(shardGroup.getId(), splitCount)) - .collect(Collectors.toList()); + .map(shardGroup -> new KVPair<>(shardGroup.getId(), splitCount)) + .collect(Collectors.toList()); storeService.splitShardGroups(list); return null; @@ -609,9 +665,10 @@ public List autoSplitPartition() throws PDException { /** * Store汇报任务状态 * 分区状态发生改变,重新计算分区所在的ShardGroup、图和整个集群的状态 + * * @param task */ - public void reportTask(MetaTask.Task task){ + public void reportTask(MetaTask.Task task) { try { switch (task.getType()) { case Split_Partition: @@ -626,13 +683,14 @@ public void reportTask(MetaTask.Task task){ default: break; } - }catch (Exception e){ + } catch (Exception e) { log.error("Report task exception {}, {}", e, task); } } /** * 对rocksdb进行compaction + * * @throws PDException */ public Boolean dbCompaction(String tableName) throws PDException { @@ -650,9 +708,11 @@ public Boolean dbCompaction(String tableName) throws PDException { /** * 判断是否能把一个store的分区全部迁出,给出判断结果和迁移方案 + * * @author tianxiaohui@baidu.com */ - public Map canAllPartitionsMovedOut(Metapb.Store sourceStore) throws PDException { + public Map canAllPartitionsMovedOut(Metapb.Store sourceStore) throws + PDException { if (!isLeader()) { return null; } @@ -667,29 +727,29 @@ public Map canAllPartitionsMovedOut(Metapb.Store sourceStore) th Map partitionDataSize = new HashMap<>(); // 记录待迁移的分区的数据量 storeService.getActiveStores().forEach(store -> { - if (store.getId() != sourceStore.getId()){ + if (store.getId() != sourceStore.getId()) { otherPartitionMap.put(store.getId(), new HashMap<>()); // 记录其他store的剩余的磁盘空间, 单位为Byte availableDiskSpace.put(store.getId(), store.getStats().getAvailable()); - }else { + } else { resultMap.put("current_store_is_online", true); } }); // 统计待迁移的分区的数据大小 (从storeStats中统计,单位为KB) - for (Metapb.GraphStats graphStats : sourceStore.getStats().getGraphStatsList()){ + for (Metapb.GraphStats graphStats : sourceStore.getStats().getGraphStatsList()) { partitionDataSize.put(graphStats.getPartitionId(), - partitionDataSize.getOrDefault(graphStats.getPartitionId(), 0L) - + graphStats.getApproximateSize()); + partitionDataSize.getOrDefault(graphStats.getPartitionId(), 0L) + + graphStats.getApproximateSize()); } // 给sourcePartitionMap 和 otherPartitionMap赋值 partitionService.getPartitions().forEach(partition -> { try { storeService.getShardList(partition.getId()).forEach(shard -> { long storeId = shard.getStoreId(); - if (storeId == sourceStore.getId()){ + if (storeId == sourceStore.getId()) { sourcePartitionMap.get(storeId).put(partition.getId(), shard.getRole()); - }else{ - if (otherPartitionMap.containsKey(storeId)){ + } else { + if (otherPartitionMap.containsKey(storeId)) { otherPartitionMap.get(storeId).put(partition.getId(), shard.getRole()); } } @@ -701,48 +761,54 @@ public Map canAllPartitionsMovedOut(Metapb.Store sourceStore) th }); // 统计待移除的分区:即源store上面的所有分区 Map> movedPartitions = new 
HashMap<>(); - for (Map.Entry entry : sourcePartitionMap.get(sourceStore.getId()).entrySet()){ + for (Map.Entry entry : sourcePartitionMap.get( + sourceStore.getId()).entrySet()) { movedPartitions.put(entry.getKey(), new KVPair<>(sourceStore.getId(), 0L)); } // 统计其他store的分区数量, 用小顶堆保存,以便始终把分区数量较少的store优先考虑 PriorityQueue> minHeap = new PriorityQueue<>(otherPartitionMap.size(), - (o1, o2) -> o1.getValue().compareTo(o2.getValue())); + (o1, o2) -> o1.getValue() + .compareTo( + o2.getValue())); otherPartitionMap.forEach((storeId, shards) -> { minHeap.add(new KVPair(storeId, shards.size())); }); // 遍历待迁移的分区,优先迁移到分区比较少的store - Iterator>> moveIterator = movedPartitions.entrySet().iterator(); - while (moveIterator.hasNext()){ + Iterator>> moveIterator = + movedPartitions.entrySet().iterator(); + while (moveIterator.hasNext()) { Map.Entry> moveEntry = moveIterator.next(); int partitionId = moveEntry.getKey(); List> tmpList = new ArrayList<>(); // 记录已经弹出优先队列的元素 - while(minHeap.size() > 0) { - KVPair pair = minHeap.poll(); //弹出首个元素 + while (minHeap.size() > 0) { + KVPair pair = minHeap.poll(); //弹出首个元素 long storeId = pair.getKey(); int partitionCount = pair.getValue(); Map shards = otherPartitionMap.get(storeId); final int unitRate = 1024; // 平衡不同存储单位的进率 if ((!shards.containsKey(partitionId)) && ( availableDiskSpace.getOrDefault(storeId, 0L) / unitRate >= - partitionDataSize.getOrDefault(partitionId, 0L))){ + partitionDataSize.getOrDefault(partitionId, 0L))) { // 如果目标store上面不包含该分区,且目标store剩余空间能容纳该分区,则进行迁移 moveEntry.getValue().setValue(storeId); //设置移动的目标store log.info("plan to move partition {} to store {}, " + - "available disk space {}, current partitionSize:{}", - partitionId, - storeId, - availableDiskSpace.getOrDefault(storeId, 0L) / unitRate, - partitionDataSize.getOrDefault(partitionId, 0L) + "available disk space {}, current partitionSize:{}", + partitionId, + storeId, + availableDiskSpace.getOrDefault(storeId, 0L) / unitRate, + partitionDataSize.getOrDefault(partitionId, 0L) ); // 更新该store预期的剩余空间 availableDiskSpace.put(storeId, availableDiskSpace.getOrDefault(storeId, 0L) - - partitionDataSize.getOrDefault(partitionId, 0L) * unitRate); + - partitionDataSize.getOrDefault(partitionId, + 0L) * + unitRate); // 更新统计变量中该store的分区数量 partitionCount += 1; pair.setValue(partitionCount); tmpList.add(pair); break; - }else{ + } else { tmpList.add(pair); } } @@ -750,15 +816,15 @@ public Map canAllPartitionsMovedOut(Metapb.Store sourceStore) th } //检查是否未存在未分配目标store的分区 List remainPartitions = new ArrayList<>(); - movedPartitions.forEach((partId, storePair) ->{ - if (storePair.getValue() == 0L){ + movedPartitions.forEach((partId, storePair) -> { + if (storePair.getValue() == 0L) { remainPartitions.add(partId); } }); if (remainPartitions.size() > 0) { resultMap.put("flag", false); resultMap.put("movedPartitions", null); - }else{ + } else { resultMap.put("flag", true); resultMap.put("movedPartitions", movedPartitions); } @@ -766,7 +832,8 @@ public Map canAllPartitionsMovedOut(Metapb.Store sourceStore) th } - public Map> movePartitions(Map> movedPartitions) { + public Map> movePartitions( + Map> movedPartitions) { if (!isLeader()) { return null; } @@ -775,7 +842,8 @@ public Map> movePartitions(Map { // 源和目标storeID都不为0 if (storePair.getKey() > 0 && storePair.getValue() > 0) { - partitionService.movePartitionsShard(partId, storePair.getKey(), storePair.getValue()); + partitionService.movePartitionsShard(partId, storePair.getKey(), + storePair.getValue()); } }); return movedPartitions; diff --git 
a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java index f1efccb1b6..a288e50b64 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java @@ -1,23 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.config; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + import org.apache.hugegraph.pd.ConfigService; import org.apache.hugegraph.pd.IdService; -import lombok.Data; import org.springframework.beans.factory.annotation.Autowired; - import org.springframework.beans.factory.annotation.Value; import org.springframework.context.annotation.Configuration; import org.springframework.stereotype.Component; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; +import lombok.Data; /** * PD配置文件 + * * @author: yanjinbing * @date: 2021/10/20 */ @@ -57,6 +75,8 @@ public class PDConfig { private Discovery discovery; private Map initialStoreMap = null; + private ConfigService configService; + private IdService idService; public Map getInitialStoreMap() { if (initialStoreMap == null) { @@ -71,11 +91,28 @@ public Map getInitialStoreMap() { /** * 初始分区数量 * Store数量 * 每Store最大副本数 /每分区副本数 - * @return + * + * @return */ - public int getInitialPartitionCount(){ + public int getInitialPartitionCount() { return getInitialStoreMap().size() * partition.getMaxShardsPerStore() - / partition.getShardCount(); + / partition.getShardCount(); + } + + public ConfigService getConfigService() { + return configService; + } + + public void setConfigService(ConfigService configService) { + this.configService = configService; + } + + public IdService getIdService() { + return idService; + } + + public void setIdService(IdService idService) { + this.idService = idService; } @Data @@ -102,12 +139,15 @@ public class Raft { private long clusterId; // 集群ID @Value("${grpc.port}") private int grpcPort; - public String getGrpcAddress(){ return host + ":" + grpcPort;} + + public String getGrpcAddress() { + return host + ":" + grpcPort; + } } @Data @Configuration - public class Store{ + public class Store { // store 心跳超时时间 @Value("${store.keepAlive-timeout:300}") private long keepAliveTimeout = 300; @@ -126,33 +166,35 @@ public class Store{ /** * interval -> seconds. * minimum value is 1 seconds. 
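getInitialPartitionCount (above) computes the initial partition count as initialStoreCount * maxShardsPerStore / shardCount. A tiny worked example of that arithmetic; the three input values below are made up, not the project defaults:

    public class InitialPartitionCountSketch {
        public static void main(String[] args) {
            int initialStoreCount = 3;      // size of the initial-store-map (illustrative)
            int maxShardsPerStore = 12;     // partition.store-max-shard-count (illustrative)
            int shardCount = 3;             // partition.default-shard-count (illustrative)
            // Same formula as PDConfig.getInitialPartitionCount():
            int initialPartitionCount = initialStoreCount * maxShardsPerStore / shardCount;
            System.out.println(initialPartitionCount);  // 12
        }
    }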
+ * * @return the seconds of the interval */ - public Long getMonitorInterval(){ + public Long getMonitorInterval() { return parseTimeExpression(this.monitorDataInterval); } /** - * the monitor data that saved in rocksdb, will be deleted - * out of period + * the monitor data that saved in rocksdb, will be deleted + * out of period * * @return the period of the monitor data should keep */ - public Long getRetentionPeriod(){ + public Long getRetentionPeriod() { return parseTimeExpression(this.monitorDataRetention); } /** * parse time expression , support pattern: - * [1-9][ ](second, minute, hour, day, month, year) - * unit could not be null, the number part is 1 by default. + * [1-9][ ](second, minute, hour, day, month, year) + * unit could not be null, the number part is 1 by default. * * @param exp * @return seconds value of the expression. 1 will return by illegal expression */ - private Long parseTimeExpression(String exp){ + private Long parseTimeExpression(String exp) { if (exp != null) { - Pattern pattern = Pattern.compile("(?(\\d+)*)(\\s)*(?(second|minute|hour|day|month|year)$)"); + Pattern pattern = Pattern.compile( + "(?(\\d+)*)(\\s)*(?(second|minute|hour|day|month|year)$)"); Matcher matcher = pattern.matcher(exp.trim()); if (matcher.find()) { String n = matcher.group("n"); @@ -194,7 +236,7 @@ private Long parseTimeExpression(String exp){ @Data @Configuration - public class Partition{ + public class Partition { private int totalCount = 0; // 每个Store最大副本数 @@ -205,40 +247,24 @@ public class Partition{ @Value("${partition.default-shard-count:3}") private int shardCount = 3; - public void setTotalCount(int totalCount){ - this.totalCount = totalCount; - } public int getTotalCount() { - if ( totalCount == 0 ) { + if (totalCount == 0) { totalCount = getInitialPartitionCount(); } return totalCount; } + + public void setTotalCount(int totalCount) { + this.totalCount = totalCount; + } } @Data @Configuration - public class Discovery{ + public class Discovery { // 客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 @Value("${discovery.heartbeat-try-count:3}") private int heartbeatOutTimes = 3; } - private ConfigService configService; - - private IdService idService; - - public void setConfigService(ConfigService configService) { - this.configService = configService; - } - public ConfigService getConfigService(){ return configService; } - - public IdService getIdService() { - return idService; - } - - public void setIdService(IdService idService) { - this.idService = idService; - } - } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java index c26450d7f4..a53c13632f 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java @@ -1,14 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
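parseTimeExpression (reformatted above) accepts expressions such as "30 second" or "hour", defaults the number part to 1, converts the result to seconds, and falls back to 1 on an illegal expression; its pattern uses named groups n and unit (the (?<n>…) / (?<unit>…) angle brackets appear to have been stripped in this rendering). A small sketch of the same parsing idea; the unit-to-seconds table is an assumption of the sketch and omits month/year for brevity:

    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class TimeExpressionSketch {

        // Same shape as the pattern in PDConfig.Store.parseTimeExpression: an optional number,
        // optional whitespace, then a required unit captured by a named group.
        private static final Pattern EXP = Pattern.compile(
                "(?<n>(\\d+)*)(\\s)*(?<unit>(second|minute|hour|day|month|year)$)");

        // Conversion factors are an assumption of this sketch (only unambiguous units shown).
        private static final Map<String, Long> UNIT_SECONDS = Map.of(
                "second", 1L, "minute", 60L, "hour", 3600L, "day", 86400L);

        static long toSeconds(String exp) {
            Matcher m = EXP.matcher(exp.trim());
            if (m.find()) {
                String n = m.group("n");
                long count = (n == null || n.isEmpty()) ? 1 : Long.parseLong(n);  // number defaults to 1
                Long unit = UNIT_SECONDS.get(m.group("unit"));
                if (unit != null) {
                    return count * unit;
                }
            }
            return 1L;  // illegal expression falls back to 1, as the javadoc above describes
        }

        public static void main(String[] args) {
            System.out.println(toSeconds("30 second"));  // 30
            System.out.println(toSeconds("minute"));     // 60
            System.out.println(toSeconds("bogus"));      // 1
        }
    }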
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.meta; -import com.baidu.hugegraph.pd.common.PDException; +import java.util.List; +import java.util.Optional; import org.apache.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Metapb; -import java.util.List; -import java.util.Optional; - public class ConfigMetaStore extends MetadataRocksDBStore { @@ -18,6 +34,7 @@ public ConfigMetaStore(PDConfig pdConfig) { super(pdConfig); this.clusterId = pdConfig.getClusterId(); } + /** * 更新图空间存储状态信息 * @@ -29,25 +46,28 @@ public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDEx put(graphSpaceKey, graphSpace.toByteArray()); return graphSpace; } + public List getGraphSpace(String graphSpace) throws PDException { byte[] graphSpaceKey = MetadataKeyHelper.getGraphSpaceKey(graphSpace); return scanPrefix(Metapb.GraphSpace.parser(), graphSpaceKey); } public Metapb.PDConfig setPdConfig(Metapb.PDConfig pdConfig) throws PDException { - byte[] graphSpaceKey = MetadataKeyHelper.getPdConfigKey(String.valueOf(pdConfig.getVersion())); + byte[] graphSpaceKey = + MetadataKeyHelper.getPdConfigKey(String.valueOf(pdConfig.getVersion())); Metapb.PDConfig config = Metapb.PDConfig.newBuilder( pdConfig).setTimestamp(System.currentTimeMillis()).build(); put(graphSpaceKey, config.toByteArray()); return config; } + public Metapb.PDConfig getPdConfig(long version) throws PDException { byte[] graphSpaceKey = MetadataKeyHelper.getPdConfigKey(version <= 0 ? null : - String.valueOf(version)); + String.valueOf(version)); Optional max = scanPrefix( Metapb.PDConfig.parser(), graphSpaceKey).stream().max( (o1, o2) -> (o1.getVersion() > o2.getVersion()) ? 1 : -1); - return max.isPresent()? max.get() : null; + return max.isPresent() ? max.get() : null; } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java index b0a508fb79..d9aa5c5e85 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java @@ -1,19 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
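getPdConfig (above) scans every stored Metapb.PDConfig under the prefix and returns the entry with the highest version, or null when nothing is stored. The same selection expressed with a standard comparator instead of the hand-written ternary; PdConfig here is a plain stand-in record, not the protobuf type:

    import java.util.Comparator;
    import java.util.List;
    import java.util.Optional;

    public class LatestConfigSketch {

        // Stand-in for Metapb.PDConfig (illustrative only).
        record PdConfig(long version, int partitionCount) {}

        static PdConfig latest(List<PdConfig> scanned) {
            Optional<PdConfig> max = scanned.stream()
                                            .max(Comparator.comparingLong(PdConfig::version));
            return max.orElse(null);  // same result as "max.isPresent() ? max.get() : null"
        }

        public static void main(String[] args) {
            System.out.println(latest(List.of(new PdConfig(1, 12), new PdConfig(3, 24),
                                              new PdConfig(2, 12))));  // version 3 wins
        }
    }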
+ */ + package org.apache.hugegraph.pd.meta; -import com.baidu.hugegraph.pd.common.PDException; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import org.apache.commons.lang3.StringUtils; import org.apache.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; import com.baidu.hugegraph.pd.grpc.discovery.Query; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.lang3.StringUtils; - -import java.util.LinkedList; -import java.util.List; -import java.util.Map; +import lombok.extern.slf4j.Slf4j; /** * @author zhangyingjie @@ -22,19 +38,19 @@ @Slf4j public class DiscoveryMetaStore extends MetadataRocksDBStore { + //appName --> address --> registryInfo + private static final String PREFIX = "REGIS-"; + private static final String SPLITTER = "-"; public DiscoveryMetaStore(PDConfig pdConfig) { super(pdConfig); } - //appName --> address --> registryInfo - private static final String PREFIX ="REGIS-"; - private static final String SPLITTER ="-"; public void register(NodeInfo nodeInfo, int outTimes) throws PDException { putWithTTL(toKey(nodeInfo.getAppName(), nodeInfo.getVersion(), nodeInfo.getAddress()), - nodeInfo.toByteArray(),(nodeInfo.getInterval() / 1000) * outTimes); + nodeInfo.toByteArray(), (nodeInfo.getInterval() / 1000) * outTimes); } - byte[] toKey(String appName,String version,String address){ + byte[] toKey(String appName, String version, String address) { StringBuilder builder = getPrefixBuilder(appName, version); builder.append(SPLITTER); builder.append(address); @@ -44,11 +60,11 @@ byte[] toKey(String appName,String version,String address){ private StringBuilder getPrefixBuilder(String appName, String version) { StringBuilder builder = new StringBuilder(); builder.append(PREFIX); - if (!StringUtils.isEmpty(appName)){ + if (!StringUtils.isEmpty(appName)) { builder.append(appName); builder.append(SPLITTER); } - if (!StringUtils.isEmpty(version)){ + if (!StringUtils.isEmpty(version)) { builder.append(version); } return builder; @@ -64,22 +80,23 @@ public NodeInfos getNodes(Query query) { builder.toString().getBytes()); builder.setLength(0); } catch (PDException e) { - log.error("An error occurred getting data from the store,{}",e); + log.error("An error occurred getting data from the store,{}", e); } if (query.getLabelsMap() != null && !query.getLabelsMap().isEmpty()) { - List result =new LinkedList(); - for (NodeInfo node:nodeInfos) { - if (labelMatch(node,query)) result.add(node); + List result = new LinkedList(); + for (NodeInfo node : nodeInfos) { + if (labelMatch(node, query)) result.add(node); } return NodeInfos.newBuilder().addAllInfo(result).build(); } return NodeInfos.newBuilder().addAllInfo(nodeInfos).build(); } - private boolean labelMatch(NodeInfo node,Query query){ + + private boolean labelMatch(NodeInfo node, Query query) { Map labelsMap = node.getLabelsMap(); - for (Map.Entry entry:query.getLabelsMap().entrySet()) { - if (!entry.getValue().equals(labelsMap.get(entry.getKey()))){ + for (Map.Entry entry : query.getLabelsMap().entrySet()) { + if (!entry.getValue().equals(labelsMap.get(entry.getKey()))) { return false; } } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java index 70878bb009..5265569a1b 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java +++ 
b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java @@ -1,13 +1,21 @@ -package org.apache.hugegraph.pd.meta; - -import com.baidu.hugegraph.pd.common.PDException; - -import org.apache.hugegraph.pd.config.PDConfig; -import org.apache.hugegraph.pd.store.KV; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.caucho.hessian.io.Hessian2Input; -import com.caucho.hessian.io.Hessian2Output; -import lombok.extern.slf4j.Slf4j; +package org.apache.hugegraph.pd.meta; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -17,6 +25,15 @@ import java.util.List; import java.util.concurrent.ConcurrentHashMap; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.store.KV; + +import com.baidu.hugegraph.pd.common.PDException; +import com.caucho.hessian.io.Hessian2Input; +import com.caucho.hessian.io.Hessian2Output; + +import lombok.extern.slf4j.Slf4j; + /** * 自增id的实现类 */ @@ -24,26 +41,34 @@ public class IdMetaStore extends MetadataRocksDBStore { + private static final String ID_PREFIX = "@ID@"; + private static final String CID_PREFIX = "@CID@"; + private static final String CID_SLOT_PREFIX = "@CID_SLOT@"; + private static final String CID_DEL_SLOT_PREFIX = "@CID_DEL_SLOT@"; + private static final String SEPARATOR = "@"; + private static final ConcurrentHashMap SEQUENCES = new ConcurrentHashMap<>(); + public static long CID_DEL_TIMEOUT = 24 * 3600 * 1000; private final long clusterId; - public IdMetaStore(PDConfig pdConfig) { super(pdConfig); this.clusterId = pdConfig.getClusterId(); } - private static final String ID_PREFIX = "@ID@"; - private static final String CID_PREFIX = "@CID@"; - private static final String CID_SLOT_PREFIX = "@CID_SLOT@"; - - private static final String CID_DEL_SLOT_PREFIX = "@CID_DEL_SLOT@"; - - public static long CID_DEL_TIMEOUT = 24 * 3600 * 1000; + public static long bytesToLong(byte[] b) { + ByteBuffer buf = ByteBuffer.wrap(b); + return buf.getLong(); + } - private static final String SEPARATOR = "@"; - private static final ConcurrentHashMap SEQUENCES = new ConcurrentHashMap<>(); + public static byte[] longToBytes(long l) { + ByteBuffer buf = ByteBuffer.wrap(new byte[Long.SIZE]); + buf.putLong(l); + buf.flip(); + return buf.array(); + } /** * 获取自增id + * * @param key * @param delta * @return @@ -61,7 +86,7 @@ public long getId(String key, int delta) throws PDException { } } - private Object getLock(String key){ + private Object getLock(String key) { Object probableLock = new Object(); Object currentLock = SEQUENCES.putIfAbsent(key, probableLock); if (currentLock != null) { @@ -70,7 +95,6 @@ private Object getLock(String key){ return probableLock; } - public void resetId(String key) throws PDException { Object probableLock = new Object(); Object 
currentLock = SEQUENCES.putIfAbsent(key, probableLock); @@ -86,18 +110,17 @@ public void resetId(String key) throws PDException { /** * 在删除name标识的cid的24小时内重复申请同一个name的cid保持同一值 * 如此设计为了防止缓存的不一致,造成数据错误 + * * @param key - * @param name cid 标识 + * @param name cid 标识 * @param max * @return * @throws PDException */ public long getCId(String key, String name, long max) throws PDException { // 检测是否有过期的cid,删除图的频率比较低,此处对性能影响不大 - byte[] delKeyPrefix = new StringBuffer() - .append(CID_DEL_SLOT_PREFIX) - .append(key).append(SEPARATOR) - .toString().getBytes(Charset.defaultCharset()); + byte[] delKeyPrefix = (CID_DEL_SLOT_PREFIX + + key + SEPARATOR).getBytes(Charset.defaultCharset()); synchronized (this) { scanPrefix(delKeyPrefix).forEach(kv -> { long[] value = (long[]) deserialize(kv.getValue()); @@ -120,8 +143,9 @@ public long getCId(String key, String name, long max) throws PDException { // 从延迟删除队列删除 remove(cidDelayKey); return ((long[]) deserialize(value))[0]; - } else + } else { return getCId(key, max); + } } } @@ -133,10 +157,12 @@ public long delCIdDelay(String key, String name, long cid) throws PDException { put(delKey, serialize(new long[]{cid, System.currentTimeMillis()})); return cid; } + /** * 获取自增循环不重复id, 达到上限后从0开始自增 + * * @param key - * @param max id上限,达到该值后,重新从0开始自增 + * @param max id上限,达到该值后,重新从0开始自增 * @return * @throws PDException */ @@ -150,20 +176,22 @@ public long getCId(String key, long max) throws PDException { { // 查找一个未使用的cid List kvs = scanRange(genCIDSlotKey(key, current), genCIDSlotKey(key, max)); for (KV kv : kvs) { - if (current == bytesToLong(kv.getValue())) + if (current == bytesToLong(kv.getValue())) { current++; - else + } else { break; + } } } if (current == max) { current = 0; List kvs = scanRange(genCIDSlotKey(key, current), genCIDSlotKey(key, last)); for (KV kv : kvs) { - if (current == bytesToLong(kv.getValue())) + if (current == bytesToLong(kv.getValue())) { current++; - else + } else { break; + } } } if (current == last) return -1; @@ -173,7 +201,7 @@ public long getCId(String key, long max) throws PDException { } } - private byte[] genCIDSlotKey(String key, long value){ + private byte[] genCIDSlotKey(String key, long value) { byte[] keySlot = (CID_SLOT_PREFIX + key + SEPARATOR).getBytes(Charset.defaultCharset()); ByteBuffer buf = ByteBuffer.allocate(keySlot.length + Long.SIZE); buf.put(keySlot); @@ -181,17 +209,16 @@ private byte[] genCIDSlotKey(String key, long value){ return buf.array(); } - private byte[] getCIDDelayKey(String key, String name){ - byte[] bsKey = new StringBuffer() - .append(CID_DEL_SLOT_PREFIX) - .append(key).append(SEPARATOR) - .append(name) - .toString().getBytes(Charset.defaultCharset()); + private byte[] getCIDDelayKey(String key, String name) { + byte[] bsKey = (CID_DEL_SLOT_PREFIX + + key + SEPARATOR + + name).getBytes(Charset.defaultCharset()); return bsKey; } /** * 删除一个循环id,释放id值 + * * @param key * @param cid * @return @@ -201,18 +228,6 @@ public long delCId(String key, long cid) throws PDException { return remove(genCIDSlotKey(key, cid)); } - public static long bytesToLong(byte[] b) { - ByteBuffer buf = ByteBuffer.wrap(b); - return buf.getLong(); - } - - public static byte[] longToBytes(long l) { - ByteBuffer buf = ByteBuffer.wrap(new byte[Long.SIZE]); - buf.putLong(l); - buf.flip(); - return buf.array(); - } - private byte[] serialize(Object obj) { try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { Hessian2Output output = new Hessian2Output(bos); diff --git 
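The cyclic-ID methods above are easiest to read as a contract: getCId(key, name, max) hands out the first free slot below max (wrapping back to 0 when the ceiling is reached), delCIdDelay parks the released value for 24 hours so that the same name gets the same id back if it is re-created inside that window (this is what keeps caches consistent), and delCId frees a slot immediately. A minimal sketch, assuming an initialized core PDConfig; the key "GraphID" and the 0xFFFE ceiling mirror the constants used by PartitionMeta later in this patch:

import org.apache.hugegraph.pd.config.PDConfig;
import org.apache.hugegraph.pd.meta.IdMetaStore;

import com.baidu.hugegraph.pd.common.PDException;

public class CyclicIdSketch {
    static void demo(PDConfig pdConfig) throws PDException {
        IdMetaStore ids = new IdMetaStore(pdConfig);

        long a = ids.getCId("GraphID", "graph-a", 0xFFFE);   // first free slot
        long b = ids.getCId("GraphID", "graph-b", 0xFFFE);   // next free slot

        // Park graph-a's id instead of freeing it; for 24 hours the same name
        // is handed the same value back out of the delay queue.
        ids.delCIdDelay("GraphID", "graph-a", a);
        long a2 = ids.getCId("GraphID", "graph-a", 0xFFFE);  // == a

        ids.delCId("GraphID", b);                            // release slot b for reuse right away
    }
}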
a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java index 70975f95c5..f97529f17e 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java @@ -1,20 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.meta; -import com.baidu.hugegraph.pd.common.PDException; +import java.util.List; import org.apache.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Metapb; -import java.util.List; - /** * @author zhangyingjie * @date 2022/3/29 **/ public class LogMeta extends MetadataRocksDBStore { - private PDConfig pdConfig; + private final PDConfig pdConfig; public LogMeta(PDConfig pdConfig) { super(pdConfig); @@ -30,8 +46,8 @@ public void insertLog(Metapb.LogRecord record) throws PDException { public List getLog(String action, Long start, Long end) throws PDException { byte[] keyStart = MetadataKeyHelper.getLogKeyPrefix(action, start); byte[] keyEnd = MetadataKeyHelper.getLogKeyPrefix(action, end); - List stores =this.scanRange(Metapb.LogRecord.parser(), - keyStart, keyEnd); + List stores = this.scanRange(Metapb.LogRecord.parser(), + keyStart, keyEnd); return stores; } } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java index 1ee7dd59bd..b8e824c3c5 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
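LogMeta above is a plain time-indexed store: records are keyed as LOG_RECORD/{action}/{timestamp} (per the comment on getLogKey), so a window query is a single range scan between two prefix keys. A sketch of writing and querying one action's records, assuming millisecond timestamps, standard protobuf builder setters for LogRecord (only the getters appear in the patch), and that getLog returns Metapb.LogRecord elements:

import java.util.List;

import org.apache.hugegraph.pd.config.PDConfig;
import org.apache.hugegraph.pd.meta.LogMeta;

import com.baidu.hugegraph.pd.common.PDException;
import com.baidu.hugegraph.pd.grpc.Metapb;

public class LogMetaSketch {
    static List<Metapb.LogRecord> lastHour(PDConfig pdConfig, String action) throws PDException {
        LogMeta logMeta = new LogMeta(pdConfig);
        long now = System.currentTimeMillis();
        logMeta.insertLog(Metapb.LogRecord.newBuilder()
                                          .setAction(action)
                                          .setTimestamp(now)
                                          .build());
        // The key embeds the action and the timestamp, so the last hour is one range scan.
        return logMeta.getLog(action, now - 3_600_000L, now + 1);
    }
}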
+ */ + package org.apache.hugegraph.pd.meta; import org.apache.hugegraph.pd.config.PDConfig; @@ -13,10 +30,10 @@ public class MetadataFactory { private static HgKVStore store = null; - public static HgKVStore getStore(PDConfig pdConfig){ - if ( store == null ){ - synchronized (MetadataFactory.class){ - if ( store == null ) { + public static HgKVStore getStore(PDConfig pdConfig) { + if (store == null) { + synchronized (MetadataFactory.class) { + if (store == null) { HgKVStore proto = new HgKVStoreImpl(); //proto.init(pdConfig); store = pdConfig.getRaft().isEnable() ? @@ -29,9 +46,10 @@ public static HgKVStore getStore(PDConfig pdConfig){ return store; } - public static void closeStore(){ - if ( store != null ) + public static void closeStore() { + if (store != null) { store.close(); + } } public static StoreInfoMeta newStoreInfoMeta(PDConfig pdConfig) { @@ -41,16 +59,22 @@ public static StoreInfoMeta newStoreInfoMeta(PDConfig pdConfig) { public static PartitionMeta newPartitionMeta(PDConfig pdConfig) { return new PartitionMeta(pdConfig); } + public static IdMetaStore newHugeServerMeta(PDConfig pdConfig) { return new IdMetaStore(pdConfig); } + public static DiscoveryMetaStore newDiscoveryMeta(PDConfig pdConfig) { return new DiscoveryMetaStore(pdConfig); } + public static ConfigMetaStore newConfigMeta(PDConfig pdConfig) { return new ConfigMetaStore(pdConfig); } - public static TaskInfoMeta newTaskInfoMeta(PDConfig pdConfig) { return new TaskInfoMeta(pdConfig);} + + public static TaskInfoMeta newTaskInfoMeta(PDConfig pdConfig) { + return new TaskInfoMeta(pdConfig); + } public static QueueStore newQueueStore(PDConfig pdConfig) { diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java index 7ad150e490..226a75209c 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java @@ -1,9 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
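MetadataFactory.getStore above uses the classic double-checked locking shape: an unsynchronized fast path, then a synchronized re-check before the single shared HgKVStore is built (which variant depends on whether raft is enabled), so every *Meta class created by the factory ends up on the same underlying store. For reference, the textbook form of that pattern as a small generic holder; the volatile field is what makes the lock-free first read safe under the Java memory model. This is a standalone illustration, not a drop-in replacement for the factory:

import java.util.function.Supplier;

public final class LazySingleton<T> {

    private final Supplier<T> supplier;
    private volatile T instance;             // volatile: safe publication for the lock-free read

    public LazySingleton(Supplier<T> supplier) {
        this.supplier = supplier;
    }

    public T get() {
        T local = instance;                  // fast path once initialized
        if (local == null) {
            synchronized (this) {
                local = instance;            // re-check inside the lock
                if (local == null) {
                    instance = local = supplier.get();
                }
            }
        }
        return local;
    }
}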
+ */ + package org.apache.hugegraph.pd.meta; -import com.baidu.hugegraph.pd.grpc.Metapb; +import java.nio.charset.Charset; + import org.apache.commons.lang3.StringUtils; -import java.nio.charset.Charset; +import com.baidu.hugegraph.pd.grpc.Metapb; public class MetadataKeyHelper { @@ -30,146 +48,146 @@ public class MetadataKeyHelper { public static byte[] getStoreInfoKey(final long storeId) { //STORE/{storeId} String key = StringBuilderHelper.get() - .append(STORE).append(DELIMITER) - .append(storeId) - .toString(); + .append(STORE).append(DELIMITER) + .append(storeId) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getActiveStoreKey(final long storeId) { //ACTIVESTORE/{storeId} String key = StringBuilderHelper.get() - .append(ACTIVESTORE).append(DELIMITER) - .append(storeId) - .toString(); + .append(ACTIVESTORE).append(DELIMITER) + .append(storeId) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getActiveStorePrefix() { //ACTIVESTORE String key = StringBuilderHelper.get() - .append(ACTIVESTORE).append(DELIMITER) - .toString(); + .append(ACTIVESTORE).append(DELIMITER) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getStorePrefix() { //STORE String key = StringBuilderHelper.get() - .append(STORE).append(DELIMITER) - .toString(); + .append(STORE).append(DELIMITER) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getStoreStatusKey(final long storeId) { //STORESTATUS/{storeId} String key = StringBuilderHelper.get() - .append(STORESTATUS).append(DELIMITER) - .append(storeId) - .toString(); + .append(STORESTATUS).append(DELIMITER) + .append(storeId) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getShardGroupKey(final long groupId) { //SHARDGROUP/{storeId} String key = StringBuilderHelper.get() - .append(SHARDGROUP).append(DELIMITER) - .append(groupId) - .toString(); + .append(SHARDGROUP).append(DELIMITER) + .append(groupId) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getShardGroupPrefix() { //SHARDGROUP String key = StringBuilderHelper.get() - .append(SHARDGROUP).append(DELIMITER) - .toString(); + .append(SHARDGROUP).append(DELIMITER) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getPartitionKey(final String graphName, final int partId) { //GRAPH/{graphName}/Partition/{partId} String key = StringBuilderHelper.get() - .append(GRAPH).append(DELIMITER) - .append(graphName).append(DELIMITER) - .append(PARTITION).append(DELIMITER) - .append(partId) - .toString(); + .append(GRAPH).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(PARTITION).append(DELIMITER) + .append(partId) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getPartitionV36Key(final String graphName, final int partId) { // GRAPH/{graphName}/PartitionV36/{partId} String key = StringBuilderHelper.get() - .append(GRAPH).append(DELIMITER) - .append(graphName).append(DELIMITER) - .append(PARTITION_V36).append(DELIMITER) - .append(partId) - .toString(); + .append(GRAPH).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(PARTITION_V36).append(DELIMITER) + .append(partId) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getPartitionPrefix(final String graphName) { //GRAPH/{graph}/Partition String key = StringBuilderHelper.get() - .append(GRAPH).append(DELIMITER) - 
.append(graphName).append(DELIMITER) - .append(PARTITION).append(DELIMITER) - .toString(); + .append(GRAPH).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(PARTITION).append(DELIMITER) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getShardKey(final long storeId, final int partId) { //SHARD/{graphName}/{type} String key = StringBuilderHelper.get() - .append(SHARDGROUP).append(DELIMITER) - .append(storeId).append(DELIMITER) - .append(partId) - .toString(); + .append(SHARDGROUP).append(DELIMITER) + .append(storeId).append(DELIMITER) + .append(partId) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getShardPrefix(final long storeId) { //SHARD/{graphName}/{type} String key = StringBuilderHelper.get() - .append(SHARDGROUP).append(DELIMITER) - .append(storeId).append(DELIMITER) - .toString(); + .append(SHARDGROUP).append(DELIMITER) + .append(storeId).append(DELIMITER) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getGraphKey(final String graphName) { //GRAPHMETA/{graphName} String key = StringBuilderHelper.get() - .append(GRAPHMETA).append(DELIMITER) - .append(graphName).append(DELIMITER) - .toString(); + .append(GRAPHMETA).append(DELIMITER) + .append(graphName).append(DELIMITER) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getGraphPrefix() { //GRAPHMETA/{ String key = StringBuilderHelper.get() - .append(GRAPHMETA).append(DELIMITER) - .toString(); + .append(GRAPHMETA).append(DELIMITER) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getPartitionStatusKey(String graphName, int id) { //PARTITION_STATUS/{ String key = StringBuilderHelper.get() - .append(PARTITION_STATUS) - .append(DELIMITER) - .append(graphName).append(DELIMITER) - .append(id).append(DELIMITER) - .toString(); + .append(PARTITION_STATUS) + .append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(id).append(DELIMITER) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getPartitionStatusPrefixKey(String graphName) { //PARTITION_STATUS/{ StringBuilder builder = StringBuilderHelper.get().append(PARTITION_STATUS) - .append(DELIMITER); + .append(DELIMITER); if (!StringUtils.isEmpty(graphName)) { builder.append(graphName).append(DELIMITER); } @@ -199,70 +217,71 @@ public static byte[] getPdConfigKey(String configKey) { public static byte[] getQueueItemPrefix() { //QUEUE String key = StringBuilderHelper.get() - .append(QUEUE).append(DELIMITER) - .toString(); + .append(QUEUE).append(DELIMITER) + .toString(); return key.getBytes(Charset.defaultCharset()); } public static byte[] getQueueItemKey(String itemId) { //QUEUE StringBuilder builder = StringBuilderHelper.get() - .append(QUEUE).append(DELIMITER); + .append(QUEUE).append(DELIMITER); if (!StringUtils.isEmpty(itemId)) { builder.append(itemId).append(DELIMITER); } return builder.toString().getBytes(Charset.defaultCharset()); } - public static byte[] getSplitTaskKey(String graphName, int groupId){ + public static byte[] getSplitTaskKey(String graphName, int groupId) { // TASK_SPLIT/{GraphName}/{partitionID} StringBuilder builder = StringBuilderHelper.get() - .append(TASK_SPLIT).append(DELIMITER) - .append(graphName).append(DELIMITER) - .append(groupId); + .append(TASK_SPLIT).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(groupId); return builder.toString().getBytes(Charset.defaultCharset()); } - public static byte[] 
getSplitTaskPrefix(String graphName){ + + public static byte[] getSplitTaskPrefix(String graphName) { // TASK_SPLIT/{GraphName}/ StringBuilder builder = StringBuilderHelper.get() - .append(TASK_SPLIT).append(DELIMITER) - .append(graphName); + .append(TASK_SPLIT).append(DELIMITER) + .append(graphName); return builder.toString().getBytes(Charset.defaultCharset()); } - public static byte[] getAllSplitTaskPrefix(){ + public static byte[] getAllSplitTaskPrefix() { // TASK_SPLIT/{GraphName}/ StringBuilder builder = StringBuilderHelper.get() - .append(TASK_SPLIT).append(DELIMITER); + .append(TASK_SPLIT).append(DELIMITER); return builder.toString().getBytes(Charset.defaultCharset()); } - public static byte[] getMoveTaskKey(String graphName, int targetGroupId, int groupId){ + public static byte[] getMoveTaskKey(String graphName, int targetGroupId, int groupId) { // TASK_MOVE/{GraphName}/to PartitionID/{source partitionID} StringBuilder builder = StringBuilderHelper.get() - .append(TASK_MOVE).append(DELIMITER) - .append(graphName).append(DELIMITER) - .append(targetGroupId).append(DELIMITER) - .append(groupId); + .append(TASK_MOVE).append(DELIMITER) + .append(graphName).append(DELIMITER) + .append(targetGroupId).append(DELIMITER) + .append(groupId); return builder.toString().getBytes(Charset.defaultCharset()); } - public static byte[] getMoveTaskPrefix(String graphName){ + public static byte[] getMoveTaskPrefix(String graphName) { // TASK_MOVE/{graphName}/toPartitionId/ StringBuilder builder = StringBuilderHelper.get() - .append(TASK_MOVE).append(DELIMITER) - .append(graphName); + .append(TASK_MOVE).append(DELIMITER) + .append(graphName); return builder.toString().getBytes(Charset.defaultCharset()); } - public static byte[] getAllMoveTaskPrefix(){ + public static byte[] getAllMoveTaskPrefix() { // TASK_MOVE/{graphName}/toPartitionId/ StringBuilder builder = StringBuilderHelper.get() - .append(TASK_MOVE).append(DELIMITER); + .append(TASK_MOVE).append(DELIMITER); return builder.toString().getBytes(Charset.defaultCharset()); } - public static byte[] getLogKey(Metapb.LogRecord record){ + public static byte[] getLogKey(Metapb.LogRecord record) { //LOG_RECORD/{action}/{time}/ StringBuilder builder = StringBuilderHelper.get() .append(LOG_RECORD) @@ -272,7 +291,8 @@ public static byte[] getLogKey(Metapb.LogRecord record){ .append(record.getTimestamp()); return builder.toString().getBytes(Charset.defaultCharset()); } - public static byte[] getLogKeyPrefix(String action, long time){ + + public static byte[] getLogKeyPrefix(String action, long time) { //LOG_DATA_SPLIT/{time}/{GraphName} StringBuilder builder = StringBuilderHelper.get() .append(LOG_RECORD) @@ -283,7 +303,7 @@ public static byte[] getLogKeyPrefix(String action, long time){ return builder.toString().getBytes(Charset.defaultCharset()); } - public static byte[] getKVPrefix(String prefix,String key) { + public static byte[] getKVPrefix(String prefix, String key) { //K@/{key} StringBuilder builder = StringBuilderHelper.get() .append(prefix).append(DELIMITER); @@ -293,7 +313,7 @@ public static byte[] getKVPrefix(String prefix,String key) { return builder.toString().getBytes(Charset.defaultCharset()); } - public static byte[] getKVTTLPrefix(String ttlPrefix,String prefix,String key) { + public static byte[] getKVTTLPrefix(String ttlPrefix, String prefix, String key) { StringBuilder builder = StringBuilderHelper.get().append(ttlPrefix) .append(prefix).append(DELIMITER); if (!StringUtils.isEmpty(key)) { @@ -309,6 +329,7 @@ public static String 
getKVWatchKeyPrefix(String key, String watchDelimiter, long builder.append(clientId); return builder.toString(); } + public static String getKVWatchKeyPrefix(String key, String watchDelimiter) { StringBuilder builder = StringBuilderHelper.get(); builder.append(watchDelimiter).append(DELIMITER); @@ -316,11 +337,11 @@ public static String getKVWatchKeyPrefix(String key, String watchDelimiter) { return builder.toString(); } - public static char getDelimiter(){ + public static char getDelimiter() { return DELIMITER; } - public static StringBuilder getStringBuilderHelper(){ + public static StringBuilder getStringBuilderHelper() { return StringBuilderHelper.get(); } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java index bf91d80b0a..8c84ea68a7 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
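MetadataKeyHelper above is the single place where every PD metadata key is laid out; the in-code comments (STORE/{storeId}, GRAPH/{graphName}/Partition/{partId}, TASK_MOVE/{graphName}/{target partition}/{source partition}, and so on) describe the resulting strings, with the prefix constants and DELIMITER defined earlier in the class, outside the hunks shown here. A tiny sketch that just prints a few generated keys; the graph name is an arbitrary example value:

import java.nio.charset.Charset;

import org.apache.hugegraph.pd.meta.MetadataKeyHelper;

public class KeyLayoutSketch {
    public static void main(String[] args) {
        Charset cs = Charset.defaultCharset();
        // STORE/{storeId}
        System.out.println(new String(MetadataKeyHelper.getStoreInfoKey(1001L), cs));
        // GRAPH/{graphName}/Partition/{partId}
        System.out.println(new String(MetadataKeyHelper.getPartitionKey("hugegraph/g", 0), cs));
        // TASK_MOVE/{graphName}/{target partition id}/{source partition id}
        System.out.println(new String(MetadataKeyHelper.getMoveTaskKey("hugegraph/g", 2, 0), cs));
    }
}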
+ */ + package org.apache.hugegraph.pd.meta; import java.util.LinkedList; @@ -23,12 +40,12 @@ public class MetadataRocksDBStore extends MetadataStoreBase { PDConfig pdConfig; - public MetadataRocksDBStore(PDConfig pdConfig){ + public MetadataRocksDBStore(PDConfig pdConfig) { store = MetadataFactory.getStore(pdConfig); this.pdConfig = pdConfig; } - private HgKVStore getStore(){ + private HgKVStore getStore() { if (store == null) { store = MetadataFactory.getStore(pdConfig); } @@ -40,7 +57,7 @@ public byte[] getOne(byte[] key) throws PDException { try { byte[] bytes = store.get(key); return bytes; - }catch (Exception e){ + } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); } } @@ -51,7 +68,7 @@ public E getOne(Parser parser, byte[] key) throws PDException { byte[] bytes = store.get(key); if (ArrayUtils.isEmpty(bytes)) return null; return parser.parseFrom(bytes); - }catch (Exception e){ + } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); } } @@ -60,19 +77,20 @@ public E getOne(Parser parser, byte[] key) throws PDException { public void put(byte[] key, byte[] value) throws PDException { try { getStore().put(key, value); - } catch (Exception e){ + } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); } } @Override public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException { - this.store.putWithTTL(key,value,ttl); + this.store.putWithTTL(key, value, ttl); } @Override - public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException { - this.store.putWithTTL(key,value,ttl,timeUnit); + public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws + PDException { + this.store.putWithTTL(key, value, ttl, timeUnit); } @Override @@ -82,7 +100,7 @@ public byte[] getWithTTL(byte[] key) throws PDException { @Override public List getListWithTTL(byte[] key) throws PDException { - return this.store.getListWithTTL(key); + return this.store.getListWithTTL(key); } @Override @@ -94,7 +112,7 @@ public void removeWithTTL(byte[] key) throws PDException { public List scanPrefix(byte[] prefix) throws PDException { //TODO 使用rocksdb 前缀查询 try { - return this.store.scanPrefix(prefix); + return this.store.scanPrefix(prefix); } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); } @@ -102,16 +120,16 @@ public List scanPrefix(byte[] prefix) throws PDException { @Override public List scanRange(byte[] start, byte[] end) throws PDException { - return this.store.scanRange(start,end); + return this.store.scanRange(start, end); } @Override - public List scanRange(Parser parser,byte[] start,byte[] end) throws PDException { + public List scanRange(Parser parser, byte[] start, byte[] end) throws PDException { List stores = new LinkedList<>(); try { List kvs = this.scanRange(start, end); for (KV keyValue : kvs) { - stores.add(parser.parseFrom((byte[])keyValue.getValue())); + stores.add(parser.parseFrom((byte[]) keyValue.getValue())); } } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); @@ -125,7 +143,7 @@ public List scanPrefix(Parser parser, byte[] prefix) throws PDExceptio try { List kvs = this.scanPrefix(prefix); for (KV keyValue : kvs) { - stores.add(parser.parseFrom((byte[])keyValue.getValue())); + stores.add(parser.parseFrom((byte[]) keyValue.getValue())); } } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); diff --git 
a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java index 409bb55aff..f27600c07e 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java @@ -1,21 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.meta; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Pdpb; - -import org.apache.hugegraph.pd.store.KV; - import java.io.IOException; import java.util.LinkedList; import java.util.List; import java.util.concurrent.TimeUnit; +import org.apache.hugegraph.pd.store.KV; + +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Pdpb; import com.google.protobuf.Parser; public abstract class MetadataStoreBase { - // public long timeout = 3; // 请求超时时间,默认三秒 + // public long timeout = 3; // 请求超时时间,默认三秒 public abstract byte[] getOne(byte[] key) throws PDException; @@ -28,16 +44,19 @@ public abstract class MetadataStoreBase { */ public abstract void putWithTTL(byte[] key, - byte[] value, - long ttl) throws PDException; + byte[] value, + long ttl) throws PDException; + public abstract void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException; + public abstract byte[] getWithTTL(byte[] key) throws PDException; - public abstract List getListWithTTL(byte[] key) throws PDException; + public abstract List getListWithTTL(byte[] key) throws PDException; public abstract void removeWithTTL(byte[] key) throws PDException; + /** * 前缀查询 * @@ -46,8 +65,12 @@ public abstract void putWithTTL(byte[] key, * @throws PDException */ public abstract List scanPrefix(byte[] prefix) throws PDException; - public abstract List scanRange(byte[] start,byte[] end) throws PDException; - public abstract List scanRange(Parser parser, byte[] start,byte[] end) throws PDException; + + public abstract List scanRange(byte[] start, byte[] end) throws PDException; + + public abstract List scanRange(Parser parser, byte[] start, byte[] end) throws + PDException; + /** * 前缀查询 * @@ -77,17 +100,18 @@ public abstract void putWithTTL(byte[] key, public abstract void close() throws IOException; - public T getInstanceWithTTL(Parser parser,byte[] key) throws PDException{ - try{ + public T getInstanceWithTTL(Parser parser, byte[] key) throws PDException { + try { byte[] withTTL = this.getWithTTL(key); return parser.parseFrom(withTTL); } catch (Exception e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE,e); + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); } } - public List getInstanceListWithTTL(Parser 
parser,byte[] key) - throws PDException{ - try{ + + public List getInstanceListWithTTL(Parser parser, byte[] key) + throws PDException { + try { List withTTL = this.getListWithTTL(key); LinkedList ts = new LinkedList<>(); for (int i = 0; i < withTTL.size(); i++) { @@ -95,7 +119,7 @@ public List getInstanceListWithTTL(Parser parser,byte[] key) } return ts; } catch (Exception e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE,e); + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); } } } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java index 354bdc8913..b04138c9e4 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java @@ -1,26 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.meta; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.common.PartitionCache; +import java.util.ArrayList; +import java.util.List; import org.apache.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.common.PartitionCache; import com.baidu.hugegraph.pd.grpc.Metapb; -import lombok.extern.slf4j.Slf4j; -import java.util.ArrayList; -import java.util.List; +import lombok.extern.slf4j.Slf4j; /** * 分区信息管理 */ @Slf4j public class PartitionMeta extends MetadataRocksDBStore { - private PDConfig pdConfig; - private PartitionCache cache; - static String CID_GRAPH_ID_KEY = "GraphID"; - static int CID_GRAPH_ID_MAX = 0xFFFE; + static int CID_GRAPH_ID_MAX = 0xFFFE; + private final PDConfig pdConfig; + private final PartitionCache cache; public PartitionMeta(PDConfig pdConfig) { super(pdConfig); @@ -28,6 +44,7 @@ public PartitionMeta(PDConfig pdConfig) { //this.timeout = pdConfig.getEtcd().getTimeout(); this.cache = new PartitionCache(); } + /** * 初始化,加载所有的分区 */ @@ -53,62 +70,65 @@ private void loadGraphs() throws PDException { /** * partition 和 shard group分开存储,再init的时候,需要加载进来 + * * @throws PDException */ private void loadShardGroups() throws PDException { byte[] shardGroupPrefix = MetadataKeyHelper.getShardGroupPrefix(); - for (var shardGroup : scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix)){ + for (var shardGroup : scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix)) { cache.updateShardGroup(shardGroup); } } - private void loadPartitions(Metapb.Graph graph) throws PDException{ - byte[] prefix = MetadataKeyHelper.getPartitionPrefix( graph.getGraphName()); + private void loadPartitions(Metapb.Graph graph) throws PDException { + byte[] prefix = 
MetadataKeyHelper.getPartitionPrefix(graph.getGraphName()); List partitions = scanPrefix(Metapb.Partition.parser(), prefix); - partitions.forEach(p->{ + partitions.forEach(p -> { cache.updatePartition(p); }); } /** * 根据id查找分区 (先从缓存找,再到数据库中找) + * * @param graphName * @param partId * @return * @throws PDException */ public Metapb.Partition getPartitionById(String graphName, int partId) throws PDException { - var pair = cache.getPartitionById(graphName, partId); + var pair = cache.getPartitionById(graphName, partId); Metapb.Partition partition; if (pair == null) { - byte[] key = MetadataKeyHelper.getPartitionKey( graphName, partId); + byte[] key = MetadataKeyHelper.getPartitionKey(graphName, partId); partition = getOne(Metapb.Partition.parser(), key); - if ( partition != null ) { + if (partition != null) { cache.updatePartition(partition); } - }else{ + } else { partition = pair.getKey(); } return partition; } + public List getPartitionById(int partId) throws PDException { List partitions = new ArrayList<>(); cache.getGraphs().forEach(graph -> { cache.getPartitions(graph.getGraphName()).forEach(partition -> { - if ( partition.getId() == partId ) + if (partition.getId() == partId) { partitions.add(partition); + } }); }); - return partitions; + return partitions; } /** * 根据code查找分区 - */ public Metapb.Partition getPartitionByCode(String graphName, long code) throws PDException { var pair = cache.getPartitionByCode(graphName, code); - if (pair != null){ + if (pair != null) { return pair.getKey(); } return null; @@ -125,18 +145,18 @@ public Metapb.Graph getAndCreateGraph(String graphName, int partitionCount) thro } // 管理图,只有一个分区 - if (graphName.endsWith("/s") || graphName.endsWith("/m")){ + if (graphName.endsWith("/s") || graphName.endsWith("/m")) { partitionCount = 1; } Metapb.Graph graph = cache.getGraph(graphName); - if ( graph == null ){ + if (graph == null) { // 保存图信息 graph = Metapb.Graph.newBuilder() - .setGraphName(graphName) - .setPartitionCount(partitionCount) - .setState(Metapb.PartitionState.PState_Normal) - .build(); + .setGraphName(graphName) + .setPartitionCount(partitionCount) + .setState(Metapb.PartitionState.PState_Normal) + .build(); updateGraph(graph); } return graph; @@ -144,15 +164,16 @@ public Metapb.Graph getAndCreateGraph(String graphName, int partitionCount) thro /** * 保存分区信息 + * * @param partition * @return * @throws PDException */ public Metapb.Partition updatePartition(Metapb.Partition partition) throws PDException { - if ( !cache.hasGraph(partition.getGraphName())){ + if (!cache.hasGraph(partition.getGraphName())) { getAndCreateGraph(partition.getGraphName()); } - byte[] key = MetadataKeyHelper.getPartitionKey( partition.getGraphName(), partition.getId()); + byte[] key = MetadataKeyHelper.getPartitionKey(partition.getGraphName(), partition.getId()); put(key, partition.toByteString().toByteArray()); cache.updatePartition(partition); return partition; @@ -161,12 +182,13 @@ public Metapb.Partition updatePartition(Metapb.Partition partition) throws PDExc /** * 检查数据库,是否存在对应的图,不存在,则创建。 * 更新partition的 version, conf version 和 shard list + * * @param partition * @return * @throws PDException */ public Metapb.Partition updateShardList(Metapb.Partition partition) throws PDException { - if ( !cache.hasGraph(partition.getGraphName())){ + if (!cache.hasGraph(partition.getGraphName())) { getAndCreateGraph(partition.getGraphName()); } @@ -176,28 +198,29 @@ public Metapb.Partition updateShardList(Metapb.Partition partition) throws PDExc // .clearShards() // 
.addAllShards(partition.getShardsList()).build(); - byte[] key = MetadataKeyHelper.getPartitionKey( pt.getGraphName(), pt.getId()); + byte[] key = MetadataKeyHelper.getPartitionKey(pt.getGraphName(), pt.getId()); put(key, pt.toByteString().toByteArray()); cache.updatePartition(pt); return partition; } + /** * 删除所有分区 */ public long removeAllPartitions(String graphName) throws PDException { cache.removeAll(graphName); - byte[] prefix = MetadataKeyHelper.getPartitionPrefix( graphName); + byte[] prefix = MetadataKeyHelper.getPartitionPrefix(graphName); return removeByPrefix(prefix); } public long removePartition(String graphName, int id) throws PDException { cache.remove(graphName, id); - byte[] key = MetadataKeyHelper.getPartitionKey( graphName, id); + byte[] key = MetadataKeyHelper.getPartitionKey(graphName, id); return remove(key); } public void updatePartitionStats(Metapb.PartitionStats stats) throws PDException { - for(String graphName : stats.getGraphNameList()) { + for (String graphName : stats.getGraphNameList()) { byte[] prefix = MetadataKeyHelper.getPartitionStatusKey(graphName, stats.getId()); put(prefix, stats.toByteArray()); } @@ -208,7 +231,7 @@ public void updatePartitionStats(Metapb.PartitionStats stats) throws PDException */ public Metapb.PartitionStats getPartitionStats(String graphName, int id) throws PDException { byte[] prefix = MetadataKeyHelper.getPartitionStatusKey(graphName, id); - return getOne(Metapb.PartitionStats.parser(),prefix); + return getOne(Metapb.PartitionStats.parser(), prefix); } @@ -217,33 +240,34 @@ public Metapb.PartitionStats getPartitionStats(String graphName, int id) throws */ public List getPartitionStats(String graphName) throws PDException { byte[] prefix = MetadataKeyHelper.getPartitionStatusPrefixKey(graphName); - return scanPrefix(Metapb.PartitionStats.parser(),prefix); + return scanPrefix(Metapb.PartitionStats.parser(), prefix); } /** * 更新图信息 + * * @param graph * @return */ public Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { log.info("updateGraph {}", graph); - byte[] key = MetadataKeyHelper.getGraphKey( graph.getGraphName()); + byte[] key = MetadataKeyHelper.getGraphKey(graph.getGraphName()); // 保存图信息 put(key, graph.toByteString().toByteArray()); cache.updateGraph(graph); return graph; } - public List getPartitions(){ + public List getPartitions() { List partitions = new ArrayList<>(); List graphs = cache.getGraphs(); - graphs.forEach(e->{ + graphs.forEach(e -> { partitions.addAll(cache.getPartitions(e.getGraphName())); }); return partitions; } - public List getPartitions(String graphName){ + public List getPartitions(String graphName) { return cache.getPartitions(graphName); } @@ -253,7 +277,7 @@ public List getGraphs() throws PDException { } public Metapb.Graph getGraph(String graphName) throws PDException { - byte[] key = MetadataKeyHelper.getGraphKey( graphName); + byte[] key = MetadataKeyHelper.getGraphKey(graphName); return getOne(Metapb.Graph.parser(), key); } @@ -261,12 +285,12 @@ public Metapb.Graph getGraph(String graphName) throws PDException { * 删除图,并删除图id */ public long removeGraph(String graphName) throws PDException { - byte[] key = MetadataKeyHelper.getGraphKey( graphName); + byte[] key = MetadataKeyHelper.getGraphKey(graphName); long l = remove(key); return l; } - public PartitionCache getPartitionCache(){ + public PartitionCache getPartitionCache() { return cache; } } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java 
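The read/write flow of PartitionMeta above in one sketch: reads go cache-first and fall back to RocksDB, back-filling the PartitionCache on a miss, while writes go through to both the store and the cache (creating the graph record on the fly if it does not exist yet). Assumes an initialized core PDConfig and standard protobuf builder setters for Metapb.Partition (only the getters appear in the patch); the graph name is an arbitrary example:

import org.apache.hugegraph.pd.config.PDConfig;
import org.apache.hugegraph.pd.meta.PartitionMeta;

import com.baidu.hugegraph.pd.common.PDException;
import com.baidu.hugegraph.pd.grpc.Metapb;

public class PartitionMetaSketch {
    static Metapb.Partition writeThenRead(PDConfig pdConfig) throws PDException {
        PartitionMeta meta = new PartitionMeta(pdConfig);
        Metapb.Partition p = Metapb.Partition.newBuilder()
                                             .setGraphName("hugegraph/g")
                                             .setId(0)
                                             .build();
        // Writes GRAPH/{graph}/Partition/{id} and updates the cache in one call.
        meta.updatePartition(p);
        // Cache hit here; on a miss the same call reads the key above and back-fills the cache.
        return meta.getPartitionById("hugegraph/g", 0);
    }
}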
b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java index 6a23615d64..2568e5a06d 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java @@ -1,14 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.meta; -import com.baidu.hugegraph.pd.common.HgAssert; -import com.baidu.hugegraph.pd.common.PDException; +import java.util.List; import org.apache.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.common.HgAssert; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Metapb; -import java.util.List; - /** * @author lynn.bond@hotmail.com on 2022/2/10 */ diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java index 9544d87d41..c15b0e4204 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java @@ -1,31 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.meta; -import com.baidu.hugegraph.pd.common.PDException; +import java.util.LinkedList; +import java.util.List; +import java.util.ListIterator; import org.apache.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Metapb; -import lombok.extern.slf4j.Slf4j; -import java.util.LinkedList; -import java.util.List; -import java.util.ListIterator; +import lombok.extern.slf4j.Slf4j; /** * Store信息存储 */ @Slf4j public class StoreInfoMeta extends MetadataRocksDBStore { - private PDConfig pdConfig; + private final PDConfig pdConfig; public StoreInfoMeta(PDConfig pdConfig) { super(pdConfig); this.pdConfig = pdConfig; - // this.timeout = pdConfig.getDiscovery().getHeartbeatOutTimes(); + // this.timeout = pdConfig.getDiscovery().getHeartbeatOutTimes(); + } + + public static boolean shardGroupEquals(List g1, List g2) { + ListIterator e1 = g1.listIterator(); + ListIterator e2 = g2.listIterator(); + while (e1.hasNext() && e2.hasNext()) { + Metapb.Shard o1 = e1.next(); + Metapb.Shard o2 = e2.next(); + if (!(o1 == null ? o2 == null : o1.getStoreId() == o2.getStoreId())) { + return false; + } + } + return !(e1.hasNext() || e2.hasNext()); } /** * 更新Store信息 + * * @param store * @throws PDException */ @@ -44,7 +75,6 @@ public void keepStoreAlive(Metapb.Store store) throws PDException { putWithTTL(activeStoreKey, store.toByteArray(), pdConfig.getStore().getKeepAliveTimeout()); } - public void removeActiveStore(Metapb.Store store) throws PDException { byte[] activeStoreKey = MetadataKeyHelper.getActiveStoreKey(store.getId()); removeWithTTL(activeStoreKey); @@ -52,19 +82,20 @@ public void removeActiveStore(Metapb.Store store) throws PDException { public Metapb.Store getStore(Long storeId) throws PDException { byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(storeId); - Metapb.Store store = getOne(Metapb.Store.parser(),storeInfoKey); + Metapb.Store store = getOne(Metapb.Store.parser(), storeInfoKey); return store; } /** * 获取所有的store + * * @param graphName * @return * @throws PDException */ public List getStores(String graphName) throws PDException { byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); - return scanPrefix(Metapb.Store.parser(),storePrefix); + return scanPrefix(Metapb.Store.parser(), storePrefix); } /** @@ -77,9 +108,10 @@ public List getStores(String graphName) throws PDException { public List getActiveStores(String graphName) throws PDException { byte[] activePrefix = MetadataKeyHelper.getActiveStorePrefix(); List listWithTTL = getInstanceListWithTTL(Metapb.Store.parser(), - activePrefix); + activePrefix); return listWithTTL; } + public List getActiveStores() throws PDException { byte[] activePrefix = MetadataKeyHelper.getActiveStorePrefix(); List listWithTTL = getInstanceListWithTTL(Metapb.Store.parser(), @@ -130,19 +162,6 @@ public void deleteShardGroup(int groupId) throws PDException { remove(shardGroupKey); } - public static boolean shardGroupEquals(List g1, List g2) { - ListIterator e1 = g1.listIterator(); - ListIterator e2 = g2.listIterator(); - while (e1.hasNext() && e2.hasNext()) { - Metapb.Shard o1 = e1.next(); - Metapb.Shard o2 = e2.next(); - if (!(o1 == null ? 
o2 == null : o1.getStoreId() == o2.getStoreId())) { - return false; - } - } - return !(e1.hasNext() || e2.hasNext()); - } - public Metapb.ShardGroup getShardGroup(int groupId) throws PDException { byte[] shardGroupKey = MetadataKeyHelper.getShardGroupKey(groupId); return getOne(Metapb.ShardGroup.parser(), shardGroupKey); @@ -161,24 +180,26 @@ public List getShardGroups() throws PDException { public Metapb.StoreStats getStoreStats(long storeId) throws PDException { byte[] storeStatusKey = MetadataKeyHelper.getStoreStatusKey(storeId); Metapb.StoreStats stats = getOne(Metapb.StoreStats.parser(), - storeStatusKey); + storeStatusKey); return stats; } + /** * @return store及状态信息 * @throws PDException */ public List getStoreStatus(boolean isActive) throws PDException { byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); - List stores =isActive ? getActiveStores() : - scanPrefix(Metapb.Store.parser(),storePrefix); + List stores = isActive ? getActiveStores() : + scanPrefix(Metapb.Store.parser(), storePrefix); LinkedList list = new LinkedList<>(); for (int i = 0; i < stores.size(); i++) { Metapb.Store store = stores.get(i); Metapb.StoreStats stats = getStoreStats(store.getId()); - if (stats != null) + if (stats != null) { store = Metapb.Store.newBuilder(store).setStats(getStoreStats(store.getId())) - .build(); + .build(); + } list.add(store); } return list; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java index 85c053882c..5632a7b9c7 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java @@ -1,20 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
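The liveness model in StoreInfoMeta above is worth spelling out: a store's durable record lives under STORE/{storeId}, while its "alive" marker under ACTIVESTORE/{storeId} is rewritten with a TTL (the configured keep-alive timeout) on every heartbeat and simply expires when heartbeats stop, so getActiveStores only returns stores whose marker has not yet expired. A sketch of the heartbeat path, assuming an initialized core PDConfig and a Metapb.Store supplied by the caller:

import java.util.List;

import org.apache.hugegraph.pd.config.PDConfig;
import org.apache.hugegraph.pd.meta.StoreInfoMeta;

import com.baidu.hugegraph.pd.common.PDException;
import com.baidu.hugegraph.pd.grpc.Metapb;

public class StoreHeartbeatSketch {
    static List<Metapb.Store> heartbeat(PDConfig pdConfig, Metapb.Store store) throws PDException {
        StoreInfoMeta meta = new StoreInfoMeta(pdConfig);
        // ACTIVESTORE/{storeId} with TTL = keep-alive timeout; no explicit cleanup is needed,
        // a silent store just drops out of the active list once its entry expires.
        meta.keepStoreAlive(store);
        // getStoreStatus(true) would additionally merge the latest StoreStats into each record.
        return meta.getActiveStores();
    }
}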
+ */ + package org.apache.hugegraph.pd.meta; -import com.baidu.hugegraph.pd.common.PDException; +import java.util.List; import org.apache.hugegraph.pd.config.PDConfig; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.MetaTask; import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.grpc.pulse.MovePartition; import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition; -import java.util.List; - /** * 任务管理 */ -public class TaskInfoMeta extends MetadataRocksDBStore{ +public class TaskInfoMeta extends MetadataRocksDBStore { public TaskInfoMeta(PDConfig pdConfig) { super(pdConfig); } @@ -26,12 +42,12 @@ public void addSplitTask(int groupID, Metapb.Partition partition, SplitPartition throws PDException { byte[] key = MetadataKeyHelper.getSplitTaskKey(partition.getGraphName(), groupID); MetaTask.Task task = MetaTask.Task.newBuilder() - .setType(MetaTask.TaskType.Split_Partition) - .setState(MetaTask.TaskState.Task_Doing) - .setStartTimestamp(System.currentTimeMillis()) - .setPartition(partition) - .setSplitPartition(splitPartition) - .build(); + .setType(MetaTask.TaskType.Split_Partition) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setSplitPartition(splitPartition) + .build(); put(key, task.toByteString().toByteArray()); } @@ -64,15 +80,16 @@ public boolean hasSplitTaskDoing() throws PDException { public void addMovePartitionTask(Metapb.Partition partition, MovePartition movePartition) throws PDException { byte[] key = MetadataKeyHelper.getMoveTaskKey(partition.getGraphName(), - movePartition.getTargetPartition().getId(), partition.getId()); + movePartition.getTargetPartition().getId(), + partition.getId()); MetaTask.Task task = MetaTask.Task.newBuilder() - .setType(MetaTask.TaskType.Move_Partition) - .setState(MetaTask.TaskState.Task_Doing) - .setStartTimestamp(System.currentTimeMillis()) - .setPartition(partition) - .setMovePartition(movePartition) - .build(); + .setType(MetaTask.TaskType.Move_Partition) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setMovePartition(movePartition) + .build(); put(key, task.toByteArray()); } @@ -80,12 +97,14 @@ public void updateMovePartitionTask(MetaTask.Task task) throws PDException { byte[] key = MetadataKeyHelper.getMoveTaskKey(task.getPartition().getGraphName(), - task.getMovePartition().getTargetPartition().getId(), - task.getPartition().getId()); + task.getMovePartition().getTargetPartition() + .getId(), + task.getPartition().getId()); put(key, task.toByteArray()); } - public MetaTask.Task getMovePartitionTask(String graphName, int targetId, int partId) throws PDException { + public MetaTask.Task getMovePartitionTask(String graphName, int targetId, int partId) throws + PDException { byte[] key = MetadataKeyHelper.getMoveTaskKey(graphName, targetId, partId); return getOne(MetaTask.Task.parser(), key); } @@ -97,6 +116,7 @@ public List scanMoveTask(String graphName) throws PDException { /** * 按照prefix删除迁移任务,一次分组的 + * * @param graphName 图名称 * @throws PDException io error */ diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java index 5df461cafa..e7b985842e 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java @@ -1,15 
+1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.raft; +import java.util.concurrent.CompletableFuture; + import com.alipay.sofa.jraft.Closure; import com.alipay.sofa.jraft.Status; -import java.util.concurrent.CompletableFuture; - public class FutureClosureAdapter implements Closure { public final CompletableFuture future = new CompletableFuture<>(); private T resp; - public T getResponse() { return this.resp; } + public T getResponse() { + return this.resp; + } public void setResponse(T resp) { this.resp = resp; @@ -17,7 +36,7 @@ public void setResponse(T resp) { run(Status.OK()); } - public void failure(Throwable t){ + public void failure(Throwable t) { future.completeExceptionally(t); run(new Status(-1, t.getMessage())); } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java index 4c648b56ca..4af18f50a2 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java @@ -1,5 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.raft; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + import com.alipay.sofa.jraft.util.BytesUtil; import com.alipay.sofa.jraft.util.Requires; import com.caucho.hessian.io.Hessian2Input; @@ -7,11 +29,6 @@ import lombok.Data; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.concurrent.TimeUnit; - @Data public class KVOperation { @@ -42,7 +59,7 @@ public class KVOperation { private Object arg; private byte op; - public KVOperation(){ + public KVOperation() { } @@ -52,6 +69,7 @@ public KVOperation(byte[] key, byte[] value, Object attach, byte op) { this.attach = attach; this.op = op; } + public KVOperation(byte[] key, byte[] value, Object attach, byte op, Object arg) { this.key = key; this.value = value; @@ -60,18 +78,6 @@ public KVOperation(byte[] key, byte[] value, Object attach, byte op, Object arg) this.arg = arg; } - public byte[] toByteArray() throws IOException { - try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { - bos.write(op); - Hessian2Output output = new Hessian2Output(bos); - output.writeObject(key); - output.writeObject(value); - output.writeObject(arg); - output.flush(); - return bos.toByteArray(); - } - } - public static KVOperation fromByteArray(byte[] value) throws IOException { try (ByteArrayInputStream bis = new ByteArrayInputStream(value, 1, value.length - 1)) { @@ -103,12 +109,13 @@ public static KVOperation createPutWithTTL(byte[] key, byte[] value, long ttl) { return new KVOperation(key, value, value, PUT_WITH_TTL, ttl); } + public static KVOperation createPutWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) { Requires.requireNonNull(key, "key"); Requires.requireNonNull(value, "value"); return new KVOperation(key, value, value, PUT_WITH_TTL_UNIT, - new Object[] { ttl, timeUnit}); + new Object[]{ttl, timeUnit}); } public static KVOperation createRemoveWithTTL(byte[] key) { @@ -139,4 +146,16 @@ public static KVOperation createLoadSnapshot(String snapshotPath) { Requires.requireNonNull(snapshotPath, "snapshotPath"); return new KVOperation(null, null, snapshotPath, LOAD_SNAPSHOT); } + + public byte[] toByteArray() throws IOException { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + bos.write(op); + Hessian2Output output = new Hessian2Output(bos); + output.writeObject(key); + output.writeObject(value); + output.writeObject(arg); + output.flush(); + return bos.toByteArray(); + } + } } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java index 527864baf7..e78be0b28a 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
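Note: the KVOperation wire format shown above is a single leading op byte followed by the Hessian2-encoded key, value and arg; fromByteArray() skips that first byte when deserializing. A round-trip sketch of the same layout, assuming com.caucho:hessian is on the classpath; the helper names here are illustrative, not part of this patch.

// Round-trip sketch of the KVOperation byte layout: [op byte][hessian(key)][hessian(value)][hessian(arg)].
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Arrays;

import com.caucho.hessian.io.Hessian2Input;
import com.caucho.hessian.io.Hessian2Output;

public class KVOperationWireSketch {

    static byte[] encode(byte op, byte[] key, byte[] value, Object arg) throws IOException {
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
            bos.write(op);                          // 1-byte operation code first
            Hessian2Output out = new Hessian2Output(bos);
            out.writeObject(key);                   // then the Hessian2-encoded body
            out.writeObject(value);
            out.writeObject(arg);
            out.flush();
            return bos.toByteArray();
        }
    }

    static void decode(byte[] bytes) throws IOException {
        byte op = bytes[0];                         // fromByteArray() skips this leading byte
        try (ByteArrayInputStream bis = new ByteArrayInputStream(bytes, 1, bytes.length - 1)) {
            Hessian2Input in = new Hessian2Input(bis);
            byte[] key = (byte[]) in.readObject();
            byte[] value = (byte[]) in.readObject();
            Object arg = in.readObject();
            System.out.printf("op=%d key=%s value=%s arg=%s%n",
                              op, Arrays.toString(key), Arrays.toString(value), arg);
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] encoded = encode((byte) 0x05, "k1".getBytes(), "v1".getBytes(), 30L);
        decode(encoded);
    }
}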
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.raft; import com.alipay.sofa.jraft.Closure; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java index 3106362750..0b45380bc3 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java @@ -1,5 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.raft; +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; + +import org.apache.hugegraph.pd.config.PDConfig; + import com.alipay.sofa.jraft.JRaftUtils; import com.alipay.sofa.jraft.Node; import com.alipay.sofa.jraft.RaftGroupService; @@ -16,41 +43,30 @@ import com.alipay.sofa.jraft.util.Endpoint; import com.alipay.sofa.jraft.util.internal.ThrowUtil; import com.baidu.hugegraph.pd.common.PDException; - -import org.apache.hugegraph.pd.config.PDConfig; - import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.grpc.Pdpb; -import lombok.extern.slf4j.Slf4j; -import java.io.File; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicReference; +import lombok.extern.slf4j.Slf4j; @Slf4j public class RaftEngine { - private volatile static RaftEngine instance = new RaftEngine(); - - public static RaftEngine getInstance() { - return instance; - } - - private String groupId = "pd_raft"; + private static final RaftEngine instance = new RaftEngine(); + private final String groupId = "pd_raft"; + private final RaftStateMachine stateMachine; private PDConfig.Raft config; - private RaftStateMachine stateMachine; private RaftGroupService raftGroupService; private RpcServer rpcServer; private Node raftNode; private RaftRpcClient raftRpcClient; - public RaftEngine(){ + public RaftEngine() { this.stateMachine = new RaftStateMachine(); } + public static RaftEngine getInstance() { + return instance; + } + public boolean 
init(PDConfig.Raft config) { if (this.raftNode != null) return false; this.config = config; @@ -98,9 +114,10 @@ public boolean init(PDConfig.Raft config) { rpcServer = createRaftRpcServer(config.getAddress()); // 构建raft组并启动raft this.raftGroupService = new RaftGroupService(groupId, serverId, - nodeOptions, rpcServer, true); + nodeOptions, rpcServer, true); this.raftNode = raftGroupService.start(false); - log.info("RaftEngine start successfully: id = {}, peers list = {}", groupId, nodeOptions.getInitialConf().getPeers()); + log.info("RaftEngine start successfully: id = {}, peers list = {}", groupId, + nodeOptions.getInitialConf().getPeers()); return this.raftNode != null; } @@ -126,7 +143,7 @@ public void shutDown() { } this.raftGroupService = null; } - if (this.rpcServer != null){ + if (this.rpcServer != null) { this.rpcServer.shutdown(); this.rpcServer = null; } @@ -137,7 +154,7 @@ public void shutDown() { } public boolean isLeader() { - return this.raftNode.isLeader(true); + return this.raftNode.isLeader(true); } /** @@ -153,18 +170,19 @@ public void addTask(Task task) { this.raftNode.apply(task); } - public void addStateListener(RaftStateListener listener){ + public void addStateListener(RaftStateListener listener) { this.stateMachine.addStateListener(listener); } - public void addTaskHandler(RaftTaskHandler handler){ + public void addTaskHandler(RaftTaskHandler handler) { this.stateMachine.addTaskHandler(handler); } + public PDConfig.Raft getConfig() { return this.config; } - public PeerId getLeader(){ + public PeerId getLeader() { return raftNode.getLeaderId(); } @@ -174,11 +192,11 @@ public PeerId getLeader(){ public String getLeaderGrpcAddress() throws ExecutionException, InterruptedException { if (isLeader()) return config.getGrpcAddress(); return raftRpcClient.getGrpcAddress( - raftNode.getLeaderId().getEndpoint().toString()) - .get().getGrpcAddress(); + raftNode.getLeaderId().getEndpoint().toString()) + .get().getGrpcAddress(); } - public Metapb.Member getLocalMember(){ + public Metapb.Member getLocalMember() { Metapb.Member.Builder builder = Metapb.Member.newBuilder(); builder.setClusterId(config.getClusterId()); builder.setRaftUrl(config.getAddress()); @@ -192,7 +210,7 @@ public List getMembers() throws ExecutionException, InterruptedEx List members = new ArrayList<>(); List peers = raftNode.listPeers(); - for(PeerId peerId : peers){ + for (PeerId peerId : peers) { Metapb.Member.Builder builder = Metapb.Member.newBuilder(); builder.setClusterId(config.getClusterId()); CompletableFuture future = @@ -227,11 +245,11 @@ public List getMembers() throws ExecutionException, InterruptedEx public Status changePeerList(String peerList) { AtomicReference result = new AtomicReference<>(); - try{ + try { String[] peers = peerList.split(",", -1); - if ((peers.length & 1) != 1){ - throw new PDException(-1,"the number of peer list must be odd."); - }; + if ((peers.length & 1) != 1) { + throw new PDException(-1, "the number of peer list must be odd."); + } Configuration newPeers = new Configuration(); newPeers.parse(peerList); CountDownLatch latch = new CountDownLatch(1); @@ -247,9 +265,9 @@ public Status changePeerList(String peerList) { return result.get(); } - public PeerId waitingForLeader(long timeOut){ + public PeerId waitingForLeader(long timeOut) { PeerId leader = getLeader(); - if ( leader != null ) { + if (leader != null) { return leader; } @@ -264,7 +282,7 @@ public PeerId waitingForLeader(long timeOut){ } leader = getLeader(); } - return leader != null ? 
leader : null; + return leader; } } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java index 28e1842a55..2dfe3b3069 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java @@ -1,5 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.raft; +import java.util.concurrent.CompletableFuture; + import com.alipay.sofa.jraft.JRaftUtils; import com.alipay.sofa.jraft.Status; import com.alipay.sofa.jraft.option.RpcOptions; @@ -9,9 +28,8 @@ import com.alipay.sofa.jraft.rpc.RpcClient; import com.alipay.sofa.jraft.util.Endpoint; import com.alipay.sofa.jraft.util.RpcFactoryHelper; -import lombok.extern.slf4j.Slf4j; -import java.util.concurrent.CompletableFuture; +import lombok.extern.slf4j.Slf4j; @Slf4j public class RaftRpcClient { @@ -21,7 +39,8 @@ public class RaftRpcClient { public synchronized boolean init(final RpcOptions rpcOptions) { this.rpcOptions = rpcOptions; final RaftRpcFactory factory = RpcFactoryHelper.rpcFactory(); - this.rpcClient = factory.createRpcClient(factory.defaultJRaftClientConfigHelper(this.rpcOptions)); + this.rpcClient = + factory.createRpcClient(factory.defaultJRaftClientConfigHelper(this.rpcOptions)); return this.rpcClient.init(null); } @@ -31,12 +50,14 @@ public synchronized boolean init(final RpcOptions rpcOptions) { public CompletableFuture getGrpcAddress(final String address) { RaftRpcProcessor.GetMemberRequest request = new RaftRpcProcessor.GetMemberRequest(); - FutureClosureAdapter response = new FutureClosureAdapter<>(); + FutureClosureAdapter response = + new FutureClosureAdapter<>(); internalCallAsyncWithRpc(JRaftUtils.getEndPoint(address), request, response); return response.future; } - private void internalCallAsyncWithRpc(final Endpoint endpoint, final RaftRpcProcessor.BaseRequest request, + private void internalCallAsyncWithRpc(final Endpoint endpoint, + final RaftRpcProcessor.BaseRequest request, final FutureClosureAdapter closure) { final InvokeContext invokeCtx = null; final InvokeCallback invokeCallback = new InvokeCallback() { @@ -44,7 +65,8 @@ private void internalCallAsyncWithRpc(final Endpoint endpoint, final RaftRpc @Override public void complete(final Object result, final Throwable err) { if (err == null) { - final RaftRpcProcessor.BaseResponse response = (RaftRpcProcessor.BaseResponse) result; + final RaftRpcProcessor.BaseResponse response = + (RaftRpcProcessor.BaseResponse) result; closure.setResponse((V) response); } else { closure.failure(err); @@ -54,7 +76,8 @@ public void complete(final Object result, final Throwable err) { }; try { - 
this.rpcClient.invokeAsync(endpoint, request, invokeCtx, invokeCallback, this.rpcOptions.getRpcDefaultTimeout()); + this.rpcClient.invokeAsync(endpoint, request, invokeCtx, invokeCallback, + this.rpcOptions.getRpcDefaultTimeout()); } catch (final Throwable t) { log.error("failed to call rpc to {}. {}", endpoint, t.getMessage()); closure.failure(t); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java index beeffae7d1..ca3b2f3961 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java @@ -1,36 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.raft; +import java.io.Serializable; + import com.alipay.sofa.jraft.rpc.RpcContext; import com.alipay.sofa.jraft.rpc.RpcProcessor; import com.alipay.sofa.jraft.rpc.RpcServer; -import lombok.Data; -import java.io.Serializable; +import lombok.Data; public class RaftRpcProcessor implements RpcProcessor { - public static void registerProcessor(final RpcServer rpcServer, RaftEngine raftEngine) { - rpcServer.registerProcessor(new RaftRpcProcessor<>(GetMemberRequest.class, raftEngine)); - } - private final Class requestClass; - private RaftEngine raftEngine; - + private final RaftEngine raftEngine; public RaftRpcProcessor(Class requestClass, RaftEngine raftEngine) { this.requestClass = requestClass; this.raftEngine = raftEngine; } + public static void registerProcessor(final RpcServer rpcServer, RaftEngine raftEngine) { + rpcServer.registerProcessor(new RaftRpcProcessor<>(GetMemberRequest.class, raftEngine)); + } + @Override public void handleRequest(RpcContext rpcCtx, T request) { - switch (request.magic()) { - case BaseRequest.GET_GRPC_ADDRESS: { - rpcCtx.sendResponse(getGrpcAddress()); - break; - } - - default: + if (request.magic() == BaseRequest.GET_GRPC_ADDRESS) { + rpcCtx.sendResponse(getGrpcAddress()); } } @@ -39,17 +51,50 @@ public String interest() { return this.requestClass.getName(); } - private GetMemberResponse getGrpcAddress(){ + private GetMemberResponse getGrpcAddress() { GetMemberResponse rep = new GetMemberResponse(); rep.setGrpcAddress(raftEngine.getConfig().getGrpcAddress()); rep.setClusterId(raftEngine.getConfig().getClusterId()); rep.setDatePath(raftEngine.getConfig().getDataPath()); rep.setRaftAddress(raftEngine.getConfig().getAddress()); - rep.setRestAddress(raftEngine.getConfig().getHost() + ":" + raftEngine.getConfig().getPort()); + rep.setRestAddress( + raftEngine.getConfig().getHost() + ":" + raftEngine.getConfig().getPort()); rep.setStatus(Status.OK); return rep; } + public enum Status implements Serializable { + 
UNKNOWN(-1, "unknown"), + OK(0, "ok"), + COMPLETE(0, "Transmission completed"), + INCOMPLETE(1, "Incomplete transmission"), + NO_PARTITION(10, "Partition not found"), + IO_ERROR(11, "io error"), + EXCEPTION(12, "exception"), + ABORT(100, "Transmission aborted"); + + private final int code; + private String msg; + + Status(int code, String msg) { + this.code = code; + this.msg = msg; + } + + public int getCode() { + return this.code; + } + + public Status setMsg(String msg) { + this.msg = msg; + return this; + } + + public boolean isOK() { + return this.code == 0; + } + } + public abstract static class BaseRequest implements Serializable { public static final byte GET_GRPC_ADDRESS = 0x01; @@ -78,36 +123,4 @@ public static class GetMemberResponse extends BaseResponse { private String datePath; private String restAddress; } - - public enum Status implements Serializable{ - UNKNOWN(-1, "unknown"), - OK(0, "ok"), - COMPLETE(0, "Transmission completed"), - INCOMPLETE(1, "Incomplete transmission"), - NO_PARTITION(10, "Partition not found"), - IO_ERROR(11, "io error"), - EXCEPTION(12, "exception"), - ABORT(100, "Transmission aborted"); - - private int code; - private String msg; - - Status(int code, String msg) { - this.code = code; - this.msg = msg; - } - - public int getCode(){ - return this.code; - } - - public Status setMsg(String msg){ - this.msg = msg; - return this; - } - - public boolean isOK(){ - return this.code == 0; - } - } } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java index c36c553a7f..3c4de74e2c 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.raft; public interface RaftStateListener { diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java index 14aec4fd6a..1567a6188c 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java @@ -1,5 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.raft; +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; +import java.util.zip.Checksum; + +import org.apache.commons.io.FileUtils; +import org.springframework.util.CollectionUtils; + import com.alipay.sofa.jraft.Closure; import com.alipay.sofa.jraft.Iterator; import com.alipay.sofa.jraft.Status; @@ -15,38 +43,29 @@ import com.alipay.sofa.jraft.util.Utils; import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Pdpb; -import lombok.extern.slf4j.Slf4j; -import org.apache.commons.io.FileUtils; -import org.springframework.util.CollectionUtils; -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicLong; -import java.util.zip.Checksum; +import lombok.extern.slf4j.Slf4j; @Slf4j public class RaftStateMachine extends StateMachineAdapter { - private List taskHandlers; - private List stateListeners; - private static final String SNAPSHOT_DIR_NAME = "snapshot"; private static final String SNAPSHOT_ARCHIVE_NAME = "snapshot.zip"; - private final AtomicLong leaderTerm = new AtomicLong(-1); + private final List taskHandlers; + private final List stateListeners; public RaftStateMachine() { - this.taskHandlers = new CopyOnWriteArrayList<>(); - this.stateListeners = new CopyOnWriteArrayList<>(); + this.taskHandlers = new CopyOnWriteArrayList<>(); + this.stateListeners = new CopyOnWriteArrayList<>(); } public void addTaskHandler(RaftTaskHandler handler) { taskHandlers.add(handler); } - public void addStateListener(RaftStateListener listener){ stateListeners.add(listener);} + public void addStateListener(RaftStateListener listener) { + stateListeners.add(listener); + } public boolean isLeader() { return this.leaderTerm.get() > 0; @@ -66,12 +85,14 @@ public void onApply(Iterator iter) { for (RaftTaskHandler taskHandler : taskHandlers) { taskHandler.invoke(kvOp, done); } - if ( done != null) + if (done != null) { done.run(Status.OK()); + } } catch (Throwable t) { log.error("StateMachine meet critical error: {}.", t); - if (done != null) + if (done != null) { done.run(new Status(RaftError.EINTERNAL, t.getMessage())); + } } iter.next(); } @@ -94,10 +115,11 @@ public void onLeaderStart(final long term) { log.info("Raft becomes leader"); Utils.runInThread(() -> { - if (!CollectionUtils.isEmpty(stateListeners)) + if (!CollectionUtils.isEmpty(stateListeners)) { stateListeners.forEach(listener -> { listener.onRaftLeaderChanged(); }); + } }); } @@ -112,10 +134,11 @@ public void onLeaderStop(final Status status) { public void onStartFollowing(final LeaderChangeContext ctx) { super.onStartFollowing(ctx); Utils.runInThread(() -> { - if (!CollectionUtils.isEmpty(stateListeners)) + if (!CollectionUtils.isEmpty(stateListeners)) { stateListeners.forEach(listener -> { listener.onRaftLeaderChanged(); }); + } }); } @@ -160,7 
+183,7 @@ public void onSnapshotSave(final SnapshotWriter writer, final Closure done) { try { latch.await(); } catch (InterruptedException e) { - log.error("Raft onSnapshotSave failed. {}",e.toString()); + log.error("Raft onSnapshotSave failed. {}", e.toString()); done.run(new Status(RaftError.EIO, e.toString())); return; } @@ -188,7 +211,7 @@ public boolean onSnapshotLoad(final SnapshotReader reader) { // 2. decompress snapshot archive try { decompressSnapshot(reader); - } catch (PDException e) { + } catch (PDException e) { log.error("Failed to delete snapshot directory {}, {}", snapshotDir, e.toString()); return true; } @@ -222,7 +245,8 @@ public boolean onSnapshotLoad(final SnapshotReader reader) { FileUtils.forceDelete(file); } } catch (IOException e) { - log.error("Failed to delete snapshot directory {} and file {}", snapshotDir, snapshotArchive); + log.error("Failed to delete snapshot directory {} and file {}", snapshotDir, + snapshotArchive); return false; } @@ -231,13 +255,15 @@ public boolean onSnapshotLoad(final SnapshotReader reader) { private void compressSnapshot(final SnapshotWriter writer) throws PDException { final Checksum checksum = new CRC64(); - final String snapshotArchive = writer.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME;; + final String snapshotArchive = writer.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME; try { ZipUtils.compress(writer.getPath(), SNAPSHOT_DIR_NAME, snapshotArchive, checksum); - LocalFileMetaOutter.LocalFileMeta.Builder metaBuild = LocalFileMetaOutter.LocalFileMeta.newBuilder(); + LocalFileMetaOutter.LocalFileMeta.Builder metaBuild = + LocalFileMetaOutter.LocalFileMeta.newBuilder(); metaBuild.setChecksum(Long.toHexString(checksum.getValue())); if (!writer.addFile(SNAPSHOT_ARCHIVE_NAME, metaBuild.build())) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, "failed to add file to LocalFileMeta"); + throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, + "failed to add file to LocalFileMeta"); } } catch (IOException e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, e); @@ -245,14 +271,16 @@ private void compressSnapshot(final SnapshotWriter writer) throws PDException { } private void decompressSnapshot(final SnapshotReader reader) throws PDException { - final LocalFileMetaOutter.LocalFileMeta meta = (LocalFileMetaOutter.LocalFileMeta) reader.getFileMeta(SNAPSHOT_ARCHIVE_NAME); + final LocalFileMetaOutter.LocalFileMeta meta = + (LocalFileMetaOutter.LocalFileMeta) reader.getFileMeta(SNAPSHOT_ARCHIVE_NAME); final Checksum checksum = new CRC64(); - final String snapshotArchive = reader.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME;; + final String snapshotArchive = reader.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME; try { ZipUtils.decompress(snapshotArchive, reader.getPath(), checksum); if (meta.hasChecksum()) { if (!meta.getChecksum().equals(Long.toHexString(checksum.getValue()))) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, "Snapshot checksum failed"); + throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, + "Snapshot checksum failed"); } } } catch (IOException e) { @@ -262,8 +290,8 @@ private void decompressSnapshot(final SnapshotReader reader) throws PDException public static class RaftClosureAdapter implements KVStoreClosure { - private KVOperation op; - private KVStoreClosure closure; + private final KVOperation op; + private final KVStoreClosure closure; public RaftClosureAdapter(KVOperation op, 
KVStoreClosure closure) { this.op = op; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java index fc045e1053..28dfc975b9 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.raft; import com.baidu.hugegraph.pd.common.PDException; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java index 13f25347e1..8bb417db48 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java @@ -1,22 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.raft; -import lombok.extern.slf4j.Slf4j; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.file.Paths; +import java.util.zip.CheckedInputStream; +import java.util.zip.CheckedOutputStream; +import java.util.zip.Checksum; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; +import java.util.zip.ZipOutputStream; + import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.io.output.NullOutputStream; -import java.io.*; -import java.nio.file.Paths; -import java.util.zip.*; +import lombok.extern.slf4j.Slf4j; @Slf4j public final class ZipUtils { public static void compress(final String rootDir, final String sourceDir, - final String outputFile, final Checksum checksum) throws IOException { + final String outputFile, final Checksum checksum) throws + IOException { try (final FileOutputStream fos = new FileOutputStream(outputFile); - final CheckedOutputStream cos = new CheckedOutputStream(fos, checksum); - final ZipOutputStream zos = new ZipOutputStream(new BufferedOutputStream(cos))) { + final CheckedOutputStream cos = new CheckedOutputStream(fos, checksum); + final ZipOutputStream zos = new ZipOutputStream(new BufferedOutputStream(cos))) { ZipUtils.compressDirectoryToZipFile(rootDir, sourceDir, zos); zos.flush(); fos.getFD().sync(); @@ -24,7 +53,7 @@ public static void compress(final String rootDir, final String sourceDir, } private static void compressDirectoryToZipFile(final String rootDir, final String sourceDir, - final ZipOutputStream zos) throws IOException { + final ZipOutputStream zos) throws IOException { final String dir = Paths.get(rootDir, sourceDir).toString(); final File[] files = new File(dir).listFiles(); for (final File file : files) { @@ -34,7 +63,7 @@ private static void compressDirectoryToZipFile(final String rootDir, final Strin } else { zos.putNextEntry(new ZipEntry(child)); try (final FileInputStream fis = new FileInputStream(file); - final BufferedInputStream bis = new BufferedInputStream(fis)) { + final BufferedInputStream bis = new BufferedInputStream(fis)) { IOUtils.copy(bis, zos); } } @@ -42,17 +71,17 @@ private static void compressDirectoryToZipFile(final String rootDir, final Strin } public static void decompress(final String sourceFile, final String outputDir, - final Checksum checksum) throws IOException { + final Checksum checksum) throws IOException { try (final FileInputStream fis = new FileInputStream(sourceFile); - final CheckedInputStream cis = new CheckedInputStream(fis, checksum); - final ZipInputStream zis = new ZipInputStream(new BufferedInputStream(cis))) { + final CheckedInputStream cis = new CheckedInputStream(fis, checksum); + final ZipInputStream zis = new ZipInputStream(new BufferedInputStream(cis))) { ZipEntry entry; while ((entry = zis.getNextEntry()) != null) { final String fileName = entry.getName(); final File entryFile = new File(Paths.get(outputDir, fileName).toString()); FileUtils.forceMkdir(entryFile.getParentFile()); try (final FileOutputStream fos = new FileOutputStream(entryFile); - final BufferedOutputStream bos = new BufferedOutputStream(fos)) { + final BufferedOutputStream bos = new BufferedOutputStream(fos)) { IOUtils.copy(zis, bos); bos.flush(); fos.getFD().sync(); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java 
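Note: ZipUtils is what RaftStateMachine uses to archive the snapshot directory on save and to verify it on load against the checksum recorded in LocalFileMeta. A usage sketch of compress/decompress with checksum verification; java.util.zip.CRC32 stands in for jraft's CRC64 here, and the temporary paths are illustrative rather than the real snapshot layout.

// Compress a directory, keep its checksum, decompress it elsewhere and verify, as the snapshot code does.
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.zip.CRC32;

import org.apache.hugegraph.pd.raft.ZipUtils;

public class SnapshotArchiveSketch {

    public static void main(String[] args) throws IOException {
        File root = Files.createTempDirectory("pd-snapshot").toFile();
        File data = new File(root, "snapshot");                        // plays the role of SNAPSHOT_DIR_NAME
        data.mkdirs();
        Files.write(new File(data, "meta.bin").toPath(), new byte[]{1, 2, 3});

        String archive = new File(root, "snapshot.zip").getPath();     // plays the role of SNAPSHOT_ARCHIVE_NAME

        CRC32 writeSum = new CRC32();
        ZipUtils.compress(root.getPath(), "snapshot", archive, writeSum);
        String expected = Long.toHexString(writeSum.getValue());       // this is what gets stored in LocalFileMeta

        File out = Files.createTempDirectory("pd-restore").toFile();
        CRC32 readSum = new CRC32();
        ZipUtils.decompress(archive, out.getPath(), readSum);

        // The state machine rejects the snapshot if the checksums differ.
        if (!expected.equals(Long.toHexString(readSum.getValue()))) {
            throw new IOException("Snapshot checksum failed");
        }
        System.out.println("snapshot archive verified, checksum=" + expected);
    }
}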
b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java index 0baf8c98c1..9adcd32130 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.store; import org.apache.hugegraph.pd.raft.KVStoreClosure; @@ -7,6 +24,7 @@ public abstract class BaseKVStoreClosure implements KVStoreClosure { private Pdpb.Error error; private Object data; + @Override public Pdpb.Error getError() { return error; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java index 3d6bdf12e8..ee35a0cd42 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java @@ -1,11 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.store; -import com.baidu.hugegraph.pd.common.PDException; +import java.util.List; +import java.util.concurrent.TimeUnit; import org.apache.hugegraph.pd.config.PDConfig; -import java.util.List; -import java.util.concurrent.TimeUnit; +import com.baidu.hugegraph.pd.common.PDException; public interface HgKVStore { void init(PDConfig config); @@ -25,6 +42,7 @@ public interface HgKVStore { void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException; byte[] getWithTTL(byte[] key) throws PDException; + void removeWithTTL(byte[] key) throws PDException; List getListWithTTL(byte[] key) throws PDException; @@ -35,7 +53,7 @@ public interface HgKVStore { void loadSnapshot(String snapshotPath) throws PDException; - List scanRange(byte[] start,byte[] end); + List scanRange(byte[] start, byte[] end); void close(); } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java index db84a2b992..00b5e14e81 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java @@ -1,19 +1,21 @@ -package org.apache.hugegraph.pd.store; - -import com.alipay.sofa.jraft.util.Utils; -import com.baidu.hugegraph.pd.common.PDException; - -import org.apache.hugegraph.pd.config.PDConfig; - -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; -import com.google.common.cache.CacheBuilder; -import com.google.common.primitives.Bytes; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ -import lombok.extern.slf4j.Slf4j; - -import org.apache.commons.io.FileUtils; -import org.rocksdb.*; +package org.apache.hugegraph.pd.store; import java.io.File; import java.io.IOException; @@ -29,15 +31,34 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.config.PDConfig; +import org.rocksdb.Checkpoint; +import org.rocksdb.Options; +import org.rocksdb.ReadOptions; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; +import org.rocksdb.RocksIterator; +import org.rocksdb.Slice; + +import com.alipay.sofa.jraft.util.Utils; +import com.baidu.hugegraph.pd.common.PDException; +import com.baidu.hugegraph.pd.grpc.Pdpb; +import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; +import com.google.common.cache.CacheBuilder; +import com.google.common.primitives.Bytes; + +import lombok.extern.slf4j.Slf4j; + @Slf4j public class HgKVStoreImpl implements HgKVStore { - private RocksDB db; - private String dbPath; private static final ConcurrentHashMap> CACHE = new ConcurrentHashMap(); private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); - + private RocksDB db; + private String dbPath; private Options dbOptions; + @Override public void init(PDConfig config) { dbOptions = new Options().setCreateIfMissing(true); @@ -133,8 +154,9 @@ public long removeByPrefix(byte[] prefix) throws PDException { while (iterator.isValid()) { if (0 == Bytes.indexOf(iterator.key(), prefix)) { db.delete(iterator.key()); - } else + } else { break; + } iterator.next(); } } catch (Exception e) { @@ -179,17 +201,18 @@ public byte[] getWithTTL(byte[] key) throws PDException { public void removeWithTTL(byte[] key) throws PDException { ConcurrentMap map; String storeKey = new String(key, Charset.defaultCharset()); - if ((map = CACHE.get(storeKey)) == null) return ; + if ((map = CACHE.get(storeKey)) == null) return; map.remove(storeKey); } @Override public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException { - this.putWithTTL(key,value,ttl,TimeUnit.SECONDS); + this.putWithTTL(key, value, ttl, TimeUnit.SECONDS); } @Override - public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException { + public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws + PDException { try { ConcurrentMap spaceNode = CacheBuilder.newBuilder().initialCapacity(200) .expireAfterWrite(ttl, @@ -221,7 +244,8 @@ public void saveSnapshot(String snapshotPath) throws PDException { if (!Utils.atomicMoveFile(tempFile, snapshotFile, true)) { log.error("Fail to rename {} to {}", tempPath, snapshotPath); throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, - String.format("Fail to rename %s to %s", tempPath, snapshotPath)); + String.format("Fail to rename %s to %s", tempPath, + snapshotPath)); } } catch (final PDException e) { throw e; @@ -253,7 +277,8 @@ public void loadSnapshot(String snapshotPath) throws PDException { if (!Utils.atomicMoveFile(snapshotFile, dbFile, true)) { log.error("Fail to rename {} to {}", snapshotPath, this.dbPath); throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, - String.format("Fail to rename %s to %s", snapshotPath, this.dbPath)); + String.format("Fail to rename %s to %s", snapshotPath, + this.dbPath)); } // reopen the db openRocksDB(this.dbPath); @@ -272,7 +297,7 @@ public void loadSnapshot(String snapshotPath) throws PDException { public List 
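Note: the putWithTTL path in HgKVStoreImpl keeps TTL entries in a Guava cache built with expireAfterWrite and accessed through its ConcurrentMap view, so expired keys simply disappear from reads. A small sketch of that mechanism; the key and TTL below are illustrative and not the store's real key scheme.

// Guava cache with a write TTL, used through its ConcurrentMap view.
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class TtlCacheSketch {

    public static void main(String[] args) throws InterruptedException {
        Cache<String, byte[]> cache = CacheBuilder.newBuilder()
                                                  .initialCapacity(200)
                                                  .expireAfterWrite(1, TimeUnit.SECONDS)
                                                  .build();
        ConcurrentMap<String, byte[]> view = cache.asMap();

        view.put("HUGEGRAPH/kv/a", "v1".getBytes());
        System.out.println("before expiry: " + view.containsKey("HUGEGRAPH/kv/a")); // true

        Thread.sleep(1500);                      // wait past the write TTL
        cache.cleanUp();                         // force eviction bookkeeping
        System.out.println("after expiry:  " + view.containsKey("HUGEGRAPH/kv/a")); // false
    }
}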
scanRange(byte[] start, byte[] end) { final Lock readLock = this.readWriteLock.readLock(); readLock.lock(); - try(ReadOptions options = new ReadOptions() + try (ReadOptions options = new ReadOptions() .setIterateLowerBound(new Slice(start)) .setIterateUpperBound(new Slice(end))) { List kvs = new ArrayList<>(); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java index a7f39ffc07..fec074579d 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.store; @@ -5,15 +22,8 @@ public class KV { private byte[] key; private byte[] value; - public KV(byte[] key, byte[] value){ - this.key = key; - this.value = value; - } - public void setKey(byte[] key) { + public KV(byte[] key, byte[] value) { this.key = key; - } - - public void setValue(byte[] value) { this.value = value; } @@ -21,7 +31,15 @@ public byte[] getKey() { return key; } + public void setKey(byte[] key) { + this.key = key; + } + public byte[] getValue() { return value; } + + public void setValue(byte[] value) { + this.value = value; + } } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java index 987a6c09af..15d431155c 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java @@ -1,26 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.store; -import com.alipay.sofa.jraft.Status; -import com.alipay.sofa.jraft.entity.Task; -import com.alipay.sofa.jraft.error.RaftError; -import com.baidu.hugegraph.pd.common.PDException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.raft.KVOperation; import org.apache.hugegraph.pd.raft.KVStoreClosure; +import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.raft.RaftStateMachine; +import org.apache.hugegraph.pd.raft.RaftTaskHandler; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.entity.Task; +import com.alipay.sofa.jraft.error.RaftError; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Pdpb; -import org.apache.hugegraph.pd.raft.KVOperation; -import org.apache.hugegraph.pd.raft.RaftEngine; -import org.apache.hugegraph.pd.raft.RaftTaskHandler; import lombok.extern.slf4j.Slf4j; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; - @Slf4j public class RaftKVStore implements HgKVStore, RaftTaskHandler { @@ -45,7 +61,7 @@ private BaseKVStoreClosure createClosure() { public void run(Status status) { if (!status.isOk()) { log.error("An exception occurred while performing the RAFT,{}", - status.getErrorMsg()); + status.getErrorMsg()); } else { log.info("RAFT done!"); } @@ -58,7 +74,7 @@ public void put(byte[] key, byte[] value) throws PDException { KVOperation operation = KVOperation.createPut(key, value); try { applyOperation(operation).get(); - } catch (Exception e){ + } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); } } @@ -81,7 +97,7 @@ public List scanPrefix(byte[] prefix) { public long remove(byte[] bytes) throws PDException { try { applyOperation(KVOperation.createRemove(bytes)).get(); - } catch (Exception e){ + } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); } return 0; @@ -92,7 +108,7 @@ public long remove(byte[] bytes) throws PDException { public long removeByPrefix(byte[] bytes) throws PDException { try { applyOperation(KVOperation.createRemoveByPrefix(bytes)).get(); - } catch (Exception e){ + } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); } return 0; @@ -102,7 +118,7 @@ public long removeByPrefix(byte[] bytes) throws PDException { public void clear() throws PDException { try { applyOperation(KVOperation.createClear()).get(); - } catch (Exception e){ + } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); } } @@ -111,16 +127,17 @@ public void clear() throws PDException { public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException { try { applyOperation(KVOperation.createPutWithTTL(key, value, ttl)).get(); - } catch (Exception e){ + } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); } } @Override - public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException { + public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws + PDException { try { applyOperation(KVOperation.createPutWithTTL(key, value, ttl, timeUnit)).get(); - } catch (Exception e){ + } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, 
e.getMessage()); } } @@ -139,7 +156,7 @@ public byte[] getWithTTL(byte[] key) throws PDException { public void removeWithTTL(byte[] key) throws PDException { try { applyOperation(KVOperation.createRemoveWithTTL(key)).get(); - } catch (Exception e){ + } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); } } @@ -150,13 +167,13 @@ public void saveSnapshot(String snapshotPath) throws PDException { } @Override - public void loadSnapshot(String snapshotPath) throws PDException{ + public void loadSnapshot(String snapshotPath) throws PDException { store.loadSnapshot(snapshotPath); } @Override public List scanRange(byte[] start, byte[] end) { - return store.scanRange(start,end); + return store.scanRange(start, end); } @Override @@ -183,6 +200,7 @@ public long doRemoveByPrefix(byte[] bytes) throws PDException { public void doRemoveWithTTL(byte[] key) throws PDException { this.store.removeWithTTL(key); } + public void doClear() throws PDException { this.store.clear(); } @@ -191,7 +209,8 @@ public void doPutWithTTL(byte[] key, byte[] value, long ttl) throws PDException this.store.putWithTTL(key, value, ttl); } - public void doPutWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException { + public void doPutWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws + PDException { this.store.putWithTTL(key, value, ttl, timeUnit); } @@ -203,7 +222,7 @@ public void doLoadSnapshot(String snapshotPath) throws PDException { this.store.loadSnapshot(snapshotPath); } - private CompletableFuture applyOperation(final KVOperation op) throws PDException { + private CompletableFuture applyOperation(final KVOperation op) throws PDException { CompletableFuture future = new CompletableFuture<>(); try { final Task task = new Task(); @@ -211,17 +230,26 @@ private CompletableFuture applyOperation(final KVOperation op) throws PD task.setDone(new RaftStateMachine.RaftClosureAdapter(op, new KVStoreClosure() { Object data; Pdpb.Error error; + @Override - public Pdpb.Error getError() { return error;} + public Pdpb.Error getError() { + return error; + } @Override - public void setError(Pdpb.Error error) { this.error = error;} + public void setError(Pdpb.Error error) { + this.error = error; + } @Override - public Object getData() { return data; } + public Object getData() { + return data; + } @Override - public void setData(Object data) { this.data = data;} + public void setData(Object data) { + this.data = data; + } @Override public void run(Status status) { @@ -271,7 +299,7 @@ public boolean invoke(KVOperation op, KVStoreClosure response) throws PDExceptio break; case KVOperation.PUT_WITH_TTL_UNIT: Object[] arg = (Object[]) op.getArg(); - doPutWithTTL(op.getKey(), op.getValue(), (long) arg[0] , (TimeUnit)arg[1]); + doPutWithTTL(op.getKey(), op.getValue(), (long) arg[0], (TimeUnit) arg[1]); break; case KVOperation.REMOVE_BY_PREFIX: doRemoveByPrefix(op.getKey()); diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java index ce6d5c9108..4aa8a298fb 100644 --- a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd; import java.util.concurrent.ExecutionException; @@ -42,8 +59,10 @@ public static void clearClusterData() throws ExecutionException, InterruptedExce //Client client = Client.builder().endpoints(pdConfig.getEtcd().getAddress()).build(); //KV kvClient = client.getKVClient(); // - //ByteSequence key = ByteSequence.from("HUGEGRAPH/" + pdConfig.getClusterId(), Charset.forName("utf-8")); - //CompletableFuture rsp = kvClient.delete(key, DeleteOption.newBuilder().isPrefix(true).build()); + //ByteSequence key = ByteSequence.from("HUGEGRAPH/" + pdConfig.getClusterId(), Charset + // .forName("utf-8")); + //CompletableFuture rsp = kvClient.delete(key, DeleteOption.newBuilder() + // .isPrefix(true).build()); //System.out.println("删除数量 : " + rsp.get().getDeleted()); //kvClient.close(); //client.close(); @@ -53,7 +72,8 @@ public static void clearClusterData() throws ExecutionException, InterruptedExce public void testPatrolStores() throws PDException, InterruptedException { StoreNodeService storeService = new StoreNodeService(pdConfig); PartitionService partitionService = new PartitionService(pdConfig, storeService); - TaskScheduleService monitorService = new TaskScheduleService(pdConfig, storeService, partitionService); + TaskScheduleService monitorService = + new TaskScheduleService(pdConfig, storeService, partitionService); storeService.init(partitionService); partitionService.init(); monitorService.init(); @@ -62,25 +82,28 @@ public void testPatrolStores() throws PDException, InterruptedException { Metapb.Store[] stores = new Metapb.Store[count]; for (int i = 0; i < count; i++) { Metapb.Store store = Metapb.Store.newBuilder() - .setId(0) - .setAddress("" + i) - .setDeployPath("/data") - .addLabels(Metapb.StoreLabel.newBuilder() - .setKey("namespace") - .setValue("default").build()) - .build(); + .setId(0) + .setAddress(String.valueOf(i)) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default") + .build()) + .build(); stores[i] = storeService.register(store); System.out.println("新注册store, id = " + Long.toHexString(stores[i].getId())); } Metapb.Graph graph = Metapb.Graph.newBuilder() - .setGraphName("defaultGH") + .setGraphName("defaultGH") - .setPartitionCount(10) - .build(); + .setPartitionCount(10) + .build(); partitionService.updateGraph(graph); Thread.sleep(10000); count = 0; - count += storeService.getStores("").stream().filter(store -> store.getState() == Metapb.StoreState.Tombstone).count(); + count += storeService.getStores("").stream() + .filter(store -> store.getState() == Metapb.StoreState.Tombstone) + .count(); Assert.assertEquals(6, count); diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java index 1e2795fcc1..6214f4e79a 100644 --- a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java 
+++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java @@ -1,13 +1,30 @@ -package org.apache.hugegraph.pd; +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ -import com.baidu.hugegraph.pd.grpc.Metapb; -import org.junit.Test; +package org.apache.hugegraph.pd; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.List; +import org.junit.Test; + +import com.baidu.hugegraph.pd.grpc.Metapb; + public class PartitionServiceTest { @Test public void testPartitionHeartbeat() { @@ -17,7 +34,7 @@ public void testPartitionHeartbeat() { shardList.add(Metapb.Shard.newBuilder().setStoreId(3).build()); shardList = new ArrayList<>(shardList); Metapb.PartitionStats stats = Metapb.PartitionStats.newBuilder() - .addAllShard(shardList).build(); + .addAllShard(shardList).build(); List shardList2 = new ArrayList<>(stats.getShardList()); Collections.shuffle(shardList2); shardList2.forEach(shard -> { diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java index 7fe6a911b1..a00effcde5 100644 --- a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java @@ -1,9 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd; -import com.baidu.hugegraph.pd.common.PDException; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.commons.io.FileUtils; import org.apache.hugegraph.pd.config.PDConfig; +import org.junit.Assert; +import org.junit.BeforeClass; +import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard; import com.baidu.hugegraph.pd.grpc.pulse.CleanPartition; @@ -13,18 +39,6 @@ import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition; import com.baidu.hugegraph.pd.grpc.pulse.TransferLeader; -import org.apache.commons.io.FileUtils; -import org.junit.Assert; -import org.junit.BeforeClass; -// import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicReference; - public class StoreNodeServiceTest { static PDConfig pdConfig; @@ -34,7 +48,9 @@ public static void init() throws Exception { deleteDirectory(new File(path)); pdConfig = new PDConfig() {{ this.setClusterId(100); - this.setInitialStoreList("127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503,127.0.0.1:8504,127.0.0.1:8505"); + this.setInitialStoreList( + "127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503,127.0.0.1:8504," + + "127.0.0.1:8505"); }}; pdConfig.setStore(new PDConfig().new Store() {{ @@ -46,7 +62,7 @@ public static void init() throws Exception { this.setShardCount(3); this.setMaxShardsPerStore(3); }}); - pdConfig.setRaft(new PDConfig().new Raft(){{ + pdConfig.setRaft(new PDConfig().new Raft() {{ this.setEnable(false); }}); pdConfig.setDiscovery(new PDConfig().new Discovery()); @@ -55,25 +71,42 @@ public static void init() throws Exception { pdConfig = configService.loadConfig(); } + public static byte[] intToByteArray(int i) { + byte[] result = new byte[4]; + result[0] = (byte) ((i >> 24) & 0xFF); + result[1] = (byte) ((i >> 16) & 0xFF); + result[2] = (byte) ((i >> 8) & 0xFF); + result[3] = (byte) (i & 0xFF); + return result; + } + public static void deleteDirectory(File dir) { + try { + FileUtils.deleteDirectory(dir); + } catch (IOException e) { + System.out.printf("Failed to start ....,%s%n", e.getMessage()); + } + } // @Test public void testStoreNodeService() throws PDException { Assert.assertEquals(pdConfig.getPartition().getTotalCount(), - pdConfig.getInitialStoreMap().size() * pdConfig.getPartition().getMaxShardsPerStore() - / pdConfig.getPartition().getShardCount()); + (long) pdConfig.getInitialStoreMap().size() * + pdConfig.getPartition().getMaxShardsPerStore() + / pdConfig.getPartition().getShardCount()); StoreNodeService storeService = new StoreNodeService(pdConfig); int count = 6; Metapb.Store[] stores = new Metapb.Store[count]; for (int i = 0; i < count; i++) { Metapb.Store store = Metapb.Store.newBuilder() - .setId(0) - .setAddress("127.0.0.1:850" + i) - .setDeployPath("/data") - .addLabels(Metapb.StoreLabel.newBuilder() - .setKey("namespace") - .setValue("default").build()) - .build(); + .setId(0) + .setAddress("127.0.0.1:850" + i) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default") + .build()) + .build(); stores[i] = storeService.register(store); System.out.println("新注册store, id = " + 
stores[i].getId()); } @@ -81,34 +114,35 @@ public void testStoreNodeService() throws PDException { for (Metapb.Store store : stores) { Metapb.StoreStats stats = Metapb.StoreStats.newBuilder() - .setStoreId(store.getId()) - .build(); + .setStoreId(store.getId()) + .build(); storeService.heartBeat(stats); } Assert.assertEquals(6, storeService.getActiveStores("").size()); Metapb.Graph graph = Metapb.Graph.newBuilder() - .setGraphName("defaultGH") - .setPartitionCount(10) - .build(); + .setGraphName("defaultGH") + .setPartitionCount(10) + .build(); // 分配shard List shards = storeService.allocShards(graph, 1); Assert.assertEquals(3, shards.size()); - Assert.assertEquals(pdConfig.getPartition().getTotalCount(), storeService.getShardGroups().size()); // 设置leader + Assert.assertEquals(pdConfig.getPartition().getTotalCount(), + storeService.getShardGroups().size()); // 设置leader Metapb.Shard leader = Metapb.Shard.newBuilder(shards.get(0)) - .setRole(Metapb.ShardRole.Leader).build(); + .setRole(Metapb.ShardRole.Leader).build(); shards = new ArrayList<>(shards); shards.set(0, leader); // 增加shard pdConfig.getPartition().setShardCount(5); Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() - .setId(1) - .addAllShards(shards).build(); + .setId(1) + .addAllShards(shards).build(); shards = storeService.reallocShards(shardGroup); Assert.assertEquals(5, shards.size()); @@ -141,42 +175,51 @@ public void testSplitPartition() throws PDException { PartitionService partitionService = new PartitionService(pdConfig, storeService); storeService.init(partitionService); - partitionService.addInstructionListener(new PartitionInstructionListener(){ + partitionService.addInstructionListener(new PartitionInstructionListener() { @Override - public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException { + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws + PDException { } @Override - public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws PDException { + public void transferLeader(Metapb.Partition partition, + TransferLeader transferLeader) throws PDException { } @Override - public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws PDException { - splitPartition.getNewPartitionList().forEach(p->{ - System.out.println("SplitPartition " + p.getId() + " " + p.getStartKey() + "," + p.getEndKey()); + public void splitPartition(Metapb.Partition partition, + SplitPartition splitPartition) throws PDException { + splitPartition.getNewPartitionList().forEach(p -> { + System.out.println("SplitPartition " + p.getId() + " " + p.getStartKey() + "," + + p.getEndKey()); }); } @Override - public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException{ + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws + PDException { } @Override - public void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException { + public void movePartition(Metapb.Partition partition, + MovePartition movePartition) throws PDException { } @Override - public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException { + public void cleanPartition(Metapb.Partition partition, + CleanPartition cleanPartition) throws PDException { } @Override - public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) throws PDException { + public void 
changePartitionKeyRange(Metapb.Partition partition, + PartitionKeyRange partitionKeyRange) throws + PDException { } }); @@ -184,22 +227,24 @@ public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRang Metapb.Store[] stores = new Metapb.Store[count]; for (int i = 0; i < count; i++) { Metapb.Store store = Metapb.Store.newBuilder() - .setId(0) - .setAddress("127.0.0.1:850" + i) - .setDeployPath("/data") - .addLabels(Metapb.StoreLabel.newBuilder() - .setKey("namespace") - .setValue("default").build()) - .build(); + .setId(0) + .setAddress("127.0.0.1:850" + i) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default") + .build()) + .build(); stores[i] = storeService.register(store); System.out.println("新注册store, id = " + Long.toHexString(stores[i].getId())); } Assert.assertEquals(count, storeService.getStores().size()); Metapb.Graph graph = Metapb.Graph.newBuilder() - .setGraphName("defaultGH") - .build(); - Metapb.PartitionShard ptShard = partitionService.getPartitionByCode(graph.getGraphName(), 0); + .setGraphName("defaultGH") + .build(); + Metapb.PartitionShard ptShard = + partitionService.getPartitionByCode(graph.getGraphName(), 0); System.out.println(ptShard.getPartition().getId()); { Metapb.Partition pt = ptShard.getPartition(); @@ -215,19 +260,21 @@ public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRang } // @Test - public void testPartitionService() throws PDException, ExecutionException, InterruptedException { + public void testPartitionService() throws PDException, ExecutionException, + InterruptedException { StoreNodeService storeService = new StoreNodeService(pdConfig); int count = 6; Metapb.Store[] stores = new Metapb.Store[count]; for (int i = 0; i < count; i++) { Metapb.Store store = Metapb.Store.newBuilder() - .setId(0) - .setAddress("" + i) - .setDeployPath("/data") - .addLabels(Metapb.StoreLabel.newBuilder() - .setKey("namespace") - .setValue("default").build()) - .build(); + .setId(0) + .setAddress(String.valueOf(i)) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default") + .build()) + .build(); stores[i] = storeService.register(store); System.out.println("新注册store, id = " + Long.toHexString(stores[i].getId())); } @@ -237,35 +284,41 @@ public void testPartitionService() throws PDException, ExecutionException, Inter PartitionService partitionService = new PartitionService(pdConfig, storeService); Metapb.Graph graph = Metapb.Graph.newBuilder() - .setGraphName("defaultGH") + .setGraphName("defaultGH") - .setPartitionCount(10) - .build(); + .setPartitionCount(10) + .build(); // 申请分区 Metapb.PartitionShard[] partitions = new Metapb.PartitionShard[10]; for (int i = 0; i < partitions.length; i++) { - partitions[i] = partitionService.getPartitionShard(graph.getGraphName(), intToByteArray(i)); + partitions[i] = + partitionService.getPartitionShard(graph.getGraphName(), intToByteArray(i)); Assert.assertEquals(3, storeService.getShardGroup(i).getShardsCount()); } - System.out.println("分区数量: " + partitionService.getPartitions(graph.getGraphName()).size()); + System.out.println( + "分区数量: " + partitionService.getPartitions(graph.getGraphName()).size()); int[] caseNo = {0}; //1 测试增加shard, 2 //测试store下线 Metapb.Shard leader = null; int[] finalCaseNo = caseNo; - partitionService.addInstructionListener(new PartitionInstructionListener(){ + partitionService.addInstructionListener(new PartitionInstructionListener() { @Override - public 
void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException { - switch (finalCaseNo[0]){ + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws + PDException { + switch (finalCaseNo[0]) { case 2: - Assert.assertEquals(5, storeService.getShardGroup(partition.getId()).getShardsCount()); + Assert.assertEquals(5, storeService.getShardGroup(partition.getId()) + .getShardsCount()); break; case 3: - storeService.getShardGroup(partition.getId()).getShardsList().forEach(shard -> { - Assert.assertNotEquals(shard.getStoreId(), stores[0].getId()); - }); + storeService.getShardGroup(partition.getId()).getShardsList() + .forEach(shard -> { + Assert.assertNotEquals(shard.getStoreId(), + stores[0].getId()); + }); break; } @@ -281,32 +334,38 @@ public void splitPartition(Metapb.Partition partition, SplitPartition splitParti } @Override - public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException{ + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws + PDException { } @Override - public void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException { + public void movePartition(Metapb.Partition partition, + MovePartition movePartition) throws PDException { } @Override - public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException { + public void cleanPartition(Metapb.Partition partition, + CleanPartition cleanPartition) throws PDException { } @Override - public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) + public void changePartitionKeyRange(Metapb.Partition partition, + PartitionKeyRange partitionKeyRange) throws PDException { } }); Metapb.Partition partition = partitions[0].getPartition(); - leader = Metapb.Shard.newBuilder(storeService.getShardGroup(partition.getId()).getShardsList().get(0)).build(); + leader = Metapb.Shard.newBuilder( + storeService.getShardGroup(partition.getId()).getShardsList().get(0)).build(); Metapb.Shard finalLeader = leader; partitionService.addStatusListener(new PartitionStatusListener() { @Override - public void onPartitionChanged(Metapb.Partition partition, Metapb.Partition newPartition) { + public void onPartitionChanged(Metapb.Partition partition, + Metapb.Partition newPartition) { } @@ -316,35 +375,43 @@ public void onPartitionRemoved(Metapb.Partition partition) { } }); // 测试修改图 - caseNo[0] = 1; partitionService.updateGraph(graph); - for(int i = 0; i< partitions.length ;i++) { - partitions[i] = partitionService.getPartitionShard(graph.getGraphName(), intToByteArray(i)); + caseNo[0] = 1; + partitionService.updateGraph(graph); + for (int i = 0; i < partitions.length; i++) { + partitions[i] = + partitionService.getPartitionShard(graph.getGraphName(), intToByteArray(i)); Assert.assertEquals(3, storeService.getShardGroup(i).getShardsCount()); } graph = Metapb.Graph.newBuilder(graph) - .setGraphName("defaultGH") + .setGraphName("defaultGH") - .setPartitionCount(10) - .build(); - caseNo[0] = 2; partitionService.updateGraph(graph); + .setPartitionCount(10) + .build(); + caseNo[0] = 2; + partitionService.updateGraph(graph); // 测试store离线 - caseNo[0] = 3; partitionService.storeOffline(stores[0]); - + caseNo[0] = 3; + partitionService.storeOffline(stores[0]); Metapb.PartitionStats stats = Metapb.PartitionStats.newBuilder() - .addGraphName(partition.getGraphName()) - .setId(partition.getId()) - 
.setLeader(Metapb.Shard.newBuilder(leader).setRole(Metapb.ShardRole.Leader)) - .build(); + .addGraphName(partition.getGraphName()) + .setId(partition.getId()) + .setLeader( + Metapb.Shard.newBuilder(leader) + .setRole( + Metapb.ShardRole.Leader)) + .build(); // 测试leader飘移 - caseNo[0] = 4; partitionService.partitionHeartbeat(stats); + caseNo[0] = 4; + partitionService.partitionHeartbeat(stats); AtomicReference shard = new AtomicReference<>(); - Metapb.PartitionShard ss = partitionService.getPartitionShardById(partition.getGraphName(), partition.getId()); - storeService.getShardList(partition.getId()).forEach(s->{ - if ( s.getRole() == Metapb.ShardRole.Leader){ + Metapb.PartitionShard ss = + partitionService.getPartitionShardById(partition.getGraphName(), partition.getId()); + storeService.getShardList(partition.getId()).forEach(s -> { + if (s.getRole() == Metapb.ShardRole.Leader) { Assert.assertNull(shard.get()); shard.set(s); } @@ -354,16 +421,6 @@ public void onPartitionRemoved(Metapb.Partition partition) { } - - public static byte[] intToByteArray(int i) { - byte[] result = new byte[4]; - result[0] = (byte)((i >> 24) & 0xFF); - result[1] = (byte)((i >> 16) & 0xFF); - result[2] = (byte)((i >> 8) & 0xFF); - result[3] = (byte)(i & 0xFF); - return result; - } - // @Test public void testMergeGraphParams() throws PDException { StoreNodeService storeService = new StoreNodeService(pdConfig); @@ -371,25 +428,26 @@ public void testMergeGraphParams() throws PDException { Metapb.Graph dfGraph = Metapb.Graph.newBuilder() - .setPartitionCount(pdConfig.getPartition().getTotalCount()) + .setPartitionCount( + pdConfig.getPartition().getTotalCount()) - .build(); + .build(); Metapb.Graph graph1 = Metapb.Graph.newBuilder() - .setGraphName("test") - .setPartitionCount(20) + .setGraphName("test") + .setPartitionCount(20) - .build(); + .build(); Metapb.Graph graph2 = Metapb.Graph.newBuilder() - .setGraphName("test") - .setPartitionCount(7).build(); + .setGraphName("test") + .setPartitionCount(7).build(); Metapb.Graph graph3 = Metapb.Graph.newBuilder() - .setGraphName("test") - .build(); + .setGraphName("test") + .build(); Metapb.Graph graph4 = Metapb.Graph.newBuilder() - .setGraphName("test") - .build(); + .setGraphName("test") + .build(); Metapb.Graph graph = Metapb.Graph.newBuilder(dfGraph).mergeFrom(graph2).build(); Assert.assertEquals(graph2.getGraphName(), graph.getGraphName()); @@ -410,26 +468,18 @@ public void testMergeGraphParams() throws PDException { } - public static void deleteDirectory(File dir) { - try { - FileUtils.deleteDirectory(dir); - } catch (IOException e) { - System.out.println(String.format("Failed to start ....,%s", e.getMessage())); - } - } - // @Test - public void test(){ + public void test() { int[] n = new int[3]; - if ( ++n[2] > 1){ + if (++n[2] > 1) { System.out.println(n[2]); } - if ( ++n[2] > 1){ + if (++n[2] > 1) { System.out.println(n[2]); } - if ( ++n[2] > 1){ + if (++n[2] > 1) { System.out.println(n[2]); } } diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java index 21afc4136f..0f26b1e55d 100644 --- a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
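The testMergeGraphParams hunk above depends on protobuf merge semantics: Metapb.Graph.newBuilder(defaults).mergeFrom(override).build() keeps every field that the override leaves unset and takes the override's value where one is set. A minimal sketch, assuming the generated Metapb classes from this repository (the helper name and the sample values are illustrative):

```java
import com.baidu.hugegraph.pd.grpc.Metapb;

final class GraphMergeSketch {

    // Overlay per-graph settings onto cluster-wide defaults.
    static Metapb.Graph withDefaults(Metapb.Graph defaults, Metapb.Graph override) {
        // mergeFrom copies only the fields that `override` actually sets,
        // so anything it leaves unset falls back to `defaults`.
        return Metapb.Graph.newBuilder(defaults).mergeFrom(override).build();
    }

    static void demo() {
        Metapb.Graph defaults = Metapb.Graph.newBuilder().setPartitionCount(12).build();
        Metapb.Graph named = Metapb.Graph.newBuilder().setGraphName("test").build();
        Metapb.Graph merged = withDefaults(defaults, named);
        // merged.getGraphName()      -> "test" (set by the override)
        // merged.getPartitionCount() -> 12     (unset in the override, default survives)
    }
}
```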
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd; import java.io.File; diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java index 28deca26fa..46fde5ec65 100644 --- a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.common; // import org.junit.Test; @@ -21,8 +38,9 @@ public void testHashCode() { } - for (int i = 0; i < counter.length; i++) + for (int i = 0; i < counter.length; i++) { System.out.println(i + " " + counter[i]); + } } diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java index b0a7568499..833313454a 100644 --- a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java @@ -1,25 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.store; -import com.baidu.hugegraph.pd.common.PDException; +import java.io.File; +import java.io.IOException; +import java.nio.file.Paths; -import org.apache.hugegraph.pd.config.PDConfig; import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.config.PDConfig; import org.junit.Assert; import org.junit.BeforeClass; -// import org.junit.Test; -import java.io.File; -import java.io.IOException; -import java.nio.file.Paths; +import com.baidu.hugegraph.pd.common.PDException; public class HgKVStoreImplTest { - static PDConfig pdConfig; static final String testPath = "tmp/test"; + static PDConfig pdConfig; @BeforeClass public static void init() throws IOException { File testFile = new File(testPath); - if(testFile.exists()) { + if (testFile.exists()) { FileUtils.deleteDirectory(testFile); } FileUtils.forceMkdir(testFile); @@ -38,7 +54,7 @@ public void Test() throws PDException { byte[] value = "pd".getBytes(); kvStore.put(key, value); } - for(int i = 0; i<100; i++){ + for (int i = 0; i < 100; i++) { byte[] key = String.format("k%03d", i).getBytes(); byte[] value = ("value" + i).getBytes(); kvStore.put(key, value); @@ -56,7 +72,7 @@ public void TestSnapshot() throws PDException { kvStore.init(pdConfig); // put 100 data - for(int i = 0; i<100; i++){ + for (int i = 0; i < 100; i++) { byte[] key = String.format("k%03d", i).getBytes(); byte[] value = ("value" + i).getBytes(); kvStore.put(key, value); @@ -68,7 +84,7 @@ public void TestSnapshot() throws PDException { kvStore.saveSnapshot(snapshotPath); // put another 100 data - for(int i = 100; i<200; i++){ + for (int i = 100; i < 200; i++) { byte[] key = String.format("k%03d", i).getBytes(); byte[] value = ("value" + i).getBytes(); kvStore.put(key, value); @@ -80,7 +96,7 @@ public void TestSnapshot() throws PDException { Assert.assertEquals(100, kvStore.scanPrefix("k".getBytes()).size()); // put another 100 data - for(int i = 100; i<200; i++){ + for (int i = 100; i < 200; i++) { byte[] key = String.format("k%03d", i).getBytes(); byte[] value = ("value" + i).getBytes(); kvStore.put(key, value); diff --git a/hg-pd-core/src/test/resources/log4j2.xml b/hg-pd-core/src/test/resources/log4j2.xml index a157b6412b..125b8ea9f7 100644 --- a/hg-pd-core/src/test/resources/log4j2.xml +++ b/hg-pd-core/src/test/resources/log4j2.xml @@ -1,5 +1,22 @@ + + @@ -25,7 +42,7 @@ - + @@ -33,8 +50,8 @@ - - + + @@ -52,7 +69,7 @@ - + @@ -60,8 +77,8 @@ - - + + @@ -80,7 +97,7 @@ - + @@ -88,8 +105,8 @@ - - + + From fd13bb436b15b0ca770f93b81b29e2a28fdd9cd3 Mon Sep 17 00:00:00 2001 From: imbajin Date: Sat, 6 May 2023 19:50:02 +0800 Subject: [PATCH 05/18] refact: format root/parent modules & pd-service package name & rearrange code Change-Id: Id8ec0a0d754f646c59f7251158dd89de669b2016 --- .mvn/wrapper/MavenWrapperDownloader.java | 20 ++++---- .mvn/wrapper/maven-wrapper.properties | 16 +++++++ README.md | 45 +++++++++++++++++- build-pre.sh | 17 +++++++ build.sh | 17 +++++++ ci.yml | 22 ++++----- deploy-release.sh | 17 +++++++ deploy-snapshot.sh | 17 +++++++ .../pd/client/AbstractClientStubProxy.java | 2 +- .../hugegraph/pd/client/DiscoveryClient.java | 4 +- .../apache/hugegraph/pd/client/KvClient.java | 47 ++++++++++--------- .../apache/hugegraph/pd/client/PDClient.java | 2 +- .../hugegraph/pd/client/PDPulseTest.java | 2 +- .../hugegraph/pd/client/PDWatchTest.java | 2 +- .../hugegraph/pd/common/PartitionCache.java | 9 ++-- .../apache/hugegraph/pd/ConfigService.java | 2 +- .../org/apache/hugegraph/pd/IdService.java | 2 +- 
.../org/apache/hugegraph/pd/KvService.java | 2 +- .../apache/hugegraph/pd/StoreNodeService.java | 4 +- .../hugegraph/pd/TaskScheduleService.java | 5 +- .../hugegraph/pd/meta/DiscoveryMetaStore.java | 1 + .../apache/hugegraph/pd/meta/IdMetaStore.java | 1 + .../hugegraph/pd/raft/RaftRpcProcessor.java | 1 + .../assembly/static/bin/stop-hugegraph-pd.sh | 2 +- .../src/assembly/static/conf/application.yml | 2 +- .../pd/license/LicenseVerifierService.java | 6 +-- .../hugegraph/pd/model/PromTargetsModel.java | 3 +- .../pd/notice/NoticeBroadcaster.java | 2 +- .../pd/service/KvServiceGrpcImpl.java | 2 +- .../hugegraph/pd/service/PDPulseService.java | 1 + .../hugegraph/pd/service/PDService.java | 7 +-- .../pd/service/PromTargetsService.java | 4 +- .../hugegraph/pd/service/UpgradeService.java | 5 +- .../pd/upgrade/VersionScriptFactory.java | 9 ++-- .../pd/upgrade/VersionUpgradeScript.java | 2 +- .../upgrade/scripts/PartitionMetaUpgrade.java | 5 +- .../pd/upgrade/scripts/TaskCleanUpgrade.java | 5 +- .../hugegraph/pd/watch/KvWatchSubject.java | 2 +- .../src/main/resources/application.yml | 2 +- .../test/resources/application-server0.yml | 4 +- .../test/resources/application-server1.yml | 4 +- .../test/resources/application-server2.yml | 2 +- .../test/resources/application-server3.yml | 2 +- .../hugegraph/pd/common/HgAssertTest.java | 6 ++- local-release.sh | 17 +++++++ mvnw | 28 +++++------ pom.xml | 21 ++++++++- settings.xml | 25 ++++++++-- start_pd_server.sh | 17 +++++++ 49 files changed, 321 insertions(+), 121 deletions(-) rename hg-pd-service/src/main/java/{com/baidu => org/apache}/hugegraph/pd/upgrade/VersionScriptFactory.java (90%) rename hg-pd-service/src/main/java/{com/baidu => org/apache}/hugegraph/pd/upgrade/VersionUpgradeScript.java (97%) rename hg-pd-service/src/main/java/{com/baidu => org/apache}/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java (97%) rename hg-pd-service/src/main/java/{com/baidu => org/apache}/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java (94%) diff --git a/.mvn/wrapper/MavenWrapperDownloader.java b/.mvn/wrapper/MavenWrapperDownloader.java index b901097f2d..574feef6be 100644 --- a/.mvn/wrapper/MavenWrapperDownloader.java +++ b/.mvn/wrapper/MavenWrapperDownloader.java @@ -13,6 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + import java.net.*; import java.io.*; import java.nio.channels.*; @@ -24,8 +25,9 @@ public class MavenWrapperDownloader { /** * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. */ - private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" - + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; + private static final String DEFAULT_DOWNLOAD_URL = + "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" + + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; /** * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to @@ -41,7 +43,8 @@ public class MavenWrapperDownloader { ".mvn/wrapper/maven-wrapper.jar"; /** - * Name of the property which should be used to override the default download url for the wrapper. + * Name of the property which should be used to override the default download url for the + * wrapper. */ private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; @@ -54,7 +57,7 @@ public static void main(String args[]) { // wrapperUrl parameter. 
File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); String url = DEFAULT_DOWNLOAD_URL; - if(mavenWrapperPropertyFile.exists()) { + if (mavenWrapperPropertyFile.exists()) { FileInputStream mavenWrapperPropertyFileInputStream = null; try { mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); @@ -65,7 +68,7 @@ public static void main(String args[]) { System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); } finally { try { - if(mavenWrapperPropertyFileInputStream != null) { + if (mavenWrapperPropertyFileInputStream != null) { mavenWrapperPropertyFileInputStream.close(); } } catch (IOException e) { @@ -76,10 +79,11 @@ public static void main(String args[]) { System.out.println("- Downloading from: " + url); File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); - if(!outputFile.getParentFile().exists()) { - if(!outputFile.getParentFile().mkdirs()) { + if (!outputFile.getParentFile().exists()) { + if (!outputFile.getParentFile().mkdirs()) { System.out.println( - "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + "- ERROR creating output directory '" + + outputFile.getParentFile().getAbsolutePath() + "'"); } } System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties index 6d49a6c17b..9c2bd37721 100644 --- a/.mvn/wrapper/maven-wrapper.properties +++ b/.mvn/wrapper/maven-wrapper.properties @@ -1,2 +1,18 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar diff --git a/README.md b/README.md index fa3a61c2c7..c01d5a26b2 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,9 @@ # PD部署说明 + ## PD配置 + - 配置文件在application.yml + ```` license: # 验证使用的配置文件所在目录,包括主题、密码等 @@ -23,15 +26,20 @@ partition: # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count store-max-shard-count: 12 ```` + ##store配置 -配置文件在application.yml,配置pdserver的address + ```` pdserver: # pd服务地址,多个pd地址用逗号分割 address: pdserver ip:端口 ```` + ## Hugegraph配置 + - 配置项在hugegraph的启动脚本start-hugegraph.sh中 + ```` if [ -z "$META_SERVERS" ];then META_SERVERS="pdserver ip:端口" @@ -40,7 +48,9 @@ if [ -z "$PD_PEERS" ];then PD_PEERS="pdserver ip:端口" fi ```` + ## RESTFUL API + - pd提供了一些restful API可以获取集群分区,图,存储节点等一系列信息 ###获取集群统计信息 @@ -48,6 +58,7 @@ fi #### 获取集群统计信息 ###### Method & Url + ``` GET http://localhost:8620/v1/cluster ``` @@ -115,6 +126,7 @@ GET http://localhost:8620/v1/cluster #### 获取pd集群成员信息 ###### Method & Url + ``` GET http://localhost:8620/v1/member ``` @@ -160,19 +172,23 @@ GET http://localhost:8620/v1/member "status": 0 } ``` + ###存储节点相关 #### 获取集群所有的store的信息 ###### Method & Url + ``` GET http://localhost:8620/v1/stores ``` + ###### Response Status ```json 200 ``` + ###### Request Body ```json @@ -241,18 +257,23 @@ GET http://localhost:8620/v1/stores "status": 0 } ``` + #### 获取单个store的信息 ###### Method & Url + ``` GET http://localhost:8620/v1/store/{storeId} ``` + ###### Response Status ```json 200 ``` + ###### Request Body + ```json { "message": "OK", @@ -315,10 +336,13 @@ GET http://localhost:8620/v1/store/{storeId} "status": 0 } ``` + ### 分区相关 + #### 获取分区信息 ###### Method & Url + ``` GET http://localhost:8620/v1/highLevelPartitions ``` @@ -475,10 +499,13 @@ GET http://localhost:8620/v1/highLevelPartitions "status": 0 } ``` + ###获取图信息 + #### 获取所有的图信息 ###### Method & Url + ``` GET http://localhost:8620/v1/graphs ``` @@ -630,9 +657,11 @@ GET http://localhost:8620/v1/graphs "status": 0 } ``` + #### 获取单个图信息 ###### Method & Url + ``` GET http://localhost:8620/v1/graph/{graphName} ``` @@ -726,10 +755,13 @@ GET http://localhost:8620/v1/graph/{graphName} "status": 0 } ``` + ###获取shard的信息 + #### 获取所有shard的信息 ###### Method & Url + ``` GET http://localhost:8620/v1/shards ``` @@ -800,15 +832,19 @@ GET http://localhost:8620/v1/shards "status": 0 } ``` + ###服务注册 + #### 注册服务 ###### Method & Url + ``` POST http://127.0.0.1:8620/v1/registry ``` ###### Request Body + ```json 200 ``` @@ -826,6 +862,7 @@ POST http://127.0.0.1:8620/v1/registry } } ``` + appName:所属服务名 version:所属服务版本号 address:服务实例地址+端口 @@ -841,17 +878,21 @@ labels: 自定义标签,若服务名为'hg'即hugeserver时,需要提供key "data": null } ``` + errorType:状态码 message:状态码为错误时的具体出错信息 data:无返回数据 + #### 服务实例获取 ###### Method & Url + ``` POST http://127.0.0.1:8620/v1/registryInfo ``` ###### Request Body + ```json 200 ``` @@ -867,10 +908,11 @@ POST http://127.0.0.1:8620/v1/registryInfo } } ``` + 以下三项可全部为空,则获取所有服务节点的信息: -- appName:过滤所属服务名的条件 -- version:过滤所属服务版本号的条件,此项有值,则appName不能为空 --- labels: 过滤自定义标签的条件 +-- labels: 过滤自定义标签的条件 ###### Response Body @@ -892,6 +934,7 @@ POST http://127.0.0.1:8620/v1/registryInfo ] } ``` + errorType:状态码 message:状态码为错误时的具体出错信息 data:获取的服务节点信息 \ No newline at end of file diff --git a/build-pre.sh b/build-pre.sh index 06efc075af..9350c36122 100644 --- a/build-pre.sh +++ b/build-pre.sh @@ -1,4 +1,21 @@ #!/bin/bash +# 
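The README hunk above documents the PD REST endpoints (cluster, member, stores, partitions, graphs, shards, registry) together with their JSON responses. As a usage illustration only, assuming a PD REST service listening on localhost:8620 as in those examples, the cluster statistics endpoint can be queried from Java 11+ like this:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PdClusterInfoExample {

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                                         .uri(URI.create("http://localhost:8620/v1/cluster"))
                                         .GET()
                                         .build();
        HttpResponse<String> response =
                client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode()); // 200 when PD is reachable
        System.out.println(response.body());       // JSON with message/data/status fields
    }
}
```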
+# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + mkdir output touch output/1 export JAVA_HOME=$ORACLEJDK_1_8_0_HOME diff --git a/build.sh b/build.sh index dd45ad01e3..785db63dc2 100644 --- a/build.sh +++ b/build.sh @@ -1,4 +1,21 @@ #!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + export PATH=$MAVEN_3_5_3_BIN:$ORACLEJDK_11_0_7_BIN:$PATH export JAVA_HOME=$ORACLEJDK_11_0_7_HOME export MAVEN_HOME=$MAVEN_3_5_3_HOME diff --git a/ci.yml b/ci.yml index 13c56261fe..1b0d705d8e 100644 --- a/ci.yml +++ b/ci.yml @@ -1,22 +1,22 @@ Global: - tool : build_submitter + tool: build_submitter languages: # Java Version: 1.7, 1.8, 9, 10 - - language : java - version : 11 + - language: java + version: 11 envs: # Maven Version: 3.0.4, 3.1.1, 3.2.5, 3.3.9, 3.5.3 - - env : maven - version : 3.5.3 + - env: maven + version: 3.5.3 Default: - profile : [dev] + profile: [ dev ] Profiles: - profile: - name : dev - tool : build_submitter - env : DECK_CENTOS7U5_K3 - command : sh build.sh - release : true + name: dev + tool: build_submitter + env: DECK_CENTOS7U5_K3 + command: sh build.sh + release: true - profile: name: deploy diff --git a/deploy-release.sh b/deploy-release.sh index 3712135c59..e9421724aa 100644 --- a/deploy-release.sh +++ b/deploy-release.sh @@ -1,4 +1,21 @@ #!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + readonly VER=3.6.3 readonly REPO_URL=http://10.14.139.8:8081/artifactory/star-local #mvn -DnewVersion=${VER}-SNAPSHOT -DprocessAllModules=true -DgenerateBackupPoms=false versions:set diff --git a/deploy-snapshot.sh b/deploy-snapshot.sh index 3712135c59..e9421724aa 100644 --- a/deploy-snapshot.sh +++ b/deploy-snapshot.sh @@ -1,4 +1,21 @@ #!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + readonly VER=3.6.3 readonly REPO_URL=http://10.14.139.8:8081/artifactory/star-local #mvn -DnewVersion=${VER}-SNAPSHOT -DprocessAllModules=true -DgenerateBackupPoms=false versions:set diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java index e4c5cf63ee..b0f066805d 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java @@ -28,9 +28,9 @@ **/ public class AbstractClientStubProxy { + private final LinkedList hostList = new LinkedList<>(); private AbstractBlockingStub blockingStub; private AbstractStub stub; - private final LinkedList hostList = new LinkedList<>(); public AbstractClientStubProxy(String[] hosts) { for (String host : hosts) if (!host.isEmpty()) hostList.offer(host); diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java index 3b0d2d88b3..ef2c18c065 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java @@ -45,12 +45,12 @@ @Slf4j public abstract class DiscoveryClient implements Closeable, Discoverable { + private final Timer timer = new Timer("serverHeartbeat", true); + private final AtomicBoolean requireResetStub = new AtomicBoolean(false); protected int period; //心跳周期 LinkedList pdAddresses = new LinkedList<>(); ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(); - private final Timer timer = new Timer("serverHeartbeat", true); private volatile int currentIndex; // 当前在用pd地址位置 - private final AtomicBoolean requireResetStub = new AtomicBoolean(false); private int maxTime = 6; private ManagedChannel channel = null; private DiscoveryServiceGrpc.DiscoveryServiceBlockingStub registerStub; diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java index 908a49130d..5c5e1bc1ca 100644 --- 
a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java @@ -171,43 +171,33 @@ public void onCompleted() { } }; - } BiConsumer listenWrapper = (key, consumer) -> { - try { - listen(key, consumer); - } catch (PDException e) { - try { - log.warn("start listen with warning:", e); - Thread.sleep(1000); - } catch (InterruptedException ex) { - } - } - }; + } public void listen(String key, Consumer consumer) throws PDException { StreamObserver observer = getObserver(key, consumer, listenWrapper); acquire(); WatchRequest k = WatchRequest.newBuilder().setClientId(clientId.get()).setKey(key).build(); streamingCall(KvServiceGrpc.getWatchMethod(), k, observer, 1); - } BiConsumer prefixListenWrapper = (key, consumer) -> { + } + + public void listenPrefix(String prefix, Consumer consumer) throws PDException { + StreamObserver observer = getObserver(prefix, consumer, prefixListenWrapper); + acquire(); + WatchRequest k = + WatchRequest.newBuilder().setClientId(clientId.get()).setKey(prefix).build(); + streamingCall(KvServiceGrpc.getWatchPrefixMethod(), k, observer, 1); + } BiConsumer listenWrapper = (key, consumer) -> { try { - listenPrefix(key, consumer); + listen(key, consumer); } catch (PDException e) { try { - log.warn("start listenPrefix with warning:", e); + log.warn("start listen with warning:", e); Thread.sleep(1000); } catch (InterruptedException ex) { } } }; - public void listenPrefix(String prefix, Consumer consumer) throws PDException { - StreamObserver observer = getObserver(prefix, consumer, prefixListenWrapper); - acquire(); - WatchRequest k = - WatchRequest.newBuilder().setClientId(clientId.get()).setKey(prefix).build(); - streamingCall(KvServiceGrpc.getWatchPrefixMethod(), k, observer, 1); - } - private void acquire() { if (clientId.get() == 0L) { try { @@ -247,7 +237,17 @@ public Map getWatchMap(T response) { values.put(key, value); } return values; - } + } BiConsumer prefixListenWrapper = (key, consumer) -> { + try { + listenPrefix(key, consumer); + } catch (PDException e) { + try { + log.warn("start listenPrefix with warning:", e); + Thread.sleep(1000); + } catch (InterruptedException ex) { + } + } + }; public LockResponse lock(String key, long ttl) throws PDException { acquire(); @@ -307,4 +307,5 @@ public void close() { + } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java index a3ec984ff6..9a8fc21e75 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java @@ -1201,8 +1201,8 @@ default void onShardGroupChanged(WatchResponse event) { } static class StubProxy { - private volatile PDGrpc.PDBlockingStub stub; private final LinkedList hostList = new LinkedList<>(); + private volatile PDGrpc.PDBlockingStub stub; public StubProxy(String[] hosts) { for (String host : hosts) if (!host.isEmpty()) hostList.offer(host); diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java index 3833ef9f9b..6a087ae96a 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java @@ -78,8 +78,8 @@ public void listen() { private class PulseListener implements PDPulse.Listener { - CountDownLatch 
latch = new CountDownLatch(10); private final String listenerName; + CountDownLatch latch = new CountDownLatch(10); private PulseListener(CountDownLatch latch, String listenerName) { this.latch = latch; diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java index 6f5f448121..5b94730346 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java @@ -63,8 +63,8 @@ public void watch() { } private class WatchListener implements PDWatch.Listener { - CountDownLatch latch = new CountDownLatch(10); private final String watcherName; + CountDownLatch latch = new CountDownLatch(10); private WatchListener(CountDownLatch latch, String watcherName) { this.latch = latch; diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java index a0cc9bc025..e122788084 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java @@ -34,19 +34,16 @@ public class PartitionCache { + // 读写锁对象 + private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + Lock writeLock = readWriteLock.writeLock(); // 每张图一个缓存 private volatile Map> keyToPartIdCache; // graphName + PartitionID组成key private volatile Map partitionCache; - private volatile Map shardGroupCache; - private volatile Map storeCache; - private volatile Map graphCache; - // 读写锁对象 - private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); - Lock writeLock = readWriteLock.writeLock(); public PartitionCache() { keyToPartIdCache = new HashMap<>(); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java index 58b32ce171..eadee3c26a 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java @@ -32,8 +32,8 @@ @Slf4j public class ConfigService implements RaftStateListener { - private PDConfig pdConfig; private final ConfigMetaStore meta; + private PDConfig pdConfig; public ConfigService(PDConfig config) { this.pdConfig = config; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java index 407d6c48de..d808b97490 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java @@ -25,8 +25,8 @@ public class IdService { - private PDConfig pdConfig; private final IdMetaStore meta; + private PDConfig pdConfig; public IdService(PDConfig config) { this.pdConfig = config; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java index 91f5225e0d..4166f3b22b 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java @@ -51,8 +51,8 @@ public class KvService { private static final String LOCK_PREFIX = "L"; private static final String KV_PREFIX_DELIMITER = KV_PREFIX + KV_DELIMITER; private static final byte[] EMPTY_VALUE = new byte[0]; - private PDConfig pdConfig; private final 
MetadataRocksDBStore meta; + private PDConfig pdConfig; public KvService(PDConfig config) { this.pdConfig = config; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java index 1ff96126be..97c06953dc 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java @@ -59,14 +59,13 @@ public class StoreNodeService { // Store状态监听 private final List statusListeners; private final List shardGroupStatusListeners; - private PartitionService partitionService; private final StoreInfoMeta storeInfoMeta; private final TaskInfoMeta taskInfoMeta; private final Random random = new Random(System.currentTimeMillis()); - private Metapb.ClusterStats clusterStats; private final KvService kvService; private final ConfigService configService; private final PDConfig pdConfig; + private PartitionService partitionService; private final Runnable quotaChecker = () -> { try { getQuota(); @@ -76,6 +75,7 @@ public class StoreNodeService { e); } }; + private Metapb.ClusterStats clusterStats; public StoreNodeService(PDConfig config) { this.pdConfig = config; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java index 06c9f8f1d1..4816f4731a 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java @@ -66,8 +66,6 @@ public class TaskScheduleService { private final StoreMonitorDataService storeMonitorDataService; private final KvService kvService; private final LogService logService; - private long lastStoreTurnoffTime = 0; - private long lastBalanceLeaderTime = 0; // 先按照value排序,再按照key排序 private final Comparator> kvPairComparatorAsc = (o1, o2) -> { if (o1.getValue() == o2.getValue()) { @@ -75,7 +73,6 @@ public class TaskScheduleService { } return o1.getValue().compareTo(o2.getValue()); }; - // 先按照value排序(倒序),再按照key排序(升序) private final Comparator> kvPairComparatorDesc = (o1, o2) -> { if (o1.getValue() == o2.getValue()) { @@ -83,6 +80,8 @@ public class TaskScheduleService { } return o2.getValue().compareTo(o1.getValue()); }; + private long lastStoreTurnoffTime = 0; + private long lastBalanceLeaderTime = 0; public TaskScheduleService(PDConfig config, StoreNodeService storeService, diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java index d9aa5c5e85..52b548a738 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java @@ -41,6 +41,7 @@ public class DiscoveryMetaStore extends MetadataRocksDBStore { //appName --> address --> registryInfo private static final String PREFIX = "REGIS-"; private static final String SPLITTER = "-"; + public DiscoveryMetaStore(PDConfig pdConfig) { super(pdConfig); } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java index 5265569a1b..c37c2e5c10 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java @@ -49,6 +49,7 @@ public class IdMetaStore extends 
MetadataRocksDBStore { private static final ConcurrentHashMap SEQUENCES = new ConcurrentHashMap<>(); public static long CID_DEL_TIMEOUT = 24 * 3600 * 1000; private final long clusterId; + public IdMetaStore(PDConfig pdConfig) { super(pdConfig); this.clusterId = pdConfig.getClusterId(); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java index ca3b2f3961..984e59c60a 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java @@ -30,6 +30,7 @@ public class RaftRpcProcessor implements private final Class requestClass; private final RaftEngine raftEngine; + public RaftRpcProcessor(Class requestClass, RaftEngine raftEngine) { this.requestClass = requestClass; this.raftEngine = raftEngine; diff --git a/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh b/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh index 0183a20af2..55499ee59c 100644 --- a/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh +++ b/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh @@ -27,7 +27,7 @@ abs_path() { echo "$( cd -P "$( dirname "$SOURCE" )" && pwd )" } -BIN=`abs_path` +BIN=$(abs_path) TOP="$(cd $BIN/../ && pwd)" . "$BIN"/util.sh diff --git a/hg-pd-dist/src/assembly/static/conf/application.yml b/hg-pd-dist/src/assembly/static/conf/application.yml index 94c69e4ff5..c1ed575e74 100644 --- a/hg-pd-dist/src/assembly/static/conf/application.yml +++ b/hg-pd-dist/src/assembly/static/conf/application.yml @@ -24,7 +24,7 @@ grpc: server: # rest服务端口号 - port : 8620 + port: 8620 pd: # 存储路径 diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java index 72b9832a43..bc371ca95b 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java @@ -70,15 +70,15 @@ public class LicenseVerifierService { private static final Duration CHECK_INTERVAL = Duration.ofMinutes(10); - private static LicenseContent content; - private static KvService kvService; private static final String contentKey = "contentKey"; private static final Gson mapper = new Gson(); + private static LicenseContent content; + private static KvService kvService; private static volatile boolean installed = false; private final MachineInfo machineInfo; - SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private final PDConfig pdConfig; private final Instant lastCheckTime = Instant.now(); + SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); // private final LicenseVerifyParam verifyParam; private LicenseVerifyManager manager; private ManagedChannel channel; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java index 091b91c7be..105d29856f 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java @@ -30,9 +30,8 @@ public class PromTargetsModel { private static final String LABEL_SCHEME = "__scheme__"; private static final String LABEL_JOB_NAME = "job"; private static final String LABEL_CLUSTER 
= "cluster"; - - private Set targets = new HashSet<>(); private final Map labels = new HashMap<>(); + private Set targets = new HashSet<>(); private PromTargetsModel() { } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java index 1d88efd5c7..679d36841f 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java @@ -29,9 +29,9 @@ */ @Slf4j public class NoticeBroadcaster { + private final Supplier noticeSupplier; private long noticeId; private String durableId; - private final Supplier noticeSupplier; private Supplier durableSupplier; private Function removeFunction; private int state; //0=ready; 1=notified; 2=done ack; -1=error diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java index 7b4952e906..9eac62e24a 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java @@ -64,12 +64,12 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implements RaftStateListener, ServiceGrpc { + private final ManagedChannel channel = null; KvService kvService; AtomicLong count = new AtomicLong(); String msg = "node is not leader,it is necessary to redirect to the leader on the client"; @Autowired private PDConfig pdConfig; - private final ManagedChannel channel = null; private KvWatchSubject subjects; private ScheduledExecutorService executor; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java index 674f70c30e..b6a194a7d5 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java @@ -53,6 +53,7 @@ public class PDPulseService extends HgPdPulseGrpc.HgPdPulseImplBase { @Autowired private PDConfig pdConfig; private QueueStore queueStore = null; + public PDPulseService() { PDPulseSubject.setQueueRetrieveFunction(() -> getQueue()); PDPulseSubject.setQueueDurableFunction(getQueueDurableFunction()); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java index cc9f0a01e5..0450daee55 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @@ -84,9 +84,10 @@ public class PDService extends PDGrpc.PDImplBase implements RaftStateListener { static String TASK_ID_KEY = "task_id"; + private final Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build(); @Autowired private PDConfig pdConfig; - private StoreNodeService storeNodeService; private PartitionService partitionService; private TaskScheduleService taskService; @@ -94,11 +95,7 @@ public class PDService extends PDGrpc.PDImplBase implements RaftStateListener { private ConfigService configService; private LogService logService; private LicenseVerifierService licenseVerifierService; - private StoreMonitorDataService 
storeMonitorDataService; - - private final Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( - Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build(); private ManagedChannel channel; private Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java index da501106a3..f187722213 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java @@ -62,13 +62,13 @@ public class PromTargetsService { .setScheme("http") .setMetricsPath( "/actuator/prometheus"); + private final HgMapCache> targetsCache = + HgMapCache.expiredOf(24 * 60 * 60 * 1000);// expired after 24H. @Autowired private PDConfig pdConfig; @Autowired private PDService pdService; private RegistryService register; - private final HgMapCache> targetsCache = - HgMapCache.expiredOf(24 * 60 * 60 * 1000);// expired after 24H. private RegistryService getRegister() { if (this.register == null) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java index f996c8fe64..1756eea26c 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java @@ -17,12 +17,13 @@ package com.baidu.hugegraph.pd.service; +import org.apache.hugegraph.pd.upgrade.VersionScriptFactory; +import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; + import com.baidu.hugegraph.pd.KvService; import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.config.PDConfig; import com.baidu.hugegraph.pd.rest.API; -import com.baidu.hugegraph.pd.upgrade.VersionScriptFactory; -import com.baidu.hugegraph.pd.upgrade.VersionUpgradeScript; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionScriptFactory.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java similarity index 90% rename from hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionScriptFactory.java rename to hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java index c35e95a68b..dcf6ca3443 100644 --- a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionScriptFactory.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java @@ -15,18 +15,17 @@ * under the License. 
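The VersionScriptFactory move shows a volatile singleton field plus a static block that registers the built-in upgrade scripts (PartitionMetaUpgrade, TaskCleanUpgrade). Below is a rough sketch of that registration pattern; the interface and scripts are stand-ins, and the lazy getInstance body is an assumption since the accessor is not shown in this hunk.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public final class ScriptRegistry {

        interface UpgradeScript {
            void run();
        }

        private static final List<UpgradeScript> SCRIPTS = new ArrayList<>();
        private static volatile ScriptRegistry instance;

        static {
            // register built-in scripts once, at class-load time
            registerScript(() -> System.out.println("partition meta upgrade"));
            registerScript(() -> System.out.println("task clean upgrade"));
        }

        private ScriptRegistry() {
        }

        public static void registerScript(UpgradeScript script) {
            SCRIPTS.add(script);
        }

        public static ScriptRegistry getInstance() {
            // assumed lazy initialization around the volatile field
            if (instance == null) {
                synchronized (ScriptRegistry.class) {
                    if (instance == null) {
                        instance = new ScriptRegistry();
                    }
                }
            }
            return instance;
        }

        public List<UpgradeScript> scripts() {
            return Collections.unmodifiableList(SCRIPTS);
        }
    }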
*/ -package com.baidu.hugegraph.pd.upgrade; +package org.apache.hugegraph.pd.upgrade; import java.util.LinkedList; import java.util.List; -import com.baidu.hugegraph.pd.upgrade.scripts.PartitionMetaUpgrade; -import com.baidu.hugegraph.pd.upgrade.scripts.TaskCleanUpgrade; +import org.apache.hugegraph.pd.upgrade.scripts.PartitionMetaUpgrade; +import org.apache.hugegraph.pd.upgrade.scripts.TaskCleanUpgrade; public class VersionScriptFactory { - private static volatile VersionScriptFactory factory; - private static final List scripts = new LinkedList<>(); + private static volatile VersionScriptFactory factory; static { registerScript(new PartitionMetaUpgrade()); diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionUpgradeScript.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java similarity index 97% rename from hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionUpgradeScript.java rename to hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java index 0e216a0a7e..9f58b47024 100644 --- a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/VersionUpgradeScript.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java @@ -15,7 +15,7 @@ * under the License. */ -package com.baidu.hugegraph.pd.upgrade; +package org.apache.hugegraph.pd.upgrade; import com.baidu.hugegraph.pd.config.PDConfig; diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java similarity index 97% rename from hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java rename to hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java index 4c7587d7e7..30f1caadbf 100644 --- a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java @@ -15,16 +15,17 @@ * under the License. */ -package com.baidu.hugegraph.pd.upgrade.scripts; +package org.apache.hugegraph.pd.upgrade.scripts; import java.util.HashSet; +import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; + import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.config.PDConfig; import com.baidu.hugegraph.pd.grpc.Metapb; import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; import com.baidu.hugegraph.pd.meta.MetadataRocksDBStore; -import com.baidu.hugegraph.pd.upgrade.VersionUpgradeScript; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java similarity index 94% rename from hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java rename to hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java index c3ed6b9ddc..a73bce96ff 100644 --- a/hg-pd-service/src/main/java/com/baidu/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java @@ -15,13 +15,14 @@ * under the License. 
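Most of the remaining hunks in this patch are the same mechanical change: imports move from com.baidu.hugegraph.pd.* to org.apache.hugegraph.pd.*, which also re-sorts them into the org.* group ahead of com.*. The exact checkstyle/formatter rules are not part of this patch; the sketch below only shows the resulting grouping, using dependencies the project already pulls in (commons-lang3, Gson), with a stub class body.

    import java.util.List;                        // java.* group first

    import org.apache.commons.lang3.StringUtils;  // then org.* (project and third-party)

    import com.google.gson.Gson;                  // then com.*

    public class ImportOrderExample {

        String describe(List<String> parts) {
            // trivial body just so each imported group is actually used
            String joined = StringUtils.join(parts, ",");
            return new Gson().toJson(joined);
        }
    }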
*/ -package com.baidu.hugegraph.pd.upgrade.scripts; +package org.apache.hugegraph.pd.upgrade.scripts; + +import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; import com.baidu.hugegraph.pd.common.PDException; import com.baidu.hugegraph.pd.config.PDConfig; import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; import com.baidu.hugegraph.pd.meta.MetadataRocksDBStore; -import com.baidu.hugegraph.pd.upgrade.VersionUpgradeScript; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java index 0aeac9c140..aefecc9b8b 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java @@ -52,9 +52,9 @@ public class KvWatchSubject { public static final long WATCH_TTL = 20000L; private static final ConcurrentMap> clients = new ConcurrentHashMap<>(); + private final KvService kvService; BiPredicate equal = (kvKey, watchKey) -> kvKey.equals(watchKey); BiPredicate startWith = (kvKey, watchKey) -> kvKey.startsWith(watchKey); - private final KvService kvService; /** * 会使用以下三组key: diff --git a/hg-pd-service/src/main/resources/application.yml b/hg-pd-service/src/main/resources/application.yml index 06306e02fe..e6917946ea 100644 --- a/hg-pd-service/src/main/resources/application.yml +++ b/hg-pd-service/src/main/resources/application.yml @@ -24,7 +24,7 @@ license: license-path: 'conf/hugegraph.license' server: - port : 8620 + port: 8620 pd: # 定期检查集群是否健康的时间间隔,单位秒 diff --git a/hg-pd-service/src/test/resources/application-server0.yml b/hg-pd-service/src/test/resources/application-server0.yml index e04463f02c..9e129a0a23 100644 --- a/hg-pd-service/src/test/resources/application-server0.yml +++ b/hg-pd-service/src/test/resources/application-server0.yml @@ -18,12 +18,12 @@ grpc: max-inbound-message-size: 100MB server: - port : 8620 + port: 8620 pd: patrol-interval: 3000000 - data-path: tmp/8686 + data-path: tmp/8686 # 最少节点数,少于该数字,集群停止入库 initial-store-count: 1 # 初始store列表,在列表内的store自动激活 diff --git a/hg-pd-service/src/test/resources/application-server1.yml b/hg-pd-service/src/test/resources/application-server1.yml index d9ad7a8202..982954c499 100644 --- a/hg-pd-service/src/test/resources/application-server1.yml +++ b/hg-pd-service/src/test/resources/application-server1.yml @@ -18,12 +18,12 @@ grpc: max-inbound-message-size: 100MB server: - port : 8620 + port: 8620 pd: patrol-interval: 3000000 - data-path: tmp/8686 + data-path: tmp/8686 # 最少节点数,少于该数字,集群停止入库 initial-store-count: 1 # 初始store列表,在列表内的store自动激活 diff --git a/hg-pd-service/src/test/resources/application-server2.yml b/hg-pd-service/src/test/resources/application-server2.yml index ffa8d3d225..608acb8c97 100644 --- a/hg-pd-service/src/test/resources/application-server2.yml +++ b/hg-pd-service/src/test/resources/application-server2.yml @@ -19,7 +19,7 @@ grpc: max-inbound-message-size: 100MB server: - port : 8621 + port: 8621 pd: # 集群ID,区分不同的PD集群 diff --git a/hg-pd-service/src/test/resources/application-server3.yml b/hg-pd-service/src/test/resources/application-server3.yml index 1a3f797255..3d0b1d94d8 100644 --- a/hg-pd-service/src/test/resources/application-server3.yml +++ b/hg-pd-service/src/test/resources/application-server3.yml @@ -19,7 +19,7 @@ grpc: max-inbound-message-size: 100MB server: - port : 8622 + port: 8622 pd: # 集群ID,区分不同的PD集群 diff --git 
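The YAML edits here are cosmetic: "port : 8620" and "port: 8620" yield the same key and value, the change only drops the stray space before the colon, and the data-path line is a whitespace-only change. If one wants to double-check the bound value at runtime, an illustrative Spring component, not part of this patch, could read it back as follows.

    import org.springframework.beans.factory.annotation.Value;
    import org.springframework.stereotype.Component;

    // Hypothetical helper: reports the REST port bound from application.yml.
    @Component
    public class RestPortReporter {

        @Value("${server.port}")
        private int restPort;

        public int restPort() {
            return restPort;  // 8620 in server0/server1, 8621 and 8622 in the server2/server3 test profiles
        }
    }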
a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java index 62ac894395..0ea4e29855 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java @@ -110,8 +110,10 @@ public void testIsInvalidCollection() { @Test public void testIsContains() { - assertTrue(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)}, Long.valueOf(2))); - assertFalse(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)}, Long.valueOf(3))); + assertTrue(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)}, + Long.valueOf(2))); + assertFalse(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)}, + Long.valueOf(3))); } @Test diff --git a/local-release.sh b/local-release.sh index 9d01cfd546..2603bb07c3 100755 --- a/local-release.sh +++ b/local-release.sh @@ -1,4 +1,21 @@ #!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + readonly VER=3.6.3 mvn -DnewVersion=${VER}-SNAPSHOT -DprocessAllModules=true -DgenerateBackupPoms=false versions:set diff --git a/mvnw b/mvnw index 41c0f0c23d..d236581e40 100644 --- a/mvnw +++ b/mvnw @@ -1,22 +1,20 @@ #!/bin/sh -# ---------------------------------------------------------------------------- -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to You under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
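The HgAssertTest hunk only re-wraps two long isContains calls to satisfy the line-length limit; the assertions themselves are unchanged. Restated on their own for reference (JUnit 4, mirroring the hunk exactly):

    import static org.junit.Assert.assertFalse;
    import static org.junit.Assert.assertTrue;

    import org.apache.hugegraph.pd.common.HgAssert;
    import org.junit.Test;

    public class HgAssertContainsExample {

        @Test
        public void testIsContains() {
            // true when the array holds an element equal to the target, false otherwise
            assertTrue(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)},
                                           Long.valueOf(2)));
            assertFalse(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)},
                                            Long.valueOf(3)));
        }
    }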
See the License for the -# specific language governing permissions and limitations +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations # under the License. -# ---------------------------------------------------------------------------- +# # ---------------------------------------------------------------------------- # Maven Start Up Batch script diff --git a/pom.xml b/pom.xml index 5ebb8f0809..c0d20b2a51 100644 --- a/pom.xml +++ b/pom.xml @@ -1,6 +1,23 @@ - + + 4.0.0 diff --git a/settings.xml b/settings.xml index 083a6a0dc4..dc29a27b7e 100644 --- a/settings.xml +++ b/settings.xml @@ -1,6 +1,23 @@ - + + - + diff --git a/hg-pd-dist/pom.xml b/hg-pd-dist/pom.xml index b3089d0800..16be298fe7 100644 --- a/hg-pd-dist/pom.xml +++ b/hg-pd-dist/pom.xml @@ -21,7 +21,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> hugegraph-pd-root - com.baidu.hugegraph + org.apache.hugegraph 3.6.5-SNAPSHOT 4.0.0 @@ -40,9 +40,9 @@ - com.baidu.hugegraph + org.apache.hugegraph hugegraph-pd - ${project.version} + ${revision} diff --git a/hg-pd-dist/src/assembly/descriptor/server-assembly.xml b/hg-pd-dist/src/assembly/descriptor/server-assembly.xml index a725093386..7aed34c2d2 100644 --- a/hg-pd-dist/src/assembly/descriptor/server-assembly.xml +++ b/hg-pd-dist/src/assembly/descriptor/server-assembly.xml @@ -49,7 +49,7 @@ runtime false - com.baidu.hugegraph:${executable.jar.name}:jar:* + org.apache.hugegraph:${executable.jar.name}:jar:* diff --git a/hg-pd-dist/src/assembly/static/conf/log4j2.xml b/hg-pd-dist/src/assembly/static/conf/log4j2.xml index d68438f89a..96fae99a31 100644 --- a/hg-pd-dist/src/assembly/static/conf/log4j2.xml +++ b/hg-pd-dist/src/assembly/static/conf/log4j2.xml @@ -127,7 +127,7 @@ - + diff --git a/hg-pd-grpc/src/main/proto/discovery.proto b/hg-pd-grpc/src/main/proto/discovery.proto index 1c716059ba..f3f0e2a9e8 100644 --- a/hg-pd-grpc/src/main/proto/discovery.proto +++ b/hg-pd-grpc/src/main/proto/discovery.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package discovery; import "pdpb.proto"; -option java_package = "com.baidu.hugegraph.pd.grpc.discovery"; +option java_package = "org.apache.hugegraph.pd.grpc.discovery"; option java_multiple_files = true; diff --git a/hg-pd-grpc/src/main/proto/kv.proto b/hg-pd-grpc/src/main/proto/kv.proto index 402b356ceb..5f88b2ee15 100644 --- a/hg-pd-grpc/src/main/proto/kv.proto +++ b/hg-pd-grpc/src/main/proto/kv.proto @@ -3,7 +3,7 @@ package kv; import "pdpb.proto"; import "metapb.proto"; -option java_package = "com.baidu.hugegraph.pd.grpc.kv"; +option java_package = "org.apache.hugegraph.pd.grpc.kv"; option java_multiple_files = true; diff --git a/hg-pd-grpc/src/main/proto/metaTask.proto b/hg-pd-grpc/src/main/proto/metaTask.proto index 47cc9cd5d6..ff6fc679d1 100644 --- a/hg-pd-grpc/src/main/proto/metaTask.proto +++ b/hg-pd-grpc/src/main/proto/metaTask.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package metaTask; import "metapb.proto"; import "pd_pulse.proto"; -option java_package = "com.baidu.hugegraph.pd.grpc"; +option java_package = "org.apache.hugegraph.pd.grpc"; enum TaskType { Unknown = 0; diff --git a/hg-pd-grpc/src/main/proto/metapb.proto b/hg-pd-grpc/src/main/proto/metapb.proto index 7f11d093b5..b6fd8c27c0 100644 --- a/hg-pd-grpc/src/main/proto/metapb.proto +++ 
b/hg-pd-grpc/src/main/proto/metapb.proto @@ -1,6 +1,6 @@ syntax = "proto3"; package metapb; -option java_package = "com.baidu.hugegraph.pd.grpc"; +option java_package = "org.apache.hugegraph.pd.grpc"; import "google/protobuf/any.proto"; enum ClusterState{ diff --git a/hg-pd-grpc/src/main/proto/pd_common.proto b/hg-pd-grpc/src/main/proto/pd_common.proto index c5724e0112..9c2dfe6174 100644 --- a/hg-pd-grpc/src/main/proto/pd_common.proto +++ b/hg-pd-grpc/src/main/proto/pd_common.proto @@ -1,7 +1,7 @@ syntax = "proto3"; option java_multiple_files = true; -option java_package = "com.baidu.hugegraph.pd.grpc.common"; +option java_package = "org.apache.hugegraph.pd.grpc.common"; option java_outer_classname = "HgPdCommonProto"; message RequestHeader { diff --git a/hg-pd-grpc/src/main/proto/pd_pulse.proto b/hg-pd-grpc/src/main/proto/pd_pulse.proto index 31c8d1e2fa..d4684eb895 100644 --- a/hg-pd-grpc/src/main/proto/pd_pulse.proto +++ b/hg-pd-grpc/src/main/proto/pd_pulse.proto @@ -4,7 +4,7 @@ import "metapb.proto"; import "pd_common.proto"; option java_multiple_files = true; -option java_package = "com.baidu.hugegraph.pd.grpc.pulse"; +option java_package = "org.apache.hugegraph.pd.grpc.pulse"; option java_outer_classname = "HgPdPulseProto"; service HgPdPulse { diff --git a/hg-pd-grpc/src/main/proto/pd_watch.proto b/hg-pd-grpc/src/main/proto/pd_watch.proto index 38e4af4ed2..2495925090 100644 --- a/hg-pd-grpc/src/main/proto/pd_watch.proto +++ b/hg-pd-grpc/src/main/proto/pd_watch.proto @@ -3,7 +3,7 @@ syntax = "proto3"; import "metapb.proto"; option java_multiple_files = true; -option java_package = "com.baidu.hugegraph.pd.grpc.watch"; +option java_package = "org.apache.hugegraph.pd.grpc.watch"; option java_outer_classname = "HgPdWatchProto"; service HgPdWatch { diff --git a/hg-pd-grpc/src/main/proto/pdpb.proto b/hg-pd-grpc/src/main/proto/pdpb.proto index 1535bb2668..3a7432ecdf 100644 --- a/hg-pd-grpc/src/main/proto/pdpb.proto +++ b/hg-pd-grpc/src/main/proto/pdpb.proto @@ -4,7 +4,7 @@ package pdpb; import "metapb.proto"; import "metaTask.proto"; -option java_package = "com.baidu.hugegraph.pd.grpc"; +option java_package = "org.apache.hugegraph.pd.grpc"; service PD { // 注册store,首次注册会生成新的store_id, store_id是store唯一标识 diff --git a/hg-pd-service/pom.xml b/hg-pd-service/pom.xml index 75e5c2a66d..38906fdf61 100644 --- a/hg-pd-service/pom.xml +++ b/hg-pd-service/pom.xml @@ -34,7 +34,7 @@ - com.baidu.hugegraph + org.apache.hugegraph hg-pd-grpc @@ -45,9 +45,9 @@ - com.baidu.hugegraph + org.apache.hugegraph hg-pd-core - ${project.version} + ${revision} @@ -110,7 +110,7 @@ 3.17.2 - com.baidu.hugegraph + org.apache.hugegraph hugegraph-common 1.8.12 diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java index e64e99b8d2..452f95a467 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java @@ -26,7 +26,7 @@ /** * PD服务启动类 */ -@ComponentScan(basePackages = {"com.baidu.hugegraph.pd"}) +@ComponentScan(basePackages = {"org.apache.hugegraph.pd"}) @SpringBootApplication public class HugePDServer { public static void main(String[] args) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java index bc371ca95b..ca0582209a 100644 --- 
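The .proto changes above only swap option java_package, so the generated classes keep their API and simply move to org.apache.hugegraph.pd.grpc and its subpackages; callers just update imports. For instance, the pre-built OK header introduced in the PDService hunk earlier in this patch is constructed against the relocated package like this:

    import org.apache.hugegraph.pd.grpc.Pdpb;

    public class OkHeaderExample {

        // Mirrors the okHeader constant added to PDService: built once and reused,
        // instead of rebuilding the same header for every response.
        static final Pdpb.ResponseHeader OK_HEADER =
                Pdpb.ResponseHeader.newBuilder()
                                   .setError(Pdpb.Error.newBuilder()
                                                       .setType(Pdpb.ErrorType.OK))
                                   .build();
    }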
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java @@ -32,20 +32,20 @@ import java.util.prefs.Preferences; import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.license.ExtraParam; +import org.apache.hugegraph.license.LicenseVerifyParam; +import org.apache.hugegraph.license.MachineInfo; +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc; +import org.apache.hugegraph.pd.grpc.kv.TTLRequest; +import org.apache.hugegraph.pd.grpc.kv.TTLResponse; +import org.apache.hugegraph.pd.raft.RaftEngine; import org.springframework.stereotype.Service; import org.springframework.util.Base64Utils; -import com.baidu.hugegraph.license.ExtraParam; -import com.baidu.hugegraph.license.LicenseVerifyParam; -import com.baidu.hugegraph.license.MachineInfo; -import com.baidu.hugegraph.pd.KvService; -import com.baidu.hugegraph.pd.common.PDRuntimeException; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.grpc.kv.KvServiceGrpc; -import com.baidu.hugegraph.pd.grpc.kv.TTLRequest; -import com.baidu.hugegraph.pd.grpc.kv.TTLResponse; -import com.baidu.hugegraph.pd.raft.RaftEngine; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.gson.Gson; import com.google.gson.internal.LinkedTreeMap; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java index 5f54b2d6b4..ab207898b6 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java @@ -20,10 +20,11 @@ import java.io.IOException; import java.util.List; -import com.baidu.hugegraph.license.CommonLicenseManager; -import com.baidu.hugegraph.license.ExtraParam; -import com.baidu.hugegraph.pd.common.PDRuntimeException; -import com.baidu.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.license.CommonLicenseManager; +import org.apache.hugegraph.license.ExtraParam; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.grpc.Pdpb; + import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java index 0c8fb55bed..064bbb0017 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java @@ -21,13 +21,12 @@ import java.util.List; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; import org.apache.hugegraph.pd.service.PDService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; - import io.micrometer.core.instrument.Gauge; import io.micrometer.core.instrument.MeterRegistry; import lombok.extern.slf4j.Slf4j; 
diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java index 0517645fa9..ddf8d7dd98 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java @@ -19,7 +19,7 @@ import java.io.Serializable; -import com.baidu.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb; import lombok.Data; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java index 4b25fe023e..5136bc5fe0 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java @@ -19,7 +19,7 @@ import java.util.HashMap; -import com.baidu.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb; import lombok.Data; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java index 679d36841f..53bcfee357 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java @@ -20,7 +20,7 @@ import java.util.function.Function; import java.util.function.Supplier; -import com.baidu.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.HgAssert; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java index e153fc7277..e8e099554f 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java @@ -25,10 +25,10 @@ import javax.annotation.concurrent.ThreadSafe; -import com.baidu.hugegraph.pd.grpc.pulse.PulseNoticeRequest; -import com.baidu.hugegraph.pd.grpc.pulse.PulseResponse; -import com.baidu.hugegraph.pd.grpc.pulse.PulseType; -import com.baidu.hugegraph.pd.util.IdUtil; +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; +import org.apache.hugegraph.pd.util.IdUtil; import io.grpc.Status; import io.grpc.stub.StreamObserver; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java index 536fdcdcde..e2fbdae721 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java @@ -17,7 +17,7 @@ package org.apache.hugegraph.pd.pulse; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; import java.util.Collections; import java.util.List; @@ -33,12 +33,12 @@ import javax.annotation.concurrent.ThreadSafe; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.*; import 
org.apache.hugegraph.pd.notice.NoticeBroadcaster; import org.apache.hugegraph.pd.util.IdUtil; -import com.baidu.hugegraph.pd.common.HgAssert; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.pulse.*; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Parser; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java index c19685c51b..9fb4ad7c27 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java @@ -19,7 +19,7 @@ import java.util.function.Function; -import com.baidu.hugegraph.pd.grpc.pulse.*; +import org.apache.hugegraph.pd.grpc.pulse.*; /** * @author lynn.bond@hotmail.com created on 2021/11/9 diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java index 6d7680a2ee..9af59f7d74 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java @@ -21,7 +21,8 @@ import java.util.List; import java.util.Map; -import com.baidu.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDException; + import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.protobuf.InvalidProtocolBufferException; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java index 184b65b93b..78d24be5cd 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java @@ -27,6 +27,9 @@ import javax.servlet.http.HttpServletRequest; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; import org.apache.hugegraph.pd.model.GraphRestRequest; import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.service.PDRestService; @@ -40,10 +43,6 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; - import lombok.Data; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java index 8adf3c7583..1bb4f183a5 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java @@ -23,6 +23,8 @@ import javax.servlet.http.HttpServletRequest; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; import org.apache.hugegraph.pd.model.GraphSpaceRestRequest; import org.apache.hugegraph.pd.service.PDRestService; import org.springframework.beans.factory.annotation.Autowired; @@ -34,9 +36,6 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import 
com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; - import lombok.extern.slf4j.Slf4j; @RestController diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java index 40b8dfcbeb..f86642ca74 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java @@ -22,7 +22,11 @@ import java.util.List; import java.util.concurrent.ExecutionException; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.service.PDService; import org.springframework.beans.factory.annotation.Autowired; @@ -32,11 +36,6 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.raft.RaftEngine; - import lombok.Data; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java index 1154d6cd7b..dec8ffe2aa 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java @@ -31,8 +31,11 @@ import javax.servlet.http.HttpServletRequest; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; import org.apache.hugegraph.pd.model.PeerRestRequest; import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.service.PDService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; @@ -43,10 +46,6 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.raft.RaftEngine; - import io.grpc.stub.CallStreamObserver; import io.grpc.stub.StreamObserver; import lombok.Data; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java index 8ccf0774bc..5bc5cdfae0 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java @@ -26,6 +26,9 @@ import java.util.concurrent.ExecutionException; import org.apache.commons.lang.time.DateFormatUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.model.TimeRangeRequest; import org.apache.hugegraph.pd.service.PDRestService; @@ -39,9 +42,6 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import com.baidu.hugegraph.pd.common.PDException; -import 
com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; import com.google.protobuf.util.JsonFormat; import lombok.Data; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java index 64e5eb4d5f..b0ac0b36dd 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java @@ -26,6 +26,13 @@ import javax.servlet.http.HttpServletRequest; import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.GetMembersResponse; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.Query; import org.apache.hugegraph.pd.license.LicenseVerifierService; import org.apache.hugegraph.pd.model.RegistryQueryRestRequest; import org.apache.hugegraph.pd.model.RegistryRestRequest; @@ -42,14 +49,6 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.common.PDRuntimeException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.grpc.Pdpb.GetMembersResponse; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; -import com.baidu.hugegraph.pd.grpc.discovery.Query; - import lombok.extern.slf4j.Slf4j; /** diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java index 0a17a3f3f4..53249b9436 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java @@ -22,6 +22,9 @@ import java.util.HashMap; import java.util.List; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.service.PDService; @@ -32,10 +35,6 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; - import lombok.Data; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java index 9f812c7a15..f288ef877f 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java @@ -24,6 +24,9 @@ import java.util.List; import java.util.Map; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.model.StoreRestRequest; import org.apache.hugegraph.pd.model.TimeRangeRequest; @@ -40,9 +43,6 @@ import 
org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; import com.google.protobuf.util.JsonFormat; import lombok.Data; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java index 3ff7117600..d1419d2ddb 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java @@ -20,6 +20,9 @@ import java.util.List; import java.util.Map; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; import org.apache.hugegraph.pd.service.PDRestService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; @@ -28,10 +31,6 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import com.baidu.hugegraph.pd.common.KVPair; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; - import lombok.extern.slf4j.Slf4j; @RestController diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java index 856e8149a4..75c90f16bf 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java @@ -21,6 +21,15 @@ import java.util.List; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hugegraph.pd.RegistryService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.QueueStore; import org.apache.hugegraph.pd.pulse.PDPulseSubject; import org.apache.hugegraph.pd.watch.PDWatchSubject; import org.springframework.beans.factory.annotation.Autowired; @@ -32,15 +41,6 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; -import com.baidu.hugegraph.pd.RegistryService; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.discovery.Query; -import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard; -import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; -import com.baidu.hugegraph.pd.meta.MetadataFactory; -import com.baidu.hugegraph.pd.meta.QueueStore; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Parser; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java index f279188ba8..a22239b971 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java @@ -23,25 
+23,24 @@ import javax.annotation.PostConstruct; import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.RegistryService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; import org.apache.hugegraph.pd.license.LicenseVerifierService; import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftStateListener; import org.apache.hugegraph.pd.watch.PDWatchSubject; import org.lognet.springboot.grpc.GRpcService; import org.springframework.beans.factory.annotation.Autowired; -import com.baidu.hugegraph.pd.RegistryService; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.common.PDRuntimeException; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; -import com.baidu.hugegraph.pd.grpc.discovery.Query; -import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; -import com.baidu.hugegraph.pd.raft.RaftEngine; -import com.baidu.hugegraph.pd.raft.RaftStateListener; - import io.grpc.CallOptions; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java index 9eac62e24a..b9ec00b8d5 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java @@ -27,31 +27,30 @@ import javax.annotation.PostConstruct; +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.kv.K; +import org.apache.hugegraph.pd.grpc.kv.KResponse; +import org.apache.hugegraph.pd.grpc.kv.Kv; +import org.apache.hugegraph.pd.grpc.kv.KvResponse; +import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc; +import org.apache.hugegraph.pd.grpc.kv.LockRequest; +import org.apache.hugegraph.pd.grpc.kv.LockResponse; +import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse; +import org.apache.hugegraph.pd.grpc.kv.TTLRequest; +import org.apache.hugegraph.pd.grpc.kv.TTLResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchKv; +import org.apache.hugegraph.pd.grpc.kv.WatchRequest; +import org.apache.hugegraph.pd.grpc.kv.WatchResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchState; +import org.apache.hugegraph.pd.grpc.kv.WatchType; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftStateListener; import org.apache.hugegraph.pd.watch.KvWatchSubject; import org.lognet.springboot.grpc.GRpcService; import org.springframework.beans.factory.annotation.Autowired; -import com.baidu.hugegraph.pd.KvService; -import com.baidu.hugegraph.pd.common.PDException; -import 
com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.kv.K; -import com.baidu.hugegraph.pd.grpc.kv.KResponse; -import com.baidu.hugegraph.pd.grpc.kv.Kv; -import com.baidu.hugegraph.pd.grpc.kv.KvResponse; -import com.baidu.hugegraph.pd.grpc.kv.KvServiceGrpc; -import com.baidu.hugegraph.pd.grpc.kv.LockRequest; -import com.baidu.hugegraph.pd.grpc.kv.LockResponse; -import com.baidu.hugegraph.pd.grpc.kv.ScanPrefixResponse; -import com.baidu.hugegraph.pd.grpc.kv.TTLRequest; -import com.baidu.hugegraph.pd.grpc.kv.TTLResponse; -import com.baidu.hugegraph.pd.grpc.kv.WatchKv; -import com.baidu.hugegraph.pd.grpc.kv.WatchRequest; -import com.baidu.hugegraph.pd.grpc.kv.WatchResponse; -import com.baidu.hugegraph.pd.grpc.kv.WatchState; -import com.baidu.hugegraph.pd.grpc.kv.WatchType; -import com.baidu.hugegraph.pd.raft.RaftEngine; -import com.baidu.hugegraph.pd.raft.RaftStateListener; - import io.grpc.ManagedChannel; import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java index b6a194a7d5..8dbc221537 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java @@ -22,19 +22,18 @@ import java.util.function.Function; import java.util.function.Supplier; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.HgPdPulseGrpc; +import org.apache.hugegraph.pd.grpc.pulse.PulseRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.QueueStore; import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.raft.RaftEngine; import org.lognet.springboot.grpc.GRpcService; import org.springframework.beans.factory.annotation.Autowired; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.pulse.HgPdPulseGrpc; -import com.baidu.hugegraph.pd.grpc.pulse.PulseRequest; -import com.baidu.hugegraph.pd.grpc.pulse.PulseResponse; -import com.baidu.hugegraph.pd.meta.MetadataFactory; -import com.baidu.hugegraph.pd.meta.QueueStore; -import com.baidu.hugegraph.pd.raft.RaftEngine; - import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java index bc08d8f29d..27d6723666 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java @@ -23,28 +23,27 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; 
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; import org.apache.hugegraph.pd.model.RegistryRestRequest; import org.apache.hugegraph.pd.model.RegistryRestResponse; import org.springframework.beans.factory.InitializingBean; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; -import com.baidu.hugegraph.pd.ConfigService; -import com.baidu.hugegraph.pd.LogService; -import com.baidu.hugegraph.pd.PartitionService; -import com.baidu.hugegraph.pd.StoreMonitorDataService; -import com.baidu.hugegraph.pd.StoreNodeService; -import com.baidu.hugegraph.pd.TaskScheduleService; -import com.baidu.hugegraph.pd.common.HgAssert; -import com.baidu.hugegraph.pd.common.KVPair; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; -import com.baidu.hugegraph.pd.grpc.discovery.Query; -import com.baidu.hugegraph.pd.grpc.discovery.RegisterInfo; - import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java index 0450daee55..5fa8c40ef1 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @@ -26,9 +26,43 @@ import javax.annotation.PostConstruct; import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.PartitionInstructionListener; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.PartitionStatusListener; +import org.apache.hugegraph.pd.ShardGroupStatusListener; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.StoreStatusListener; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseResponse; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.grpc.watch.NodeEventType; +import org.apache.hugegraph.pd.grpc.watch.WatchGraphResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import 
org.apache.hugegraph.pd.grpc.watch.WatchType; import org.apache.hugegraph.pd.license.LicenseVerifierService; import org.apache.hugegraph.pd.pulse.PDPulseSubject; import org.apache.hugegraph.pd.pulse.PulseListener; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftStateListener; import org.apache.hugegraph.pd.util.grpc.StreamObserverUtil; import org.apache.hugegraph.pd.watch.PDWatchSubject; import org.lognet.springboot.grpc.GRpcService; @@ -36,40 +70,6 @@ import org.springframework.util.CollectionUtils; import com.alipay.sofa.jraft.Status; -import com.baidu.hugegraph.pd.ConfigService; -import com.baidu.hugegraph.pd.IdService; -import com.baidu.hugegraph.pd.LogService; -import com.baidu.hugegraph.pd.PartitionInstructionListener; -import com.baidu.hugegraph.pd.PartitionService; -import com.baidu.hugegraph.pd.PartitionStatusListener; -import com.baidu.hugegraph.pd.ShardGroupStatusListener; -import com.baidu.hugegraph.pd.StoreMonitorDataService; -import com.baidu.hugegraph.pd.StoreNodeService; -import com.baidu.hugegraph.pd.StoreStatusListener; -import com.baidu.hugegraph.pd.TaskScheduleService; -import com.baidu.hugegraph.pd.common.KVPair; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.PDGrpc; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.grpc.Pdpb.PutLicenseRequest; -import com.baidu.hugegraph.pd.grpc.Pdpb.PutLicenseResponse; -import com.baidu.hugegraph.pd.grpc.pulse.ChangeShard; -import com.baidu.hugegraph.pd.grpc.pulse.CleanPartition; -import com.baidu.hugegraph.pd.grpc.pulse.DbCompaction; -import com.baidu.hugegraph.pd.grpc.pulse.MovePartition; -import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; -import com.baidu.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; -import com.baidu.hugegraph.pd.grpc.pulse.PartitionKeyRange; -import com.baidu.hugegraph.pd.grpc.pulse.SplitPartition; -import com.baidu.hugegraph.pd.grpc.pulse.TransferLeader; -import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; -import com.baidu.hugegraph.pd.grpc.watch.WatchGraphResponse; -import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; -import com.baidu.hugegraph.pd.grpc.watch.WatchType; -import com.baidu.hugegraph.pd.raft.RaftEngine; -import com.baidu.hugegraph.pd.raft.RaftStateListener; import io.grpc.CallOptions; import io.grpc.ManagedChannel; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java index 2e19b9bd38..1ddda6de12 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java @@ -17,13 +17,12 @@ package org.apache.hugegraph.pd.service; +import org.apache.hugegraph.pd.grpc.watch.HgPdWatchGrpc; +import org.apache.hugegraph.pd.grpc.watch.WatchRequest; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; import org.apache.hugegraph.pd.watch.PDWatchSubject; import org.lognet.springboot.grpc.GRpcService; -import com.baidu.hugegraph.pd.grpc.watch.HgPdWatchGrpc; -import com.baidu.hugegraph.pd.grpc.watch.WatchRequest; -import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; - import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java index f187722213..118ad4af01 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java @@ -27,22 +27,21 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import org.apache.hugegraph.pd.RegistryService; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; import org.apache.hugegraph.pd.model.PromTargetsModel; import org.apache.hugegraph.pd.rest.MemberAPI; import org.apache.hugegraph.pd.util.HgMapCache; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; -import com.baidu.hugegraph.pd.RegistryService; -import com.baidu.hugegraph.pd.common.HgAssert; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfo; -import com.baidu.hugegraph.pd.grpc.discovery.NodeInfos; -import com.baidu.hugegraph.pd.grpc.discovery.Query; - import lombok.extern.slf4j.Slf4j; /** diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java index 084728b857..620d0e4741 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java @@ -19,14 +19,13 @@ import java.util.concurrent.ConcurrentHashMap; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Pdpb; import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftStateListener; import org.apache.hugegraph.pd.watch.PDWatchSubject; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Pdpb; -import com.baidu.hugegraph.pd.raft.RaftEngine; -import com.baidu.hugegraph.pd.raft.RaftStateListener; - import io.grpc.CallOptions; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java index 1756eea26c..78af36aaea 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java @@ -15,16 +15,15 @@ * under the License. 
*/ -package com.baidu.hugegraph.pd.service; +package org.apache.hugegraph.pd.service; +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.rest.API; import org.apache.hugegraph.pd.upgrade.VersionScriptFactory; import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; -import com.baidu.hugegraph.pd.KvService; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.rest.API; - import lombok.extern.slf4j.Slf4j; @Slf4j diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java index 9f58b47024..8cd54864aa 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java @@ -17,7 +17,7 @@ package org.apache.hugegraph.pd.upgrade; -import com.baidu.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.config.PDConfig; public interface VersionUpgradeScript { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java index 30f1caadbf..e113d95ff4 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java @@ -19,14 +19,13 @@ import java.util.HashSet; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; -import com.baidu.hugegraph.pd.meta.MetadataRocksDBStore; - import lombok.extern.slf4j.Slf4j; @Slf4j diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java index a73bce96ff..6370f839f9 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java @@ -17,13 +17,12 @@ package org.apache.hugegraph.pd.upgrade.scripts; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.meta.MetadataKeyHelper; -import com.baidu.hugegraph.pd.meta.MetadataRocksDBStore; - import lombok.extern.slf4j.Slf4j; @Slf4j diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java index 709b65a6d0..e9bbd045b6 100644 --- 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java @@ -21,9 +21,8 @@ import java.util.Date; import org.apache.commons.lang3.time.DateUtils; - -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Pdpb; /** * @author zhangyingjie diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java index 06a7d521a6..2b6994a316 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java @@ -24,8 +24,9 @@ import javax.annotation.concurrent.ThreadSafe; -import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; -import com.baidu.hugegraph.pd.grpc.watch.WatchType; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchType; + import com.google.protobuf.util.JsonFormat; import io.grpc.Status; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java index aefecc9b8b..2e0dee58d1 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java @@ -25,14 +25,14 @@ import java.util.concurrent.ConcurrentMap; import java.util.function.BiPredicate; -import com.baidu.hugegraph.pd.KvService; -import com.baidu.hugegraph.pd.common.PDException; -import com.baidu.hugegraph.pd.config.PDConfig; -import com.baidu.hugegraph.pd.grpc.kv.WatchEvent; -import com.baidu.hugegraph.pd.grpc.kv.WatchKv; -import com.baidu.hugegraph.pd.grpc.kv.WatchResponse; -import com.baidu.hugegraph.pd.grpc.kv.WatchState; -import com.baidu.hugegraph.pd.grpc.kv.WatchType; +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.kv.WatchEvent; +import org.apache.hugegraph.pd.grpc.kv.WatchKv; +import org.apache.hugegraph.pd.grpc.kv.WatchResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchState; +import org.apache.hugegraph.pd.grpc.kv.WatchType; import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java index 610baad59b..77ad0542e1 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java @@ -17,13 +17,13 @@ package org.apache.hugegraph.pd.watch; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; import javax.annotation.concurrent.ThreadSafe; -import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; -import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; -import com.baidu.hugegraph.pd.grpc.watch.WatchType; +import org.apache.hugegraph.pd.grpc.watch.NodeEventType; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchType; /** * The subject of 
partition change. diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java index 6915050189..199d92622b 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java @@ -17,20 +17,20 @@ package org.apache.hugegraph.pd.watch; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import javax.annotation.concurrent.ThreadSafe; -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.watch.NodeEventType; -import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; -import com.baidu.hugegraph.pd.grpc.watch.WatchCreateRequest; -import com.baidu.hugegraph.pd.grpc.watch.WatchRequest; -import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; -import com.baidu.hugegraph.pd.grpc.watch.WatchType; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.watch.NodeEventType; +import org.apache.hugegraph.pd.grpc.watch.WatchChangeType; +import org.apache.hugegraph.pd.grpc.watch.WatchCreateRequest; +import org.apache.hugegraph.pd.grpc.watch.WatchRequest; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchType; import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; @@ -50,7 +50,7 @@ public class PDWatchSubject implements StreamObserver { subjectHolder.put(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name(), new NodeChangeSubject()); subjectHolder.put(WatchType.WATCH_TYPE_GRAPH_CHANGE.name(), new NodeChangeSubject()); subjectHolder.put(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name(), - new com.baidu.hugegraph.pd.watch.ShardGroupChangeSubject()); + new org.apache.hugegraph.pd.watch.ShardGroupChangeSubject()); } private final StreamObserver responseObserver; @@ -82,7 +82,7 @@ public static void notifyPartitionChange(ChangeType changeType, String graph, in public static void notifyShardGroupChange(ChangeType changeType, int groupId, Metapb.ShardGroup group) { - ((com.baidu.hugegraph.pd.watch.ShardGroupChangeSubject) subjectHolder.get( + ((org.apache.hugegraph.pd.watch.ShardGroupChangeSubject) subjectHolder.get( WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name())) .notifyWatcher(changeType.getGrpcType(), groupId, group); } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java index 556d6b20ba..2e664b98f0 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java @@ -17,14 +17,14 @@ package org.apache.hugegraph.pd.watch; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentValid; +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentValid; import javax.annotation.concurrent.ThreadSafe; -import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; -import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; -import com.baidu.hugegraph.pd.grpc.watch.WatchType; +import 
org.apache.hugegraph.pd.grpc.watch.WatchChangeType; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchType; /** * The subject of partition change. diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java index f0c9643b72..d9cfde8e73 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java @@ -15,16 +15,14 @@ * under the License. */ -package com.baidu.hugegraph.pd.watch; +package org.apache.hugegraph.pd.watch; -import static com.baidu.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; -import org.apache.hugegraph.pd.watch.AbstractWatchSubject; - -import com.baidu.hugegraph.pd.grpc.Metapb; -import com.baidu.hugegraph.pd.grpc.watch.WatchChangeType; -import com.baidu.hugegraph.pd.grpc.watch.WatchResponse; -import com.baidu.hugegraph.pd.grpc.watch.WatchType; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.watch.WatchChangeType; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchType; public class ShardGroupChangeSubject extends AbstractWatchSubject { diff --git a/hg-pd-service/src/main/resources/log4j2.xml b/hg-pd-service/src/main/resources/log4j2.xml index 125b8ea9f7..a26fe62d9d 100644 --- a/hg-pd-service/src/main/resources/log4j2.xml +++ b/hg-pd-service/src/main/resources/log4j2.xml @@ -131,7 +131,7 @@ - + diff --git a/hg-pd-service/src/test/resources/log4j2.xml b/hg-pd-service/src/test/resources/log4j2.xml index 68c8326973..d117f8328c 100644 --- a/hg-pd-service/src/test/resources/log4j2.xml +++ b/hg-pd-service/src/test/resources/log4j2.xml @@ -131,7 +131,7 @@ - + diff --git a/hg-pd-test/pom.xml b/hg-pd-test/pom.xml index 0ac87fa932..d6f29a3298 100644 --- a/hg-pd-test/pom.xml +++ b/hg-pd-test/pom.xml @@ -20,9 +20,9 @@ xmlns="http://maven.apache.org/POM/4.0.0" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + org.apache.hugegraph hugegraph-pd-root - com.baidu.hugegraph - 3.6.5-SNAPSHOT + ${revision} 4.0.0 @@ -72,14 +72,14 @@ 4.13.2 - com.baidu.hugegraph + org.apache.hugegraph hg-store-grpc - ${project.version} + ${revision} - com.baidu.hugegraph + org.apache.hugegraph hg-store-common - ${project.version} + ${revision} org.projectlombok @@ -102,9 +102,9 @@ - com.baidu.hugegraph + org.apache.hugegraph hg-pd-client - ${project.version} + ${revision} @@ -113,9 +113,9 @@ 2.8.9 - com.baidu.hugegraph + org.apache.hugegraph hg-pd-grpc - ${project.version} + ${revision} commons-io @@ -171,31 +171,31 @@ - com.baidu.hugegraph + org.apache.hugegraph hg-pd-clitools - ${project.version} + ${revision} - com.baidu.hugegraph + org.apache.hugegraph hg-pd-common - ${project.version} + ${revision} - com.baidu.hugegraph + org.apache.hugegraph hg-pd-core - ${project.version} + ${revision} - com.baidu.hugegraph + org.apache.hugegraph hg-pd-dist - ${project.version} + ${revision} compile - com.baidu.hugegraph + org.apache.hugegraph hugegraph-pd - ${project.version} + ${revision} - - - - - - - - - - - @@ -245,7 +234,6 @@ - \ No newline at end of file From bd3d313a1be5f9641c3acd0a413511a29f6defa2 Mon Sep 17 00:00:00 2001 From: imbajin Date: Sat, 6 May 2023 20:04:30 +0800 Subject: [PATCH 07/18] 
chore: fix some code problems - lack this with instance filed - lack {} in 'if/while/for' Change-Id: I37d21f1ab94529a8122f4598636fbe02b2264904 --- .../pd/client/DiscoveryClientTest.java | 8 +- .../hugegraph/pd/client/KvClientTest.java | 50 ++-- .../hugegraph/pd/common/KVPairTest.java | 16 +- .../pd/common/PartitionCacheTest.java | 222 +++++++++--------- .../pd/core/meta/MetadataKeyHelperTest.java | 8 +- .../pd/service/ConfigServiceTest.java | 16 +- .../hugegraph/pd/service/LogServiceTest.java | 8 +- .../pd/service/PartitionServiceTest.java | 14 +- .../service/StoreMonitorDataServiceTest.java | 25 +- .../pd/service/StoreNodeServiceNewTest.java | 13 +- .../pd/service/StoreServiceTest.java | 70 +++--- .../pd/service/TaskScheduleServiceTest.java | 4 +- 12 files changed, 224 insertions(+), 230 deletions(-) diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java index ee29e1ba3f..5e7d1ee08e 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java @@ -33,14 +33,14 @@ public class DiscoveryClientTest { @Before public void setUp() { - client = getClient("appName", "localhost:8654", new HashMap()); + this.client = getClient("appName", "localhost:8654", new HashMap()); } @Test public void testGetRegisterNode() { // Setup try { - Consumer result = client.getRegisterConsumer(); + Consumer result = this.client.getRegisterConsumer(); final NodeInfo expectedResult = NodeInfo.newBuilder() .setAppName("appName") .build(); @@ -50,11 +50,11 @@ public void testGetRegisterNode() { .setVersion("0.13.0").build(); // Run the test - client.getNodeInfos(query); + this.client.getNodeInfos(query); } catch (InterruptedException e) { e.printStackTrace(); } finally { - client.close(); + this.client.close(); } } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java index 55b7df9bc1..54cee0b812 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java @@ -46,7 +46,7 @@ public class KvClientTest extends BaseClientTest { @Before public void setUp() { - client = new KvClient<>(PDConfig.of("localhost:8686")); + this.client = new KvClient<>(PDConfig.of("localhost:8686")); } @Test @@ -54,7 +54,7 @@ public void testCreateStub() { // Setup // Run the test try { - final AbstractStub result = client.createStub(); + final AbstractStub result = this.client.createStub(); } catch (Exception e) { } @@ -68,7 +68,7 @@ public void testCreateBlockingStub() { // Setup // Run the test try { - final AbstractBlockingStub result = client.createBlockingStub(); + final AbstractBlockingStub result = this.client.createBlockingStub(); } catch (Exception e) { } @@ -78,42 +78,42 @@ public void testCreateBlockingStub() { public void testPutAndGet() throws Exception { // Run the test try { - client.put(key, value); + this.client.put(this.key, this.value); // Run the test - KResponse result = client.get(key); + KResponse result = this.client.get(this.key); // Verify the results - assertThat(result.getValue()).isEqualTo(value); - client.delete(key); - result = client.get(key); + assertThat(result.getValue()).isEqualTo(this.value); + this.client.delete(this.key); + result = this.client.get(this.key); 
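The two rules this commit applies across the test suite are easier to see outside a diff: instance fields are always read and written through this., and every if/while/for body gets braces even when it is a single statement. A small self-contained sketch (hypothetical class, not taken from the repo):

import java.util.List;

public class StyleExample {

    private String name;                          // instance field

    public void setUp(String name) {
        this.name = name;                         // field access qualified with "this."
    }

    public int countMatching(List<String> values) {
        int count = 0;
        for (String value : values) {             // braces stay even for one-line bodies
            if (value.equals(this.name)) {
                count++;
            }
        }
        return count;
    }
}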
assertThat(StringUtils.isEmpty(result.getValue())); - client.deletePrefix(key); - client.put(key + "1", value); - client.put(key + "2", value); - ScanPrefixResponse response = client.scanPrefix(key); + this.client.deletePrefix(this.key); + this.client.put(this.key + "1", this.value); + this.client.put(this.key + "2", this.value); + ScanPrefixResponse response = this.client.scanPrefix(this.key); assertThat(response.getKvsMap().size() == 2); - client.putTTL(key + "3", value, 1000); - client.keepTTLAlive(key + "3"); + this.client.putTTL(this.key + "3", this.value, 1000); + this.client.keepTTLAlive(this.key + "3"); final Consumer mockConsumer = mock(Consumer.class); // Run the test - client.listen(key + "3", mockConsumer); - client.listenPrefix(key + "4", mockConsumer); + this.client.listen(this.key + "3", mockConsumer); + this.client.listenPrefix(this.key + "4", mockConsumer); WatchResponse r = WatchResponse.newBuilder().addEvents( WatchEvent.newBuilder().setCurrent( - WatchKv.newBuilder().setKey(key).setValue("value") + WatchKv.newBuilder().setKey(this.key).setValue("value") .build()).setType(WatchType.Put).build()) .setClientId(0L) .setState(WatchState.Starting) .build(); - client.getWatchList(r); - client.getWatchMap(r); - client.lock(key, 3000L); - client.isLocked(key); - client.unlock(key); - client.lock(key, 3000L); - client.keepAlive(key); - client.close(); + this.client.getWatchList(r); + this.client.getWatchMap(r); + this.client.lock(this.key, 3000L); + this.client.isLocked(this.key); + this.client.unlock(this.key); + this.client.lock(this.key, 3000L); + this.client.keepAlive(this.key); + this.client.close(); } catch (Exception e) { } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java index 0f290fb03d..0b3d51a829 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java @@ -32,29 +32,29 @@ public class KVPairTest { @Before public void init() { - pair = new KVPair<>("key", 1); + this.pair = new KVPair<>("key", 1); } @Test public void testGetKey() { - assertEquals(pair.getKey(), "key"); + assertEquals(this.pair.getKey(), "key"); } @Test public void testSetKey() { - pair.setKey("key2"); - assertEquals(pair.getKey(), "key2"); + this.pair.setKey("key2"); + assertEquals(this.pair.getKey(), "key2"); } @Test public void testGetValue() { - assertTrue(Objects.equals(pair.getValue(), 1)); + assertEquals(1, this.pair.getValue()); } @Test public void testSetValue() { - pair.setValue(2); - assertTrue(Objects.equals(pair.getValue(), 2)); + this.pair.setValue(2); + assertEquals(2, this.pair.getValue()); } @Test @@ -70,6 +70,6 @@ public void testHashCode() { @Test public void testEquals() { var pair2 = new KVPair<>("key", 1); - assertTrue(pair2.equals(pair)); + assertTrue(pair2.equals(this.pair)); } } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java index 407410c648..c47e64373a 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java @@ -115,15 +115,15 @@ private static Metapb.ShardGroup createShardGroup() { @Before public void setup() { - cache = new PartitionCache(); + this.cache = new PartitionCache(); } @Test public void testGetPartitionById() 
{ var partition = createPartition(0, "graph0", 0, 65535); - cache.updateShardGroup(creteShardGroup(0)); - cache.updatePartition(partition); - var ret = cache.getPartitionById("graph0", 0); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition); + var ret = this.cache.getPartitionById("graph0", 0); assertNotNull(ret); assertEquals(ret.getKey(), partition); } @@ -131,9 +131,9 @@ public void testGetPartitionById() { @Test public void testGetPartitionByKey() throws UnsupportedEncodingException { var partition = createPartition(0, "graph0", 0, 65535); - cache.updateShardGroup(creteShardGroup(0)); - cache.updatePartition(partition); - var ret = cache.getPartitionByKey("graph0", "0".getBytes(StandardCharsets.UTF_8)); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition); + var ret = this.cache.getPartitionByKey("graph0", "0".getBytes(StandardCharsets.UTF_8)); assertNotNull(ret); assertEquals(ret.getKey(), partition); } @@ -141,136 +141,136 @@ public void testGetPartitionByKey() throws UnsupportedEncodingException { @Test public void getPartitionByCode() { var partition = createPartition(0, "graph0", 0, 1024); - cache.updateShardGroup(creteShardGroup(0)); - cache.updatePartition(partition); - var ret = cache.getPartitionByCode("graph0", 10); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition); + var ret = this.cache.getPartitionByCode("graph0", 10); assertNotNull(ret); assertEquals(ret.getKey(), partition); - assertNull(cache.getPartitionByCode("graph0", 2000)); + assertNull(this.cache.getPartitionByCode("graph0", 2000)); } @Test public void testGetPartitions() { var partition1 = createPartition(0, "graph0", 0, 1024); - cache.updateShardGroup(creteShardGroup(0)); - cache.updatePartition(partition1); - assertEquals(cache.getPartitions("graph0").size(), 1); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition1); + assertEquals(this.cache.getPartitions("graph0").size(), 1); var partition2 = createPartition(1, "graph0", 1024, 2048); - cache.updateShardGroup(creteShardGroup(1)); - cache.updatePartition(partition2); - assertEquals(cache.getPartitions("graph0").size(), 2); - System.out.print(cache.debugCacheByGraphName("graph0")); + this.cache.updateShardGroup(creteShardGroup(1)); + this.cache.updatePartition(partition2); + assertEquals(this.cache.getPartitions("graph0").size(), 2); + System.out.print(this.cache.debugCacheByGraphName("graph0")); } @Test public void testAddPartition() { var partition = createPartition(0, "graph0", 0, 65535); - cache.addPartition("graph0", 0, partition); - var ret = cache.getPartitionById("graph0", 0); + this.cache.addPartition("graph0", 0, partition); + var ret = this.cache.getPartitionById("graph0", 0); assertNotNull(ret); assertEquals(ret.getKey(), partition); - assertNotNull(cache.getPartitionByCode("graph0", 2000)); - System.out.print(cache.debugCacheByGraphName("graph0")); + assertNotNull(this.cache.getPartitionByCode("graph0", 2000)); + System.out.print(this.cache.debugCacheByGraphName("graph0")); var partition2 = createPartition(0, "graph0", 0, 1024); - cache.addPartition("graph0", 0, partition2); - ret = cache.getPartitionById("graph0", 0); + this.cache.addPartition("graph0", 0, partition2); + ret = this.cache.getPartitionById("graph0", 0); assertNotNull(ret); assertEquals(ret.getKey(), partition2); - assertNull(cache.getPartitionByCode("graph0", 2000)); - System.out.print(cache.debugCacheByGraphName("graph0")); + 
assertNull(this.cache.getPartitionByCode("graph0", 2000)); + System.out.print(this.cache.debugCacheByGraphName("graph0")); } @Test public void testUpdatePartition() { var partition = createPartition(0, "graph0", 0, 65535); - cache.updateShardGroup(creteShardGroup(0)); - cache.addPartition("graph0", 0, partition); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.addPartition("graph0", 0, partition); var partition2 = createPartition(0, "graph0", 0, 1024); - cache.updatePartition("graph0", 0, partition2); - var ret = cache.getPartitionById("graph0", 0); + this.cache.updatePartition("graph0", 0, partition2); + var ret = this.cache.getPartitionById("graph0", 0); assertNotNull(ret); assertEquals(ret.getKey(), partition2); - assertNull(cache.getPartitionByCode("graph0", 2000)); + assertNull(this.cache.getPartitionByCode("graph0", 2000)); } @Test public void testUpdatePartition2() { var partition = createPartition(0, "graph0", 0, 1024); - cache.updateShardGroup(creteShardGroup(0)); - assertTrue(cache.updatePartition(partition)); - assertFalse(cache.updatePartition(partition)); - var ret = cache.getPartitionById("graph0", 0); + this.cache.updateShardGroup(creteShardGroup(0)); + assertTrue(this.cache.updatePartition(partition)); + assertFalse(this.cache.updatePartition(partition)); + var ret = this.cache.getPartitionById("graph0", 0); assertNotNull(ret); assertEquals(ret.getKey(), partition); - assertNull(cache.getPartitionByCode("graph0", 2000)); + assertNull(this.cache.getPartitionByCode("graph0", 2000)); } @Test public void testRemovePartition() { var partition = createPartition(0, "graph0", 0, 1024); - cache.updateShardGroup(creteShardGroup(0)); - cache.updatePartition(partition); - assertNotNull(cache.getPartitionById("graph0", 0)); - cache.removePartition("graph0", 0); - assertNull(cache.getPartitionById("graph0", 0)); - System.out.print(cache.debugCacheByGraphName("graph0")); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition); + assertNotNull(this.cache.getPartitionById("graph0", 0)); + this.cache.removePartition("graph0", 0); + assertNull(this.cache.getPartitionById("graph0", 0)); + System.out.print(this.cache.debugCacheByGraphName("graph0")); } @Test public void testRange() { var partition1 = createPartition(1, "graph0", 0, 3); var partition2 = createPartition(2, "graph0", 3, 6); - cache.updatePartition(partition1); - cache.updatePartition(partition2); + this.cache.updatePartition(partition1); + this.cache.updatePartition(partition2); var partition3 = createPartition(3, "graph0", 1, 2); var partition4 = createPartition(4, "graph0", 2, 3); - cache.updatePartition(partition3); - cache.updatePartition(partition4); - System.out.println(cache.debugCacheByGraphName("graph0")); + this.cache.updatePartition(partition3); + this.cache.updatePartition(partition4); + System.out.println(this.cache.debugCacheByGraphName("graph0")); var partition6 = createPartition(1, "graph0", 0, 1); - cache.updatePartition(partition6); + this.cache.updatePartition(partition6); - System.out.println(cache.debugCacheByGraphName("graph0")); + System.out.println(this.cache.debugCacheByGraphName("graph0")); var partition5 = createPartition(1, "graph0", 0, 3); - cache.updatePartition(partition5); - System.out.println(cache.debugCacheByGraphName("graph0")); + this.cache.updatePartition(partition5); + System.out.println(this.cache.debugCacheByGraphName("graph0")); } @Test public void testRange2() { var partition1 = createPartition(1, "graph0", 0, 3); var partition2 = 
createPartition(2, "graph0", 3, 6); - cache.updatePartition(partition1); - cache.updatePartition(partition2); + this.cache.updatePartition(partition1); + this.cache.updatePartition(partition2); - System.out.println(cache.debugCacheByGraphName("graph0")); + System.out.println(this.cache.debugCacheByGraphName("graph0")); // 中间有缺失 var partition3 = createPartition(1, "graph0", 2, 3); - cache.updatePartition(partition3); + this.cache.updatePartition(partition3); - System.out.println(cache.debugCacheByGraphName("graph0")); + System.out.println(this.cache.debugCacheByGraphName("graph0")); var partition5 = createPartition(1, "graph0", 0, 3); - cache.updatePartition(partition5); - System.out.println(cache.debugCacheByGraphName("graph0")); + this.cache.updatePartition(partition5); + System.out.println(this.cache.debugCacheByGraphName("graph0")); } @Test public void testRemovePartitions() { var partition1 = createPartition(0, "graph0", 0, 1024); var partition2 = createPartition(1, "graph0", 1024, 2048); - cache.updateShardGroup(creteShardGroup(0)); - cache.updatePartition(partition1); - cache.updateShardGroup(creteShardGroup(1)); - cache.updatePartition(partition2); - assertEquals(cache.getPartitions("graph0").size(), 2); - cache.removePartitions(); - assertEquals(cache.getPartitions("graph0").size(), 0); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition1); + this.cache.updateShardGroup(creteShardGroup(1)); + this.cache.updatePartition(partition2); + assertEquals(this.cache.getPartitions("graph0").size(), 2); + this.cache.removePartitions(); + assertEquals(this.cache.getPartitions("graph0").size(), 0); } @Test @@ -278,81 +278,81 @@ public void testRemoveAll() { var partition1 = createPartition(0, "graph0", 0, 1024); var partition2 = createPartition(1, "graph0", 1024, 2048); var partition3 = createPartition(0, "graph1", 0, 2048); - cache.updateShardGroup(creteShardGroup(0)); - cache.updateShardGroup(creteShardGroup(1)); - cache.updatePartition(partition1); - cache.updatePartition(partition2); - cache.updatePartition(partition3); - - assertEquals(cache.getPartitions("graph0").size(), 2); - assertEquals(cache.getPartitions("graph1").size(), 1); - cache.removeAll("graph0"); - assertEquals(cache.getPartitions("graph0").size(), 0); - assertEquals(cache.getPartitions("graph1").size(), 1); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updateShardGroup(creteShardGroup(1)); + this.cache.updatePartition(partition1); + this.cache.updatePartition(partition2); + this.cache.updatePartition(partition3); + + assertEquals(this.cache.getPartitions("graph0").size(), 2); + assertEquals(this.cache.getPartitions("graph1").size(), 1); + this.cache.removeAll("graph0"); + assertEquals(this.cache.getPartitions("graph0").size(), 0); + assertEquals(this.cache.getPartitions("graph1").size(), 1); } @Test public void testUpdateShardGroup() { var shardGroup = createShardGroup(); - cache.updateShardGroup(shardGroup); - assertNotNull(cache.getShardGroup(shardGroup.getId())); + this.cache.updateShardGroup(shardGroup); + assertNotNull(this.cache.getShardGroup(shardGroup.getId())); } @Test public void testGetShardGroup() { var shardGroup = createShardGroup(); - cache.updateShardGroup(shardGroup); - assertTrue(Objects.equals(cache.getShardGroup(shardGroup.getId()), shardGroup)); + this.cache.updateShardGroup(shardGroup); + assertEquals(this.cache.getShardGroup(shardGroup.getId()), shardGroup); } @Test public void testAddStore() { var store = createStore(1); - cache.addStore(1L, 
store); - assertEquals(cache.getStoreById(1L), store); + this.cache.addStore(1L, store); + assertEquals(this.cache.getStoreById(1L), store); } @Test public void testGetStoreById() { var store = createStore(1); - cache.addStore(1L, store); - assertEquals(cache.getStoreById(1L), store); + this.cache.addStore(1L, store); + assertEquals(this.cache.getStoreById(1L), store); } @Test public void testRemoveStore() { var store = createStore(1); - cache.addStore(1L, store); - assertEquals(cache.getStoreById(1L), store); + this.cache.addStore(1L, store); + assertEquals(this.cache.getStoreById(1L), store); - cache.removeStore(1L); - assertNull(cache.getStoreById(1L)); + this.cache.removeStore(1L); + assertNull(this.cache.getStoreById(1L)); } @Test public void testHasGraph() { var partition = createPartition(0, "graph0", 0, 65535); - cache.updateShardGroup(creteShardGroup(0)); - cache.updatePartition(partition); - assertTrue(cache.hasGraph("graph0")); - assertFalse(cache.hasGraph("graph1")); + this.cache.updateShardGroup(creteShardGroup(0)); + this.cache.updatePartition(partition); + assertTrue(this.cache.hasGraph("graph0")); + assertFalse(this.cache.hasGraph("graph1")); } @Test public void testUpdateGraph() { var graph = createGraph("graph0", 10); - cache.updateGraph(graph); - assertEquals(cache.getGraph("graph0"), graph); + this.cache.updateGraph(graph); + assertEquals(this.cache.getGraph("graph0"), graph); graph = createGraph("graph0", 12); - cache.updateGraph(graph); - assertEquals(cache.getGraph("graph0"), graph); + this.cache.updateGraph(graph); + assertEquals(this.cache.getGraph("graph0"), graph); } @Test public void testGetGraph() { var graph = createGraph("graph0", 12); - cache.updateGraph(graph); - assertEquals(cache.getGraph("graph0"), graph); + this.cache.updateGraph(graph); + assertEquals(this.cache.getGraph("graph0"), graph); } @Test @@ -360,10 +360,10 @@ public void testGetGraphs() { var graph1 = createGraph("graph0", 12); var graph2 = createGraph("graph1", 12); var graph3 = createGraph("graph2", 12); - cache.updateGraph(graph1); - cache.updateGraph(graph2); - cache.updateGraph(graph3); - assertEquals(cache.getGraphs().size(), 3); + this.cache.updateGraph(graph1); + this.cache.updateGraph(graph2); + this.cache.updateGraph(graph3); + assertEquals(this.cache.getGraphs().size(), 3); } @Test @@ -371,24 +371,24 @@ public void testReset() { var graph1 = createGraph("graph0", 12); var graph2 = createGraph("graph1", 12); var graph3 = createGraph("graph2", 12); - cache.updateGraph(graph1); - cache.updateGraph(graph2); - cache.updateGraph(graph3); - assertEquals(cache.getGraphs().size(), 3); - cache.reset(); - assertEquals(cache.getGraphs().size(), 0); + this.cache.updateGraph(graph1); + this.cache.updateGraph(graph2); + this.cache.updateGraph(graph3); + assertEquals(this.cache.getGraphs().size(), 3); + this.cache.reset(); + assertEquals(this.cache.getGraphs().size(), 0); } @Test public void testUpdateShardGroupLeader() { var shardGroup = createShardGroup(); - cache.updateShardGroup(shardGroup); + this.cache.updateShardGroup(shardGroup); var leader = Metapb.Shard.newBuilder().setStoreId(2).setRole(Metapb.ShardRole.Leader).build(); - cache.updateShardGroupLeader(shardGroup.getId(), leader); + this.cache.updateShardGroupLeader(shardGroup.getId(), leader); - assertEquals(cache.getLeaderShard(shardGroup.getId()), leader); + assertEquals(this.cache.getLeaderShard(shardGroup.getId()), leader); } } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java 
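The assertion changes in KVPairTest above and MetadataKeyHelperTest just below also swap assertTrue(Objects.equals(...)) and assertTrue(Arrays.equals(...)) for assertEquals and assertArrayEquals, which report the expected and actual values on failure instead of a bare "expected true". A self-contained JUnit 4 sketch of the difference; the TASK_MOVE key is taken from the test below, the rest is illustrative.

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class AssertStyleTest {

    @Test
    public void testSpecificAsserts() {
        byte[] key = "TASK_MOVE/foo/0/1".getBytes();

        // On failure these print the expected and actual values:
        assertArrayEquals("TASK_MOVE/foo/0/1".getBytes(), key);
        assertEquals(2, 1 + 1);

        // The replaced form only reports that "true" was expected:
        // assertTrue(Arrays.equals("TASK_MOVE/foo/0/1".getBytes(), key));
    }
}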
b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java index b9bb234a7a..d8a538f955 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java @@ -17,9 +17,7 @@ package org.apache.hugegraph.pd.core.meta; -import static org.junit.Assert.assertTrue; - -import java.util.Arrays; +import static org.junit.Assert.assertArrayEquals; import org.apache.hugegraph.pd.meta.MetadataKeyHelper; import org.junit.Test; @@ -29,8 +27,8 @@ public class MetadataKeyHelperTest { @Test public void testMoveTaskKey() { var key = MetadataKeyHelper.getMoveTaskKey("foo", 0, 1); - assertTrue(Arrays.equals(key, "TASK_MOVE/foo/0/1".getBytes())); + assertArrayEquals(key, "TASK_MOVE/foo/0/1".getBytes()); var key2 = MetadataKeyHelper.getMoveTaskPrefix("foo"); - assertTrue(Arrays.equals(key2, "TASK_MOVE/foo".getBytes())); + assertArrayEquals(key2, "TASK_MOVE/foo".getBytes()); } } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java index 8526e49d31..7a06cef706 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java @@ -35,7 +35,7 @@ public class ConfigServiceTest { @Before public void setUp() { - service = new ConfigService(config); + this.service = new ConfigService(this.config); } @Test @@ -48,13 +48,13 @@ public void testGetPDConfig() throws Exception { .setShardCount(55) .setMaxShardsPerStore(0) .setTimestamp(0L).build(); - service.setPDConfig(config); + this.service.setPDConfig(config); // Run the test - Metapb.PDConfig result = service.getPDConfig(0L); + Metapb.PDConfig result = this.service.getPDConfig(0L); // Verify the results Assert.assertTrue(result.getShardCount() == 55); - result = service.getPDConfig(); + result = this.service.getPDConfig(); Assert.assertTrue(result.getShardCount() == 55); } catch (Exception e) { @@ -69,12 +69,12 @@ public void testGetGraphSpace() throws Exception { .setName("gs1") .setTimestamp(0L).build(); final List expectedResult = List.of(space); - service.setGraphSpace(space); + this.service.setGraphSpace(space); // Run the test - final List result = service.getGraphSpace( + final List result = this.service.getGraphSpace( "gs1"); - Assert.assertTrue(result.size() == 1); + Assert.assertEquals(1, result.size()); } @Test @@ -98,7 +98,7 @@ public void testUpdatePDConfig() { expectedResult.setHost("host"); expectedResult.setVerifyPath("verifyPath"); expectedResult.setLicensePath("licensePath"); - service.updatePDConfig(mConfig); + this.service.updatePDConfig(mConfig); } catch (Exception e) { } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java index 307851953f..1c2e838579 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java @@ -36,16 +36,16 @@ public class LogServiceTest { @Before public void setUp() { - logServiceUnderTest = new LogService(mockPdConfig); + this.logServiceUnderTest = new LogService(this.mockPdConfig); } @Test public void testGetLog() throws Exception { - logServiceUnderTest.insertLog("action", "message", - Any.newBuilder().build()); + 
this.logServiceUnderTest.insertLog("action", "message", + Any.newBuilder().build()); // Run the test - final List result = logServiceUnderTest.getLog( + final List result = this.logServiceUnderTest.getLog( "action", 0L, System.currentTimeMillis()); // Verify the results diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java index 3327649a39..fcc3f34c42 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java @@ -17,8 +17,6 @@ package org.apache.hugegraph.pd.service; -import static org.junit.Assert.assertEquals; - import java.util.List; import org.apache.hugegraph.pd.PartitionService; @@ -36,16 +34,16 @@ public class PartitionServiceTest extends PdTestBase { @Before public void init() { - service = getPartitionService(); + this.service = getPartitionService(); } @Test public void testCombinePartition() throws PDException { buildEnv(); // 0, 1, 2-> 0, 3,4,5->1, 6,7,8 ->2, 9,10, 11-> 3 - service.combinePartition(4); + this.service.combinePartition(4); - var partition = service.getPartitionById("graph0", 0); + var partition = this.service.getPartitionById("graph0", 0); assertEquals(0, partition.getStartKey()); assertEquals(5462, partition.getEndKey()); @@ -65,9 +63,9 @@ public void testCombinePartition() throws PDException { public void testCombinePartition2() throws PDException { buildEnv(); // 0, 1, 2-> 0, 3,4,5->1, 6,7,8 ->2, 9,10, 11-> 3 - service.combinePartition(4); + this.service.combinePartition(4); - var partition = service.getPartitionById("graph0", 0); + var partition = this.service.getPartitionById("graph0", 0); assertEquals(0, partition.getStartKey()); assertEquals(5462, partition.getEndKey()); @@ -123,7 +121,7 @@ private void buildEnv() throws PDException { .build(); storeInfoMeta.updateShardGroup(shardGroup); - var partitionShard = service.getPartitionByCode("graph0", lastId); + var partitionShard = this.service.getPartitionByCode("graph0", lastId); if (partitionShard != null) { lastId = partitionShard.getPartition().getEndKey(); } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java index 3e56099e5b..018a63d356 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java @@ -17,7 +17,6 @@ package org.apache.hugegraph.pd.service; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -35,7 +34,7 @@ public class StoreMonitorDataServiceTest extends PdTestBase { @Before public void init() { - service = getStoreMonitorDataService(); + this.service = getStoreMonitorDataService(); var store = getPdConfig().getStore(); store.setMonitorDataEnabled(true); store.setMonitorDataInterval("1s"); @@ -46,26 +45,26 @@ public void init() { public void test() throws InterruptedException, PDException { long now = System.currentTimeMillis() / 1000; for (int i = 0; i < 5; i++) { - service.saveMonitorData(genStats()); + this.service.saveMonitorData(genStats()); now = System.currentTimeMillis() / 1000; Thread.sleep(1100); } - assertTrue(service.getLatestStoreMonitorDataTimeStamp(1) == 0 || - 
service.getLatestStoreMonitorDataTimeStamp(1) == now); + assertTrue(this.service.getLatestStoreMonitorDataTimeStamp(1) == 0 || + this.service.getLatestStoreMonitorDataTimeStamp(1) == now); - var data = service.getStoreMonitorData(1); + var data = this.service.getStoreMonitorData(1); assertEquals(5, data.size()); - assertNotNull(service.debugMonitorInfo(List.of(Metapb.RecordPair.newBuilder() - .setKey("key1") - .setValue(1) - .build()))); + assertNotNull(this.service.debugMonitorInfo(List.of(Metapb.RecordPair.newBuilder() + .setKey("key1") + .setValue(1) + .build()))); - assertNotNull(service.getStoreMonitorDataText(1)); + assertNotNull(this.service.getStoreMonitorDataText(1)); - service.removeExpiredMonitorData(1, now + 1); - assertEquals(0, service.getStoreMonitorData(1).size()); + this.service.removeExpiredMonitorData(1, now + 1); + assertEquals(0, this.service.getStoreMonitorData(1).size()); } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java index d02c0cbc35..21537fa148 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java @@ -17,7 +17,6 @@ package org.apache.hugegraph.pd.service; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import org.apache.hugegraph.pd.StoreNodeService; @@ -31,16 +30,16 @@ public class StoreNodeServiceNewTest extends PdTestBase { @Before public void init() { - service = getStoreNodeService(); + this.service = getStoreNodeService(); } @Test public void testGetTaskInfoMeta() { - assertNotNull(service.getTaskInfoMeta()); + assertNotNull(this.service.getTaskInfoMeta()); } public void testGetStoreInfoMeta() { - assertNotNull(service.getStoreInfoMeta()); + assertNotNull(this.service.getStoreInfoMeta()); } @Test @@ -51,11 +50,11 @@ public void testRemoveShardGroup() throws PDException { .setState( Metapb.PartitionState.PState_Offline) .build(); - service.getStoreInfoMeta().updateShardGroup(group); + this.service.getStoreInfoMeta().updateShardGroup(group); } - service.deleteShardGroup(11); - service.deleteShardGroup(10); + this.service.deleteShardGroup(11); + this.service.deleteShardGroup(10); assertEquals(10, getPdConfig().getConfigService().getPDConfig().getPartitionCount()); // restore diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java index da2154df81..8b1d31fb6f 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java @@ -44,8 +44,8 @@ public class StoreServiceTest { @Before public void setUp() { - config = getConfig(); - service = new StoreNodeService(config); + this.config = getConfig(); + this.service = new StoreNodeService(this.config); } @Test @@ -58,7 +58,7 @@ public void testInit() { pdConfig1)); // Run the test - service.init(partitionService); + this.service.init(partitionService); // Verify the results } @@ -91,7 +91,7 @@ private PDConfig getConfig() { public void testIsOK() { // Setup // Run the test - final boolean result = service.isOK(); + final boolean result = this.service.isOK(); // Verify the results assertThat(result).isTrue(); @@ -162,7 +162,7 @@ public void testRegister() throws Exception { 
Map.entry("value", "value")); // Run the test - final Metapb.Store result = service.register(store); + final Metapb.Store result = this.service.register(store); } @Test @@ -211,7 +211,7 @@ public void testGetStore() throws Exception { .build(); // Run the test - final Metapb.Store result = service.getStore(0L); + final Metapb.Store result = this.service.getStore(0L); } catch (Exception e) { } @@ -284,7 +284,7 @@ public void testUpdateStore() throws Exception { partition.setShardCount(0); // Run the test - final Metapb.Store result = service.updateStore(store); + final Metapb.Store result = this.service.updateStore(store); } @Test @@ -317,7 +317,7 @@ public void testStoreTurnoff() throws Exception { partition.setShardCount(0); // Run the test - service.storeTurnoff(store); + this.service.storeTurnoff(store); // Verify the results } @@ -345,7 +345,7 @@ public void testGetStores1() throws Exception { .setDataPath("dataPath").build()); // Run the test - final List result = service.getStores(); + final List result = this.service.getStores(); } @Test @@ -370,7 +370,7 @@ public void testGetStores2() throws Exception { .setDataPath("dataPath").build()); // Run the test - final List result = service.getStores("graphName"); + final List result = this.service.getStores("graphName"); } @@ -396,7 +396,7 @@ public void testGetStoreStatus() throws Exception { .setDataPath("dataPath").build()); // Run the test - final List result = service.getStoreStatus(false); + final List result = this.service.getStoreStatus(false); } @@ -411,7 +411,7 @@ public void testGetShardGroups() throws Exception { .build()); // Run the test - final List result = service.getShardGroups(); + final List result = this.service.getShardGroups(); } @@ -433,7 +433,7 @@ public void testGetShardGroup() throws Exception { .build(); // Run the test - final Metapb.ShardGroup result = service.getShardGroup(0); + final Metapb.ShardGroup result = this.service.getShardGroup(0); // Verify the results } @@ -450,7 +450,7 @@ public void testGetShardGroupsByStore() throws Exception { .build()); // Run the test - final List result = service.getShardGroupsByStore( + final List result = this.service.getShardGroupsByStore( 0L); } @@ -476,7 +476,7 @@ public void testGetActiveStores1() throws Exception { .setDataPath("dataPath").build()); // Run the test - final List result = service.getActiveStores("graphName"); + final List result = this.service.getActiveStores("graphName"); // Verify the results } @@ -484,7 +484,7 @@ public void testGetActiveStores1() throws Exception { @Test public void testGetActiveStores1ThrowsPDException() { try { - List stores = service.getActiveStores(); + List stores = this.service.getActiveStores(); assertThat(stores.size() == 0); } catch (Exception e) { @@ -511,15 +511,15 @@ public void testGetTombStores() throws Exception { .build()).build()) .setDataVersion(0).setCores(0) .setDataPath("dataPath").build()); - service.register(storeList.get(0)); + this.service.register(storeList.get(0)); // Run the test - final List result = service.getTombStores(); + final List result = this.service.getTombStores(); // Verify the results assertThat(result.size() == 1); - service.removeStore(result.get(0).getId()); - List stores = service.getStores(); + this.service.removeStore(result.get(0).getId()); + List stores = this.service.getStores(); assertThat(stores.size() == 0); } @@ -550,7 +550,7 @@ public void testAllocShards() throws Exception { partition.setShardCount(0); // Run the test - final List result = service.allocShards(graph, 0); + 
final List result = this.service.allocShards(graph, 0); } catch (Exception e) { } @@ -583,10 +583,10 @@ public void testReallocShards() throws Exception { partition.setTotalCount(0); partition.setMaxShardsPerStore(0); partition.setShardCount(0); - when(config.getPartition()).thenReturn(partition); + when(this.config.getPartition()).thenReturn(partition); // Run the test - final List result = service.reallocShards(shardGroup); + final List result = this.service.reallocShards(shardGroup); // Verify the results assertThat(result).isEqualTo(expectedResult); @@ -604,7 +604,7 @@ public void testUpdateShardGroup() { .setRole(Metapb.ShardRole.None).build()); // Run the test - service.updateShardGroup(0, shards, 0, 0); + this.service.updateShardGroup(0, shards, 0, 0); } catch (Exception e) { } @@ -613,7 +613,7 @@ public void testUpdateShardGroup() { @Test public void testUpdateShardGroupState() throws Exception { try { - service.updateShardGroupState(0, Metapb.PartitionState.PState_None); + this.service.updateShardGroupState(0, Metapb.PartitionState.PState_None); } catch (Exception e) { } @@ -642,17 +642,17 @@ public void testHeartBeat() throws Exception { final Metapb.ClusterStats expectedResult = Metapb.ClusterStats .newBuilder().setState(Metapb.ClusterState.Cluster_OK) .setMessage("message").setTimestamp(0L).build(); - when(config.getMinStoreCount()).thenReturn(0); + when(this.config.getMinStoreCount()).thenReturn(0); // Configure PDConfig.getPartition(...). final PDConfig.Partition partition = new PDConfig().new Partition(); partition.setTotalCount(0); partition.setMaxShardsPerStore(0); partition.setShardCount(0); - when(config.getPartition()).thenReturn(partition); + when(this.config.getPartition()).thenReturn(partition); // Run the test - final Metapb.ClusterStats result = service.heartBeat(storeStats); + final Metapb.ClusterStats result = this.service.heartBeat(storeStats); // Verify the results assertThat(result).isEqualTo(expectedResult); @@ -670,7 +670,7 @@ public void testUpdateClusterStatus1() { .setMessage("message").setTimestamp(0L).build(); // Run the test - final Metapb.ClusterStats result = service.updateClusterStatus( + final Metapb.ClusterStats result = this.service.updateClusterStatus( Metapb.ClusterState.Cluster_OK); } @@ -682,7 +682,7 @@ public void testUpdateClusterStatus2() { .setMessage("message").setTimestamp(0L).build(); // Run the test - final Metapb.ClusterStats result = service.updateClusterStatus( + final Metapb.ClusterStats result = this.service.updateClusterStatus( Metapb.PartitionState.PState_None); } @@ -690,7 +690,7 @@ public void testUpdateClusterStatus2() { public void testCheckStoreStatus() { // Setup // Run the test - service.checkStoreStatus(); + this.service.checkStoreStatus(); // Verify the results } @@ -702,7 +702,7 @@ public void testAddStatusListener() { StoreStatusListener.class); // Run the test - service.addStatusListener(mockListener); + this.service.addStatusListener(mockListener); // Verify the results } @@ -801,7 +801,7 @@ public void testCheckStoreCanOffline() { .setDataPath("dataPath") .build(); // Run the test - final boolean result = service.checkStoreCanOffline(currentStore); + final boolean result = this.service.checkStoreCanOffline(currentStore); // Verify the results assertThat(result).isTrue(); @@ -812,7 +812,7 @@ public void testShardGroupsDbCompaction() throws Exception { // Setup // Run the test try { - service.shardGroupsDbCompaction(0, "tableName"); + this.service.shardGroupsDbCompaction(0, "tableName"); } catch (Exception e) { } @@ 
-825,7 +825,7 @@ public void testGetQuota() throws Exception { // Setup // Run the test try { - service.getQuota(); + this.service.getQuota(); } catch (Exception e) { } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java index 751aa1b5a5..b1064ccff2 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java @@ -34,7 +34,7 @@ public class TaskScheduleServiceTest extends PdTestBase { @Before public void init() { - service = getTaskService(); + this.service = getTaskService(); } @Test @@ -66,7 +66,7 @@ public void testBalancePartitionLeader() throws PDException { getPdConfig().getPartition().setShardCount(3); getPartitionService().updatePartition(list); - var rst = service.balancePartitionLeader(true); + var rst = this.service.balancePartitionLeader(true); assertTrue(rst.size() > 0); // recover getPdConfig().getPartition().setShardCount(1); From 6a813d2c0640766991682b59843559c9f85b4d9c Mon Sep 17 00:00:00 2001 From: imbajin Date: Tue, 20 Jun 2023 19:44:55 +0800 Subject: [PATCH 08/18] update code to Jun2 2023 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: I9a21c17a07c96c42eafc5ffeedfc70b2a77563d2 GraphPlatform-2120 检查channel Change-Id: I9135b19b2a3bd66f31dcdbcf9169b1884ae934c2 GraphPlatform-2120 处理加锁 Change-Id: I64441d02221e96aa21027faafe90c8b7e04b15d1 GraphPlatform-1749 修复可能导致的死锁问题 Change-Id: Ie5ab9fd62746bfcd77edcaf8881671abee678d31 GraphPlatform-1749 修正pd raft Change-Id: I31064fa2ce31b677cf044a85645e7244cbbbff26 GraphPlatform-2120 控制pd线程池大小 Change-Id: Ie505f5525bcff79f84c0cd674b92c6abf3a2c03f GraphPlatform-1749 优化partition cache Change-Id: I5cafd9078ccc91b7f0b4d9157a3913049c38d8cb GraphPlatform-1657 rest接口转发到leader Change-Id: Ib5db654a35733ddead7972d5e4b08b981a89fb31 GraphPlatform-2020 fix cache bug when direct put Change-Id: I52428d8d5d4bb977ee7796c0fe1e4ced07aff12d GraphPlatform-2020 fix cache bug when direct put Change-Id: Id57427a94732e12f2d96429a2b677d5c5f71e8ff GraphPlatform-1771 update code Change-Id: Ic3b95ca22900453adbc3571734e1da2c97e28355 --- .../pd/client/AbstractClientStubProxy.java | 6 +- .../apache/hugegraph/pd/client/KvClient.java | 119 +++++++---- .../apache/hugegraph/pd/client/PDClient.java | 91 ++++---- .../hugegraph/pd/client/PDPulseImpl.java | 19 +- .../hugegraph/pd/client/PDWatchImpl.java | 13 +- .../apache/hugegraph/pd/watch/NodeEvent.java | 4 +- .../hugegraph/pd/watch/PartitionEvent.java | 8 +- .../apache/hugegraph/pd/clitools/Main.java | 14 +- .../hugegraph/pd/common/PartitionCache.java | 196 +++++++++--------- .../hugegraph/pd/TaskScheduleService.java | 4 +- .../apache/hugegraph/pd/config/PDConfig.java | 13 ++ .../hugegraph/pd/meta/DiscoveryMetaStore.java | 6 +- .../hugegraph/pd/meta/MetadataStoreBase.java | 9 +- .../apache/hugegraph/pd/raft/RaftEngine.java | 44 +++- hg-pd-grpc/src/main/proto/metapb.proto | 2 + hg-pd-grpc/src/main/proto/pdpb.proto | 12 ++ .../hugegraph/pd/metrics/PDMetrics.java | 8 +- .../apache/hugegraph/pd/model/DemoModel.java | 8 +- .../hugegraph/pd/model/PromTargetsModel.java | 8 +- .../org/apache/hugegraph/pd/rest/API.java | 4 + .../apache/hugegraph/pd/rest/IndexAPI.java | 9 +- .../apache/hugegraph/pd/rest/MemberAPI.java | 9 +- .../pd/service/DiscoveryService.java | 43 +--- .../pd/service/KvServiceGrpcImpl.java | 11 +- 
-825,7 +825,7 @@ public void testGetQuota() throws Exception { // Setup // Run the test try { - service.getQuota(); + this.service.getQuota(); } catch (Exception e) { } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java index 751aa1b5a5..b1064ccff2 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java @@ -34,7 +34,7 @@ public class TaskScheduleServiceTest extends PdTestBase { @Before public void init() { - service = getTaskService(); + this.service = getTaskService(); } @Test @@ -66,7 +66,7 @@ public void testBalancePartitionLeader() throws PDException { getPdConfig().getPartition().setShardCount(3); getPartitionService().updatePartition(list); - var rst = service.balancePartitionLeader(true); + var rst = this.service.balancePartitionLeader(true); assertTrue(rst.size() > 0); // recover getPdConfig().getPartition().setShardCount(1); From 6a813d2c0640766991682b59843559c9f85b4d9c Mon Sep 17 00:00:00 2001 From: imbajin Date: Tue, 20 Jun 2023 19:44:55 +0800 Subject: [PATCH 08/18] update code to Jun 2023 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: I9a21c17a07c96c42eafc5ffeedfc70b2a77563d2 GraphPlatform-2120 check channel Change-Id: I9135b19b2a3bd66f31dcdbcf9169b1884ae934c2 GraphPlatform-2120 handle locking Change-Id: I64441d02221e96aa21027faafe90c8b7e04b15d1 GraphPlatform-1749 fix a potential deadlock issue Change-Id: Ie5ab9fd62746bfcd77edcaf8881671abee678d31 GraphPlatform-1749 fix pd raft Change-Id: I31064fa2ce31b677cf044a85645e7244cbbbff26 GraphPlatform-2120 control pd thread pool size Change-Id: Ie505f5525bcff79f84c0cd674b92c6abf3a2c03f GraphPlatform-1749 optimize partition cache Change-Id: I5cafd9078ccc91b7f0b4d9157a3913049c38d8cb GraphPlatform-1657 forward rest API requests to the leader Change-Id: Ib5db654a35733ddead7972d5e4b08b981a89fb31 GraphPlatform-2020 fix cache bug when direct put Change-Id: I52428d8d5d4bb977ee7796c0fe1e4ced07aff12d GraphPlatform-2020 fix cache bug when direct put Change-Id: Id57427a94732e12f2d96429a2b677d5c5f71e8ff GraphPlatform-1771 update code Change-Id: Ic3b95ca22900453adbc3571734e1da2c97e28355 --- .../pd/client/AbstractClientStubProxy.java | 6 +- .../apache/hugegraph/pd/client/KvClient.java | 119 +++++++---- .../apache/hugegraph/pd/client/PDClient.java | 91 ++++---- .../hugegraph/pd/client/PDPulseImpl.java | 19 +- .../hugegraph/pd/client/PDWatchImpl.java | 13 +- .../apache/hugegraph/pd/watch/NodeEvent.java | 4 +- .../hugegraph/pd/watch/PartitionEvent.java | 8 +- .../apache/hugegraph/pd/clitools/Main.java | 14 +- .../hugegraph/pd/common/PartitionCache.java | 196 +++++++++--------- .../hugegraph/pd/TaskScheduleService.java | 4 +- .../apache/hugegraph/pd/config/PDConfig.java | 13 ++ .../hugegraph/pd/meta/DiscoveryMetaStore.java | 6 +- .../hugegraph/pd/meta/MetadataStoreBase.java | 9 +- .../apache/hugegraph/pd/raft/RaftEngine.java | 44 +++- hg-pd-grpc/src/main/proto/metapb.proto | 2 + hg-pd-grpc/src/main/proto/pdpb.proto | 12 ++ .../hugegraph/pd/metrics/PDMetrics.java | 8 +- .../apache/hugegraph/pd/model/DemoModel.java | 8 +- .../hugegraph/pd/model/PromTargetsModel.java | 8 +- .../org/apache/hugegraph/pd/rest/API.java | 4 + .../apache/hugegraph/pd/rest/IndexAPI.java | 9 +- .../apache/hugegraph/pd/rest/MemberAPI.java | 9 +- .../pd/service/DiscoveryService.java | 43 +--- .../pd/service/KvServiceGrpcImpl.java | 11 +-
.../hugegraph/pd/service/PDPulseService.java | 6 +- .../hugegraph/pd/service/PDService.java | 176 +++++++++++++--- .../pd/service/PromTargetsService.java | 8 +- .../hugegraph/pd/service/ServiceGrpc.java | 20 +- .../pd/upgrade/VersionScriptFactory.java | 6 +- .../hugegraph/pd/util/HgExecutorUtil.java | 180 ++++++++++++++++ .../apache/hugegraph/pd/util/HgMapCache.java | 8 +- .../org/apache/hugegraph/pd/util/IdUtil.java | 4 +- .../pd/util/grpc/GRpcServerConfig.java | 44 ++++ .../hugegraph/pd/common/KVPairTest.java | 3 - .../pd/common/PartitionCacheTest.java | 2 - 35 files changed, 797 insertions(+), 320 deletions(-) create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java index b0f066805d..c8b37443ef 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java @@ -33,7 +33,11 @@ public class AbstractClientStubProxy { private AbstractStub stub; public AbstractClientStubProxy(String[] hosts) { - for (String host : hosts) if (!host.isEmpty()) hostList.offer(host); + for (String host : hosts) { + if (!host.isEmpty()) { + hostList.offer(host); + } + } } public LinkedList getHostList() { diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java index 8270e78f2b..aa179570bf 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java @@ -15,18 +15,19 @@ * under the License. 
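One of the KvClient changes that follows caches a StreamObserver per client id in a ConcurrentHashMap and creates it inside a double-checked synchronized block, so repeated listen()/listenPrefix() calls reuse one observer instead of building a new one each time. A simplified, self-contained sketch of that pattern; the generic O and the Supplier stand in for the gRPC observer types, which are not reproduced here.

import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

public class ObserverRegistry<O> {

    private final ConcurrentHashMap<Long, O> observers = new ConcurrentHashMap<>();

    public O getOrCreate(long clientId, Supplier<O> factory) {
        O observer = this.observers.get(clientId);
        if (observer == null) {
            synchronized (this) {
                observer = this.observers.get(clientId);
                if (observer == null) {                 // double-check inside the lock
                    observer = factory.get();
                    this.observers.put(clientId, observer);
                }
            }
        }
        return observer;
    }
}

The same effect could also come from observers.computeIfAbsent(clientId, id -> factory.get()); the patch keeps an explicit double-check guarded by the client instance instead.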
*/ -package org.apache.hugegraph.pd.client; - import java.io.Closeable; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; import java.util.function.Consumer; +import org.apache.hugegraph.pd.client.AbstractClient; +import org.apache.hugegraph.pd.client.PDConfig; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.kv.K; import org.apache.hugegraph.pd.grpc.kv.KResponse; @@ -49,15 +50,12 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -/** - * @author zhangyingjie - * @date 2022/6/20 - **/ @Slf4j public class KvClient extends AbstractClient implements Closeable { private final AtomicLong clientId = new AtomicLong(0); private final Semaphore semaphore = new Semaphore(1); + private final ConcurrentHashMap observers = new ConcurrentHashMap<>(); public KvClient(PDConfig pdConfig) { super(pdConfig); @@ -133,6 +131,21 @@ private void onEvent(WatchResponse value, Consumer consumer) { if (value.getEventsCount() != 0) consumer.accept((T) value); } + private StreamObserver getObserver(String key, Consumer consumer, + BiConsumer listenWrapper, + long client) { + StreamObserver observer; + if ((observer = observers.get(client)) == null) { + synchronized (this) { + if ((observer = observers.get(client)) == null) { + observer = getObserver(key, consumer, listenWrapper); + observers.put(client, observer); + } + } + } + return observer; + } + private StreamObserver getObserver(String key, Consumer consumer, BiConsumer listenWrapper) { return new StreamObserver() { @@ -142,6 +155,7 @@ public void onNext(WatchResponse value) { case Starting: boolean b = clientId.compareAndSet(0, value.getClientId()); if (b) { + observers.put(value.getClientId(), this); log.info("set watch client id to :{}", value.getClientId()); } semaphore.release(); @@ -174,14 +188,17 @@ public void onCompleted() { } public void listen(String key, Consumer consumer) throws PDException { - StreamObserver observer = getObserver(key, consumer, listenWrapper); + long value = clientId.get(); + StreamObserver observer = getObserver(key, consumer, listenWrapper, value); acquire(); - WatchRequest k = WatchRequest.newBuilder().setClientId(clientId.get()).setKey(key).build(); + WatchRequest k = WatchRequest.newBuilder().setClientId(value).setKey(key).build(); streamingCall(KvServiceGrpc.getWatchMethod(), k, observer, 1); } public void listenPrefix(String prefix, Consumer consumer) throws PDException { - StreamObserver observer = getObserver(prefix, consumer, prefixListenWrapper); + long value = clientId.get(); + StreamObserver observer = + getObserver(prefix, consumer, prefixListenWrapper, value); acquire(); WatchRequest k = WatchRequest.newBuilder().setClientId(clientId.get()).setKey(prefix).build(); @@ -199,17 +216,7 @@ private void acquire() { log.error("get semaphore with error:", e); } } - } BiConsumer listenWrapper = (key, consumer) -> { - try { - listen(key, consumer); - } catch (PDException e) { - try { - log.warn("start listen with warning:", e); - Thread.sleep(1000); - } catch (InterruptedException ex) { - } - } - }; + } public List getWatchList(T response) { List values = new LinkedList<>(); @@ -241,36 +248,56 @@ public Map getWatchMap(T response) { public LockResponse lock(String key, long ttl) throws PDException { acquire(); - LockRequest k = 
LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl) - .build(); - LockResponse response = blockingUnaryCall(KvServiceGrpc.getLockMethod(), k); - handleErrors(response.getHeader()); - clientId.compareAndSet(0L, response.getClientId()); - assert clientId.get() == response.getClientId(); - return response; - } - - public LockResponse lockWithoutReentrant(String key, long ttl) throws PDException { - acquire(); - LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl) - .build(); - LockResponse response = blockingUnaryCall(KvServiceGrpc.getLockWithoutReentrantMethod(), k); - handleErrors(response.getHeader()); - clientId.compareAndSet(0L, response.getClientId()); - assert clientId.get() == response.getClientId(); + LockResponse response; + try { + LockRequest k = + LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl) + .build(); + response = blockingUnaryCall(KvServiceGrpc.getLockMethod(), k); + handleErrors(response.getHeader()); + if (clientId.compareAndSet(0L, response.getClientId())) { + semaphore.release(); + } + } catch (Exception e) { + if (clientId.get() == 0L) { + semaphore.release(); + } + throw e; + } return response; - } BiConsumer prefixListenWrapper = (key, consumer) -> { + } BiConsumer listenWrapper = (key, consumer) -> { try { - listenPrefix(key, consumer); + listen(key, consumer); } catch (PDException e) { try { - log.warn("start listenPrefix with warning:", e); + log.warn("start listen with warning:", e); Thread.sleep(1000); } catch (InterruptedException ex) { } } }; + public LockResponse lockWithoutReentrant(String key, long ttl) throws PDException { + acquire(); + LockResponse response; + try { + LockRequest k = + LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl) + .build(); + response = blockingUnaryCall(KvServiceGrpc.getLockWithoutReentrantMethod(), k); + handleErrors(response.getHeader()); + if (clientId.compareAndSet(0L, response.getClientId())) { + semaphore.release(); + } + } catch (Exception e) { + if (clientId.get() == 0L) { + semaphore.release(); + } + throw e; + } + return response; + } + public LockResponse isLocked(String key) throws PDException { LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build(); LockResponse response = blockingUnaryCall(KvServiceGrpc.getIsLockedMethod(), k); @@ -305,7 +332,17 @@ public void close() { - + BiConsumer prefixListenWrapper = (key, consumer) -> { + try { + listenPrefix(key, consumer); + } catch (PDException e) { + try { + log.warn("start listenPrefix with warning:", e); + Thread.sleep(1000); + } catch (InterruptedException ex) { + } + } + }; } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java index 703a237790..78b5e18c4d 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java @@ -18,7 +18,6 @@ package org.apache.hugegraph.pd.client; import java.util.ArrayList; -import java.util.LinkedList; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; @@ -430,6 +429,20 @@ public KVPair getPartition(String graphName, byt cache.addPartition(graphName, partShard.getKey().getId(), partShard.getKey()); } } + + if (partShard.getValue() == null) { + var shardGroup = getShardGroup(partShard.getKey().getId()); + if (shardGroup != 
null) { + for (var shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + partShard.setValue(shard); + } + } + } else { + log.error("getPartition: get shard group failed, {}", partShard.getKey().getId()); + } + } + return partShard; } @@ -465,6 +478,21 @@ public KVPair getPartitionByCode(String graphNam cache.updateShardGroup(getShardGroup(partShard.getKey().getId())); } } + + if (partShard.getValue() == null) { + var shardGroup = getShardGroup(partShard.getKey().getId()); + if (shardGroup != null) { + for (var shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + partShard.setValue(shard); + } + } + } else { + log.error("getPartitionByCode: get shard group failed, {}", + partShard.getKey().getId()); + } + } + return partShard; } @@ -504,6 +532,19 @@ public KVPair getPartitionById(String graphName, cache.updateShardGroup(getShardGroup(partShard.getKey().getId())); } } + if (partShard.getValue() == null) { + var shardGroup = getShardGroup(partShard.getKey().getId()); + if (shardGroup != null) { + for (var shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + partShard.setValue(shard); + } + } + } else { + log.error("getPartitionById: get shard group failed, {}", + partShard.getKey().getId()); + } + } return partShard; } @@ -1188,46 +1229,12 @@ public PartitionCache getCache() { return cache; } - public interface PDEventListener { - void onStoreChanged(NodeEvent event); - - void onPartitionChanged(PartitionEvent event); - - void onGraphChanged(WatchResponse event); - - default void onShardGroupChanged(WatchResponse event) { - } - - } - - static class StubProxy { - private final LinkedList hostList = new LinkedList<>(); - private volatile PDGrpc.PDBlockingStub stub; - - public StubProxy(String[] hosts) { - for (String host : hosts) if (!host.isEmpty()) hostList.offer(host); - } - - public String nextHost() { - String host = hostList.poll(); - hostList.offer(host); //移到尾部 - return host; - } - - public void set(PDGrpc.PDBlockingStub stub) { - this.stub = stub; - } - - public PDGrpc.PDBlockingStub get() { - return this.stub; - } - - public String getHost() { - return hostList.peek(); - } - - public int getHostCount() { - return hostList.size(); - } + public void updatePdRaft(String raftConfig) throws PDException { + Pdpb.UpdatePdRaftRequest request = Pdpb.UpdatePdRaftRequest.newBuilder() + .setHeader(header) + .setConfig(raftConfig) + .build(); + Pdpb.UpdatePdRaftResponse response = getStub().updatePdRaft(request); + handleResponseError(response.getHeader()); } } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java index 8b85bc9114..d9bd73bc24 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java @@ -17,6 +17,7 @@ package org.apache.hugegraph.pd.client; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -45,14 +46,22 @@ final class PDPulseImpl implements PDPulse { private final HgPdPulseGrpc.HgPdPulseStub stub; - - private final ExecutorService threadPool; + private static ConcurrentHashMap chs = new ConcurrentHashMap<>(); + private ExecutorService threadPool ; // TODO: support several servers. 
public PDPulseImpl(String pdServerAddress) { - this.stub = HgPdPulseGrpc.newStub(getChannel(pdServerAddress)); - var namedThreadFactory = - new ThreadFactoryBuilder().setNameFormat("ack-notice-pool-%d").build(); + ManagedChannel channel; + if ((channel = chs.get(pdServerAddress)) == null || channel.isShutdown()) { + synchronized (chs) { + if ((channel = chs.get(pdServerAddress)) == null || channel.isShutdown()) { + channel = getChannel(pdServerAddress); + chs.put(pdServerAddress, channel); + } + } + } + this.stub = HgPdPulseGrpc.newStub(channel); + var namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("ack-notice-pool-%d").build(); threadPool = Executors.newSingleThreadExecutor(namedThreadFactory); } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java index 0e6d0ad4bf..bdf4efd501 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java @@ -17,6 +17,7 @@ package org.apache.hugegraph.pd.client; +import java.util.concurrent.ConcurrentHashMap; import java.util.function.Supplier; import org.apache.hugegraph.pd.grpc.watch.HgPdWatchGrpc; @@ -38,11 +39,21 @@ */ final class PDWatchImpl implements PDWatch { + private static final ConcurrentHashMap chs = new ConcurrentHashMap<>(); private final HgPdWatchGrpc.HgPdWatchStub stub; // TODO: support several servers. PDWatchImpl(String pdServerAddress) { - this.stub = HgPdWatchGrpc.newStub(getChannel(pdServerAddress)); + ManagedChannel channel; + if ((channel = chs.get(pdServerAddress)) == null || channel.isShutdown()) { + synchronized (chs) { + if ((channel = chs.get(pdServerAddress)) == null || channel.isShutdown()) { + channel = getChannel(pdServerAddress); + chs.put(pdServerAddress, channel); + } + } + } + this.stub = HgPdWatchGrpc.newStub(channel); } private ManagedChannel getChannel(String target) { diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java index 5cf00e5d17..756f25112a 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java @@ -49,7 +49,9 @@ public EventType getEventType() { @Override public boolean equals(Object o) { - if (this == o) return true; + if (this == o) { + return true; + } if (o == null || getClass() != o.getClass()) return false; NodeEvent nodeEvent = (NodeEvent) o; return nodeId == nodeEvent.nodeId && Objects.equals(graph, diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java index 4b4d04497e..237a35bfad 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java @@ -49,8 +49,12 @@ public ChangeType getChangeType() { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } PartitionEvent that = (PartitionEvent) o; return partitionId == that.partitionId && Objects.equals(graph, that.graph) && changeType == that.changeType; diff --git 
a/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java b/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java index beb1a1723f..f1db9cc8de 100644 --- a/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java +++ b/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java @@ -25,8 +25,6 @@ public class Main { public static void main(String[] args) throws PDException { - - if (args.length < 3) { String error = " usage: pd-address config key[=value] \n key list: " + "\n\tenableBatchLoad"; @@ -38,11 +36,19 @@ public static void main(String[] args) throws PDException { String param = args[2]; System.out.println(pd + " " + cmd + " " + param); System.out.println("Result: \n"); - if (cmd.equals("config")) { - doConfig(pd, param); + switch (cmd) { + case "config": + doConfig(pd, param); + case "change_raft": + doChangeRaft(pd, param); } } + private static void doChangeRaft(String pd, String param) throws PDException { + PDClient pdClient = PDClient.create(PDConfig.of(pd)); + pdClient.updatePdRaft(param); + } + public static void doConfig(String pd, String param) throws PDException { PDClient pdClient = PDClient.create(PDConfig.of(pd)); String[] pair = param.split("="); diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java index a829dd6562..28a889d5e9 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java @@ -23,16 +23,15 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.hugegraph.pd.grpc.Metapb; - -import com.google.common.collect.Range; -import com.google.common.collect.RangeMap; -import com.google.common.collect.TreeRangeMap; - +/** + * 放弃copy on write的方式 + * 1. 
在 graph * partition 数量极多的时候,效率严重下降,不能用 + */ public class PartitionCache { // 读写锁对象 @@ -41,11 +40,14 @@ public class PartitionCache { // 每张图一个缓存 private volatile Map> keyToPartIdCache; // graphName + PartitionID组成key - private volatile Map partitionCache; + private volatile Map> partitionCache; + private volatile Map shardGroupCache; private volatile Map storeCache; private volatile Map graphCache; + private volatile Map locks = new HashMap<>(); + public PartitionCache() { keyToPartIdCache = new HashMap<>(); partitionCache = new HashMap<>(); @@ -54,6 +56,41 @@ public PartitionCache() { graphCache = new ConcurrentHashMap<>(); } + private AtomicBoolean getOrCreateGraphLock(String graphName) { + var lock = this.locks.get(graphName); + if (lock == null) { + try { + writeLock.lock(); + if ((lock = this.locks.get(graphName)) == null) { + lock = new AtomicBoolean(); + locks.put(graphName, lock); + } + }finally { + writeLock.unlock(); + } + } + return lock; + } + + public void waitGraphLock(String graphName) { + var lock = getOrCreateGraphLock(graphName); + while (lock.get()) { + Thread.onSpinWait(); + } + } + + public void lockGraph(String graphName) { + var lock = getOrCreateGraphLock(graphName); + while (lock.compareAndSet(false, true)) { + Thread.onSpinWait(); + } + } + + public void unlockGraph(String graphName) { + var lock = getOrCreateGraphLock(graphName); + lock.set(false); + } + /** * 根据partitionId返回分区信息 * @@ -62,10 +99,15 @@ public PartitionCache() { * @return */ public KVPair getPartitionById(String graphName, int partId) { - var partition = partitionCache.get(makePartitionKey(graphName, partId)); - if (partition != null) { - return new KVPair<>(partition, getLeaderShard(partId)); + waitGraphLock(graphName); + var graphs = partitionCache.get(graphName); + if (graphs != null) { + var partition = graphs.get(partId ); + if (partition != null) { + return new KVPair<>(partition, getLeaderShard(partId)); + } } + return null; } @@ -88,6 +130,7 @@ public KVPair getPartitionByKey(String graphName * @return */ public KVPair getPartitionByCode(String graphName, long code) { + waitGraphLock(graphName); RangeMap rangeMap = keyToPartIdCache.get(graphName); if (rangeMap != null) { Integer partId = rangeMap.get(code); @@ -99,87 +142,76 @@ public KVPair getPartitionByCode(String graphNam } public List getPartitions(String graphName) { + waitGraphLock(graphName); + List partitions = new ArrayList<>(); - // partitionCache key: graph name + partition id - partitionCache.forEach((k, v) -> { - if (k.startsWith(graphName)) { + if (! 
partitionCache.containsKey(graphName)) { + return partitions; + } + partitionCache.get(graphName).forEach((k,v) -> { partitions.add(v); - } }); return partitions; } public boolean addPartition(String graphName, int partId, Metapb.Partition partition) { - writeLock.lock(); - try { - // graphName + PartitionID组成key - Metapb.Partition old = partitionCache.get(makePartitionKey(graphName, partId)); + waitGraphLock(graphName); + Metapb.Partition old = null; - if (old != null && old.equals(partition)) { - return false; - } + if (partitionCache.containsKey(graphName)) { + old = partitionCache.get(graphName).get(partId); + } - Map> tmpKeyToPartIdCache = cloneKeyToPartIdCache(); - Map tmpPartitionCache = clonePartitionCache(); + if (old != null && old.equals(partition)) { + return false; + } + try { - tmpPartitionCache.put(makePartitionKey(graphName, partId), partition); - if (!tmpKeyToPartIdCache.containsKey(graphName)) { - tmpKeyToPartIdCache.put(graphName, TreeRangeMap.create()); - } + lockGraph(graphName); + + partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition); if (old != null) { // old [1-3) 被 [2-3)覆盖了。当 [1-3) 变成[1-2) 不应该删除原先的[1-3) // 当确认老的 start, end 都是自己的时候,才可以删除老的. (即还没覆盖) - var graphRange = tmpKeyToPartIdCache.get(graphName); + var graphRange = keyToPartIdCache.get(graphName); if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); } } - tmpKeyToPartIdCache.get(graphName) - .put(Range.closedOpen(partition.getStartKey(), - partition.getEndKey()), partId); - partitionCache = tmpPartitionCache; - keyToPartIdCache = tmpKeyToPartIdCache; - return true; + keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create()) + .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); } finally { - writeLock.unlock(); + unlockGraph(graphName); } - + return true; } public void updatePartition(String graphName, int partId, Metapb.Partition partition) { - writeLock.lock(); try { - Map> tmpKeyToPartIdCache = cloneKeyToPartIdCache(); - Map tmpPartitionCache = clonePartitionCache(); - - Metapb.Partition old = tmpPartitionCache.get(makePartitionKey(graphName, partId)); - - tmpPartitionCache.put(makePartitionKey(graphName, partId), partition); - - if (!tmpKeyToPartIdCache.containsKey(graphName)) { - tmpKeyToPartIdCache.put(graphName, TreeRangeMap.create()); + lockGraph(graphName); + Metapb.Partition old = null; + var graphs = partitionCache.get(graphName); + if (graphs != null) { + old = graphs.get(partId); } if (old != null) { - var graphRange = tmpKeyToPartIdCache.get(graphName); + var graphRange = keyToPartIdCache.get(graphName); if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); - } } - tmpKeyToPartIdCache.get(graphName) - .put(Range.closedOpen(partition.getStartKey(), - partition.getEndKey()), partId); - partitionCache = tmpPartitionCache; - keyToPartIdCache = tmpKeyToPartIdCache; + partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition); + keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create()) + .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); } finally { - writeLock.unlock(); + unlockGraph(graphName); } } 
@@ -198,27 +230,19 @@ public boolean updatePartition(Metapb.Partition partition) { } public void removePartition(String graphName, int partId) { - writeLock.lock(); try { - Map> tmpKeyToPartIdCache = cloneKeyToPartIdCache(); - Map tmpPartitionCache = clonePartitionCache(); - Metapb.Partition partition = - tmpPartitionCache.remove(makePartitionKey(graphName, partId)); + lockGraph(graphName); + var partition = partitionCache.get(graphName).remove(partId); if (partition != null) { - var graphRange = tmpKeyToPartIdCache.get(graphName); + var graphRange = keyToPartIdCache.get(graphName); if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); } - } - partitionCache = tmpPartitionCache; - keyToPartIdCache = tmpKeyToPartIdCache; - // log.info("PartitionCache.removePartition : (after){}", debugCacheByGraphName - // (graphName)); } finally { - writeLock.unlock(); + unlockGraph(graphName); } } @@ -240,6 +264,7 @@ public void removePartitions() { try { partitionCache = new HashMap<>(); keyToPartIdCache = new HashMap<>(); + locks.clear(); } finally { writeLock.unlock(); } @@ -251,22 +276,13 @@ public void removePartitions() { * @param graphName */ public void removeAll(String graphName) { - writeLock.lock(); try { - Map> tmpKeyToPartIdCache = cloneKeyToPartIdCache(); - Map tmpPartitionCache = clonePartitionCache(); - var itr = tmpPartitionCache.entrySet().iterator(); - while (itr.hasNext()) { - var entry = itr.next(); - if (entry.getKey().startsWith(graphName)) { - itr.remove(); - } - } - tmpKeyToPartIdCache.remove(graphName); - partitionCache = tmpPartitionCache; - keyToPartIdCache = tmpKeyToPartIdCache; + lockGraph(graphName); + partitionCache.remove(graphName); + keyToPartIdCache.remove(graphName); + locks.remove(graphName); } finally { - writeLock.unlock(); + unlockGraph(graphName); } } @@ -331,23 +347,6 @@ public List getGraphs() { return graphs; } - private Map> cloneKeyToPartIdCache() { - Map> cacheClone = new HashMap<>(); - keyToPartIdCache.forEach((k1, v1) -> { - cacheClone.put(k1, TreeRangeMap.create()); - v1.asMapOfRanges().forEach((k2, v2) -> { - cacheClone.get(k1).put(k2, v2); - }); - }); - return cacheClone; - } - - private Map clonePartitionCache() { - Map cacheClone = new HashMap<>(); - cacheClone.putAll(partitionCache); - return cacheClone; - } - public void reset() { writeLock.lock(); try { @@ -356,6 +355,7 @@ public void reset() { shardGroupCache = new ConcurrentHashMap<>(); storeCache = new ConcurrentHashMap<>(); graphCache = new ConcurrentHashMap<>(); + locks.clear(); } finally { writeLock.unlock(); } @@ -374,7 +374,7 @@ public String debugCacheByGraphName(String graphName) { if (rangeMap != null) { builder.append(", partition info : {"); rangeMap.asMapOfRanges().forEach((k, v) -> { - var partition = partitionCache.get(makePartitionKey(graphName, v)); + var partition = partitionCache.get(graphName).get(v); builder.append("[part_id:").append(v); if (partition != null) { builder.append(", start_key:").append(partition.getStartKey()) diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java index 883327d8eb..b2e89c0b09 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java @@ -26,8 +26,8 @@ 
import java.util.Map; import java.util.Objects; import java.util.PriorityQueue; -import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -93,7 +93,7 @@ public TaskScheduleService(PDConfig config, StoreNodeService storeService, this.storeMonitorDataService = new StoreMonitorDataService(pdConfig); this.clusterStartTime = System.currentTimeMillis(); this.kvService = new KvService(pdConfig); - this.executor = Executors.newScheduledThreadPool(1024); + this.executor = new ScheduledThreadPoolExecutor(16); } public void init() { diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java index a288e50b64..6d085f5051 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java @@ -63,6 +63,19 @@ public class PDConfig { private String verifyPath; @Value("${license.license-path}") private String licensePath; + @Autowired + private ThreadPoolGrpc threadPoolGrpc; + + @Data + @Configuration + public class ThreadPoolGrpc { + @Value("${thread.pool.grpc.core:600}") + private int core; + @Value("${thread.pool.grpc.max:1000}") + private int max; + @Value("${thread.pool.grpc.queue:" + Integer.MAX_VALUE + "}") + private int queue; + } @Autowired private Raft raft; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java index ae810c213c..2a8af8523d 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java @@ -37,7 +37,7 @@ @Slf4j public class DiscoveryMetaStore extends MetadataRocksDBStore { - //appName --> address --> registryInfo + /** appName --> address --> registryInfo */ private static final String PREFIX = "REGIS-"; private static final String SPLITTER = "-"; @@ -85,7 +85,9 @@ public NodeInfos getNodes(Query query) { if (query.getLabelsMap() != null && !query.getLabelsMap().isEmpty()) { List result = new LinkedList(); for (NodeInfo node : nodeInfos) { - if (labelMatch(node, query)) result.add(node); + if (labelMatch(node, query)) { + result.add(node); + } } return NodeInfos.newBuilder().addAllInfo(result).build(); } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java index 3502ecb89b..297384c146 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java @@ -66,11 +66,6 @@ public abstract void putWithTTL(byte[] key, */ public abstract List scanPrefix(byte[] prefix) throws PDException; - public abstract List scanRange(byte[] start, byte[] end) throws PDException; - - public abstract List scanRange(Parser parser, byte[] start, byte[] end) throws - PDException; - /** * 前缀查询 * @@ -81,6 +76,10 @@ public abstract List scanRange(Parser parser, byte[] start, byte[] end public abstract List scanPrefix(Parser parser, byte[] prefix) throws PDException; + public abstract List scanRange(byte[] start, byte[] end) throws PDException; + + public abstract 
List scanRange(Parser parser, byte[] start, byte[] end) throws + PDException; /** * 检查Key是否存在 diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java index e6bd7b0c9a..a8928a7372 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java @@ -19,7 +19,9 @@ import java.io.File; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Objects; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -46,8 +48,6 @@ import com.alipay.sofa.jraft.util.Endpoint; import com.alipay.sofa.jraft.util.internal.ThrowUtil; -import lombok.extern.slf4j.Slf4j; - @Slf4j public class RaftEngine { private static final RaftEngine instance = new RaftEngine(); @@ -190,7 +190,14 @@ public PeerId getLeader() { * 向leader发消息,获取grpc地址; */ public String getLeaderGrpcAddress() throws ExecutionException, InterruptedException { - if (isLeader()) return config.getGrpcAddress(); + if (isLeader()) { + return config.getGrpcAddress(); + } + + if (raftNode.getLeaderId() == null) { + waitingForLeader(10000); + } + return raftRpcClient.getGrpcAddress( raftNode.getLeaderId().getEndpoint().toString()) .get().getGrpcAddress(); @@ -210,11 +217,28 @@ public List getMembers() throws ExecutionException, InterruptedEx List members = new ArrayList<>(); List peers = raftNode.listPeers(); + peers.addAll(raftNode.listLearners()); + var learners = new HashSet<>(raftNode.listLearners()); + for (PeerId peerId : peers) { Metapb.Member.Builder builder = Metapb.Member.newBuilder(); builder.setClusterId(config.getClusterId()); CompletableFuture future = raftRpcClient.getGrpcAddress(peerId.getEndpoint().toString()); + + Metapb.ShardRole role = Metapb.ShardRole.Follower; + if (peerEquals(peerId, raftNode.getLeaderId())) { + role = Metapb.ShardRole.Leader; + } else if (learners.contains(peerId)) { + role = Metapb.ShardRole.Learner; + var state = raftNode.getReplicatorState(peerId); + if (state != null) { + builder.setReplicatorState(state.name()); + } + } + + builder.setRole(role); + try { if (future.isCompletedExceptionally()) { log.error("failed to getGrpcAddress of {}", @@ -286,4 +310,18 @@ public PeerId waitingForLeader(long timeOut) { } } + + public Node getRaftNode() { + return raftNode; + } + + private boolean peerEquals(PeerId p1, PeerId p2) { + if (p1 == null && p2 == null) { + return true; + } + if (p1 == null || p2 == null) { + return false; + } + return Objects.equals(p1.getIp(), p2.getIp()) && Objects.equals(p1.getPort(), p2.getPort()); + } } diff --git a/hg-pd-grpc/src/main/proto/metapb.proto b/hg-pd-grpc/src/main/proto/metapb.proto index b6fd8c27c0..d156a00fb9 100644 --- a/hg-pd-grpc/src/main/proto/metapb.proto +++ b/hg-pd-grpc/src/main/proto/metapb.proto @@ -308,6 +308,8 @@ message Member { string rest_url = 5; string data_path = 6; StoreState state = 7; + ShardRole role = 8; + string replicator_state = 9; } // 图空间配置 diff --git a/hg-pd-grpc/src/main/proto/pdpb.proto b/hg-pd-grpc/src/main/proto/pdpb.proto index 3a7432ecdf..45a42226dc 100644 --- a/hg-pd-grpc/src/main/proto/pdpb.proto +++ b/hg-pd-grpc/src/main/proto/pdpb.proto @@ -83,6 +83,8 @@ service PD { rpc UpdateShardGroupOp(ChangeShardRequest) returns (ChangeShardResponse){} // change shard rpc ChangeShard(ChangeShardRequest) returns (ChangeShardResponse) {} + // 
更新pd raft + rpc updatePdRaft(UpdatePdRaftRequest) returns (UpdatePdRaftResponse) {} } message RequestHeader { @@ -562,3 +564,13 @@ message ChangeShardRequest{ message ChangeShardResponse { ResponseHeader header = 1; } + +message UpdatePdRaftRequest{ + RequestHeader header = 1; + string config = 3; +} + +message UpdatePdRaftResponse{ + ResponseHeader header = 1; + string message = 2; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java index 064bbb0017..31d9a7c28a 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java @@ -37,8 +37,8 @@ @Component @Slf4j public final class PDMetrics { - public final static String PREFIX = "hg"; - private final static AtomicLong graphs = new AtomicLong(0); + public static final String PREFIX = "hg"; + private static final AtomicLong GRAPHS = new AtomicLong(0); private MeterRegistry registry; @Autowired @@ -69,8 +69,8 @@ private void registerMeters() { private long updateGraphs() { long buf = getGraphs(); - if (buf != graphs.get()) { - graphs.set(buf); + if (buf != GRAPHS.get()) { + GRAPHS.set(buf); registerGraphMetrics(); } return buf; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java index 0e366ef3cd..3f28e5027c 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java @@ -51,8 +51,12 @@ public DemoModel setText(String text) { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } DemoModel that = (DemoModel) o; return status == that.status && Objects.equals(text, that.text); } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java index 105d29856f..47ff3ab83a 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java @@ -56,7 +56,9 @@ public Map getLabels() { } public PromTargetsModel addTarget(String target) { - if (target == null) return this; + if (target == null) { + return this; + } this.targets.add(target); return this; } @@ -74,7 +76,9 @@ public PromTargetsModel setClusterId(String clusterId) { } public PromTargetsModel addLabel(String label, String value) { - if (label == null || value == null) return this; + if (label == null || value == null) { + return this; + } this.labels.put(label, value); return this; } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java index 9af59f7d74..1bd2c8ee95 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java @@ -39,6 +39,10 @@ public class API { public static String QUOTATION = "\""; public static String COMMA = ","; public static String COLON = ": "; + public static final String VERSION = "3.6.5"; + public static final String PD = "PD"; + public static final String 
STORE = "STORE"; + public String toJSON(List values, String key) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java index f86642ca74..0d0cebb3ec 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java @@ -15,8 +15,6 @@ * under the License. */ -package org.apache.hugegraph.pd.rest; - import java.lang.management.ManagementFactory; import java.util.ArrayList; import java.util.List; @@ -27,6 +25,8 @@ import org.apache.hugegraph.pd.grpc.Pdpb; import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.rest.API; +import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.service.PDService; import org.springframework.beans.factory.annotation.Autowired; @@ -70,8 +70,11 @@ public RestApiResponse cluster() throws InterruptedException, ExecutionException statistics.state = String.valueOf(pdService.getStoreNodeService().getClusterStats().getState()); String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(); + CallStreamObserverWrap response = + new CallStreamObserverWrap<>(); + pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); List pdList = new ArrayList<>(); - for (Metapb.Member member : RaftEngine.getInstance().getMembers()) { + for (Metapb.Member member : response.get().get(0).getMembersList()) { Member member1 = new Member(member); if ((leaderGrpcAddress != null) && (leaderGrpcAddress.equals(member.getGrpcUrl()))) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java index dec8ffe2aa..16f560f92b 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java @@ -74,11 +74,9 @@ public RestApiResponse getMembers() throws InterruptedException, ExecutionExcept stateCountMap.put(stateKey, stateCountMap.getOrDefault(stateKey, 0) + 1); Member member1 = new Member(member); if ((leaderGrpcAddress != null) && (leaderGrpcAddress.equals(member.getGrpcUrl()))) { - member1.role = "Leader"; leader = member1; - } else { - member1.role = "Follower"; } + member1.role = member.getRole().name(); members.add(member1); } String state = pdService.getStoreNodeService().getClusterStats().getState().toString(); @@ -214,6 +212,7 @@ class Member { String state; String dataPath; String role; + String replicateState; String serviceName; //服务名称,自定义属性 String serviceVersion; //静态定义 long startTimeStamp; //启动时间,暂时取进程的启动时间 @@ -226,9 +225,9 @@ public Member(Metapb.Member member) { state = String.valueOf(member.getState()); dataPath = member.getDataPath(); serviceName = grpcUrl + "-PD"; - serviceVersion = "3.6.3"; + serviceVersion = VERSION; startTimeStamp = ManagementFactory.getRuntimeMXBean().getStartTime(); - + replicateState = member.getReplicatorState(); } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java index a22239b971..0a73f69246 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java @@ -41,20 +41,13 @@ import org.lognet.springboot.grpc.GRpcService; import org.springframework.beans.factory.annotation.Autowired; -import io.grpc.CallOptions; -import io.grpc.ManagedChannel; -import io.grpc.ManagedChannelBuilder; -import io.grpc.MethodDescriptor; -import io.grpc.stub.AbstractBlockingStub; import lombok.extern.slf4j.Slf4j; -/** - * @author zhangyingjie - * @date 2021/12/20 - **/ + @Slf4j @GRpcService public class DiscoveryService extends DiscoveryServiceGrpc.DiscoveryServiceImplBase implements + ServiceGrpc, RaftStateListener { static final AtomicLong id = new AtomicLong(); @@ -84,7 +77,7 @@ private Pdpb.ResponseHeader newErrorHeader(PDException e) { @Override public void register(NodeInfo request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { - redirectToLeader(DiscoveryServiceGrpc.getRegisterMethod(), request, observer); + redirectToLeader(null, DiscoveryServiceGrpc.getRegisterMethod(), request, observer); return; } int outTimes = pdConfig.getDiscovery().getHeartbeatOutTimes(); @@ -139,7 +132,8 @@ public void register(NodeInfo request, io.grpc.stub.StreamObserver public void getNodes(Query request, io.grpc.stub.StreamObserver responseObserver) { if (!isLeader()) { - redirectToLeader(DiscoveryServiceGrpc.getGetNodesMethod(), request, responseObserver); + redirectToLeader(null, DiscoveryServiceGrpc.getGetNodesMethod(), request, + responseObserver); return; } responseObserver.onNext(register.getNodes(request)); @@ -150,35 +144,8 @@ public boolean isLeader() { return RaftEngine.getInstance().isLeader(); } - private > void redirectToLeader( - MethodDescriptor method, ReqT req, - io.grpc.stub.StreamObserver observer) { - try { - if (channel == null) { - synchronized (this) { - if (channel == null) { - channel = ManagedChannelBuilder - .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()) - .usePlaintext() - .build(); - } - } - log.info("Grpc get leader address {}", - RaftEngine.getInstance().getLeaderGrpcAddress()); - } - - io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), - req, - observer); - } catch (Exception e) { - e.printStackTrace(); - } - - } - @Override public synchronized void onRaftLeaderChanged() { - channel = null; if (!isLeader()) { try { String message = "lose leader"; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java index b9ec00b8d5..c31f11f939 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java @@ -15,8 +15,6 @@ * under the License. 
*/ -package org.apache.hugegraph.pd.service; - import java.util.List; import java.util.Map; import java.util.Random; @@ -47,6 +45,7 @@ import org.apache.hugegraph.pd.grpc.kv.WatchType; import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.raft.RaftStateListener; +import org.apache.hugegraph.pd.service.ServiceGrpc; import org.apache.hugegraph.pd.watch.KvWatchSubject; import org.lognet.springboot.grpc.GRpcService; import org.springframework.beans.factory.annotation.Autowired; @@ -277,12 +276,12 @@ public void watch(WatchRequest request, StreamObserver responseOb if (!isLeader()) { try { responseObserver.onError(new PDException(-1, msg)); - return; + } catch (IllegalStateException ie) { + } catch (Exception e1) { log.error("redirect with error: ", e1); } } - responseObserver.onError(e); } } @@ -303,12 +302,12 @@ public void watchPrefix(WatchRequest request, StreamObserver resp if (!isLeader()) { try { responseObserver.onError(new PDException(-1, msg)); - return; + } catch (IllegalStateException ie) { + } catch (Exception e1) { log.error("redirect with error: ", e1); } } - responseObserver.onError(e); } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java index 8dbc221537..d73a0873ac 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java @@ -45,10 +45,10 @@ @GRpcService public class PDPulseService extends HgPdPulseGrpc.HgPdPulseImplBase { - private static final Supplier> queueRetrieveFunction = + private static final Supplier> QUEUE_RETRIEVE_FUNCTION = () -> Collections.emptyList(); - private static final Function queueDurableFunction = (e) -> true; - private static final Function queueRemoveFunction = (e) -> true; + private static final Function QUEUE_ITEM_BOOLEAN_FUNCTION = (e) -> true; + private static final Function QUEUE_REMOVE_FUNCTION = (e) -> true; @Autowired private PDConfig pdConfig; private QueueStore queueStore = null; diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java index 5fa8c40ef1..306dfe23d8 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @@ -20,8 +20,13 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; import javax.annotation.PostConstruct; @@ -69,23 +74,24 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.util.CollectionUtils; +import com.alipay.sofa.jraft.JRaftUtils; import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import com.alipay.sofa.jraft.entity.PeerId; -import io.grpc.CallOptions; import io.grpc.ManagedChannel; -import io.grpc.ManagedChannelBuilder; -import io.grpc.MethodDescriptor; -import io.grpc.stub.AbstractBlockingStub; import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; @Slf4j @GRpcService -public class PDService extends PDGrpc.PDImplBase implements RaftStateListener { +public class PDService extends PDGrpc.PDImplBase 
implements ServiceGrpc, RaftStateListener { static String TASK_ID_KEY = "task_id"; private final Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build(); + // private ManagedChannel channel; + private final Map channelMap = new ConcurrentHashMap<>(); @Autowired private PDConfig pdConfig; private StoreNodeService storeNodeService; @@ -1234,30 +1240,32 @@ public boolean isLeader() { return RaftEngine.getInstance().isLeader(); } - private > void redirectToLeader( - MethodDescriptor method, ReqT req, - io.grpc.stub.StreamObserver observer) { - try { - if (channel == null) { - synchronized (this) { - if (channel == null) { - channel = ManagedChannelBuilder - .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()) - .usePlaintext() - .build(); - } - } - log.info("Grpc get leader address {}", - RaftEngine.getInstance().getLeaderGrpcAddress()); - } - - io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), - req, - observer); - } catch (Exception e) { - e.printStackTrace(); - } - } + //private > void redirectToLeader( + // MethodDescriptor method, ReqT req, io.grpc.stub.StreamObserver + // observer) { + // try { + // var addr = RaftEngine.getInstance().getLeaderGrpcAddress(); + // ManagedChannel channel; + // + // if ((channel = channelMap.get(addr)) == null) { + // synchronized (this) { + // if ((channel = channelMap.get(addr)) == null|| channel.isShutdown()) { + // channel = ManagedChannelBuilder + // .forTarget(addr).usePlaintext() + // .build(); + // } + // } + // log.info("Grpc get leader address {}", RaftEngine.getInstance() + // .getLeaderGrpcAddress()); + // } + // + // io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions + // .DEFAULT), req, + // observer); + // } catch (Exception e) { + // e.printStackTrace(); + // } + //} /** * 更新peerList @@ -1291,7 +1299,7 @@ public void changePeerList(Pdpb.ChangePeerListRequest request, @Override public synchronized void onRaftLeaderChanged() { log.info("onLeaderChanged"); - channel = null; + // channel = null; if (licenseVerifierService == null) { licenseVerifierService = new LicenseVerifierService(pdConfig); } @@ -1303,7 +1311,7 @@ public synchronized void onRaftLeaderChanged() { PDPulseSubject.notifyError(message); PDWatchSubject.notifyError(message); } catch (Exception e) { - + log.error("onRaftLeaderChanged, got error:", e); } } } @@ -1627,4 +1635,110 @@ public void changeShard(Pdpb.ChangeShardRequest request, observer.onCompleted(); } + public void updatePdRaft(Pdpb.UpdatePdRaftRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdatePdRaftMethod(), request, observer); + return; + } + + var list = parseConfig(request.getConfig()); + + log.info("update raft request: {}, list: {}", request.getConfig(), list); + + Pdpb.UpdatePdRaftResponse response = + Pdpb.UpdatePdRaftResponse.newBuilder().setHeader(okHeader).build(); + + do { + var leaders = list.stream().filter(s -> s.getKey().equals("leader")) + .collect(Collectors.toList()); + var node = RaftEngine.getInstance().getRaftNode(); + + if (leaders.size() == 1) { + var leaderPeer = leaders.get(0).getValue(); + // change leader + var peers = new HashSet<>(node.listPeers()); + + if (!peerEquals(leaderPeer, node.getLeaderId())) { + if (peers.contains(leaderPeer)) { + log.info("updatePdRaft, transfer to {}", leaderPeer); + node.transferLeadershipTo(leaderPeer); + } else { + response = Pdpb.UpdatePdRaftResponse.newBuilder() + 
.setHeader(newErrorHeader(6667, + "new leader" + + " not in " + + "raft peers")) + .build(); + } + break; + } + } else { + response = Pdpb.UpdatePdRaftResponse.newBuilder() + .setHeader(newErrorHeader(6666, + "leader size != 1")) + .build(); + break; + } + + Configuration config = new Configuration(); + // add peer + for (var peer : list) { + if (!peer.getKey().equals("learner")) { + config.addPeer(peer.getValue()); + } else { + config.addLearner(peer.getValue()); + } + } + + log.info("pd raft update with new config: {}", config); + + node.changePeers(config, status -> { + if (status.isOk()) { + log.info("updatePdRaft, change peers success"); + } else { + log.error("changePeers status: {}, msg:{}, code: {}, raft error:{}", + status, status.getErrorMsg(), status.getCode(), + status.getRaftError()); + } + }); + } while (false); + + observer.onNext(response); + observer.onCompleted(); + } + + private List> parseConfig(String conf) { + List> result = new LinkedList<>(); + + if (conf != null && conf.length() > 0) { + for (var s : conf.split(",")) { + if (s.endsWith("/leader")) { + result.add(new KVPair<>("leader", + JRaftUtils.getPeerId(s.substring(0, s.length() - 7)))); + } else if (s.endsWith("/learner")) { + result.add(new KVPair<>("learner", + JRaftUtils.getPeerId(s.substring(0, s.length() - 8)))); + } else if (s.endsWith("/follower")) { + result.add(new KVPair<>("follower", + JRaftUtils.getPeerId(s.substring(0, s.length() - 9)))); + } else { + result.add(new KVPair<>("follower", JRaftUtils.getPeerId(s))); + } + } + } + + return result; + } + + private boolean peerEquals(PeerId p1, PeerId p2) { + if (p1 == null && p2 == null) { + return true; + } + if (p1 == null || p2 == null) { + return false; + } + return Objects.equals(p1.getIp(), p2.getIp()) && Objects.equals(p1.getPort(), p2.getPort()); + } + } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java index 118ad4af01..1f797bef4e 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java @@ -153,7 +153,9 @@ private List toModels(NodeInfos info) { Map labels = e.getLabelsMap(); String target = labels.get("target"); - if (HgAssert.isInvalid(target)) return null; + if (HgAssert.isInvalid(target)) { + return null; + } PromTargetsModel model = PromTargetsModel.of(); model.addTarget(target); @@ -232,7 +234,9 @@ private Set getStoreAddresses() { //TODO: optimized store registry data, to add host:port of REST server. 
private String getRestAddress(Metapb.Store store) { String address = store.getAddress(); - if (address == null || address.isEmpty()) return null; + if (address == null || address.isEmpty()) { + return null; + } try { Optional port = store.getLabelsList().stream().map( e -> { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java index 620d0e4741..658673a736 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java @@ -18,6 +18,7 @@ package org.apache.hugegraph.pd.service; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.Pdpb; @@ -63,11 +64,14 @@ default void redirectToLeader(ManagedChannel channel, io.grpc.stub.StreamObserver observer) { try { String address = RaftEngine.getInstance().getLeaderGrpcAddress(); - if ((channel = channels.get(address)) == null) { - synchronized (this) { - if ((channel = channels.get(address)) == null) { - ManagedChannel c = - ManagedChannelBuilder.forTarget(address).usePlaintext().build(); + if ((channel = channels.get(address)) == null || channel.isTerminated() || channel.isShutdown()) { + synchronized (ServiceGrpc.class) { + if ((channel = channels.get(address)) == null || channel.isTerminated() || + channel.isShutdown()) { + while (channel != null && channel.isShutdown() && !channel.isTerminated()) { + channel.awaitTermination(50, TimeUnit.MILLISECONDS); + } + ManagedChannel c = ManagedChannelBuilder.forTarget(address).usePlaintext().build(); channels.put(address, c); channel = c; } @@ -81,6 +85,12 @@ default void redirectToLeader(ManagedChannel channel, } + default void redirectToLeader(MethodDescriptor method, + ReqT req, io.grpc.stub.StreamObserver observer) { + redirectToLeader(null, method, req, observer); + + } + @Override default void onRaftLeaderChanged() { synchronized (this) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java index dcf6ca3443..7f4b4fda4d 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java @@ -24,7 +24,7 @@ import org.apache.hugegraph.pd.upgrade.scripts.TaskCleanUpgrade; public class VersionScriptFactory { - private static final List scripts = new LinkedList<>(); + private static final List SCRIPTS = new LinkedList<>(); private static volatile VersionScriptFactory factory; static { @@ -48,10 +48,10 @@ public static VersionScriptFactory getInstance() { } public static void registerScript(VersionUpgradeScript script) { - scripts.add(script); + SCRIPTS.add(script); } public List getScripts() { - return scripts; + return SCRIPTS; } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java new file mode 100644 index 0000000000..7619254483 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.util; + +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executor; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.hugegraph.pd.common.HgAssert; + +import lombok.extern.slf4j.Slf4j; + + +@Slf4j +public final class HgExecutorUtil { + private static final Map EXECUTOR_MAP = new ConcurrentHashMap<>(); + private static final Executor COMMON_EXECUTOR + = new ThreadPoolExecutor(0, Integer.MAX_VALUE, + 60L, TimeUnit.SECONDS, + new SynchronousQueue(), + newThreadFactory("pd-common")); + + public static void execute(Runnable command) { + if (command == null) { + return; + } + COMMON_EXECUTOR.execute(command); + } + + public static ThreadFactory newThreadFactory(String namePrefix, int priority) { + HgAssert.isArgumentNotNull(namePrefix, "namePrefix"); + return new HgThreadFactory(namePrefix, priority); + } + + public static ThreadFactory newThreadFactory(String namePrefix) { + HgAssert.isArgumentNotNull(namePrefix, "namePrefix"); + return new HgDefaultThreadFactory(namePrefix); + } + + public static ThreadPoolExecutor getThreadPoolExecutor(String name) { + if (name == null) { + return null; + } + return EXECUTOR_MAP.get(name); + } + + /** + * @see HgExecutorUtil:createExecutor(String , int , int , int ) + */ + @Deprecated + public static Executor createExecutor(String name, int coreThreads, int maxThreads) { +/* ThreadPoolExecutor res = + new ThreadPoolExecutor(coreThreads, maxThreads, + 60L, TimeUnit.SECONDS, + new LinkedBlockingQueue(), + newThreadFactory(name)); + if (threadPoolMap.containsKey(name)) { + threadPoolMap.put(name + "-1", res); + } else { + threadPoolMap.put(name, res); + }*/ + return createExecutor(name, coreThreads, maxThreads, Integer.MAX_VALUE); + } + + public static ThreadPoolExecutor createExecutor(String name, int coreThreads, int maxThreads, + int queueSize) { + ThreadPoolExecutor res = EXECUTOR_MAP.get(name); + + if (res != null) { + return res; + } + + synchronized (EXECUTOR_MAP) { + res = EXECUTOR_MAP.get(name); + if (res != null) { + return res; + } + + BlockingQueue queue = null; + + if (queueSize <= 0) { + queue = new SynchronousQueue(); + } else { + queue = new LinkedBlockingQueue<>(queueSize); + } + + res = new ThreadPoolExecutor( + coreThreads, + maxThreads, + 60L, TimeUnit.SECONDS, + queue, + newThreadFactory(name) + ); + EXECUTOR_MAP.put(name, res); + } + + return res; + } + + /** + * The default thread factory + */ + static class HgThreadFactory implements ThreadFactory { + private final AtomicInteger 
threadNumber = new AtomicInteger(1); + private final String namePrefix; + private final int priority; + + HgThreadFactory(String namePrefix, int priority) { + this.namePrefix = namePrefix; + this.priority = priority; + SecurityManager s = System.getSecurityManager(); + } + + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(null, r, + namePrefix + "-" + threadNumber.getAndIncrement(), + 0); + if (t.isDaemon()) { + t.setDaemon(false); + } + if (t.getPriority() != priority) { + t.setPriority(priority); + } + return t; + } + } + + /** + * The default thread factory, which added threadNamePrefix in construction method. + */ + static class HgDefaultThreadFactory implements ThreadFactory { + private static final AtomicInteger POOL_NUMBER = new AtomicInteger(1); + private final AtomicInteger threadNumber = new AtomicInteger(1); + private final String namePrefix; + + HgDefaultThreadFactory(String threadNamePrefix) { + SecurityManager s = System.getSecurityManager(); + this.namePrefix = threadNamePrefix + "-" + + POOL_NUMBER.getAndIncrement() + + "-thread-"; + } + + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(null, r, + namePrefix + threadNumber.getAndIncrement(), + 0); + if (t.isDaemon()) { + t.setDaemon(false); + } + if (t.getPriority() != Thread.NORM_PRIORITY) { + t.setPriority(Thread.NORM_PRIORITY); + } + return t; + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java index a905ca3665..496dfee0ff 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java @@ -47,13 +47,17 @@ private boolean isExpired() { } public void put(K key, V value) { - if (key == null || value == null) return; + if (key == null || value == null) { + return; + } this.cache.put(key, value); } public V get(K key) { - if (isExpired()) return null; + if (isExpired()) { + return null; + } return this.cache.get(key); } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java index b92056d735..3c5f7a82da 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java @@ -24,7 +24,7 @@ */ @Slf4j public final class IdUtil { - private final static byte[] lock = new byte[0]; + private static final byte[] LOCK = new byte[0]; public static String createMillisStr() { return String.valueOf(createMillisId()); @@ -36,7 +36,7 @@ public static String createMillisStr() { * @return */ public static Long createMillisId() { - synchronized (lock) { + synchronized (LOCK) { try { Thread.sleep(1); } catch (InterruptedException e) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java new file mode 100644 index 0000000000..ad0bad5493 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hugegraph.pd.util.grpc; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.util.HgExecutorUtil; +import org.lognet.springboot.grpc.GRpcServerBuilderConfigurer; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import io.grpc.ServerBuilder; + +@Component +public class GRpcServerConfig extends GRpcServerBuilderConfigurer { + public static final String EXECUTOR_NAME = "hg-grpc"; + @Autowired + private PDConfig pdConfig; + + @Override + public void configure(ServerBuilder serverBuilder) { + serverBuilder.executor( + HgExecutorUtil.createExecutor(EXECUTOR_NAME, + pdConfig.getThreadPoolGrpc().getCore(), + pdConfig.getThreadPoolGrpc().getMax(), + pdConfig.getThreadPoolGrpc().getQueue()) + ); + } + +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java index 0b3d51a829..53c7b27dc0 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java @@ -17,11 +17,8 @@ package org.apache.hugegraph.pd.common; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import java.util.Objects; - import org.apache.hugegraph.pd.common.KVPair; import org.junit.Before; import org.junit.Test; diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java index c47e64373a..638dea8c91 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java @@ -17,7 +17,6 @@ package org.apache.hugegraph.pd.common; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; @@ -27,7 +26,6 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; -import java.util.Objects; import org.apache.hugegraph.pd.common.PartitionCache; import org.apache.hugegraph.pd.grpc.Metapb; From 8ddf8291af9777dad5d3d445f737cb9102bfac7e Mon Sep 17 00:00:00 2001 From: V_Galaxy <1904821183@qq.com> Date: Tue, 27 Jun 2023 23:48:51 +0800 Subject: [PATCH 09/18] build: add flatten plugin and fix dependency version 1. Added the [flatten](https://github.com/apache/incubator-hugegraph/pull/2004) plugin. 2. Unified the global version to 1.5.0. 3. Modified the version of `jraft-core`. 
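Note: the modules now inherit their version from the parent through the `${revision}` placeholder, which the flatten plugin resolves into a concrete version at build time (`resolveCiFriendliesOnly` substitutes only the CI-friendly properties `revision`/`sha1`/`changelist` and leaves the rest of the POM as-is). A minimal sketch of the root-pom side is below; the `<revision>` property name and its placement are assumptions following the standard Maven CI-friendly versioning pattern, since this patch only shows the module-side `${revision}` references and the plugin block:

```xml
<!-- Root pom.xml (sketch, not part of this diff): declare the unified version once
     and let flatten-maven-plugin write the resolved value into the deployed POM. -->
<properties>
    <revision>1.5.0</revision>
</properties>

<!-- Each module then points at the root with the placeholder version,
     exactly as the module diffs below do: -->
<parent>
    <groupId>org.apache.hugegraph</groupId>
    <artifactId>hugegraph-pd-root</artifactId>
    <version>${revision}</version>
    <relativePath>../pom.xml</relativePath>
</parent>
```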
--------- Co-authored-by: imbajin --- .gitignore | 3 ++- hg-pd-client/pom.xml | 3 ++- hg-pd-clitools/pom.xml | 5 +++-- hg-pd-common/pom.xml | 3 ++- hg-pd-core/pom.xml | 6 ++++-- hg-pd-dist/pom.xml | 3 ++- hg-pd-grpc/pom.xml | 3 ++- hg-pd-service/pom.xml | 11 +++++++++-- hg-pd-test/pom.xml | 1 + pom.xml | 25 +++++++++++++++++++++++++ 10 files changed, 52 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index c588baf7c4..689a7a80b3 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,5 @@ *.iml /hg-pd-common/target_B000000405016P_Oct-28-114458-2021_conflict_parent/ -dist/ \ No newline at end of file +dist/ +.flattened-pom.xml diff --git a/hg-pd-client/pom.xml b/hg-pd-client/pom.xml index 9e1c81694e..194e7dde2a 100644 --- a/hg-pd-client/pom.xml +++ b/hg-pd-client/pom.xml @@ -25,7 +25,8 @@ org.apache.hugegraph hugegraph-pd-root - 3.6.5-SNAPSHOT + ${revision} + ../pom.xml hg-pd-client diff --git a/hg-pd-clitools/pom.xml b/hg-pd-clitools/pom.xml index fa9054bf31..c0a3009e1b 100644 --- a/hg-pd-clitools/pom.xml +++ b/hg-pd-clitools/pom.xml @@ -22,7 +22,8 @@ hugegraph-pd-root org.apache.hugegraph - 3.6.3-SNAPSHOT + ${revision} + ../pom.xml 4.0.0 @@ -31,7 +32,7 @@ org.apache.hugegraph hg-pd-client - 3.6.5-SNAPSHOT + ${revision} junit diff --git a/hg-pd-common/pom.xml b/hg-pd-common/pom.xml index 2e6b24ab95..7732b0f58f 100644 --- a/hg-pd-common/pom.xml +++ b/hg-pd-common/pom.xml @@ -24,7 +24,8 @@ org.apache.hugegraph hugegraph-pd-root - 3.6.5-SNAPSHOT + ${revision} + ../pom.xml hg-pd-common diff --git a/hg-pd-core/pom.xml b/hg-pd-core/pom.xml index e4c0aaa425..8963680d29 100644 --- a/hg-pd-core/pom.xml +++ b/hg-pd-core/pom.xml @@ -24,7 +24,8 @@ org.apache.hugegraph hugegraph-pd-root - 3.6.5-SNAPSHOT + ${revision} + ../pom.xml hg-pd-core @@ -36,7 +37,8 @@ com.alipay.sofa jraft-core - 1.3.9-hg-SNAPSHOT + + 1.3.9 org.rocksdb diff --git a/hg-pd-dist/pom.xml b/hg-pd-dist/pom.xml index 16be298fe7..602ba4f6b7 100644 --- a/hg-pd-dist/pom.xml +++ b/hg-pd-dist/pom.xml @@ -22,7 +22,8 @@ hugegraph-pd-root org.apache.hugegraph - 3.6.5-SNAPSHOT + ${revision} + ../pom.xml 4.0.0 diff --git a/hg-pd-grpc/pom.xml b/hg-pd-grpc/pom.xml index 41308bacb4..0c5cff10db 100644 --- a/hg-pd-grpc/pom.xml +++ b/hg-pd-grpc/pom.xml @@ -25,7 +25,8 @@ org.apache.hugegraph hugegraph-pd-root - 3.6.5-SNAPSHOT + ${revision} + ../pom.xml hg-pd-grpc diff --git a/hg-pd-service/pom.xml b/hg-pd-service/pom.xml index 38906fdf61..4a027fbbcd 100644 --- a/hg-pd-service/pom.xml +++ b/hg-pd-service/pom.xml @@ -24,7 +24,8 @@ org.apache.hugegraph hugegraph-pd-root - 3.6.5-SNAPSHOT + ${revision} + ../pom.xml hugegraph-pd @@ -112,7 +113,13 @@ org.apache.hugegraph hugegraph-common - 1.8.12 + 1.0.1 + + + commons-io + commons-io + 2.7 + compile diff --git a/hg-pd-test/pom.xml b/hg-pd-test/pom.xml index d6f29a3298..0371e81630 100644 --- a/hg-pd-test/pom.xml +++ b/hg-pd-test/pom.xml @@ -23,6 +23,7 @@ org.apache.hugegraph hugegraph-pd-root ${revision} + ../pom.xml 4.0.0 diff --git a/pom.xml b/pom.xml index f29ff1c3bf..884495320d 100644 --- a/pom.xml +++ b/pom.xml @@ -110,6 +110,31 @@ + + org.codehaus.mojo + flatten-maven-plugin + 1.2.7 + + true + resolveCiFriendliesOnly + + + + flatten + process-resources + + flatten + + + + flatten.clean + clean + + clean + + + + From ed00d4eb29cf4cb740fd50734d5843cd07c7e6da Mon Sep 17 00:00:00 2001 From: V_Galaxy <1904821183@qq.com> Date: Wed, 28 Jun 2023 19:07:25 +0800 Subject: [PATCH 10/18] build: fix build for community edition compatibility (WIP) Please refer to 
https://hugegraph.feishu.cn/wiki/Y6d2wys9KiWf24kpzNKct0Yknnr for detailed fix documentation. --------- Co-authored-by: imbajin --- .../hugegraph/pd/client/DiscoveryClient.java | 12 +- .../apache/hugegraph/pd/client/KvClient.java | 6 +- .../apache/hugegraph/pd/client/PDClient.java | 52 +- .../hugegraph/pd/client/PDPulseImpl.java | 9 +- .../apache/hugegraph/pd/watch/NodeEvent.java | 4 +- .../hugegraph/pd/StoreRegisterTest.java | 26 +- .../hugegraph/pd/client/PDWatchTest.java | 20 +- .../apache/hugegraph/pd/common/HgAssert.java | 24 +- .../apache/hugegraph/pd/common/KVPair.java | 8 +- .../hugegraph/pd/common/PartitionCache.java | 40 +- .../org/apache/hugegraph/pd/KvService.java | 20 +- .../apache/hugegraph/pd/StoreNodeService.java | 4 +- .../hugegraph/pd/TaskScheduleService.java | 8 +- .../apache/hugegraph/pd/meta/IdMetaStore.java | 4 +- .../pd/meta/MetadataRocksDBStore.java | 4 +- .../apache/hugegraph/pd/raft/RaftEngine.java | 76 +- .../hugegraph/pd/store/HgKVStoreImpl.java | 12 +- .../pd/license/LicenseVerifierService.java | 849 +++++++++--------- .../pd/license/LicenseVerifyManager.java | 153 ++-- .../hugegraph/pd/pulse/PDPulseSubject.java | 8 +- .../org/apache/hugegraph/pd/rest/API.java | 19 +- .../apache/hugegraph/pd/rest/RegistryAPI.java | 11 +- .../pd/service/DiscoveryService.java | 10 +- .../pd/service/KvServiceGrpcImpl.java | 46 +- .../hugegraph/pd/service/PDRestService.java | 4 +- .../hugegraph/pd/service/PDService.java | 102 +-- .../hugegraph/pd/watch/KvWatchSubject.java | 4 +- .../hugegraph/pd/watch/PDWatchSubject.java | 4 +- .../hugegraph/pd/client/BaseClientTest.java | 2 - .../hugegraph/pd/client/PDClientTest.java | 5 +- .../hugegraph/pd/common/KVPairTest.java | 5 +- .../pd/common/PartitionCacheTest.java | 2 +- .../pd/service/PartitionServiceTest.java | 2 + .../service/StoreMonitorDataServiceTest.java | 1 + .../pd/service/StoreNodeServiceNewTest.java | 1 + pom.xml | 20 + 36 files changed, 858 insertions(+), 719 deletions(-) diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java index 287cb75d62..39f7e13705 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java @@ -106,11 +106,15 @@ private void resetStub() { break; } catch (Exception e) { requireResetStub.set(true); - if (errLog == null) errLog = e.getMessage(); + if (errLog == null) { + errLog = e.getMessage(); + } continue; } } - if (errLog != null) log.error(errLog); + if (errLog != null) { + log.error(errLog); + } } /*** @@ -180,7 +184,9 @@ public void run() { register = registerStub.register(t); log.debug("Discovery Client work done."); Consumer consumer = getRegisterConsumer(); - if (consumer != null) consumer.accept(register); + if (consumer != null) { + consumer.accept(register); + } } catch (Exception e) { throw e; } finally { diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java index aa179570bf..27975ca57c 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java @@ -15,6 +15,8 @@ * under the License. 
*/ +package org.apache.hugegraph.pd.client; + import java.io.Closeable; import java.util.HashMap; import java.util.LinkedList; @@ -128,7 +130,9 @@ public TTLResponse putTTL(String key, String value, long ttl) throws PDException private void onEvent(WatchResponse value, Consumer consumer) { log.info("receive message for {},event Count:{}", value, value.getEventsCount()); clientId.compareAndSet(0L, value.getClientId()); - if (value.getEventsCount() != 0) consumer.accept((T) value); + if (value.getEventsCount() != 0) { + consumer.accept((T) value); + } } private StreamObserver getObserver(String key, Consumer consumer, diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java index 78b5e18c4d..6616cb340c 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java @@ -18,6 +18,7 @@ package org.apache.hugegraph.pd.client; import java.util.ArrayList; +import java.util.LinkedList; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; @@ -94,7 +95,9 @@ private static void handleResponseError(Pdpb.ResponseHeader header) throws } private synchronized void newBlockingStub() throws PDException { - if (stubProxy.get() != null) return; + if (stubProxy.get() != null) { + return; + } String host = newLeaderStub(); if (host.isEmpty()) { throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, @@ -1237,4 +1240,51 @@ public void updatePdRaft(String raftConfig) throws PDException { Pdpb.UpdatePdRaftResponse response = getStub().updatePdRaft(request); handleResponseError(response.getHeader()); } + + public interface PDEventListener { + void onStoreChanged(NodeEvent event); + + void onPartitionChanged(PartitionEvent event); + + void onGraphChanged(WatchResponse event); + + default void onShardGroupChanged(WatchResponse event) { + } + + } + + static class StubProxy { + private final LinkedList hostList = new LinkedList<>(); + private volatile PDGrpc.PDBlockingStub stub; + + public StubProxy(String[] hosts) { + for (String host : hosts) { + if (!host.isEmpty()) { + hostList.offer(host); + } + } + } + + public String nextHost() { + String host = hostList.poll(); + hostList.offer(host); //移到尾部 + return host; + } + + public void set(PDGrpc.PDBlockingStub stub) { + this.stub = stub; + } + + public PDGrpc.PDBlockingStub get() { + return this.stub; + } + + public String getHost() { + return hostList.peek(); + } + + public int getHostCount() { + return hostList.size(); + } + } } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java index d9bd73bc24..6d85d6b043 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java @@ -43,11 +43,11 @@ * @author lynn.bond@hotmail.com created on 2021/11/9 */ @Slf4j -final class PDPulseImpl implements PDPulse { +public final class PDPulseImpl implements PDPulse { + private static final ConcurrentHashMap chs = new ConcurrentHashMap<>(); private final HgPdPulseGrpc.HgPdPulseStub stub; - private static ConcurrentHashMap chs = new ConcurrentHashMap<>(); - private ExecutorService threadPool ; + private final ExecutorService threadPool; // TODO: support several servers. 
public PDPulseImpl(String pdServerAddress) { @@ -61,7 +61,8 @@ public PDPulseImpl(String pdServerAddress) { } } this.stub = HgPdPulseGrpc.newStub(channel); - var namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("ack-notice-pool-%d").build(); + var namedThreadFactory = + new ThreadFactoryBuilder().setNameFormat("ack-notice-pool-%d").build(); threadPool = Executors.newSingleThreadExecutor(namedThreadFactory); } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java index 756f25112a..2f28986072 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java @@ -52,7 +52,9 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (o == null || getClass() != o.getClass()) return false; + if (o == null || getClass() != o.getClass()) { + return false; + } NodeEvent nodeEvent = (NodeEvent) o; return nodeId == nodeEvent.nodeId && Objects.equals(graph, nodeEvent.graph) && diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java index 95e51b9776..04177b665f 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java @@ -60,7 +60,7 @@ public void testRegisterStore() throws PDException { public void testGetStore() throws PDException { testRegisterStore(); Metapb.Store store = pdClient.getStore(storeId); - Assert.assertTrue(store.getAddress().equals(storeAddr)); + Assert.assertEquals(storeAddr, store.getAddress()); System.out.println(store); } @@ -78,9 +78,7 @@ public void testGetActiveStores() throws PDException { // @Test public void testStoreHeartbeat() throws PDException { testRegisterStore(); - Metapb.StoreStats stats = Metapb.StoreStats.newBuilder() - .setStoreId(storeId) - .build(); + Metapb.StoreStats stats = Metapb.StoreStats.newBuilder().setStoreId(storeId).build(); pdClient.storeHeartbeat(stats); List stores = pdClient.getActiveStores(graphName); boolean exist = false; @@ -98,8 +96,8 @@ public void testStoreHeartbeat() throws PDException { public void testPartitionHeartbeat() throws InterruptedException, PDException { testRegisterStore(); PDPulse pdPulse = pdClient.getPulseClient(); - PDPulse.Notifier notifier = pdPulse.connectPartition( - new PDPulse.Listener() { + PDPulse.Notifier notifier = + pdPulse.connectPartition(new PDPulse.Listener() { @Override public void onNext(PartitionHeartbeatResponse response) { @@ -123,18 +121,10 @@ public void onCompleted() { }); KVPair partShard = pdClient.getPartition("test", "1".getBytes(StandardCharsets.UTF_8)); - notifier.notifyServer(PartitionHeartbeatRequest.newBuilder() - .setStates( - Metapb.PartitionStats.newBuilder() - .addGraphName( - "test") - .setId(partShard.getKey() - .getId()) - .setLeader( - Metapb.Shard.newBuilder() - .setStoreId( - 1) - .build()))); + notifier.notifyServer(PartitionHeartbeatRequest.newBuilder().setStates( + Metapb.PartitionStats.newBuilder().addGraphName("test") + .setId(partShard.getKey().getId()) + .setLeader(Metapb.Shard.newBuilder().setStoreId(1).build()))); Thread.sleep(10000); diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java index 5b94730346..675577596a 100644 --- 
a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java @@ -21,12 +21,9 @@ import java.util.concurrent.TimeUnit; import org.apache.hugegraph.pd.client.test.HgPDTestUtil; -import org.apache.hugegraph.pd.watch.NodeEvent; import org.junit.BeforeClass; +import org.junit.Test; -/** - * @author lynn.bond@hotmail.com created on 2021/11/8 - */ @Deprecated public class PDWatchTest { private static PDClient pdClient; @@ -36,21 +33,20 @@ public class PDWatchTest { private final String graphName = "graph1"; @BeforeClass - public static void beforeClass() throws Exception { + public static void beforeClass() { pdClient = PDClient.create(PDConfig.of("localhost:9000")); } - // @Test + @Test public void watch() { PDWatch watch = pdClient.getWatchClient(); CountDownLatch latch = new CountDownLatch(10); - PDWatch.Watcher watcher1 = watch.watchPartition(new WatchListener(latch, "watcher1")); - PDWatch.Watcher watcher2 = watch.watchPartition(new WatchListener(latch, "watcher2")); - PDWatch.Watcher watcher3 = watch.watchPartition(new WatchListener(latch, "watcher3")); + PDWatch.Watcher watcher1 = watch.watchPartition(new WatchListener<>(latch, "watcher1")); + PDWatch.Watcher watcher2 = watch.watchPartition(new WatchListener<>(latch, "watcher2")); + PDWatch.Watcher watcher3 = watch.watchPartition(new WatchListener<>(latch, "watcher3")); - PDWatch.Watcher nodeWatcher1 = - watch.watchNode(new WatchListener(latch, "nodeWatcher1")); + PDWatch.Watcher nodeWatcher1 = watch.watchNode(new WatchListener<>(latch, "nodeWatcher1")); try { latch.await(15, TimeUnit.SECONDS); @@ -64,7 +60,7 @@ public void watch() { private class WatchListener implements PDWatch.Listener { private final String watcherName; - CountDownLatch latch = new CountDownLatch(10); + CountDownLatch latch; private WatchListener(CountDownLatch latch, String watcherName) { this.latch = latch; diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java index 0d94758631..3e43ef2adf 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java @@ -26,7 +26,9 @@ public final class HgAssert { public static void isTrue(boolean expression, String message) { - if (message == null) throw new IllegalArgumentException("message is null"); + if (message == null) { + throw new IllegalArgumentException("message is null"); + } if (!expression) { throw new IllegalArgumentException(message); @@ -62,15 +64,21 @@ public static void isNotNull(Object obj, String msg) { } public static boolean isContains(Object[] objs, Object obj) { - if (objs == null || objs.length == 0 || obj == null) return false; + if (objs == null || objs.length == 0 || obj == null) { + return false; + } for (Object item : objs) { - if (obj.equals(item)) return true; + if (obj.equals(item)) { + return true; + } } return false; } public static boolean isInvalid(String... 
strs) { - if (strs == null || strs.length == 0) return true; + if (strs == null || strs.length == 0) { + return true; + } for (String item : strs) { if (item == null || "".equals(item.trim())) { return true; @@ -92,12 +100,16 @@ public static boolean isInvalid(Collection list) { } public static boolean isContains(Collection list, T item) { - if (list == null || item == null) return false; + if (list == null || item == null) { + return false; + } return list.contains(item); } public static boolean isNull(Object... objs) { - if (objs == null) return true; + if (objs == null) { + return true; + } for (Object item : objs) { if (item == null) { return true; diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java index 9af56464ed..b560911ea2 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java @@ -117,10 +117,14 @@ public int hashCode() { */ @Override public boolean equals(Object o) { - if (this == o) return true; + if (this == o) { + return true; + } if (o instanceof KVPair) { KVPair pair = (KVPair) o; - if (!Objects.equals(key, pair.key)) return false; + if (!Objects.equals(key, pair.key)) { + return false; + } return Objects.equals(value, pair.value); } return false; diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java index 28a889d5e9..7c9199dd0d 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java @@ -28,9 +28,15 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.hugegraph.pd.grpc.Metapb; + +import com.google.common.collect.Range; +import com.google.common.collect.RangeMap; +import com.google.common.collect.TreeRangeMap; + /** - * 放弃copy on write的方式 - * 1. 在 graph * partition 数量极多的时候,效率严重下降,不能用 + * 放弃 copy on write 的方式 + * 1. 
在 graph * partition 数量极多的时候,效率严重下降,不能用 */ public class PartitionCache { @@ -39,14 +45,14 @@ public class PartitionCache { Lock writeLock = readWriteLock.writeLock(); // 每张图一个缓存 private volatile Map> keyToPartIdCache; - // graphName + PartitionID组成key + // graphName + PartitionID 组成 key private volatile Map> partitionCache; private volatile Map shardGroupCache; private volatile Map storeCache; private volatile Map graphCache; - private volatile Map locks = new HashMap<>(); + private final Map locks = new HashMap<>(); public PartitionCache() { keyToPartIdCache = new HashMap<>(); @@ -65,7 +71,7 @@ private AtomicBoolean getOrCreateGraphLock(String graphName) { lock = new AtomicBoolean(); locks.put(graphName, lock); } - }finally { + } finally { writeLock.unlock(); } } @@ -92,7 +98,7 @@ public void unlockGraph(String graphName) { } /** - * 根据partitionId返回分区信息 + * 根据 partitionId 返回分区信息 * * @param graphName * @param partId @@ -102,7 +108,7 @@ public KVPair getPartitionById(String graphName, waitGraphLock(graphName); var graphs = partitionCache.get(graphName); if (graphs != null) { - var partition = graphs.get(partId ); + var partition = graphs.get(partId); if (partition != null) { return new KVPair<>(partition, getLeaderShard(partId)); } @@ -112,7 +118,7 @@ public KVPair getPartitionById(String graphName, } /** - * 返回key所在的分区信息 + * 返回 key 所在的分区信息 * * @param key * @return @@ -123,7 +129,7 @@ public KVPair getPartitionByKey(String graphName } /** - * 根据key的hashcode返回分区信息 + * 根据 key 的 hashcode 返回分区信息 * * @param graphName * @param code @@ -145,11 +151,11 @@ public List getPartitions(String graphName) { waitGraphLock(graphName); List partitions = new ArrayList<>(); - if (! partitionCache.containsKey(graphName)) { + if (!partitionCache.containsKey(graphName)) { return partitions; } - partitionCache.get(graphName).forEach((k,v) -> { - partitions.add(v); + partitionCache.get(graphName).forEach((k, v) -> { + partitions.add(v); }); return partitions; @@ -173,8 +179,8 @@ public boolean addPartition(String graphName, int partId, Metapb.Partition parti partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition); if (old != null) { - // old [1-3) 被 [2-3)覆盖了。当 [1-3) 变成[1-2) 不应该删除原先的[1-3) - // 当确认老的 start, end 都是自己的时候,才可以删除老的. 
(即还没覆盖) + // old [1-3) 被 [2-3) 覆盖了。当 [1-3) 变成 [1-2) 不应该删除原先的 [1-3) + // 当确认老的 start, end 都是自己的时候,才可以删除老的。(即还没覆盖) var graphRange = keyToPartIdCache.get(graphName); if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { @@ -183,7 +189,8 @@ public boolean addPartition(String graphName, int partId, Metapb.Partition parti } keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create()) - .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); + .put(Range.closedOpen(partition.getStartKey(), + partition.getEndKey()), partId); } finally { unlockGraph(graphName); } @@ -209,7 +216,8 @@ public void updatePartition(String graphName, int partId, Metapb.Partition parti partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition); keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create()) - .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); + .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), + partId); } finally { unlockGraph(graphName); } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java index fa972ab23d..129f22c081 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java @@ -120,7 +120,9 @@ public String get(byte[] keyBytes) throws PDException { } private String getValue(byte[] keyBytes, byte[] valueBytes) throws PDException { - if (valueBytes == null || valueBytes.length == 0) return ""; + if (valueBytes == null || valueBytes.length == 0) { + return ""; + } try { V v = V.parseFrom(valueBytes); if (v.getTtl() == 0 || v.getTtl() >= System.currentTimeMillis()) { @@ -178,7 +180,9 @@ public List deleteWithPrefix(String key) throws PDException { for (KV kv : kvList) { String kvKey = new String(kv.getKey()).replaceFirst(KV_PREFIX_DELIMITER, ""); String kvValue = getValue(kv.getKey(), kv.getValue()); - if (kvValue != null) kvs.add(Kv.newBuilder().setKey(kvKey).setValue(kvValue).build()); + if (kvValue != null) { + kvs.add(Kv.newBuilder().setKey(kvKey).setValue(kvValue).build()); + } } meta.removeByPrefix(storeKey); // log.warn("delete kv with key prefix :{}", key); @@ -228,7 +232,9 @@ public boolean locked(String key) throws PDException { private boolean owned(String key, long clientId) throws PDException { String lockKey = KvService.getKeyWithoutPrefix(KvService.LOCK_PREFIX, key); Map allLock = scanWithPrefix(lockKey); - if (allLock.size() == 0) return true; + if (allLock.size() == 0) { + return true; + } for (Map.Entry entry : allLock.entrySet()) { String entryKey = entry.getKey(); String[] split = entryKey.split(String.valueOf(KV_DELIMITER)); @@ -242,7 +248,9 @@ private boolean owned(String key, long clientId) throws PDException { public boolean lock(String key, long ttl, long clientId) throws PDException { //TODO lock improvement synchronized (KvService.class) { - if (!owned(key, clientId)) return false; + if (!owned(key, clientId)) { + return false; + } put(getLockKey(key, clientId), " ", ttl); return true; } @@ -261,7 +269,9 @@ public boolean lockWithoutReentrant(String key, long ttl, public boolean unlock(String key, long clientId) throws PDException { synchronized (KvService.class) { - if (!owned(key, clientId)) return false; + if (!owned(key, clientId)) { + return false; + } 
delete(getLockKey(key, clientId)); return true; } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java index fa064410c5..d8387e944c 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java @@ -271,7 +271,9 @@ public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDExcept log.info("updateStore storeId: {}, address: {}, state: {}", store.getId(), store.getAddress(), store.getState()); Metapb.Store lastStore = storeInfoMeta.getStore(store.getId()); - if (lastStore == null) return null; + if (lastStore == null) { + return null; + } Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore).clearLabels().clearStats(); store = builder.mergeFrom(store).build(); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java index b2e89c0b09..ba68aab9bd 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java @@ -194,7 +194,9 @@ private boolean isLeader() { * 巡查所有的store,检查是否在线,存储空间是否充足 */ public List patrolStores() throws PDException { - if (!isLeader()) return null; + if (!isLeader()) { + return null; + } List changedStores = new ArrayList<>(); // 检查store在线状态 @@ -613,7 +615,9 @@ public List splitPartition( * @throws PDException */ public List autoSplitPartition() throws PDException { - if (!isLeader()) return null; + if (!isLeader()) { + return null; + } if (Metapb.ClusterState.Cluster_OK != storeService.getClusterStats().getState()) { if (Metapb.ClusterState.Cluster_Offline == storeService.getClusterStats().getState()) { diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java index c224dc9715..a2d36a0028 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java @@ -195,7 +195,9 @@ public long getCId(String key, long max) throws PDException { } } } - if (current == last) return -1; + if (current == last) { + return -1; + } put(genCIDSlotKey(key, current), longToBytes(current)); put(keyBs, longToBytes(current + 1)); return current; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java index 6b591483e2..eacd593917 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java @@ -66,7 +66,9 @@ public byte[] getOne(byte[] key) throws PDException { public E getOne(Parser parser, byte[] key) throws PDException { try { byte[] bytes = store.get(key); - if (ArrayUtils.isEmpty(bytes)) return null; + if (ArrayUtils.isEmpty(bytes)) { + return null; + } return parser.parseFrom(bytes); } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java index a8928a7372..116bab48ce 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java +++ 
b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java @@ -48,10 +48,11 @@ import com.alipay.sofa.jraft.util.Endpoint; import com.alipay.sofa.jraft.util.internal.ThrowUtil; +import lombok.extern.slf4j.Slf4j; + @Slf4j public class RaftEngine { - private static final RaftEngine instance = new RaftEngine(); - private final String groupId = "pd_raft"; + private static final RaftEngine INSTANCE = new RaftEngine(); private final RaftStateMachine stateMachine; private PDConfig.Raft config; private RaftGroupService raftGroupService; @@ -64,16 +65,19 @@ public RaftEngine() { } public static RaftEngine getInstance() { - return instance; + return INSTANCE; } public boolean init(PDConfig.Raft config) { - if (this.raftNode != null) return false; + if (this.raftNode != null) { + return false; + } this.config = config; raftRpcClient = new RaftRpcClient(); raftRpcClient.init(new RpcOptions()); + String groupId = "pd_raft"; String raftPath = config.getDataPath() + "/" + groupId; new File(raftPath).mkdirs(); @@ -81,18 +85,16 @@ public boolean init(PDConfig.Raft config) { Configuration initConf = new Configuration(); initConf.parse(config.getPeersList()); if (config.isEnable() && config.getPeersList().length() < 3) { - log.error( - "The RaftEngine parameter is incorrect." + - " When RAFT is enabled, the number of peers " + - "cannot be less than 3"); + log.error("The RaftEngine parameter is incorrect." + + " When RAFT is enabled, the number of peers " + "cannot be less than 3"); } - // 设置Node参数,包括日志存储路径和状态机实例 + // 设置 Node 参数,包括日志存储路径和状态机实例 NodeOptions nodeOptions = new NodeOptions(); nodeOptions.setFsm(stateMachine); nodeOptions.setEnableMetrics(true); // 日志路径 nodeOptions.setLogUri(raftPath + "/log"); - // raft元数据路径 + // raft 元数据路径 nodeOptions.setRaftMetaUri(raftPath + "/meta"); // 快照路径 nodeOptions.setSnapshotUri(raftPath + "/snapshot"); @@ -104,7 +106,7 @@ public boolean init(PDConfig.Raft config) { nodeOptions.setRpcConnectTimeoutMs(config.getRpcTimeout()); nodeOptions.setRpcDefaultTimeout(config.getRpcTimeout()); nodeOptions.setRpcInstallSnapshotTimeout(config.getRpcTimeout()); - // 设置raft配置 + // 设置 raft 配置 RaftOptions raftOptions = nodeOptions.getRaftOptions(); nodeOptions.setEnableMetrics(true); @@ -112,9 +114,9 @@ public boolean init(PDConfig.Raft config) { final PeerId serverId = JRaftUtils.getPeerId(config.getAddress()); rpcServer = createRaftRpcServer(config.getAddress()); - // 构建raft组并启动raft - this.raftGroupService = new RaftGroupService(groupId, serverId, - nodeOptions, rpcServer, true); + // 构建 raft 组并启动 raft + this.raftGroupService = + new RaftGroupService(groupId, serverId, nodeOptions, rpcServer, true); this.raftNode = raftGroupService.start(false); log.info("RaftEngine start successfully: id = {}, peers list = {}", groupId, nodeOptions.getInitialConf().getPeers()); @@ -122,7 +124,7 @@ public boolean init(PDConfig.Raft config) { } /** - * 创建raft rpc server,用于pd之间通讯 + * 创建 raft rpc server,用于 pd 之间通讯 */ private RpcServer createRaftRpcServer(String raftAddr) { Endpoint endpoint = JRaftUtils.getEndPoint(raftAddr); @@ -158,7 +160,7 @@ public boolean isLeader() { } /** - * 添加Raft任务,grpc通过该接口给raft发送数据 + * 添加 Raft 任务,grpc 通过该接口给 raft 发送数据 */ public void addTask(Task task) { if (!isLeader()) { @@ -187,7 +189,7 @@ public PeerId getLeader() { } /** - * 向leader发消息,获取grpc地址; + * 向 leader 发消息,获取 grpc 地址; */ public String getLeaderGrpcAddress() throws ExecutionException, InterruptedException { if (isLeader()) { @@ -198,9 +200,8 @@ public String getLeaderGrpcAddress() throws 
ExecutionException, InterruptedExcep waitingForLeader(10000); } - return raftRpcClient.getGrpcAddress( - raftNode.getLeaderId().getEndpoint().toString()) - .get().getGrpcAddress(); + return raftRpcClient.getGrpcAddress(raftNode.getLeaderId().getEndpoint().toString()).get() + .getGrpcAddress(); } public Metapb.Member getLocalMember() { @@ -213,7 +214,7 @@ public Metapb.Member getLocalMember() { return builder.build(); } - public List getMembers() throws ExecutionException, InterruptedException { + public List getMembers() { List members = new ArrayList<>(); List peers = raftNode.listPeers(); @@ -226,23 +227,23 @@ public List getMembers() throws ExecutionException, InterruptedEx CompletableFuture future = raftRpcClient.getGrpcAddress(peerId.getEndpoint().toString()); - Metapb.ShardRole role = Metapb.ShardRole.Follower; - if (peerEquals(peerId, raftNode.getLeaderId())) { - role = Metapb.ShardRole.Leader; - } else if (learners.contains(peerId)) { - role = Metapb.ShardRole.Learner; - var state = raftNode.getReplicatorState(peerId); - if (state != null) { - builder.setReplicatorState(state.name()); - } - } - - builder.setRole(role); + // TODO: uncomment later - jraft problem +// Metapb.ShardRole role = Metapb.ShardRole.Follower; +// if (peerEquals(peerId, raftNode.getLeaderId())) { +// role = Metapb.ShardRole.Leader; +// } else if (learners.contains(peerId)) { +// role = Metapb.ShardRole.Learner; +// var state = raftNode.getReplicatorState(peerId); +// if (state != null) { +// builder.setReplicatorState(state.name()); +// } +// } +// +// builder.setRole(role); try { if (future.isCompletedExceptionally()) { - log.error("failed to getGrpcAddress of {}", - peerId.getEndpoint().toString()); + log.error("failed to getGrpcAddress of {}", peerId.getEndpoint().toString()); builder.setState(Metapb.StoreState.Offline); builder.setRaftUrl(peerId.getEndpoint().toString()); members.add(builder.build()); @@ -256,8 +257,7 @@ public List getMembers() throws ExecutionException, InterruptedEx members.add(builder.build()); } } catch (Exception e) { - log.error("failed to getGrpcAddress of {}. 
{}", - peerId.getEndpoint().toString(), e); + log.error("failed to getGrpcAddress of {}.", peerId.getEndpoint().toString(), e); builder.setState(Metapb.StoreState.Offline); builder.setRaftUrl(peerId.getEndpoint().toString()); members.add(builder.build()); @@ -283,7 +283,7 @@ public Status changePeerList(String peerList) { }); latch.await(); } catch (Exception e) { - log.error("failed to changePeerList to {},{}", peerList, e); + log.error("failed to changePeerList to {}", peerList, e); result.set(new Status(-1, e.getMessage())); } return result.get(); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java index f24d68c826..8ef79bbb7d 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java @@ -179,7 +179,9 @@ public List getListWithTTL(byte[] key) throws PDException { CACHE.keySet().forEach((cacheKey) -> { if (cacheKey.startsWith(storeKey)) { ConcurrentMap map; - if ((map = CACHE.get(cacheKey)) == null) return; + if ((map = CACHE.get(cacheKey)) == null) { + return; + } map.values().forEach((element) -> { ts.add((byte[]) element); }); @@ -192,7 +194,9 @@ public List getListWithTTL(byte[] key) throws PDException { public byte[] getWithTTL(byte[] key) throws PDException { ConcurrentMap map; String storeKey = new String(key, Charset.defaultCharset()); - if ((map = CACHE.get(storeKey)) == null) return null; + if ((map = CACHE.get(storeKey)) == null) { + return null; + } Object value = map.get(storeKey); return value == null ? null : (byte[]) value; } @@ -201,7 +205,9 @@ public byte[] getWithTTL(byte[] key) throws PDException { public void removeWithTTL(byte[] key) throws PDException { ConcurrentMap map; String storeKey = new String(key, Charset.defaultCharset()); - if ((map = CACHE.get(storeKey)) == null) return; + if ((map = CACHE.get(storeKey)) == null) { + return; + } map.remove(storeKey); } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java index ca0582209a..344a71cd48 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java @@ -1,423 +1,426 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.hugegraph.pd.license; - -import java.io.File; -import java.io.IOException; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.charset.Charset; -import java.text.SimpleDateFormat; -import java.time.Duration; -import java.time.Instant; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.prefs.Preferences; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hugegraph.license.ExtraParam; -import org.apache.hugegraph.license.LicenseVerifyParam; -import org.apache.hugegraph.license.MachineInfo; -import org.apache.hugegraph.pd.KvService; -import org.apache.hugegraph.pd.common.PDRuntimeException; -import org.apache.hugegraph.pd.config.PDConfig; -import org.apache.hugegraph.pd.grpc.Pdpb; -import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc; -import org.apache.hugegraph.pd.grpc.kv.TTLRequest; -import org.apache.hugegraph.pd.grpc.kv.TTLResponse; -import org.apache.hugegraph.pd.raft.RaftEngine; -import org.springframework.stereotype.Service; -import org.springframework.util.Base64Utils; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.gson.Gson; -import com.google.gson.internal.LinkedTreeMap; - -import de.schlichtherle.license.CipherParam; -import de.schlichtherle.license.DefaultCipherParam; -import de.schlichtherle.license.DefaultKeyStoreParam; -import de.schlichtherle.license.DefaultLicenseParam; -import de.schlichtherle.license.KeyStoreParam; -import de.schlichtherle.license.LicenseContent; -import de.schlichtherle.license.LicenseParam; -import io.grpc.CallOptions; -import io.grpc.ManagedChannel; -import io.grpc.ManagedChannelBuilder; -import io.grpc.MethodDescriptor; -import io.grpc.stub.AbstractBlockingStub; -import io.grpc.stub.StreamObserver; -import lombok.extern.slf4j.Slf4j; - -@Service -@Slf4j -public class LicenseVerifierService { - - private static final Duration CHECK_INTERVAL = Duration.ofMinutes(10); - private static final String contentKey = "contentKey"; - private static final Gson mapper = new Gson(); - private static LicenseContent content; - private static KvService kvService; - private static volatile boolean installed = false; - private final MachineInfo machineInfo; - private final PDConfig pdConfig; - private final Instant lastCheckTime = Instant.now(); - SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); - // private final LicenseVerifyParam verifyParam; - private LicenseVerifyManager manager; - private ManagedChannel channel; - - // public static LicenseVerifierService instance() { - // if (INSTANCE == null) { - // synchronized (LicenseVerifierService.class) { - // if (INSTANCE == null) { - // INSTANCE = new LicenseVerifierService(); - // } - // } - // } - // return INSTANCE; - // } - - // public void verifyIfNeeded() { - // Instant now = Instant.now(); - // Duration interval = Duration.between(this.lastCheckTime, now); - // if (!interval.minus(CHECK_INTERVAL).isNegative()) { - // this.verify(); - // this.lastCheckTime = now; - // } - // } - - public LicenseVerifierService(PDConfig pdConfig) { - this.pdConfig = pdConfig; - machineInfo = new MachineInfo(); - kvService = new KvService(pdConfig); - // verifyParam = initLicense(pdConfig); - } - - private static LicenseVerifyParam buildVerifyParam(String path) { - // NOTE: can't use JsonUtil due to it bind tinkerpop jackson - try { - ObjectMapper mapper = new ObjectMapper(); - File licenseParamFile = new File(path); - if 
(!licenseParamFile.exists()) { - log.warn("failed to get file:{}", path); - return null; - } - return mapper.readValue(licenseParamFile, LicenseVerifyParam.class); - } catch (IOException e) { - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, - String.format("Failed to read json stream to %s", - LicenseVerifyParam.class)); - } - } - - public LicenseVerifyParam init() { - LicenseVerifyParam verifyParam = null; - if (!installed) { - synchronized (LicenseVerifierService.class) { - if (!installed) { - verifyParam = buildVerifyParam(pdConfig.getVerifyPath()); - log.info("get license param: {}", pdConfig.getVerifyPath()); - if (verifyParam != null) { - LicenseParam licenseParam = this.initLicenseParam(verifyParam); - this.manager = new LicenseVerifyManager(licenseParam); - // this.install("d01e1814cd9edb01a05671bebf3919cc"); - try { - // this.verifyPublicCert(md5); - File licenseFile = new File(pdConfig.getLicensePath()); - if (!licenseFile.exists()) { - log.warn("invalid parameter:license-path"); - return null; - } else { - log.info("get license file....{}", licenseFile.getAbsolutePath()); - } - this.manager.uninstall(); - content = this.manager.install(licenseFile); - ExtraParam param = LicenseVerifyManager.getExtraParams(content); - content.setExtra(param); - this.checkIpAndMac(param); - // 获取有效期,并设置过期时间,通知leader,将content保存到... - Date notAfter = content.getNotAfter(); - long ttl = notAfter.getTime() - System.currentTimeMillis(); - final TTLResponse[] info = {null}; - if (!isLeader()) { - while (RaftEngine.getInstance().getLeader() == null) { - this.wait(200); - } - if (RaftEngine.getInstance().getLeader() != null) { - CountDownLatch latch = new CountDownLatch(1); - TTLRequest request = - TTLRequest.newBuilder().setKey(contentKey).setValue( - mapper.toJson(content, - LicenseContent.class)) - .setTtl(ttl).build(); - StreamObserver observer = - new StreamObserver() { - @Override - public void onNext(TTLResponse value) { - info[0] = value; - latch.countDown(); - } - - @Override - public void onError(Throwable t) { - latch.countDown(); - } - - @Override - public void onCompleted() { - latch.countDown(); - } - }; - redirectToLeader(KvServiceGrpc.getPutTTLMethod(), request, - observer); - latch.await(); - Pdpb.Error error = info[0].getHeader().getError(); - if (!error.getType().equals(Pdpb.ErrorType.OK)) { - throw new Exception(error.getMessage()); - } - } else { - log.warn("wait for leader to put the license content......"); - } - - } else { - kvService.put(contentKey, - mapper.toJson(content, LicenseContent.class), ttl); - } - installed = true; - log.info("The license is successfully installed, valid for {} - {}", - content.getNotBefore(), notAfter); - } catch (Exception e) { - log.error("Failed to install license", e); - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_ERROR_VALUE, - "Failed to install license, ", e); - } - } - } - } - } - return verifyParam; - } - - public synchronized void install(String md5) { - - } - - public HashMap getContext() throws Exception { - try { - String value = kvService.get(contentKey); - if (StringUtils.isEmpty(value)) { - throw new Exception("can not find license content from storage"); - } - LicenseContent content = mapper.fromJson(value, LicenseContent.class); - Date notAfter = content.getNotAfter(); - Date notBefore = content.getNotBefore(); - Date issued = content.getIssued(); - // long currentTimeMillis = System.currentTimeMillis(); - // long diff = notAfter - currentTimeMillis; - // boolean expired = diff <= 0; - HashMap result = 
mapper.fromJson(value, HashMap.class); - result.put("current", formatter.format(new Date())); - result.put("notAfter", formatter.format(notAfter)); - result.put("issued", formatter.format(issued)); - result.put("notBefore", formatter.format(notBefore)); - return result; - } catch (Exception e) { - throw new Exception("can not find license content from storage:" + e.getMessage()); - } - } - - public LicenseContent verify(int cores, int nodeCount) { - try { - String value = kvService.get(contentKey); - if (StringUtils.isEmpty(value)) { - throw new Exception("can not find license content from storage"); - } - LicenseContent content = mapper.fromJson(value, LicenseContent.class); - LinkedTreeMap param = (LinkedTreeMap) content.getExtra(); - int licCpus = ((Double) param.get("cpus")).intValue(); - int licNodes = ((Double) param.get("nodes")).intValue(); - if (param != null) { - if (licCpus != -1) { - // licCpus为 -1时,表示不限制cpu核数 - if (cores <= 0 || cores > licCpus) { - String msg = - String.format("无效的cpu核数: %s,授权数: %s", cores, licCpus); - throw new PDRuntimeException( - Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, msg); - } - } - - if (licNodes != -1) { - // licNodes为 -1时,表示不限制服务节点数目 - if (nodeCount > licNodes) { - String msg = - String.format("无效的节点个数: %s,授权数: %s", nodeCount, licNodes); - throw new PDRuntimeException( - Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, msg); - } - } - } - return content; - } catch (Exception e) { - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, - "授权信息校验异常," + e.getMessage()); - } - } - - public boolean isLeader() { - return RaftEngine.getInstance().isLeader(); - } - - // private void verifyPublicCert(String expectMD5) { - // String path = this.verifyParam.publicKeyPath(); - // try (InputStream is = LicenseVerifierService.class.getResourceAsStream(path)) { - // String actualMD5 = DigestUtils.md5Hex(is); - // if (!actualMD5.equals(expectMD5)) { - // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Invalid public - // cert"); - // } - // } catch (IOException e) { - // log.error("Failed to read public cert", e); - // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Failed to read - // public cert", e); - // } - // } - - private > void redirectToLeader( - MethodDescriptor method, ReqT req, - io.grpc.stub.StreamObserver observer) { - try { - if (channel == null) { - synchronized (this) { - if (channel == null) { - channel = ManagedChannelBuilder - .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()) - .usePlaintext() - .build(); - } - } - log.info("Grpc get leader address {}", - RaftEngine.getInstance().getLeaderGrpcAddress()); - } - - io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), - req, - observer); - } catch (Exception e) { - e.printStackTrace(); - } - - } - - private LicenseParam initLicenseParam(LicenseVerifyParam param) { - Preferences preferences = Preferences.userNodeForPackage(LicenseVerifierService.class); - CipherParam cipherParam = new DefaultCipherParam(param.storePassword()); - KeyStoreParam keyStoreParam = new DefaultKeyStoreParam(LicenseVerifierService.class, - param.publicKeyPath(), - param.publicAlias(), - param.storePassword(), null); - return new DefaultLicenseParam(param.subject(), preferences, keyStoreParam, cipherParam); - } - - public String getIpAndMac() { - List actualIps = this.machineInfo.getIpAddress(); - String host = pdConfig.getHost(); - String licenseHost = host; - if (!actualIps.contains(host)) { - licenseHost = actualIps.get(0); - } - try { 
- String mac = this.machineInfo.getMacByInetAddress(InetAddress.getByName(licenseHost)); - HashMap ipAndMac = new HashMap<>(); - ipAndMac.put("ip", licenseHost); - ipAndMac.put("mac", mac); - String json = new Gson().toJson(ipAndMac); - String encode = Base64Utils.encodeToString(json.getBytes(Charset.defaultCharset())); - return encode; - } catch (Exception e) { - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_ERROR_VALUE, - String.format("Failed to get ip and mac for %s", - e.getMessage())); - } - } - - private void checkIpAndMac(ExtraParam param) { - String expectIp = param.ip(); - boolean matched = false; - List actualIps = null; - if (StringUtils.isEmpty(expectIp)) { - matched = true; - } else { - actualIps = this.machineInfo.getIpAddress(); - for (String actualIp : actualIps) { - if (actualIp.equalsIgnoreCase(expectIp)) { - matched = true; - break; - } - } - } - if (!matched) { - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, String.format( - "The server's ip '%s' doesn't match the authorized '%s'", actualIps, expectIp)); - } - String expectMac = param.mac(); - if (StringUtils.isEmpty(expectMac)) { - return; - } - // The mac must be not empty here - if (!StringUtils.isEmpty(expectIp)) { - String actualMac; - try { - actualMac = this.machineInfo.getMacByInetAddress(InetAddress.getByName(expectIp)); - } catch (UnknownHostException e) { - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, - String.format("Failed to get mac address for ip '%s'", - expectIp)); - } - String expectFormatMac = expectMac.replaceAll(":", "-"); - String actualFormatMac = actualMac.replaceAll(":", "-"); - if (!actualFormatMac.equalsIgnoreCase(expectFormatMac)) { - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, - String.format( - "The server's mac '%s' doesn't match the " + - "authorized '%s'", - actualMac, expectMac)); - } - } else { - String expectFormatMac = expectMac.replaceAll(":", "-"); - List actualMacs = this.machineInfo.getMacAddress(); - matched = false; - for (String actualMac : actualMacs) { - String actualFormatMac = actualMac.replaceAll(":", "-"); - if (actualFormatMac.equalsIgnoreCase(expectFormatMac)) { - matched = true; - break; - } - } - if (!matched) { - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, - String.format( - "The server's macs %s don't match the " + - "authorized '%s'", - actualMacs, expectMac)); - } - } - } -} +// TODO: uncomment later +///* +// * Licensed to the Apache Software Foundation (ASF) under one or more +// * contributor license agreements. See the NOTICE file distributed with this +// * work for additional information regarding copyright ownership. The ASF +// * licenses this file to You under the Apache License, Version 2.0 (the +// * "License"); you may not use this file except in compliance with the License. +// * You may obtain a copy of the License at +// * +// * http://www.apache.org/licenses/LICENSE-2.0 +// * +// * Unless required by applicable law or agreed to in writing, software +// * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// * License for the specific language governing permissions and limitations +// * under the License. 
+// */ +// +//package org.apache.hugegraph.pd.license; +// +//import java.io.File; +//import java.io.IOException; +//import java.net.InetAddress; +//import java.net.UnknownHostException; +//import java.nio.charset.Charset; +//import java.text.SimpleDateFormat; +//import java.time.Duration; +//import java.time.Instant; +//import java.util.Date; +//import java.util.HashMap; +//import java.util.List; +//import java.util.concurrent.CountDownLatch; +//import java.util.prefs.Preferences; +// +//import org.apache.commons.lang3.StringUtils; +//import org.apache.hugegraph.license.ExtraParam; +//import org.apache.hugegraph.license.LicenseVerifyParam; +//import org.apache.hugegraph.license.MachineInfo; +//import org.apache.hugegraph.pd.KvService; +//import org.apache.hugegraph.pd.common.PDRuntimeException; +//import org.apache.hugegraph.pd.config.PDConfig; +//import org.apache.hugegraph.pd.grpc.Pdpb; +//import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc; +//import org.apache.hugegraph.pd.grpc.kv.TTLRequest; +//import org.apache.hugegraph.pd.grpc.kv.TTLResponse; +//import org.apache.hugegraph.pd.raft.RaftEngine; +//import org.springframework.stereotype.Service; +//import org.springframework.util.Base64Utils; +// +//import com.fasterxml.jackson.databind.ObjectMapper; +//import com.google.gson.Gson; +//import com.google.gson.internal.LinkedTreeMap; +// +//import de.schlichtherle.license.CipherParam; +//import de.schlichtherle.license.DefaultCipherParam; +//import de.schlichtherle.license.DefaultKeyStoreParam; +//import de.schlichtherle.license.DefaultLicenseParam; +//import de.schlichtherle.license.KeyStoreParam; +//import de.schlichtherle.license.LicenseContent; +//import de.schlichtherle.license.LicenseParam; +//import io.grpc.CallOptions; +//import io.grpc.ManagedChannel; +//import io.grpc.ManagedChannelBuilder; +//import io.grpc.MethodDescriptor; +//import io.grpc.stub.AbstractBlockingStub; +//import io.grpc.stub.StreamObserver; +//import lombok.extern.slf4j.Slf4j; +// +//@Service +//@Slf4j +//public class LicenseVerifierService { +// +// private static final Duration CHECK_INTERVAL = Duration.ofMinutes(10); +// private static final String contentKey = "contentKey"; +// private static final Gson mapper = new Gson(); +// private static LicenseContent content; +// private static KvService kvService; +// private static volatile boolean installed = false; +// private final MachineInfo machineInfo; +// private final PDConfig pdConfig; +// private final Instant lastCheckTime = Instant.now(); +// SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); +// // private final LicenseVerifyParam verifyParam; +// private LicenseVerifyManager manager; +// private ManagedChannel channel; +// +// // public static LicenseVerifierService instance() { +// // if (INSTANCE == null) { +// // synchronized (LicenseVerifierService.class) { +// // if (INSTANCE == null) { +// // INSTANCE = new LicenseVerifierService(); +// // } +// // } +// // } +// // return INSTANCE; +// // } +// +// // public void verifyIfNeeded() { +// // Instant now = Instant.now(); +// // Duration interval = Duration.between(this.lastCheckTime, now); +// // if (!interval.minus(CHECK_INTERVAL).isNegative()) { +// // this.verify(); +// // this.lastCheckTime = now; +// // } +// // } +// +// public LicenseVerifierService(PDConfig pdConfig) { +// this.pdConfig = pdConfig; +// machineInfo = new MachineInfo(); +// kvService = new KvService(pdConfig); +// // verifyParam = initLicense(pdConfig); +// } +// +// private static LicenseVerifyParam 
buildVerifyParam(String path) { +// // NOTE: can't use JsonUtil due to it bind tinkerpop jackson +// try { +// ObjectMapper mapper = new ObjectMapper(); +// File licenseParamFile = new File(path); +// if (!licenseParamFile.exists()) { +// log.warn("failed to get file:{}", path); +// return null; +// } +// return mapper.readValue(licenseParamFile, LicenseVerifyParam.class); +// } catch (IOException e) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// String.format("Failed to read json stream to %s", +// LicenseVerifyParam.class)); +// } +// } +// +// public LicenseVerifyParam init() { +// LicenseVerifyParam verifyParam = null; +// if (!installed) { +// synchronized (LicenseVerifierService.class) { +// if (!installed) { +// verifyParam = buildVerifyParam(pdConfig.getVerifyPath()); +// log.info("get license param: {}", pdConfig.getVerifyPath()); +// if (verifyParam != null) { +// LicenseParam licenseParam = this.initLicenseParam(verifyParam); +// this.manager = new LicenseVerifyManager(licenseParam); +// // this.install("d01e1814cd9edb01a05671bebf3919cc"); +// try { +// // this.verifyPublicCert(md5); +// File licenseFile = new File(pdConfig.getLicensePath()); +// if (!licenseFile.exists()) { +// log.warn("invalid parameter:license-path"); +// return null; +// } else { +// log.info("get license file....{}", licenseFile.getAbsolutePath()); +// } +// this.manager.uninstall(); +// content = this.manager.install(licenseFile); +// ExtraParam param = LicenseVerifyManager.getExtraParams(content); +// content.setExtra(param); +// this.checkIpAndMac(param); +// // 获取有效期,并设置过期时间,通知 leader,将 content 保存到... +// Date notAfter = content.getNotAfter(); +// long ttl = notAfter.getTime() - System.currentTimeMillis(); +// final TTLResponse[] info = {null}; +// if (!isLeader()) { +// while (RaftEngine.getInstance().getLeader() == null) { +// this.wait(200); +// } +// if (RaftEngine.getInstance().getLeader() != null) { +// CountDownLatch latch = new CountDownLatch(1); +// TTLRequest request = +// TTLRequest.newBuilder().setKey(contentKey).setValue( +// mapper.toJson(content, +// LicenseContent.class)) +// .setTtl(ttl).build(); +// StreamObserver observer = +// new StreamObserver() { +// @Override +// public void onNext(TTLResponse value) { +// info[0] = value; +// latch.countDown(); +// } +// +// @Override +// public void onError(Throwable t) { +// latch.countDown(); +// } +// +// @Override +// public void onCompleted() { +// latch.countDown(); +// } +// }; +// redirectToLeader(KvServiceGrpc.getPutTTLMethod(), request, +// observer); +// latch.await(); +// Pdpb.Error error = info[0].getHeader().getError(); +// if (!error.getType().equals(Pdpb.ErrorType.OK)) { +// throw new Exception(error.getMessage()); +// } +// } else { +// log.warn("wait for leader to put the license content......"); +// } +// +// } else { +// kvService.put(contentKey, +// mapper.toJson(content, LicenseContent.class), ttl); +// } +// installed = true; +// log.info("The license is successfully installed, valid for {} - {}", +// content.getNotBefore(), notAfter); +// } catch (Exception e) { +// log.error("Failed to install license", e); +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_ERROR_VALUE, +// "Failed to install license, ", e); +// } +// } +// } +// } +// } +// return verifyParam; +// } +// +// public synchronized void install(String md5) { +// +// } +// +// public HashMap getContext() throws Exception { +// try { +// String value = kvService.get(contentKey); +// if (StringUtils.isEmpty(value)) { 
+// throw new Exception("can not find license content from storage"); +// } +// LicenseContent content = mapper.fromJson(value, LicenseContent.class); +// Date notAfter = content.getNotAfter(); +// Date notBefore = content.getNotBefore(); +// Date issued = content.getIssued(); +// // long currentTimeMillis = System.currentTimeMillis(); +// // long diff = notAfter - currentTimeMillis; +// // boolean expired = diff <= 0; +// HashMap result = mapper.fromJson(value, HashMap.class); +// result.put("current", formatter.format(new Date())); +// result.put("notAfter", formatter.format(notAfter)); +// result.put("issued", formatter.format(issued)); +// result.put("notBefore", formatter.format(notBefore)); +// return result; +// } catch (Exception e) { +// throw new Exception("can not find license content from storage:" + e.getMessage()); +// } +// } +// +// public LicenseContent verify(int cores, int nodeCount) { +// try { +// String value = kvService.get(contentKey); +// if (StringUtils.isEmpty(value)) { +// throw new Exception("can not find license content from storage"); +// } +// LicenseContent content = mapper.fromJson(value, LicenseContent.class); +// LinkedTreeMap param = (LinkedTreeMap) content.getExtra(); +// int licCpus = ((Double) param.get("cpus")).intValue(); +// int licNodes = ((Double) param.get("nodes")).intValue(); +// if (param != null) { +// if (licCpus != -1) { +// // licCpus 为 -1 时,表示不限制 cpu 核数 +// if (cores <= 0 || cores > licCpus) { +// String msg = +// String.format("无效的 cpu 核数:%s,授权数:%s", cores, licCpus); +// throw new PDRuntimeException( +// Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, msg); +// } +// } +// +// if (licNodes != -1) { +// // licNodes 为 -1 时,表示不限制服务节点数目 +// if (nodeCount > licNodes) { +// String msg = +// String.format("无效的节点个数:%s,授权数:%s", nodeCount, licNodes); +// throw new PDRuntimeException( +// Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, msg); +// } +// } +// } +// return content; +// } catch (Exception e) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// "授权信息校验异常," + e.getMessage()); +// } +// } +// +// public boolean isLeader() { +// return RaftEngine.getInstance().isLeader(); +// } +// +// // private void verifyPublicCert(String expectMD5) { +// // String path = this.verifyParam.publicKeyPath(); +// // try (InputStream is = LicenseVerifierService.class.getResourceAsStream(path)) { +// // String actualMD5 = DigestUtils.md5Hex(is); +// // if (!actualMD5.equals(expectMD5)) { +// // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Invalid public +// // cert"); +// // } +// // } catch (IOException e) { +// // log.error("Failed to read public cert", e); +// // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Failed to read +// // public cert", e); +// // } +// // } +// +// private > void redirectToLeader( +// MethodDescriptor method, ReqT req, +// io.grpc.stub.StreamObserver observer) { +// try { +// if (channel == null) { +// synchronized (this) { +// if (channel == null) { +// channel = ManagedChannelBuilder +// .forTarget(RaftEngine.getInstance().getLeaderGrpcAddress()) +// .usePlaintext() +// .build(); +// } +// } +// log.info("Grpc get leader address {}", +// RaftEngine.getInstance().getLeaderGrpcAddress()); +// } +// +// io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions.DEFAULT), +// req, +// observer); +// } catch (Exception e) { +// e.printStackTrace(); +// } +// +// } +// +// private LicenseParam initLicenseParam(LicenseVerifyParam param) { +// Preferences 
preferences = Preferences.userNodeForPackage(LicenseVerifierService.class); +// CipherParam cipherParam = new DefaultCipherParam(param.storePassword()); +// KeyStoreParam keyStoreParam = new DefaultKeyStoreParam(LicenseVerifierService.class, +// param.publicKeyPath(), +// param.publicAlias(), +// param.storePassword(), null); +// return new DefaultLicenseParam(param.subject(), preferences, keyStoreParam, cipherParam); +// } +// +// public String getIpAndMac() { +// List actualIps = this.machineInfo.getIpAddress(); +// String host = pdConfig.getHost(); +// String licenseHost = host; +// if (!actualIps.contains(host)) { +// licenseHost = actualIps.get(0); +// } +// try { +// String mac = this.machineInfo.getMacByInetAddress(InetAddress.getByName(licenseHost)); +// HashMap ipAndMac = new HashMap<>(); +// ipAndMac.put("ip", licenseHost); +// ipAndMac.put("mac", mac); +// String json = new Gson().toJson(ipAndMac); +// String encode = Base64Utils.encodeToString(json.getBytes(Charset.defaultCharset())); +// return encode; +// } catch (Exception e) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_ERROR_VALUE, +// String.format("Failed to get ip and mac for %s", +// e.getMessage())); +// } +// } +// +// private void checkIpAndMac(ExtraParam param) { +// String expectIp = param.ip(); +// boolean matched = false; +// List actualIps = null; +// if (StringUtils.isEmpty(expectIp)) { +// matched = true; +// } else { +// actualIps = this.machineInfo.getIpAddress(); +// for (String actualIp : actualIps) { +// if (actualIp.equalsIgnoreCase(expectIp)) { +// matched = true; +// break; +// } +// } +// } +// if (!matched) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, String.format( +// "The server's ip '%s' doesn't match the authorized '%s'", actualIps, +// expectIp)); +// } +// String expectMac = param.mac(); +// if (StringUtils.isEmpty(expectMac)) { +// return; +// } +// // The mac must be not empty here +// if (!StringUtils.isEmpty(expectIp)) { +// String actualMac; +// try { +// actualMac = this.machineInfo.getMacByInetAddress(InetAddress.getByName(expectIp)); +// } catch (UnknownHostException e) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// String.format("Failed to get mac address for ip +// '%s'", +// expectIp)); +// } +// String expectFormatMac = expectMac.replaceAll(":", "-"); +// String actualFormatMac = actualMac.replaceAll(":", "-"); +// if (!actualFormatMac.equalsIgnoreCase(expectFormatMac)) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// String.format( +// "The server's mac '%s' doesn't match the " + +// "authorized '%s'", +// actualMac, expectMac)); +// } +// } else { +// String expectFormatMac = expectMac.replaceAll(":", "-"); +// List actualMacs = this.machineInfo.getMacAddress(); +// matched = false; +// for (String actualMac : actualMacs) { +// String actualFormatMac = actualMac.replaceAll(":", "-"); +// if (actualFormatMac.equalsIgnoreCase(expectFormatMac)) { +// matched = true; +// break; +// } +// } +// if (!matched) { +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// String.format( +// "The server's macs %s don't match the " + +// "authorized '%s'", +// actualMacs, expectMac)); +// } +// } +// } +//} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java index ab207898b6..d584c45400 100644 --- 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java @@ -1,76 +1,77 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ - -package org.apache.hugegraph.pd.license; - -import java.io.IOException; -import java.util.List; - -import org.apache.hugegraph.license.CommonLicenseManager; -import org.apache.hugegraph.license.ExtraParam; -import org.apache.hugegraph.pd.common.PDRuntimeException; -import org.apache.hugegraph.pd.grpc.Pdpb; - -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; - -import de.schlichtherle.license.LicenseContent; -import de.schlichtherle.license.LicenseContentException; -import de.schlichtherle.license.LicenseParam; -import lombok.extern.slf4j.Slf4j; - -@Slf4j -public class LicenseVerifyManager extends CommonLicenseManager { - - private static final ObjectMapper MAPPER = new ObjectMapper(); - private static final int NO_LIMIT = -1; - - public LicenseVerifyManager(LicenseParam param) { - super(param); - } - - public static ExtraParam getExtraParams(LicenseContent content) { - List params; - try { - TypeReference> type; - type = new TypeReference>() { - }; - params = MAPPER.readValue((String) content.getExtra(), type); - if (params != null && params.size() > 0) { - return params.get(0); - } - } catch (IOException e) { - log.error("Failed to read extra params", e); - throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, - "Failed to read extra params", e); - } - return null; - } - - @Override - protected synchronized void validate(LicenseContent content) throws LicenseContentException { - // Call super validate firstly to verify the common license parameters - try { - super.validate(content); - } catch (LicenseContentException e) { - // log.error("Failed to verify license", e); - throw e; - } - // Verify the customized license parameters. - getExtraParams(content); - } -} +// TODO: uncomment later +///* +// * Licensed to the Apache Software Foundation (ASF) under one or more +// * contributor license agreements. See the NOTICE file distributed with this +// * work for additional information regarding copyright ownership. The ASF +// * licenses this file to You under the Apache License, Version 2.0 (the +// * "License"); you may not use this file except in compliance with the License. +// * You may obtain a copy of the License at +// * +// * http://www.apache.org/licenses/LICENSE-2.0 +// * +// * Unless required by applicable law or agreed to in writing, software +// * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// * License for the specific language governing permissions and limitations +// * under the License. +// */ +// +//package org.apache.hugegraph.pd.license; +// +//import java.io.IOException; +//import java.util.List; +// +//import org.apache.hugegraph.license.CommonLicenseManager; +//import org.apache.hugegraph.license.ExtraParam; +//import org.apache.hugegraph.pd.common.PDRuntimeException; +//import org.apache.hugegraph.pd.grpc.Pdpb; +// +//import com.fasterxml.jackson.core.type.TypeReference; +//import com.fasterxml.jackson.databind.ObjectMapper; +// +//import de.schlichtherle.license.LicenseContent; +//import de.schlichtherle.license.LicenseContentException; +//import de.schlichtherle.license.LicenseParam; +//import lombok.extern.slf4j.Slf4j; +// +//@Slf4j +//public class LicenseVerifyManager extends CommonLicenseManager { +// +// private static final ObjectMapper MAPPER = new ObjectMapper(); +// private static final int NO_LIMIT = -1; +// +// public LicenseVerifyManager(LicenseParam param) { +// super(param); +// } +// +// public static ExtraParam getExtraParams(LicenseContent content) { +// List params; +// try { +// TypeReference> type; +// type = new TypeReference>() { +// }; +// params = MAPPER.readValue((String) content.getExtra(), type); +// if (params != null && params.size() > 0) { +// return params.get(0); +// } +// } catch (IOException e) { +// log.error("Failed to read extra params", e); +// throw new PDRuntimeException(Pdpb.ErrorType.LICENSE_VERIFY_ERROR_VALUE, +// "Failed to read extra params", e); +// } +// return null; +// } +// +// @Override +// protected synchronized void validate(LicenseContent content) throws LicenseContentException { +// // Call super validate firstly to verify the common license parameters +// try { +// super.validate(content); +// } catch (LicenseContentException e) { +// // log.error("Failed to verify license", e); +// throw e; +// } +// // Verify the customized license parameters. 
+// getExtraParams(content); +// } +//} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java index e2fbdae721..521a18aff6 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java @@ -177,7 +177,9 @@ private static T getSubject(PulseType pulseType, Class clazz) { private static NoticeBroadcaster createBroadcaster(Metapb.QueueItem item) { PartitionHeartbeatResponse notice = toNotice(item); - if (notice == null) return null; + if (notice == null) { + return null; + } NoticeBroadcaster res = createBroadcaster(notice); res.setDurableId(item.getItemId()); res.setTimestamp(item.getTimestamp()); @@ -306,7 +308,9 @@ private void addObserver(PulseCreateRequest request) { } PulseType pulseType = getPulseType(request); - if (pulseType == null) return; + if (pulseType == null) { + return; + } this.subject = getSubject(pulseType); this.observerId = createObserverId(); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java index 1bd2c8ee95..a4b83333ed 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java @@ -39,10 +39,6 @@ public class API { public static String QUOTATION = "\""; public static String COMMA = ","; public static String COLON = ": "; - public static final String VERSION = "3.6.5"; - public static final String PD = "PD"; - public static final String STORE = "STORE"; - public String toJSON(List values, String key) { @@ -119,15 +115,14 @@ public String toJSON(Map> values) { } public String toJSON(PDException exception) { - StringBuilder builder = new StringBuilder(); - builder.append("{") - .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON) - .append(exception.getErrorCode()).append(COMMA) - .append(QUOTATION).append(ERROR_KEY).append(QUOTATION).append(COLON) - .append(QUOTATION).append(exception.getMessage()).append(QUOTATION); - builder.append("}"); + String builder = "{" + + QUOTATION + STATUS_KEY + QUOTATION + COLON + + exception.getErrorCode() + COMMA + + QUOTATION + ERROR_KEY + QUOTATION + COLON + + QUOTATION + exception.getMessage() + QUOTATION + + "}"; - return builder.toString(); + return builder; } public String toJSON(Exception exception) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java index b0ac0b36dd..5c9bb36cad 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java @@ -33,7 +33,6 @@ import org.apache.hugegraph.pd.grpc.Pdpb.GetMembersResponse; import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; import org.apache.hugegraph.pd.grpc.discovery.Query; -import org.apache.hugegraph.pd.license.LicenseVerifierService; import org.apache.hugegraph.pd.model.RegistryQueryRestRequest; import org.apache.hugegraph.pd.model.RegistryRestRequest; import org.apache.hugegraph.pd.model.RegistryRestResponse; @@ -176,8 +175,9 @@ public RegistryRestResponse getLicenseInfo(HttpServletRequest request) { RegistryRestResponse response = new RegistryRestResponse(); try { response.setErrorType(Pdpb.ErrorType.OK); - LicenseVerifierService 
licenseVerifierService = pdService.getLicenseVerifierService(); - response.setData(licenseVerifierService.getContext()); + // TODO: uncomment later + //LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); + //response.setData(licenseVerifierService.getContext()); } catch (Exception e) { log.warn(e.getMessage()); response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); @@ -193,8 +193,9 @@ public RegistryRestResponse getLicenseMachineInfo(HttpServletRequest request) { RegistryRestResponse response = new RegistryRestResponse(); try { response.setErrorType(Pdpb.ErrorType.OK); - LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); - response.setData(licenseVerifierService.getIpAndMac()); + // TODO: uncomment later + //LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); + //response.setData(licenseVerifierService.getIpAndMac()); } catch (Exception e) { log.warn(e.getMessage()); response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java index 0a73f69246..e67a72052c 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java @@ -33,7 +33,6 @@ import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; import org.apache.hugegraph.pd.grpc.discovery.Query; import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; -import org.apache.hugegraph.pd.license.LicenseVerifierService; import org.apache.hugegraph.pd.pulse.PDPulseSubject; import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.raft.RaftStateListener; @@ -41,9 +40,10 @@ import org.lognet.springboot.grpc.GRpcService; import org.springframework.beans.factory.annotation.Autowired; +import io.grpc.ManagedChannel; import lombok.extern.slf4j.Slf4j; - +// TODO: uncomment later @Slf4j @GRpcService public class DiscoveryService extends DiscoveryServiceGrpc.DiscoveryServiceImplBase implements @@ -53,7 +53,7 @@ public class DiscoveryService extends DiscoveryServiceGrpc.DiscoveryServiceImplB static final AtomicLong id = new AtomicLong(); private static final String CORES = "cores"; RegistryService register = null; - LicenseVerifierService licenseVerifierService; + //LicenseVerifierService licenseVerifierService; @Autowired private PDConfig pdConfig; private ManagedChannel channel; @@ -64,7 +64,7 @@ public void init() throws PDException { RaftEngine.getInstance().init(pdConfig.getRaft()); RaftEngine.getInstance().addStateListener(this); register = new RegistryService(pdConfig); - licenseVerifierService = new LicenseVerifierService(pdConfig); + //licenseVerifierService = new LicenseVerifierService(pdConfig); } private Pdpb.ResponseHeader newErrorHeader(PDException e) { @@ -101,7 +101,7 @@ public void register(NodeInfo request, io.grpc.stub.StreamObserver throw new PDException(-1, "core count can not be null"); } int core = Integer.parseInt(coreCount); - licenseVerifierService.verify(core, nodeCount); + //licenseVerifierService.verify(core, nodeCount); } register.register(request, outTimes); String valueId = request.getId(); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java index c31f11f939..a9be77c519 100644 --- 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java @@ -15,6 +15,8 @@ * under the License. */ +package org.apache.hugegraph.pd.service; + import java.util.List; import java.util.Map; import java.util.Random; @@ -45,7 +47,6 @@ import org.apache.hugegraph.pd.grpc.kv.WatchType; import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.raft.RaftStateListener; -import org.apache.hugegraph.pd.service.ServiceGrpc; import org.apache.hugegraph.pd.watch.KvWatchSubject; import org.lognet.springboot.grpc.GRpcService; import org.springframework.beans.factory.annotation.Autowired; @@ -55,7 +56,7 @@ import lombok.extern.slf4j.Slf4j; /** - * kv存储的核心实现类 + * kv 存储的核心实现类 */ @Slf4j @GRpcService @@ -86,7 +87,7 @@ public void init() { } /** - * 普通的put + * 普通的 put * * @param request * @param responseObserver @@ -117,7 +118,7 @@ public void put(Kv request, StreamObserver responseObserver) { } /** - * 普通的get + * 普通的 get * * @param request * @param responseObserver @@ -132,7 +133,9 @@ public void get(K request, StreamObserver responseObserver) { try { String value = this.kvService.get(request.getKey()); builder.setHeader(getResponseHeader()); - if (value != null) builder.setValue(value); + if (value != null) { + builder.setValue(value); + } response = builder.build(); } catch (PDException e) { if (!isLeader()) { @@ -146,7 +149,7 @@ public void get(K request, StreamObserver responseObserver) { } /** - * 普通的delete + * 普通的 delete * * @param request * @param responseObserver @@ -245,7 +248,7 @@ public void scanPrefix(K request, StreamObserver responseObs } /** - * 获取随机非0字符串做Id + * 获取随机非 0 字符串做 Id * * @return */ @@ -260,7 +263,7 @@ private long getRandomLong() { } /** - * 普通的watch + * 普通的 watch * * @param request * @param responseObserver @@ -286,7 +289,7 @@ public void watch(WatchRequest request, StreamObserver responseOb } /** - * 普通的前缀watch + * 普通的前缀 watch * * @param request * @param responseObserver @@ -363,7 +366,9 @@ public void lock(LockRequest request, StreamObserver responseObser LockResponse.Builder builder = LockResponse.newBuilder(); try { long clientId = request.getClientId(); - if (clientId == 0) clientId = getRandomLong(); + if (clientId == 0) { + clientId = getRandomLong(); + } boolean locked = this.kvService.lock(request.getKey(), request.getTtl(), clientId); response = builder.setHeader(getResponseHeader()).setSucceed(locked).setClientId(clientId) @@ -394,10 +399,11 @@ public void lockWithoutReentrant(LockRequest request, if (clientId == 0) { clientId = getRandomLong(); } - boolean locked = this.kvService.lockWithoutReentrant( - request.getKey(), request.getTtl(), clientId); - response = builder.setHeader(getResponseHeader()).setSucceed(locked) - .setClientId(clientId).build(); + boolean locked = this.kvService.lockWithoutReentrant(request.getKey(), request.getTtl(), + clientId); + response = + builder.setHeader(getResponseHeader()).setSucceed(locked).setClientId(clientId) + .build(); } catch (PDException e) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getLockWithoutReentrantMethod(), request, @@ -449,7 +455,9 @@ public void unlock(LockRequest request, StreamObserver responseObs LockResponse.Builder builder = LockResponse.newBuilder(); try { long clientId = request.getClientId(); - if (clientId == 0) throw new PDException(-1, "incorrect clientId: 0"); + if (clientId == 0) { + throw new PDException(-1, "incorrect clientId: 0"); + } boolean 
unlocked = this.kvService.unlock(request.getKey(), clientId); response = builder.setHeader(getResponseHeader()).setSucceed(unlocked) .setClientId(clientId).build(); @@ -482,7 +490,9 @@ public void keepAlive(LockRequest request, StreamObserver response LockResponse.Builder builder = LockResponse.newBuilder(); try { long clientId = request.getClientId(); - if (clientId == 0) throw new PDException(-1, "incorrect clientId: 0"); + if (clientId == 0) { + throw new PDException(-1, "incorrect clientId: 0"); + } boolean alive = this.kvService.keepAlive(request.getKey(), clientId); response = builder.setHeader(getResponseHeader()).setSucceed(alive).setClientId(clientId) @@ -500,7 +510,7 @@ public void keepAlive(LockRequest request, StreamObserver response } /** - * 带超时时间的put + * 带超时时间的 put * * @param request * @param responseObserver @@ -528,7 +538,7 @@ public void putTTL(TTLRequest request, StreamObserver responseObser } /** - * 续活带有超时时间的key + * 续活带有超时时间的 key * * @param request * @param responseObserver diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java index 27d6723666..a7402886ff 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java @@ -98,7 +98,9 @@ public Metapb.Store updateStore(Metapb.Store store) throws PDException { } public boolean removeStore(Long storeId) throws PDException { - if (storeId == null) return false; + if (storeId == null) { + return false; + } return 0 != storeNodeService.removeStore(storeId); } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java index 306dfe23d8..26140e0802 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @@ -63,7 +63,6 @@ import org.apache.hugegraph.pd.grpc.watch.WatchGraphResponse; import org.apache.hugegraph.pd.grpc.watch.WatchResponse; import org.apache.hugegraph.pd.grpc.watch.WatchType; -import org.apache.hugegraph.pd.license.LicenseVerifierService; import org.apache.hugegraph.pd.pulse.PDPulseSubject; import org.apache.hugegraph.pd.pulse.PulseListener; import org.apache.hugegraph.pd.raft.RaftEngine; @@ -83,6 +82,7 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; +// TODO: uncomment later - remove license verifier service now @Slf4j @GRpcService public class PDService extends PDGrpc.PDImplBase implements ServiceGrpc, RaftStateListener { @@ -100,7 +100,7 @@ public class PDService extends PDGrpc.PDImplBase implements ServiceGrpc, RaftSta private IdService idService; private ConfigService configService; private LogService logService; - private LicenseVerifierService licenseVerifierService; + //private LicenseVerifierService licenseVerifierService; private StoreMonitorDataService storeMonitorDataService; private ManagedChannel channel; @@ -141,9 +141,9 @@ public LogService getLogService() { return logService; } - public LicenseVerifierService getLicenseVerifierService() { - return licenseVerifierService; - } + //public LicenseVerifierService getLicenseVerifierService() { + // return licenseVerifierService; + //} /** * 初始化 @@ -156,16 +156,16 @@ public void init() throws PDException { RaftEngine.getInstance().addStateListener(this); 
RaftEngine.getInstance().addStateListener(configService); RaftEngine.getInstance().init(pdConfig.getRaft()); - //pdConfig = configService.loadConfig(); onLeaderChanged中加载 + //pdConfig = configService.loadConfig(); onLeaderChanged 中加载 storeNodeService = new StoreNodeService(pdConfig); partitionService = new PartitionService(pdConfig, storeNodeService); taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService); idService = new IdService(pdConfig); logService = new LogService(pdConfig); storeMonitorDataService = new StoreMonitorDataService(pdConfig); - if (licenseVerifierService == null) { - licenseVerifierService = new LicenseVerifierService(pdConfig); - } + //if (licenseVerifierService == null) { + // licenseVerifierService = new LicenseVerifierService(pdConfig); + //} RaftEngine.getInstance().addStateListener(partitionService); pdConfig.setIdService(idService); @@ -193,7 +193,7 @@ public void onCompleted() { /** - * 监听分区指令,并转发给Store + * 监听分区指令,并转发给 Store */ partitionService.addInstructionListener(new PartitionInstructionListener() { private PartitionHeartbeatResponse.Builder getBuilder(Metapb.Partition partition) throws @@ -255,7 +255,7 @@ public void changePartitionKeyRange(Metapb.Partition partition, }); /** - * 监听分区状态改变消息,并转发给Client + * 监听分区状态改变消息,并转发给 Client */ partitionService.addStatusListener(new PartitionStatusListener() { @Override @@ -296,7 +296,7 @@ public void onShardListOp(Metapb.ShardGroup shardGroup) { }); /** - * 监听store状态改变消息,并转发给Client + * 监听 store 状态改变消息,并转发给 Client */ storeNodeService.addStatusListener(new StoreStatusListener() { @@ -344,7 +344,7 @@ public void onStoreRaftChanged(Metapb.Store store) { /** *

-     * 注册store,首次注册会生成新的store_id, store_id是store唯一标识
+     * Register a store. The first registration generates a new store_id, which is the store's unique identifier.
      * 
*/ @Override @@ -371,7 +371,7 @@ public void registerStore(Pdpb.RegisterStoreRequest request, } /** - * 根据store_id查找store + * 根据 store_id 查找 store */ @Override public void getStore(Pdpb.GetStoreRequest request, @@ -396,7 +396,7 @@ public void getStore(Pdpb.GetStoreRequest request, /** *
-     * 修改Store状态等信息.
+     * Update the store's state and other information.
      * 
*/ public void setStore(Pdpb.SetStoreRequest request, @@ -409,10 +409,10 @@ public void setStore(Pdpb.SetStoreRequest request, try { Metapb.StoreState state = request.getStore().getState(); Long storeId = request.getStore().getId(); - // 处于Pending状态,才可以上线 + // 处于 Pending 状态,才可以上线 Metapb.Store lastStore = storeNodeService.getStore(request.getStore().getId()); if (lastStore == null) { - // storeId不存在,抛出异常 + // storeId 不存在,抛出异常 throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, String.format("Store id %d does not exist!", storeId)); } @@ -437,7 +437,7 @@ public void setStore(Pdpb.SetStoreRequest request, } } logService.insertLog(LogService.NODE_CHANGE, LogService.GRPC, request.getStore()); - // 检查失败,状态改为Pending,把错误原因返回去 + // 检查失败,状态改为 Pending,把错误原因返回去 if (state.equals(Metapb.StoreState.Up)) { int cores = 0; long id = request.getStore().getId(); @@ -445,7 +445,7 @@ public void setStore(Pdpb.SetStoreRequest request, int nodeCount = 0; for (Metapb.Store store : stores) { if (store.getId() == id) { - // 获取之前注册的store中的cores 作为验证参数 + // 获取之前注册的 store 中的 cores 作为验证参数 cores = store.getCores(); } if (store.getState().equals(Metapb.StoreState.Up)) { @@ -453,7 +453,7 @@ public void setStore(Pdpb.SetStoreRequest request, } } try { - licenseVerifierService.verify(cores, nodeCount); + //licenseVerifierService.verify(cores, nodeCount); } catch (Exception e) { Metapb.Store store = Metapb.Store.newBuilder(request.getStore()) .setState(Metapb.StoreState.Pending).build(); @@ -488,15 +488,15 @@ public void setStore(Pdpb.SetStoreRequest request, if (resultMap.get("current_store_is_online") != null && (boolean) resultMap.get("current_store_is_online")) { log.info("updateStore removeActiveStores store {}", store.getId()); - // 将在线的store的状态设置为下线中,等待副本迁移 + // 将在线的 store 的状态设置为下线中,等待副本迁移 store = Metapb.Store.newBuilder(lastStore) .setState(Metapb.StoreState.Exiting).build(); // 进行分区迁移操作 taskService.movePartitions((Map>) resultMap.get( "movedPartitions")); } else { - // store已经离线的,不做副本迁移 - // 将状态改为Tombstone + // store 已经离线的,不做副本迁移 + // 将状态改为 Tombstone } } else { throw new PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, @@ -505,7 +505,7 @@ public void setStore(Pdpb.SetStoreRequest request, "the partitions of current store!"); } } - // 替换license 都走grpc + // 替换 license 都走 grpc store = storeNodeService.updateStore(store); response = Pdpb.SetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); @@ -519,7 +519,7 @@ public void setStore(Pdpb.SetStoreRequest request, } /** - * 返回所有的store,exclude_offline_stores=true,返回活跃的stores + * 返回所有的 store,exclude_offline_stores=true,返回活跃的 stores */ @Override public void getAllStores(Pdpb.GetAllStoresRequest request, @@ -548,7 +548,7 @@ public void getAllStores(Pdpb.GetAllStoresRequest request, } /** - * 处理store心跳 + * 处理 store 心跳 */ @Override public void storeHeartbeat(Pdpb.StoreHeartbeatRequest request, @@ -595,7 +595,7 @@ public void storeHeartbeat(Pdpb.StoreHeartbeatRequest request, /** *
-     * 查找key所属的分区
+     * Find the partition that the key belongs to.
      * 
*/ @Override @@ -624,7 +624,7 @@ public void getPartition(Pdpb.GetPartitionRequest request, /** *
-     * 查找HashCode所属的分区
+     * Find the partition that the HashCode belongs to.
      * 
*/ @Override @@ -651,7 +651,7 @@ public void getPartitionByCode(Pdpb.GetPartitionByCodeRequest request, } /** - * 根据partition_id查找partition + * 根据 partition_id 查找 partition */ @Override public void getPartitionByID(Pdpb.GetPartitionByIDRequest request, @@ -684,7 +684,7 @@ public void getPartitionByID(Pdpb.GetPartitionByIDRequest request, /** *
-     * 更新分区信息,主要用来更新分区key范围,调用此接口需谨慎,否则会造成数据丢失。
+     * Update partition info, mainly used to update the partition's key range. Call this interface with caution, otherwise data loss may occur.
      * 
*/ public void updatePartition(Pdpb.UpdatePartitionRequest request, @@ -708,7 +708,7 @@ public void updatePartition(Pdpb.UpdatePartitionRequest request, } /** - * 根据partition_id查找partition + * 根据 partition_id 查找 partition */ @Override public void delPartition(Pdpb.DelPartitionRequest request, @@ -723,7 +723,7 @@ public void delPartition(Pdpb.DelPartitionRequest request, request.getPartitionId()); if (partition != null) { partitionService.removePartition(request.getGraphName(), - (int) request.getPartitionId()); + request.getPartitionId()); response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader) .setPartition(partition) .build(); @@ -739,7 +739,7 @@ public void delPartition(Pdpb.DelPartitionRequest request, } /** - * 给定key范围查找所属的partition集合 + * 给定 key 范围查找所属的 partition 集合 */ @Override public void scanPartitions(Pdpb.ScanPartitionsRequest request, @@ -848,7 +848,7 @@ public void delGraph(Pdpb.DelGraphRequest request, /** *
-     * 根据条件查询分区信息, 包括Store、Graph等条件
+     * Query partition info by conditions such as Store and Graph.
      * 
*/ public void queryPartitions(Pdpb.QueryPartitionsRequest request, @@ -857,7 +857,7 @@ public void queryPartitions(Pdpb.QueryPartitionsRequest request, redirectToLeader(PDGrpc.getQueryPartitionsMethod(), request, observer); return; } - //TODO 临时采用遍历方案,后续使用rocksdb存储时,通过kv索引实现 + //TODO 临时采用遍历方案,后续使用 rocksdb 存储时,通过 kv 索引实现 Metapb.PartitionQuery query = request.getQuery(); List partitions = partitionService.getPartitions(query.getGraphName()); List result = new ArrayList<>(); @@ -986,7 +986,7 @@ public void getStoreStatus(Pdpb.GetAllStoresRequest request, } /** - * 读取PD配置 + * 读取 PD 配置 */ @Override public void getPDConfig(Pdpb.GetPDConfigRequest request, @@ -1010,7 +1010,7 @@ public void getPDConfig(Pdpb.GetPDConfigRequest request, } /** - * 修改PD配置 + * 修改 PD 配置 */ @Override public void setPDConfig(Pdpb.SetPDConfigRequest request, @@ -1028,7 +1028,7 @@ public void setPDConfig(Pdpb.SetPDConfigRequest request, } if (request.getPdConfig().getShardCount() > storeNodeService.getActiveStores().size()) { - // 不能大于活跃的store数量 + // 不能大于活跃的 store 数量 throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, "shard count can't be greater than the number of active " + "stores!"); @@ -1036,7 +1036,7 @@ public void setPDConfig(Pdpb.SetPDConfigRequest request, int oldShardCount = configService.getPDConfig().getShardCount(); int newShardCount = request.getPdConfig().getShardCount(); if (newShardCount > oldShardCount) { - // 如果副本数增大,则检查store内部的资源是否够用 + // 如果副本数增大,则检查 store 内部的资源是否够用 if (!isResourceEnough(oldShardCount, newShardCount)) { throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, "There is not enough disk space left!"); @@ -1147,7 +1147,7 @@ public void splitGraphData(Pdpb.SplitGraphDataRequest request, } /** - * 在store之间平衡数据 + * 在 store 之间平衡数据 */ public void movePartition(Pdpb.MovePartitionRequest request, StreamObserver observer) { @@ -1268,7 +1268,7 @@ public boolean isLeader() { //} /** - * 更新peerList + * 更新 peerList */ @Override public void changePeerList(Pdpb.ChangePeerListRequest request, @@ -1300,13 +1300,13 @@ public void changePeerList(Pdpb.ChangePeerListRequest request, public synchronized void onRaftLeaderChanged() { log.info("onLeaderChanged"); // channel = null; - if (licenseVerifierService == null) { - licenseVerifierService = new LicenseVerifierService(pdConfig); - } - licenseVerifierService.init(); + //if (licenseVerifierService == null) { + // licenseVerifierService = new LicenseVerifierService(pdConfig); + //} + //licenseVerifierService.init(); if (!isLeader()) { try { - // 关闭Client通知,Client重新向Leader发起连接 + // 关闭 Client 通知,Client 重新向 Leader 发起连接 String message = "lose leader"; PDPulseSubject.notifyError(message); PDWatchSubject.notifyError(message); @@ -1384,7 +1384,7 @@ public void delStore(Pdpb.DetStoreRequest request, try { Metapb.Store store = storeNodeService.getStore(storeId); if (Metapb.StoreState.Tombstone == store.getState()) { - // 只有已经被下线(Tombstone)的store可以被删除 + // 只有已经被下线 (Tombstone) 的 store 可以被删除 storeNodeService.removeStore(storeId); response = Pdpb.DetStoreResponse.newBuilder() .setHeader(okHeader) @@ -1427,10 +1427,10 @@ private boolean checkShardCount(int newShardCount) { } /** - * 检查store资源是否够用 + * 检查 store 资源是否够用 */ public boolean isResourceEnough(int oldShardCount, int newShardCount) { - // 活跃的store的资源是否够用 + // 活跃的 store 的资源是否够用 try { float expansionRatio = newShardCount / oldShardCount; // 占用的存储空间膨胀的倍数 @@ -1449,7 +1449,7 @@ public boolean isResourceEnough(int oldShardCount, int newShardCount) { } // 估计数据膨胀后占用的存储空间 newDataSize = 
(long) Math.ceil(currentDataSize * expansionRatio); - // 统计所有活跃的store里面可用的空间 + // 统计所有活跃的 store 里面可用的空间 List activeStores = storeNodeService.getActiveStores(); for (Metapb.Store store : activeStores) { Metapb.StoreStats storeStats = store.getStats(); @@ -1465,7 +1465,7 @@ public boolean isResourceEnough(int oldShardCount, int newShardCount) { /** *
-     * 对rocksdb进行compaction
+     * Perform compaction on rocksdb.
      * 
*/ public void dbCompaction(Pdpb.DbCompactionRequest request, diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java index 2e0dee58d1..2e5eefe5d3 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java @@ -255,7 +255,9 @@ public void notifyClientChangeLeader() { String key = entry.getKey(); String client = key.split(KvService.getDelimiter())[3]; String clientKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, client); - if (value == null) removeClient(null, key, clientKey); + if (value == null) { + removeClient(null, key, clientKey); + } for (int i = 0; i < 3; i++) { try { synchronized (value) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java index 199d92622b..8c18f7d729 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java @@ -164,7 +164,9 @@ private void addWatcher(WatchCreateRequest request) { return; } WatchType watchType = getWatchType(request); - if (watchType == null) return; + if (watchType == null) { + return; + } this.subject = getSubject(watchType); this.watcherId = createWatcherId(); diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java index 8892121fe6..87b8081339 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java @@ -17,8 +17,6 @@ package org.apache.hugegraph.pd.client; -import org.apache.hugegraph.pd.client.PDClient; -import org.apache.hugegraph.pd.client.PDConfig; import org.junit.After; import org.junit.BeforeClass; import org.junit.runner.RunWith; diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java index 143c9d6ade..d0bc7a4cbd 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.hugegraph.pd.client.PDClient; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.MetaTask; import org.apache.hugegraph.pd.grpc.Metapb; @@ -325,9 +324,7 @@ public void testChangePeerList() { public void testSplitData() { try { Metapb.PDConfig config = pdClient.getPDConfig(); - pdClient.setPDConfig(config.toBuilder() - .setMaxShardsPerStore(12) - .build()); + pdClient.setPDConfig(config.toBuilder().setMaxShardsPerStore(12).build()); System.out.println(pdClient.getPDConfig()); // 开始分区分裂 diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java index 53c7b27dc0..9d5f019d73 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java @@ -18,8 +18,9 @@ package org.apache.hugegraph.pd.common; import static org.junit.Assert.assertTrue; +import static 
org.junit.jupiter.api.Assertions.assertEquals; -import org.apache.hugegraph.pd.common.KVPair; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -67,6 +68,6 @@ public void testHashCode() { @Test public void testEquals() { var pair2 = new KVPair<>("key", 1); - assertTrue(pair2.equals(this.pair)); + Assert.assertEquals(pair2, this.pair); } } diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java index 638dea8c91..3377a5b732 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java @@ -21,13 +21,13 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.io.UnsupportedEncodingException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; -import org.apache.hugegraph.pd.common.PartitionCache; import org.apache.hugegraph.pd.grpc.Metapb; import org.junit.Before; import org.junit.Test; diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java index fcc3f34c42..b4fecf1d79 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java @@ -17,6 +17,8 @@ package org.apache.hugegraph.pd.service; +import static org.junit.jupiter.api.Assertions.assertEquals; + import java.util.List; import org.apache.hugegraph.pd.PartitionService; diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java index 018a63d356..cd9ae710b3 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.util.List; diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java index 21537fa148..4fec3f1f03 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java @@ -18,6 +18,7 @@ package org.apache.hugegraph.pd.service; import static org.junit.Assert.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertEquals; import org.apache.hugegraph.pd.StoreNodeService; import org.apache.hugegraph.pd.common.PDException; diff --git a/pom.xml b/pom.xml index 884495320d..6ec9ef0032 100644 --- a/pom.xml +++ b/pom.xml @@ -26,6 +26,7 @@ ${revision} pom + org.springframework.boot spring-boot-starter-parent @@ -135,6 +136,25 @@ + + org.apache.maven.plugins + maven-clean-plugin + + + + ${project.basedir}/ + + *.tar + *.tar.gz + .flattened-pom.xml + + + false + + + + + From 6092dc0fc1e827109190909b7fdffaab77694d59 Mon Sep 17 00:00:00 2001 From: 
What is broken can be reforged <34939118+GYXkeep@users.noreply.github.com> Date: Sat, 1 Jul 2023 18:25:35 +0800 Subject: [PATCH 11/18] refact: adapt for jraft v1.3.13 --- hg-pd-core/pom.xml | 2 +- .../apache/hugegraph/pd/raft/RaftEngine.java | 76 +++++++++++++++---- 2 files changed, 64 insertions(+), 14 deletions(-) diff --git a/hg-pd-core/pom.xml b/hg-pd-core/pom.xml index 8963680d29..a02a4c9deb 100644 --- a/hg-pd-core/pom.xml +++ b/hg-pd-core/pom.xml @@ -38,7 +38,7 @@ com.alipay.sofa jraft-core - 1.3.9 + 1.3.13 org.rocksdb diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java index 116bab48ce..910240cbd7 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java @@ -35,8 +35,10 @@ import com.alipay.sofa.jraft.JRaftUtils; import com.alipay.sofa.jraft.Node; import com.alipay.sofa.jraft.RaftGroupService; +import com.alipay.sofa.jraft.ReplicatorGroup; import com.alipay.sofa.jraft.Status; import com.alipay.sofa.jraft.conf.Configuration; +import com.alipay.sofa.jraft.core.Replicator; import com.alipay.sofa.jraft.entity.PeerId; import com.alipay.sofa.jraft.entity.Task; import com.alipay.sofa.jraft.error.RaftError; @@ -46,6 +48,7 @@ import com.alipay.sofa.jraft.rpc.RaftRpcServerFactory; import com.alipay.sofa.jraft.rpc.RpcServer; import com.alipay.sofa.jraft.util.Endpoint; +import com.alipay.sofa.jraft.util.ThreadId; import com.alipay.sofa.jraft.util.internal.ThrowUtil; import lombok.extern.slf4j.Slf4j; @@ -227,19 +230,18 @@ public List getMembers() { CompletableFuture future = raftRpcClient.getGrpcAddress(peerId.getEndpoint().toString()); - // TODO: uncomment later - jraft problem -// Metapb.ShardRole role = Metapb.ShardRole.Follower; -// if (peerEquals(peerId, raftNode.getLeaderId())) { -// role = Metapb.ShardRole.Leader; -// } else if (learners.contains(peerId)) { -// role = Metapb.ShardRole.Learner; -// var state = raftNode.getReplicatorState(peerId); -// if (state != null) { -// builder.setReplicatorState(state.name()); -// } -// } -// -// builder.setRole(role); + Metapb.ShardRole role = Metapb.ShardRole.Follower; + if (peerEquals(peerId, raftNode.getLeaderId())) { + role = Metapb.ShardRole.Leader; + } else if (learners.contains(peerId)) { + role = Metapb.ShardRole.Learner; + var state = getReplicatorState(peerId); + if (state != null) { + builder.setReplicatorState(state.name()); + } + } + + builder.setRole(role); try { if (future.isCompletedExceptionally()) { @@ -324,4 +326,52 @@ private boolean peerEquals(PeerId p1, PeerId p2) { } return Objects.equals(p1.getIp(), p2.getIp()) && Objects.equals(p1.getPort(), p2.getPort()); } + + private Replicator.State getReplicatorState(PeerId peerId) { + var replicateGroup = getReplicatorGroup(); + if (replicateGroup == null) { + return null; + } + + ThreadId threadId = replicateGroup.getReplicator(peerId); + if (threadId == null) { + return null; + } else { + Replicator r = (Replicator) threadId.lock(); + if (r == null) { + return Replicator.State.Probe; + } + Replicator.State result = getState(r); + threadId.unlock(); + return result; + } + } + + private ReplicatorGroup getReplicatorGroup() { + var clz = this.raftNode.getClass(); + try { + var f = clz.getDeclaredField("replicatorGroup"); + f.setAccessible(true); + var group = (ReplicatorGroup) f.get(this.raftNode); + f.setAccessible(false); + return group; + } catch (NoSuchFieldException | 
IllegalAccessException e) { + log.info("getReplicatorGroup: error {}", e.getMessage()); + return null; + } + } + + private Replicator.State getState(Replicator r) { + var clz = r.getClass(); + try { + var f = clz.getDeclaredField("state"); + f.setAccessible(true); + var state = (Replicator.State) f.get(this.raftNode); + f.setAccessible(false); + return state; + } catch (NoSuchFieldException | IllegalAccessException e) { + log.info("getReplicatorGroup: error {}", e.getMessage()); + return null; + } + } } From 3ea878c3a8e70ee9a29f5a2bc5b15ca7abeb699d Mon Sep 17 00:00:00 2001 From: imbajin Date: Wed, 5 Jul 2023 11:42:03 +0800 Subject: [PATCH 12/18] refact: GP-2141 handle low speed import - 6.9 from inner commit 0a95b1e587e65ea6a0a06774d53c80868f36af5b --------- Co-authored-by: zhangyingjie02 Co-authored-by: V_Galaxy <1904821183@qq.com> --- .../hugegraph/pd/client/AbstractClient.java | 32 +- .../hugegraph/pd/client/ClientCache.java | 321 ++++++++++++++++++ .../apache/hugegraph/pd/client/PDClient.java | 172 +++++----- .../hugegraph/pd/PartitionCacheTest.java | 4 +- hg-pd-common/pom.xml | 11 +- .../hugegraph/pd/common/GraphCache.java | 45 +++ .../apache/hugegraph/pd/StoreNodeService.java | 21 +- hg-pd-grpc/src/main/proto/pdpb.proto | 14 + .../hugegraph/pd/service/PDService.java | 39 ++- .../hugegraph/pd/watch/KvWatchSubject.java | 14 +- 10 files changed, 559 insertions(+), 114 deletions(-) create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java index 65dad10803..db48eeee37 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java @@ -47,20 +47,17 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -/** - * @author zhangyingjie - * @date 2022/6/20 - **/ @Slf4j public abstract class AbstractClient implements Closeable { + private static final ConcurrentHashMap chs = new ConcurrentHashMap<>(); public static Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build(); protected final Pdpb.RequestHeader header; protected final AbstractClientStubProxy stubProxy; protected final PDConfig config; protected ManagedChannel channel = null; - protected ConcurrentMap stubs = null; + protected volatile ConcurrentMap stubs = null; protected AbstractClient(PDConfig config) { String[] hosts = config.getServerHost().split(","); @@ -84,7 +81,7 @@ protected static void handleErrors(Pdpb.ResponseHeader header) throws PDExceptio } } - private AbstractBlockingStub getBlockingStub() throws PDException { + protected AbstractBlockingStub getBlockingStub() throws PDException { if (stubProxy.getBlockingStub() == null) { synchronized (this) { if (stubProxy.getBlockingStub() == null) { @@ -97,10 +94,12 @@ private AbstractBlockingStub getBlockingStub() throws PDException { } } } - return stubProxy.getBlockingStub(); + return (AbstractBlockingStub) stubProxy.getBlockingStub() + .withDeadlineAfter(config.getGrpcTimeOut(), + TimeUnit.MILLISECONDS); } - private AbstractStub getStub() throws PDException { + protected AbstractStub getStub() throws PDException { if (stubProxy.getStub() == null) { synchronized (this) { if 
(stubProxy.getStub() == null) { @@ -129,8 +128,8 @@ private String resetStub() { .withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS); try { - GetMembersRequest request = - Pdpb.GetMembersRequest.newBuilder().setHeader(header).build(); + GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() + .setHeader(header).build(); GetMembersResponse members = blockingStub.getMembers(request); Metapb.Member leader = members.getLeader(); leaderHost = leader.getGrpcUrl(); @@ -192,8 +191,7 @@ private AbstractBlockingStub getConcurrentBlockingStub(String address) { } protected KVPair concurrentBlockingUnaryCall( - MethodDescriptor method, ReqT req, Predicate predicate) throws - PDException { + MethodDescriptor method, ReqT req, Predicate predicate) { LinkedList hostList = this.stubProxy.getHostList(); if (this.stubs == null) { synchronized (this) { @@ -204,9 +202,8 @@ protected KVPair concurrentBlockingUnaryCall( } Stream respTStream = hostList.parallelStream().map((address) -> { AbstractBlockingStub stub = getConcurrentBlockingStub(address); - RespT resp = - ClientCalls.blockingUnaryCall(stub.getChannel(), method, stub.getCallOptions(), - req); + RespT resp = ClientCalls.blockingUnaryCall(stub.getChannel(), + method, stub.getCallOptions(), req); return resp; }); KVPair pair; @@ -225,8 +222,7 @@ protected KVPair concurrentBlockingUnaryCall( protected void streamingCall(MethodDescriptor method, ReqT request, StreamObserver responseObserver, - int retry) throws - PDException { + int retry) throws PDException { AbstractStub stub = getStub(); try { ClientCall call = stub.getChannel().newCall(method, stub.getCallOptions()); @@ -263,7 +259,7 @@ private void closeChannel(ManagedChannel channel) { continue; } } catch (Exception e) { - log.info("Close channel with error : {}.", e); + log.info("Close channel with error : ", e); } } } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java new file mode 100644 index 0000000000..71930e3351 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java @@ -0,0 +1,321 @@ +package org.apache.hugegraph.pd.client; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.hugegraph.pd.common.GraphCache; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PartitionUtils; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Metapb.Partition; +import org.apache.hugegraph.pd.grpc.Metapb.Shard; +import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup; +import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; + +import com.google.common.collect.Range; +import com.google.common.collect.RangeMap; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class ClientCache { + + private volatile Map> groups; + private volatile Map stores; + private volatile Map caches = new ConcurrentHashMap<>(); + private final AtomicBoolean initialized = new AtomicBoolean(false); + private final PDClient client; + + public ClientCache(PDClient pdClient) { + groups = new ConcurrentHashMap<>(); + stores = new ConcurrentHashMap<>(); + client = pdClient; + } + + private 
GraphCache getGraphCache(String graphName) { + GraphCache graph; + if ((graph = caches.get(graphName)) == null) { + synchronized (caches) { + if ((graph = caches.get(graphName)) == null) { + graph = new GraphCache(); + caches.put(graphName, graph); + } + } + } + return graph; + } + + public KVPair getPartitionById(String graphName, int partId) { + try { + GraphCache graph = initGraph(graphName); + Partition partition = graph.getPartition(partId); + Shard shard = groups.get(partId).getValue(); + if (partition == null || shard == null) { + return null; + } + return new KVPair<>(partition, shard); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private KVPair getPair(int partId, GraphCache graph) { + Partition p = graph.getPartition(partId); + KVPair pair = groups.get(partId); + if (p != null && pair != null) { + Shard s = pair.getValue(); + if (s == null) { + pair.setValue(getLeader(partId)); + return new KVPair<>(p, pair.getValue()); + } else { + return new KVPair<>(p, s); + } + } + return null; + } + + /** + * 根据key的hashcode返回分区信息 + * + * @param graphName + * @param code + * @return + */ + public KVPair getPartitionByCode(String graphName, long code) { + try { + GraphCache graph = initGraph(graphName); + RangeMap range = graph.getRange(); + Integer pId = range.get(code); + if (pId != null) { + return getPair(pId, graph); + } + return null; + } catch (PDException e) { + throw new RuntimeException(e); + } + } + + private GraphCache initGraph(String graphName) throws PDException { + initCache(); + GraphCache graph = getGraphCache(graphName); + if (!graph.getInitialized().get()) { + synchronized (graph) { + if (!graph.getInitialized().get()) { + CachePartitionResponse pc = client.getPartitionCache(graphName); + RangeMap range = graph.getRange(); + List ps = pc.getPartitionsList(); + HashMap gps = new HashMap<>(ps.size(), 1); + for (Partition p : ps) { + gps.put(p.getId(), p); + range.put(Range.closedOpen(p.getStartKey(), p.getEndKey()), p.getId()); + } + graph.setPartitions(gps); + graph.getInitialized().set(true); + } + } + } + return graph; + } + + private void initCache() throws PDException { + if (!initialized.get()) { + synchronized (this) { + if (!initialized.get()) { + CacheResponse cache = client.getClientCache(); + List shardGroups = cache.getShardsList(); + for (ShardGroup s : shardGroups) { + this.groups.put(s.getId(), new KVPair<>(s, getLeader(s.getId()))); + } + List stores = cache.getStoresList(); + for (Metapb.Store store : stores) { + this.stores.put(store.getId(), store); + } + List graphs = cache.getGraphsList(); + for (Metapb.Graph g : graphs) { + GraphCache c = new GraphCache(g); + caches.put(g.getGraphName(), c); + } + initialized.set(true); + } + } + } + } + + /** + * 返回key所在的分区信息 + * + * @param key + * @return + */ + public KVPair getPartitionByKey(String graphName, byte[] key) { + int code = PartitionUtils.calcHashcode(key); + return getPartitionByCode(graphName, code); + } + + public boolean update(String graphName, int partId, Partition partition) { + GraphCache graph = getGraphCache(graphName); + try { + Partition p = graph.getPartition(partId); + if (p != null && p.equals(partition)) { + return false; + } + RangeMap range = graph.getRange(); + graph.addPartition(partId, partition); + if (p != null) { + // old [1-3) 被 [2-3)覆盖了。当 [1-3) 变成[1-2) 不应该删除原先的[1-3) + // 当确认老的 start, end 都是自己的时候,才可以删除老的. 
(即还没覆盖) + if (Objects.equals(partition.getId(), range.get(partition.getStartKey())) && + Objects.equals(partition.getId(), range.get(partition.getEndKey() - 1))) { + range.remove(range.getEntry(partition.getStartKey()).getKey()); + } + } + range.put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); + } catch (Exception e) { + throw new RuntimeException(e); + } + return true; + } + + public void removePartition(String graphName, int partId) { + GraphCache graph = getGraphCache(graphName); + Partition p = graph.removePartition(partId); + if (p != null) { + RangeMap range = graph.getRange(); + if (Objects.equals(p.getId(), range.get(p.getStartKey())) && + Objects.equals(p.getId(), range.get(p.getEndKey() - 1))) { + range.remove(range.getEntry(p.getStartKey()).getKey()); + } + } + } + + /** + * remove all partitions + */ + public void removePartitions() { + for (Entry entry : caches.entrySet()) { + removePartitions(entry.getValue()); + } + } + + private void removePartitions(GraphCache graph) { + graph.getState().clear(); + graph.getRange().clear(); + } + + /** + * remove partition cache of graphName + * + * @param graphName + */ + public void removeAll(String graphName) { + GraphCache graph = caches.get(graphName); + if (graph != null) { + removePartitions(graph); + } + } + + public boolean updateShardGroup(ShardGroup shardGroup) { + KVPair old = groups.get(shardGroup.getId()); + Shard leader = getLeader(shardGroup); + if (old != null) { + old.setKey(shardGroup); + old.setValue(leader); + return false; + } + groups.put(shardGroup.getId(), new KVPair<>(shardGroup, leader)); + return true; + } + + public void deleteShardGroup(int shardGroupId) { + groups.remove(shardGroupId); + } + + public ShardGroup getShardGroup(int groupId) { + KVPair pair = groups.get(groupId); + if (pair != null) { + return pair.getKey(); + } + return null; + } + + public boolean addStore(Long storeId, Metapb.Store store) { + Metapb.Store oldStore = stores.get(storeId); + if (oldStore != null && oldStore.equals(store)) { + return false; + } + stores.put(storeId, store); + return true; + } + + public Metapb.Store getStoreById(Long storeId) { + return stores.get(storeId); + } + + public void removeStore(Long storeId) { + stores.remove(storeId); + } + + public void reset() { + groups = new ConcurrentHashMap<>(); + stores = new ConcurrentHashMap<>(); + caches = new ConcurrentHashMap<>(); + } + + public Shard getLeader(int partitionId) { + KVPair pair = groups.get(partitionId); + if (pair != null) { + if (pair.getValue() != null) { + return pair.getValue(); + } + for (Shard shard : pair.getKey().getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + pair.setValue(shard); + return shard; + } + } + } + + return null; + } + + public Shard getLeader(ShardGroup shardGroup) { + if (shardGroup != null) { + for (Shard shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + return shard; + } + } + } + + return null; + } + + public void updateLeader(int partitionId, Shard leader) { + KVPair pair = groups.get(partitionId); + if (pair != null && leader != null) { + Shard l = getLeader(partitionId); + if (l == null || leader.getStoreId() != l.getStoreId()) { + ShardGroup shardGroup = pair.getKey(); + ShardGroup.Builder builder = ShardGroup.newBuilder(shardGroup).clearShards(); + for (var shard : shardGroup.getShardsList()) { + builder.addShards( + Shard.newBuilder() + .setStoreId(shard.getStoreId()) + .setRole(shard.getStoreId() == leader.getStoreId() ? 
+ Metapb.ShardRole.Leader : Metapb.ShardRole.Follower) + .build() + ); + } + pair.setKey(builder.build()); + pair.setValue(leader); + } + } + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java index 6616cb340c..16fdc8d3a2 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java @@ -25,12 +25,18 @@ import org.apache.hugegraph.pd.common.KVPair; import org.apache.hugegraph.pd.common.PDException; -import org.apache.hugegraph.pd.common.PartitionCache; import org.apache.hugegraph.pd.common.PartitionUtils; import org.apache.hugegraph.pd.grpc.MetaTask; import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup; import org.apache.hugegraph.pd.grpc.PDGrpc; import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionByCodeRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionResponse; import org.apache.hugegraph.pd.grpc.watch.WatchResponse; import org.apache.hugegraph.pd.watch.NodeEvent; import org.apache.hugegraph.pd.watch.PartitionEvent; @@ -53,7 +59,7 @@ public class PDClient { private final PDConfig config; private final Pdpb.RequestHeader header; - private final PartitionCache cache; + private final ClientCache cache; private final StubProxy stubProxy; private final List eventListeners; private PDWatch.Watcher partitionWatcher; @@ -65,10 +71,9 @@ public class PDClient { private PDClient(PDConfig config) { this.config = config; this.header = Pdpb.RequestHeader.getDefaultInstance(); - this.cache = new PartitionCache(); this.stubProxy = new StubProxy(config.getServerHost().split(",")); this.eventListeners = new CopyOnWriteArrayList<>(); - + this.cache = new ClientCache(this); } /** @@ -78,8 +83,7 @@ private PDClient(PDConfig config) { * @return */ public static PDClient create(PDConfig config) { - PDClient client = new PDClient(config); - return client; + return new PDClient(config); } private static void handleResponseError(Pdpb.ResponseHeader header) throws @@ -95,9 +99,7 @@ private static void handleResponseError(Pdpb.ResponseHeader header) throws } private synchronized void newBlockingStub() throws PDException { - if (stubProxy.get() != null) { - return; - } + if (stubProxy.get() != null) return; String host = newLeaderStub(); if (host.isEmpty()) { throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, @@ -107,12 +109,12 @@ private synchronized void newBlockingStub() throws PDException { if (config.isEnableCache()) { log.info("PDClient enable cache, init PDWatch object"); this.pdPulse = new PDPulseImpl(host); - partitionWatcher = pdWatch.watchPartition(new PDWatch.Listener() { + partitionWatcher = pdWatch.watchPartition(new PDWatch.Listener<>() { @Override public void onNext(PartitionEvent response) { - // log.info("PDClient receive partition event {}-{} {}", - // response.getGraph(), response.getPartitionId(), response - // .getChangeType()); + log.info("PDClient receive partition event {}-{} {}", + response.getGraph(), response.getPartitionId(), + response.getChangeType()); invalidPartitionCache(response.getGraph(), response.getPartitionId()); if 
(response.getChangeType() == PartitionEvent.ChangeType.DEL) { @@ -181,9 +183,9 @@ public void onCompleted() { @Override public void onNext(WatchResponse response) { var shardResponse = response.getShardGroupResponse(); - log.info("PDClient receive shard group event: raft {}-{}", - shardResponse.getShardGroupId(), - shardResponse.getType()); + // log.info("PDClient receive shard group event: raft {}-{}", shardResponse + // .getShardGroupId(), + // shardResponse.getType()); if (config.isEnableCache()) { switch (shardResponse.getType()) { case WATCH_CHANGE_TYPE_DEL: @@ -246,6 +248,15 @@ private PDGrpc.PDBlockingStub getStub() throws PDException { TimeUnit.MILLISECONDS); } + private PDGrpc.PDBlockingStub newStub() throws PDException { + if (stubProxy.get() == null) { + newBlockingStub(); + } + return PDGrpc.newBlockingStub(stubProxy.get().getChannel()) + .withDeadlineAfter(config.getGrpcTimeOut(), + TimeUnit.MILLISECONDS); + } + private String newLeaderStub() { String leaderHost = ""; for (int i = 0; i < stubProxy.getHostCount(); i++) { @@ -406,6 +417,25 @@ public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDExce return response.getClusterStats(); } + private KVPair getKvPair(String graphName, byte[] key, + KVPair partShard) throws + PDException { + if (partShard == null) { + GetPartitionRequest request = GetPartitionRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setKey(ByteString.copyFrom(key)) + .build(); + GetPartitionResponse response = + blockingUnaryCall(PDGrpc.getGetPartitionMethod(), request); + handleResponseError(response.getHeader()); + partShard = new KVPair<>(response.getPartition(), response.getLeader()); + cache.update(graphName, partShard.getKey().getId(), partShard.getKey()); + } + return partShard; + } + /** * 查询Key所属分区信息 * @@ -418,34 +448,16 @@ public KVPair getPartition(String graphName, byt PDException { // 先查cache,cache没有命中,在调用PD KVPair partShard = cache.getPartitionByKey(graphName, key); - if (partShard == null) { - Pdpb.GetPartitionRequest request = Pdpb.GetPartitionRequest.newBuilder() - .setHeader(header) - .setGraphName(graphName) - .setKey(ByteString.copyFrom( - key)).build(); - Pdpb.GetPartitionResponse response = - blockingUnaryCall(PDGrpc.getGetPartitionMethod(), request); - handleResponseError(response.getHeader()); - partShard = new KVPair<>(response.getPartition(), response.getLeader()); - if (config.isEnableCache()) { - cache.addPartition(graphName, partShard.getKey().getId(), partShard.getKey()); - } - } - - if (partShard.getValue() == null) { - var shardGroup = getShardGroup(partShard.getKey().getId()); - if (shardGroup != null) { - for (var shard : shardGroup.getShardsList()) { - if (shard.getRole() == Metapb.ShardRole.Leader) { - partShard.setValue(shard); - } - } - } else { - log.error("getPartition: get shard group failed, {}", partShard.getKey().getId()); - } - } + partShard = getKvPair(graphName, key, partShard); + return partShard; + } + public KVPair getPartition(String graphName, byte[] key, + int code) throws + PDException { + KVPair partShard = + cache.getPartitionByCode(graphName, code); + partShard = getKvPair(graphName, key, partShard); return partShard; } @@ -464,26 +476,20 @@ public KVPair getPartitionByCode(String graphNam KVPair partShard = cache.getPartitionByCode(graphName, hashCode); if (partShard == null) { - Pdpb.GetPartitionByCodeRequest request = Pdpb.GetPartitionByCodeRequest.newBuilder() - .setHeader( - header) - .setGraphName( - graphName) - .setCode( - hashCode) - .build(); - 
Pdpb.GetPartitionResponse response = + GetPartitionByCodeRequest request = GetPartitionByCodeRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setCode(hashCode).build(); + GetPartitionResponse response = blockingUnaryCall(PDGrpc.getGetPartitionByCodeMethod(), request); handleResponseError(response.getHeader()); partShard = new KVPair<>(response.getPartition(), response.getLeader()); - if (config.isEnableCache()) { - cache.addPartition(graphName, partShard.getKey().getId(), partShard.getKey()); - cache.updateShardGroup(getShardGroup(partShard.getKey().getId())); - } + cache.update(graphName, partShard.getKey().getId(), partShard.getKey()); + cache.updateShardGroup(getShardGroup(partShard.getKey().getId())); } if (partShard.getValue() == null) { - var shardGroup = getShardGroup(partShard.getKey().getId()); + ShardGroup shardGroup = getShardGroup(partShard.getKey().getId()); if (shardGroup != null) { for (var shard : shardGroup.getShardsList()) { if (shard.getRole() == Metapb.ShardRole.Leader) { @@ -495,7 +501,6 @@ public KVPair getPartitionByCode(String graphNam partShard.getKey().getId()); } } - return partShard; } @@ -526,12 +531,12 @@ public KVPair getPartitionById(String graphName, .setPartitionId( partId) .build(); - Pdpb.GetPartitionResponse response = + GetPartitionResponse response = blockingUnaryCall(PDGrpc.getGetPartitionByIDMethod(), request); handleResponseError(response.getHeader()); partShard = new KVPair<>(response.getPartition(), response.getLeader()); if (config.isEnableCache()) { - cache.addPartition(graphName, partShard.getKey().getId(), partShard.getKey()); + cache.update(graphName, partShard.getKey().getId(), partShard.getKey()); cache.updateShardGroup(getShardGroup(partShard.getKey().getId())); } } @@ -551,8 +556,8 @@ public KVPair getPartitionById(String graphName, return partShard; } - public Metapb.ShardGroup getShardGroup(int partId) throws PDException { - Metapb.ShardGroup group = cache.getShardGroup(partId); + public ShardGroup getShardGroup(int partId) throws PDException { + ShardGroup group = cache.getShardGroup(partId); if (group == null) { Pdpb.GetShardGroupRequest request = Pdpb.GetShardGroupRequest.newBuilder() .setHeader(header) @@ -569,7 +574,7 @@ public Metapb.ShardGroup getShardGroup(int partId) throws PDException { return group; } - public void updateShardGroup(Metapb.ShardGroup shardGroup) throws PDException { + public void updateShardGroup(ShardGroup shardGroup) throws PDException { Pdpb.UpdateShardGroupRequest request = Pdpb.UpdateShardGroupRequest.newBuilder() .setHeader(header) .setShardGroup( @@ -683,9 +688,9 @@ public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException { } public Metapb.Graph getGraph(String graphName) throws PDException { - Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder() - .setGraphName(graphName) - .build(); + GetGraphRequest request = GetGraphRequest.newBuilder() + .setGraphName(graphName) + .build(); Pdpb.GetGraphResponse response = blockingUnaryCall(PDGrpc.getGetGraphMethod(), request); @@ -695,10 +700,10 @@ public Metapb.Graph getGraph(String graphName) throws PDException { public Metapb.Graph getGraphWithOutException(String graphName) throws PDException { - Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder() - .setGraphName( - graphName) - .build(); + GetGraphRequest request = GetGraphRequest.newBuilder() + .setGraphName( + graphName) + .build(); Pdpb.GetGraphResponse response = blockingUnaryCall( PDGrpc.getGetGraphMethod(), request); return response.getGraph(); 
@@ -814,8 +819,8 @@ public void updatePartitionLeader(String graphName, int partId, long leaderStore */ public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader) { if (config.isEnableCache()) { - cache.updatePartition(partition.getGraphName(), partition.getId(), partition); - cache.updateShardGroupLeader(partition.getId(), leader); + cache.update(partition.getGraphName(), partition.getId(), partition); + cache.updateLeader(partition.getId(), leader); } } @@ -1228,10 +1233,25 @@ public void changeShard(int groupId, List shards) throws PDExcepti handleResponseError(response.getHeader()); } - public PartitionCache getCache() { + public ClientCache getCache() { return cache; } + public CacheResponse getClientCache() throws PDException { + GetGraphRequest request = GetGraphRequest.newBuilder().setHeader(header).build(); + CacheResponse cache = getStub().getCache(request); + handleResponseError(cache.getHeader()); + return cache; + } + + public CachePartitionResponse getPartitionCache(String graph) throws PDException { + GetGraphRequest request = + GetGraphRequest.newBuilder().setHeader(header).setGraphName(graph).build(); + CachePartitionResponse ps = getStub().getPartitions(request); + handleResponseError(ps.getHeader()); + return ps; + } + public void updatePdRaft(String raftConfig) throws PDException { Pdpb.UpdatePdRaftRequest request = Pdpb.UpdatePdRaftRequest.newBuilder() .setHeader(header) @@ -1258,11 +1278,7 @@ static class StubProxy { private volatile PDGrpc.PDBlockingStub stub; public StubProxy(String[] hosts) { - for (String host : hosts) { - if (!host.isEmpty()) { - hostList.offer(host); - } - } + for (String host : hosts) if (!host.isEmpty()) hostList.offer(host); } public String nextHost() { diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java index af855451b0..0b937f4ed2 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java @@ -30,8 +30,6 @@ import com.google.common.collect.RangeMap; import com.google.common.collect.TreeRangeMap; -// import org.junit.Test; - public class PartitionCacheTest { // @Test @@ -43,7 +41,7 @@ public void test() { .setStartKey(i * 10) .setEndKey((i + 1) * 10) .build(), null); - cache.addPartition("aa", i, partShards.getKey()); + cache.updatePartition("aa", i, partShards.getKey()); } for (int i = 0; i < 100; i++) { diff --git a/hg-pd-common/pom.xml b/hg-pd-common/pom.xml index 7732b0f58f..4580d14c7e 100644 --- a/hg-pd-common/pom.xml +++ b/hg-pd-common/pom.xml @@ -40,5 +40,14 @@ hg-pd-grpc ${revision} + + org.projectlombok + lombok + + + org.apache.commons + commons-collections4 + 4.4 + -
\ No newline at end of file +
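
As an illustrative aside (not part of this patch): the ClientCache added above and the GraphCache added below both resolve a key's hash code to its owning partition through a Guava RangeMap populated with Range.closedOpen(startKey, endKey). The standalone sketch here is only a sketch of that lookup semantics; the class name RangeLookupDemo and all partition ids / key ranges are made-up sample values, and it assumes Guava is on the classpath, as the GraphCache/ClientCache code already requires. It also shows why the cache update/remove code above checks both range.get(startKey) and range.get(endKey - 1) before dropping an old mapping.

import com.google.common.collect.Range;
import com.google.common.collect.RangeMap;
import com.google.common.collect.TreeRangeMap;

// Standalone sketch of the RangeMap lookup used by GraphCache/ClientCache.
// All partition ids and key ranges below are made-up sample values.
public class RangeLookupDemo {

    public static void main(String[] args) {
        RangeMap<Long, Integer> range = TreeRangeMap.create();

        // Register two partitions by their [startKey, endKey) hash ranges.
        range.put(Range.closedOpen(0L, 32768L), 1);      // partition 1
        range.put(Range.closedOpen(32768L, 65536L), 2);  // partition 2

        // A key whose hash code is 40000 falls into [32768, 65536) -> partition 2.
        System.out.println(range.get(40000L));           // 2

        // closedOpen: the end key is exclusive, so 32768 already belongs to partition 2.
        System.out.println(range.get(32768L));           // 2

        // An overlapping put only overwrites the overlapped portion. This is why the
        // cache update/remove code checks both range.get(startKey) and
        // range.get(endKey - 1) before removing an old mapping: after a split, part of
        // the old range may already belong to another partition.
        range.put(Range.closedOpen(16384L, 32768L), 3);  // partition 1 split off [16384, 32768)
        System.out.println(range.get(10000L));           // still 1
        System.out.println(range.get(20000L));           // now 3
    }
}
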
diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java new file mode 100644 index 0000000000..de3b395a92 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java @@ -0,0 +1,45 @@ +package org.apache.hugegraph.pd.common; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import org.apache.hugegraph.pd.grpc.Metapb.Graph; +import org.apache.hugegraph.pd.grpc.Metapb.Partition; + +import com.google.common.collect.RangeMap; +import com.google.common.collect.TreeRangeMap; + +import lombok.Data; + +@Data +public class GraphCache { + + private Graph graph; + private AtomicBoolean initialized = new AtomicBoolean(false); + private AtomicBoolean writing = new AtomicBoolean(false); + private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + private Map state = new ConcurrentHashMap<>(); + private Map partitions = new ConcurrentHashMap<>(); + private RangeMap range = TreeRangeMap.create(); + + public GraphCache(Graph graph) { + this.graph = graph; + } + + public GraphCache() { + } + + public Partition getPartition(Integer id) { + return partitions.get(id); + } + + public Partition addPartition(Integer id, Partition p) { + return partitions.put(id, p); + } + + public Partition removePartition(Integer id) { + return partitions.remove(id); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java index d8387e944c..23fa109a49 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java @@ -36,6 +36,7 @@ import org.apache.hugegraph.pd.grpc.Metapb.GraphModeReason; import org.apache.hugegraph.pd.grpc.Metapb.GraphState; import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; import org.apache.hugegraph.pd.grpc.pulse.ConfChangeType; import org.apache.hugegraph.pd.meta.MetadataFactory; import org.apache.hugegraph.pd.meta.MetadataKeyHelper; @@ -115,7 +116,7 @@ public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) } updateClusterStatus(state); } catch (PDException e) { - log.error("onPartitionChanged exception {}", e); + log.error("onPartitionChanged exception: ", e); } } } @@ -271,9 +272,7 @@ public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDExcept log.info("updateStore storeId: {}, address: {}, state: {}", store.getId(), store.getAddress(), store.getState()); Metapb.Store lastStore = storeInfoMeta.getStore(store.getId()); - if (lastStore == null) { - return null; - } + if (lastStore == null) return null; Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore).clearLabels().clearStats(); store = builder.mergeFrom(store).build(); @@ -517,7 +516,7 @@ public synchronized List reallocShards(Metapb.ShardGroup shardGrou // 需要增加shard log.info("reallocShards ShardGroup {}, add shards from {} to {}", shardGroup.getId(), shards.size(), shardCount); - int storeIdx = (int) shardGroup.getId() % stores.size(); //store分配规则,简化为取模 + int storeIdx = shardGroup.getId() % stores.size(); //store分配规则,简化为取模 for (int addCount = shardCount - shards.size(); addCount > 0; ) { // 检查是否已经存在 if (!isStoreInShards(shards, stores.get(storeIdx).getId())) { @@ 
-588,7 +587,6 @@ public synchronized int splitShardGroups(List> groups) /** * 分配shard group,为分裂做准备 * - * @param groups * @return true * @throws PDException */ @@ -1063,4 +1061,15 @@ public Metapb.Shard getLeader(Metapb.Partition partition, int initIdx) { return leader; } + public CacheResponse getCache() throws PDException { + + List stores = getStores(); + List groups = getShardGroups(); + List graphs = partitionService.getGraphs(); + CacheResponse cache = CacheResponse.newBuilder().addAllGraphs(graphs) + .addAllShards(groups) + .addAllStores(stores) + .build(); + return cache; + } } diff --git a/hg-pd-grpc/src/main/proto/pdpb.proto b/hg-pd-grpc/src/main/proto/pdpb.proto index 45a42226dc..cba5a49a4b 100644 --- a/hg-pd-grpc/src/main/proto/pdpb.proto +++ b/hg-pd-grpc/src/main/proto/pdpb.proto @@ -85,6 +85,9 @@ service PD { rpc ChangeShard(ChangeShardRequest) returns (ChangeShardResponse) {} // 更新pd raft rpc updatePdRaft(UpdatePdRaftRequest) returns (UpdatePdRaftResponse) {} + + rpc getCache(GetGraphRequest) returns (CacheResponse) {} + rpc getPartitions(GetGraphRequest) returns (CachePartitionResponse) {} } message RequestHeader { @@ -574,3 +577,14 @@ message UpdatePdRaftResponse{ ResponseHeader header = 1; string message = 2; } +message CacheResponse { + ResponseHeader header = 1; + // 返回修改后的Store + repeated metapb.Store stores = 2; + repeated metapb.ShardGroup shards = 3; + repeated metapb.Graph graphs = 4; +} +message CachePartitionResponse { + ResponseHeader header = 1; + repeated metapb.Partition partitions = 2; +} \ No newline at end of file diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java index 26140e0802..a2505dd7c1 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @@ -48,6 +48,9 @@ import org.apache.hugegraph.pd.grpc.Metapb; import org.apache.hugegraph.pd.grpc.PDGrpc; import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest; import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseRequest; import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseResponse; import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; @@ -770,7 +773,7 @@ public void scanPartitions(Pdpb.ScanPartitionsRequest request, /** * 获得图信息 */ - public void getGraph(Pdpb.GetGraphRequest request, + public void getGraph(GetGraphRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getGetGraphMethod(), request, observer); @@ -1708,6 +1711,39 @@ public void updatePdRaft(Pdpb.UpdatePdRaftRequest request, observer.onCompleted(); } + public void getCache(GetGraphRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetCacheMethod(), request, observer); + return; + } + CacheResponse response; + try { + response = CacheResponse.newBuilder().mergeFrom(storeNodeService.getCache()) + .setHeader(okHeader).build(); + } catch (PDException e) { + log.error("get cache exception, ", e); + response = CacheResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + public void getPartitions(GetGraphRequest request, + StreamObserver observer) { + if (!isLeader()) { + 
redirectToLeader(PDGrpc.getGetPartitionsMethod(), request, observer); + return; + } + CachePartitionResponse response; + List partitions = partitionService.getPartitions(request.getGraphName()); + response = CachePartitionResponse.newBuilder().addAllPartitions(partitions) + .setHeader(okHeader).build(); + observer.onNext(response); + observer.onCompleted(); + } + + private List> parseConfig(String conf) { List> result = new LinkedList<>(); @@ -1740,5 +1776,4 @@ private boolean peerEquals(PeerId p1, PeerId p2) { } return Objects.equals(p1.getIp(), p2.getIp()) && Objects.equals(p1.getPort(), p2.getPort()); } - } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java index 2e5eefe5d3..060b3353b3 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java @@ -34,6 +34,7 @@ import org.apache.hugegraph.pd.grpc.kv.WatchState; import org.apache.hugegraph.pd.grpc.kv.WatchType; +import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; @@ -53,8 +54,8 @@ public class KvWatchSubject { private static final ConcurrentMap> clients = new ConcurrentHashMap<>(); private final KvService kvService; - BiPredicate equal = (kvKey, watchKey) -> kvKey.equals(watchKey); - BiPredicate startWith = (kvKey, watchKey) -> kvKey.startsWith(watchKey); + BiPredicate equal = String::equals; + BiPredicate startWith = String::startsWith; /** * 会使用以下三组key: @@ -134,7 +135,7 @@ public void notifyObserver(String key, WatchType watchType, assert values.length == 4; String watchKey = values[2]; String c = values[3]; - long clientId = Long.valueOf(c); + long clientId = Long.parseLong(c); LinkedList watchEvents = new LinkedList<>(); for (WatchKv kv : kvs) { String kvKey = kv.getKey(); @@ -162,6 +163,8 @@ public void notifyObserver(String key, WatchType watchType, } else { log.info("cannot find StreamObserver for clientId:{}", clientId); } + } catch (StatusRuntimeException ignored) { + } catch (Exception e) { log.warn("notifyObserver with error:{}", clientId, e); } @@ -231,7 +234,7 @@ public void keepClientAlive() { private void removeClient(StreamObserver value, String key, String clientKey) { try { - log.info("remove null observer,client:", clientKey); + log.info("remove null observer, client {}", clientKey); kvService.deleteWithPrefix(clientKey); if (value != null) { synchronized (value) { @@ -268,8 +271,7 @@ public void notifyClientChangeLeader() { } catch (Exception e) { try { Thread.sleep(100); - } catch (InterruptedException ex) { - + } catch (InterruptedException ignored) { } } } From ec0b0e006ba4e497b7c1292c2a9aef4620e7c084 Mon Sep 17 00:00:00 2001 From: imbajin Date: Thu, 6 Jul 2023 14:46:03 +0800 Subject: [PATCH 13/18] refact: GP-2141 handle low speed import - Compile OK from inner commit 0a95b1e587e65ea6a0a06774d53c80868f36af5b fix store compile problem https://hugegraph.feishu.cn/wiki/Y6d2wys9KiWf24kpzNKct0Yknnr --------- Co-authored-by: V_Galaxy <1904821183@qq.com> --- .../pd/client/AbstractClientStubProxy.java | 1 - .../apache/hugegraph/pd/client/Channels.java | 27 +++ .../hugegraph/pd/client/ClientCache.java | 6 +- .../hugegraph/pd/client/Discoverable.java | 1 - .../hugegraph/pd/client/DiscoveryClient.java | 1 - .../pd/client/DiscoveryClientImpl.java | 1 - .../hugegraph/pd/client/LicenseClient.java | 1 - 
.../apache/hugegraph/pd/client/PDClient.java | 227 +++++++++++------- .../apache/hugegraph/pd/client/PDPulse.java | 16 +- .../hugegraph/pd/client/PDPulseImpl.java | 61 +++-- .../apache/hugegraph/pd/client/PDWatch.java | 5 +- .../hugegraph/pd/client/PDWatchImpl.java | 27 +-- .../hugegraph/pd/pulse/PartitionNotice.java | 14 +- .../apache/hugegraph/pd/watch/NodeEvent.java | 9 +- .../hugegraph/pd/StoreRegisterTest.java | 14 +- .../pd/client/DiscoveryClientImplTest.java | 11 +- .../pd/client/LicenseClientImplTest.java | 1 - .../hugegraph/pd/client/PDPulseTest.java | 2 +- .../pd/common/PDRuntimeException.java | 1 - .../org/apache/hugegraph/pd/KvService.java | 5 +- .../org/apache/hugegraph/pd/LogService.java | 1 - .../apache/hugegraph/pd/PartitionService.java | 2 +- .../apache/hugegraph/pd/RegistryService.java | 1 - .../apache/hugegraph/pd/StoreNodeService.java | 49 ++-- .../hugegraph/pd/meta/DiscoveryMetaStore.java | 5 +- .../org/apache/hugegraph/pd/meta/LogMeta.java | 1 - .../pd/meta/MetadataRocksDBStore.java | 7 +- .../apache/hugegraph/pd/meta/QueueStore.java | 12 +- hg-pd-grpc/src/main/proto/pd_pulse.proto | 33 ++- hg-pd-grpc/src/main/proto/pd_watch.proto | 10 +- .../pd/model/RegistryQueryRestRequest.java | 1 - .../pd/model/RegistryRestRequest.java | 1 - .../pd/model/RegistryRestResponse.java | 1 - .../hugegraph/pd/model/TimeRangeRequest.java | 1 - .../pd/pulse/AbstractObserverSubject.java | 23 +- .../hugegraph/pd/pulse/PDPulseSubject.java | 85 ++++++- .../pd/pulse/PartitionHeartbeatSubject.java | 13 +- .../pd/pulse/PdInstructionSubject.java | 39 +++ .../hugegraph/pd/pulse/PulseListener.java | 5 +- .../apache/hugegraph/pd/rest/RegistryAPI.java | 1 - .../pd/service/DiscoveryService.java | 16 +- .../pd/service/KvServiceGrpcImpl.java | 14 ++ .../hugegraph/pd/service/PDService.java | 59 +++-- .../hugegraph/pd/service/ServiceGrpc.java | 23 +- .../apache/hugegraph/pd/util/DateUtil.java | 1 - .../pd/watch/AbstractWatchSubject.java | 9 +- .../hugegraph/pd/watch/KvWatchSubject.java | 16 +- .../hugegraph/pd/watch/NodeChangeSubject.java | 17 +- .../hugegraph/pd/watch/PDWatchSubject.java | 6 +- .../src/test/java/live/PDServer0.java | 1 - .../src/test/java/live/PDServer1.java | 1 - .../src/test/java/live/PDServer2.java | 1 - .../src/test/java/live/PDServer3.java | 1 - .../hugegraph/pd/client/PDClientTest.java | 2 +- 54 files changed, 545 insertions(+), 344 deletions(-) create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java index c8b37443ef..8c8bf38152 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java @@ -23,7 +23,6 @@ import io.grpc.stub.AbstractStub; /** - * @author zhangyingjie * @date 2022/6/20 **/ public class AbstractClientStubProxy { diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java new file mode 100644 index 0000000000..a8d7d07afa --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java @@ -0,0 +1,27 @@ +package org.apache.hugegraph.pd.client; + +import java.util.concurrent.ConcurrentHashMap; + 
+import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; + +public class Channels { + + private static final ConcurrentHashMap chs = new ConcurrentHashMap<>(); + + public static ManagedChannel getChannel(String target) { + + ManagedChannel channel; + if ((channel = chs.get(target)) == null || channel.isShutdown() || channel.isTerminated()) { + synchronized (chs) { + if ((channel = chs.get(target)) == null || channel.isShutdown() || + channel.isTerminated()) { + channel = ManagedChannelBuilder.forTarget(target).usePlaintext().build(); + chs.put(target, channel); + } + } + } + + return channel; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java index 71930e3351..05914feff8 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java @@ -27,13 +27,13 @@ @Slf4j public class ClientCache { + private final AtomicBoolean initialized = new AtomicBoolean(false); + private final org.apache.hugegraph.pd.client.PDClient client; private volatile Map> groups; private volatile Map stores; private volatile Map caches = new ConcurrentHashMap<>(); - private final AtomicBoolean initialized = new AtomicBoolean(false); - private final PDClient client; - public ClientCache(PDClient pdClient) { + public ClientCache(org.apache.hugegraph.pd.client.PDClient pdClient) { groups = new ConcurrentHashMap<>(); stores = new ConcurrentHashMap<>(); client = pdClient; diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java index dfdcfa2cc9..3dac3478d1 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java @@ -21,7 +21,6 @@ import org.apache.hugegraph.pd.grpc.discovery.Query; /** - * @author zhangyingjie * @date 2021/12/20 **/ public interface Discoverable { diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java index 39f7e13705..8d38b23fe7 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java @@ -39,7 +39,6 @@ import lombok.extern.slf4j.Slf4j; /** - * @author zhangyingjie * @date 2021/12/20 **/ @Slf4j diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java index b5e04f757b..c65053cc9f 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java @@ -24,7 +24,6 @@ import org.apache.hugegraph.pd.grpc.discovery.RegisterType; /** - * @author zhangyingjie * @date 2021/12/20 **/ public class DiscoveryClientImpl extends DiscoveryClient { diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java index 5dcad36ffd..d21741b194 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java +++ 
b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java @@ -28,7 +28,6 @@ import lombok.extern.slf4j.Slf4j; /** - * @author zhangyingjie * @date 2022/8/3 **/ @Slf4j diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java index 16fdc8d3a2..ae57e622cb 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java @@ -17,9 +17,12 @@ package org.apache.hugegraph.pd.client; +import static org.apache.hugegraph.pd.watch.NodeEvent.EventType.NODE_PD_LEADER_CHANGE; + import java.util.ArrayList; import java.util.LinkedList; import java.util.List; +import java.util.Objects; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; @@ -44,7 +47,6 @@ import com.google.protobuf.ByteString; import io.grpc.ManagedChannel; -import io.grpc.ManagedChannelBuilder; import io.grpc.MethodDescriptor; import io.grpc.StatusRuntimeException; import io.grpc.stub.AbstractBlockingStub; @@ -66,7 +68,7 @@ public class PDClient { private PDWatch.Watcher storeWatcher; private PDWatch.Watcher graphWatcher; private PDWatch.Watcher shardGroupWatcher; - private PDPulse pdPulse; + private PDWatch pdWatch; private PDClient(PDConfig config) { this.config = config; @@ -86,96 +88,94 @@ public static PDClient create(PDConfig config) { return new PDClient(config); } - private static void handleResponseError(Pdpb.ResponseHeader header) throws - PDException { - if (header.hasError() && header.getError() - .getType() != Pdpb.ErrorType.OK) { - throw new PDException(header.getError().getTypeValue(), - String.format( - "PD request error, error code = %d, msg = %s", - header.getError().getTypeValue(), - header.getError().getMessage())); + private synchronized void newBlockingStub() throws PDException { + if (stubProxy.get() != null) { + return; } - } - private synchronized void newBlockingStub() throws PDException { - if (stubProxy.get() != null) return; String host = newLeaderStub(); if (host.isEmpty()) { throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, "PD unreachable, pd.peers=" + config.getServerHost()); } - PDWatch pdWatch = new PDWatchImpl(host); - if (config.isEnableCache()) { - log.info("PDClient enable cache, init PDWatch object"); - this.pdPulse = new PDPulseImpl(host); - partitionWatcher = pdWatch.watchPartition(new PDWatch.Listener<>() { - @Override - public void onNext(PartitionEvent response) { - log.info("PDClient receive partition event {}-{} {}", - response.getGraph(), response.getPartitionId(), - response.getChangeType()); - invalidPartitionCache(response.getGraph(), response.getPartitionId()); - - if (response.getChangeType() == PartitionEvent.ChangeType.DEL) { - cache.removeAll(response.getGraph()); - } - eventListeners.forEach(listener -> { - listener.onPartitionChanged(response); - }); - } + log.info("PDClient enable cache, init PDWatch object"); + connectPdWatch(host); + } - @Override - public void onError(Throwable throwable) { - log.error("watchPartition exception {}", throwable.getMessage()); - closeStub(false); - } + public void connectPdWatch(String leader) { - @Override - public void onCompleted() { + if (pdWatch != null && Objects.equals(pdWatch.getCurrentHost(), leader) && + pdWatch.checkChannel()) { + return; + } - } - }); - storeWatcher = pdWatch.watchNode(new PDWatch.Listener() { - @Override - public void onNext(NodeEvent response) { - 
log.info("PDClient receive store event {} {}", response.getEventType(), - Long.toHexString(response.getNodeId())); - invalidStoreCache(response.getNodeId()); - eventListeners.forEach(listener -> { - listener.onStoreChanged(response); - }); - } + log.info("PDWatch client connect host:{}", leader); + pdWatch = new PDWatchImpl(leader); + + partitionWatcher = pdWatch.watchPartition(new PDWatch.Listener<>() { + @Override + public void onNext(PartitionEvent response) { + // log.info("PDClient receive partition event {}-{} {}", + // response.getGraph(), response.getPartitionId(), response.getChangeType()); + invalidPartitionCache(response.getGraph(), response.getPartitionId()); - @Override - public void onError(Throwable throwable) { - log.error("watchNode exception {}", throwable.getMessage()); - closeStub(false); + if (response.getChangeType() == PartitionEvent.ChangeType.DEL) { + cache.removeAll(response.getGraph()); } - @Override - public void onCompleted() { + eventListeners.forEach(listener -> { + listener.onPartitionChanged(response); + }); + } + + @Override + public void onError(Throwable throwable) { + log.error("watchPartition exception {}", throwable.getMessage()); + closeStub(false); + } + }); - } - }); - } - graphWatcher = pdWatch.watchGraph(new PDWatch.Listener() { + storeWatcher = pdWatch.watchNode(new PDWatch.Listener<>() { @Override - public void onNext(WatchResponse response) { + public void onNext(NodeEvent response) { + log.info("PDClient receive store event {} {}", + response.getEventType(), Long.toHexString(response.getNodeId())); + + if (response.getEventType() == NODE_PD_LEADER_CHANGE) { + // pd raft change + var leaderIp = response.getGraph(); + log.info("watchNode: pd leader changed to {}, current watch:{}", + leaderIp, pdWatch.getCurrentHost()); + closeStub(!Objects.equals(pdWatch.getCurrentHost(), leaderIp)); + connectPdWatch(leaderIp); + } + + invalidStoreCache(response.getNodeId()); eventListeners.forEach(listener -> { - listener.onGraphChanged(response); + listener.onStoreChanged(response); }); } @Override public void onError(Throwable throwable) { - log.warn("graphWatcher exception {}", throwable.getMessage()); + log.error("watchNode exception {}", throwable.getMessage()); + closeStub(false); } + }); + + graphWatcher = pdWatch.watchGraph(new PDWatch.Listener<>() { @Override - public void onCompleted() { + public void onNext(WatchResponse response) { + eventListeners.forEach(listener -> { + listener.onGraphChanged(response); + }); + } + @Override + public void onError(Throwable throwable) { + log.warn("graphWatcher exception {}", throwable.getMessage()); } }); @@ -206,12 +206,8 @@ public void onNext(WatchResponse response) { public void onError(Throwable throwable) { log.warn("shardGroupWatcher exception {}", throwable.getMessage()); } - - @Override - public void onCompleted() { - - } }); + } private synchronized void closeStub(boolean closeWatcher) { @@ -222,9 +218,11 @@ private synchronized void closeStub(boolean closeWatcher) { if (closeWatcher) { if (partitionWatcher != null) { partitionWatcher.close(); + partitionWatcher = null; } if (storeWatcher != null) { storeWatcher.close(); + storeWatcher = null; } if (graphWatcher != null) { graphWatcher.close(); @@ -235,17 +233,16 @@ private synchronized void closeStub(boolean closeWatcher) { shardGroupWatcher.close(); shardGroupWatcher = null; } - } - partitionWatcher = storeWatcher = null; + pdWatch = null; + } } private PDGrpc.PDBlockingStub getStub() throws PDException { if (stubProxy.get() == null) { 
newBlockingStub(); } - return stubProxy.get().withDeadlineAfter(config.getGrpcTimeOut(), - TimeUnit.MILLISECONDS); + return stubProxy.get().withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS); } private PDGrpc.PDBlockingStub newStub() throws PDException { @@ -261,20 +258,15 @@ private String newLeaderStub() { String leaderHost = ""; for (int i = 0; i < stubProxy.getHostCount(); i++) { String host = stubProxy.nextHost(); - ManagedChannel channel = ManagedChannelBuilder.forTarget(host).usePlaintext().build(); + ManagedChannel channel = Channels.getChannel(host); + PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(channel) .withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS); try { - Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() - .setHeader(header) - .build(); - Metapb.Member leader = stub.getMembers(request).getLeader(); - if (!leader.getGrpcUrl().equalsIgnoreCase(host)) { - leaderHost = leader.getGrpcUrl(); - channel.shutdown(); - channel.awaitTermination(10000, TimeUnit.MILLISECONDS); - channel = ManagedChannelBuilder.forTarget(leaderHost).usePlaintext().build(); + var leaderIp = getLeaderIp(stub); + if (!leaderIp.equalsIgnoreCase(host)) { + leaderHost = leaderIp; stubProxy.set(PDGrpc.newBlockingStub(channel) .withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS)); @@ -282,6 +274,8 @@ private String newLeaderStub() { stubProxy.set(stub); leaderHost = host; } + stubProxy.setLeader(leaderIp); + log.info("PDClient connect to host = {} success", leaderHost); break; } catch (Exception e) { @@ -292,6 +286,28 @@ private String newLeaderStub() { return leaderHost; } + public String getLeaderIp() { + + return getLeaderIp(stubProxy.get()); + } + + private String getLeaderIp(PDGrpc.PDBlockingStub stub) { + if (stub == null) { + try { + getStub(); + return stubProxy.getLeader(); + } catch (PDException e) { + throw new RuntimeException(e); + } + } + + Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() + .setHeader(header) + .build(); + Metapb.Member leader = stub.getMembers(request).getLeader(); + return leader.getGrpcUrl(); + } + /** * Store注册,返回storeID,初次注册会返回新ID * @@ -875,7 +891,7 @@ public Metapb.ClusterStats getClusterStats() throws PDException { private > RespT blockingUnaryCall(MethodDescriptor method, ReqT req) throws PDException { - return blockingUnaryCall(method, req, 5); + return blockingUnaryCall(method, req, 1); } private > RespT @@ -901,6 +917,19 @@ public Metapb.ClusterStats getClusterStats() throws PDException { return null; } + private void handleResponseError(Pdpb.ResponseHeader header) throws + PDException { + var errorType = header.getError().getType(); + if (header.hasError() && errorType != Pdpb.ErrorType.OK) { + + throw new PDException(header.getError().getTypeValue(), + String.format( + "PD request error, error code = %d, msg = %s", + header.getError().getTypeValue(), + header.getError().getMessage())); + } + } + public void addEventListener(PDEventListener listener) { eventListeners.add(listener); } @@ -909,10 +938,6 @@ public PDWatch getWatchClient() { return new PDWatchImpl(stubProxy.getHost()); } - public PDPulse getPulseClient() { - return this.pdPulse; - } - /** * 返回Store状态信息 */ @@ -1088,7 +1113,7 @@ public void reportTask(MetaTask.Task task) throws PDException { Pdpb.ReportTaskRequest request = Pdpb.ReportTaskRequest.newBuilder() .setHeader(header) .setTask(task).build(); - Pdpb.ReportTaskResponse response = getStub().reportTask(request); + Pdpb.ReportTaskResponse response = 
blockingUnaryCall(PDGrpc.getReportTaskMethod(), request); handleResponseError(response.getHeader()); } @@ -1197,7 +1222,9 @@ public void deleteShardGroup(int groupId) throws PDException { .setHeader(header) .setGroupId(groupId) .build(); - Pdpb.DeleteShardGroupResponse response = getStub().deleteShardGroup(request); + Pdpb.DeleteShardGroupResponse response = + blockingUnaryCall(PDGrpc.getDeleteShardGroupMethod(), request); + handleResponseError(response.getHeader()); } @@ -1274,11 +1301,17 @@ default void onShardGroupChanged(WatchResponse event) { } static class StubProxy { + private final LinkedList hostList = new LinkedList<>(); private volatile PDGrpc.PDBlockingStub stub; + private String leader; public StubProxy(String[] hosts) { - for (String host : hosts) if (!host.isEmpty()) hostList.offer(host); + for (String host : hosts) { + if (!host.isEmpty()) { + hostList.offer(host); + } + } } public String nextHost() { @@ -1302,5 +1335,13 @@ public String getHost() { public int getHostCount() { return hostList.size(); } + + public String getLeader() { + return leader; + } + + public void setLeader(String leader) { + this.leader = leader; + } } } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java index 8b5ac39040..542311280c 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java @@ -21,7 +21,7 @@ import java.util.function.Consumer; import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; -import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; import org.apache.hugegraph.pd.pulse.PulseServerNotice; /** @@ -50,7 +50,7 @@ static Listener listener(Consumer onNext, Runnable onCompleted) { static Listener listener(Consumer onNext, Consumer onError, Runnable onCompleted) { - return new Listener() { + return new Listener<>() { @Override public void onNext(T response) { onNext.accept(response); @@ -77,8 +77,16 @@ public void onCompleted() { * @param listener * @return */ - Notifier connectPartition( - Listener listener); + Notifier connectPartition(Listener listener); + + /** + * 切换成新的host。做 channel/host的检查,如果需要关闭,notifier调用close方法。 + * + * @param host new host + * @param notifier notifier + * @return true if create new stub, otherwise false + */ + boolean resetStub(String host, Notifier notifier); /** * Interface of pulse. 
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java index 6d85d6b043..0e473a9c78 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java @@ -17,13 +17,13 @@ package org.apache.hugegraph.pd.client; +import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import org.apache.hugegraph.pd.grpc.pulse.HgPdPulseGrpc; import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; -import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; import org.apache.hugegraph.pd.grpc.pulse.PulseAckRequest; import org.apache.hugegraph.pd.grpc.pulse.PulseCreateRequest; import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; @@ -35,7 +35,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.grpc.ManagedChannel; -import io.grpc.ManagedChannelBuilder; import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; @@ -46,43 +45,60 @@ public final class PDPulseImpl implements PDPulse { private static final ConcurrentHashMap chs = new ConcurrentHashMap<>(); - private final HgPdPulseGrpc.HgPdPulseStub stub; private final ExecutorService threadPool; + private HgPdPulseGrpc.HgPdPulseStub stub; + private String pdServerAddress; // TODO: support several servers. public PDPulseImpl(String pdServerAddress) { - ManagedChannel channel; - if ((channel = chs.get(pdServerAddress)) == null || channel.isShutdown()) { - synchronized (chs) { - if ((channel = chs.get(pdServerAddress)) == null || channel.isShutdown()) { - channel = getChannel(pdServerAddress); - chs.put(pdServerAddress, channel); - } - } - } - this.stub = HgPdPulseGrpc.newStub(channel); + this.pdServerAddress = pdServerAddress; + this.stub = HgPdPulseGrpc.newStub(Channels.getChannel(pdServerAddress)); var namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("ack-notice-pool-%d").build(); threadPool = Executors.newSingleThreadExecutor(namedThreadFactory); } - private ManagedChannel getChannel(String target) { - return ManagedChannelBuilder.forTarget(target).usePlaintext().build(); + + private String getCurrentHost() { + return this.pdServerAddress; } + private boolean checkChannel() { + return stub != null && !((ManagedChannel) stub.getChannel()).isShutdown(); + } + + /* TODO: handle this override problem */ @Override - public Notifier connectPartition( - Listener listener) { + public Notifier connectPartition(Listener + listener) { return new PartitionHeartbeat(listener); } + @Override + public boolean resetStub(String host, Notifier notifier) { + log.info("reset stub: current, {}, new: {}, channel state:{}", getCurrentHost(), host, + checkChannel()); + if (Objects.equals(host, getCurrentHost()) && checkChannel()) { + return false; + } + + if (notifier != null) { + notifier.close(); + } + + this.stub = HgPdPulseGrpc.newStub(Channels.getChannel(host)); + log.info("pd pulse connect to {}", host); + this.pdServerAddress = host; + return true; + } + /*** PartitionHeartbeat's implement ***/ private class PartitionHeartbeat extends AbstractConnector { + PulseResponse> { private long observerId = -1; - PartitionHeartbeat(Listener listener) { + PartitionHeartbeat(Listener listener) { super(listener, PulseType.PULSE_TYPE_PARTITION_HEARTBEAT); } @@ -108,10 +124,10 @@ public void 
notifyServer(PartitionHeartbeatRequest.Builder requestBuilder) { public void onNext(PulseResponse pulseResponse) { this.setObserverId(pulseResponse.getObserverId()); long noticeId = pulseResponse.getNoticeId(); - PartitionHeartbeatResponse res = pulseResponse.getPartitionHeartbeatResponse(); - this.listener.onNext(res); + this.listener.onNext(pulseResponse); this.listener.onNotice(new PartitionNotice(noticeId, - e -> super.ackNotice(e, observerId), res)); + e -> super.ackNotice(e, observerId), + pulseResponse)); } } @@ -180,5 +196,4 @@ protected void ackNotice(long noticeId, long observerId) { }); } } - } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java index b068ea0181..a16d7b1b02 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java @@ -46,6 +46,9 @@ public interface PDWatch { */ //PDWatcher watchNode(String graph, Listener listener); + String getCurrentHost(); + + boolean checkChannel(); /*** inner static methods ***/ static Listener listener(Consumer onNext) { @@ -118,7 +121,7 @@ interface Listener { /** * Invoked on completion. */ - void onCompleted(); + default void onCompleted() {}; } interface Watcher extends Closeable { diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java index bdf4efd501..c723eb9479 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java @@ -39,25 +39,23 @@ */ final class PDWatchImpl implements PDWatch { - private static final ConcurrentHashMap chs = new ConcurrentHashMap<>(); - private final HgPdWatchGrpc.HgPdWatchStub stub; + private HgPdWatchGrpc.HgPdWatchStub stub; + private String pdServerAddress; // TODO: support several servers. PDWatchImpl(String pdServerAddress) { - ManagedChannel channel; - if ((channel = chs.get(pdServerAddress)) == null || channel.isShutdown()) { - synchronized (chs) { - if ((channel = chs.get(pdServerAddress)) == null || channel.isShutdown()) { - channel = getChannel(pdServerAddress); - chs.put(pdServerAddress, channel); - } - } - } - this.stub = HgPdWatchGrpc.newStub(channel); + this.pdServerAddress = pdServerAddress; + this.stub = HgPdWatchGrpc.newStub(Channels.getChannel(pdServerAddress)); + } + + @Override + public String getCurrentHost() { + return this.pdServerAddress; } - private ManagedChannel getChannel(String target) { - return ManagedChannelBuilder.forTarget(target).usePlaintext().build(); + @Override + public boolean checkChannel() { + return stub != null && ! 
((ManagedChannel) stub.getChannel()).isShutdown(); } /** @@ -195,6 +193,7 @@ public void close() { @Override public void onError(Throwable throwable) { + this.listener.onError(throwable); } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java index af9403b726..b6e5555e6a 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java @@ -19,18 +19,14 @@ import java.util.function.Consumer; -import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; -/** - * @author lynn.bond@hotmail.com created on 2022/2/13 - */ -public class PartitionNotice implements PulseServerNotice { +public class PartitionNotice implements PulseServerNotice { private final long noticeId; private final Consumer ackConsumer; - private final PartitionHeartbeatResponse content; + private final PulseResponse content; - public PartitionNotice(long noticeId, Consumer ackConsumer, - PartitionHeartbeatResponse content) { + public PartitionNotice(long noticeId, Consumer ackConsumer, PulseResponse content) { this.noticeId = noticeId; this.ackConsumer = ackConsumer; this.content = content; @@ -47,7 +43,7 @@ public long getNoticeId() { } @Override - public PartitionHeartbeatResponse getContent() { + public PulseResponse getContent() { return this.content; } } diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java index 2f28986072..893566b9f5 100644 --- a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java @@ -21,9 +21,6 @@ import org.apache.hugegraph.pd.grpc.watch.NodeEventType; -/** - * @author lynn.bond@hotmail.com created on 2021/11/4 - */ public class NodeEvent { private final String graph; private final long nodeId; @@ -79,8 +76,8 @@ public enum EventType { UNKNOWN, NODE_ONLINE, NODE_OFFLINE, - NODE_RAFT_CHANGE; - + NODE_RAFT_CHANGE, + NODE_PD_LEADER_CHANGE; public static EventType grpcTypeOf(NodeEventType grpcType) { switch (grpcType) { @@ -90,6 +87,8 @@ public static EventType grpcTypeOf(NodeEventType grpcType) { return NODE_OFFLINE; case NODE_EVENT_TYPE_NODE_RAFT_CHANGE: return NODE_RAFT_CHANGE; + case NODE_EVENT_TYPE_PD_LEADER_CHANGE: + return NODE_PD_LEADER_CHANGE; default: return UNKNOWN; } diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java index 04177b665f..bc5e3879ac 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java @@ -23,11 +23,12 @@ import org.apache.hugegraph.pd.client.PDClient; import org.apache.hugegraph.pd.client.PDConfig; import org.apache.hugegraph.pd.client.PDPulse; +import org.apache.hugegraph.pd.client.PDPulseImpl; import org.apache.hugegraph.pd.common.KVPair; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.Metapb; import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; -import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; import 
org.apache.hugegraph.pd.pulse.PulseServerNotice; import org.junit.Assert; import org.junit.BeforeClass; @@ -95,17 +96,18 @@ public void testStoreHeartbeat() throws PDException { // @Test public void testPartitionHeartbeat() throws InterruptedException, PDException { testRegisterStore(); - PDPulse pdPulse = pdClient.getPulseClient(); - PDPulse.Notifier notifier = - pdPulse.connectPartition(new PDPulse.Listener() { + PDPulse pdPulse = new PDPulseImpl(pdClient.getLeaderIp()); + + PDPulse.Notifier notifier = pdPulse.connectPartition( + new PDPulse.Listener() { @Override - public void onNext(PartitionHeartbeatResponse response) { + public void onNext(PulseResponse response) { } @Override - public void onNotice(PulseServerNotice notice) { + public void onNotice(PulseServerNotice notice) { } diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java index 09e5a90ddc..f2b8139740 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java @@ -28,7 +28,6 @@ import org.junit.Assert; /** - * @author zhangyingjie * @date 2021/12/21 **/ public class DiscoveryClientImplTest { @@ -117,21 +116,19 @@ private void register(String appName, String address) throws InterruptedExceptio DiscoveryClientImpl discoveryClient = getClient(appName, address, labels); Thread.sleep(10000); NodeInfos nodeInfos1 = discoveryClient.getNodeInfos(query); - Assert.assertTrue(nodeInfos1.getInfoCount() == 1); + Assert.assertEquals(1, nodeInfos1.getInfoCount()); DiscoveryClientImpl discoveryClient1 = getClient(appName, address + 0, labels); Thread.sleep(10000); - Assert.assertTrue( - discoveryClient.getNodeInfos(query).getInfoCount() == 2); + Assert.assertEquals(2, discoveryClient.getNodeInfos(query).getInfoCount()); Query query1 = Query.newBuilder().setAppName( appName).setVersion("0.12.0").putAllLabels(labels).build(); - Assert.assertTrue( - discoveryClient.getNodeInfos(query1).getInfoCount() == 0); + Assert.assertEquals(0, discoveryClient.getNodeInfos(query1).getInfoCount()); discoveryClient.cancelTask(); discoveryClient1.cancelTask(); Thread.sleep(wait); NodeInfos nodeInfos = discoveryClient.getNodeInfos(query); System.out.println(nodeInfos); - Assert.assertTrue(nodeInfos.getInfoCount() == 0); + Assert.assertEquals(0, nodeInfos.getInfoCount()); discoveryClient.close(); discoveryClient1.close(); } diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java index b7991b5253..6fc2518b0d 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java @@ -31,7 +31,6 @@ import lombok.extern.slf4j.Slf4j; /** - * @author zhangyingjie * @date 2021/12/21 **/ @Slf4j diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java index cdeb60ef93..27d247a369 100644 --- a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java @@ -46,7 +46,7 @@ public static void beforeClass() throws Exception { // @Test public void listen() { - PDPulse 
pulse = pdClient.getPulseClient(); + PDPulse pulse = new PDPulseImpl(pdClient.getLeaderIp()); CountDownLatch latch = new CountDownLatch(60); PDPulse.Notifier notifier1 = diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java index b0bfe2c0ae..3865639aea 100644 --- a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java @@ -18,7 +18,6 @@ package org.apache.hugegraph.pd.common; /** - * @author zhangyingjie * @date 2022/8/1 **/ public class PDRuntimeException extends RuntimeException { diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java index 129f22c081..5c8ef3c8db 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java @@ -37,7 +37,7 @@ import lombok.extern.slf4j.Slf4j; /** - * @author zhangyingjie + * **/ @Slf4j @Service @@ -311,4 +311,7 @@ public void clearTTLData() { } } + public MetadataRocksDBStore getMeta() { + return meta; + } } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java index b493c8256c..ecb69e82d4 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java @@ -32,7 +32,6 @@ import lombok.extern.slf4j.Slf4j; /** - * @author zhangyingjie * @date 2022/3/29 **/ @Slf4j diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java index daa2d31594..95c8f88321 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java @@ -1496,7 +1496,7 @@ protected void onPartitionRemoved(Metapb.Partition partition) { */ @Override public void onRaftLeaderChanged() { - log.info("Partition service reload cache from rocksdb"); + log.info("Partition service reload cache from rocksdb, due to leader change"); try { partitionMeta.reload(); } catch (PDException e) { diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java index ed02d37ccf..724281a349 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java @@ -26,7 +26,6 @@ import org.apache.hugegraph.pd.meta.MetadataFactory; /** - * @author zhangyingjie * @date 2022/1/14 **/ public class RegistryService { diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java index 23fa109a49..eff6c4d0f7 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java @@ -272,7 +272,9 @@ public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDExcept log.info("updateStore storeId: {}, address: {}, state: {}", store.getId(), store.getAddress(), store.getState()); Metapb.Store lastStore = storeInfoMeta.getStore(store.getId()); - if (lastStore == null) return null; + if (lastStore 
== null) { + return null; + } Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore).clearLabels().clearStats(); store = builder.mergeFrom(store).build(); @@ -611,34 +613,30 @@ private boolean isStoreInShards(List shards, long storeId) { * @return */ public synchronized Metapb.ShardGroup updateShardGroup(int groupId, List shards, - long version, long confVersion) { - try { - Metapb.ShardGroup group = this.storeInfoMeta.getShardGroup(groupId); + long version, long confVersion) throws + PDException { + Metapb.ShardGroup group = this.storeInfoMeta.getShardGroup(groupId); - if (group == null) { - return null; - } + if (group == null) { + return null; + } - var builder = Metapb.ShardGroup.newBuilder(group); - if (version >= 0) { - builder.setVersion(version); - } + var builder = Metapb.ShardGroup.newBuilder(group); + if (version >= 0) { + builder.setVersion(version); + } - if (confVersion >= 0) { - builder.setConfVer(confVersion); - } + if (confVersion >= 0) { + builder.setConfVer(confVersion); + } - var newGroup = builder.clearShards().addAllShards(shards).build(); + var newGroup = builder.clearShards().addAllShards(shards).build(); - storeInfoMeta.updateShardGroup(newGroup); - partitionService.updateShardGroupCache(newGroup); - onShardGroupStatusChanged(group, newGroup); - log.info("Raft {} updateShardGroup {}", groupId, newGroup); - return group; - } catch (Exception e) { - log.error("Shardgroup {} update exception {}", groupId, e); - } - return null; + storeInfoMeta.updateShardGroup(newGroup); + partitionService.updateShardGroupCache(newGroup); + onShardGroupStatusChanged(group, newGroup); + log.info("Raft {} updateShardGroup {}", groupId, newGroup); + return group; } /** @@ -1056,7 +1054,8 @@ public Metapb.Shard getLeader(Metapb.Partition partition, int initIdx) { } } } catch (Exception e) { - log.error("get leader error: group id:{}, error:", partition.getId(), e.getMessage()); + log.error("get leader error: group id:{}, error: {}", + partition.getId(), e.getMessage()); } return leader; } diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java index 2a8af8523d..81da5e3030 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java @@ -31,13 +31,14 @@ import lombok.extern.slf4j.Slf4j; /** - * @author zhangyingjie * @date 2022/1/14 **/ @Slf4j public class DiscoveryMetaStore extends MetadataRocksDBStore { - /** appName --> address --> registryInfo */ + /** + * appName --> address --> registryInfo + */ private static final String PREFIX = "REGIS-"; private static final String SPLITTER = "-"; diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java index 4fea518b87..dd5a3be16a 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java @@ -24,7 +24,6 @@ import org.apache.hugegraph.pd.grpc.Metapb; /** - * @author zhangyingjie * @date 2022/3/29 **/ public class LogMeta extends MetadataRocksDBStore { diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java index eacd593917..c591c116d9 100644 --- 
a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java @@ -31,7 +31,6 @@ import com.google.protobuf.Parser; /** - * @author zhangyingjie * @date 2022/1/9 **/ public class MetadataRocksDBStore extends MetadataStoreBase { @@ -45,7 +44,7 @@ public MetadataRocksDBStore(PDConfig pdConfig) { this.pdConfig = pdConfig; } - private HgKVStore getStore() { + public HgKVStore getStore() { if (store == null) { store = MetadataFactory.getStore(pdConfig); } @@ -131,7 +130,7 @@ public List scanRange(Parser parser, byte[] start, byte[] end) throws try { List kvs = this.scanRange(start, end); for (KV keyValue : kvs) { - stores.add(parser.parseFrom((byte[]) keyValue.getValue())); + stores.add(parser.parseFrom(keyValue.getValue())); } } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); @@ -145,7 +144,7 @@ public List scanPrefix(Parser parser, byte[] prefix) throws PDExceptio try { List kvs = this.scanPrefix(prefix); for (KV keyValue : kvs) { - stores.add(parser.parseFrom((byte[]) keyValue.getValue())); + stores.add(parser.parseFrom(keyValue.getValue())); } } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java index 7d31e8551e..979f5fcc25 100644 --- a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java @@ -23,6 +23,8 @@ import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.config.PDConfig; import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.store.RaftKVStore; /** * @author lynn.bond@hotmail.com on 2022/2/10 @@ -39,7 +41,15 @@ public void addItem(Metapb.QueueItem queueItem) throws PDException { } public void removeItem(String itemId) throws PDException { - remove(MetadataKeyHelper.getQueueItemKey(itemId)); + if (RaftEngine.getInstance().isLeader()) { + remove(MetadataKeyHelper.getQueueItemKey(itemId)); + } else { + var store = getStore(); + // todo: delete record via client + if (store instanceof RaftKVStore) { + ((RaftKVStore) store).doRemove(MetadataKeyHelper.getQueueItemKey(itemId)); + } + } } public List getQueue() throws PDException { diff --git a/hg-pd-grpc/src/main/proto/pd_pulse.proto b/hg-pd-grpc/src/main/proto/pd_pulse.proto index d4684eb895..f49c3aeaad 100644 --- a/hg-pd-grpc/src/main/proto/pd_pulse.proto +++ b/hg-pd-grpc/src/main/proto/pd_pulse.proto @@ -49,13 +49,14 @@ message PartitionHeartbeatRequest { /* responses */ message PulseResponse { - PulseType pulse_type = 1; - int64 observer_id = 2; - int32 status = 3; //0=ok,1=fail - int64 notice_id = 4; - oneof response_union { - PartitionHeartbeatResponse partition_heartbeat_response = 10; - } + PulseType pulse_type = 1; + int64 observer_id = 2; + int32 status = 3; //0=ok,1=fail + int64 notice_id=4; + oneof response_union { + PartitionHeartbeatResponse partition_heartbeat_response = 10; + PdInstructionResponse instruction_response = 11; + } } message PartitionHeartbeatResponse { @@ -117,10 +118,16 @@ message PartitionKeyRange{ uint64 key_end = 3; } +message PdInstructionResponse { + PdInstructionType instruction_type = 1; + string leader_ip = 2; +} + /* enums */ enum PulseType { - PULSE_TYPE_UNKNOWN = 0; - 
PULSE_TYPE_PARTITION_HEARTBEAT = 1; + PULSE_TYPE_UNKNOWN = 0; + PULSE_TYPE_PARTITION_HEARTBEAT = 1; + PULSE_TYPE_PD_INSTRUCTION = 2; } enum PulseChangeType { @@ -139,6 +146,10 @@ enum ConfChangeType { } enum CleanType { - CLEAN_TYPE_KEEP_RANGE = 0; // 仅保留这个range - CLEAN_TYPE_EXCLUDE_RANGE = 1; // 删除这个range + CLEAN_TYPE_KEEP_RANGE = 0; // 仅保留这个range + CLEAN_TYPE_EXCLUDE_RANGE = 1; // 删除这个range +} + +enum PdInstructionType { + CHANGE_TO_FOLLOWER = 0; } \ No newline at end of file diff --git a/hg-pd-grpc/src/main/proto/pd_watch.proto b/hg-pd-grpc/src/main/proto/pd_watch.proto index 2495925090..7470057a85 100644 --- a/hg-pd-grpc/src/main/proto/pd_watch.proto +++ b/hg-pd-grpc/src/main/proto/pd_watch.proto @@ -77,8 +77,10 @@ enum WatchChangeType { } enum NodeEventType { - NODE_EVENT_TYPE_UNKNOWN = 0; - NODE_EVENT_TYPE_NODE_ONLINE = 1; - NODE_EVENT_TYPE_NODE_OFFLINE = 2; - NODE_EVENT_TYPE_NODE_RAFT_CHANGE = 3; + NODE_EVENT_TYPE_UNKNOWN = 0; + NODE_EVENT_TYPE_NODE_ONLINE = 1; + NODE_EVENT_TYPE_NODE_OFFLINE = 2; + NODE_EVENT_TYPE_NODE_RAFT_CHANGE = 3; + // pd leader 变更 + NODE_EVENT_TYPE_PD_LEADER_CHANGE = 4; } \ No newline at end of file diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java index a6cbe08632..a076c93b8e 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java @@ -22,7 +22,6 @@ import lombok.Data; /** - * @author zhangyingjie * @date 2022/2/8 **/ @Data diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java index fd90ea3df3..10ab054758 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java @@ -22,7 +22,6 @@ import lombok.Data; /** - * @author zhangyingjie * @date 2022/2/8 **/ @Data diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java index ddf8d7dd98..8cd00b5825 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java @@ -24,7 +24,6 @@ import lombok.Data; /** - * @author zhangyingjie * @date 2022/2/8 **/ @Data diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java index edad568a8b..25b9ed51c0 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java @@ -20,7 +20,6 @@ import lombok.Data; /** - * @author zhangyingjie * @date 2022/3/23 **/ @Data diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java index e8e099554f..503bd2d688 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java @@ -150,7 +150,9 @@ 
protected long notifyClient(Consumer c) { } - protected void notifyError(String message) { + abstract long notifyClient(com.google.protobuf.GeneratedMessageV3 response); + + protected void notifyError(int code, String message){ synchronized (lock) { Iterator>> iter = observerHolder.entrySet().iterator(); @@ -159,13 +161,10 @@ protected void notifyError(String message) { Long observerId = entry.getKey(); PulseResponse res = this.builder.setObserverId(observerId).build(); try { - entry.getValue().onError( - Status.PERMISSION_DENIED.withDescription(message).asRuntimeException()); + entry.getValue().onError(Status.fromCodeValue(code).withDescription(message).asRuntimeException()); } catch (Throwable e) { - log.error("Failed to send " + this.pulseType.name() + "'s notice[" + - toNoticeString(res) - + "] to observer[" + observerId + "].", e); - + log.warn("Failed to send {} 's notice[{}] to observer[{}], error:{}", + this.pulseType.name(), toNoticeString(res), observerId, e.getMessage()); } } } @@ -214,20 +213,14 @@ void removeListener(Long listenerId, PulseListener listener) { abstract Function getNoticeHandler(); - void handleClientNotice(PulseNoticeRequest noticeRequest) { + void handleClientNotice(PulseNoticeRequest noticeRequest) throws Exception { Iterator> iter = listenerHolder.entrySet().iterator(); while (iter.hasNext()) { Map.Entry entry = iter.next(); Long listenerId = entry.getKey(); - try { - entry.getValue().onNext(getNoticeHandler().apply(noticeRequest)); - } catch (Throwable e) { - log.error(e.getMessage(), e); - } - + entry.getValue().onNext(getNoticeHandler().apply(noticeRequest)); } - } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java index 521a18aff6..36891803bc 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java @@ -18,12 +18,14 @@ package org.apache.hugegraph.pd.pulse; import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import static org.apache.hugegraph.pd.grpc.Pdpb.ErrorType.NOT_LEADER; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -34,9 +36,19 @@ import javax.annotation.concurrent.ThreadSafe; import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.Metapb; -import org.apache.hugegraph.pd.grpc.pulse.*; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.PdInstructionResponse; +import org.apache.hugegraph.pd.grpc.pulse.PdInstructionType; +import org.apache.hugegraph.pd.grpc.pulse.PulseCreateRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; import org.apache.hugegraph.pd.notice.NoticeBroadcaster; +import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.util.IdUtil; import 
com.google.protobuf.InvalidProtocolBufferException; @@ -69,6 +81,7 @@ public class PDPulseSubject { static { subjectHolder.put(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name(), new PartitionHeartbeatSubject()); + subjectHolder.put(PulseType.PULSE_TYPE_PD_INSTRUCTION.name(), new PdInstructionSubject()); // add some other type here... // ... } @@ -167,12 +180,16 @@ private static void notifyClient(PartitionHeartbeatResponse response) { doBroadcast(createBroadcaster(response)); } + public static void notifyClient(PdInstructionResponse response) { + doBroadcast(createBroadcaster(response)); + } + private static void doBroadcast(NoticeBroadcaster broadcaster) { broadcasterQueue.add(broadcaster.notifying()); } - private static T getSubject(PulseType pulseType, Class clazz) { - return (T) subjectHolder.get(pulseType.name()); + private static AbstractObserverSubject getSubject(PulseType pulseType) { + return subjectHolder.get(pulseType.name()); } private static NoticeBroadcaster createBroadcaster(Metapb.QueueItem item) { @@ -192,14 +209,35 @@ private static NoticeBroadcaster createBroadcaster(PartitionHeartbeatResponse no .setRemoveFunction(getRemoveFunction()); } - public static Supplier getNoticeSupplier(PartitionHeartbeatResponse notice) { - // TODO: PartitionHeartbeatSubject.class -> T - return () -> getSubject(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT, - PartitionHeartbeatSubject.class) - .notifyClient(notice); + private static NoticeBroadcaster createBroadcaster(PdInstructionResponse notice) { + return NoticeBroadcaster.of(getNoticeSupplier(notice)) + .setDurableSupplier(getDurableSupplier(notice)) + .setRemoveFunction(getRemoveFunction()); + } + + // public static Supplier getNoticeSupplier(PartitionHeartbeatResponse notice) { + // TODO: PartitionHeartbeatSubject.class -> T + // return () -> getSubject(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT, + // PartitionHeartbeatSubject.class) + // .notifyClient(notice); + // } + + public static Supplier getNoticeSupplier( + T notice) { + PulseType type; + if (notice instanceof PdInstructionResponse) { + type = PulseType.PULSE_TYPE_PD_INSTRUCTION; + } else if (notice instanceof PartitionHeartbeatResponse) { + type = PulseType.PULSE_TYPE_PARTITION_HEARTBEAT; + } else { + throw new IllegalArgumentException("Unknown pulse type " + notice.getClass().getName()); + } + return () -> getSubject(type).notifyClient(notice); } - private static Supplier getDurableSupplier(PartitionHeartbeatResponse notice) { + + private static Supplier getDurableSupplier( + com.google.protobuf.GeneratedMessageV3 notice) { return () -> { Metapb.QueueItem queueItem = toQueueItem(notice); String res = null; @@ -235,7 +273,7 @@ private static Function getRemoveFunction() { }; } - private static Metapb.QueueItem toQueueItem(PartitionHeartbeatResponse notice) { + private static Metapb.QueueItem toQueueItem(com.google.protobuf.GeneratedMessageV3 notice) { return Metapb.QueueItem.newBuilder() .setItemId(IdUtil.createMillisStr()) .setItemClass(notice.getClass().getTypeName()) @@ -255,9 +293,9 @@ private static PartitionHeartbeatResponse toNotice(Metapb.QueueItem item) { return buf; } - public static void notifyError(String message) { + public static void notifyError(int code, String message) { subjectHolder.forEach((k, v) -> { - v.notifyError(message); + v.notifyError(code, message); }); } @@ -348,7 +386,28 @@ private AbstractObserverSubject getSubject(PulseType pulseType) { } private void handleNotice(PulseNoticeRequest noticeRequest) { - subject.handleClientNotice(noticeRequest); + try { + 
subject.handleClientNotice(noticeRequest); + } catch (Exception e) { + if (e instanceof PDException) { + var pde = (PDException) e; + if (pde.getErrorCode() == NOT_LEADER.getNumber()) { + try { + log.info("send change leader command to watch, due to ERROR-100", pde); + notifyClient(PdInstructionResponse.newBuilder() + .setInstructionType( + PdInstructionType.CHANGE_TO_FOLLOWER) + .setLeaderIp(RaftEngine.getInstance() + .getLeaderGrpcAddress()) + .build()); + } catch (ExecutionException | InterruptedException ex) { + log.error("send notice to observer failed, ", ex); + } + } + } else { + log.error("handleNotice error", e); + } + } } @Override diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java index 9fb4ad7c27..a08f2c826d 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java @@ -19,7 +19,13 @@ import java.util.function.Function; -import org.apache.hugegraph.pd.grpc.pulse.*; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; + +import com.google.protobuf.GeneratedMessageV3; /** * @author lynn.bond@hotmail.com created on 2021/11/9 @@ -48,9 +54,10 @@ void notifyClient(PartitionHeartbeatResponse.Builder responseBuilder) { } - long notifyClient(PartitionHeartbeatResponse response) { + @Override + long notifyClient(GeneratedMessageV3 response) { return super.notifyClient(b -> { - b.setPartitionHeartbeatResponse(response); + b.setPartitionHeartbeatResponse((PartitionHeartbeatResponse) response); }); } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java new file mode 100644 index 0000000000..c5e4ce1fc3 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java @@ -0,0 +1,39 @@ +package org.apache.hugegraph.pd.pulse; + +import java.util.function.Function; + +import org.apache.hugegraph.pd.grpc.pulse.PdInstructionResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; + +import com.google.protobuf.GeneratedMessageV3; + +public class PdInstructionSubject extends AbstractObserverSubject { + + protected PdInstructionSubject() { + super(PulseType.PULSE_TYPE_PD_INSTRUCTION); + } + + @Override + String toNoticeString(PulseResponse res) { + return res.getInstructionResponse().toString(); + } + + /** + * pd单纯的向pulse发送的指令,不接收对应的notice + * + * @return null + */ + @Override + Function getNoticeHandler() { + return pulseNoticeRequest -> null; + } + + @Override + long notifyClient(GeneratedMessageV3 response) { + return super.notifyClient(b -> { + b.setInstructionResponse((PdInstructionResponse) response); + }); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java index dd3758bf86..09a7d3d05d 100644 --- 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java @@ -17,16 +17,13 @@ package org.apache.hugegraph.pd.pulse; -/** - * @author lynn.bond@hotmail.com created on 2021/11/9 - */ public interface PulseListener { /** * Invoked on new notice. * * @param notice the notice. */ - void onNext(T notice); + void onNext(T notice) throws Exception; /** * Invoked on errors. diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java index 5c9bb36cad..d18608bdda 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java @@ -51,7 +51,6 @@ import lombok.extern.slf4j.Slf4j; /** - * @author zhangyingjie * @date 2022/2/8 **/ @RestController diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java index e67a72052c..e1f6fcef3f 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java @@ -33,10 +33,8 @@ import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; import org.apache.hugegraph.pd.grpc.discovery.Query; import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; -import org.apache.hugegraph.pd.pulse.PDPulseSubject; import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.raft.RaftStateListener; -import org.apache.hugegraph.pd.watch.PDWatchSubject; import org.lognet.springboot.grpc.GRpcService; import org.springframework.beans.factory.annotation.Autowired; @@ -130,6 +128,7 @@ public void register(NodeInfo request, io.grpc.stub.StreamObserver observer.onCompleted(); } + @Override public void getNodes(Query request, io.grpc.stub.StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(null, DiscoveryServiceGrpc.getGetNodesMethod(), request, @@ -140,20 +139,9 @@ public void getNodes(Query request, io.grpc.stub.StreamObserver respo responseObserver.onCompleted(); } + @Override public boolean isLeader() { return RaftEngine.getInstance().isLeader(); } - @Override - public synchronized void onRaftLeaderChanged() { - if (!isLeader()) { - try { - String message = "lose leader"; - PDPulseSubject.notifyError(message); - PDWatchSubject.notifyError(message); - } catch (Exception e) { - log.error("notifyError error {}", e); - } - } - } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java index a9be77c519..7898248d2d 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java @@ -92,6 +92,7 @@ public void init() { * @param request * @param responseObserver */ + @Override public void put(Kv request, StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getPutMethod(), request, responseObserver); @@ -123,6 +124,7 @@ public void put(Kv request, StreamObserver responseObserver) { * @param request * @param responseObserver */ + @Override public void get(K request, StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(channel, 
KvServiceGrpc.getGetMethod(), request, responseObserver); @@ -154,6 +156,7 @@ public void get(K request, StreamObserver responseObserver) { * @param request * @param responseObserver */ + @Override public void delete(K request, StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getDeleteMethod(), request, responseObserver); @@ -187,6 +190,7 @@ public void delete(K request, StreamObserver responseObserver) { * @param request * @param responseObserver */ + @Override public void deletePrefix(K request, StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request, @@ -224,6 +228,7 @@ public void deletePrefix(K request, StreamObserver responseObserver) * @param request * @param responseObserver */ + @Override public void scanPrefix(K request, StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request, @@ -268,6 +273,7 @@ private long getRandomLong() { * @param request * @param responseObserver */ + @Override public void watch(WatchRequest request, StreamObserver responseObserver) { if (!isLeader()) { responseObserver.onError(new PDException(-1, msg)); @@ -294,6 +300,7 @@ public void watch(WatchRequest request, StreamObserver responseOb * @param request * @param responseObserver */ + @Override public void watchPrefix(WatchRequest request, StreamObserver responseObserver) { if (!isLeader()) { responseObserver.onError(new PDException(-1, msg)); @@ -357,6 +364,7 @@ private void clientWatch(WatchRequest request, StreamObserver res * @param request * @param responseObserver */ + @Override public void lock(LockRequest request, StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getLockMethod(), request, responseObserver); @@ -385,6 +393,7 @@ public void lock(LockRequest request, StreamObserver responseObser responseObserver.onCompleted(); } + @Override public void lockWithoutReentrant(LockRequest request, StreamObserver responseObserver) { if (!isLeader()) { @@ -417,6 +426,7 @@ public void lockWithoutReentrant(LockRequest request, responseObserver.onCompleted(); } + @Override public void isLocked(LockRequest request, StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getIsLockedMethod(), request, responseObserver); @@ -446,6 +456,7 @@ public void isLocked(LockRequest request, StreamObserver responseO * @param request * @param responseObserver */ + @Override public void unlock(LockRequest request, StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getUnlockMethod(), request, responseObserver); @@ -480,6 +491,7 @@ public void unlock(LockRequest request, StreamObserver responseObs * @param request * @param responseObserver */ + @Override public void keepAlive(LockRequest request, StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request, @@ -515,6 +527,7 @@ public void keepAlive(LockRequest request, StreamObserver response * @param request * @param responseObserver */ + @Override public void putTTL(TTLRequest request, StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getPutTTLMethod(), request, responseObserver); @@ -543,6 +556,7 @@ public void putTTL(TTLRequest request, StreamObserver responseObser * @param request * @param responseObserver */ + @Override public void keepTTLAlive(TTLRequest 
request, StreamObserver responseObserver) { if (!isLeader()) { redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request, diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java index a2505dd7c1..b7ec32abf1 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @@ -26,6 +26,7 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import javax.annotation.PostConstruct; @@ -48,8 +49,8 @@ import org.apache.hugegraph.pd.grpc.Metapb; import org.apache.hugegraph.pd.grpc.PDGrpc; import org.apache.hugegraph.pd.grpc.Pdpb; -import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest; import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseRequest; import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseResponse; @@ -175,12 +176,8 @@ public void init() throws PDException { // 接收心跳消息 PDPulseSubject.listenPartitionHeartbeat(new PulseListener() { @Override - public void onNext(PartitionHeartbeatRequest request) { - try { - partitionService.partitionHeartbeat(request.getStates()); - } catch (PDException e) { - log.error("PartitionHeartbeatRequest onNext exception", e); - } + public void onNext(PartitionHeartbeatRequest request) throws Exception { + partitionService.partitionHeartbeat(request.getStates()); } @Override @@ -402,6 +399,7 @@ public void getStore(Pdpb.GetStoreRequest request, * 修改 Store 状态等信息。 * */ + @Override public void setStore(Pdpb.SetStoreRequest request, StreamObserver observer) { if (!isLeader()) { @@ -690,6 +688,7 @@ public void getPartitionByID(Pdpb.GetPartitionByIDRequest request, * 更新分区信息,主要用来更新分区 key 范围,调用此接口需谨慎,否则会造成数据丢失。 * */ + @Override public void updatePartition(Pdpb.UpdatePartitionRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -773,6 +772,7 @@ public void scanPartitions(Pdpb.ScanPartitionsRequest request, /** * 获得图信息 */ + @Override public void getGraph(GetGraphRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -803,6 +803,7 @@ public void getGraph(GetGraphRequest request, /** * 修改图信息 */ + @Override public void setGraph(Pdpb.SetGraphRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -826,6 +827,7 @@ public void setGraph(Pdpb.SetGraphRequest request, /** * 获得图信息 */ + @Override public void delGraph(Pdpb.DelGraphRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -854,6 +856,7 @@ public void delGraph(Pdpb.DelGraphRequest request, * 根据条件查询分区信息,包括 Store、Graph 等条件 * */ + @Override public void queryPartitions(Pdpb.QueryPartitionsRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -943,6 +946,7 @@ public void resetId(Pdpb.ResetIdRequest request, /** * 获取集群成员信息 */ + @Override public void getMembers(Pdpb.GetMembersRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -1108,6 +1112,7 @@ public void setGraphSpace(Pdpb.SetGraphSpaceRequest request, * 数据分裂 * */ + @Override public void splitData(Pdpb.SplitDataRequest request, StreamObserver observer) { if (!isLeader()) { @@ -1152,6 +1157,7 @@ public void 
splitGraphData(Pdpb.SplitGraphDataRequest request, /** * 在 store 之间平衡数据 */ + @Override public void movePartition(Pdpb.MovePartitionRequest request, StreamObserver observer) { if (!isLeader()) { @@ -1177,6 +1183,7 @@ public void movePartition(Pdpb.MovePartitionRequest request, * 获取集群健康状态 * */ + @Override public void getClusterStats(Pdpb.GetClusterStatsRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -1196,6 +1203,7 @@ public void getClusterStats(Pdpb.GetClusterStatsRequest request, * 汇报分区分裂等任务执行结果 * */ + @Override public void reportTask(Pdpb.ReportTaskRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -1216,6 +1224,7 @@ public void reportTask(Pdpb.ReportTaskRequest request, /** * */ + @Override public void getPartitionStats(Pdpb.GetPartitionStatsRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -1239,6 +1248,7 @@ public void getPartitionStats(Pdpb.GetPartitionStatsRequest request, observer.onCompleted(); } + @Override public boolean isLeader() { return RaftEngine.getInstance().isLeader(); } @@ -1303,19 +1313,17 @@ public void changePeerList(Pdpb.ChangePeerListRequest request, public synchronized void onRaftLeaderChanged() { log.info("onLeaderChanged"); // channel = null; + // TODO: uncomment later //if (licenseVerifierService == null) { // licenseVerifierService = new LicenseVerifierService(pdConfig); //} //licenseVerifierService.init(); - if (!isLeader()) { - try { - // 关闭 Client 通知,Client 重新向 Leader 发起连接 - String message = "lose leader"; - PDPulseSubject.notifyError(message); - PDWatchSubject.notifyError(message); - } catch (Exception e) { - log.error("onRaftLeaderChanged, got error:", e); - } + + try { + PDWatchSubject.notifyNodeChange(NodeEventType.NODE_EVENT_TYPE_PD_LEADER_CHANGE, + RaftEngine.getInstance().getLeaderGrpcAddress(), 0L); + } catch (ExecutionException | InterruptedException e) { + log.error("failed to notice client", e); } } @@ -1332,7 +1340,7 @@ public void balanceLeaders(Pdpb.BalanceLeadersRequest request, taskService.balancePartitionLeader(true); response = Pdpb.BalanceLeadersResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { - log.error("balance Leaders exception {}", e); + log.error("balance Leaders exception: ", e); response = Pdpb.BalanceLeadersResponse.newBuilder().setHeader(newErrorHeader(e)).build(); } @@ -1471,6 +1479,7 @@ public boolean isResourceEnough(int oldShardCount, int newShardCount) { * 对 rocksdb 进行 compaction * */ + @Override public void dbCompaction(Pdpb.DbCompactionRequest request, StreamObserver observer) { if (!isLeader()) { @@ -1555,6 +1564,7 @@ public void deleteShardGroup(Pdpb.DeleteShardGroupRequest request, observer.onCompleted(); } + @Override public void getShardGroup(Pdpb.GetShardGroupRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -1584,11 +1594,17 @@ public void updateShardGroup(Pdpb.UpdateShardGroupRequest request, return; } Pdpb.UpdateShardGroupResponse response; - var group = request.getShardGroup(); - storeNodeService.updateShardGroup(group.getId(), group.getShardsList(), - group.getVersion(), group.getConfVer()); - response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(okHeader).build(); + try { + var group = request.getShardGroup(); + storeNodeService.updateShardGroup(group.getId(), group.getShardsList(), + group.getVersion(), group.getConfVer()); + response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + 
log.error("updateShardGroup exception, ", e); + response = + Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + } responseObserver.onNext(response); responseObserver.onCompleted(); @@ -1638,6 +1654,7 @@ public void changeShard(Pdpb.ChangeShardRequest request, observer.onCompleted(); } + @Override public void updatePdRaft(Pdpb.UpdatePdRaftRequest request, StreamObserver observer) { if (!isLeader()) { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java index 658673a736..5035e453b5 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java @@ -22,10 +22,8 @@ import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.Pdpb; -import org.apache.hugegraph.pd.pulse.PDPulseSubject; import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.raft.RaftStateListener; -import org.apache.hugegraph.pd.watch.PDWatchSubject; import io.grpc.CallOptions; import io.grpc.ManagedChannel; @@ -33,7 +31,6 @@ import io.grpc.MethodDescriptor; /** - * @author zhangyingjie * @date 2022/6/21 **/ public interface ServiceGrpc extends RaftStateListener { @@ -64,14 +61,16 @@ default void redirectToLeader(ManagedChannel channel, io.grpc.stub.StreamObserver observer) { try { String address = RaftEngine.getInstance().getLeaderGrpcAddress(); - if ((channel = channels.get(address)) == null || channel.isTerminated() || channel.isShutdown()) { + if ((channel = channels.get(address)) == null || channel.isTerminated() || + channel.isShutdown()) { synchronized (ServiceGrpc.class) { if ((channel = channels.get(address)) == null || channel.isTerminated() || channel.isShutdown()) { while (channel != null && channel.isShutdown() && !channel.isTerminated()) { channel.awaitTermination(50, TimeUnit.MILLISECONDS); } - ManagedChannel c = ManagedChannelBuilder.forTarget(address).usePlaintext().build(); + ManagedChannel c = + ManagedChannelBuilder.forTarget(address).usePlaintext().build(); channels.put(address, c); channel = c; } @@ -86,23 +85,13 @@ default void redirectToLeader(ManagedChannel channel, } default void redirectToLeader(MethodDescriptor method, - ReqT req, io.grpc.stub.StreamObserver observer) { + ReqT req, + io.grpc.stub.StreamObserver observer) { redirectToLeader(null, method, req, observer); } @Override default void onRaftLeaderChanged() { - synchronized (this) { - if (!isLeader()) { - try { - String message = "lose leader"; - PDPulseSubject.notifyError(message); - PDWatchSubject.notifyError(message); - } catch (Exception e) { - e.printStackTrace(); - } - } - } } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java index e9bbd045b6..5435b5b902 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java @@ -25,7 +25,6 @@ import org.apache.hugegraph.pd.grpc.Pdpb; /** - * @author zhangyingjie * @date 2022/3/23 **/ public class DateUtil { diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java index 2b6994a316..c3f94821e2 100644 --- 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java @@ -77,7 +77,7 @@ void removeObserver(Long watcherId, StreamObserver responseObserv abstract String toNoticeString(WatchResponse res); - public void notifyError(String message) { + public void notifyError(int code, String message){ synchronized (lock) { Iterator>> iter = watcherHolder.entrySet().iterator(); @@ -86,13 +86,10 @@ public void notifyError(String message) { Long watcherId = entry.getKey(); WatchResponse res = this.builder.setWatcherId(watcherId).build(); try { - entry.getValue().onError( - Status.PERMISSION_DENIED.withDescription(message).asRuntimeException()); + entry.getValue().onError(Status.fromCodeValue(code).withDescription(message).asRuntimeException()); } catch (Throwable e) { - //log.error("Failed to send " + this.watchType.name() + "'s error message [" - // + toNoticeString(res) + // log.error("Failed to send " + this.watchType.name() + "'s error message [" + toNoticeString(res) // + "] to watcher[" + watcherId + "].", e); - } } } diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java index 060b3353b3..9130651d6c 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java @@ -33,6 +33,8 @@ import org.apache.hugegraph.pd.grpc.kv.WatchResponse; import org.apache.hugegraph.pd.grpc.kv.WatchState; import org.apache.hugegraph.pd.grpc.kv.WatchType; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.store.RaftKVStore; import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; @@ -41,7 +43,6 @@ /** * watch订阅、响应处理类 * - * @author zhangyingjie * @date 2022/6/21 **/ @Slf4j @@ -234,8 +235,17 @@ public void keepClientAlive() { private void removeClient(StreamObserver value, String key, String clientKey) { try { - log.info("remove null observer, client {}", clientKey); - kvService.deleteWithPrefix(clientKey); + log.info("remove null observer,client:", clientKey); + if (RaftEngine.getInstance().isLeader()) { + kvService.deleteWithPrefix(clientKey); + } else { + // todo: delete records via client + var store = kvService.getMeta().getStore(); + if (store instanceof RaftKVStore) { + ((RaftKVStore) store).doRemoveByPrefix(kvService.getStoreKey(clientKey)); + } + } + if (value != null) { synchronized (value) { value.onCompleted(); diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java index 77ad0542e1..4e8b50fd13 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java @@ -39,11 +39,10 @@ final class NodeChangeSubject extends AbstractWatchSubject { @Override String toNoticeString(WatchResponse res) { - StringBuilder sb = new StringBuilder(); - return sb.append("graph:").append(res.getNodeResponse().getGraph()) - .append(",") - .append("nodeId:").append(res.getNodeResponse().getNodeId()) - .toString(); + String sb = "graph:" + res.getNodeResponse().getGraph() + + "," + + "nodeId:" + res.getNodeResponse().getNodeId(); + return sb; } public void notifyWatcher(NodeEventType nodeEventType, String graph, long 
nodeId) { @@ -61,8 +60,8 @@ public void notifyWatcher(NodeEventType nodeEventType, String graph, long nodeId }); } - public void notifyError(String message) { - super.notifyError(message); + @Override + public void notifyError(int code, String message) { + super.notifyError(code, message); } - -} \ No newline at end of file +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java index 8c18f7d729..cf3e6df06e 100644 --- a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java @@ -104,9 +104,9 @@ public static void notifyChange(WatchType type, subjectHolder.get(type.name()).notifyWatcher(builder); } - public static void notifyError(String message) { - subjectHolder.forEach((k, v) -> { - v.notifyError(message); + public static void notifyError(int code, String message){ + subjectHolder.forEach((k, v)->{ + v.notifyError(code, message); }); } diff --git a/hg-pd-service/src/test/java/live/PDServer0.java b/hg-pd-service/src/test/java/live/PDServer0.java index 1ca4e30b08..ccb9724892 100644 --- a/hg-pd-service/src/test/java/live/PDServer0.java +++ b/hg-pd-service/src/test/java/live/PDServer0.java @@ -25,7 +25,6 @@ import org.springframework.boot.SpringApplication; /** - * @author zhangyingjie * @date 2022/1/9 **/ public class PDServer0 { diff --git a/hg-pd-service/src/test/java/live/PDServer1.java b/hg-pd-service/src/test/java/live/PDServer1.java index c9219e8bc2..88f722523f 100644 --- a/hg-pd-service/src/test/java/live/PDServer1.java +++ b/hg-pd-service/src/test/java/live/PDServer1.java @@ -25,7 +25,6 @@ import org.springframework.boot.SpringApplication; /** - * @author zhangyingjie * @date 2022/1/9 **/ public class PDServer1 { diff --git a/hg-pd-service/src/test/java/live/PDServer2.java b/hg-pd-service/src/test/java/live/PDServer2.java index fb9c9bf024..bc6c67e87c 100644 --- a/hg-pd-service/src/test/java/live/PDServer2.java +++ b/hg-pd-service/src/test/java/live/PDServer2.java @@ -25,7 +25,6 @@ import org.springframework.boot.SpringApplication; /** - * @author zhangyingjie * @date 2022/1/9 **/ public class PDServer2 { diff --git a/hg-pd-service/src/test/java/live/PDServer3.java b/hg-pd-service/src/test/java/live/PDServer3.java index 8807eaeccd..4603d59295 100644 --- a/hg-pd-service/src/test/java/live/PDServer3.java +++ b/hg-pd-service/src/test/java/live/PDServer3.java @@ -25,7 +25,6 @@ import org.springframework.boot.SpringApplication; /** - * @author zhangyingjie * @date 2022/1/9 **/ public class PDServer3 { diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java index d0bc7a4cbd..552abfcfc3 100644 --- a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java @@ -247,7 +247,7 @@ public void testGetWatchClient() { @Test public void testGetPulseClient() { - pdClient.getPulseClient(); + // pdClient.getPulseClient(); } @Test From a37177207cb79df62b39361cd70985ee290a1cbd Mon Sep 17 00:00:00 2001 From: imbajin Date: Tue, 11 Jul 2023 19:41:59 +0800 Subject: [PATCH 14/18] chore: enhance pom & start file --------- Co-authored-by: V_Galaxy <1904821183@qq.com> --- hg-pd-dist/pom.xml | 7 +++++-- hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh | 5 ++--- 
hg-pd-dist/src/assembly/static/conf/application.yml | 7 +++++-- pom.xml | 5 +++-- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/hg-pd-dist/pom.xml b/hg-pd-dist/pom.xml index 602ba4f6b7..b505e54d94 100644 --- a/hg-pd-dist/pom.xml +++ b/hg-pd-dist/pom.xml @@ -107,11 +107,10 @@ - tar -zcvf \ + tar zcvf \ ${dist.dir}/${final.name}.tar.gz \ ${final.name} || exit 1 rm -f ${dist.dir}/dist.sh - rm -rf ${dist.dir}/${final.name} echo echo "HugeGraph dist tar.gz available at: ${dist.dir}/${final.name}.tar.gz" @@ -122,6 +121,10 @@ + + + +
diff --git a/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh b/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh index 8ccd5fc279..df44dd1078 100644 --- a/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh +++ b/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh @@ -28,7 +28,6 @@ while getopts "g:j:v" arg; do case ${arg} in g) GC_OPTION="$OPTARG" ;; j) USER_OPTION="$OPTARG" ;; - v) VERBOSE="verbose" ;; ?) echo "USAGE: $0 [-g g1] [-j xxx] [-v]" && exit 1 ;; esac done @@ -61,7 +60,7 @@ MIN_MEM=$((1 * 512)) EXPECT_JDK_VERSION=11 # Change to $BIN's parent -cd ${TOP} +cd "${TOP}" || exit # Find Java if [ "$JAVA_HOME" = "" ]; then @@ -116,7 +115,7 @@ echo "Starting HugeGraphPDServer..." # Turn on security check exec ${JAVA} ${JAVA_OPTIONS} -jar -Dspring.config.location=${CONF}/application.yml \ - ${LIB}/hugegraph-pd-3.6.5-SNAPSHOT.jar >> ${OUTPUT} 2>&1 & + ${LIB}/hugegraph-pd-*.jar >> ${OUTPUT} 2>&1 & PID="$!" # Write pid to file diff --git a/hg-pd-dist/src/assembly/static/conf/application.yml b/hg-pd-dist/src/assembly/static/conf/application.yml index c1ed575e74..dfd2d14456 100644 --- a/hg-pd-dist/src/assembly/static/conf/application.yml +++ b/hg-pd-dist/src/assembly/static/conf/application.yml @@ -32,11 +32,12 @@ pd: # Check period for automatic expansion: periodically check the partition count of each Store and rebalance partition counts automatically patrol-interval: 1800 # Initial store list (grpc IP:grpc port); stores in this list are activated automatically + # NOTE: set to one addr when in stand-alone mode initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 raft: # Local raft service address address: 127.0.0.1:8610 - # PD cluster service addresses + # PD cluster service addresses NOTE: set to one addr when in stand-alone mode peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612 store: @@ -49,9 +50,11 @@ store: monitor_data_interval: 1 minute # Retention time of monitoring data is 1 day; units: day, month, year monitor_data_retention: 1 day + # NOTE: set to 1 when in stand-alone mode + # initial-store-count: 1 partition: - # Default number of shard replicas per partition + # Default number of shard replicas per partition, set to 1 when in stand-alone mode default-shard-count: 3 # Default maximum number of shards per machine; initial partition count = store-max-shard-count * store-number / default-shard-count store-max-shard-count: 12 diff --git a/pom.xml b/pom.xml index 6ec9ef0032..4d429c430e 100644 --- a/pom.xml +++ b/pom.xml @@ -147,11 +147,12 @@ *.tar *.tar.gz .flattened-pom.xml - + + false - + From 7c325c5efd96a81beb68868ccefa1ff9381c46cd Mon Sep 17 00:00:00 2001 From: V_Galaxy <1904821183@qq.com> Date: Tue, 1 Aug 2023 17:09:10 +0800 Subject: [PATCH 15/18] refact: prepare before merge --- .DS_Store => hugegraph-pd/.DS_Store | Bin .gitattributes => hugegraph-pd/.gitattributes | 0 .gitignore => hugegraph-pd/.gitignore | 0 .../.mvn}/wrapper/MavenWrapperDownloader.java | 0 .../.mvn}/wrapper/maven-wrapper.jar | Bin .../.mvn}/wrapper/maven-wrapper.properties | 0 README.md => hugegraph-pd/README.md | 0 build-pre.sh => hugegraph-pd/build-pre.sh | 0 build.sh => hugegraph-pd/build.sh | 0 ci.yml => hugegraph-pd/ci.yml | 0 {conf => hugegraph-pd/conf}/hugegraph.license | Bin {conf => hugegraph-pd/conf}/verify-license.json | 0 deploy-release.sh => hugegraph-pd/deploy-release.sh | 0 .../deploy-snapshot.sh | 0 {hg-pd-client => hugegraph-pd/hg-pd-client}/pom.xml | 0 .../apache/hugegraph/pd/client/AbstractClient.java | 0 .../pd/client/AbstractClientStubProxy.java | 0 .../org/apache/hugegraph/pd/client/Channels.java | 0 .../org/apache/hugegraph/pd/client/ClientCache.java | 0 .../apache/hugegraph/pd/client/Discoverable.java | 0 .../apache/hugegraph/pd/client/DiscoveryClient.java | 0 .../hugegraph/pd/client/DiscoveryClientImpl.java | 0 .../org/apache/hugegraph/pd/client/KvClient.java | 0 
.../apache/hugegraph/pd/client/LicenseClient.java | 0 .../org/apache/hugegraph/pd/client/PDClient.java | 0 .../org/apache/hugegraph/pd/client/PDConfig.java | 0 .../org/apache/hugegraph/pd/client/PDPulse.java | 0 .../org/apache/hugegraph/pd/client/PDPulseImpl.java | 0 .../org/apache/hugegraph/pd/client/PDWatch.java | 0 .../org/apache/hugegraph/pd/client/PDWatchImpl.java | 0 .../apache/hugegraph/pd/pulse/PartitionNotice.java | 0 .../hugegraph/pd/pulse/PulseServerNotice.java | 0 .../org/apache/hugegraph/pd/watch/NodeEvent.java | 0 .../org/apache/hugegraph/pd/watch/PDWatcher.java | 0 .../apache/hugegraph/pd/watch/PartitionEvent.java | 0 .../org/apache/hugegraph/pd/watch/WatchType.java | 0 .../org/apache/hugegraph/pd/PartitionCacheTest.java | 0 .../org/apache/hugegraph/pd/StoreRegisterTest.java | 0 .../pd/client/DiscoveryClientImplTest.java | 0 .../hugegraph/pd/client/LicenseClientImplTest.java | 0 .../org/apache/hugegraph/pd/client/PDPulseTest.java | 0 .../org/apache/hugegraph/pd/client/PDWatchTest.java | 0 .../hugegraph/pd/client/test/HgPDTestUtil.java | 0 .../hg-pd-clitools}/pom.xml | 0 .../java/org/apache/hugegraph/pd/clitools/Main.java | 0 .../org/apache/hugegraph/pd/clitools/MainTest.java | 0 {hg-pd-common => hugegraph-pd/hg-pd-common}/pom.xml | 0 .../org/apache/hugegraph/pd/common/GraphCache.java | 0 .../org/apache/hugegraph/pd/common/HgAssert.java | 0 .../java/org/apache/hugegraph/pd/common/KVPair.java | 0 .../org/apache/hugegraph/pd/common/PDException.java | 0 .../hugegraph/pd/common/PDRuntimeException.java | 0 .../apache/hugegraph/pd/common/PartitionCache.java | 0 .../apache/hugegraph/pd/common/PartitionUtils.java | 0 {hg-pd-core => hugegraph-pd/hg-pd-core}/pom.xml | 0 .../java/org/apache/hugegraph/pd/ConfigService.java | 0 .../java/org/apache/hugegraph/pd/IdService.java | 0 .../java/org/apache/hugegraph/pd/KvService.java | 0 .../java/org/apache/hugegraph/pd/LogService.java | 0 .../hugegraph/pd/PartitionInstructionListener.java | 0 .../org/apache/hugegraph/pd/PartitionService.java | 0 .../hugegraph/pd/PartitionStatusListener.java | 0 .../org/apache/hugegraph/pd/RegistryService.java | 0 .../hugegraph/pd/ShardGroupStatusListener.java | 0 .../hugegraph/pd/StoreMonitorDataService.java | 0 .../org/apache/hugegraph/pd/StoreNodeService.java | 0 .../apache/hugegraph/pd/StoreStatusListener.java | 0 .../apache/hugegraph/pd/TaskScheduleService.java | 0 .../org/apache/hugegraph/pd/config/PDConfig.java | 0 .../apache/hugegraph/pd/meta/ConfigMetaStore.java | 0 .../hugegraph/pd/meta/DiscoveryMetaStore.java | 0 .../org/apache/hugegraph/pd/meta/IdMetaStore.java | 0 .../java/org/apache/hugegraph/pd/meta/LogMeta.java | 0 .../apache/hugegraph/pd/meta/MetadataFactory.java | 0 .../apache/hugegraph/pd/meta/MetadataKeyHelper.java | 0 .../hugegraph/pd/meta/MetadataRocksDBStore.java | 0 .../apache/hugegraph/pd/meta/MetadataStoreBase.java | 0 .../org/apache/hugegraph/pd/meta/PartitionMeta.java | 0 .../org/apache/hugegraph/pd/meta/QueueStore.java | 0 .../org/apache/hugegraph/pd/meta/StoreInfoMeta.java | 0 .../org/apache/hugegraph/pd/meta/TaskInfoMeta.java | 0 .../hugegraph/pd/raft/FutureClosureAdapter.java | 0 .../org/apache/hugegraph/pd/raft/KVOperation.java | 0 .../apache/hugegraph/pd/raft/KVStoreClosure.java | 0 .../org/apache/hugegraph/pd/raft/RaftEngine.java | 0 .../org/apache/hugegraph/pd/raft/RaftRpcClient.java | 0 .../apache/hugegraph/pd/raft/RaftRpcProcessor.java | 0 .../apache/hugegraph/pd/raft/RaftStateListener.java | 0 .../apache/hugegraph/pd/raft/RaftStateMachine.java | 0 
.../apache/hugegraph/pd/raft/RaftTaskHandler.java | 0 .../java/org/apache/hugegraph/pd/raft/ZipUtils.java | 0 .../hugegraph/pd/store/BaseKVStoreClosure.java | 0 .../org/apache/hugegraph/pd/store/HgKVStore.java | 0 .../apache/hugegraph/pd/store/HgKVStoreImpl.java | 0 .../main/java/org/apache/hugegraph/pd/store/KV.java | 0 .../org/apache/hugegraph/pd/store/RaftKVStore.java | 0 .../org/apache/hugegraph/pd/MonitorServiceTest.java | 0 .../apache/hugegraph/pd/PartitionServiceTest.java | 0 .../apache/hugegraph/pd/StoreNodeServiceTest.java | 0 .../java/org/apache/hugegraph/pd/UnitTestBase.java | 0 .../hugegraph/pd/common/PartitionUtilsTest.java | 0 .../hugegraph/pd/store/HgKVStoreImplTest.java | 0 .../hg-pd-core}/src/test/resources/log4j2.xml | 0 {hg-pd-dist => hugegraph-pd/hg-pd-dist}/pom.xml | 0 .../src/assembly/descriptor/server-assembly.xml | 0 .../src/assembly/static/bin/start-hugegraph-pd.sh | 0 .../src/assembly/static/bin/stop-hugegraph-pd.sh | 0 .../hg-pd-dist}/src/assembly/static/bin/util.sh | 0 .../src/assembly/static/conf/application.yml | 0 .../assembly/static/conf/application.yml.template | 0 .../src/assembly/static/conf/hugegraph.license | Bin .../hg-pd-dist}/src/assembly/static/conf/log4j2.xml | 0 .../src/assembly/static/conf/verify-license.json | 0 {hg-pd-grpc => hugegraph-pd/hg-pd-grpc}/pom.xml | 0 .../hg-pd-grpc}/src/main/proto/discovery.proto | 0 .../hg-pd-grpc}/src/main/proto/kv.proto | 0 .../hg-pd-grpc}/src/main/proto/metaTask.proto | 0 .../hg-pd-grpc}/src/main/proto/metapb.proto | 0 .../hg-pd-grpc}/src/main/proto/pd_common.proto | 0 .../hg-pd-grpc}/src/main/proto/pd_pulse.proto | 0 .../hg-pd-grpc}/src/main/proto/pd_watch.proto | 0 .../hg-pd-grpc}/src/main/proto/pdpb.proto | 0 .../hg-pd-service}/pom.xml | 0 .../org/apache/hugegraph/pd/boot/HugePDServer.java | 0 .../pd/license/LicenseVerifierService.java | 0 .../hugegraph/pd/license/LicenseVerifyManager.java | 0 .../apache/hugegraph/pd/metrics/MetricsConfig.java | 0 .../org/apache/hugegraph/pd/metrics/PDMetrics.java | 0 .../org/apache/hugegraph/pd/model/DemoModel.java | 0 .../apache/hugegraph/pd/model/GraphRestRequest.java | 0 .../hugegraph/pd/model/GraphSpaceRestRequest.java | 0 .../apache/hugegraph/pd/model/PeerRestRequest.java | 0 .../apache/hugegraph/pd/model/PromTargetsModel.java | 0 .../pd/model/RegistryQueryRestRequest.java | 0 .../hugegraph/pd/model/RegistryRestRequest.java | 0 .../hugegraph/pd/model/RegistryRestResponse.java | 0 .../apache/hugegraph/pd/model/RestApiResponse.java | 0 .../apache/hugegraph/pd/model/StoreRestRequest.java | 0 .../apache/hugegraph/pd/model/TimeRangeRequest.java | 0 .../hugegraph/pd/notice/NoticeBroadcaster.java | 0 .../hugegraph/pd/pulse/AbstractObserverSubject.java | 0 .../apache/hugegraph/pd/pulse/PDPulseSubject.java | 0 .../pd/pulse/PartitionHeartbeatSubject.java | 0 .../hugegraph/pd/pulse/PdInstructionSubject.java | 0 .../apache/hugegraph/pd/pulse/PulseListener.java | 0 .../main/java/org/apache/hugegraph/pd/rest/API.java | 0 .../java/org/apache/hugegraph/pd/rest/GraphAPI.java | 0 .../org/apache/hugegraph/pd/rest/GraphSpaceAPI.java | 0 .../java/org/apache/hugegraph/pd/rest/IndexAPI.java | 0 .../org/apache/hugegraph/pd/rest/MemberAPI.java | 0 .../org/apache/hugegraph/pd/rest/PartitionAPI.java | 0 .../apache/hugegraph/pd/rest/PromTargetsAPI.java | 0 .../org/apache/hugegraph/pd/rest/RegistryAPI.java | 0 .../java/org/apache/hugegraph/pd/rest/ShardAPI.java | 0 .../java/org/apache/hugegraph/pd/rest/StoreAPI.java | 0 .../java/org/apache/hugegraph/pd/rest/TaskAPI.java | 0 
.../java/org/apache/hugegraph/pd/rest/TestAPI.java | 0 .../hugegraph/pd/service/DiscoveryService.java | 0 .../hugegraph/pd/service/KvServiceGrpcImpl.java | 0 .../apache/hugegraph/pd/service/PDPulseService.java | 0 .../apache/hugegraph/pd/service/PDRestService.java | 0 .../org/apache/hugegraph/pd/service/PDService.java | 0 .../apache/hugegraph/pd/service/PDWatchService.java | 0 .../hugegraph/pd/service/PromTargetsService.java | 0 .../apache/hugegraph/pd/service/ServiceGrpc.java | 0 .../apache/hugegraph/pd/service/UpgradeService.java | 0 .../hugegraph/pd/upgrade/VersionScriptFactory.java | 0 .../hugegraph/pd/upgrade/VersionUpgradeScript.java | 0 .../pd/upgrade/scripts/PartitionMetaUpgrade.java | 0 .../pd/upgrade/scripts/TaskCleanUpgrade.java | 0 .../java/org/apache/hugegraph/pd/util/DateUtil.java | 0 .../apache/hugegraph/pd/util/HgExecutorUtil.java | 0 .../org/apache/hugegraph/pd/util/HgMapCache.java | 0 .../java/org/apache/hugegraph/pd/util/IdUtil.java | 0 .../hugegraph/pd/util/grpc/GRpcServerConfig.java | 0 .../hugegraph/pd/util/grpc/StreamObserverUtil.java | 0 .../hugegraph/pd/watch/AbstractWatchSubject.java | 0 .../apache/hugegraph/pd/watch/KvWatchSubject.java | 0 .../hugegraph/pd/watch/NodeChangeSubject.java | 0 .../apache/hugegraph/pd/watch/PDWatchSubject.java | 0 .../hugegraph/pd/watch/PartitionChangeSubject.java | 0 .../hugegraph/pd/watch/ShardGroupChangeSubject.java | 0 .../src/main/resources/application.yml | 0 .../hg-pd-service}/src/main/resources/banner.txt | 0 .../hg-pd-service}/src/main/resources/log4j2.xml | 0 .../src/main/resources/private-keys.store | Bin .../src/main/resources/public-certs.store | Bin .../src/test/java/live/PDServer0.java | 0 .../src/test/java/live/PDServer1.java | 0 .../src/test/java/live/PDServer2.java | 0 .../src/test/java/live/PDServer3.java | 0 .../src/test/resources/application-server0.yml | 0 .../src/test/resources/application-server1.yml | 0 .../src/test/resources/application-server2.yml | 0 .../src/test/resources/application-server3.yml | 0 .../hg-pd-service}/src/test/resources/banner.txt | 0 .../hg-pd-service}/src/test/resources/log4j2.xml | 0 {hg-pd-test => hugegraph-pd/hg-pd-test}/pom.xml | 0 .../apache/hugegraph/pd/client/BaseClientTest.java | 0 .../hugegraph/pd/client/DiscoveryClientTest.java | 0 .../apache/hugegraph/pd/client/KvClientTest.java | 0 .../hugegraph/pd/client/PDClientSuiteTest.java | 0 .../apache/hugegraph/pd/client/PDClientTest.java | 0 .../hugegraph/pd/clitools/BaseCliToolsTest.java | 0 .../hugegraph/pd/clitools/CliToolsSuiteTest.java | 0 .../org/apache/hugegraph/pd/clitools/MainTest.java | 0 .../apache/hugegraph/pd/common/BaseCommonTest.java | 0 .../apache/hugegraph/pd/common/CommonSuiteTest.java | 0 .../apache/hugegraph/pd/common/HgAssertTest.java | 0 .../org/apache/hugegraph/pd/common/KVPairTest.java | 0 .../hugegraph/pd/common/MetadataKeyHelperTest.java | 0 .../hugegraph/pd/common/PartitionCacheTest.java | 0 .../hugegraph/pd/common/PartitionUtilsTest.java | 0 .../org/apache/hugegraph/pd/core/BaseCoreTest.java | 0 .../apache/hugegraph/pd/core/PDCoreSuiteTest.java | 0 .../hugegraph/pd/core/StoreNodeServiceTest.java | 0 .../pd/core/meta/MetadataKeyHelperTest.java | 0 .../org/apache/hugegraph/pd/grpc/BaseGrpcTest.java | 0 .../org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java | 0 .../apache/hugegraph/pd/service/BaseServerTest.java | 0 .../hugegraph/pd/service/ConfigServiceTest.java | 0 .../apache/hugegraph/pd/service/IdServiceTest.java | 0 .../apache/hugegraph/pd/service/KvServiceTest.java | 0 
.../apache/hugegraph/pd/service/LogServiceTest.java | 0 .../hugegraph/pd/service/PartitionServiceTest.java | 0 .../org/apache/hugegraph/pd/service/PdTestBase.java | 0 .../apache/hugegraph/pd/service/RestApiTest.java | 0 .../hugegraph/pd/service/ServerSuiteTest.java | 0 .../pd/service/StoreMonitorDataServiceTest.java | 0 .../pd/service/StoreNodeServiceNewTest.java | 0 .../hugegraph/pd/service/StoreServiceTest.java | 0 .../pd/service/TaskScheduleServiceTest.java | 0 local-release.sh => hugegraph-pd/local-release.sh | 0 mvnw => hugegraph-pd/mvnw | 0 mvnw.cmd => hugegraph-pd/mvnw.cmd | 0 pom.xml => hugegraph-pd/pom.xml | 0 settings.xml => hugegraph-pd/settings.xml | 0 .../start_pd_server.sh | 0 238 files changed, 0 insertions(+), 0 deletions(-) rename .DS_Store => hugegraph-pd/.DS_Store (100%) rename .gitattributes => hugegraph-pd/.gitattributes (100%) rename .gitignore => hugegraph-pd/.gitignore (100%) rename {.mvn => hugegraph-pd/.mvn}/wrapper/MavenWrapperDownloader.java (100%) rename {.mvn => hugegraph-pd/.mvn}/wrapper/maven-wrapper.jar (100%) rename {.mvn => hugegraph-pd/.mvn}/wrapper/maven-wrapper.properties (100%) rename README.md => hugegraph-pd/README.md (100%) rename build-pre.sh => hugegraph-pd/build-pre.sh (100%) rename build.sh => hugegraph-pd/build.sh (100%) rename ci.yml => hugegraph-pd/ci.yml (100%) rename {conf => hugegraph-pd/conf}/hugegraph.license (100%) rename {conf => hugegraph-pd/conf}/verify-license.json (100%) rename deploy-release.sh => hugegraph-pd/deploy-release.sh (100%) rename deploy-snapshot.sh => hugegraph-pd/deploy-snapshot.sh (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/pom.xml (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/Channels.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/KvClient.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/PDClient.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java (100%) rename {hg-pd-client => 
hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java (100%) rename {hg-pd-client => hugegraph-pd/hg-pd-client}/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java (100%) rename {hg-pd-clitools => hugegraph-pd/hg-pd-clitools}/pom.xml (100%) rename {hg-pd-clitools => hugegraph-pd/hg-pd-clitools}/src/main/java/org/apache/hugegraph/pd/clitools/Main.java (100%) rename {hg-pd-clitools => hugegraph-pd/hg-pd-clitools}/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java (100%) rename {hg-pd-common => hugegraph-pd/hg-pd-common}/pom.xml (100%) rename {hg-pd-common => hugegraph-pd/hg-pd-common}/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java (100%) rename {hg-pd-common => hugegraph-pd/hg-pd-common}/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java (100%) rename {hg-pd-common => hugegraph-pd/hg-pd-common}/src/main/java/org/apache/hugegraph/pd/common/KVPair.java (100%) rename {hg-pd-common => hugegraph-pd/hg-pd-common}/src/main/java/org/apache/hugegraph/pd/common/PDException.java (100%) rename {hg-pd-common => hugegraph-pd/hg-pd-common}/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java (100%) rename {hg-pd-common => hugegraph-pd/hg-pd-common}/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java (100%) rename {hg-pd-common => hugegraph-pd/hg-pd-common}/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/pom.xml (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/ConfigService.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/IdService.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/KvService.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/LogService.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/PartitionService.java (100%) rename {hg-pd-core => 
hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/RegistryService.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java (100%) rename {hg-pd-core => 
hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/store/KV.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java (100%) rename {hg-pd-core => hugegraph-pd/hg-pd-core}/src/test/resources/log4j2.xml (100%) rename {hg-pd-dist => hugegraph-pd/hg-pd-dist}/pom.xml (100%) rename {hg-pd-dist => hugegraph-pd/hg-pd-dist}/src/assembly/descriptor/server-assembly.xml (100%) rename {hg-pd-dist => hugegraph-pd/hg-pd-dist}/src/assembly/static/bin/start-hugegraph-pd.sh (100%) rename {hg-pd-dist => hugegraph-pd/hg-pd-dist}/src/assembly/static/bin/stop-hugegraph-pd.sh (100%) rename {hg-pd-dist => hugegraph-pd/hg-pd-dist}/src/assembly/static/bin/util.sh (100%) rename {hg-pd-dist => hugegraph-pd/hg-pd-dist}/src/assembly/static/conf/application.yml (100%) rename {hg-pd-dist => hugegraph-pd/hg-pd-dist}/src/assembly/static/conf/application.yml.template (100%) rename {hg-pd-dist => hugegraph-pd/hg-pd-dist}/src/assembly/static/conf/hugegraph.license (100%) rename {hg-pd-dist => hugegraph-pd/hg-pd-dist}/src/assembly/static/conf/log4j2.xml (100%) rename {hg-pd-dist => hugegraph-pd/hg-pd-dist}/src/assembly/static/conf/verify-license.json (100%) rename {hg-pd-grpc => hugegraph-pd/hg-pd-grpc}/pom.xml (100%) rename {hg-pd-grpc => hugegraph-pd/hg-pd-grpc}/src/main/proto/discovery.proto (100%) rename {hg-pd-grpc => hugegraph-pd/hg-pd-grpc}/src/main/proto/kv.proto (100%) rename {hg-pd-grpc => hugegraph-pd/hg-pd-grpc}/src/main/proto/metaTask.proto (100%) rename {hg-pd-grpc => hugegraph-pd/hg-pd-grpc}/src/main/proto/metapb.proto (100%) rename {hg-pd-grpc => hugegraph-pd/hg-pd-grpc}/src/main/proto/pd_common.proto (100%) rename {hg-pd-grpc => hugegraph-pd/hg-pd-grpc}/src/main/proto/pd_pulse.proto (100%) rename {hg-pd-grpc => hugegraph-pd/hg-pd-grpc}/src/main/proto/pd_watch.proto (100%) rename {hg-pd-grpc => hugegraph-pd/hg-pd-grpc}/src/main/proto/pdpb.proto (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/pom.xml (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java (100%) rename {hg-pd-service => 
hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/API.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java (100%) rename {hg-pd-service => 
hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/service/PDService.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java (100%) rename {hg-pd-service => 
hugegraph-pd/hg-pd-service}/src/main/resources/application.yml (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/resources/banner.txt (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/resources/log4j2.xml (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/resources/private-keys.store (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/main/resources/public-certs.store (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/test/java/live/PDServer0.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/test/java/live/PDServer1.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/test/java/live/PDServer2.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/test/java/live/PDServer3.java (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/test/resources/application-server0.yml (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/test/resources/application-server1.yml (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/test/resources/application-server2.yml (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/test/resources/application-server3.yml (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/test/resources/banner.txt (100%) rename {hg-pd-service => hugegraph-pd/hg-pd-service}/src/test/resources/log4j2.xml (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/pom.xml (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java (100%) rename {hg-pd-test => 
hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java (100%) rename {hg-pd-test => hugegraph-pd/hg-pd-test}/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java (100%) rename local-release.sh => hugegraph-pd/local-release.sh (100%) rename mvnw => hugegraph-pd/mvnw (100%) rename mvnw.cmd => hugegraph-pd/mvnw.cmd (100%) rename pom.xml => hugegraph-pd/pom.xml (100%) rename settings.xml => hugegraph-pd/settings.xml (100%) rename start_pd_server.sh => hugegraph-pd/start_pd_server.sh (100%) diff --git a/.DS_Store b/hugegraph-pd/.DS_Store similarity index 100% rename from .DS_Store rename to hugegraph-pd/.DS_Store diff --git a/.gitattributes b/hugegraph-pd/.gitattributes similarity index 100% rename from .gitattributes rename to hugegraph-pd/.gitattributes diff --git a/.gitignore b/hugegraph-pd/.gitignore similarity index 100% rename from .gitignore rename to hugegraph-pd/.gitignore diff --git a/.mvn/wrapper/MavenWrapperDownloader.java b/hugegraph-pd/.mvn/wrapper/MavenWrapperDownloader.java similarity index 100% rename from .mvn/wrapper/MavenWrapperDownloader.java rename to hugegraph-pd/.mvn/wrapper/MavenWrapperDownloader.java diff --git a/.mvn/wrapper/maven-wrapper.jar b/hugegraph-pd/.mvn/wrapper/maven-wrapper.jar similarity index 100% rename from .mvn/wrapper/maven-wrapper.jar rename to hugegraph-pd/.mvn/wrapper/maven-wrapper.jar diff --git a/.mvn/wrapper/maven-wrapper.properties b/hugegraph-pd/.mvn/wrapper/maven-wrapper.properties similarity index 100% rename from .mvn/wrapper/maven-wrapper.properties rename to hugegraph-pd/.mvn/wrapper/maven-wrapper.properties diff --git a/README.md b/hugegraph-pd/README.md similarity index 100% rename from 
README.md rename to hugegraph-pd/README.md diff --git a/build-pre.sh b/hugegraph-pd/build-pre.sh similarity index 100% rename from build-pre.sh rename to hugegraph-pd/build-pre.sh diff --git a/build.sh b/hugegraph-pd/build.sh similarity index 100% rename from build.sh rename to hugegraph-pd/build.sh diff --git a/ci.yml b/hugegraph-pd/ci.yml similarity index 100% rename from ci.yml rename to hugegraph-pd/ci.yml diff --git a/conf/hugegraph.license b/hugegraph-pd/conf/hugegraph.license similarity index 100% rename from conf/hugegraph.license rename to hugegraph-pd/conf/hugegraph.license diff --git a/conf/verify-license.json b/hugegraph-pd/conf/verify-license.json similarity index 100% rename from conf/verify-license.json rename to hugegraph-pd/conf/verify-license.json diff --git a/deploy-release.sh b/hugegraph-pd/deploy-release.sh similarity index 100% rename from deploy-release.sh rename to hugegraph-pd/deploy-release.sh diff --git a/deploy-snapshot.sh b/hugegraph-pd/deploy-snapshot.sh similarity index 100% rename from deploy-snapshot.sh rename to hugegraph-pd/deploy-snapshot.sh diff --git a/hg-pd-client/pom.xml b/hugegraph-pd/hg-pd-client/pom.xml similarity index 100% rename from hg-pd-client/pom.xml rename to hugegraph-pd/hg-pd-client/pom.xml diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java rename to 
hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java diff --git 
a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java similarity index 100% rename from hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java rename to hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java similarity index 100% rename from hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java rename to hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java similarity index 100% rename from hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java rename to hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java similarity index 100% rename from hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java rename to hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java diff --git 
a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java similarity index 100% rename from hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java rename to hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java similarity index 100% rename from hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java rename to hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java similarity index 100% rename from hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java rename to hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDWatchTest.java diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java similarity index 100% rename from hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java rename to hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java diff --git a/hg-pd-clitools/pom.xml b/hugegraph-pd/hg-pd-clitools/pom.xml similarity index 100% rename from hg-pd-clitools/pom.xml rename to hugegraph-pd/hg-pd-clitools/pom.xml diff --git a/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java b/hugegraph-pd/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java similarity index 100% rename from hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java rename to hugegraph-pd/hg-pd-clitools/src/main/java/org/apache/hugegraph/pd/clitools/Main.java diff --git a/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java b/hugegraph-pd/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java similarity index 100% rename from hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java rename to hugegraph-pd/hg-pd-clitools/src/test/java/org/apache/hugegraph/pd/clitools/MainTest.java diff --git a/hg-pd-common/pom.xml b/hugegraph-pd/hg-pd-common/pom.xml similarity index 100% rename from hg-pd-common/pom.xml rename to hugegraph-pd/hg-pd-common/pom.xml diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java similarity index 100% rename from hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java rename to hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java similarity index 100% rename from hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java rename to hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java 
b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java similarity index 100% rename from hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java rename to hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java similarity index 100% rename from hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java rename to hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java similarity index 100% rename from hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java rename to hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java similarity index 100% rename from hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java rename to hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java similarity index 100% rename from hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java rename to hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java diff --git a/hg-pd-core/pom.xml b/hugegraph-pd/hg-pd-core/pom.xml similarity index 100% rename from hg-pd-core/pom.xml rename to hugegraph-pd/hg-pd-core/pom.xml diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java 
b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionInstructionListener.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionStatusListener.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ShardGroupStatusListener.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreStatusListener.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java 
similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java similarity index 100% rename from 
hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java rename to 
hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java similarity index 100% rename from hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java rename to hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java diff --git 
a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java similarity index 100% rename from hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java rename to hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java similarity index 100% rename from hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java rename to hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java similarity index 100% rename from hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java rename to hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java similarity index 100% rename from hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java rename to hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java similarity index 100% rename from hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java rename to hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java b/hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java similarity index 100% rename from hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java rename to hugegraph-pd/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java diff --git a/hg-pd-core/src/test/resources/log4j2.xml b/hugegraph-pd/hg-pd-core/src/test/resources/log4j2.xml similarity index 100% rename from hg-pd-core/src/test/resources/log4j2.xml rename to hugegraph-pd/hg-pd-core/src/test/resources/log4j2.xml diff --git a/hg-pd-dist/pom.xml b/hugegraph-pd/hg-pd-dist/pom.xml similarity index 100% rename from hg-pd-dist/pom.xml rename to hugegraph-pd/hg-pd-dist/pom.xml diff --git a/hg-pd-dist/src/assembly/descriptor/server-assembly.xml b/hugegraph-pd/hg-pd-dist/src/assembly/descriptor/server-assembly.xml similarity index 100% rename from hg-pd-dist/src/assembly/descriptor/server-assembly.xml rename to hugegraph-pd/hg-pd-dist/src/assembly/descriptor/server-assembly.xml diff --git a/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh b/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh similarity index 100% rename from hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh rename to hugegraph-pd/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh diff --git a/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh b/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh similarity index 100% rename from hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh 
rename to hugegraph-pd/hg-pd-dist/src/assembly/static/bin/stop-hugegraph-pd.sh diff --git a/hg-pd-dist/src/assembly/static/bin/util.sh b/hugegraph-pd/hg-pd-dist/src/assembly/static/bin/util.sh similarity index 100% rename from hg-pd-dist/src/assembly/static/bin/util.sh rename to hugegraph-pd/hg-pd-dist/src/assembly/static/bin/util.sh diff --git a/hg-pd-dist/src/assembly/static/conf/application.yml b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml similarity index 100% rename from hg-pd-dist/src/assembly/static/conf/application.yml rename to hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml diff --git a/hg-pd-dist/src/assembly/static/conf/application.yml.template b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml.template similarity index 100% rename from hg-pd-dist/src/assembly/static/conf/application.yml.template rename to hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml.template diff --git a/hg-pd-dist/src/assembly/static/conf/hugegraph.license b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/hugegraph.license similarity index 100% rename from hg-pd-dist/src/assembly/static/conf/hugegraph.license rename to hugegraph-pd/hg-pd-dist/src/assembly/static/conf/hugegraph.license diff --git a/hg-pd-dist/src/assembly/static/conf/log4j2.xml b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/log4j2.xml similarity index 100% rename from hg-pd-dist/src/assembly/static/conf/log4j2.xml rename to hugegraph-pd/hg-pd-dist/src/assembly/static/conf/log4j2.xml diff --git a/hg-pd-dist/src/assembly/static/conf/verify-license.json b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/verify-license.json similarity index 100% rename from hg-pd-dist/src/assembly/static/conf/verify-license.json rename to hugegraph-pd/hg-pd-dist/src/assembly/static/conf/verify-license.json diff --git a/hg-pd-grpc/pom.xml b/hugegraph-pd/hg-pd-grpc/pom.xml similarity index 100% rename from hg-pd-grpc/pom.xml rename to hugegraph-pd/hg-pd-grpc/pom.xml diff --git a/hg-pd-grpc/src/main/proto/discovery.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto similarity index 100% rename from hg-pd-grpc/src/main/proto/discovery.proto rename to hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto diff --git a/hg-pd-grpc/src/main/proto/kv.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto similarity index 100% rename from hg-pd-grpc/src/main/proto/kv.proto rename to hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto diff --git a/hg-pd-grpc/src/main/proto/metaTask.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto similarity index 100% rename from hg-pd-grpc/src/main/proto/metaTask.proto rename to hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto diff --git a/hg-pd-grpc/src/main/proto/metapb.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto similarity index 100% rename from hg-pd-grpc/src/main/proto/metapb.proto rename to hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto diff --git a/hg-pd-grpc/src/main/proto/pd_common.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto similarity index 100% rename from hg-pd-grpc/src/main/proto/pd_common.proto rename to hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto diff --git a/hg-pd-grpc/src/main/proto/pd_pulse.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto similarity index 100% rename from hg-pd-grpc/src/main/proto/pd_pulse.proto rename to hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto diff --git a/hg-pd-grpc/src/main/proto/pd_watch.proto 
b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto similarity index 100% rename from hg-pd-grpc/src/main/proto/pd_watch.proto rename to hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto diff --git a/hg-pd-grpc/src/main/proto/pdpb.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto similarity index 100% rename from hg-pd-grpc/src/main/proto/pdpb.proto rename to hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto diff --git a/hg-pd-service/pom.xml b/hugegraph-pd/hg-pd-service/pom.xml similarity index 100% rename from hg-pd-service/pom.xml rename to hugegraph-pd/hg-pd-service/pom.xml diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java 
b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java rename to 
hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java similarity index 100% rename from 
hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java diff --git 
a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java rename to 
hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java 
similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java similarity index 100% rename from hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java rename to hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/ShardGroupChangeSubject.java diff --git a/hg-pd-service/src/main/resources/application.yml b/hugegraph-pd/hg-pd-service/src/main/resources/application.yml similarity index 100% rename from hg-pd-service/src/main/resources/application.yml rename to hugegraph-pd/hg-pd-service/src/main/resources/application.yml diff --git a/hg-pd-service/src/main/resources/banner.txt b/hugegraph-pd/hg-pd-service/src/main/resources/banner.txt similarity index 
100% rename from hg-pd-service/src/main/resources/banner.txt rename to hugegraph-pd/hg-pd-service/src/main/resources/banner.txt diff --git a/hg-pd-service/src/main/resources/log4j2.xml b/hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml similarity index 100% rename from hg-pd-service/src/main/resources/log4j2.xml rename to hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml diff --git a/hg-pd-service/src/main/resources/private-keys.store b/hugegraph-pd/hg-pd-service/src/main/resources/private-keys.store similarity index 100% rename from hg-pd-service/src/main/resources/private-keys.store rename to hugegraph-pd/hg-pd-service/src/main/resources/private-keys.store diff --git a/hg-pd-service/src/main/resources/public-certs.store b/hugegraph-pd/hg-pd-service/src/main/resources/public-certs.store similarity index 100% rename from hg-pd-service/src/main/resources/public-certs.store rename to hugegraph-pd/hg-pd-service/src/main/resources/public-certs.store diff --git a/hg-pd-service/src/test/java/live/PDServer0.java b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer0.java similarity index 100% rename from hg-pd-service/src/test/java/live/PDServer0.java rename to hugegraph-pd/hg-pd-service/src/test/java/live/PDServer0.java diff --git a/hg-pd-service/src/test/java/live/PDServer1.java b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer1.java similarity index 100% rename from hg-pd-service/src/test/java/live/PDServer1.java rename to hugegraph-pd/hg-pd-service/src/test/java/live/PDServer1.java diff --git a/hg-pd-service/src/test/java/live/PDServer2.java b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer2.java similarity index 100% rename from hg-pd-service/src/test/java/live/PDServer2.java rename to hugegraph-pd/hg-pd-service/src/test/java/live/PDServer2.java diff --git a/hg-pd-service/src/test/java/live/PDServer3.java b/hugegraph-pd/hg-pd-service/src/test/java/live/PDServer3.java similarity index 100% rename from hg-pd-service/src/test/java/live/PDServer3.java rename to hugegraph-pd/hg-pd-service/src/test/java/live/PDServer3.java diff --git a/hg-pd-service/src/test/resources/application-server0.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml similarity index 100% rename from hg-pd-service/src/test/resources/application-server0.yml rename to hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml diff --git a/hg-pd-service/src/test/resources/application-server1.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml similarity index 100% rename from hg-pd-service/src/test/resources/application-server1.yml rename to hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml diff --git a/hg-pd-service/src/test/resources/application-server2.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml similarity index 100% rename from hg-pd-service/src/test/resources/application-server2.yml rename to hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml diff --git a/hg-pd-service/src/test/resources/application-server3.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml similarity index 100% rename from hg-pd-service/src/test/resources/application-server3.yml rename to hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml diff --git a/hg-pd-service/src/test/resources/banner.txt b/hugegraph-pd/hg-pd-service/src/test/resources/banner.txt similarity index 100% rename from hg-pd-service/src/test/resources/banner.txt rename to 
hugegraph-pd/hg-pd-service/src/test/resources/banner.txt diff --git a/hg-pd-service/src/test/resources/log4j2.xml b/hugegraph-pd/hg-pd-service/src/test/resources/log4j2.xml similarity index 100% rename from hg-pd-service/src/test/resources/log4j2.xml rename to hugegraph-pd/hg-pd-service/src/test/resources/log4j2.xml diff --git a/hg-pd-test/pom.xml b/hugegraph-pd/hg-pd-test/pom.xml similarity index 100% rename from hg-pd-test/pom.xml rename to hugegraph-pd/hg-pd-test/pom.xml diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/BaseCliToolsTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/CliToolsSuiteTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java rename to 
hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/clitools/MainTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java rename to 
hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java rename to 
hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java similarity index 100% rename from hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java rename to hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java diff --git a/local-release.sh b/hugegraph-pd/local-release.sh similarity index 100% rename from local-release.sh rename to hugegraph-pd/local-release.sh diff --git a/mvnw 
b/hugegraph-pd/mvnw similarity index 100% rename from mvnw rename to hugegraph-pd/mvnw diff --git a/mvnw.cmd b/hugegraph-pd/mvnw.cmd similarity index 100% rename from mvnw.cmd rename to hugegraph-pd/mvnw.cmd diff --git a/pom.xml b/hugegraph-pd/pom.xml similarity index 100% rename from pom.xml rename to hugegraph-pd/pom.xml diff --git a/settings.xml b/hugegraph-pd/settings.xml similarity index 100% rename from settings.xml rename to hugegraph-pd/settings.xml diff --git a/start_pd_server.sh b/hugegraph-pd/start_pd_server.sh similarity index 100% rename from start_pd_server.sh rename to hugegraph-pd/start_pd_server.sh From cbdd65a06f850d0247715defe5e8ad1fb9330ea7 Mon Sep 17 00:00:00 2001 From: V_Galaxy <1904821183@qq.com> Date: Mon, 7 Aug 2023 00:32:23 +0800 Subject: [PATCH 16/18] chore: fix pom for submodules in hugegraph-pd --- hugegraph-pd/build.sh | 3 +- hugegraph-pd/hg-pd-client/pom.xml | 5 ++-- hugegraph-pd/hg-pd-clitools/pom.xml | 4 +-- hugegraph-pd/hg-pd-common/pom.xml | 3 +- hugegraph-pd/hg-pd-core/pom.xml | 7 +++-- .../hugegraph/pd/TaskScheduleService.java | 2 -- hugegraph-pd/hg-pd-dist/pom.xml | 2 +- hugegraph-pd/hg-pd-grpc/pom.xml | 4 +-- hugegraph-pd/hg-pd-service/pom.xml | 12 ++++++-- hugegraph-pd/hg-pd-test/pom.xml | 9 ++++-- hugegraph-pd/pom.xml | 29 +++++-------------- hugegraph-pd/settings.xml | 3 +- pom.xml | 2 +- 13 files changed, 43 insertions(+), 42 deletions(-) diff --git a/hugegraph-pd/build.sh b/hugegraph-pd/build.sh index 785db63dc2..f77358e1cc 100644 --- a/hugegraph-pd/build.sh +++ b/hugegraph-pd/build.sh @@ -20,6 +20,7 @@ export PATH=$MAVEN_3_5_3_BIN:$ORACLEJDK_11_0_7_BIN:$PATH export JAVA_HOME=$ORACLEJDK_11_0_7_HOME export MAVEN_HOME=$MAVEN_3_5_3_HOME +# TODO: remove later readonly REPO_URL=http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local_Snapshots if [ ! -n "$1" ] ;then @@ -35,4 +36,4 @@ ls output echo "mv dist done" echo "show output...." ls output -echo "show output done" \ No newline at end of file +echo "show output done" diff --git a/hugegraph-pd/hg-pd-client/pom.xml b/hugegraph-pd/hg-pd-client/pom.xml index 194e7dde2a..c2d67ff92d 100644 --- a/hugegraph-pd/hg-pd-client/pom.xml +++ b/hugegraph-pd/hg-pd-client/pom.xml @@ -24,7 +24,7 @@ org.apache.hugegraph - hugegraph-pd-root + hugegraph-pd ${revision} ../pom.xml @@ -66,7 +66,8 @@ org.yaml snakeyaml + 1.28 test -
\ No newline at end of file +
diff --git a/hugegraph-pd/hg-pd-clitools/pom.xml b/hugegraph-pd/hg-pd-clitools/pom.xml index c0a3009e1b..d408b45baf 100644 --- a/hugegraph-pd/hg-pd-clitools/pom.xml +++ b/hugegraph-pd/hg-pd-clitools/pom.xml @@ -20,7 +20,7 @@ xmlns="http://maven.apache.org/POM/4.0.0" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - hugegraph-pd-root + hugegraph-pd org.apache.hugegraph ${revision} ../pom.xml @@ -71,4 +71,4 @@ - \ No newline at end of file + diff --git a/hugegraph-pd/hg-pd-common/pom.xml b/hugegraph-pd/hg-pd-common/pom.xml index 4580d14c7e..1997660099 100644 --- a/hugegraph-pd/hg-pd-common/pom.xml +++ b/hugegraph-pd/hg-pd-common/pom.xml @@ -23,7 +23,7 @@ 4.0.0 org.apache.hugegraph - hugegraph-pd-root + hugegraph-pd ${revision} ../pom.xml @@ -43,6 +43,7 @@ org.projectlombok lombok + 1.18.24 org.apache.commons diff --git a/hugegraph-pd/hg-pd-core/pom.xml b/hugegraph-pd/hg-pd-core/pom.xml index a02a4c9deb..374d9a93c9 100644 --- a/hugegraph-pd/hg-pd-core/pom.xml +++ b/hugegraph-pd/hg-pd-core/pom.xml @@ -23,7 +23,7 @@ org.apache.hugegraph - hugegraph-pd-root + hugegraph-pd ${revision} ../pom.xml @@ -58,6 +58,7 @@ org.springframework spring-context + 5.3.20 org.apache.hugegraph @@ -66,10 +67,12 @@ org.springframework.boot spring-boot + 2.5.14 org.projectlombok lombok + 1.18.24 org.apache.commons @@ -88,4 +91,4 @@ ${revision} - \ No newline at end of file + diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java index ba68aab9bd..5fd1005b1c 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java @@ -710,8 +710,6 @@ public Boolean dbCompaction(String tableName) throws PDException { /** * 判断是否能把一个store的分区全部迁出,给出判断结果和迁移方案 - * - * @author tianxiaohui@baidu.com */ public Map canAllPartitionsMovedOut(Metapb.Store sourceStore) throws PDException { diff --git a/hugegraph-pd/hg-pd-dist/pom.xml b/hugegraph-pd/hg-pd-dist/pom.xml index b505e54d94..e2d1da0a74 100644 --- a/hugegraph-pd/hg-pd-dist/pom.xml +++ b/hugegraph-pd/hg-pd-dist/pom.xml @@ -20,7 +20,7 @@ xmlns="http://maven.apache.org/POM/4.0.0" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - hugegraph-pd-root + hugegraph-pd org.apache.hugegraph ${revision} ../pom.xml diff --git a/hugegraph-pd/hg-pd-grpc/pom.xml b/hugegraph-pd/hg-pd-grpc/pom.xml index 0c5cff10db..538647a7a3 100644 --- a/hugegraph-pd/hg-pd-grpc/pom.xml +++ b/hugegraph-pd/hg-pd-grpc/pom.xml @@ -24,7 +24,7 @@ org.apache.hugegraph - hugegraph-pd-root + hugegraph-pd ${revision} ../pom.xml @@ -135,4 +135,4 @@ - \ No newline at end of file + diff --git a/hugegraph-pd/hg-pd-service/pom.xml b/hugegraph-pd/hg-pd-service/pom.xml index 4a027fbbcd..e7f51323c2 100644 --- a/hugegraph-pd/hg-pd-service/pom.xml +++ b/hugegraph-pd/hg-pd-service/pom.xml @@ -23,12 +23,12 @@ org.apache.hugegraph - hugegraph-pd-root + hugegraph-pd ${revision} ../pom.xml - hugegraph-pd + hg-pd-service 0.5.10 @@ -65,6 +65,7 @@ org.springframework.boot spring-boot-starter-web + 2.5.14 @@ -82,18 +83,22 @@ org.springframework.boot spring-boot-starter-actuator + 2.5.14 io.micrometer micrometer-registry-prometheus + 1.7.12 org.springframework.boot spring-boot-starter-jetty + 2.5.14 org.springframework.boot spring-boot-starter-log4j2 + 2.5.14 com.lmax @@ -103,6 +108,7 
@@ org.projectlombok lombok + 1.18.24 @@ -142,4 +148,4 @@ - \ No newline at end of file + diff --git a/hugegraph-pd/hg-pd-test/pom.xml b/hugegraph-pd/hg-pd-test/pom.xml index 0371e81630..7924ca4c0b 100644 --- a/hugegraph-pd/hg-pd-test/pom.xml +++ b/hugegraph-pd/hg-pd-test/pom.xml @@ -21,7 +21,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hugegraph - hugegraph-pd-root + hugegraph-pd ${revision} ../pom.xml @@ -85,15 +85,17 @@ org.projectlombok lombok - ${lombok.version} + 1.18.24 org.springframework spring-context-support + 5.3.20 org.springframework spring-test + 5.3.20 test @@ -164,6 +166,7 @@ org.springframework.boot spring-boot-starter-test + 2.5.14 org.springframework.boot @@ -364,4 +367,4 @@ - \ No newline at end of file + diff --git a/hugegraph-pd/pom.xml b/hugegraph-pd/pom.xml index 4d429c430e..790078b785 100644 --- a/hugegraph-pd/pom.xml +++ b/hugegraph-pd/pom.xml @@ -20,17 +20,15 @@ xmlns="http://maven.apache.org/POM/4.0.0" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 - - org.apache.hugegraph - hugegraph-pd-root + hugegraph-pd ${revision} pom - - org.springframework.boot - spring-boot-starter-parent - 2.5.14 + org.apache.hugegraph + hugegraph + ${revision} + ../pom.xml @@ -46,7 +44,6 @@ - 1.5.0 11 11 2.17.0 @@ -76,21 +73,11 @@ junit junit + 4.13.2 test - - - Baidu_Local - http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local - - - Baidu_Local_Snapshots - http://maven.baidu-int.com/nexus/content/repositories/Baidu_Local_Snapshots - - - @@ -148,7 +135,7 @@ *.tar.gz .flattened-pom.xml - + false @@ -282,4 +269,4 @@ - \ No newline at end of file + diff --git a/hugegraph-pd/settings.xml b/hugegraph-pd/settings.xml index dc29a27b7e..9b26803442 100644 --- a/hugegraph-pd/settings.xml +++ b/hugegraph-pd/settings.xml @@ -38,6 +38,7 @@ + baidu @@ -129,4 +130,4 @@ baidu -
\ No newline at end of file + diff --git a/pom.xml b/pom.xml index 3170231902..5735ede238 100644 --- a/pom.xml +++ b/pom.xml @@ -95,7 +95,7 @@ hugegraph-server - + hugegraph-pd From 1234c110e75092b2acba5122d6ea059b935c22ed Mon Sep 17 00:00:00 2001 From: V_Galaxy <1904821183@qq.com> Date: Mon, 7 Aug 2023 16:09:32 +0800 Subject: [PATCH 17/18] chore: add apache-rat-plugin for hugegraph-pd and add some license headers --- hugegraph-pd/ci.yml | 19 +++++++- .../apache/hugegraph/pd/client/Channels.java | 17 +++++++ .../hugegraph/pd/client/ClientCache.java | 17 +++++++ .../hugegraph/pd/common/GraphCache.java | 17 +++++++ .../src/assembly/static/conf/application.yml | 17 +++++++ .../static/conf/application.yml.template | 17 +++++++ .../pd/pulse/PdInstructionSubject.java | 17 +++++++ .../src/main/resources/application.yml | 19 +++++++- .../test/resources/application-server0.yml | 19 +++++++- .../test/resources/application-server1.yml | 19 +++++++- .../test/resources/application-server2.yml | 19 +++++++- .../test/resources/application-server3.yml | 19 +++++++- hugegraph-pd/pom.xml | 46 +++++++++++++++++++ 13 files changed, 256 insertions(+), 6 deletions(-) diff --git a/hugegraph-pd/ci.yml b/hugegraph-pd/ci.yml index 1b0d705d8e..0eb7515ae3 100644 --- a/hugegraph-pd/ci.yml +++ b/hugegraph-pd/ci.yml @@ -1,3 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + Global: tool: build_submitter languages: @@ -23,4 +40,4 @@ Profiles: tool: build_submitter env: DECK_CENTOS7U5_K3 command: sh build.sh deploy - release: true \ No newline at end of file + release: true diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java index a8d7d07afa..c441fb8761 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + package org.apache.hugegraph.pd.client; import java.util.concurrent.ConcurrentHashMap; diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java index 05914feff8..92d8cc95d3 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.client; import java.util.HashMap; diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java index de3b395a92..46da3b75e6 100644 --- a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java +++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.common; import java.util.Map; diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml index dfd2d14456..7859670420 100644 --- a/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml +++ b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml @@ -1,3 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + spring: application: name: hugegraph-pd diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml.template b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml.template index c00bf47b32..43f52df609 100644 --- a/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml.template +++ b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/application.yml.template @@ -1,3 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + spring: application: name: hugegraph-pd diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java index c5e4ce1fc3..b10b60f60b 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + package org.apache.hugegraph.pd.pulse; import java.util.function.Function; diff --git a/hugegraph-pd/hg-pd-service/src/main/resources/application.yml b/hugegraph-pd/hg-pd-service/src/main/resources/application.yml index e6917946ea..25471b6cce 100644 --- a/hugegraph-pd/hg-pd-service/src/main/resources/application.yml +++ b/hugegraph-pd/hg-pd-service/src/main/resources/application.yml @@ -1,3 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + spring: application: name: hugegraph-pd @@ -60,4 +77,4 @@ partition: discovery: #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 - heartbeat-try-count: 3 \ No newline at end of file + heartbeat-try-count: 3 diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml index 9e129a0a23..5e1d63e943 100644 --- a/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml +++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server0.yml @@ -1,3 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + spring: application: name: hugegraph-pd @@ -51,4 +68,4 @@ partition: discovery: #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 - heartbeat-try-count: 3 \ No newline at end of file + heartbeat-try-count: 3 diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml index 982954c499..7cb53fe1c8 100644 --- a/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml +++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server1.yml @@ -1,3 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + spring: application: name: hugegraph-pd @@ -51,4 +68,4 @@ partition: discovery: #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 - heartbeat-try-count: 3 \ No newline at end of file + heartbeat-try-count: 3 diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml index 608acb8c97..5e1dd50a98 100644 --- a/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml +++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server2.yml @@ -1,3 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + spring: application: name: hugegraph-pd @@ -53,4 +70,4 @@ partition: discovery: #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 - heartbeat-try-count: 3 \ No newline at end of file + heartbeat-try-count: 3 diff --git a/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml b/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml index 3d0b1d94d8..d2b88950ab 100644 --- a/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml +++ b/hugegraph-pd/hg-pd-service/src/test/resources/application-server3.yml @@ -1,3 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + spring: application: name: hugegraph-pd @@ -53,4 +70,4 @@ partition: discovery: #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 - heartbeat-try-count: 3 \ No newline at end of file + heartbeat-try-count: 3 diff --git a/hugegraph-pd/pom.xml b/hugegraph-pd/pom.xml index 790078b785..63ee83ccdd 100644 --- a/hugegraph-pd/pom.xml +++ b/hugegraph-pd/pom.xml @@ -143,6 +143,52 @@ + + + org.apache.rat + apache-rat-plugin + + + **/*.versionsBackup + **/*.proto + **/*.log + **/*.txt + **/*.json + **/*.conf + **/*.map + **/*.properties + dist/**/* + **/assembly/static/bin/hugegraph.service + **/swagger-ui/**/* + scripts/dev/reviewers + scripts/dev/reviewers + **/*.md + **/dependency-reduced-pom.xml + **/logs/*.log + **/META-INF/**/* + **/target/* + style/* + ChangeLog + CONFIG.ini + GROUPS + OWNERS + **/grpc/** + + .github/**/* + .gitignore + .gitattributes + + **/*.iml + **/*.iws + **/*.ipr + **/META-INF/MANIFEST.MF + + .repository/** + **/.flattened-pom.xml + + true + + From 84273f209369b1631bd436dd7248dc970aa51351 Mon Sep 17 00:00:00 2001 From: V_Galaxy <1904821183@qq.com> Date: Mon, 7 Aug 2023 16:53:08 +0800 Subject: [PATCH 18/18] chore: clean some comment/code --- hugegraph-pd/.DS_Store | Bin 6148 -> 0 bytes hugegraph-pd/build-pre.sh | 4 +- hugegraph-pd/deploy-release.sh | 2 +- hugegraph-pd/deploy-snapshot.sh | 2 +- .../pd/client/AbstractClientStubProxy.java | 3 - .../hugegraph/pd/client/Discoverable.java | 3 - .../hugegraph/pd/client/DiscoveryClient.java | 15 ++--- .../pd/client/DiscoveryClientImpl.java | 3 - .../hugegraph/pd/client/LicenseClient.java | 3 - .../apache/hugegraph/pd/client/PDPulse.java | 4 +- .../hugegraph/pd/client/PDPulseImpl.java | 3 - .../apache/hugegraph/pd/client/PDWatch.java | 3 - .../hugegraph/pd/client/PDWatchImpl.java | 40 ++++++------- .../hugegraph/pd/pulse/PulseServerNotice.java | 3 - .../apache/hugegraph/pd/watch/PDWatcher.java | 3 - .../hugegraph/pd/watch/PartitionEvent.java | 3 - .../apache/hugegraph/pd/watch/WatchType.java | 3 - .../pd/client/DiscoveryClientImplTest.java | 3 - .../pd/client/LicenseClientImplTest.java | 19 +++--- .../hugegraph/pd/client/PDPulseTest.java | 3 - .../pd/client/test/HgPDTestUtil.java | 3 - .../apache/hugegraph/pd/common/HgAssert.java | 3 - .../pd/common/PDRuntimeException.java | 3 - .../org/apache/hugegraph/pd/LogService.java | 3 - .../apache/hugegraph/pd/RegistryService.java | 3 - .../apache/hugegraph/pd/config/PDConfig.java | 30 ++++------ .../hugegraph/pd/meta/DiscoveryMetaStore.java | 3 - .../org/apache/hugegraph/pd/meta/LogMeta.java | 3 - .../pd/meta/MetadataRocksDBStore.java | 3 - .../apache/hugegraph/pd/meta/QueueStore.java | 3 - .../hugegraph/pd/metrics/MetricsConfig.java | 3 - .../hugegraph/pd/metrics/PDMetrics.java | 3 - .../apache/hugegraph/pd/model/DemoModel.java | 3 - .../hugegraph/pd/model/PromTargetsModel.java | 3 - .../pd/model/RegistryQueryRestRequest.java | 3 - .../pd/model/RegistryRestRequest.java | 3 - .../pd/model/RegistryRestResponse.java | 3 - .../hugegraph/pd/model/RestApiResponse.java | 4 -- .../hugegraph/pd/model/TimeRangeRequest.java | 3 - .../pd/notice/NoticeBroadcaster.java | 3 - .../pd/pulse/AbstractObserverSubject.java | 3 - .../hugegraph/pd/pulse/PDPulseSubject.java | 54 ++++++++---------- .../pd/pulse/PartitionHeartbeatSubject.java | 3 - .../hugegraph/pd/rest/PromTargetsAPI.java | 14 ++--- .../apache/hugegraph/pd/rest/RegistryAPI.java | 3 - .../org/apache/hugegraph/pd/rest/TestAPI.java | 3 - .../hugegraph/pd/service/PDPulseService.java | 6 +- .../hugegraph/pd/service/PDWatchService.java | 3 - 
.../pd/service/PromTargetsService.java | 3 - .../hugegraph/pd/service/ServiceGrpc.java | 3 - .../apache/hugegraph/pd/util/DateUtil.java | 3 - .../apache/hugegraph/pd/util/HgMapCache.java | 1 - .../org/apache/hugegraph/pd/util/IdUtil.java | 3 - .../pd/watch/AbstractWatchSubject.java | 3 - .../hugegraph/pd/watch/KvWatchSubject.java | 24 ++++---- .../hugegraph/pd/watch/NodeChangeSubject.java | 12 ++-- .../hugegraph/pd/watch/PDWatchSubject.java | 3 - .../pd/watch/PartitionChangeSubject.java | 23 ++++---- .../src/main/resources/private-keys.store | Bin 1299 -> 0 bytes .../src/main/resources/public-certs.store | Bin 892 -> 0 bytes .../src/test/java/live/PDServer0.java | 3 - .../src/test/java/live/PDServer1.java | 3 - .../src/test/java/live/PDServer2.java | 3 - .../src/test/java/live/PDServer3.java | 3 - .../hugegraph/pd/client/PDClientTest.java | 6 +- .../hugegraph/pd/service/RestApiTest.java | 4 -- hugegraph-pd/settings.xml | 2 +- 67 files changed, 110 insertions(+), 291 deletions(-) delete mode 100644 hugegraph-pd/.DS_Store delete mode 100644 hugegraph-pd/hg-pd-service/src/main/resources/private-keys.store delete mode 100644 hugegraph-pd/hg-pd-service/src/main/resources/public-certs.store diff --git a/hugegraph-pd/.DS_Store b/hugegraph-pd/.DS_Store deleted file mode 100644 index 1b1b3b8c07dfedcef15b4f3094c7204fd8d10d89..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%Wl&^6ur}?#Bqtpf=4$oAcSCUG;`*0&peLjkpTeKoOEjdB>*rm5avvbGK}ge zoUt`kQbc6b#*xoEj?W#;Q-j*H0$PE;TLId;IjBGnJP1?!_b%3b{2@$C!d&>FZw~_C zFakHl`IO>pAjg9)xL_we+0~TidFW%S+b~4$ZRmiHJQsV(8dW7Wsx*(o0Dl~UWPd;- zi}TD>e;Kn8WBet@nU06*)cfKLeQz9AtKZn#dT!%f-Y`tVESk?-eLii4b~qU~?a@=p zJ+Q;RqtcH!_m^qgdF;8f3*TC&103Pzz_ zx*SC}_sZ6Obyl{bgPm&Gx>eho%?ieq>$N*a$Ag!zqSv!G@5Sc?hKIJW3knzT0TY?> zoVG@R&x0P;6F2G8!{so9PEvOE4*gb3s5mhRKQAQwx{xq$Y#JE?{tnTEW?NCTbAU?i z*6T~v>bv`-+NEnZZa-|F_=AAI#d)ADHKUCaH=1Xr9x2U=iDfs&B|d^vTc0!qyrN@V>_y6rd}va zJv^8t+<`SD>R2nF6-X+uF0&=N|L?DU|0hX0rxnl&{8I|B++pjmiBnQ{>zT>XU29=D t#z01XbBTh2k-CmWg0A8%48pL^q6A_?;ann?py@vXLIxda1^%c4KLHX<44eP} diff --git a/hugegraph-pd/build-pre.sh b/hugegraph-pd/build-pre.sh index 9350c36122..f92d104eea 100644 --- a/hugegraph-pd/build-pre.sh +++ b/hugegraph-pd/build-pre.sh @@ -21,7 +21,7 @@ touch output/1 export JAVA_HOME=$ORACLEJDK_1_8_0_HOME readonly VER=3.6.3 -readonly REPO_URL=http://10.14.139.8:8081/artifactory/star-local +readonly REPO_URL=http://127.0.0.1:8081/artifactory/star-local $MAVEN_3_5_3_BIN/mvn -DremoveSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=true versions:set $MAVEN_3_5_3_BIN/mvn --settings ./settings.xml -Dmaven.test.skip=true -DaltDeploymentRepository=star-local::default::${REPO_URL} clean deploy @@ -30,7 +30,7 @@ $MAVEN_3_5_3_BIN/mvn versions:revert #------------------repo----------------------- readonly FILE_NAME=hugegraph-pd-3.6.3.tar.gz -readonly REPO_URL_FILE=http://10.14.139.8:8081/artifactory/star-file +readonly REPO_URL_FILE=http://127.0.0.1:8081/artifactory/star-file localFilePath=dist/${FILE_NAME} targetFolder="${REPO_URL_FILE}/dist/$(date '+%Y-%m-%d')/" diff --git a/hugegraph-pd/deploy-release.sh b/hugegraph-pd/deploy-release.sh index e9421724aa..118a214d7f 100644 --- a/hugegraph-pd/deploy-release.sh +++ b/hugegraph-pd/deploy-release.sh @@ -17,7 +17,7 @@ # readonly VER=3.6.3 -readonly REPO_URL=http://10.14.139.8:8081/artifactory/star-local +readonly REPO_URL=http://127.0.0.1:8081/artifactory/star-local #mvn -DnewVersion=${VER}-SNAPSHOT -DprocessAllModules=true -DgenerateBackupPoms=false versions:set ./mvnw 
-DremoveSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=true versions:set diff --git a/hugegraph-pd/deploy-snapshot.sh b/hugegraph-pd/deploy-snapshot.sh index e9421724aa..118a214d7f 100644 --- a/hugegraph-pd/deploy-snapshot.sh +++ b/hugegraph-pd/deploy-snapshot.sh @@ -17,7 +17,7 @@ # readonly VER=3.6.3 -readonly REPO_URL=http://10.14.139.8:8081/artifactory/star-local +readonly REPO_URL=http://127.0.0.1:8081/artifactory/star-local #mvn -DnewVersion=${VER}-SNAPSHOT -DprocessAllModules=true -DgenerateBackupPoms=false versions:set ./mvnw -DremoveSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=true versions:set diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java index 8c8bf38152..150633ba24 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java @@ -22,9 +22,6 @@ import io.grpc.stub.AbstractBlockingStub; import io.grpc.stub.AbstractStub; -/** - * @date 2022/6/20 - **/ public class AbstractClientStubProxy { private final LinkedList hostList = new LinkedList<>(); diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java index 3dac3478d1..4222bfbe50 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java @@ -20,9 +20,6 @@ import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; import org.apache.hugegraph.pd.grpc.discovery.Query; -/** - * @date 2021/12/20 - **/ public interface Discoverable { NodeInfos getNodeInfos(Query query); diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java index 8d38b23fe7..aa80ec606f 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java @@ -38,9 +38,6 @@ import io.grpc.ManagedChannelBuilder; import lombok.extern.slf4j.Slf4j; -/** - * @date 2021/12/20 - **/ @Slf4j public abstract class DiscoveryClient implements Closeable, Discoverable { @@ -127,20 +124,20 @@ private void resetChannel(String singleAddress) throws PDException { try { if (requireResetStub.get()) { while (channel != null && !channel.shutdownNow().awaitTermination( - 100, TimeUnit.MILLISECONDS)) { + 100, TimeUnit.MILLISECONDS)) { continue; } channel = ManagedChannelBuilder.forTarget( - singleAddress).usePlaintext().build(); + singleAddress).usePlaintext().build(); this.registerStub = DiscoveryServiceGrpc.newBlockingStub( - channel); + channel); this.blockingStub = DiscoveryServiceGrpc.newBlockingStub( - channel); + channel); requireResetStub.set(false); } } catch (Exception e) { throw new PDException(-1, String.format( - "Reset channel with error : %s.", e.getMessage())); + "Reset channel with error : %s.", e.getMessage())); } finally { readWriteLock.writeLock().unlock(); } @@ -212,7 +209,7 @@ public void close() { readWriteLock.writeLock().lock(); try { while (channel != null && 
!channel.shutdownNow().awaitTermination( - 100, TimeUnit.MILLISECONDS)) { + 100, TimeUnit.MILLISECONDS)) { continue; } } catch (Exception e) { diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java index c65053cc9f..77ec9a36b2 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java @@ -23,9 +23,6 @@ import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; import org.apache.hugegraph.pd.grpc.discovery.RegisterType; -/** - * @date 2021/12/20 - **/ public class DiscoveryClientImpl extends DiscoveryClient { private final String id; diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java index d21741b194..b86d6b3946 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java @@ -27,9 +27,6 @@ import io.grpc.stub.AbstractStub; import lombok.extern.slf4j.Slf4j; -/** - * @date 2022/8/3 - **/ @Slf4j public class LicenseClient extends AbstractClient { diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java index 542311280c..025d6f7ae8 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java @@ -26,8 +26,6 @@ /** * Bidirectional communication interface of pd-client and pd-server - * - * @author lynn.bond@hotmail.com created on 2021/11/9 */ public interface PDPulse { @@ -151,4 +149,4 @@ interface Notifier extends Closeable { void crash(String error); } -} \ No newline at end of file +} diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java index 0e473a9c78..2d1ccb743e 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java @@ -38,9 +38,6 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com created on 2021/11/9 - */ @Slf4j public final class PDPulseImpl implements PDPulse { diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java index a16d7b1b02..3da255a825 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java @@ -24,9 +24,6 @@ import org.apache.hugegraph.pd.watch.NodeEvent; import org.apache.hugegraph.pd.watch.PartitionEvent; -/** - * @author lynn.bond@hotmail.com created on 2021/11/4 - */ public interface PDWatch { /** diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java 
b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java index c723eb9479..73796b53fb 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java @@ -17,7 +17,6 @@ package org.apache.hugegraph.pd.client; -import java.util.concurrent.ConcurrentHashMap; import java.util.function.Supplier; import org.apache.hugegraph.pd.grpc.watch.HgPdWatchGrpc; @@ -31,17 +30,14 @@ import org.apache.hugegraph.pd.watch.PartitionEvent; import io.grpc.ManagedChannel; -import io.grpc.ManagedChannelBuilder; import io.grpc.stub.StreamObserver; -/** - * @author lynn.bond@hotmail.com created on 2021/11/4 - */ final class PDWatchImpl implements PDWatch { - private HgPdWatchGrpc.HgPdWatchStub stub; + private final HgPdWatchGrpc.HgPdWatchStub stub; + + private final String pdServerAddress; - private String pdServerAddress; // TODO: support several servers. PDWatchImpl(String pdServerAddress) { this.pdServerAddress = pdServerAddress; @@ -55,7 +51,7 @@ public String getCurrentHost() { @Override public boolean checkChannel() { - return stub != null && ! ((ManagedChannel) stub.getChannel()).isShutdown(); + return stub != null && !((ManagedChannel) stub.getChannel()).isShutdown(); } /** @@ -95,9 +91,9 @@ private class GraphWatcher extends AbstractWatcher { private GraphWatcher(Listener listener) { super(listener, () -> WatchCreateRequest - .newBuilder() - .setWatchType(WatchType.WATCH_TYPE_GRAPH_CHANGE) - .build() + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_GRAPH_CHANGE) + .build() ); } @@ -112,9 +108,9 @@ private class ShardGroupWatcher extends AbstractWatcher { private ShardGroupWatcher(Listener listener) { super(listener, () -> WatchCreateRequest - .newBuilder() - .setWatchType(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE) - .build() + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE) + .build() ); } @@ -129,9 +125,9 @@ private class PartitionWatcher extends AbstractWatcher { private PartitionWatcher(Listener listener) { super(listener, () -> WatchCreateRequest - .newBuilder() - .setWatchType(WatchType.WATCH_TYPE_PARTITION_CHANGE) - .build() + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_PARTITION_CHANGE) + .build() ); } @@ -140,7 +136,7 @@ public void onNext(WatchResponse watchResponse) { WatchPartitionResponse res = watchResponse.getPartitionResponse(); PartitionEvent event = new PartitionEvent(res.getGraph(), res.getPartitionId(), PartitionEvent.ChangeType.grpcTypeOf( - res.getChangeType())); + res.getChangeType())); this.listener.onNext(event); } } @@ -149,9 +145,9 @@ private class NodeWatcher extends AbstractWatcher { private NodeWatcher(Listener listener) { super(listener, () -> WatchCreateRequest - .newBuilder() - .setWatchType(WatchType.WATCH_TYPE_STORE_NODE_CHANGE) - .build() + .newBuilder() + .setWatchType(WatchType.WATCH_TYPE_STORE_NODE_CHANGE) + .build() ); } @@ -179,7 +175,7 @@ private AbstractWatcher(Listener listener, void init() { this.reqStream = PDWatchImpl.this.stub.watch(this); this.reqStream.onNext(WatchRequest.newBuilder().setCreateRequest( - this.requestSupplier.get() + this.requestSupplier.get() ).build()); } diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java index 468553714f..251bab07f9 100644 --- 
a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java @@ -17,9 +17,6 @@ package org.apache.hugegraph.pd.pulse; -/** - * @author lynn.bond@hotmail.com created on 2022/2/13 - */ public interface PulseServerNotice { /** * @throws RuntimeException when failed to send ack-message to pd-server diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java index 8a2a406904..c4ff91b107 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java @@ -17,9 +17,6 @@ package org.apache.hugegraph.pd.watch; -/** - * @author lynn.bond@hotmail.com created on 2021/11/4 - */ public class PDWatcher { } diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java index 237a35bfad..76a4fdc7d9 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java @@ -21,9 +21,6 @@ import org.apache.hugegraph.pd.grpc.watch.WatchChangeType; -/** - * @author lynn.bond@hotmail.com created on 2021/11/4 - */ public class PartitionEvent { private final String graph; private final int partitionId; diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java index 493f93c4eb..e14bfafdc1 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java @@ -17,9 +17,6 @@ package org.apache.hugegraph.pd.watch; -/** - * @author lynn.bond@hotmail.com created on 2021/11/4 - */ enum WatchType { PARTITION_CHANGE(10); diff --git a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java index f2b8139740..8952cc1162 100644 --- a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java +++ b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java @@ -27,9 +27,6 @@ import org.apache.hugegraph.pd.grpc.discovery.Query; import org.junit.Assert; -/** - * @date 2021/12/21 - **/ public class DiscoveryClientImplTest { private static final AtomicLong label = new AtomicLong(); diff --git a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java index 6fc2518b0d..390821662e 100644 --- a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java +++ b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java @@ -30,9 +30,6 @@ import lombok.extern.slf4j.Slf4j; -/** - * @date 2021/12/21 - **/ @Slf4j public class LicenseClientImplTest { @@ -49,7 +46,7 @@ public void putLicense() { log.info(error.getMessage()); assert 
error.getType().equals(Pdpb.ErrorType.OK); } catch (Exception e) { - log.error("put license with error: {}", e); + log.error("put license with error: ", e); } } @@ -66,13 +63,13 @@ public void getKv() { Object property = ymlConfig.get("rocksdb.write_buffer_size"); assert property.toString().equals("32000000"); } catch (Exception e) { - log.error("put license with error: {}", e); + log.error("put license with error: ", e); } } // @Test public void putKv() { - PDConfig pdConfig = PDConfig.of("10.14.139.70:8688"); + PDConfig pdConfig = PDConfig.of("127.0.0.1.70:8688"); pdConfig.setEnableCache(true); try (KvClient c = new KvClient(pdConfig)) { long l = System.currentTimeMillis(); @@ -81,7 +78,7 @@ public void putKv() { log.info(error.getMessage()); assert error.getType().equals(Pdpb.ErrorType.OK); } catch (Exception e) { - log.error("put license with error: {}", e); + log.error("put license with error: ", e); } } @@ -96,14 +93,14 @@ public void putKvLocal() { log.info(error.getMessage()); assert error.getType().equals(Pdpb.ErrorType.OK); } catch (Exception e) { - log.error("put license with error: {}", e); + log.error("put license with error: ", e); } } private Properties getYmlConfig(String yml) { Yaml yaml = new Yaml(); - Iterable load = yaml.loadAll(yml); - Iterator iterator = load.iterator(); + Iterable load = yaml.loadAll(yml); + Iterator iterator = load.iterator(); Properties properties = new Properties(); while (iterator.hasNext()) { Map next = (Map) iterator.next(); @@ -127,4 +124,4 @@ private void map2Properties(Map map, String prefix, Properties p } } -} \ No newline at end of file +} diff --git a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java index 27d247a369..dfdc63cf36 100644 --- a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java +++ b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/PDPulseTest.java @@ -25,9 +25,6 @@ import org.apache.hugegraph.pd.pulse.PulseServerNotice; import org.junit.BeforeClass; -/** - * @author lynn.bond@hotmail.com created on 2021/11/8 - */ public class PDPulseTest { private static PDClient pdClient; diff --git a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java index 51152b74f4..99e5f83360 100644 --- a/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java +++ b/hugegraph-pd/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java @@ -22,9 +22,6 @@ import java.util.Iterator; import java.util.List; -/** - * @author lynn.bond@hotmail.com created on 2021/11/8 - */ public class HgPDTestUtil { public static void println(Object str) { diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java index 3e43ef2adf..bb1fccd025 100644 --- a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java +++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java @@ -20,9 +20,6 @@ import java.util.Collection; import java.util.Map; -/** - * @author lynn.bond@hotmail.com - */ public final class HgAssert { public static void isTrue(boolean expression, String message) { diff --git 
a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java index 3865639aea..c7efb84031 100644 --- a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java +++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java @@ -17,9 +17,6 @@ package org.apache.hugegraph.pd.common; -/** - * @date 2022/8/1 - **/ public class PDRuntimeException extends RuntimeException { // public static final int LICENSE_ERROR = -11; diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java index ecb69e82d4..664b6b8dec 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java @@ -31,9 +31,6 @@ import lombok.extern.slf4j.Slf4j; -/** - * @date 2022/3/29 - **/ @Slf4j @Service public class LogService { diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java index 724281a349..4a0f3fef66 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java @@ -25,9 +25,6 @@ import org.apache.hugegraph.pd.meta.DiscoveryMetaStore; import org.apache.hugegraph.pd.meta.MetadataFactory; -/** - * @date 2022/1/14 - **/ public class RegistryService { private final PDConfig pdConfig; private final DiscoveryMetaStore meta; diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java index 6d085f5051..84e8581673 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java @@ -35,9 +35,6 @@ /** * PD配置文件 - * - * @author: yanjinbing - * @date: 2021/10/20 */ @Data @Component @@ -65,28 +62,14 @@ public class PDConfig { private String licensePath; @Autowired private ThreadPoolGrpc threadPoolGrpc; - - @Data - @Configuration - public class ThreadPoolGrpc { - @Value("${thread.pool.grpc.core:600}") - private int core; - @Value("${thread.pool.grpc.max:1000}") - private int max; - @Value("${thread.pool.grpc.queue:" + Integer.MAX_VALUE + "}") - private int queue; - } - @Autowired private Raft raft; - @Autowired private Store store; @Autowired private Partition partition; @Autowired private Discovery discovery; - private Map initialStoreMap = null; private ConfigService configService; private IdService idService; @@ -128,6 +111,17 @@ public void setIdService(IdService idService) { this.idService = idService; } + @Data + @Configuration + public class ThreadPoolGrpc { + @Value("${thread.pool.grpc.core:600}") + private int core; + @Value("${thread.pool.grpc.max:1000}") + private int max; + @Value("${thread.pool.grpc.queue:" + Integer.MAX_VALUE + "}") + private int queue; + } + @Data @Configuration public class Raft { @@ -207,7 +201,7 @@ public Long getRetentionPeriod() { private Long parseTimeExpression(String exp) { if (exp != null) { Pattern pattern = Pattern.compile( - "(?(\\d+)*)(\\s)*(?(second|minute|hour|day|month|year)$)"); + 
"(?(\\d+)*)(\\s)*(?(second|minute|hour|day|month|year)$)"); Matcher matcher = pattern.matcher(exp.trim()); if (matcher.find()) { String n = matcher.group("n"); diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java index 81da5e3030..c95185363d 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java @@ -30,9 +30,6 @@ import lombok.extern.slf4j.Slf4j; -/** - * @date 2022/1/14 - **/ @Slf4j public class DiscoveryMetaStore extends MetadataRocksDBStore { diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java index dd5a3be16a..ab3660034d 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java @@ -23,9 +23,6 @@ import org.apache.hugegraph.pd.config.PDConfig; import org.apache.hugegraph.pd.grpc.Metapb; -/** - * @date 2022/3/29 - **/ public class LogMeta extends MetadataRocksDBStore { private final PDConfig pdConfig; diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java index c591c116d9..05aa938f07 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java @@ -30,9 +30,6 @@ import com.google.protobuf.Parser; -/** - * @date 2022/1/9 - **/ public class MetadataRocksDBStore extends MetadataStoreBase { HgKVStore store; diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java index 979f5fcc25..ce850e5b90 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/QueueStore.java @@ -26,9 +26,6 @@ import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.store.RaftKVStore; -/** - * @author lynn.bond@hotmail.com on 2022/2/10 - */ public class QueueStore extends MetadataRocksDBStore { QueueStore(PDConfig pdConfig) { super(pdConfig); diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java index 2a2ec10750..d051bfbdba 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java @@ -24,9 +24,6 @@ import io.micrometer.core.instrument.MeterRegistry; -/** - * @author lynn.bond@hotmail.com on 2022/01/05 - */ @Configuration public class MetricsConfig { @Autowired diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java index 31d9a7c28a..bb230e8d09 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java 
+++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java @@ -31,9 +31,6 @@ import io.micrometer.core.instrument.MeterRegistry; import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com on 2022/1/5 - */ @Component @Slf4j public final class PDMetrics { diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java index 3f28e5027c..72d9599c86 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java @@ -19,9 +19,6 @@ import java.util.Objects; -/** - * @author lynn.bond@hotmail.com created on 2021/11/1 - */ public class DemoModel { private int status; private String text; diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java index 47ff3ab83a..57e15f74a2 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java @@ -22,9 +22,6 @@ import java.util.Map; import java.util.Set; -/** - * @author lynn.bond@hotmail.com on 2022/2/14 - */ public class PromTargetsModel { private static final String LABEL_METRICS_PATH = "__metrics_path__"; private static final String LABEL_SCHEME = "__scheme__"; diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java index a076c93b8e..5833d99ca2 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java @@ -21,9 +21,6 @@ import lombok.Data; -/** - * @date 2022/2/8 - **/ @Data public class RegistryQueryRestRequest { diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java index 10ab054758..666a8661c3 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java @@ -21,9 +21,6 @@ import lombok.Data; -/** - * @date 2022/2/8 - **/ @Data public class RegistryRestRequest { diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java index 8cd00b5825..93ab584e22 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java @@ -23,9 +23,6 @@ import lombok.Data; -/** - * @date 2022/2/8 - **/ @Data public class RegistryRestResponse { diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java index 5136bc5fe0..54f6c60bd5 100644 --- 
a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java @@ -23,10 +23,6 @@ import lombok.Data; -/** - * @author tianxiaohui - * @date 2022-07-21 - */ @Data public class RestApiResponse { String message; diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java index 25b9ed51c0..cc25cedd36 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java @@ -19,9 +19,6 @@ import lombok.Data; -/** - * @date 2022/3/23 - **/ @Data public class TimeRangeRequest { String startTime; diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java index 53bcfee357..ec3976b8bf 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java @@ -24,9 +24,6 @@ import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com on 2022/2/10 - */ @Slf4j public class NoticeBroadcaster { private final Supplier noticeSupplier; diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java index 503bd2d688..64472f5625 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java @@ -34,9 +34,6 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com created on 2021/11/9 - */ @ThreadSafe @Slf4j abstract class AbstractObserverSubject { diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java index 36891803bc..ca5cfe7096 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubject.java @@ -57,24 +57,20 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com created on 2021/11/8 - * @version 1.1.0 added ack on 2020/02/11 - */ @Slf4j @ThreadSafe public class PDPulseSubject { private final static long NOTICE_EXPIRATION_TIME = 30 * 60 * 1000; private final static int RETRYING_PERIOD_SECONDS = 60; private final static Map subjectHolder = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); private final static ConcurrentLinkedQueue broadcasterQueue = - new ConcurrentLinkedQueue<>(); + new ConcurrentLinkedQueue<>(); private final static ScheduledExecutorService scheduledExecutor = - Executors.newScheduledThreadPool(1); + Executors.newScheduledThreadPool(1); private static Supplier> queueRetrieveFunction = - () -> Collections.emptyList(); + () -> Collections.emptyList(); private static Function queueDurableFunction = (e) -> true; 
private static Function queueRemoveFunction = (e) -> true; @@ -104,15 +100,15 @@ private static void doSchedule() { private static void appendQueue() { broadcasterQueue.addAll( - getQueueItems() - .parallelStream() - .filter(e -> !broadcasterQueue - .stream() - .anyMatch(b -> e.getItemId().equals(b.getDurableId())) - ).map(e -> createBroadcaster(e)) - .peek(e -> log.info("Appending notice: {}", e)) - .filter(e -> e != null) - .collect(Collectors.toList()) + getQueueItems() + .parallelStream() + .filter(e -> !broadcasterQueue + .stream() + .anyMatch(b -> e.getItemId().equals(b.getDurableId())) + ).map(e -> createBroadcaster(e)) + .peek(e -> log.info("Appending notice: {}", e)) + .filter(e -> e != null) + .collect(Collectors.toList()) ); } @@ -138,13 +134,13 @@ private static List getQueueItems() { } public static void setQueueRetrieveFunction( - Supplier> queueRetrieveFunction) { + Supplier> queueRetrieveFunction) { HgAssert.isArgumentNotNull(queueRetrieveFunction, "queueRetrieveFunction"); PDPulseSubject.queueRetrieveFunction = queueRetrieveFunction; } public static void setQueueDurableFunction( - Function queueDurableFunction) { + Function queueDurableFunction) { HgAssert.isArgumentNotNull(queueDurableFunction, "queueDurableFunction"); PDPulseSubject.queueDurableFunction = queueDurableFunction; } @@ -161,7 +157,7 @@ public static void setQueueRemoveFunction(Function queueRemoveF * @return */ public static StreamObserver addObserver( - StreamObserver responseObserver) { + StreamObserver responseObserver) { isArgumentNotNull(responseObserver, "responseObserver"); return new PDPulseStreamObserver(responseObserver); } @@ -223,7 +219,7 @@ private static NoticeBroadcaster createBroadcaster(PdInstructionResponse notice) // } public static Supplier getNoticeSupplier( - T notice) { + T notice) { PulseType type; if (notice instanceof PdInstructionResponse) { type = PulseType.PULSE_TYPE_PD_INSTRUCTION; @@ -237,7 +233,7 @@ public static Supplier private static Supplier getDurableSupplier( - com.google.protobuf.GeneratedMessageV3 notice) { + com.google.protobuf.GeneratedMessageV3 notice) { return () -> { Metapb.QueueItem queueItem = toQueueItem(notice); String res = null; @@ -247,9 +243,9 @@ private static Supplier getDurableSupplier( res = queueItem.getItemId(); } else { log.error( - "Failed to persist queue-item that contained " + - "PartitionHeartbeatResponse: {}" - , notice); + "Failed to persist queue-item that contained " + + "PartitionHeartbeatResponse: {}" + , notice); } } catch (Throwable t) { log.error("Failed to invoke queueDurableFunction, cause by:", t); @@ -333,7 +329,7 @@ private void cancelObserver() { if (this.subject == null) { this.responseObserver.onError( - new Exception("Invoke cancel-observer before create-observer.")); + new Exception("Invoke cancel-observer before create-observer.")); return; } @@ -378,7 +374,7 @@ private AbstractObserverSubject getSubject(PulseType pulseType) { if (subject == null) { responseObserver.onError( - new Exception("Unsupported pulse-type: " + pulseType.name())); + new Exception("Unsupported pulse-type: " + pulseType.name())); return null; } @@ -396,7 +392,7 @@ private void handleNotice(PulseNoticeRequest noticeRequest) { log.info("send change leader command to watch, due to ERROR-100", pde); notifyClient(PdInstructionResponse.newBuilder() .setInstructionType( - PdInstructionType.CHANGE_TO_FOLLOWER) + PdInstructionType.CHANGE_TO_FOLLOWER) .setLeaderIp(RaftEngine.getInstance() .getLeaderGrpcAddress()) .build()); @@ -429,7 +425,7 @@ public void 
onNext(PulseRequest pulseRequest) { if (pulseRequest.hasAckRequest()) { this.ackNotice(pulseRequest.getAckRequest().getNoticeId() - , pulseRequest.getAckRequest().getObserverId()); + , pulseRequest.getAckRequest().getObserverId()); } } diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java index a08f2c826d..a42ffce1e5 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java @@ -27,9 +27,6 @@ import com.google.protobuf.GeneratedMessageV3; -/** - * @author lynn.bond@hotmail.com created on 2021/11/9 - */ public class PartitionHeartbeatSubject extends AbstractObserverSubject { PartitionHeartbeatSubject() { diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java index 8c94cb4a5f..f2432b093f 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PromTargetsAPI.java @@ -34,7 +34,7 @@ import lombok.extern.slf4j.Slf4j; /** - * @author lynn.bond@hotmail.com on 2022/2/14 + * TODO: ensure if we need this class & method (delete) */ @RestController @Slf4j @@ -45,8 +45,9 @@ public class PromTargetsAPI { private PromTargetsService service; @GetMapping(value = "/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) - public ResponseEntity> getPromTargets( - @PathVariable(value = "appName", required = true) String appName) { + public ResponseEntity> getPromTargets(@PathVariable(value = "appName", + required = true) + String appName) { return ResponseEntity.of(Optional.ofNullable(this.service.getTargets(appName))); } @@ -56,9 +57,9 @@ public ResponseEntity> getPromAllTargets() { } @GetMapping(value = "/demo/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) - public List getDemoTargets( - @PathVariable(value = "appName", required = true) String targetType) { - + public List getDemoTargets(@PathVariable(value = "appName", + required = true) String targetType) { + // TODO: ensure the IP addr is correct & useful PromTargetsModel model = null; switch (targetType) { case "node": @@ -86,7 +87,6 @@ public List getDemoTargets( default: } - return Collections.singletonList(model); } } diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java index d18608bdda..2aedd5f305 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java @@ -50,9 +50,6 @@ import lombok.extern.slf4j.Slf4j; -/** - * @date 2022/2/8 - **/ @RestController @Slf4j @RequestMapping("/v1") diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java index 75c90f16bf..1ab6326112 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java @@ -46,9 
+46,6 @@ import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com on 2022/2/9 - */ @RestController @Slf4j @RequestMapping("/test") diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java index d73a0873ac..2f21cfbacd 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java @@ -37,16 +37,12 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com created on 2021/11/4 - */ - @Slf4j @GRpcService public class PDPulseService extends HgPdPulseGrpc.HgPdPulseImplBase { private static final Supplier> QUEUE_RETRIEVE_FUNCTION = - () -> Collections.emptyList(); + () -> Collections.emptyList(); private static final Function QUEUE_ITEM_BOOLEAN_FUNCTION = (e) -> true; private static final Function QUEUE_REMOVE_FUNCTION = (e) -> true; @Autowired diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java index 1ddda6de12..d4b9481e9d 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDWatchService.java @@ -26,9 +26,6 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com created on 2021/11/4 - */ @Slf4j @GRpcService public class PDWatchService extends HgPdWatchGrpc.HgPdWatchImplBase { diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java index 1f797bef4e..7683e58073 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PromTargetsService.java @@ -44,9 +44,6 @@ import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com on 2022/2/24 - */ @Service @Slf4j public class PromTargetsService { diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java index 5035e453b5..5aa67f93bf 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java @@ -30,9 +30,6 @@ import io.grpc.ManagedChannelBuilder; import io.grpc.MethodDescriptor; -/** - * @date 2022/6/21 - **/ public interface ServiceGrpc extends RaftStateListener { ConcurrentHashMap channels = new ConcurrentHashMap(); diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java index 5435b5b902..f26be26dd9 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java @@ -24,9 +24,6 @@ import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.Pdpb; 
-/** - * @date 2022/3/23 - **/ public class DateUtil { private static final String DATE = "yyyy-MM-dd"; private static final String DATETIME = "yyyy-MM-dd HH:mm:ss"; diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java index 496dfee0ff..8b6a4a4dcb 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java @@ -24,7 +24,6 @@ /** * @param * @param - * @author lynn.bond@hotmail.com on 2022/3/10 */ public class HgMapCache { private final Map cache = new ConcurrentHashMap(); diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java index 3c5f7a82da..75e4287178 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java @@ -19,9 +19,6 @@ import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com on 2022/2/8 - */ @Slf4j public final class IdUtil { private static final byte[] LOCK = new byte[0]; diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java index c3f94821e2..79905511f2 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/AbstractWatchSubject.java @@ -33,9 +33,6 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com created on 2021/11/5 - */ @ThreadSafe @Slf4j abstract class AbstractWatchSubject { diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java index 9130651d6c..0a2dbd84b5 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java @@ -42,8 +42,6 @@ /** * Handles watch subscriptions and responses - * - * @date 2022/6/21 **/ @Slf4j public class KvWatchSubject { @@ -53,7 +51,7 @@ public class KvWatchSubject { public static final String ALL_PREFIX = "W"; public static final long WATCH_TTL = 20000L; private static final ConcurrentMap> clients = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); private final KvService kvService; BiPredicate equal = String::equals; BiPredicate startWith = String::startsWith; @@ -76,7 +74,7 @@ private void addWatchKey(String key, String delimiter, long clientId) throws PDE String watchKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); kvService.put(watchKey, "", WATCH_TTL); String clientFirstKey = - KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, key, clientId); + KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, key, clientId); kvService.put(clientFirstKey, "", WATCH_TTL); } @@ -99,7 +97,7 @@ private void removeWatchKey(String key, String delimiter, long clientId) throws public void addObserver(String key, long clientId, StreamObserver observer, String delimiter) throws PDException { String
keyWithoutPrefix = - KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); clients.putIfAbsent(keyWithoutPrefix, observer); addWatchKey(key, delimiter, clientId); log.info("client:{},start to watch key:{}", clientId, key); @@ -108,7 +106,7 @@ public void addObserver(String key, long clientId, StreamObserver public void removeObserver(String key, long clientId, String delimiter) throws PDException { removeWatchKey(key, delimiter, clientId); String keyWithoutPrefix = - KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); clients.remove(keyWithoutPrefix); } @@ -145,15 +143,15 @@ public void notifyObserver(String key, WatchType watchType, continue; } WatchKv watchKv = - WatchKv.newBuilder().setKey(kvKey).setValue(kv.getValue()).build(); + WatchKv.newBuilder().setKey(kvKey).setValue(kv.getValue()).build(); WatchEvent event = - WatchEvent.newBuilder().setCurrent(watchKv).setType(watchType).build(); + WatchEvent.newBuilder().setCurrent(watchKv).setType(watchType).build(); watchEvents.add(event); } StreamObserver observer = clients.get(keyAndClient); watchResponse = - WatchResponse.newBuilder().setState(WatchState.Started).setClientId(clientId) - .addAllEvents(watchEvents).build(); + WatchResponse.newBuilder().setState(WatchState.Started).setClientId(clientId) + .addAllEvents(watchEvents).build(); try { if (observer != null) { @@ -188,7 +186,7 @@ public void keepClientAlive() { WatchResponse testAlive = WatchResponse.newBuilder().setState(WatchState.Alive).build(); Set>> entries = clients.entrySet(); Map.Entry>[] array = - entries.toArray(new Map.Entry[0]); + entries.toArray(new Map.Entry[0]); Arrays.stream(array).parallel().forEach(entry -> { StreamObserver value = entry.getValue(); String key = entry.getKey(); @@ -262,7 +260,7 @@ private void removeClient(StreamObserver value, String key, Strin */ public void notifyClientChangeLeader() { WatchResponse response = - WatchResponse.newBuilder().setState(WatchState.Leader_Changed).build(); + WatchResponse.newBuilder().setState(WatchState.Leader_Changed).build(); for (Map.Entry> entry : clients.entrySet()) { StreamObserver value = entry.getValue(); String key = entry.getKey(); @@ -287,4 +285,4 @@ public void notifyClientChangeLeader() { } } } -} \ No newline at end of file +} diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java index 4e8b50fd13..0e7c26dde5 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/NodeChangeSubject.java @@ -27,8 +27,6 @@ /** * The subject of partition change. 
- * - * @author lynn.bond@hotmail.com created on 2021/11/26 */ @ThreadSafe final class NodeChangeSubject extends AbstractWatchSubject { @@ -50,11 +48,11 @@ public void notifyWatcher(NodeEventType nodeEventType, String graph, long nodeId super.notifyWatcher(builder -> { builder.setNodeResponse( - builder.getNodeResponseBuilder().clear() - .setGraph(graph) - .setNodeId(nodeId) - .setNodeEventType(nodeEventType) - .build() + builder.getNodeResponseBuilder().clear() + .setGraph(graph) + .setNodeId(nodeId) + .setNodeEventType(nodeEventType) + .build() ); }); diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java index cf3e6df06e..92ef98e8e6 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java @@ -35,9 +35,6 @@ import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; -/** - * @author lynn.bond@hotmail.com created on 2021/11/4 - */ @Slf4j @ThreadSafe public class PDWatchSubject implements StreamObserver { diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java index 2e664b98f0..85f5e8b7f0 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PartitionChangeSubject.java @@ -28,8 +28,6 @@ /** * The subject of partition change. - * - * @author lynn.bond@hotmail.com created on 2021/11/5 */ @ThreadSafe final class PartitionChangeSubject extends AbstractWatchSubject { @@ -40,11 +38,10 @@ final class PartitionChangeSubject extends AbstractWatchSubject { @Override String toNoticeString(WatchResponse res) { - StringBuilder sb = new StringBuilder(); - return sb.append("graph:").append(res.getPartitionResponse().getGraph()) - .append(",") - .append("partitionId:").append(res.getPartitionResponse().getPartitionId()) - .toString(); + String sb = "graph:" + res.getPartitionResponse().getGraph() + + "," + + "partitionId:" + res.getPartitionResponse().getPartitionId(); + return sb; } public void notifyWatcher(WatchChangeType changeType, String graph, int partitionId) { @@ -53,14 +50,14 @@ public void notifyWatcher(WatchChangeType changeType, String graph, int partitio super.notifyWatcher(builder -> { builder.setPartitionResponse( - builder.getPartitionResponseBuilder().clear() - .setGraph(graph) - .setPartitionId(partitionId) - .setChangeType(changeType) - .build() + builder.getPartitionResponseBuilder().clear() + .setGraph(graph) + .setPartitionId(partitionId) + .setChangeType(changeType) + .build() ); }); } -} \ No newline at end of file +} diff --git a/hugegraph-pd/hg-pd-service/src/main/resources/private-keys.store b/hugegraph-pd/hg-pd-service/src/main/resources/private-keys.store deleted file mode 100644 index 258075b0d7b3670bb6eb00094ec3433d2bc00c2f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1299 zcmezO_TO6u1_mY|W&~rdf}+f_#FEtP)JmXG&b)3p0U+IP(8Snnz{kd=&Bn;WsKq46 z$jHjT(!^NqB{sKy!R$S6#XNgs7OV&su=_qcqxkp?v43~6B{+m?W{H%fIxr(lpIcO%zi00)SpGkuEqmE*{o^ib^0(@>%9gr_ zZ%tdAYW94AT&n7WFC8;CZO(k?EM~xGysav4oupQ8*u-0Ip$;AKeXT#&Zq!-!ojvi# 
z-rHvdK5%|yo;+ob+i$mLD%;g-{uLAkZ@O4I>)~FpIcGyIbEtpYY{7T(<;7PmCl~l= zO3D6Kkx#2O_Sau;8FuixYJagzE^h8h4UbV;CGR~>fEvs74 zg28I*)5N~-O}l5;#pUp;tz8*ZS@NsnPDhQn;!Wo5>T4cl$MXWC1Q=bc5qhQumJAHc zmIh7CrUp$+pBFGQF)}f+>$ndv5xKXxLNHgfZ*Bre!Po94h+#Y1}-i6_?oPEQ#K($QQSO=xR-;uVx z5x@2y{kqHG+v(Y%IoBJl8)QxLKcyvX&z>2ubjkerFIcJtln(jtUU~iB_G8jNtizul zTd_=(pXGP+x64Q64l|jC2uF!cH2PNhd7XCRb*=ZGl$a&wUi4r*&Lqk(Jt3f9RGItC zrb#=S7QJra{tzMa738$<4;Qpvn{9jV^q$o*W;?=nR!&*J`4A~q zeY>ti`adgV7ietQ;we_B{K7{=OnCcwPNSX^%O2W@Rdt8d^lCgsws><*8k3XYB4vqGJxE1 zyzyV*q#aq0>hEosd_&sMZhGjPlq;9YN@}kO-{;=?k|ut1*ThBJ{B<+ktp$bU*d|> z17tY2ytry9XFV@q-A0sj&aBU1pu;4hq0aC)l+AYTv)YsEG&74}kc pnItpQn_JcVxlcHLRjC^3>K}WyL{jo{$^6#e?qSKFL!!&y0|4Xr{n`Kk diff --git a/hugegraph-pd/hg-pd-service/src/main/resources/public-certs.store b/hugegraph-pd/hg-pd-service/src/main/resources/public-certs.store deleted file mode 100644 index 0da6d226993ecbab1650809dd3456e549d3844af..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 892 zcmezO_TO6u1_mY|W(3n*1*J(jnaRniMI}I?oOwM}-VCe}dZq@J3=GVc22ISS22D(# z7cet1GBL5_WMuakaI>*%wRxPgU}0uuFvu|!FyLcj4rO5zW)IEF&dV>)gNblph_GXb zaAAmWV~8-rMGWM`c?~TMEDX&J%uURUO`^nkjX+!zD3{KzX=2=A(8Q<%3l2sgN1my% zk>PKBaHDLgkY><*uQ_^eo;?31xIM_`y$i!(Is1lffohqqu?|XszawpXBYy2a`gNDV zx6`vjbFMd9H^`dge@aW(o;@>Q>5}>LU$9gQC>`?Oz4H3M?Z>2lScgA9wqltmKg;jt zZss$WDKSgVz39PsoJo{ndO|?Is51ANO_O#uEqdL; z{UJi;E68czA1-LUHrw{z={>7s%yxwDtemob_b;ngO+UkZxNX^+(gP#I`*vN4^nX^! zF3{Mp#Z#$nDF-VoJKt-mOZo)tLhG^>D73!c%7W~=4GW5^A^{0|57wf`uIDk zic!V-%#x4$Rs>GyT+w|0Vbs(4Y?Hdv;u@+$RZ|#!tpA<&)M9RIWdOP3c;mmqNjtJ0 z)!*AN`G&Ni-Sp5oDOWC)mDFAnzR$h&Bct7)myUMrIjP6pZvQV}=M#AqesP`pq8HJ| zSIjdCN*|uQxk{16@@>9nY{klh%XBqMw68X%@7l>}J@vRi|LRso{^fBEDgPYiCU7`T zR`kCvI&c2|Q%`i*YVswgy6rF3Z3|eeXdrJO3rx4Nd@N!tB2nV=zQh%$2gq=4d2!WJ z&U#+Jx{WC5oLQg2K!-_0L!IGqD4XrvXSFBSY3%0mV%}4;l}V&lE9|A@GD&8pH@B+! hbDwbhs!}!L3FzK_@~w;Bl-ae star - http://10.14.139.8:8082/artifactory/star + http://127.0.0.1:8082/artifactory/star true always