diff --git a/.github/workflows/pd-store-ci.yml b/.github/workflows/pd-store-ci.yml new file mode 100644 index 0000000000..25a54c1224 --- /dev/null +++ b/.github/workflows/pd-store-ci.yml @@ -0,0 +1,97 @@ +name: "hugegraph-pd-store-ci" + +on: + push: + branches: + - master + - 'release-*' + - 'test-*' + pull_request: + +# TODO: consider merge to one ci.yml file +jobs: + pd: + runs-on: ubuntu-latest + env: + # TODO: avoid duplicated env setup in pd & store + USE_STAGE: 'false' # Whether to include the stage repository. + # TODO: remove outdated env + TRAVIS_DIR: hugegraph-server/hugegraph-dist/src/assembly/travis + REPORT_DIR: target/site/jacoco + + steps: + - name: Install JDK 11 + uses: actions/setup-java@v3 + with: + java-version: '11' + distribution: 'zulu' + + - name: Cache Maven packages + uses: actions/cache@v3 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2 + + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 5 + + - name: use staged maven repo settings + if: ${{ env.USE_STAGE == 'true' }} + run: | + cp $HOME/.m2/settings.xml /tmp/settings.xml + mv -vf .github/configs/settings.xml $HOME/.m2/settings.xml + + - name: Run common test + run: | + mvn test -pl hugegraph-pd/hg-pd-test -am -P pd-common-test + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3.0.0 + with: + file: ${{ env.REPORT_DIR }}/*.xml + + store: + # TODO: avoid duplicated env setup + runs-on: ubuntu-latest + env: + USE_STAGE: 'false' # Whether to include the stage repository. 
+ # TODO: remove outdated env + TRAVIS_DIR: hugegraph-server/hugegraph-dist/src/assembly/travis + REPORT_DIR: target/site/jacoco + + steps: + - name: Install JDK 11 + uses: actions/setup-java@v3 + with: + java-version: '11' + distribution: 'zulu' + + - name: Cache Maven packages + uses: actions/cache@v3 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2 + + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 5 + + - name: use staged maven repo settings + if: ${{ env.USE_STAGE == 'true' }} + run: | + cp $HOME/.m2/settings.xml /tmp/settings.xml + mv -vf .github/configs/settings.xml $HOME/.m2/settings.xml + + - name: Run common test + run: | + mvn test -pl hugegraph-store/hg-store-test -am -P store-common-test + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3.0.0 + with: + file: ${{ env.REPORT_DIR }}/*.xml diff --git a/.github/workflows/pd-store.yml b/.github/workflows/pd-store.yml deleted file mode 100644 index 65fb3ccc9c..0000000000 --- a/.github/workflows/pd-store.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: "pd-store" - -on: - push: - branches: - - master - - 'release-*' - - 'test-*' - pull_request: - -jobs: - pd: - runs-on: ubuntu-latest - env: - USE_STAGE: 'true' # Whether to include the stage repository. 
- TRAVIS_DIR: hugegraph-server/hugegraph-dist/src/assembly/travis - REPORT_DIR: target/site/jacoco - - steps: - - name: Install JDK 11 - uses: actions/setup-java@v3 - with: - java-version: '11' - distribution: 'zulu' - - - name: Cache Maven packages - uses: actions/cache@v3 - with: - path: ~/.m2 - key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} - restore-keys: ${{ runner.os }}-m2 - - - name: Checkout - uses: actions/checkout@v3 - with: - fetch-depth: 2 - - - name: use staged maven repo settings - if: ${{ env.USE_STAGE == 'true' }} - run: | - cp $HOME/.m2/settings.xml /tmp/settings.xml - mv -vf .github/configs/settings.xml $HOME/.m2/settings.xml - - - name: Run common test - run: | - mvn test -pl hugegraph-pd/hg-pd-test -am -P pd-common-test - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3.0.0 - with: - file: ${{ env.REPORT_DIR }}/*.xml diff --git a/.github/workflows/ci.yml b/.github/workflows/server-ci.yml similarity index 96% rename from .github/workflows/ci.yml rename to .github/workflows/server-ci.yml index b96383f7cd..7af6c3be0e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/server-ci.yml @@ -1,4 +1,4 @@ -name: "hugegraph-ci" +name: "hugegraph-server-ci" on: push: @@ -13,7 +13,7 @@ jobs: # TODO: we need test & replace it to ubuntu-24.04 or ubuntu-latest runs-on: ubuntu-20.04 env: - USE_STAGE: 'true' # Whether to include the stage repository. + USE_STAGE: 'false' # Whether to include the stage repository. 
TRAVIS_DIR: hugegraph-server/hugegraph-dist/src/assembly/travis REPORT_DIR: target/site/jacoco BACKEND: ${{ matrix.BACKEND }} @@ -34,7 +34,7 @@ jobs: - name: Checkout uses: actions/checkout@v4 with: - fetch-depth: 2 + fetch-depth: 5 # TODO: Remove this step after install-backend.sh updated - name: Install Java8 for backend diff --git a/hugegraph-store/.gitignore b/hugegraph-store/.gitignore new file mode 100644 index 0000000000..afb9ae8375 --- /dev/null +++ b/hugegraph-store/.gitignore @@ -0,0 +1,2 @@ +# Exclude the generated PB files +hg-store-grpc/src/main/java/ diff --git a/hugegraph-store/README.md b/hugegraph-store/README.md index bef8b53c8a..cd2dedb900 100644 --- a/hugegraph-store/README.md +++ b/hugegraph-store/README.md @@ -1,5 +1,8 @@ -# HugeGraph Store +> Note: From revision 1.5.0, the code of HugeGraph-Store will be adapted to this location (WIP). -HugeGraph Store is a new built-in storage backend, which uses RocksDB as the distributed backend storage engine. +# HugeGraph Store (BETA) -> Note: Currently, the contents of this folder are empty. Starting from revision 1.5.0, the code of HugeGraph Store will be adapted to this location (WIP). +HugeGraph Store is a new built-in storage backend, which uses RocksDB as the distributed backend +storage engine. 
+ +> BTW, if you meet any problem when using HugeGraph Store, please feel free to contact us for help diff --git a/hugegraph-store/hg-store-client/pom.xml b/hugegraph-store/hg-store-client/pom.xml new file mode 100644 index 0000000000..2c402d37aa --- /dev/null +++ b/hugegraph-store/hg-store-client/pom.xml @@ -0,0 +1,107 @@ + + + + + 4.0.0 + + + org.apache.hugegraph + hugegraph-store + ${revision} + ../pom.xml + + + hg-store-client + + + 11 + 11 + true + 2.15.0 + 1.18.20 + + + + + org.apache.hugegraph + hg-store-grpc + ${revision} + + + org.apache.hugegraph + hg-store-common + ${revision} + + + org.apache.hugegraph + hg-pd-client + ${revision} + + + org.apache.hugegraph + hg-pd-grpc + ${revision} + + + + org.projectlombok + lombok + ${lombok.version} + + + org.apache.logging.log4j + log4j-slf4j-impl + ${log4j2.version} + + + + junit + junit + 4.13.2 + test + + + + com.google.protobuf + protobuf-java-util + 3.17.2 + + + + commons-io + commons-io + 2.7 + test + + + com.fasterxml.jackson.core + jackson-databind + 2.13.0 + test + + + commons-codec + commons-codec + 1.15 + test + + + + diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvEntry.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvEntry.java new file mode 100644 index 0000000000..ff44db008e --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvEntry.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store; + +public interface HgKvEntry { + + byte[] key(); + + byte[] value(); + + default int code() { + return -1; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvIterator.java new file mode 100644 index 0000000000..38c8b0039b --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvIterator.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store; + +import java.io.Closeable; +import java.util.Iterator; + +/** + * created on 2021/10/21 + */ +public interface HgKvIterator extends Iterator, HgSeekAble, Closeable { + + byte[] key(); + + byte[] value(); + + @Override + void close(); + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvOrderedIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvOrderedIterator.java new file mode 100644 index 0000000000..52df012ef2 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvOrderedIterator.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store; + +/** + * created on 2022/03/10 + */ +public interface HgKvOrderedIterator extends HgKvIterator, Comparable { + + long getSequence(); +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvPagingIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvPagingIterator.java new file mode 100644 index 0000000000..ba9fa33981 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvPagingIterator.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store; + +/** + * created on 2021/10/24 + */ +public interface HgKvPagingIterator extends HgKvIterator, HgPageSize { + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvStore.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvStore.java new file mode 100644 index 0000000000..db640592f3 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgKvStore.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store; + +import java.util.List; + +import org.apache.hugegraph.store.client.grpc.KvCloseableIterator; +import org.apache.hugegraph.store.grpc.stream.ScanStreamReq; + +/** + * @version 0.2.0 + */ +public interface HgKvStore { + + /** + * CAUTION: THE CONST BELOW MUST KEEP CONSISTENCE TO ScanIterator.Trait. 
+ */ + int SCAN_ANY = 0x80; + int SCAN_PREFIX_BEGIN = 0x01; + int SCAN_PREFIX_END = 0x02; + int SCAN_GT_BEGIN = 0x04; + int SCAN_GTE_BEGIN = 0x0c; + int SCAN_LT_END = 0x10; + int SCAN_LTE_END = 0x30; + int SCAN_KEYONLY = 0x40; + int SCAN_HASHCODE = 0x100; + + boolean put(String table, HgOwnerKey ownerKey, byte[] value); + + /** + * 该版本被store内部使用。向分区写入数据, + * partitionId与key.keyCode必须与pd存储的分区信息保持一致。 + */ + boolean directPut(String table, int partitionId, HgOwnerKey key, byte[] value); + + byte[] get(String table, HgOwnerKey ownerKey); + + boolean clean(int partId); + + boolean delete(String table, HgOwnerKey ownerKey); + + boolean deleteSingle(String table, HgOwnerKey ownerKey); + + boolean deletePrefix(String table, HgOwnerKey prefix); + + boolean deleteRange(String table, HgOwnerKey start, HgOwnerKey end); + + boolean merge(String table, HgOwnerKey key, byte[] value); + + @Deprecated + List batchGetOwner(String table, List keyList); + + HgKvIterator scanIterator(String table); + + HgKvIterator scanIterator(String table, byte[] query); + + HgKvIterator scanIterator(String table, long limit); + + HgKvIterator scanIterator(String table, long limit, byte[] query); + + HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix); + + HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix, long limit); + + HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix, long limit, + byte[] query); + + HgKvIterator scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey); + + HgKvIterator scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey, + long limit); + + HgKvIterator scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey, + long limit, byte[] query); + + HgKvIterator scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey, + long limit, int scanType, byte[] query); + + HgKvIterator scanIterator(String table, int codeFrom, int codeTo, int scanType, + byte[] query); + + // HgKvIterator scanIterator(ScanStreamReq 
scanReq); + + HgKvIterator scanIterator(ScanStreamReq.Builder scanReqBuilder); + + long count(String table); + + boolean truncate(); + + default boolean existsTable(String table) { + return false; + } + + boolean createTable(String table); + + boolean deleteTable(String table); + + boolean dropTable(String table); + + boolean deleteGraph(String graph); + + List> scanBatch(HgScanQuery scanQuery); + + KvCloseableIterator> scanBatch2(HgScanQuery scanQuery); + + KvCloseableIterator> scanBatch3(HgScanQuery scanQuery, + KvCloseableIterator iterator); + + HgKvIterator batchPrefix(String table, List prefixList); +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgOwnerKey.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgOwnerKey.java new file mode 100644 index 0000000000..e9245b3a39 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgOwnerKey.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store; + +import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_BYTES; +import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_OWNER_KEY; + +import java.io.Serializable; +import java.util.Arrays; + +import org.apache.hugegraph.store.client.util.HgStoreClientUtil; + +/** + * created on 2021/10/14 + * + * @version 1.3.0 add canceled assert + */ +public class HgOwnerKey implements Serializable { + + private final byte[] owner; // TODO: consider remove? since it seems to be useless + private int keyCode = 0;// TODO: Be here OK? + private byte[] key; + // Sequence number, used for batch queries to ensure the order of returned results + private int serialNo; + + /** + * @param owner + * @param key + * @see HgOwnerKey:of(byte[] owner, byte[] key) + */ + @Deprecated + public HgOwnerKey(byte[] owner, byte[] key) { + if (owner == null) { + owner = EMPTY_BYTES; + } + if (key == null) { + key = EMPTY_BYTES; + } + this.owner = owner; + this.key = key; + } + + public HgOwnerKey(int code, byte[] key) { + if (key == null) { + key = EMPTY_BYTES; + } + this.owner = EMPTY_BYTES; + this.key = key; + this.keyCode = code; + } + + public static HgOwnerKey emptyOf() { + return EMPTY_OWNER_KEY; + } + + public static HgOwnerKey newEmpty() { + return HgOwnerKey.of(EMPTY_BYTES, EMPTY_BYTES); + } + + public static HgOwnerKey ownerOf(byte[] owner) { + return new HgOwnerKey(owner, EMPTY_BYTES); + } + + public static HgOwnerKey codeOf(int code) { + return HgOwnerKey.of(EMPTY_BYTES, EMPTY_BYTES).setKeyCode(code); + } + + public static HgOwnerKey of(byte[] owner, byte[] key) { + return new HgOwnerKey(owner, key); + } + + public static HgOwnerKey of(int keyCode, byte[] key) { + return new HgOwnerKey(keyCode, key); + } + + public byte[] getOwner() { + return owner; + } + + public byte[] getKey() { + return key; + } + + public int getKeyCode() { + return keyCode; + } + + public HgOwnerKey setKeyCode(int keyCode) { + 
this.keyCode = keyCode; + return this; + } + + public HgOwnerKey codeToKey(int keyCode) { + this.keyCode = keyCode; + this.key = HgStoreClientUtil.toIntBytes(keyCode); + return this; + } + + public int getSerialNo() { + return this.serialNo; + } + + public HgOwnerKey setSerialNo(int serialNo) { + this.serialNo = serialNo; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + HgOwnerKey that = (HgOwnerKey) o; + return Arrays.equals(owner, that.owner) && Arrays.equals(key, that.key); + } + + @Override + public int hashCode() { + int result = Arrays.hashCode(owner); + result = 31 * result + Arrays.hashCode(key); + return result; + } + + @Override + public String toString() { + return "HgOwnerKey{" + + "owner=" + Arrays.toString(owner) + + ", key=" + Arrays.toString(key) + + ", code=" + keyCode + + '}'; + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPageSize.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPageSize.java new file mode 100644 index 0000000000..38163d568f --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPageSize.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store; + +/** + * Return the amount of records returned by one query in pageable-query. + *

+ * created on 2021/10/24 + */ +public interface HgPageSize { + + long getPageSize(); + + default boolean isPageEmpty() { + return false; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPrivate.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPrivate.java new file mode 100644 index 0000000000..80cdb77471 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPrivate.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store; + +public final class HgPrivate { + + private static final HgPrivate INSTANCE = new HgPrivate(); + + private HgPrivate() { + } + + static HgPrivate of() { + return INSTANCE; + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgScanQuery.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgScanQuery.java new file mode 100644 index 0000000000..cc64ba945b --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgScanQuery.java @@ -0,0 +1,331 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store; + +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +import org.apache.hugegraph.store.client.util.HgAssert; +import org.apache.hugegraph.store.grpc.common.ScanOrderType; + +/** + * 2022/3/4 + * + * @version 0.5.0 + */ +public interface HgScanQuery { + + static HgScanQuery tableOf(String table) { + return ScanBuilder.tableOf(table).build(); + } + + static HgScanQuery rangeOf(String table, List startList, List endList) { + return ScanBuilder.rangeOf(table, startList, endList).build(); + } + + static HgScanQuery prefixOf(String table, List prefixList) { + return ScanBuilder.prefixOf(table, prefixList).build(); + } + + static HgScanQuery prefixOf(String table, List prefixList, + ScanOrderType orderType) { + return ScanBuilder.prefixOf(table, prefixList).setOrderType(orderType).build(); + } + + static HgScanQuery prefixIteratorOf(String table, Iterator prefixItr) { + return ScanBuilder.prefixIteratorOf(table, prefixItr).build(); + } + + static HgScanQuery prefixIteratorOf(String table, Iterator prefixItr, + ScanOrderType orderType) { + return ScanBuilder.prefixIteratorOf(table, prefixItr).setOrderType(orderType).build(); + } + + String getTable(); + + HgScanQuery.ScanMethod getScanMethod(); + + List getPrefixList(); + + Iterator getPrefixItr(); + + List getStartList(); + + List getEndList(); + + long getLimit(); + + long getPerKeyLimit(); + + long getPerKeyMax(); + + long getSkipDegree(); + + int getScanType(); + + ScanOrderType getOrderType(); + + boolean isOnlyKey(); + + byte[] getQuery(); + + ScanBuilder builder(); + + enum ScanMethod { + ALL, + PREFIX, + RANGE + } + + enum SortType { + UNSORTED, + SORT_BY_EDGE, + SORT_BY_VERTEX + } + + class ScanBuilder { + + private final String table; + private final HgScanQuery.ScanMethod sanMethod; + private long limit = Integer.MAX_VALUE; + private long perKeyLimit = Integer.MAX_VALUE; + private long perKeyMax = Integer.MAX_VALUE; + private int 
scanType; + private ScanOrderType orderType; + + private long skipDegree; + + private boolean onlyKey; + private byte[] query; + private List prefixList; + private List startList; + private List endList; + private Iterator prefixItr; + + ScanBuilder(HgScanQuery.ScanMethod sanMethod, String table) { + this.table = table; + this.sanMethod = sanMethod; + this.orderType = ScanOrderType.ORDER_NONE; + } + + public static ScanBuilder rangeOf(String table, List startList, + List endList) { + HgAssert.isArgumentValid(table, "table"); + HgAssert.isArgumentValid(startList, "startList"); + HgAssert.isArgumentValid(endList, "endList"); + HgAssert.isTrue(startList.size() == endList.size() + , "The size of startList not equals endList's."); + + ScanBuilder res = new ScanBuilder(HgScanQuery.ScanMethod.RANGE, table); + res.startList = startList; + res.endList = endList; + res.scanType = HgKvStore.SCAN_GTE_BEGIN | HgKvStore.SCAN_LTE_END; + return res; + } + + public static ScanBuilder prefixOf(String table, List prefixList) { + HgAssert.isArgumentValid(table, "table"); + HgAssert.isArgumentValid(prefixList, "prefixList"); + + ScanBuilder res = new ScanBuilder(HgScanQuery.ScanMethod.PREFIX, table); + res.prefixList = prefixList; + return res; + + } + + public static ScanBuilder prefixIteratorOf(String table, Iterator prefixItr) { + HgAssert.isArgumentValid(table, "table"); + + ScanBuilder res = new ScanBuilder(HgScanQuery.ScanMethod.PREFIX, table); + res.prefixItr = prefixItr; + return res; + + } + + public static ScanBuilder tableOf(String table) { + HgAssert.isArgumentValid(table, "table"); + + return new ScanBuilder(HgScanQuery.ScanMethod.ALL, table); + } + + public ScanBuilder setLimit(long limit) { + this.limit = limit; + return this; + } + + public ScanBuilder setPerKeyLimit(long limit) { + this.perKeyLimit = limit; + return this; + } + + public ScanBuilder setPerKeyMax(long max) { + this.perKeyMax = max; + return this; + } + + public ScanBuilder setScanType(int scanType) { + 
this.scanType = scanType; + return this; + } + + public ScanBuilder setOrderType(ScanOrderType orderType) { + this.orderType = orderType; + return this; + } + + public ScanBuilder setQuery(byte[] query) { + this.query = query; + return this; + } + + public ScanBuilder setSkipDegree(long skipDegree) { + this.skipDegree = skipDegree; + return this; + } + + public ScanBuilder setOnlyKey(boolean onlyKey) { + this.onlyKey = onlyKey; + return this; + } + + public HgScanQuery build() { + return this.new BatchScanQuery(); + } + + private class BatchScanQuery implements HgScanQuery { + + @Override + public String getTable() { + return table; + } + + @Override + public HgScanQuery.ScanMethod getScanMethod() { + return sanMethod; + } + + @Override + public List getPrefixList() { + if (prefixList == null) { + return Collections.EMPTY_LIST; + } else { + return Collections.unmodifiableList(prefixList); + } + } + + @Override + public Iterator getPrefixItr() { + return prefixItr; + } + + @Override + public List getStartList() { + if (startList == null) { + return Collections.EMPTY_LIST; + } else { + return Collections.unmodifiableList(startList); + } + } + + @Override + public List getEndList() { + if (endList == null) { + return Collections.EMPTY_LIST; + } else { + return Collections.unmodifiableList(endList); + } + } + + @Override + public long getLimit() { + return limit; + } + + @Override + public long getPerKeyLimit() { + return perKeyLimit; + } + + @Override + public long getPerKeyMax() { + return perKeyMax; + } + + @Override + public long getSkipDegree() { + return skipDegree; + } + + @Override + public int getScanType() { + return scanType; + } + + @Override + public ScanOrderType getOrderType() { + return orderType; + } + + @Override + public boolean isOnlyKey() { + return onlyKey; + } + + @Override + public byte[] getQuery() { + return query; + } + + @Override + public ScanBuilder builder() { + return ScanBuilder.this; + } + + @Override + public String toString() { + 
final StringBuffer sb = new StringBuffer("HgScanQuery{"); + sb.append("table='").append(getTable()).append('\''); + sb.append(", scanMethod=").append(getScanMethod()); + sb.append(", prefixList=").append(getPrefixList()); + sb.append(", startList=").append(getStartList()); + sb.append(", endList=").append(getEndList()); + sb.append(", limit=").append(getLimit()); + sb.append(", perKeyLimit=").append(getPerKeyLimit()); + sb.append(", perKeyMax=").append(getPerKeyMax()); + sb.append(", skipDegree=").append(getSkipDegree()); + sb.append(", scanType=").append(getScanType()); + sb.append(", orderType=").append(getOrderType()); + sb.append(", onlyKey=").append(isOnlyKey()); + sb.append(", query="); + if (query == null) { + sb.append("null"); + } else { + sb.append('['); + for (int i = 0; i < query.length; ++i) { + sb.append(i == 0 ? "" : ", ").append(query[i]); + } + sb.append(']'); + } + sb.append('}'); + return sb.toString(); + } + } + + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSeekAble.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSeekAble.java new file mode 100644 index 0000000000..fe6a580a1c --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSeekAble.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store; + +/** + * created on 2022/03/11 + */ +public interface HgSeekAble { + + byte[] position(); + + void seek(byte[] position); +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionManager.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionManager.java new file mode 100644 index 0000000000..37c2184c80 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionManager.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.store.client.HgStoreSessionProvider; + +/** + * Maintain HgStoreSession instances. 
+ * HgStore-clusters. + */ + +@ThreadSafe +public final class HgSessionManager { + + // TODO: Holding more than one HgSessionManager is available,if you want to connect multi + private final static HgSessionManager INSTANCE = new HgSessionManager(); + private final HgSessionProvider sessionProvider; + + private HgSessionManager() { + // TODO: constructed by SPI + this.sessionProvider = new HgStoreSessionProvider(); + } + + public static HgSessionManager getInstance() { + return INSTANCE; + } + + /** + * Retrieve or create a HgStoreSession. + * + * @param graphName + * @return + */ + public HgStoreSession openSession(String graphName) { + return this.sessionProvider.createSession(graphName); + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionProvider.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionProvider.java new file mode 100644 index 0000000000..7049c27b01 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionProvider.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store; + +import javax.annotation.concurrent.ThreadSafe; + +/** + * created on 2021/10/12 + */ +@ThreadSafe +public interface HgSessionProvider { + + HgStoreSession createSession(String graphName); +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreClient.java new file mode 100644 index 0000000000..0f8ebb929f --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreClient.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.store.client.HgStoreNodeManager; +import org.apache.hugegraph.store.client.HgStoreNodePartitionerImpl; +import org.apache.hugegraph.store.client.HgStoreSessionProvider; + +/** + * Maintain HgStoreSession instances. + * HgStore-clusters. 
+ */ + +@ThreadSafe +public final class HgStoreClient { + + // TODO: Holding more than one HgSessionManager is available,if you want to connect multi + private final HgSessionProvider sessionProvider; + private PDClient pdClient; + + public HgStoreClient() { + this.sessionProvider = new HgStoreSessionProvider(); + } + + public HgStoreClient(PDConfig config) { + this.sessionProvider = new HgStoreSessionProvider(); + pdClient = PDClient.create(config); + setPdClient(pdClient); + } + + public HgStoreClient(PDClient pdClient) { + this.sessionProvider = new HgStoreSessionProvider(); + setPdClient(pdClient); + } + + public static HgStoreClient create(PDConfig config) { + return new HgStoreClient(config); + } + + public static HgStoreClient create(PDClient pdClient) { + return new HgStoreClient(pdClient); + } + + public static HgStoreClient create() { + return new HgStoreClient(); + } + + public void setPDConfig(PDConfig config) { + pdClient = PDClient.create(config); + setPdClient(pdClient); + } + + /** + * Retrieve or create a HgStoreSession. 
+ * + * @param graphName + * @return + */ + public HgStoreSession openSession(String graphName) { + return this.sessionProvider.createSession(graphName); + } + + public PDClient getPdClient() { + return pdClient; + } + + public void setPdClient(PDClient client) { + this.pdClient = client; + HgStoreNodeManager nodeManager = + HgStoreNodeManager.getInstance(); + + HgStoreNodePartitionerImpl p = new HgStoreNodePartitionerImpl(pdClient, nodeManager); + nodeManager.setNodeProvider(p); + nodeManager.setNodePartitioner(p); + nodeManager.setNodeNotifier(p); + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreSession.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreSession.java new file mode 100644 index 0000000000..2e595e1ba1 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreSession.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store; + +import org.apache.hugegraph.store.client.type.HgStoreClientException; + +public interface HgStoreSession extends HgKvStore { + + void beginTx(); + + /** + * @throws IllegalStateException when the tx hasn't been beginning. + * @throws HgStoreClientException when failed to commit . + */ + void commit(); + + /** + * @throws IllegalStateException when the tx hasn't been beginning. + * @throws HgStoreClientException when failed to rollback. + */ + void rollback(); + + boolean isTx(); +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTkvEntry.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTkvEntry.java new file mode 100644 index 0000000000..8e08ab656e --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTkvEntry.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store; + +public interface HgTkvEntry { + + String table(); + + byte[] key(); + + byte[] value(); +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTokvEntry.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTokvEntry.java new file mode 100644 index 0000000000..57ca4d4a91 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTokvEntry.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store; + +public interface HgTokvEntry { + + String table(); + + HgOwnerKey ownerKey(); + + byte[] value(); +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartition.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartition.java new file mode 100644 index 0000000000..6fa354edec --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartition.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +import java.util.Objects; + +/** + * Immutable Object Pattern + *

+ * created on 2021/10/26 + */ +public final class HgNodePartition { + + private final Long nodeId; + //当前key的hashcode + private final Integer keyCode; + + //分区的开始结束范围 + private final Integer startKey; + private final Integer endKey; + private int hash = -1; + + HgNodePartition(Long nodeId, Integer keyCode) { + this.nodeId = nodeId; + this.keyCode = keyCode; + this.startKey = this.endKey = keyCode; + } + + HgNodePartition(Long nodeId, Integer keyCode, Integer startKey, Integer endKey) { + this.nodeId = nodeId; + this.keyCode = keyCode; + this.startKey = startKey; + this.endKey = endKey; + } + + public static HgNodePartition of(Long nodeId, Integer keyCode) { + return new HgNodePartition(nodeId, keyCode); + } + + public static HgNodePartition of(Long nodeId, Integer keyCode, Integer startKey, + Integer endKey) { + return new HgNodePartition(nodeId, keyCode, startKey, endKey); + } + + public Long getNodeId() { + return nodeId; + } + + public Integer getKeyCode() { + return keyCode; + } + + public Integer getStartKey() { + return startKey; + } + + public Integer getEndKey() { + return endKey; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + HgNodePartition that = (HgNodePartition) o; + return Objects.equals(nodeId, that.nodeId) && Objects.equals(keyCode, that.keyCode); + } + + @Override + public int hashCode() { + if (this.hash == -1) { + this.hash = Objects.hash(nodeId, keyCode); + } + return this.hash; + } + + @Override + public String toString() { + return "HgNodePartition{" + + "nodeId=" + nodeId + + ", partitionId=" + keyCode + + '}'; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartitionerBuilder.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartitionerBuilder.java new file mode 100644 index 0000000000..4bb0705b74 --- /dev/null +++ 
b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartitionerBuilder.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +import static org.apache.hugegraph.store.client.util.HgAssert.isFalse; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; + +import javax.annotation.concurrent.NotThreadSafe; + +/** + * created on 2021/10/26 + * + * @version 1.0.0 + */ +@NotThreadSafe +public final class HgNodePartitionerBuilder { + + private Set partitions = null; + + static HgNodePartitionerBuilder resetAndGet() { + return new HgNodePartitionerBuilder(); + } + + /** + * @param nodeId + * @param keyCode + * @return + * @see HgNodePartitionerBuilder:setPartitions(Set partitions) + */ + @Deprecated + public HgNodePartitionerBuilder add(Long nodeId, Integer keyCode) { + isFalse(nodeId == null, "The argument is invalid: nodeId"); + isFalse(keyCode == null, "The argument is invalid: keyCode"); + + if (this.partitions == null) { + this.partitions = new HashSet<>(16, 1); + } + + this.partitions.add(HgNodePartition.of(nodeId, keyCode)); + return this; + } + + Collection getPartitions() { + return 
this.partitions; + } + + public void setPartitions(Set partitions) { + isFalse(partitions == null, "The argument is invalid: partitions"); + this.partitions = partitions; + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgPrivate.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgPrivate.java new file mode 100644 index 0000000000..ee73485469 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgPrivate.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client; + +/** + * created on 2021/10/26 + */ +public class HgPrivate { + + private final static HgPrivate instance = new HgPrivate(); + + private HgPrivate() { + + } + + static HgPrivate getInstance() { + return instance; + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNode.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNode.java new file mode 100644 index 0000000000..31438c0a53 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNode.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +import org.apache.hugegraph.store.HgStoreSession; + +/** + * created on 2021/10/11 + * + * @version 0.2.0 + */ +public interface HgStoreNode { + + /** + * Return boolean value of being online or not + * + * @return + */ + default boolean isHealthy() { + return true; + } + + /** + * Return the unique ID of store-node. 
+ * + * @return + */ + Long getNodeId(); + + /** + * A string value concatenated by host and port: "host:port" + * + * @return + */ + String getAddress(); + + /** + * Return a new HgStoreSession instance, that is not Thread safe. + * Return null when the node is not in charge of the graph that was passed from argument. + * + * @return + */ + HgStoreSession openSession(String graphName); + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeBuilder.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeBuilder.java new file mode 100644 index 0000000000..c35b5e9343 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeBuilder.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +/** + * created on 2021/10/11 + */ +public interface HgStoreNodeBuilder { + + HgStoreNodeBuilder setNodeId(Long nodeId); + + HgStoreNodeBuilder setAddress(String address); + + /** + * To build a HgStoreNode instance. 
+ * + * @return + */ + HgStoreNode build(); + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeCandidates.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeCandidates.java new file mode 100644 index 0000000000..d8735cdc6e --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeCandidates.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client; + +import java.util.List; + +/** + * created on 2021/10/12 + */ +public final class HgStoreNodeCandidates { + + List nodeList; + + HgStoreNodeCandidates(List nodeList) { + this.nodeList = nodeList; + } + + public int size() { + return this.nodeList.size(); + } + + public HgStoreNode getNode(int index) { + return this.nodeList.get(index); + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeManager.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeManager.java new file mode 100644 index 0000000000..84709f19a9 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeManager.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.store.client.grpc.GrpcStoreNodeBuilder; +import org.apache.hugegraph.store.client.type.HgNodeStatus; +import org.apache.hugegraph.store.client.type.HgStoreClientException; +import org.apache.hugegraph.store.client.util.HgAssert; +import org.apache.hugegraph.store.client.util.HgStoreClientConst; + +import lombok.extern.slf4j.Slf4j; + +/** + * // TODO: Mapping to Store-Node-Cluster, one to one. + *

+ * created on 2021/10/11 + * + * @version 0.2.0 + */ +@ThreadSafe +@Slf4j +public final class HgStoreNodeManager { + + private final static Set CLUSTER_ID_SET = new HashSet<>(); + private final static HgStoreNodeManager instance = new HgStoreNodeManager(); + + private final String clusterId; + private final Map addressMap = new ConcurrentHashMap<>(); + private final Map nodeIdMap = new ConcurrentHashMap<>(); + private final Map> graphNodesMap = new ConcurrentHashMap<>(); + + private HgStoreNodeProvider nodeProvider; + private HgStoreNodePartitioner nodePartitioner; + private HgStoreNodeNotifier nodeNotifier; + + private HgStoreNodeManager() { + this.clusterId = HgStoreClientConst.DEFAULT_NODE_CLUSTER_ID; + } + + private HgStoreNodeManager(String clusterId) { + synchronized (CLUSTER_ID_SET) { + if (CLUSTER_ID_SET.contains(clusterId)) { + throw new RuntimeException("The cluster [" + clusterId + "] has been existing."); + } + CLUSTER_ID_SET.add(clusterId); + this.clusterId = clusterId; + } + } + + public static HgStoreNodeManager getInstance() { + return instance; + } + + /** + * Return the HgStoreNodeBuilder + * + * @return + */ + public HgStoreNodeBuilder getNodeBuilder() { + // TODO: Constructed by a provider that retrieved by SPI + return new GrpcStoreNodeBuilder(this, HgPrivate.getInstance()); + } + + /** + * Return an instance of HgStoreNode whose ID is matched to the argument. + * + * @param nodeId + * @return null when none of instance is matched to the argument,or argument is invalid. + */ + public HgStoreNode getStoreNode(Long nodeId) { + if (nodeId == null) { + return null; + } + return this.nodeIdMap.get(nodeId); + } + + /** + * Apply a HgStoreNode instance with graph-name and node-id. + * CAUTION: + * It won't work when user haven't set a HgStoreNodeProvider via setNodeProvider method. 
+ * + * @param graphName + * @param nodeId + * @return + */ + HgStoreNode applyNode(String graphName, Long nodeId) { + HgStoreNode node = this.nodeIdMap.get(nodeId); + + if (node != null) { + return node; + } + + if (this.nodeProvider == null) { + return null; + } + + node = this.nodeProvider.apply(graphName, nodeId); + + if (node == null) { + + log.warn("Failed to apply a HgStoreNode instance form the nodeProvider [ " + + this.nodeProvider.getClass().getName() + " ]."); + notifying(graphName, nodeId, HgNodeStatus.NOT_EXIST); + return null; + } + + this.addNode(graphName, node); + + return node; + } + + private void notifying(String graphName, Long nodeId, HgNodeStatus status) { + if (this.nodeNotifier != null) { + try { + this.nodeNotifier.notice(graphName, HgStoreNotice.of(nodeId, status)); + } catch (Throwable t) { + log.error("Failed to invoke " + this.nodeNotifier.getClass().getSimpleName() + + ":notice(" + nodeId + "," + status + ")", t); + } + } + } + + /** + * @param graphName + * @param notice + * @return null: when there is no HgStoreNodeNotifier in the nodeManager; + * @throws HgStoreClientException + */ + public Integer notifying(String graphName, HgStoreNotice notice) { + + if (this.nodeNotifier != null) { + + synchronized (Thread.currentThread()) { + try { + return this.nodeNotifier.notice(graphName, notice); + } catch (Throwable t) { + String msg = + "Failed to invoke " + this.nodeNotifier.getClass().getSimpleName() + + ", notice: [ " + notice + " ]"; + log.error(msg, t); + throw new HgStoreClientException(msg); + } + } + + } + + return null; + } + + /** + * Return a collection of HgStoreNode who is in charge of the graph passed in the argument. + * + * @param graphName + * @return null when none matched to argument or any argument is invalid. 
+ */ + public List getStoreNodes(String graphName) { + if (HgAssert.isInvalid(graphName)) { + return null; + } + + return this.graphNodesMap.get(graphName); + } + + /** + * Adding a new Store-Node, return the argument's value if the host+port was not existing, + * otherwise return the HgStoreNode-instance added early. + * + * @param storeNode + * @return + * @throws IllegalArgumentException when any argument is invalid. + */ + public HgStoreNode addNode(HgStoreNode storeNode) { + HgAssert.isFalse(storeNode == null, "the argument: storeNode is null."); + + Long nodeId = storeNode.getNodeId(); + + HgStoreNode node = null; + + synchronized (this.nodeIdMap) { + node = this.addressMap.get(nodeId); + if (node == null) { + node = storeNode; + this.nodeIdMap.put(nodeId, node); + this.addressMap.put(storeNode.getAddress(), node); + } + } + + return node; + } + + /** + * @param graphName + * @param storeNode + * @return + * @throws IllegalArgumentException when any argument is invalid. + */ + public HgStoreNode addNode(String graphName, HgStoreNode storeNode) { + HgAssert.isFalse(HgAssert.isInvalid(graphName), "the argument is invalid: graphName"); + HgStoreNode node = this.addNode(storeNode); + + List nodes = null; + + synchronized (this.graphNodesMap) { + nodes = this.graphNodesMap.get(graphName); + if (nodes == null) { + nodes = new ArrayList<>(); + this.graphNodesMap.put(graphName, nodes); + } + nodes.add(node); + } + + return node; + } + + public HgStoreNodePartitioner getNodePartitioner() { + return nodePartitioner; + } + + public HgStoreNodeManager setNodePartitioner(HgStoreNodePartitioner nodePartitioner) { + HgAssert.isFalse(nodePartitioner == null, "the argument is invalid: nodePartitioner"); + this.nodePartitioner = nodePartitioner; + return this; + } + + public HgStoreNodeNotifier getNodeNotifier() { + return nodeNotifier; + } + + public HgStoreNodeManager setNodeNotifier(HgStoreNodeNotifier nodeNotifier) { + HgAssert.isFalse(nodeNotifier == null, "the argument 
is invalid: nodeNotifier"); + this.nodeNotifier = nodeNotifier; + return this; + } + + public HgStoreNodeManager setNodeProvider(HgStoreNodeProvider nodeProvider) { + this.nodeProvider = nodeProvider; + return this; + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeNotifier.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeNotifier.java new file mode 100644 index 0000000000..0319d6c4de --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeNotifier.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +/** + * created on 2021/10/12 + * + * @version 1.0.0 + */ +public interface HgStoreNodeNotifier { + + /** + * It will be invoked by NodeManager, when some exception or issue was happened. + * + * @param graphName + * @param storeNotice + * @return return 0 please, for no matter what. 
+ */ + int notice(String graphName, HgStoreNotice storeNotice); + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitioner.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitioner.java new file mode 100644 index 0000000000..d540f68aa7 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitioner.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +import org.apache.hugegraph.store.client.util.HgStoreClientConst; + +/** + * created on 2021/10/12 + * + * @version 1.0.0 + */ +public interface HgStoreNodePartitioner { + + /** + * The partition algorithm implementation, that specialized by user. + * + * @param builder The builder of HgNodePartitionerBuilder. It's supposed to be invoked + * directly by user. + * e.g. builder.add(nodeId,address,partitionId); + * @param graphName + * @param startKey + * @param endKey + * @return status: + *

    + *
  • 0: The partitioner is OK.
  • + *
  • 10: The partitioner is not work.
  • + *
+ */ + int partition(HgNodePartitionerBuilder builder, String graphName, byte[] startKey, + byte[] endKey); + + /** + * @param builder + * @param graphName + * @param startCode hash code + * @param endCode hash code + * @return + */ + default int partition(HgNodePartitionerBuilder builder, String graphName, int startCode, + int endCode) { + return this.partition(builder, graphName + , HgStoreClientConst.ALL_PARTITION_OWNER + , HgStoreClientConst.ALL_PARTITION_OWNER); + } + + default int partition(HgNodePartitionerBuilder builder, String graphName, int partitionId) { + return this.partition(builder, graphName + , HgStoreClientConst.ALL_PARTITION_OWNER + , HgStoreClientConst.ALL_PARTITION_OWNER); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitionerImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitionerImpl.java new file mode 100644 index 0000000000..dba939ec86 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitionerImpl.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; + +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PartitionUtils; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.store.client.type.HgNodeStatus; +import org.apache.hugegraph.store.client.util.HgStoreClientConst; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class HgStoreNodePartitionerImpl implements HgStoreNodePartitioner, + HgStoreNodeProvider, + HgStoreNodeNotifier { + + private PDClient pdClient; + private HgStoreNodeManager nodeManager; + + protected HgStoreNodePartitionerImpl() { + } + + public HgStoreNodePartitionerImpl(PDClient pdClient, HgStoreNodeManager nodeManager) { + this.pdClient = pdClient; + this.nodeManager = nodeManager; + } + + /** + * 查询分区信息,结果通过HgNodePartitionerBuilder返回 + */ + @Override + public int partition(HgNodePartitionerBuilder builder, String graphName, + byte[] startKey, byte[] endKey) { + try { + HashSet partitions = null; + if (HgStoreClientConst.ALL_PARTITION_OWNER == startKey) { + List stores = pdClient.getActiveStores(graphName); + partitions = new HashSet<>(stores.size()); + for (Metapb.Store store : stores) { + partitions.add(HgNodePartition.of(store.getId(), -1)); + } + + } else if (endKey == HgStoreClientConst.EMPTY_BYTES + || startKey == endKey || Arrays.equals(startKey, endKey)) { + KVPair partShard = + pdClient.getPartition(graphName, startKey); + Metapb.Shard leader = partShard.getValue(); + partitions = new HashSet<>(); + partitions.add(HgNodePartition.of(leader.getStoreId(), + pdClient.keyToCode(graphName, startKey))); + } else { + log.warn( + "StartOwnerkey is not equal to endOwnerkey, which is meaningless!!, It is" + + " a error!!"); + List stores = pdClient.getActiveStores(graphName); + partitions = new 
HashSet<>(stores.size()); + for (Metapb.Store store : stores) { + partitions.add(HgNodePartition.of(store.getId(), -1)); + } + } + builder.setPartitions(partitions); + } catch (PDException e) { + log.error("An error occurred while getting partition information :{}", e.getMessage()); + throw new RuntimeException(e.getMessage(), e); + } + return 0; + } + + @Override + public int partition(HgNodePartitionerBuilder builder, String graphName, + int startKey, int endKey) { + try { + HashSet partitions = new HashSet<>(); + Metapb.Partition partition = null; + while ((partition == null || partition.getEndKey() < endKey) + && startKey < PartitionUtils.MAX_VALUE) { + KVPair partShard = + pdClient.getPartitionByCode(graphName, startKey); + if (partShard != null) { + partition = partShard.getKey(); + Metapb.Shard leader = partShard.getValue(); + partitions.add(HgNodePartition.of(leader.getStoreId(), startKey, + (int) partition.getStartKey(), + (int) partition.getEndKey())); + startKey = (int) partition.getEndKey(); + } else { + break; + } + } + builder.setPartitions(partitions); + } catch (PDException e) { + log.error("An error occurred while getting partition information :{}", e.getMessage()); + throw new RuntimeException(e.getMessage(), e); + } + return 0; + } + + @Override + public int partition(HgNodePartitionerBuilder builder, String graphName, + int partitionId) { + try { + HashSet partitions = new HashSet<>(); + Metapb.Partition partition = null; + + KVPair partShard = + pdClient.getPartitionById(graphName, partitionId); + if (partShard != null) { + partition = partShard.getKey(); + Metapb.Shard leader = partShard.getValue(); + partitions.add( + HgNodePartition.of(leader.getStoreId(), (int) partition.getStartKey())); + } + builder.setPartitions(partitions); + } catch (PDException e) { + log.error("An error occurred while getting partition information :{}", e.getMessage()); + throw new RuntimeException(e.getMessage(), e); + } + return 0; + } + + /** + * 查询hgstore信息 + * + 
* @return hgstore + */ + @Override + public HgStoreNode apply(String graphName, Long nodeId) { + try { + Metapb.Store store = pdClient.getStore(nodeId); + return nodeManager.getNodeBuilder().setNodeId(store.getId()) + .setAddress(store.getAddress()).build(); + } catch (PDException e) { + throw new RuntimeException(e.getMessage(), e); + } + } + + /** + * 通知更新缓存 + */ + @Override + public int notice(String graphName, HgStoreNotice storeNotice) { + log.warn(storeNotice.toString()); + if (storeNotice.getPartitionLeaders() != null) { + storeNotice.getPartitionLeaders().forEach((partId, leader) -> { + pdClient.updatePartitionLeader(graphName, partId, leader); + log.warn("updatePartitionLeader:{}-{}-{}", + graphName, partId, leader); + }); + } + if (storeNotice.getPartitionIds() != null) { + storeNotice.getPartitionIds().forEach(partId -> { + pdClient.invalidPartitionCache(graphName, partId); + }); + } + if (!storeNotice.getNodeStatus().equals( + HgNodeStatus.PARTITION_COMMON_FAULT) + && !storeNotice.getNodeStatus().equals( + HgNodeStatus.NOT_PARTITION_LEADER)) { + pdClient.invalidPartitionCache(); + log.warn("invalidPartitionCache:{} ", storeNotice.getNodeStatus()); + } + return 0; + } + + public Metapb.Graph delGraph(String graphName) { + try { + return pdClient.delGraph(graphName); + } catch (PDException e) { + log.error("delGraph {} exception, {}", graphName, e.getMessage()); + } + return null; + } + + public void setNodeManager(HgStoreNodeManager nodeManager) { + this.nodeManager = nodeManager; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeProvider.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeProvider.java new file mode 100644 index 0000000000..2d0a7b5ed5 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeProvider.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) 
under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +/** + * created on 2021/10/27 + */ +public interface HgStoreNodeProvider { + + /** + * Applying a new HgStoreNode instance + * + * @param graphName + * @param nodeId + * @return + */ + HgStoreNode apply(String graphName, Long nodeId); + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeSession.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeSession.java new file mode 100644 index 0000000000..17387eebc1 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeSession.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +import org.apache.hugegraph.store.HgStoreSession; + +/** + * created on 2021/10/11 + * + * @version 0.1.0 + */ +public interface HgStoreNodeSession extends HgStoreSession { + + /** + * Return the name of graph. + * + * @return + */ + String getGraphName(); + + /** + * Return an instance of HgStoreNode, which provided the connection of Store-Node machine. + * + * @return + */ + HgStoreNode getStoreNode(); + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNotice.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNotice.java new file mode 100644 index 0000000000..083cb8d381 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNotice.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +import java.util.List; +import java.util.Map; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.store.client.type.HgNodeStatus; +import org.apache.hugegraph.store.client.util.HgAssert; + +/** + * 2021/11/16 + */ +@NotThreadSafe +public class HgStoreNotice { + + private final Long nodeId; + private final HgNodeStatus nodeStatus; + private final String message; + private Map partitionLeaders; + private List partitionIds; + + private HgStoreNotice(Long nodeId, HgNodeStatus nodeStatus, String message) { + this.nodeId = nodeId; + this.nodeStatus = nodeStatus; + this.message = message; + } + + public static HgStoreNotice of(Long nodeId, HgNodeStatus nodeStatus) { + HgAssert.isArgumentNotNull(nodeId, "nodeId"); + HgAssert.isArgumentNotNull(nodeStatus, "nodeStatus"); + return new HgStoreNotice(nodeId, nodeStatus, ""); + } + + public static HgStoreNotice of(Long nodeId, HgNodeStatus nodeStatus, String message) { + HgAssert.isArgumentNotNull(nodeId, "nodeId"); + HgAssert.isArgumentNotNull(nodeStatus, "nodeStatus"); + HgAssert.isArgumentNotNull(message, "message"); + + return new HgStoreNotice(nodeId, nodeStatus, message); + } + + public Long getNodeId() { + return nodeId; + } + + public HgNodeStatus getNodeStatus() { + return nodeStatus; + } + + public String getMessage() { + return message; + } + + public Map getPartitionLeaders() { + return partitionLeaders; + } + + public HgStoreNotice setPartitionLeaders(Map partitionLeaders) { + this.partitionLeaders = partitionLeaders; + return this; + } + + public List getPartitionIds() { + return partitionIds; + } + + public HgStoreNotice setPartitionIds(List partitionIds) { + this.partitionIds = partitionIds; + return this; + } + + @Override + public String toString() { + return "HgStoreNotice{" + + "nodeId=" + nodeId + + ", nodeStatus=" + 
nodeStatus + + ", message='" + message + '\'' + + ", partitionLeaders=" + partitionLeaders + + ", partitionIds=" + partitionIds + + '}'; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreService.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreService.java new file mode 100644 index 0000000000..c0e2be6b59 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreService.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client; + +public class HgStoreService { + + private static final HgStoreService instance = new HgStoreService(); + + private HgStoreService() { + } + + static HgStoreService of() { + return instance; + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreSessionProvider.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreSessionProvider.java new file mode 100644 index 0000000000..37fa51cb4a --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreSessionProvider.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.store.HgSessionProvider; +import org.apache.hugegraph.store.HgStoreSession; + +/** + * created on 2021/10/12 + */ +@ThreadSafe +public class HgStoreSessionProvider implements HgSessionProvider { + + private final MultiNodeSessionFactory sessionFactory = MultiNodeSessionFactory.getInstance(); + + @Override + public HgStoreSession createSession(String graphName) { + return this.sessionFactory.createStoreSession(graphName); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTkvEntryImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTkvEntryImpl.java new file mode 100644 index 0000000000..ab0c7fdce9 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTkvEntryImpl.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client; + +import java.util.Arrays; +import java.util.Objects; + +import org.apache.hugegraph.store.HgTkvEntry; + +/** + * created on 2021/10/14 + */ +class HgTkvEntryImpl implements HgTkvEntry { + + private final String table; + private final byte[] key; + private final byte[] value; + + HgTkvEntryImpl(String table, byte[] key, byte[] value) { + this.table = table; + this.key = key; + this.value = value; + } + + @Override + public String table() { + return this.table; + } + + @Override + public byte[] key() { + return this.key; + } + + @Override + public byte[] value() { + return this.value; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + HgTkvEntryImpl that = (HgTkvEntryImpl) o; + return Objects.equals(table, that.table) && Arrays.equals(key, that.key) && + Arrays.equals(value, that.value); + } + + @Override + public int hashCode() { + int result = Objects.hash(table); + result = 31 * result + Arrays.hashCode(key); + result = 31 * result + Arrays.hashCode(value); + return result; + } + + @Override + public String toString() { + return "HgTkvEntryImpl{" + + "table='" + table + '\'' + + ", key=" + Arrays.toString(key) + + ", value=" + Arrays.toString(value) + + '}'; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTokvEntryImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTokvEntryImpl.java new file mode 100644 index 0000000000..932864a55b --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTokvEntryImpl.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +import java.util.Arrays; +import java.util.Objects; + +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.HgTokvEntry; + +/** + * created on 2021/10/14 + */ +class HgTokvEntryImpl implements HgTokvEntry { + + private final String table; + private final HgOwnerKey ownerKey; + private final byte[] value; + + HgTokvEntryImpl(String table, HgOwnerKey ownerKey, byte[] value) { + this.table = table; + this.ownerKey = ownerKey; + this.value = value; + } + + @Override + public String table() { + return this.table; + } + + @Override + public HgOwnerKey ownerKey() { + return this.ownerKey; + } + + @Override + public byte[] value() { + return this.value; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + HgTokvEntryImpl that = (HgTokvEntryImpl) o; + return Objects.equals(table, that.table) && Objects.equals(ownerKey, that.ownerKey) && + Arrays.equals(value, that.value); + } + + @Override + public int hashCode() { + int result = Objects.hash(table, ownerKey); + result = 31 * result + Arrays.hashCode(value); + return result; + } + + @Override + public String toString() { + return "HgTokvEntryImpl{" + + "table='" + table + '\'' + + ", okv=" + ownerKey + + ", value=" + Arrays.toString(value) + + '}'; + } +} diff 
--git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/MultiNodeSessionFactory.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/MultiNodeSessionFactory.java new file mode 100644 index 0000000000..ff7cde0db8 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/MultiNodeSessionFactory.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.store.HgStoreSession; + +/** + * created on 2021/10/12 + */ +@ThreadSafe +public final class MultiNodeSessionFactory { + + // TODO multi-instance ? + private final static MultiNodeSessionFactory INSTANCE = new MultiNodeSessionFactory(); + // TODO multi-instance ? 
+ private final HgStoreNodeManager nodeManager = HgStoreNodeManager.getInstance(); + // TODO: to be a chain assigned to each graph + //private HgStoreNodeDispatcher storeNodeDispatcher; + + private MultiNodeSessionFactory() { + } + + static MultiNodeSessionFactory getInstance() { + return INSTANCE; + } + + HgStoreSession createStoreSession(String graphName) { + return buildProxy(graphName); + } + + private HgStoreSession buildProxy(String graphName) { + //return new MultiNodeSessionProxy(graphName, nodeManager, storeNodeDispatcher); + //return new NodePartitionSessionProxy(graphName,nodeManager); + //return new NodeRetrySessionProxy(graphName,nodeManager); + return new NodeTxSessionProxy(graphName, nodeManager); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTkv.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTkv.java new file mode 100644 index 0000000000..e78ced4c10 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTkv.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client; + +import java.util.Objects; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.HgStoreSession; + +/** + * created on 2021/10/26 + */ +@ThreadSafe +class NodeTkv { + + private final HgNodePartition nodePartition; + private final String table; + private final HgOwnerKey key; + private final HgOwnerKey endKey; + private HgStoreSession session; + + NodeTkv(HgNodePartition nodePartition, String table, HgOwnerKey key) { + this.nodePartition = nodePartition; + this.table = table; + this.key = key; + this.endKey = key; + this.key.setKeyCode(this.nodePartition.getKeyCode()); + } + + NodeTkv(HgNodePartition nodePartition, String table, HgOwnerKey key, int keyCode) { + this.nodePartition = nodePartition; + this.table = table; + this.key = key; + this.endKey = key; + + this.key.setKeyCode(keyCode); + } + + NodeTkv(HgNodePartition nodePartition, String table, HgOwnerKey startKey, + HgOwnerKey endKey) { + this.nodePartition = nodePartition; + this.table = table; + this.key = startKey; + this.endKey = endKey; + this.key.setKeyCode(nodePartition.getStartKey()); + this.endKey.setKeyCode(nodePartition.getEndKey()); + } + + public Long getNodeId() { + return this.nodePartition.getNodeId(); + } + + public String getTable() { + return table; + } + + public HgOwnerKey getKey() { + return key; + } + + public HgOwnerKey getEndKey() { + return endKey; + } + + public NodeTkv setKeyCode(int code) { + this.key.setKeyCode(code); + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + NodeTkv nptKv = (NodeTkv) o; + return Objects.equals(nodePartition, nptKv.nodePartition) && + Objects.equals(table, nptKv.table) + && Objects.equals(key, nptKv.key) + && Objects.equals(endKey, nptKv.endKey); + } + + @Override + public int hashCode() { + int 
result = Objects.hash(nodePartition, table, key, endKey); + return result; + } + + @Override + public String toString() { + return "NptKv{" + + "nodePartition=" + nodePartition + + ", table='" + table + '\'' + + ", key=" + key + + ", endKey=" + endKey + + '}'; + } + + public HgStoreSession getSession() { + return session; + } + + public void setSession(HgStoreSession session) { + this.session = session; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxExecutor.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxExecutor.java new file mode 100644 index 0000000000..01eea1af79 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxExecutor.java @@ -0,0 +1,431 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client; + +import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_LIST; +import static org.apache.hugegraph.store.client.util.HgStoreClientConst.NODE_MAX_RETRYING_TIMES; +import static org.apache.hugegraph.store.client.util.HgStoreClientConst.TX_SESSIONS_MAP_CAPACITY; + +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collector; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.HgStoreSession; +import org.apache.hugegraph.store.client.type.HgStoreClientException; +import org.apache.hugegraph.store.client.util.HgAssert; +import org.apache.hugegraph.store.client.util.HgStoreClientConst; +import org.apache.hugegraph.store.term.HgPair; +import org.apache.hugegraph.store.term.HgTriple; + +import lombok.extern.slf4j.Slf4j; + +/** + * 2021/11/18 + */ +@Slf4j +@NotThreadSafe +final class NodeTxExecutor { + + private static final String maxTryMsg = + "the number of retries reached the upper limit : " + NODE_MAX_RETRYING_TIMES + + ",caused by:"; + private static final String msg = + "Not all tx-data delivered to real-node-session successfully."; + + static { + System.setProperty("java.util.concurrent.ForkJoinPool.common.parallelism", + String.valueOf(Runtime.getRuntime().availableProcessors() * 2)); + } + + private final String graphName; + NodeTxSessionProxy proxy; + Collector>> collector = Collectors.groupingBy( + nkv -> nkv.getNodeId(), Collectors.mapping(NodeTkv::getKey, Collectors.toList())); + private Map 
sessions = new HashMap<>(TX_SESSIONS_MAP_CAPACITY, 1); + private boolean isTx; + private List, + Function>> entries = new LinkedList<>(); + + private NodeTxExecutor(String graphName, NodeTxSessionProxy proxy) { + this.graphName = graphName; + this.proxy = proxy; + } + + static NodeTxExecutor graphOf(String graphName, NodeTxSessionProxy proxy) { + return new NodeTxExecutor(graphName, proxy); + } + + public boolean isTx() { + return isTx; + } + + void setTx(boolean tx) { + isTx = tx; + } + + void commitTx() { + if (!this.isTx) { + throw new IllegalStateException("It's not in tx state"); + } + + this.doCommit(); + } + + void rollbackTx() { + if (!this.isTx) { + return; + } + try { + this.sessions.values().stream().filter(HgStoreSession::isTx) + .forEach(HgStoreSession::rollback); + } catch (Throwable t) { + throw t; + } finally { + this.isTx = false; + this.sessions.clear(); + } + } + + void doCommit() { + try { + this.retryingInvoke(() -> { + if (this.entries.isEmpty()) { + return true; + } + AtomicBoolean allSuccess = new AtomicBoolean(true); + for (HgPair, Function> e : + this.entries) { + doAction(e.getKey(), e.getValue()); + } + if (!allSuccess.get()) { + throw HgStoreClientException.of(msg); + } + AtomicReference throwable = new AtomicReference<>(); + Collection sessions = this.sessions.values(); + sessions.parallelStream().forEach(e -> { + if (e.isTx()) { + try { + e.commit(); + } catch (Throwable t) { + throwable.compareAndSet(null, t); + allSuccess.set(false); + } + } + }); + if (!allSuccess.get()) { + if (isTx) { + try { + sessions.stream().forEach(HgStoreSession::rollback); + } catch (Exception e) { + + } + } + Throwable cause = throwable.get(); + if (cause.getCause() != null) { + cause = cause.getCause(); + } + if (cause instanceof HgStoreClientException) { + throw (HgStoreClientException) cause; + } + throw HgStoreClientException.of(cause); + } + return true; + }); + + } catch (Throwable t) { + throw t; + } finally { + this.isTx = false; + this.entries = 
new LinkedList<>(); + this.sessions = new HashMap<>(TX_SESSIONS_MAP_CAPACITY, 1); + } + } + + // private Function, + // List>> nodeStreamWrapper = nodeParams -> { + // if (nodeParams.getZ() == null) { + // return this.proxy.getNode(nodeParams.getX(), + // nodeParams.getY()); + // } else { + // if (nodeParams.getZ() instanceof HgOwnerKey) { + // return this.proxy.getNode(nodeParams.getX(), + // nodeParams.getY(), + // (HgOwnerKey) nodeParams.getZ()); + // } if ( nodeParams.getZ() instanceof Integer ){ + // return this.proxy.doPartition(nodeParams.getX(), (Integer) nodeParams.getZ()) + // .stream() + // .map(e -> new NodeTkv(e, nodeParams.getX(), nodeParams.getY(), + // nodeParams.getY() + // .getKeyCode())) + // .map( + // e -> new HgPair<>(this.proxy.getStoreNode(e.getNodeId + // ()), e) + // ); + // }else { + // HgAssert.isTrue(nodeParams.getZ() instanceof byte[], + // "Illegal parameter to get node id"); + // throw new NotImplementedException(); + // } + // } + // }; + + // private Function, + // List>> nodeStreamWrapper = nodeParams -> { + // if (nodeParams.getZ() == null) { + // return this.proxy.getNode(nodeParams.getX(), nodeParams.getY()); + // } else { + // if (nodeParams.getZ() instanceof HgOwnerKey) { + // return this.proxy.getNode(nodeParams.getX(), nodeParams.getY(), + // (HgOwnerKey) nodeParams.getZ()); + // } + // if (nodeParams.getZ() instanceof Integer) { + // Collection nodePartitions = this.proxy.doPartition(nodeParams + // .getX(), + // (Integer) + // nodeParams + // .getZ()); + // ArrayList> hgPairs = new ArrayList<> + // (nodePartitions.size()); + // for (HgNodePartition nodePartition : nodePartitions) { + // NodeTkv nodeTkv = new NodeTkv(nodePartition, nodeParams.getX(), nodeParams + // .getY(), + // nodeParams.getY().getKeyCode()); + // hgPairs.add(new HgPair<>(this.proxy.getStoreNode(nodeTkv.getNodeId()), + // nodeTkv)); + // + // } + // return hgPairs; + // } else { + // HgAssert.isTrue(nodeParams.getZ() instanceof byte[], "Illegal 
parameter to get + // node id"); + // throw new RuntimeException("not implemented"); + // } + // } + // }; + + private boolean doAction(HgTriple nodeParams, + Function action) { + if (nodeParams.getZ() == null) { + return this.proxy.doAction(nodeParams.getX(), nodeParams.getY(), nodeParams.getY(), + action); + } else { + if (nodeParams.getZ() instanceof HgOwnerKey) { + boolean result = this.proxy.doAction(nodeParams.getX(), nodeParams.getY(), + (HgOwnerKey) nodeParams.getZ(), action); + return result; + } + if (nodeParams.getZ() instanceof Integer) { + return this.proxy.doAction(nodeParams.getX(), nodeParams.getY(), + (Integer) nodeParams.getZ(), action); + } else { + HgAssert.isTrue(nodeParams.getZ() instanceof byte[], + "Illegal parameter to get node id"); + throw new RuntimeException("not implemented"); + } + } + } + + boolean prepareTx(HgTriple nodeParams, + Function sessionMapper) { + if (this.isTx) { + return this.entries.add(new HgPair(nodeParams, sessionMapper)); + } else { + return this.isAllTrue(nodeParams, sessionMapper); + } + } + + public HgStoreSession openNodeSession(HgStoreNode node) { + HgStoreSession res = this.sessions.get(node.getNodeId()); + if (res == null) { + this.sessions.put(node.getNodeId(), (res = node.openSession(this.graphName))); + } + if (this.isTx) { + res.beginTx(); + } + + return res; + } + + R limitOne( + Supplier>> nodeStreamSupplier, + Function, R> sessionMapper, R emptyObj) { + + Optional res = retryingInvoke( + () -> nodeStreamSupplier.get() + .parallel() + .map( + pair -> new SessionData( + openNodeSession(pair.getKey()), + pair.getValue()) + ).map(sessionMapper) + .filter( + r -> isValid(r) + ) + .findAny() + .orElseGet(() -> emptyObj) + ); + return res.orElse(emptyObj); + } + + List toList(Function nodeFunction + , List keyList + , Function> flatMapper + , Function>, List> sessionMapper) { + Optional> res = retryingInvoke( + () -> keyList.stream() + .flatMap(flatMapper) + .collect(collector) + .entrySet() + .stream() + 
.map( + e -> new SessionData<> + ( + openNodeSession( + nodeFunction.apply(e.getKey())), + e.getValue() + ) + ) + .parallel() + .map(sessionMapper) + .flatMap( + e -> e.stream() + ) + //.distinct() + .collect(Collectors.toList()) + ); + + return res.orElse(EMPTY_LIST); + } + + private boolean isAllTrue(HgTriple nodeParams, + Function action) { + Optional res = retryingInvoke(() -> doAction(nodeParams, action)); + return res.orElse(false); + } + + boolean isAllTrue(Supplier>> dataSource, + Function, Boolean> action) { + Optional res = retryingInvoke( + () -> dataSource.get() + .parallel() + .map( + pair -> new SessionData( + openNodeSession(pair.getKey()), + pair.getValue()) + ).map(action) + .allMatch(Boolean::booleanValue) + ); + + return res.orElse(false); + } + + boolean ifAnyTrue(Supplier>> nodeStreamSupplier + , Function, Boolean> sessionMapper) { + + Optional res = retryingInvoke( + () -> nodeStreamSupplier.get() + .parallel() + .map( + pair -> new SessionData( + openNodeSession(pair.getKey()), + pair.getValue()) + ) + .map(sessionMapper) + .anyMatch(Boolean::booleanValue) + ); + + return res.orElse(false); + } + + Optional retryingInvoke(Supplier supplier) { + return IntStream.rangeClosed(0, NODE_MAX_RETRYING_TIMES).boxed() + .map( + i -> { + T buffer = null; + try { + buffer = supplier.get(); + } catch (Throwable t) { + if (i + 1 <= NODE_MAX_RETRYING_TIMES) { + try { + int sleepTime; + // 前三次每隔一秒做一次尝试 + if (i < 3) { + sleepTime = 1; + } else { + // 后面逐次递增 + sleepTime = i - 1; + } + log.info("Waiting {} seconds " + + "for the next try.", + sleepTime); + Thread.sleep(sleepTime * 1000L); + } catch (InterruptedException e) { + log.error("Failed to sleep", e); + } + } else { + log.error(maxTryMsg, t); + throw HgStoreClientException.of( + t.getMessage(), t); + } + } + return buffer; + } + ) + .filter(e -> e != null) + .findFirst(); + + } + + private boolean isValid(Object obj) { + if (obj == null) { + return false; + } + + if 
(HgStoreClientConst.EMPTY_BYTES.equals(obj)) { + return false; + } + + return !EMPTY_LIST.equals(obj); + } + + class SessionData { + + HgStoreSession session; + T data; + + SessionData(HgStoreSession session, T data) { + this.session = session; + this.data = data; + } + + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxSessionProxy.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxSessionProxy.java new file mode 100644 index 0000000000..066f96893d --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxSessionProxy.java @@ -0,0 +1,887 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client; + +import static java.util.stream.Collectors.groupingBy; +import static org.apache.hugegraph.store.client.util.HgAssert.isArgumentNotNull; +import static org.apache.hugegraph.store.client.util.HgAssert.isArgumentValid; +import static org.apache.hugegraph.store.client.util.HgAssert.isFalse; +import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_STRING; +import static org.apache.hugegraph.store.client.util.HgStoreClientUtil.err; +import static org.apache.hugegraph.store.client.util.HgStoreClientUtil.toStr; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.store.HgKvEntry; +import org.apache.hugegraph.store.HgKvIterator; +import org.apache.hugegraph.store.HgKvOrderedIterator; +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.HgScanQuery; +import org.apache.hugegraph.store.HgStoreSession; +import org.apache.hugegraph.store.client.grpc.KvBatchScanner; +import org.apache.hugegraph.store.client.grpc.KvCloseableIterator; +import org.apache.hugegraph.store.client.util.HgAssert; +import org.apache.hugegraph.store.client.util.HgStoreClientConst; +import org.apache.hugegraph.store.client.util.HgStoreClientUtil; +import org.apache.hugegraph.store.grpc.stream.ScanStreamReq.Builder; +import org.apache.hugegraph.store.term.HgPair; +import org.apache.hugegraph.store.term.HgTriple; + +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2022/01/19 + * + * @version 0.6.0 added batch scan on 2022/03/03 + */ +@Slf4j +@NotThreadSafe +class NodeTxSessionProxy implements HgStoreSession { + + private 
final HgStoreNodeManager nodeManager; + private final HgStoreNodePartitioner nodePartitioner; + private final String graphName; + private final NodeTxExecutor txExecutor; + + NodeTxSessionProxy(String graphName, HgStoreNodeManager nodeManager) { + this.nodeManager = nodeManager; + this.graphName = graphName; + this.nodePartitioner = this.nodeManager.getNodePartitioner(); + this.txExecutor = NodeTxExecutor.graphOf(this.graphName, this); + + isFalse(this.nodePartitioner == null, + "Failed to retrieve the node-partitioner from node-manager."); + } + + @Override + public void beginTx() { + this.txExecutor.setTx(true); + } + + @Override + public void commit() { + this.txExecutor.commitTx(); + } + + @Override + public void rollback() { + this.txExecutor.rollbackTx(); + } + + @Override + public boolean isTx() { + return this.txExecutor.isTx(); + } + + @Override + public boolean put(String table, HgOwnerKey ownerKey, byte[] value) { + // isArgumentValid(table, "table"); + // isArgumentNotNull(ownerKey, "ownerKey"); + // log.info("put -> graph: {}, table: {}, key: {}, value: {}", + // graphName, table, ownerKey, toByteStr(value)); + // return this.txExecutor.prepareTx( + // () -> getNodeStream(table, ownerKey), + // e -> e.session.put(table, e.data.getKey(), value) + // ); + return this.txExecutor.prepareTx(new HgTriple(table, ownerKey, null), + e -> e.getSession().put(table, + e.getKey(), + value)); + } + + @Override + public boolean directPut(String table, int partitionId, HgOwnerKey ownerKey, byte[] value) { + isArgumentValid(table, "table"); + isArgumentNotNull(ownerKey, "ownerKey"); + + return this.txExecutor.prepareTx( + new HgTriple(table, ownerKey, partitionId), + e -> e.getSession().put(table, e.getKey(), value) + ); + } + + @Override + public boolean delete(String table, HgOwnerKey ownerKey) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(ownerKey == null, "The argument is invalid: ownerKey"); + + if 
(log.isDebugEnabled()) { + log.debug("delete -> graph: {}, table: {}, key: {}" + , graphName, table, toStr(ownerKey)); + } + + return this.txExecutor + .prepareTx( + new HgTriple(table, ownerKey, null), + e -> e.getSession().delete(table, e.getKey()) + ); + } + + @Override + public boolean deleteSingle(String table, HgOwnerKey ownerKey) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(ownerKey == null, "The argument is invalid: ownerKey"); + + if (log.isDebugEnabled()) { + log.debug("deleteSingle -> graph: {}, table: {}, key: {}" + , graphName, table, toStr(ownerKey)); + } + + return this.txExecutor + .prepareTx( + new HgTriple(table, ownerKey, null), + e -> e.getSession().deleteSingle(table, e.getKey()) + ); + } + + @Override + public boolean deletePrefix(String table, HgOwnerKey prefix) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(prefix == null, "The argument is invalid: prefix"); + + if (log.isDebugEnabled()) { + log.debug("deletePrefix -> graph: {}, table: {}, prefix: {}" + , graphName, table, toStr(prefix)); + } + + return this.txExecutor + .prepareTx( + new HgTriple(table, prefix, null), + e -> e.getSession().deletePrefix(table, e.getKey()) + ); + } + + @Override + public boolean deleteRange(String table, HgOwnerKey start, HgOwnerKey end) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(start == null, "The argument is invalid: start"); + HgAssert.isFalse(end == null, "The argument is invalid: end"); + + if (log.isDebugEnabled()) { + log.debug("deleteRange -> graph: {}, table: {}, start: {}, end: {}" + , graphName, table, toStr(start), toStr(end)); + } + + return this.txExecutor + .prepareTx( + new HgTriple(table, start, end), + e -> e.getSession().deleteRange(table, e.getKey(), e.getEndKey()) + ); + } + + @Override + public boolean merge(String table, HgOwnerKey key, byte[] value) { + 
HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(key == null, "The argument is invalid: key"); + HgAssert.isFalse(value == null, "The argument is invalid: value"); + + if (log.isDebugEnabled()) { + log.debug("merge -> graph: {}, table: {}, key: {}, value: {}" + , graphName, table, toStr(key), toStr(value)); + } + + return this.txExecutor + .prepareTx( + new HgTriple(table, key, value), + e -> e.getSession().merge(table, e.getKey(), value) + ); + } + + /*--- tx end ---*/ + + @Override + public byte[] get(String table, HgOwnerKey ownerKey) { + isArgumentValid(table, "table"); + isArgumentNotNull(ownerKey, "ownerKey"); + + return this.txExecutor + .limitOne( + () -> this.getNodeStream(table, ownerKey), + e -> e.session.get(table, e.data.getKey()), HgStoreClientConst.EMPTY_BYTES + ); + } + + @Override + public boolean clean(int partId) { + Collection nodes = this.doPartition("", partId); + return nodes.parallelStream() + .map( + e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName) + .clean(partId) + ).findFirst().get(); + } + + @Override + @Deprecated + public List batchGetOwner(String table, List keyList) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(HgAssert.isInvalid(keyList), "The argument is invalid: keyList"); + + return this.txExecutor + .toList( + (l) -> this.getStoreNode(l), + keyList, + key -> this.toNodeTkvList(table, key, key).stream(), + e -> e.session.batchGetOwner(table, e.data) + ); + } + + @Override + public HgKvIterator batchPrefix(String table, List keyList) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(HgAssert.isInvalid(keyList), "The argument is invalid: keyList"); + return this.toHgKvIteratorProxy( + this.txExecutor + .toList( + (l) -> this.getStoreNode(l), + keyList, + key -> this.toNodeTkvList(table, key, key).stream(), + e -> 
Collections.singletonList(e.session.batchPrefix(table, e.data)) + ) + , Long.MAX_VALUE); + } + + @Override + public boolean truncate() { + return this.txExecutor + .isAllTrue( + () -> this.getNodeStream(EMPTY_STRING), + e -> e.session.truncate() + ); + } + + @Override + public boolean existsTable(String table) { + return this.txExecutor + .ifAnyTrue( + () -> this.getNodeStream(EMPTY_STRING), + e -> e.session.existsTable(table) + ); + } + + @Override + public boolean createTable(String table) { + return this.txExecutor + .isAllTrue( + () -> this.getNodeStream(EMPTY_STRING), + e -> e.session.createTable(table) + ); + } + + @Override + public boolean deleteTable(String table) { + return this.txExecutor + .isAllTrue( + () -> this.getNodeStream(EMPTY_STRING), + e -> e.session.deleteTable(table) + ); + } + + @Override + public boolean dropTable(String table) { + return this.txExecutor + .isAllTrue( + () -> this.getNodeStream(table), + e -> e.session.dropTable(table) + ); + } + + @Override + public boolean deleteGraph(String graph) { + return this.txExecutor + .isAllTrue( + () -> this.getNodeStream(EMPTY_STRING), + e -> e.session.deleteGraph(graph) + ); + } + + @Override + public HgKvIterator scanIterator(String table) { + return scanIterator(table, 0); + } + + @Override + public HgKvIterator scanIterator(String table, byte[] query) { + return scanIterator(table, 0, query); + } + + @Override + public HgKvIterator scanIterator(String table, long limit) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + + return this.toHgKvIteratorProxy( + this.toNodeTkvList(table) + .parallelStream() + .map( + e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName) + .scanIterator(e.getTable(), limit) + ) + .collect(Collectors.toList()) + , limit); + } + + @Override + public HgKvIterator scanIterator(String table, long limit, byte[] query) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + + return 
this.toHgKvIteratorProxy( + this.toNodeTkvList(table) + .parallelStream() + .map( + e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName) + .scanIterator(e.getTable(), e.getKey(), limit, query) + ) + .collect(Collectors.toList()) + , limit); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix) { + return scanIterator(table, keyPrefix, 0); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix, long limit) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(keyPrefix == null, "The argument is invalid: keyPrefix"); + + return this.toHgKvIteratorProxy( + this.toNodeTkvList(table, keyPrefix) + .parallelStream() + .map( + e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName) + .scanIterator(e.getTable(), e.getKey(), limit) + ) + .collect(Collectors.toList()) + , limit); + + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix, long limit, + byte[] query) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(keyPrefix == null, "The argument is invalid: keyPrefix"); + + return this.toHgKvIteratorProxy( + this.toNodeTkvList(table, keyPrefix) + .parallelStream() + .map( + e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName) + .scanIterator(e.getTable(), e.getKey(), limit, query) + ) + .collect(Collectors.toList()) + , limit); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey startKey, + HgOwnerKey endKey) { + return this.scanIterator(table, startKey, endKey, 0, null); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey startKey, + HgOwnerKey endKey, long limit) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(startKey == null, "The argument is invalid: startKey"); + HgAssert.isFalse(endKey == null, "The argument is invalid: endKey"); + 
+ return this.toHgKvIteratorProxy( + this.toNodeTkvList(table, startKey, endKey) + .parallelStream() + .map( + e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName) + .scanIterator(e.getTable(), e.getKey(), e.getEndKey(), limit) + ) + .collect(Collectors.toList()) + , limit); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey + , long limit, byte[] query) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(startKey == null, "The argument is invalid: startKey"); + HgAssert.isFalse(endKey == null, "The argument is invalid: endKey"); + + return this.toHgKvIteratorProxy( + this.toNodeTkvList(table, startKey, endKey) + .parallelStream() + .map( + e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName) + .scanIterator(e.getTable(), e.getKey(), e.getEndKey(), limit, + query) + ) + .collect(Collectors.toList()) + , limit); + + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey + , long limit, int scanType, byte[] query) { + HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + HgAssert.isFalse(startKey == null, "The argument is invalid: startKey"); + HgAssert.isFalse(endKey == null, "The argument is invalid: endKey"); + + return this.toHgKvIteratorProxy( + this.toNodeTkvList(table, startKey, endKey) + .parallelStream() + .map( + e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName) + .scanIterator(e.getTable(), e.getKey(), e.getEndKey(), limit, + scanType, query) + ) + .collect(Collectors.toList()) + , limit); + + } + + @Override + public HgKvIterator scanIterator(String table, int codeFrom, int codeTo, + int scanType, byte[] query) { + if (log.isDebugEnabled()) { + log.debug("graph: {}, table: {}, codeFrom: {}, codeTo: {}, scanType: {}, query: {}" + , graphName, table, codeFrom, codeTo, scanType, HgStoreClientUtil.toStr(query)); + } + + 
HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table"); + return this.toHgKvIteratorProxy( + this.toNodeTkvList(table, codeFrom, codeTo) + .parallelStream() + .map( + e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName) + .scanIterator(e.getTable() + , e.getKey().getKeyCode() + , e.getEndKey().getKeyCode(), + scanType, query) + ) + .collect(Collectors.toList()) + , 0); + + } + + @Override + public HgKvIterator scanIterator(Builder scanReqBuilder) { + List nodeTKvs = this.toNodeTkvList(scanReqBuilder); + Function> hgKvIteratorFunction = e -> { + HgStoreSession session = this.getStoreNode(e.getNodeId()) + .openSession(this.graphName); + return session.scanIterator(scanReqBuilder); + }; + List iterators = nodeTKvs.parallelStream() + .map(hgKvIteratorFunction) + .collect(Collectors.toList()); + return this.toHgKvIteratorProxy(iterators, scanReqBuilder.getLimit()); + } + + @Override + public long count(String table) { + return this.toNodeTkvList(table) + .parallelStream() + .map( + e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName) + .count(e.getTable()) + ) + .collect(Collectors.summingLong(l -> l)); + } + + @Override + public List> scanBatch(HgScanQuery scanQuery) { + HgAssert.isArgumentNotNull(scanQuery, "scanQuery"); + + return this.toTkvMapFunc(scanQuery.getScanMethod()) + .apply(scanQuery) + .entrySet() + .stream() + .map(e -> + this.getStoreNode(e.getKey()) + .openSession(this.graphName) + .scanBatch(toScanQueryFunc(scanQuery.getScanMethod()) + .apply(scanQuery.getTable(), e.getValue()) + .setQuery(scanQuery.getQuery()) + .setLimit(scanQuery.getLimit()) + .setPerKeyLimit(scanQuery.getPerKeyLimit()) + .setPerKeyMax((scanQuery.getPerKeyMax())) + .setScanType(scanQuery.getScanType()) + .build() + ) + ) + //.peek(e->log.info("{}",e)) + .flatMap(List::stream) + .collect(Collectors.toList()); + + } + + @Override + public KvCloseableIterator> scanBatch2(HgScanQuery scanQuery) { + return scanBatch3(scanQuery, null); + } 
+ + @Override + public KvCloseableIterator> scanBatch3(HgScanQuery scanQuery, + KvCloseableIterator iterator) { + KvCloseableIterator notifierWrap = KvBatchScanner.ofMerger(scanQuery, (query, notifier) -> { + Map> nodeTkvs = this.toTkvMapFunc(scanQuery.getScanMethod()) + .apply(query); + + nodeTkvs.forEach((storeId, tkvs) -> { + this.getStoreNode(storeId) + .openSession(this.graphName) + .scanBatch3(toScanQueryFunc(scanQuery.getScanMethod()) + .apply(scanQuery.getTable(), tkvs) + .setQuery(scanQuery.getQuery()) + .setLimit(scanQuery.getLimit()) + .setSkipDegree(scanQuery.getSkipDegree()) + .setPerKeyLimit(scanQuery.getPerKeyLimit()) + .setPerKeyMax((scanQuery.getPerKeyMax())) + .setScanType(scanQuery.getScanType()) + .setOrderType(scanQuery.getOrderType()) + .build(), notifier + ); + }); + return true; + }); + return notifierWrap; + } + + private Function>> toTkvMapFunc( + HgScanQuery.ScanMethod scanMethod) { + switch (scanMethod) { + case RANGE: + return scanQuery -> { + List starts = scanQuery.getStartList(); + List ends = scanQuery.getEndList(); + int size = starts.size(); + return IntStream.range(0, size) + .boxed() + .map(i -> this.toNodeTkvList(scanQuery.getTable(), + starts.get(i), ends.get(i))) + .flatMap(List::stream) + .collect(groupingBy(NodeTkv::getNodeId)); + }; + case PREFIX: + return scanQuery -> + scanQuery.getPrefixList() + .stream() + .map(keyPrefix -> this.toNodeTkvList(scanQuery.getTable(), + keyPrefix)) + .flatMap(List::stream) + .collect(groupingBy(NodeTkv::getNodeId)); + + default: + return scanQuery -> this.toNodeTkvList(scanQuery.getTable()) + .stream() + .collect(groupingBy(NodeTkv::getNodeId)); + } + } + + private BiFunction, HgScanQuery.ScanBuilder> toScanQueryFunc( + HgScanQuery.ScanMethod scanMethod) { + switch (scanMethod) { + case RANGE: + return (table, tkvList) -> { + List startList = new LinkedList(); + List endList = new LinkedList(); + + tkvList.stream().forEach(e -> { + startList.add(e.getKey()); + endList.add(e.getEndKey()); + 
}); + + return HgScanQuery.ScanBuilder.rangeOf(table, startList, endList); + }; + case PREFIX: + return (table, tkvList) -> + HgScanQuery.ScanBuilder.prefixOf(table, + tkvList.stream() + .map(e -> e.getKey()) + .collect(Collectors.toList()) + ); + default: + return (table, tkvList) -> HgScanQuery.ScanBuilder.tableOf(table); + } + + } + + /*-- common --*/ + private HgKvIterator toHgKvIteratorProxy(List iteratorList, long limit) { + boolean isAllOrderedLimiter = iteratorList.stream() + .allMatch( + e -> e instanceof HgKvOrderedIterator); + + HgKvIterator iterator; + if (isAllOrderedLimiter) { + iterator = new SequencedIterator(iteratorList.stream() + .map(e -> (HgKvOrderedIterator) e) + .collect(Collectors.toList()), limit); + } else { + iterator = new TopWorkIteratorProxy(iteratorList, limit); + } + + return iterator; + } + + HgStoreNode getStoreNode(Long nodeId) { + HgStoreNode res = this.nodeManager.applyNode(this.graphName, nodeId); + + if (res == null) { + throw err("Failed to apply for an instance of HgStoreNode from node-manager."); + } + + return res; + } + + public boolean doAction(String table, HgOwnerKey startKey, HgOwnerKey endKey, + Function action) { + Collection partitions = + doPartition(table, startKey.getOwner(), endKey.getOwner()); + for (HgNodePartition partition : partitions) { + HgStoreNode storeNode = this.getStoreNode(partition.getNodeId()); + HgStoreSession session = this.txExecutor.openNodeSession(storeNode); + NodeTkv data = new NodeTkv(partition, table, startKey, endKey); + data.setSession(session); + if (!action.apply(data)) { + return false; + } + } + return true; + } + + public boolean doAction(String table, HgOwnerKey startKey, Integer code, + Function action) { + Collection partitions = this.doPartition(table, code); + for (HgNodePartition partition : partitions) { + HgStoreNode storeNode = this.getStoreNode(partition.getNodeId()); + HgStoreSession session = this.txExecutor.openNodeSession(storeNode); + NodeTkv data = new 
NodeTkv(partition, table, startKey, code); + data.setSession(session); + if (!action.apply(data)) { + return false; + } + } + return true; + } + + private List toNodeTkvList(Builder scanReqBuilder) { + // TODO 使用builder获取owner + String table = scanReqBuilder.getTable(); + HgOwnerKey ownerKey = HgStoreClientConst.ALL_PARTITION_OWNER_KEY; + byte[] allOwner = ownerKey.getOwner(); + Collection partitions = doPartition(table, + allOwner, + allOwner); + List nodeTkvs = new ArrayList<>(partitions.size()); + for (HgNodePartition partition : partitions) { + nodeTkvs.add(new NodeTkv(partition, table, ownerKey, ownerKey)); + } + return nodeTkvs; + } + + private List toNodeTkvList(String table) { + Collection partitions = doPartition(table, + HgStoreClientConst.ALL_PARTITION_OWNER_KEY.getOwner(), + HgStoreClientConst.ALL_PARTITION_OWNER_KEY.getOwner()); + ArrayList nodeTkvs = new ArrayList<>(partitions.size()); + for (HgNodePartition partition : partitions) { + nodeTkvs.add(new NodeTkv(partition, table, HgStoreClientConst.ALL_PARTITION_OWNER_KEY, + HgStoreClientConst.ALL_PARTITION_OWNER_KEY)); + } + return nodeTkvs; + } + + private List toNodeTkvList(String table, HgOwnerKey ownerKey) { + Collection partitions = + doPartition(table, ownerKey.getOwner(), ownerKey.getOwner()); + ArrayList nodeTkvs = new ArrayList<>(partitions.size()); + for (HgNodePartition partition : partitions) { + nodeTkvs.add(new NodeTkv(partition, table, ownerKey, ownerKey)); + } + + return nodeTkvs; + } + + private List toNodeTkvList(String table, HgOwnerKey startKey, HgOwnerKey endKey) { + Collection partitions = + doPartition(table, startKey.getOwner(), endKey.getOwner()); + ArrayList nodeTkvs = new ArrayList<>(partitions.size()); + for (HgNodePartition partition : partitions) { + nodeTkvs.add(new NodeTkv(partition, table, startKey, endKey)); + } + return nodeTkvs; + } + + private List toNodeTkvList(String table, int startCode, int endCode) { + Collection partitions = this.doPartition(table, startCode, 
endCode); + ArrayList nodeTkvs = new ArrayList<>(partitions.size()); + for (HgNodePartition partition : partitions) { + nodeTkvs.add( + new NodeTkv(partition, table, HgOwnerKey.codeOf(startCode), + HgOwnerKey.codeOf(endCode))); + } + return nodeTkvs; + } + + /** + * @return not null + */ + private Collection doPartition(String table, byte[] startKey, byte[] endKey) { + HgNodePartitionerBuilder partitionerBuilder = HgNodePartitionerBuilder.resetAndGet(); + + int status = this.nodePartitioner.partition(partitionerBuilder, this.graphName, startKey, + endKey); + + if (status != 0) { + throw err("The node-partitioner is not work."); + } + + Collection partitions = partitionerBuilder.getPartitions(); + + if (partitions.isEmpty()) { + throw err("Failed to get the collection of HgNodePartition from node-partitioner."); + } + + return partitions; + } + + /** + * @return @return not null + */ + private Collection doPartition(String table, int startCode, int endCode) { + HgNodePartitionerBuilder partitionerBuilder = HgNodePartitionerBuilder.resetAndGet(); + int status = this.nodePartitioner.partition(partitionerBuilder, this.graphName, startCode, + endCode); + + if (status != 0) { + throw err("The node-partitioner is not work."); + } + + Collection partitions = partitionerBuilder.getPartitions(); + + if (partitions.isEmpty()) { + throw err("Failed to get the collection of HgNodePartition from node-partitioner."); + } + + return partitions; + } + + Collection doPartition(String table, int partitionId) { + HgNodePartitionerBuilder partitionerBuilder = HgNodePartitionerBuilder.resetAndGet(); + int status = + this.nodePartitioner.partition(partitionerBuilder, this.graphName, partitionId); + + if (status != 0) { + throw err("The node-partitioner is not work."); + } + + Collection partitions = partitionerBuilder.getPartitions(); + + if (partitions.isEmpty()) { + throw err("Failed to get the collection of HgNodePartition from node-partitioner."); + } + + return partitions; + } + + 
private Stream> getNodeStream(String table) { + return this.toNodeTkvList(table) + .stream() + .map( + e -> new HgPair<>(this.getStoreNode(e.getNodeId()), e) + ); + } + + Stream> getNodeStream(String table, + HgOwnerKey ownerKey) { + return this.toNodeTkvList(table, ownerKey) + .stream() + .map( + e -> new HgPair<>(this.getStoreNode(e.getNodeId()), e) + ); + } + + Stream> getNodeStream(String table, HgOwnerKey startKey, + HgOwnerKey endKey) { + return this.toNodeTkvList(table, startKey, endKey) + .stream() + .map( + e -> new HgPair<>(this.getStoreNode(e.getNodeId()), e) + ); + + } + + // private List> getNode(String table) { + // List nodeTkvList = this.toNodeTkvList(table); + // return nodeTkv2Node(nodeTkvList); + // } + + List> getNode(String table, HgOwnerKey ownerKey) { + List nodeTkvList = this.toNodeTkvList(table, ownerKey); + return nodeTkv2Node(nodeTkvList); + } + + List> getNode(String table, HgOwnerKey startKey, + HgOwnerKey endKey) { + List nodeTkvList = this.toNodeTkvList(table, startKey, endKey); + return nodeTkv2Node(nodeTkvList); + + } + // + //boolean doAction(String table, HgOwnerKey startKey, HgOwnerKey endKey, + // Function action) { + // return this.doAction(table, startKey, endKey, action); + // + //} + + // List> getNode(String table, Integer endKey) { + // .stream() + // .map(e -> new NodeTkv(e, nodeParams.getX(), nodeParams.getY(), nodeParams.getY + // ().getKeyCode())) + // .map( + // e -> new HgPair<>(this.proxy.getStoreNode(e.getNodeId()), e) + // ); + // Collection nodePartitions = this.doPartition(table, endKey); + // for (HgNodePartition nodePartition: nodePartitions) { + // + // } + // return nodeTkv2Node(nodeTkvList); + // + // } + + private List> nodeTkv2Node(Collection nodeTkvList) { + ArrayList> hgPairs = new ArrayList<>(nodeTkvList.size()); + for (NodeTkv nodeTkv : nodeTkvList) { + hgPairs.add(new HgPair<>(this.getStoreNode(nodeTkv.getNodeId()), nodeTkv)); + } + return hgPairs; + } + +} diff --git 
a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/SequencedIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/SequencedIterator.java new file mode 100644 index 0000000000..aca7bb70b3 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/SequencedIterator.java @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Queue; + +import org.apache.hugegraph.store.HgKvEntry; +import org.apache.hugegraph.store.HgKvIterator; +import org.apache.hugegraph.store.HgKvOrderedIterator; + +import lombok.extern.slf4j.Slf4j; + +/** + * Proxy iterator orderly, to switch next one will happen when the current one is empty. + *

+ * created on 2022/03/10 + * + * @version 0.1.0 + */ +@Slf4j +public class SequencedIterator implements HgKvIterator { + + private static final byte[] EMPTY_BYTES = new byte[0]; + private final Queue queue; + private final long limit; + private HgKvOrderedIterator iterator; + private HgKvEntry entry; + private int count; + private byte[] position = EMPTY_BYTES; + private byte[] position4Seeking = EMPTY_BYTES; + + SequencedIterator(List iterators, long limit) { + Collections.sort(iterators); + this.queue = new LinkedList(iterators); + this.limit = limit <= 0 ? Integer.MAX_VALUE : limit; + } + + private HgKvOrderedIterator getIterator() { + if (this.queue.isEmpty()) { + return null; + } + HgKvOrderedIterator buf; + while ((buf = this.queue.poll()) != null) { + buf.seek(this.position4Seeking); + if (buf.hasNext()) { + break; + } + } + return buf; + } + + private void closeIterators() { + if (this.queue.isEmpty()) { + return; + } + HgKvOrderedIterator buf; + while ((buf = this.queue.poll()) != null) { + buf.close(); + } + + } + + @Override + public byte[] key() { + if (this.entry != null) { + return this.entry.key(); + } + return null; + } + + @Override + public byte[] value() { + if (this.entry != null) { + return this.entry.value(); + } + return null; + } + + @Override + public byte[] position() { + return this.position; + } + + @Override + public void seek(byte[] pos) { + if (pos != null) { + this.position4Seeking = pos; + } + } + + @Override + public boolean hasNext() { + if (this.count >= this.limit) { + return false; + } + if (this.iterator == null) { + this.iterator = this.getIterator(); + } else if (!this.iterator.hasNext()) { + this.iterator.close(); + this.iterator = this.getIterator(); + } + return this.iterator != null; + } + + @Override + public Object next() { + if (this.iterator == null) { + hasNext(); + } + if (this.iterator == null) { + throw new NoSuchElementException(); + } + this.entry = this.iterator.next(); + this.position = 
this.iterator.position(); + if (!this.iterator.hasNext()) { + this.iterator.close(); + this.iterator = null; + } + this.count++; + return this.entry; + } + + @Override + public void close() { + if (this.iterator != null) { + this.iterator.close(); + } + this.closeIterators(); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/ShiftWorkIteratorProxy.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/ShiftWorkIteratorProxy.java new file mode 100644 index 0000000000..474b042bf2 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/ShiftWorkIteratorProxy.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client; + +import java.util.LinkedList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Queue; + +import org.apache.hugegraph.store.HgKvEntry; +import org.apache.hugegraph.store.HgKvIterator; +import org.apache.hugegraph.store.HgKvPagingIterator; + +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2021/10/24 + * + * @version 0.1.1 + */ +@Slf4j +public class ShiftWorkIteratorProxy implements HgKvIterator { + + private static final byte[] EMPTY_BYTES = new byte[0]; + private final int limit; + private HgKvPagingIterator iterator; + private Queue queue = new LinkedList<>(); + private HgKvEntry entry; + private int count; + private int shiftCount; + + ShiftWorkIteratorProxy(List iterators, int limit) { + this.queue = new LinkedList<>(iterators); + this.limit = limit <= 0 ? Integer.MAX_VALUE : limit; + } + + private HgKvPagingIterator getIterator() { + if (this.queue.isEmpty()) { + return null; + } + + HgKvPagingIterator buf = null; + + while ((buf = this.queue.poll()) != null) { + if (buf.hasNext()) { + break; + } + } + + if (buf == null) { + return null; + } + + this.queue.add(buf); + + return buf; + } + + private void closeIterators() { + if (this.queue.isEmpty()) { + return; + } + + HgKvPagingIterator buf; + + while ((buf = this.queue.poll()) != null) { + buf.close(); + } + + } + + private void setIterator() { + + // if (++this.shiftCount >= this.iterator.getPageSize() / 2) { + if (++this.shiftCount >= this.iterator.getPageSize()) { + this.iterator = null; + this.shiftCount = 0; + } + + } + + private void doNext() { + + } + + @Override + public byte[] key() { + if (this.entry != null) { + return this.entry.key(); + } + return null; + } + + @Override + public byte[] value() { + if (this.entry != null) { + return this.entry.value(); + } + return null; + } + + @Override + public byte[] position() { + return this.iterator != null ? 
this.iterator.position() : EMPTY_BYTES; + } + + @Override + public void seek(byte[] position) { + if (this.iterator != null) { + this.iterator.seek(position); + } + } + + @Override + public boolean hasNext() { + if (this.count >= this.limit) { + return false; + } + if (this.iterator == null + || !this.iterator.hasNext()) { + this.iterator = this.getIterator(); + } + return this.iterator != null; + } + + @Override + public Object next() { + if (this.iterator == null) { + hasNext(); + } + if (this.iterator == null) { + throw new NoSuchElementException(); + } + this.entry = this.iterator.next(); + this.setIterator(); + this.count++; + //log.info("next - > {}",this.entry); + return this.entry; + } + + @Override + public void close() { + if (this.iterator != null) { + this.iterator.close(); + } + this.closeIterators(); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/TopWorkIteratorProxy.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/TopWorkIteratorProxy.java new file mode 100644 index 0000000000..21a37ae3df --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/TopWorkIteratorProxy.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client; + +import java.util.LinkedList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Queue; + +import org.apache.hugegraph.store.HgKvEntry; +import org.apache.hugegraph.store.HgKvIterator; + +/** + * created on 2021/10/21 + * + * @version 0.1.0 + */ +class TopWorkIteratorProxy implements HgKvIterator { + + private static final byte[] EMPTY_BYTES = new byte[0]; + private final Queue queue; + private final long limit; + private HgKvIterator iterator; + private HgKvEntry entry; + // result count + private int count; + + TopWorkIteratorProxy(List iterators, long limit) { + this.queue = new LinkedList<>(iterators); + this.limit = limit <= 0 ? Integer.MAX_VALUE : limit; + } + + private HgKvIterator getIterator() { + if (this.queue.isEmpty()) { + return null; + } + + HgKvIterator buf = null; + + while ((buf = this.queue.poll()) != null) { + if (buf.hasNext()) { + break; + } + } + + if (buf == null) { + return null; + } + + this.queue.add(buf); + + return buf; + } + + private void closeIterators() { + if (this.queue.isEmpty()) { + return; + } + + HgKvIterator buf; + + while ((buf = this.queue.poll()) != null) { + buf.close(); + } + + } + + private void setIterator() { + this.iterator = null; + } + + @Override + public byte[] key() { + if (this.entry != null) { + return this.entry.key(); + } + return null; + } + + @Override + public byte[] value() { + if (this.entry != null) { + return this.entry.value(); + } + return null; + } + + @Override + public byte[] position() { + return this.iterator != null ? 
this.iterator.position() : EMPTY_BYTES; + } + + @Override + public void seek(byte[] position) { + if (this.iterator != null) { + this.iterator.seek(position); + } + } + + @Override + public boolean hasNext() { + if (this.count >= this.limit) { + return false; + } + if (this.iterator == null) { + this.iterator = this.getIterator(); + } + return this.iterator != null; + + } + + @Override + public Object next() { + if (this.iterator == null) { + hasNext(); + } + if (this.iterator == null) { + throw new NoSuchElementException(); + } + this.entry = this.iterator.next(); + this.setIterator(); + this.count++; + return this.entry; + } + + @Override + public void close() { + if (this.iterator != null) { + this.iterator.close(); + } + this.closeIterators(); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/AbstractGrpcClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/AbstractGrpcClient.java new file mode 100644 index 0000000000..20aa54b39a --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/AbstractGrpcClient.java @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.IntStream; + +import org.apache.hugegraph.store.client.util.ExecutorPool; +import org.apache.hugegraph.store.client.util.HgStoreClientConfig; +import org.apache.hugegraph.store.term.HgPair; + +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.stub.AbstractAsyncStub; +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.AbstractStub; + +public abstract class AbstractGrpcClient { + + private static Map channels = new ConcurrentHashMap<>(); + private static int n = 5; + private static int concurrency = 1 << n; + private static AtomicLong counter = new AtomicLong(0); + private static long limit = Long.MAX_VALUE >> 1; + private static HgStoreClientConfig config = HgStoreClientConfig.of(); + private Map[]> blockingStubs = + new ConcurrentHashMap<>(); + private Map[]> asyncStubs = + new ConcurrentHashMap<>(); + private ThreadPoolExecutor executor; + + { + executor = ExecutorPool.createExecutor("common", 60, concurrency, concurrency); + } + + public AbstractGrpcClient() { + + } + + public ManagedChannel[] getChannels(String target) { + ManagedChannel[] tc; + if ((tc = channels.get(target)) == null) { + synchronized (channels) { + if ((tc = channels.get(target)) == null) { + try { + ManagedChannel[] value = new ManagedChannel[concurrency]; + CountDownLatch latch = new CountDownLatch(concurrency); + for (int i = 0; i < concurrency; i++) { + int fi = i; + executor.execute(() -> { + try { + value[fi] = getManagedChannel(target); + } catch (Exception e) { + throw new RuntimeException(e); + } finally 
{ + latch.countDown(); + } + }); + } + latch.await(); + channels.put(target, tc = value); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } + } + return tc; + } + + public abstract AbstractBlockingStub getBlockingStub(ManagedChannel channel); + + public AbstractBlockingStub getBlockingStub(String target) { + ManagedChannel[] channels = getChannels(target); + HgPair[] pairs = blockingStubs.get(target); + long l = counter.getAndIncrement(); + if (l >= limit) { + counter.set(0); + } + int index = (int) (l & (concurrency - 1)); + if (pairs == null) { + synchronized (blockingStubs) { + pairs = blockingStubs.get(target); + if (pairs == null) { + HgPair[] value = new HgPair[concurrency]; + IntStream.range(0, concurrency).forEach(i -> { + ManagedChannel channel = channels[index]; + AbstractBlockingStub stub = getBlockingStub(channel); + value[i] = new HgPair<>(channel, stub); + // log.info("create channel for {}",target); + }); + blockingStubs.put(target, value); + AbstractBlockingStub stub = value[index].getValue(); + return (AbstractBlockingStub) setBlockingStubOption(stub); + } + } + } + return (AbstractBlockingStub) setBlockingStubOption(pairs[index].getValue()); + } + + private AbstractStub setBlockingStubOption(AbstractBlockingStub stub) { + return stub.withDeadlineAfter(config.getGrpcTimeoutSeconds(), TimeUnit.SECONDS) + .withMaxInboundMessageSize( + config.getGrpcMaxInboundMessageSize()) + .withMaxOutboundMessageSize( + config.getGrpcMaxOutboundMessageSize()); + } + + public AbstractAsyncStub getAsyncStub(ManagedChannel channel) { + return null; + } + + public AbstractAsyncStub getAsyncStub(String target) { + ManagedChannel[] channels = getChannels(target); + HgPair[] pairs = asyncStubs.get(target); + long l = counter.getAndIncrement(); + if (l >= limit) { + counter.set(0); + } + int index = (int) (l & (concurrency - 1)); + if (pairs == null) { + synchronized (asyncStubs) { + pairs = asyncStubs.get(target); + if (pairs == null) { + HgPair[] 
value = new HgPair[concurrency]; + IntStream.range(0, concurrency).parallel().forEach(i -> { + ManagedChannel channel = channels[index]; + AbstractAsyncStub stub = getAsyncStub(channel); + // stub.withMaxInboundMessageSize(config.getGrpcMaxInboundMessageSize()) + // .withMaxOutboundMessageSize(config.getGrpcMaxOutboundMessageSize()); + value[i] = new HgPair<>(channel, stub); + // log.info("create channel for {}",target); + }); + asyncStubs.put(target, value); + AbstractAsyncStub stub = + (AbstractAsyncStub) setStubOption(value[index].getValue()); + return stub; + } + } + } + return (AbstractAsyncStub) setStubOption(pairs[index].getValue()); + + } + + private AbstractStub setStubOption(AbstractStub value) { + return value.withMaxInboundMessageSize( + config.getGrpcMaxInboundMessageSize()) + .withMaxOutboundMessageSize( + config.getGrpcMaxOutboundMessageSize()); + } + + private ManagedChannel getManagedChannel(String target) { + return ManagedChannelBuilder.forTarget(target).usePlaintext().build(); + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvEntryImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvEntryImpl.java new file mode 100644 index 0000000000..0cc4b303c7 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvEntryImpl.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Arrays; + +import org.apache.hugegraph.store.HgKvEntry; + +class GrpcKvEntryImpl implements HgKvEntry { + + private final byte[] key; + private final byte[] value; + private final int code; + + GrpcKvEntryImpl(byte[] k, byte[] v, int code) { + this.key = k; + this.value = v; + this.code = code; + } + + @Override + public int code() { + return code; + } + + @Override + public byte[] key() { + return key; + } + + @Override + public byte[] value() { + return value; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GrpcKvEntryImpl hgKvEntry = (GrpcKvEntryImpl) o; + return Arrays.equals(key, hgKvEntry.key) && Arrays.equals(value, hgKvEntry.value); + } + + @Override + public int hashCode() { + int result = Arrays.hashCode(key); + result = 31 * result + Arrays.hashCode(value); + return result; + } + + @Override + public String toString() { + return "HgKvEntryImpl{" + + "key=" + Arrays.toString(key) + + ", value=" + Arrays.toString(value) + + ", code=" + code + + '}'; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvIteratorImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvIteratorImpl.java new file mode 100644 index 0000000000..c9825a60b3 --- /dev/null +++ 
b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvIteratorImpl.java @@ -0,0 +1,161 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.List; + +import org.apache.hugegraph.store.HgKvEntry; +import org.apache.hugegraph.store.HgKvIterator; +import org.apache.hugegraph.store.HgKvOrderedIterator; +import org.apache.hugegraph.store.HgKvPagingIterator; +import org.apache.hugegraph.store.HgPageSize; +import org.apache.hugegraph.store.HgSeekAble; +import org.apache.hugegraph.store.client.HgStoreNodeSession; +import org.apache.hugegraph.store.client.util.HgStoreClientConst; +import org.apache.hugegraph.store.client.util.HgStoreClientUtil; +import org.apache.hugegraph.store.grpc.common.Kv; + +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2021/10/20 + * + * @version 0.2.1 + */ +@Slf4j +class GrpcKvIteratorImpl implements HgKvPagingIterator, HgKvOrderedIterator { + + private final byte[] emptyBytes = HgStoreClientConst.EMPTY_BYTES; + private final KvCloseableIterator iterator; + private final HgPageSize pageLimiter; + private final HgStoreNodeSession session; + private HgKvEntry element; + + private 
GrpcKvIteratorImpl(HgStoreNodeSession session, KvCloseableIterator iterator, + HgPageSize pageLimiter) { + this.iterator = iterator; + this.pageLimiter = pageLimiter; + this.session = session; + } + + public static HgKvIterator of(HgStoreNodeSession nodeSession, + KvCloseableIterator iterator) { + if (iterator instanceof HgPageSize) { + return of(nodeSession, iterator, (HgPageSize) iterator); + } + return new GrpcKvIteratorImpl(nodeSession, iterator, () -> 1); + } + + public static HgKvIterator of(HgStoreNodeSession nodeSession, + KvCloseableIterator iterator, + HgPageSize pageLimiter) { + return new GrpcKvIteratorImpl(nodeSession, iterator, pageLimiter); + } + + public static HgKvIterator of(HgStoreNodeSession nodeSession, List kvList) { + int pageSize = kvList.size(); + return new GrpcKvIteratorImpl(nodeSession, new KvListIterator(kvList), () -> pageSize); + } + + @Override + public boolean hasNext() { + // if (log.isDebugEnabled()) { + // if (!this.iterator.hasNext() && !nodeSession.getGraphName().endsWith("/s")) { + // log.debug("[ANALYSIS GrpcKv hasNext-> FALSE] "); + // } + // } + return this.iterator.hasNext(); + } + + @Override + public HgKvEntry next() { + Kv kv = this.iterator.next(); + this.element = new GrpcKvEntryImpl(kv.getKey().toByteArray(), kv.getValue().toByteArray(), + kv.getCode()); + return this.element; + } + + @Override + public byte[] key() { + if (this.element == null) { + return null; + } + return this.element.key(); + } + + @Override + public byte[] value() { + if (this.element == null) { + return null; + } + return this.element.value(); + } + + @Override + public byte[] position() { + if (this.element == null) { + return emptyBytes; + } + byte[] key = this.element.key(); + if (key == null) { + return emptyBytes; + } + if (!(this.iterator instanceof HgSeekAble)) { + return emptyBytes; + } + byte[] upstream = ((HgSeekAble) this.iterator).position(); + byte[] code = HgStoreClientUtil.toIntBytes(this.element.code()); + byte[] result = new 
byte[upstream.length + Integer.BYTES + key.length]; + System.arraycopy(upstream, 0, result, 0, upstream.length); + System.arraycopy(code, 0, result, upstream.length, Integer.BYTES); + System.arraycopy(key, 0, result, upstream.length + Integer.BYTES, key.length); + return result; + } + + @Override + public void seek(byte[] position) { + if (this.iterator instanceof HgSeekAble) { + ((HgSeekAble) this.iterator).seek(position); + } + } + + @Override + public long getPageSize() { + return pageLimiter.getPageSize(); + } + + @Override + public boolean isPageEmpty() { + return !iterator.hasNext(); + } + + @Override + public int compareTo(HgKvOrderedIterator o) { + return Long.compare(this.getSequence(), o.getSequence()); + } + + @Override + public long getSequence() { + return this.session.getStoreNode().getNodeId().longValue(); + } + + @Override + public void close() { + this.iterator.close(); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcNodeHealthyClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcNodeHealthyClient.java new file mode 100644 index 0000000000..5f6647094d --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcNodeHealthyClient.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.store.grpc.HealthyGrpc; +import org.apache.hugegraph.store.grpc.HealthyOuterClass; + +import com.google.protobuf.Empty; + +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; + +/** + * + */ +@ThreadSafe +public final class GrpcNodeHealthyClient { + + private final static Map CHANNEL_MAP = new ConcurrentHashMap<>(); + private final static Map STUB_MAP = + new ConcurrentHashMap<>(); + + // TODO: Forbid constructing out of the package. 
+ public GrpcNodeHealthyClient() { + + } + + private ManagedChannel getChannel(String target) { + ManagedChannel channel = CHANNEL_MAP.get(target); + if (channel == null) { + channel = ManagedChannelBuilder.forTarget(target).usePlaintext().build(); + CHANNEL_MAP.put(target, channel); + } + return channel; + } + + private HealthyGrpc.HealthyBlockingStub getStub(String target) { + HealthyGrpc.HealthyBlockingStub stub = STUB_MAP.get(target); + if (stub == null) { + stub = HealthyGrpc.newBlockingStub(getChannel(target)); + STUB_MAP.put(target, stub); + } + return stub; + } + + +/* boolean isHealthy(GrpcStoreNodeImpl node) { + String target = node.getAddress(); + + HealthyOuterClass.StringReply response = getStub(target).isOk(Empty.newBuilder().build()); + String res = response.getMessage(); + + if ("ok".equals(res)) { + return true; + } else { + System.out.printf("gRPC-res-msg: %s%n", res); + return false; + } + }*/ + + public boolean isHealthy() { + String target = "localhost:9080"; + ManagedChannel channel = ManagedChannelBuilder.forTarget(target).usePlaintext().build(); + HealthyGrpc.HealthyBlockingStub stub = HealthyGrpc.newBlockingStub(channel); + HealthyOuterClass.StringReply response = stub.isOk(Empty.newBuilder().build()); + + String res = response.getMessage(); + System.out.printf("gRPC response message:%s%n", res); + + return "ok".equals(res); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeBuilder.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeBuilder.java new file mode 100644 index 0000000000..eb215a4f7e --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeBuilder.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hugegraph.store.client.HgPrivate; +import org.apache.hugegraph.store.client.HgStoreNode; +import org.apache.hugegraph.store.client.HgStoreNodeBuilder; +import org.apache.hugegraph.store.client.HgStoreNodeManager; +import org.apache.hugegraph.store.client.util.HgAssert; + +/** + * created on 2021/10/12 + */ +public class GrpcStoreNodeBuilder implements HgStoreNodeBuilder { + + private static final GrpcStoreSessionClient sessionClient = new GrpcStoreSessionClient(); + private static final GrpcStoreStreamClient streamClient = new GrpcStoreStreamClient(); + private static final AtomicLong ids = new AtomicLong(0); + private final HgStoreNodeManager nodeManager; + private Long nodeId; + private String address; + + public GrpcStoreNodeBuilder(HgStoreNodeManager nodeManager, HgPrivate hgPrivate) { + HgAssert.isArgumentNotNull(hgPrivate, "hgPrivate"); + HgAssert.isArgumentNotNull(nodeManager, "nodeManager"); + this.nodeManager = nodeManager; + } + + @Override + public GrpcStoreNodeBuilder setAddress(String address) { + HgAssert.isFalse(HgAssert.isInvalid(address), "The argument is invalid: address."); + this.address = address; + return this; + } + + @Override + 
public GrpcStoreNodeBuilder setNodeId(Long nodeId) { + HgAssert.isFalse(nodeId == null, "The argument is invalid: nodeId."); + this.nodeId = nodeId; + return this; + } + + @Override + public HgStoreNode build() { + // TODO: delete + if (this.nodeId == null) { + this.nodeId = ids.addAndGet(-1L); + } + + HgAssert.isFalse(this.nodeId == null, "nodeId can't to be null"); + HgAssert.isFalse(this.address == null, "address can't to be null"); + + GrpcStoreNodeImpl node = + new GrpcStoreNodeImpl(this.nodeManager, sessionClient, streamClient); + node.setNodeId(this.nodeId); + node.setAddress(this.address); + + return node; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeImpl.java new file mode 100644 index 0000000000..4ca468ba6c --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeImpl.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Objects; + +import org.apache.hugegraph.store.HgStoreSession; +import org.apache.hugegraph.store.client.HgStoreNode; +import org.apache.hugegraph.store.client.HgStoreNodeManager; + +/** + * created on 2021/10/11 + */ +class GrpcStoreNodeImpl implements HgStoreNode { + + private final GrpcStoreSessionClient sessionClient; + private final GrpcStoreStreamClient streamClient; + private final HgStoreNodeManager nodeManager; + private String address; + private Long nodeId; + + GrpcStoreNodeImpl(HgStoreNodeManager nodeManager, GrpcStoreSessionClient sessionClient, + GrpcStoreStreamClient streamClient) { + this.nodeManager = nodeManager; + this.sessionClient = sessionClient; + this.streamClient = streamClient; + } + + @Override + public Long getNodeId() { + return this.nodeId; + } + + GrpcStoreNodeImpl setNodeId(Long nodeId) { + this.nodeId = nodeId; + return this; + } + + @Override + public String getAddress() { + return this.address; + } + + GrpcStoreNodeImpl setAddress(String address) { + this.address = address; + return this; + } + + @Override + public HgStoreSession openSession(String graphName) { + // HgAssert.isFalse(HgAssert.isInvalid(graphName), "the argument: graphName is invalid."); + // return new GrpcStoreNodeSessionImpl2(this, graphName,this.nodeManager, this + // .sessionClient, this + // .streamClient); + return new GrpcStoreNodeSessionImpl(this, graphName, this.nodeManager, this.sessionClient, + this.streamClient); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GrpcStoreNodeImpl that = (GrpcStoreNodeImpl) o; + return Objects.equals(address, that.address) && Objects.equals(nodeId, that.nodeId); + } + + @Override + public int hashCode() { + return Objects.hash(address, nodeId); + } + + @Override + public String toString() { + return "storeNode: {" + + "address: \"" + 
address + "\"" + + ", nodeId: " + nodeId + + "}"; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeSessionImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeSessionImpl.java new file mode 100644 index 0000000000..77c8a45537 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeSessionImpl.java @@ -0,0 +1,545 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.store.HgKvEntry; +import org.apache.hugegraph.store.HgKvIterator; +import org.apache.hugegraph.store.HgKvStore; +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.HgScanQuery; +import org.apache.hugegraph.store.client.HgStoreNode; +import org.apache.hugegraph.store.client.HgStoreNodeManager; +import org.apache.hugegraph.store.client.HgStoreNodeSession; +import org.apache.hugegraph.store.client.util.HgAssert; +import org.apache.hugegraph.store.client.util.HgStoreClientConfig; +import org.apache.hugegraph.store.client.util.HgStoreClientConst; +import org.apache.hugegraph.store.client.util.HgStoreClientUtil; +import org.apache.hugegraph.store.client.util.HgUuid; +import org.apache.hugegraph.store.grpc.common.GraphMethod; +import org.apache.hugegraph.store.grpc.common.Key; +import org.apache.hugegraph.store.grpc.common.OpType; +import org.apache.hugegraph.store.grpc.common.TableMethod; +import org.apache.hugegraph.store.grpc.session.BatchEntry; +import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc.HgStoreStreamStub; +import org.apache.hugegraph.store.grpc.stream.ScanStreamReq; + +import com.google.protobuf.ByteString; +import com.google.protobuf.UnsafeByteOperations; + +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2022/01/19 + * + * @version 0.6.0 added batch get on 2022/04/06 + */ +@Slf4j +@NotThreadSafe +class GrpcStoreNodeSessionImpl implements HgStoreNodeSession { + + private static final HgStoreClientConfig hgStoreClientConfig = HgStoreClientConfig.of(); + private static final ConcurrentHashMap tables = new ConcurrentHashMap<>() {{ + put("unknown", 0); + put("g+v", 1); 
+ put("g+oe", 2); + put("g+ie", 3); + put("g+index", 4); + put("g+task", 5); + put("g+olap", 6); + put("g+server", 7); + }}; + private final HgStoreNode storeNode; + private final String graphName; + private final GrpcStoreSessionClient storeSessionClient; + private final GrpcStoreStreamClient storeStreamClient; + private final HgStoreNodeManager nodeManager; + private final NotifyingExecutor notifier; + private final SwitchingExecutor switcher; + private final BatchEntry.Builder batchEntryBuilder = BatchEntry.newBuilder(); + private final Key.Builder builder = Key.newBuilder(); + private boolean isAutoCommit = true; + private String batchId; + private LinkedList batchEntries = new LinkedList<>(); + + GrpcStoreNodeSessionImpl(HgStoreNode storeNode, String graphName, + HgStoreNodeManager nodeManager, + GrpcStoreSessionClient sessionClient, + GrpcStoreStreamClient streamClient) { + HgAssert.isFalse(HgAssert.isInvalid(graphName), "the argument: graphName is invalid."); + HgAssert.isFalse(nodeManager == null, "the argument: nodeManager is null."); + HgAssert.isFalse(storeNode == null, "the argument: storeNode is null."); + HgAssert.isFalse(sessionClient == null, "the argument: sessionClient is null."); + HgAssert.isFalse(streamClient == null, "the argument: streamClient is null."); + + this.graphName = graphName; + this.storeNode = storeNode; + this.storeSessionClient = sessionClient; + this.storeStreamClient = streamClient; + this.nodeManager = nodeManager; + + this.notifier = new NotifyingExecutor(this.graphName, this.nodeManager, this); + this.switcher = SwitchingExecutor.of(); + } + + @Override + public String getGraphName() { + return graphName; + } + + @Override + public HgStoreNode getStoreNode() { + return storeNode; + } + + public Key toKey(HgOwnerKey ownerKey) { + if (ownerKey == null) { + return null; + } + return builder + .setKey(UnsafeByteOperations.unsafeWrap(ownerKey.getKey())) + .setCode(ownerKey.getKeyCode()) + .build(); + } + + @Override + public 
void beginTx() { + this.isAutoCommit = false; + } + + @Override + public void commit() { + try { + if (this.isAutoCommit) { + throw new IllegalStateException("It's not in tx state"); + } + if (this.batchEntries.isEmpty()) { + this.resetTx(); + return; + } + if (!this.doCommit(this.batchEntries)) { + throw new Exception("Failed to invoke doCommit"); + } + } catch (Throwable t) { + throw new RuntimeException(t); + } finally { + this.resetTx(); + } + + } + + @Override + public void rollback() { + if (this.isAutoCommit) { + throw new IllegalStateException("It's not in tx state"); + } + this.resetTx(); + } + + @Override + public boolean isTx() { + return !this.isAutoCommit; + } + + private void resetTx() { + this.isAutoCommit = true; + this.batchId = null; + this.batchEntries = new LinkedList<>(); + } + + //TODO: not support distributed tx yet. + private String getBatchId() { + if (this.isAutoCommit) { + this.batchId = HgUuid.newUUID(); + } else { + if (this.batchId == null) { + this.batchId = HgUuid.newUUID(); + } + } + return this.batchId; + } + + @Override + public boolean put(String table, HgOwnerKey ownerKey, byte[] value) { + return this.prepareBatchEntry(OpType.OP_TYPE_PUT, table, ownerKey, null, value); + } + + @Override + public boolean directPut(String table, int partitionId, HgOwnerKey key, byte[] value) { + return false; + } + + @Override + public boolean delete(String table, HgOwnerKey ownerKey) { + return this.prepareBatchEntry(OpType.OP_TYPE_DEL, table, ownerKey, null, null); + } + + @Override + public boolean deleteSingle(String table, HgOwnerKey ownerKey) { + return this.prepareBatchEntry(OpType.OP_TYPE_DEL_SINGLE, table, ownerKey, null, null); + } + + @Override + public boolean deletePrefix(String table, HgOwnerKey prefix) { + return this.prepareBatchEntry(OpType.OP_TYPE_DEL_PREFIX, table, prefix, null, null); + } + + @Override + public boolean deleteRange(String table, HgOwnerKey start, HgOwnerKey end) { + return 
this.prepareBatchEntry(OpType.OP_TYPE_DEL_RANGE, table, start, end, null); + } + + @Override + public boolean merge(String table, HgOwnerKey key, byte[] value) { + return this.prepareBatchEntry(OpType.OP_TYPE_MERGE, table, key, null, value); + } + + private boolean prepareBatchEntry(OpType opType, String table + , HgOwnerKey startKey, HgOwnerKey endKey, byte[] value) { + this.batchEntryBuilder.clear().setOpType(opType); + this.batchEntryBuilder.setTable(tables.get(table)); + if (startKey != null) { + this.batchEntryBuilder.setStartKey(toKey(startKey)); + } + if (endKey != null) { + this.batchEntryBuilder.setEndKey(toKey(endKey)); + } + if (value != null) { + this.batchEntryBuilder.setValue(ByteString.copyFrom(value)); + } + if (this.isAutoCommit) { + return this.doCommit(Collections.singletonList(this.batchEntryBuilder.build())); + } else { + return this.batchEntries.add(this.batchEntryBuilder.build()); + } + + } + + private boolean doCommit(List entries) { + return this.notifier.invoke( + () -> this.storeSessionClient.doBatch(this, this.getBatchId(), entries), + e -> true + ).orElse(false); + } + + @Override + public byte[] get(String table, HgOwnerKey ownerKey) { + return this.notifier.invoke( + () -> this.storeSessionClient.doGet(this, table, ownerKey) + , + e -> e.getValueResponse().getValue().toByteArray() + ).orElse(HgStoreClientConst.EMPTY_BYTES); + } + + @Override + public boolean clean(int partId) { + return this.notifier.invoke( + () -> this.storeSessionClient.doClean(this, partId) + , + e -> true + + ).orElse(false); + } + + @Override + public List batchGetOwner(String table, List keyList) { + return this.notifier.invoke( + () -> this.storeSessionClient.doBatchGet(this, table, keyList), + e -> e.getKeyValueResponse().getKvList() + .stream() + .map(kv -> (HgKvEntry) new GrpcKvEntryImpl(kv.getKey().toByteArray() + , kv.getValue().toByteArray(), kv.getCode()) + ) + .collect(Collectors.toList())) + .orElse((List) HgStoreClientConst.EMPTY_LIST); + } + + 
@Override + public HgKvIterator batchPrefix(String table, List keyList) { + return GrpcKvIteratorImpl.of(this, + this.storeStreamClient.doBatchScanOneShot(this, + HgScanQuery.prefixOf( + table, + keyList)) + ); + } + + @Override + public boolean existsTable(String table) { + return this.notifier.invoke( + () -> this.storeSessionClient.doTable(this, table, + TableMethod.TABLE_METHOD_EXISTS), + e -> true) + .orElse(false); + } + + @Override + public boolean createTable(String table) { + return this.notifier.invoke( + () -> this.storeSessionClient.doTable(this, table, + TableMethod.TABLE_METHOD_CREATE), + e -> true) + .orElse(false); + } + + @Override + public boolean deleteTable(String table) { + return this.notifier.invoke( + () -> this.storeSessionClient.doTable(this, table, + TableMethod.TABLE_METHOD_DELETE), + e -> true) + .orElse(false); + } + + @Override + public boolean dropTable(String table) { + return this.notifier.invoke( + () -> this.storeSessionClient.doTable(this, table, + TableMethod.TABLE_METHOD_DROP), + e -> true) + .orElse(false); + } + + @Override + public boolean deleteGraph(String graph) { + return this.notifier.invoke( + () -> this.storeSessionClient.doGraph(this, graph, + GraphMethod.GRAPH_METHOD_DELETE), + e -> true) + .orElse(false); + } + + @Override + public boolean truncate() { + return this.notifier.invoke( + () -> this.storeSessionClient.doTable(this, + HgStoreClientConst.EMPTY_TABLE + , TableMethod.TABLE_METHOD_TRUNCATE), + e -> true) + .orElse(false); + } + + @Override + public HgKvIterator scanIterator(String table) { + return GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScan(this, table, 0)); + } + + @Override + public HgKvIterator scanIterator(String table, long limit) { + return this.switcher.invoke(getSwitcherSupplier(limit) + , () -> GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScan + (this, table, + limit)) + , () -> GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScanOneShot + (this, table, + limit)) + 
).get(); + } + + @Override + public HgKvIterator scanIterator(ScanStreamReq.Builder builder) { + HgStoreStreamStub stub = getStub(); + KvPageScanner scanner = new KvPageScanner(this, + stub, + builder); + return GrpcKvIteratorImpl.of(this, scanner); + } + + @Override + public long count(String table) { + return this.storeSessionClient.count(this, table); + } + + @Override + public HgKvIterator scanIterator(String table, byte[] query) { + return GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScan(this, table, 0, query)); + } + + private HgStoreStreamStub getStub() { + return this.storeStreamClient.getStub(this); + } + + // @Override + // public HgKvIterator scanIterator(ScanStreamReq scanReq) { + // KvPageScanner6 scanner = new KvPageScanner6(this, + // getStub(), + // scanReq.toBuilder()); + // return GrpcKvIteratorImpl.of(this, scanner); + // } + + @Override + public HgKvIterator scanIterator(String table, long limit, byte[] query) { + return this.switcher.invoke(getSwitcherSupplier(limit) + , () -> GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScan + (this, table, + limit, query)) + , () -> GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScanOneShot + (this, table, + limit, query)) + ).get(); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix) { + return GrpcKvIteratorImpl.of(this, + this.storeStreamClient.doScan(this, table, keyPrefix, 0)); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix, long limit) { + return this.switcher.invoke(getSwitcherSupplier(limit), + () -> GrpcKvIteratorImpl.of(this, + this.storeStreamClient.doScan(this, + table, + keyPrefix, + limit)), + () -> GrpcKvIteratorImpl.of(this, + this.storeStreamClient.doScanOneShot( + this, + table, + keyPrefix, + limit))) + .get(); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix, long limit, + byte[] query) { + return this.switcher.invoke(getSwitcherSupplier(limit), + () 
-> GrpcKvIteratorImpl.of(this, + this.storeStreamClient.doScan( + this, + table, + keyPrefix, + limit, + query)), + () -> GrpcKvIteratorImpl.of(this, + this.storeStreamClient.doScanOneShot( + this, + table, + keyPrefix, + limit, + query))) + .get(); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey startKey, + HgOwnerKey endKey) { + return scanIterator(table, startKey, endKey, 0, HgKvStore.SCAN_ANY, null); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey startKey, + HgOwnerKey endKey, long limit) { + return scanIterator(table, startKey, endKey, limit, + HgStoreClientUtil.isValid(endKey) ? HgStoreClientConst.SCAN_TYPE_RANGE : + HgStoreClientConst.SCAN_TYPE_ANY, null); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey startKey, + HgOwnerKey endKey, long limit, byte[] query) { + return scanIterator(table, startKey, endKey, limit, + HgStoreClientUtil.isValid(endKey) ? HgStoreClientConst.SCAN_TYPE_RANGE : + HgStoreClientConst.SCAN_TYPE_ANY, query); + } + + @Override + public HgKvIterator scanIterator(String table, HgOwnerKey startKey, + HgOwnerKey endKey, + long limit, int scanType, byte[] query) { + + return this.switcher.invoke(getSwitcherSupplier(limit), + () -> GrpcKvIteratorImpl.of(this, + this.storeStreamClient.doScan( + this, + table, + startKey, + endKey, + limit, + scanType, + query)), + () -> GrpcKvIteratorImpl.of(this, + this.storeStreamClient.doScanOneShot( + this, + table, + startKey, + endKey, + limit, + scanType, + query))) + .get(); + + } + + @Override + public HgKvIterator scanIterator(String table, int codeFrom, int codeTo, + int scanType, byte[] query) { + //TODO: Should be changed when start using hashcode as partitionId. 
+ if (log.isDebugEnabled()) { + log.debug("scanIterator-scanType: {}", scanType); + } + return GrpcKvIteratorImpl.of(this, + this.storeStreamClient.doScan(this, table + , HgOwnerKey.newEmpty().codeToKey(codeFrom) + , HgOwnerKey.newEmpty().codeToKey(codeTo) + , HgStoreClientConst.NO_LIMIT + , HgKvStore.SCAN_PREFIX_BEGIN | + HgKvStore.SCAN_HASHCODE | scanType + , query + ) + ); + } + + @Override + public List> scanBatch(HgScanQuery scanQuery) { + return Collections.singletonList(GrpcKvIteratorImpl.of(this, + this.storeStreamClient.doBatchScan( + this, scanQuery) + )); + } + + @Override + public KvCloseableIterator> scanBatch2(HgScanQuery scanQuery) { + throw new RuntimeException("not implemented"); + } + + @Override + public KvCloseableIterator> scanBatch3(HgScanQuery scanQuery, + KvCloseableIterator iterator) { + return this.storeStreamClient.doBatchScan3(this, scanQuery, iterator); + } + + private Supplier getSwitcherSupplier(long limit) { + return () -> limit <= 0 || limit > hgStoreClientConfig.getNetKvScannerPageSize(); + } + + @Override + public String toString() { + return "storeNodeSession: {" + storeNode + ", graphName: \"" + graphName + "\"}"; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreSessionClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreSessionClient.java new file mode 100644 index 0000000000..794a7c1286 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreSessionClient.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.grpc; + +import static org.apache.hugegraph.store.client.grpc.KvBatchUtil.getHeader; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.client.HgStoreNodeSession; +import org.apache.hugegraph.store.grpc.common.GraphMethod; +import org.apache.hugegraph.store.grpc.common.ScanMethod; +import org.apache.hugegraph.store.grpc.common.TableMethod; +import org.apache.hugegraph.store.grpc.session.Agg; +import org.apache.hugegraph.store.grpc.session.BatchEntry; +import org.apache.hugegraph.store.grpc.session.BatchGetReq; +import org.apache.hugegraph.store.grpc.session.BatchReq; +import org.apache.hugegraph.store.grpc.session.BatchWriteReq; +import org.apache.hugegraph.store.grpc.session.CleanReq; +import org.apache.hugegraph.store.grpc.session.FeedbackRes; +import org.apache.hugegraph.store.grpc.session.GetReq; +import org.apache.hugegraph.store.grpc.session.GraphReq; +import org.apache.hugegraph.store.grpc.session.HgStoreSessionGrpc; +import org.apache.hugegraph.store.grpc.session.HgStoreSessionGrpc.HgStoreSessionBlockingStub; +import org.apache.hugegraph.store.grpc.session.TableReq; +import org.apache.hugegraph.store.grpc.stream.ScanStreamReq; + +import io.grpc.Deadline; +import io.grpc.ManagedChannel; 
+import lombok.extern.slf4j.Slf4j; + +/** + * created on 2021/11/18 + * + * @version 0.5.0 + */ +@Slf4j +@ThreadSafe +class GrpcStoreSessionClient extends AbstractGrpcClient { + + @Override + public HgStoreSessionBlockingStub getBlockingStub(ManagedChannel channel) { + HgStoreSessionBlockingStub stub; + stub = HgStoreSessionGrpc.newBlockingStub(channel); + return stub; + } + + private HgStoreSessionBlockingStub getBlockingStub(HgStoreNodeSession nodeSession) { + HgStoreSessionBlockingStub stub = + (HgStoreSessionBlockingStub) getBlockingStub( + nodeSession.getStoreNode().getAddress()); + return stub; + } + + FeedbackRes doGet(HgStoreNodeSession nodeSession, String table, HgOwnerKey ownerKey) { + if (log.isDebugEnabled()) { + log.debug("doGet: {}-{}-{}-{}", nodeSession, table, ownerKey, GetReq.newBuilder() + .setHeader( + GrpcUtil.getHeader( + nodeSession)) + .setTk(GrpcUtil.toTk( + table, + ownerKey)) + .build()); + } + return this.getBlockingStub(nodeSession) + .get2(GetReq.newBuilder() + .setHeader(GrpcUtil.getHeader(nodeSession)) + .setTk(GrpcUtil.toTk(table, ownerKey)) + .build() + ); + } + + FeedbackRes doClean(HgStoreNodeSession nodeSession, int partId) { + return this.getBlockingStub(nodeSession) + .clean(CleanReq.newBuilder() + .setHeader(GrpcUtil.getHeader(nodeSession)) + .setPartition(partId) + .build() + ); + } + + FeedbackRes doBatchGet(HgStoreNodeSession nodeSession, String table, List keyList) { + BatchGetReq.Builder builder = BatchGetReq.newBuilder(); + builder.setHeader(GrpcUtil.getHeader(nodeSession)).setTable(table); + + for (HgOwnerKey key : keyList) { + builder.addKey(GrpcUtil.toKey(key)); + } + + if (log.isDebugEnabled()) { + log.debug("batchGet2: {}-{}-{}-{}", nodeSession, table, keyList, builder.build()); + } + return this.getBlockingStub(nodeSession).batchGet2(builder.build()); + + } + + FeedbackRes doBatch(HgStoreNodeSession nodeSession, String batchId, List entries) { + BatchWriteReq.Builder writeReq = BatchWriteReq.newBuilder(); + 
writeReq.addAllEntry(entries); + return this.getBlockingStub(nodeSession) + .batch(BatchReq.newBuilder() + .setHeader(GrpcUtil.getHeader(nodeSession)) + .setWriteReq(writeReq) + .setBatchId(batchId) + .build() + ); + } + + FeedbackRes doTable(HgStoreNodeSession nodeSession, String table, TableMethod method) { + return this.getBlockingStub(nodeSession) + .table(TableReq.newBuilder() + .setHeader(GrpcUtil.getHeader(nodeSession)) + .setTableName(table) + .setMethod(method) + .build() + ); + } + + FeedbackRes doGraph(HgStoreNodeSession nodeSession, String graph, GraphMethod method) { + return this.getBlockingStub(nodeSession) + .graph(GraphReq.newBuilder() + .setHeader(GrpcUtil.getHeader(nodeSession)) + .setGraphName(graph) + .setMethod(method) + .build() + ); + } + + public long count(HgStoreNodeSession nodeSession, String table) { + Agg agg = this.getBlockingStub(nodeSession).withDeadline(Deadline.after(24, TimeUnit.HOURS)) + .count(ScanStreamReq.newBuilder() + .setHeader(getHeader(nodeSession)) + .setTable(table) + .setMethod(ScanMethod.ALL) + .build() + ); + return agg.getCount(); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStateClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStateClient.java new file mode 100644 index 0000000000..be20d06bb1 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStateClient.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.store.grpc.state.HgStoreStateGrpc; +import org.apache.hugegraph.store.grpc.state.HgStoreStateGrpc.HgStoreStateBlockingStub; +import org.apache.hugegraph.store.grpc.state.ScanState; +import org.apache.hugegraph.store.grpc.state.SubStateReq; + +import io.grpc.ManagedChannel; +import io.grpc.stub.AbstractBlockingStub; +import lombok.extern.slf4j.Slf4j; + +/** + * + */ +@Slf4j +@ThreadSafe +public class GrpcStoreStateClient extends AbstractGrpcClient { + + private final PDConfig pdConfig; + private final PDClient pdClient; + + public GrpcStoreStateClient(PDConfig pdConfig) { + this.pdConfig = pdConfig; + pdClient = PDClient.create(this.pdConfig); + } + + public Set getScanState() throws Exception { + try { + List activeStores = pdClient.getActiveStores(); + Set states = activeStores.parallelStream().map(node -> { + String address = node.getAddress(); + HgStoreStateBlockingStub stub = (HgStoreStateBlockingStub) getBlockingStub(address); + SubStateReq req = SubStateReq.newBuilder().build(); + return stub.getScanState(req); + }).collect(Collectors.toSet()); + return states; + } catch (Exception e) { + throw e; + } + + } + + @Override + public AbstractBlockingStub getBlockingStub(ManagedChannel 
channel) { + HgStoreStateBlockingStub stub; + stub = HgStoreStateGrpc.newBlockingStub(channel); + return stub; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStreamClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStreamClient.java new file mode 100644 index 0000000000..93cfe7acac --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreStreamClient.java @@ -0,0 +1,217 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.store.HgKvEntry; +import org.apache.hugegraph.store.HgKvIterator; +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.HgScanQuery; +import org.apache.hugegraph.store.client.HgStoreNodeSession; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc; +import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc.HgStoreStreamBlockingStub; +import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc.HgStoreStreamStub; + +import io.grpc.ManagedChannel; +import io.grpc.stub.AbstractAsyncStub; +import io.grpc.stub.AbstractBlockingStub; +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2021/10/19 + * + * @version 1.1.1 added synchronized in getChannel. + */ +@Slf4j +@ThreadSafe +public class GrpcStoreStreamClient extends AbstractGrpcClient { + + public HgStoreStreamStub getStub(HgStoreNodeSession nodeSession) { + return (HgStoreStreamStub) getAsyncStub(nodeSession.getStoreNode().getAddress()); + } + + @Override + public AbstractAsyncStub getAsyncStub(ManagedChannel channel) { + return HgStoreStreamGrpc.newStub(channel); + } + + private HgStoreStreamBlockingStub getBlockingStub(HgStoreNodeSession nodeSession) { + return (HgStoreStreamBlockingStub) getBlockingStub(nodeSession.getStoreNode().getAddress()); + } + + @Override + public AbstractBlockingStub getBlockingStub(ManagedChannel channel) { + return HgStoreStreamGrpc.newBlockingStub(channel); + } + + KvCloseableIterator doScanOneShot(HgStoreNodeSession nodeSession, String table, long limit, + byte[] query) { + return KvOneShotScanner.scanAll(nodeSession + , this.getBlockingStub(nodeSession) + , table + , limit + , query + ); + } + + KvCloseableIterator doScanOneShot(HgStoreNodeSession nodeSession, String table, + long limit) { + return KvOneShotScanner.scanAll(nodeSession + , 
this.getBlockingStub(nodeSession) + , table + , limit + , null + ); + } + + KvCloseableIterator doScanOneShot(HgStoreNodeSession nodeSession, String table, + HgOwnerKey prefix, long limit) { + return KvOneShotScanner.scanPrefix(nodeSession + , this.getBlockingStub(nodeSession) + , table + , prefix + , limit + , null + ); + } + + KvCloseableIterator doScanOneShot(HgStoreNodeSession nodeSession, String table, + HgOwnerKey prefix, long limit, + byte[] query) { + return KvOneShotScanner.scanPrefix(nodeSession + , this.getBlockingStub(nodeSession) + , table + , prefix + , limit + , query + ); + } + + KvCloseableIterator doScanOneShot(HgStoreNodeSession nodeSession, String table, + HgOwnerKey startKey, + HgOwnerKey endKey + , long limit + , int scanType + , byte[] query) { + + return KvOneShotScanner.scanRange(nodeSession + , this.getBlockingStub(nodeSession) + , table + , startKey + , endKey + , limit + , scanType + , query + ); + } + + KvCloseableIterator doScan(HgStoreNodeSession nodeSession + , String table + , long limit + , byte[] query) { + + return KvPageScanner.scanAll(nodeSession + , this.getStub(nodeSession) + , table + , limit + , query + ); + } + + KvCloseableIterator doScan(HgStoreNodeSession nodeSession + , String table + , long limit) { + + return KvPageScanner.scanAll(nodeSession + , this.getStub(nodeSession) + , table + , limit + , null + ); + } + + KvCloseableIterator doScan(HgStoreNodeSession nodeSession + , String table + , HgOwnerKey prefix + , long limit) { + + return KvPageScanner.scanPrefix(nodeSession + , this.getStub(nodeSession) + , table + , prefix + , limit + , null + ); + } + + KvCloseableIterator doScan(HgStoreNodeSession nodeSession + , String table + , HgOwnerKey prefix + , long limit + , byte[] query) { + + return KvPageScanner.scanPrefix(nodeSession + , this.getStub(nodeSession) + , table + , prefix + , limit + , query + ); + } + + KvCloseableIterator doScan(HgStoreNodeSession nodeSession + , String table + , HgOwnerKey startKey + , 
HgOwnerKey endKey + , long limit + , int scanType + , byte[] query) { + + return KvPageScanner.scanRange(nodeSession + , this.getStub(nodeSession) + , table + , startKey + , endKey + , limit + , scanType + , query + ); + } + + KvCloseableIterator doBatchScan(HgStoreNodeSession nodeSession, HgScanQuery scanQuery) { + return KvBatchScanner5.scan(nodeSession, this.getStub(nodeSession), scanQuery); + } + + // 返回多个小的迭代器,允许上层并行处理 + KvCloseableIterator> doBatchScan3(HgStoreNodeSession nodeSession, + HgScanQuery scanQuery, + KvCloseableIterator iterator) { + KvBatchScanner.scan(this.getStub(nodeSession), nodeSession.getGraphName(), scanQuery, + iterator); + return iterator; + } + + KvCloseableIterator doBatchScanOneShot(HgStoreNodeSession nodeSession, + HgScanQuery scanQuery) { + return KvBatchOneShotScanner.scan(nodeSession, this.getBlockingStub(nodeSession), + scanQuery); + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcUtil.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcUtil.java new file mode 100644 index 0000000000..2191f7aee7 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcUtil.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hugegraph.store.HgKvEntry; +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.client.HgStoreNodeSession; +import org.apache.hugegraph.store.client.util.HgStoreClientConst; +import org.apache.hugegraph.store.grpc.common.Header; +import org.apache.hugegraph.store.grpc.common.Key; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.common.Tk; +import org.apache.hugegraph.store.grpc.common.Tkv; +import org.apache.hugegraph.store.grpc.common.Tp; +import org.apache.hugegraph.store.grpc.common.Tse; + +import com.google.protobuf.ByteString; + +import io.grpc.Status; +import io.grpc.StatusRuntimeException; + +/** + * 2022/1/19 + */ +final class GrpcUtil { + + private static final ThreadLocal keyBuilder = new ThreadLocal(); + + static Header getHeader(HgStoreNodeSession nodeSession) { + return Header.newBuilder() + .setGraph(nodeSession.getGraphName()) + .build(); + } + + static Tk toTk(String table, HgOwnerKey ownerKey) { + return Tk.newBuilder() + .setTable(table) + .setKey(ByteString.copyFrom(ownerKey.getKey())) + .setCode(ownerKey.getKeyCode()) + .build(); + } + + static Key.Builder getOwnerKeyBuilder() { + Key.Builder builder = keyBuilder.get(); + if (builder == null) { + builder = Key.newBuilder(); + // TODO 线程级变量,寻找删除时机 + keyBuilder.set(builder); + } + return builder; + } + + static Key toKey(HgOwnerKey ownerKey, Key.Builder builder) { + if (ownerKey == null) { + return null; + } + return builder + .setKey(ByteString.copyFrom(ownerKey.getKey())) + .setCode(ownerKey.getKeyCode()) + .build(); + } + + static Key toKey(HgOwnerKey ownerKey) { + if (ownerKey == null) { + return null; + } + Key.Builder builder = keyBuilder.get(); + if (builder 
== null) { + builder = Key.newBuilder(); + // TODO 线程级变量,寻找删除时机 + keyBuilder.set(builder); + } + return builder + .setKey(ByteString.copyFrom(ownerKey.getKey())) + .setCode(ownerKey.getKeyCode()) + .build(); + } + + static Tkv toTkv(String table, HgOwnerKey ownerKey, byte[] value) { + return Tkv.newBuilder() + .setTable(table) + .setKey(ByteString.copyFrom(ownerKey.getKey())) + .setValue(ByteString.copyFrom(value)) + .setCode(ownerKey.getKeyCode()) + .build(); + } + + static Tp toTp(String table, HgOwnerKey ownerKey) { + return Tp.newBuilder() + .setTable(table) + .setPrefix(ByteString.copyFrom(ownerKey.getKey())) + .setCode(ownerKey.getKeyCode()) + .build(); + } + + static Tse toTse(String table, HgOwnerKey startKey, HgOwnerKey endKey) { + return Tse.newBuilder() + .setTable(table) + .setStart(toKey(startKey)) + .setEnd(toKey(endKey)) + .build(); + + } + + static List toList(List kvList) { + if (kvList == null || kvList.isEmpty()) { + return HgStoreClientConst.EMPTY_LIST; + } + + Iterator iter = kvList.iterator(); + List resList = new ArrayList<>(kvList.size()); + + while (iter.hasNext()) { + Kv entry = iter.next(); + resList.add(new GrpcKvEntryImpl(entry.getKey().toByteArray(), + entry.getValue().toByteArray(), entry.getCode())); + } + + return resList; + } + + static StatusRuntimeException toErr(String msg) { + return new StatusRuntimeException(Status.UNKNOWN.withDescription(msg)); + } + + static ByteString toBs(byte[] bytes) { + return ByteString.copyFrom((bytes != null) ? 
bytes : HgStoreClientConst.EMPTY_BYTES); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchOneShotScanner.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchOneShotScanner.java new file mode 100644 index 0000000000..166e091fb4 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchOneShotScanner.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import static org.apache.hugegraph.store.client.grpc.KvBatchUtil.EMPTY_POSITION; +import static org.apache.hugegraph.store.client.grpc.KvBatchUtil.createQueryReq; +import static org.apache.hugegraph.store.client.grpc.KvBatchUtil.getHeader; + +import java.util.Iterator; +import java.util.List; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.store.HgPageSize; +import org.apache.hugegraph.store.HgScanQuery; +import org.apache.hugegraph.store.HgSeekAble; +import org.apache.hugegraph.store.client.HgStoreNodeSession; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc; +import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq; + +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2022/04/08 + */ +@Slf4j +@NotThreadSafe +class KvBatchOneShotScanner implements KvCloseableIterator, HgPageSize, HgSeekAble { + + private final HgStoreNodeSession nodeSession; + private final HgStoreStreamGrpc.HgStoreStreamBlockingStub stub; + private final HgScanQuery scanQuery; + + private Iterator iterator; + private List list = null; + + private KvBatchOneShotScanner(HgStoreNodeSession nodeSession, + HgStoreStreamGrpc.HgStoreStreamBlockingStub stub, + HgScanQuery scanQuery) { + + this.nodeSession = nodeSession; + this.stub = stub; + this.scanQuery = scanQuery; + } + + public static KvCloseableIterator scan(HgStoreNodeSession nodeSession, + HgStoreStreamGrpc.HgStoreStreamBlockingStub stub, + HgScanQuery scanQuery) { + + return new KvBatchOneShotScanner(nodeSession, stub, scanQuery); + } + + private ScanStreamBatchReq createReq() { + return ScanStreamBatchReq.newBuilder() + .setHeader(getHeader(this.nodeSession)) + .setQueryRequest(createQueryReq(this.scanQuery, Integer.MAX_VALUE)) + .build(); + } + + private Iterator createIterator() { + this.list = this.stub.scanBatchOneShot(this.createReq()).getDataList(); + return 
this.list.iterator(); + } + + /*** Iterator ***/ + @Override + public boolean hasNext() { + if (this.iterator == null) { + this.iterator = this.createIterator(); + } + return this.iterator.hasNext(); + } + + @Override + public Kv next() { + if (this.iterator == null) { + this.iterator = this.createIterator(); + } + return this.iterator.next(); + } + + @Override + public long getPageSize() { + return Integer.MAX_VALUE; + } + + @Override + public boolean isPageEmpty() { + return !this.iterator.hasNext(); + } + + @Override + public byte[] position() { + //TODO: to implement + return EMPTY_POSITION; + } + + @Override + public void seek(byte[] position) { + //TODO: to implement + } + + @Override + public void close() { + //Nothing to do + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner.java new file mode 100644 index 0000000000..dce65636be --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner.java @@ -0,0 +1,410 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import static org.apache.hugegraph.store.client.grpc.KvBatchUtil.createQueryReq; + +import java.io.Closeable; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.function.BiFunction; +import java.util.function.Supplier; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.store.HgKvEntry; +import org.apache.hugegraph.store.HgKvIterator; +import org.apache.hugegraph.store.HgKvOrderedIterator; +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.HgScanQuery; +import org.apache.hugegraph.store.buffer.KVByteBuffer; +import org.apache.hugegraph.store.client.util.PropertyUtil; +import org.apache.hugegraph.store.grpc.common.Header; +import org.apache.hugegraph.store.grpc.common.ScanOrderType; +import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc; +import org.apache.hugegraph.store.grpc.stream.KvStream; +import org.apache.hugegraph.store.grpc.stream.ScanReceiptRequest; +import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * 批量流式查询客户端实现类 + *

+ * created on 2022/07/23 + * + * @version 3.0.0 + */ +@Slf4j +@NotThreadSafe +public class KvBatchScanner implements Closeable { + + static final Supplier> NO_DATA = () -> null; + static int maxTaskSizePerStore = PropertyUtil.getInt("net.kv.scanner.task.size", 8); + private final StreamObserver sender; // 命令发送器 + private final KvBatchScannerMerger notifier; // 数据通知 + private final String graphName; // 图名 + private final HgScanQuery scanQuery; + private final ScanReceiptRequest.Builder responseBuilder = ScanReceiptRequest.newBuilder(); + private final KvBatchReceiver receiver; + volatile int currentSeqNo = 0; + private volatile boolean running; + + public KvBatchScanner( + HgStoreStreamGrpc.HgStoreStreamStub stub, + String graphName, + HgScanQuery scanQuery, + KvCloseableIterator iterator) { + + this.graphName = graphName; + this.notifier = (KvBatchScannerMerger) iterator; + this.notifier.registerScanner(this); + this.running = true; + this.scanQuery = scanQuery; + receiver = + new KvBatchReceiver(this, scanQuery.getOrderType() == ScanOrderType.ORDER_STRICT); + sender = stub.scanBatch2(receiver); + sendQuery(this.scanQuery); // 发送查询请求 + } + + /** + * 构建流式查询迭代器 + * scanQuery进行拆分,启动多个流式请求,提升store的并发性 + * + * @param scanQuery scanQuery + * @param handler task handler + * @return data merger iterator + */ + public static KvCloseableIterator ofMerger( + HgScanQuery scanQuery, BiFunction handler) { + KvBatchScannerMerger merger; + if (scanQuery.getOrderType() == ScanOrderType.ORDER_STRICT) { + merger = new KvBatchScannerMerger.SortedScannerMerger( + new TaskSplitter(scanQuery, handler)); + } else { + merger = new KvBatchScannerMerger(new TaskSplitter(scanQuery, handler)); + } + merger.startTask(); + return merger; + } + + public static void scan( + HgStoreStreamGrpc.HgStoreStreamStub stub, + String graphName, + HgScanQuery scanQuery, + KvCloseableIterator iterator) { + new KvBatchScanner(stub, graphName, scanQuery, iterator); + } + + /** + * 发送查询请求 + * + * @param query 
scan query + */ + public void sendQuery(HgScanQuery query) { + synchronized (this.sender) { + if (running) { + this.sender.onNext( + ScanStreamBatchReq.newBuilder() + .setHeader( + Header.newBuilder().setGraph(graphName).build()) + .setQueryRequest(createQueryReq(query, 0)) + .build()); + } + } + } + + /** + * 发送应答 + */ + public void sendResponse() { + try { + sendResponse(currentSeqNo); + } catch (Exception e) { + log.error("exception", e); + } + } + + public void sendResponse(int seqNo) { + currentSeqNo = seqNo; + synchronized (this.sender) { + if (running) { + this.sender.onNext( + ScanStreamBatchReq.newBuilder() + .setHeader( + Header.newBuilder().setGraph(graphName).build()) + .setReceiptRequest( + responseBuilder.setTimes(seqNo).build()) + .build()); + } + } + } + + public void dataArrived(Supplier> supplier) throws + InterruptedException { + notifier.dataArrived(this, supplier); + } + + /** + * 数据接收结束 + */ + public void dataComplete() { + close(); + } + + // 流被关闭 + @Override + public void close() { + try { + if (notifier.unregisterScanner(this) < 0) { + notifier.dataArrived(this, NO_DATA); // 任务结束,唤醒队列 + } + } catch (InterruptedException e) { + log.error("exception ", e); + } + synchronized (this.sender) { + try { + if (running) { + sender.onCompleted(); + } + } catch (Exception e) { + } + running = false; + } + } + + /** + * 任务拆分器 + */ + static class TaskSplitter { + + final HgScanQuery scanQuery; + final BiFunction taskHandler; + private KvBatchScannerMerger notifier; + private Iterator prefixItr; + private int maxTaskSize = 0; // 最大并行任务数 + private int maxBatchSize = PropertyUtil.getInt("net.kv.scanner.batch.size", 1000); + // 每批次最大点数量 + private volatile boolean finished = false; + private volatile boolean splitting = false; + private volatile int nextKeySerialNo = 1; + + public TaskSplitter(HgScanQuery scanQuery, + BiFunction handler) { + this.scanQuery = scanQuery; + this.taskHandler = handler; + if (scanQuery.getScanMethod() == 
HgScanQuery.ScanMethod.PREFIX) { + if (scanQuery.getPrefixItr() != null) { + prefixItr = scanQuery.getPrefixItr(); + } else { + prefixItr = scanQuery.getPrefixList().listIterator(); + } + } + } + + public void setNotifier(KvBatchScannerMerger notifier) { + this.notifier = notifier; + } + + public boolean isFinished() { + return finished; + } + + /** + * 评估最大任务数 + */ + private void evaluateMaxTaskSize() { + if (maxTaskSize == 0) { // 根据第一批次任务,得到store数量,然后计算最大任务数 + if (scanQuery.getOrderType() == ScanOrderType.ORDER_STRICT) { + maxTaskSize = 1; // 点排序,每台机器一个流, 所有store流结束后才能启动其他流 + } else { + maxTaskSize = this.notifier.getScannerCount() * maxTaskSizePerStore; + } + maxBatchSize = this.notifier.getScannerCount() * maxBatchSize; // 每台机器最多1000条 + + /* + * Limit少于10000时启动一个流,节省网络带宽 + */ + if (scanQuery.getLimit() < maxBatchSize * 30L) { + maxTaskSize = 1; + } + } + } + + /** + * 拆分任务,任务拆分为多个grpc请求 + */ + public void splitTask() { + if (this.finished || this.splitting) { + return; + } + synchronized (this) { + if (this.finished) { + return; + } + this.splitting = true; + if (scanQuery.getScanMethod() == HgScanQuery.ScanMethod.PREFIX) { + if (prefixItr.hasNext() && + (maxTaskSize == 0 || notifier.getScannerCount() < maxTaskSize)) { + List keys = new ArrayList<>(maxBatchSize); + for (int i = 0; i < maxBatchSize && prefixItr.hasNext(); i++) { + keys.add(prefixItr.next().setSerialNo(nextKeySerialNo++)); + } + taskHandler.apply( + HgScanQuery.prefixOf(scanQuery.getTable(), keys, + scanQuery.getOrderType()), this.notifier); + // 评估最大任务数 + evaluateMaxTaskSize(); + if (this.notifier.getScannerCount() < this.maxTaskSize) { + splitTask(); // 未达到最大任务数,继续拆分 + } + } + this.finished = !prefixItr.hasNext(); + } else { + taskHandler.apply(scanQuery, this.notifier); + this.finished = true; + } + this.splitting = false; + } + } + + public synchronized void close() { + finished = true; + } + } + + /** + * 查询结果接收器 + */ + static class KvBatchReceiver implements StreamObserver { + + 
KvBatchScanner scanner; + boolean sortByVertex; + + KvBatchReceiver(KvBatchScanner scanner, boolean sortByVertex) { + this.scanner = scanner; + this.sortByVertex = sortByVertex; + } + + @Override + public void onNext(KvStream value) { + try { + ByteBuffer buffer = value.getStream(); + int seqNo = value.getSeqNo(); + boolean isOver = value.getOver(); + scanner.dataArrived( + () -> { + scanner.sendResponse(seqNo); + if (isOver) { + scanner.dataComplete(); + } + return new KVBytesIterator(buffer, sortByVertex, scanner); + }); + } catch (InterruptedException e) { + close(); + log.error("exception ", e); + throw new RuntimeException(e); + } + } + + @Override + public void onError(Throwable t) { + log.error("exception ", t); + close(); + } + + @Override + public void onCompleted() { + close(); + } + + private void close() { + if (scanner != null) { + scanner.close(); + } + } + } + + static class KVBytesIterator implements HgKvOrderedIterator { + + private final KvBatchScanner scanner; + KVByteBuffer buffer; + HgKvEntry entry; + // sequence no + int sn; + boolean hasSN; + + public KVBytesIterator(ByteBuffer buffer, boolean hasNo, KvBatchScanner scanner) { + this.buffer = new KVByteBuffer(buffer); + this.hasSN = hasNo; + this.scanner = scanner; + } + + @Override + public void close() { + // this.scanner.close(); + } + + @Override + public byte[] key() { + return entry.key(); + } + + @Override + public byte[] value() { + return entry.value(); + } + + @Override + public byte[] position() { + return new byte[0]; + } + + @Override + public void seek(byte[] position) { + throw new RuntimeException("not implemented"); + } + + @Override + public boolean hasNext() { + return buffer.hasRemaining(); + } + + @Override + public HgKvEntry next() { + if (hasSN) { + sn = buffer.getInt(); + } + entry = new GrpcKvEntryImpl(buffer.getBytes(), buffer.getBytes(), 0); + return entry; + } + + @Override + public long getSequence() { + return sn; + } + + @Override + public int 
compareTo(HgKvOrderedIterator o) { + return Long.compare(this.getSequence(), o.getSequence()); + } + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner5.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner5.java new file mode 100644 index 0000000000..2ee91f62b7 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScanner5.java @@ -0,0 +1,454 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Supplier; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.store.HgPageSize; +import org.apache.hugegraph.store.HgScanQuery; +import org.apache.hugegraph.store.client.HgStoreNodeSession; +import org.apache.hugegraph.store.client.type.HgStoreClientException; +import org.apache.hugegraph.store.client.util.Base58; +import org.apache.hugegraph.store.client.util.HgStoreClientConfig; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc; +import org.apache.hugegraph.store.grpc.stream.KvPageRes; +import org.apache.hugegraph.store.grpc.stream.ScanCancelRequest; +import org.apache.hugegraph.store.grpc.stream.ScanReceiptRequest; +import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2022/03/23 + * + * @version 5.0.0 + */ +@Slf4j +@NotThreadSafe +class KvBatchScanner5 { + + private final static HgStoreClientConfig storeClientConfig = HgStoreClientConfig.of(); + //private final static int HAVE_NEXT_TIMEOUT_SECONDS = storeClientConfig + // .getNetKvScannerHaveNextTimeout(); + private final static int HAVE_NEXT_TIMEOUT_SECONDS = 60; + private final static long PAGE_SIZE = storeClientConfig.getNetKvScannerPageSize(); + + public static KvCloseableIterator scan(HgStoreNodeSession nodeSession, + HgStoreStreamGrpc.HgStoreStreamStub stub, + HgScanQuery scanQuery) { + return new 
OrderConsumer(new OrderBroker(stub, scanQuery, nodeSession)); + } + + private enum OrderState { + NEW(0), + WORKING(1), + COMPLETED(10); + + int value; + + OrderState(int value) { + this.value = value; + } + } + + /*** Broker ***/ + private static class OrderBroker { + + public final OrderKeeper keeper = new OrderKeeper(); + private final HgScanQuery scanQuery; + private final StreamObserver requestObserver; + private final ScanStreamBatchReq.Builder reqBuilder; + private final ReentrantLock senderLock = new ReentrantLock(); + private final AtomicBoolean serverFinished = new AtomicBoolean(); + private final AtomicBoolean clientFinished = new AtomicBoolean(); + private final ScanReceiptRequest.Builder receiptReqBuilder = + ScanReceiptRequest.newBuilder(); + private final ScanCancelRequest cancelReq = ScanCancelRequest.newBuilder().build(); + private final HgStoreNodeSession nodeSession; + private final OrderAgent agent; + private final AtomicLong receivedCount = new AtomicLong(); + private final AtomicInteger receivedLastTimes = new AtomicInteger(); + private final BlockingQueue timesQueue = new LinkedBlockingQueue(); + String brokerId = ""; + private OrderState state = OrderState.NEW; + + OrderBroker(HgStoreStreamGrpc.HgStoreStreamStub stub, + HgScanQuery scanQuery, + HgStoreNodeSession nodeSession) { + + if (log.isDebugEnabled()) { + if (scanQuery.getPrefixList() != null && scanQuery.getPrefixList().size() > 0) { + brokerId = Base58.encode(scanQuery.getPrefixList().get(0).getKey()); + + log.debug( + "[ANALYSIS START] [{}] firstKey: {}, keyLength: {}, table: {}, node: {}" + , brokerId + , scanQuery.getPrefixList().get(0) + , scanQuery.getPrefixList().size() + , scanQuery.getTable() + , nodeSession.getStoreNode().getAddress()); + } + } + + this.scanQuery = scanQuery; + this.reqBuilder = KvBatchUtil.getRequestBuilder(nodeSession); + this.nodeSession = nodeSession; + this.agent = new OrderAgent(brokerId); + this.requestObserver = stub.scanBatch(agent); + + } + + List 
oneMore() { + + if (this.state == OrderState.NEW) { + synchronized (this.state) { + if (this.state == OrderState.NEW) { + this.makeADeal(); + this.state = OrderState.WORKING; + } + } + } else { + this.sendReceipt(); + } + + return this.keeper.pickUp(); + } + + void receipt(int times) { + this.timesQueue.offer(times); + receivedLastTimes.set(times); + } + + void sendReceipt() { + Integer buf = this.timesQueue.poll(); + + if (buf == null) { + buf = this.receivedLastTimes.get(); + } + + AtomicInteger timesBuf = new AtomicInteger(buf); + + if (!this.clientFinished.get()) { + this.send(() -> + getReqBuilder().setReceiptRequest( + this.receiptReqBuilder.setTimes(timesBuf.get()).build()) + .build() + ); + } + } + + private void makeADeal() { + this.send(() -> getReqBuilder() + .setQueryRequest(KvBatchUtil.createQueryReq(scanQuery, PAGE_SIZE)).build() + ); + } + + private void finish(long tookAmt) { + this.clientFinished.set(true); + if (log.isDebugEnabled()) { + log.debug("[ANALYSIS END] [{}] times: {}, received: {}, took: {}" + , this.brokerId + , this.receivedLastTimes.get() + , this.receivedCount.get() + , tookAmt + ); + } + if (this.receivedCount.get() != tookAmt) { + if (log.isDebugEnabled()) { + log.debug("[ANALYSIS END] [{}] times: {}, received: {}, took: {}" + , this.brokerId + , this.receivedLastTimes.get() + , this.receivedCount.get() + , tookAmt + ); + } + } + synchronized (this.state) { + if (this.state.value < OrderState.COMPLETED.value) { + this.send(() -> getReqBuilder().setCancelRequest(this.cancelReq).build()); + this.state = OrderState.COMPLETED; + } + } + } + + private ScanStreamBatchReq.Builder getReqBuilder() { + return this.reqBuilder.clearQueryRequest(); + } + + private void send(Supplier supplier) { + this.senderLock.lock(); + try { + if (!this.serverFinished.get()) { + this.requestObserver.onNext(supplier.get()); + } + Thread.yield(); + } finally { + this.senderLock.unlock(); + } + } + + private class OrderAgent implements StreamObserver { + + 
private final AtomicInteger count = new AtomicInteger(0); + private final AtomicBoolean over = new AtomicBoolean(false); + private final String agentId; + + OrderAgent(String agentId) { + this.agentId = agentId; + } + + @Override + public void onNext(KvPageRes value) { + if (log.isDebugEnabled()) { + log.debug("Scan [ {} ] [ {} ] times, received: [ {} ]" + , nodeSession.getStoreNode().getAddress(), value.getTimes(), + value.getDataList().size()); + } + + serverFinished.set(value.getOver()); + + List buffer = value.getDataList(); + count.addAndGet(buffer.size()); + if (log.isDebugEnabled()) { + if (value.getOver()) { + log.debug("[ANALYSIS OVER] [{}] count: {}", agentId, count); + } + } + keeper.receive(buffer, value.getTimes()); + this.over.set(value.getOver()); + this.checkOver(value.getTimes()); + } + + private void checkOver(int times) { + if (this.over.get()) { + requestObserver.onCompleted(); + keeper.done(times); + } + } + + @Override + public void onError(Throwable t) { + log.error("received server onError event, Throwable:", t); + keeper.shout(t); + } + + @Override + public void onCompleted() { + if (log.isDebugEnabled()) { + log.debug("received sever completed event."); + } + serverFinished.set(true); + + } + + } + + /*** Inventory Keeper ***/ + private class OrderKeeper { + + private final BlockingQueue>> queue = new LinkedBlockingQueue<>(); + private final ReentrantLock pickUpLock = new ReentrantLock(); + private final AtomicBoolean done = new AtomicBoolean(); + private final AtomicBoolean stop = new AtomicBoolean(); + private int timesOfOver; + private int lastTimes; + private Throwable serverErr; + + void receive(List data, int times) { + receivedCount.addAndGet(data.size()); + this.queue.offer(() -> data); + receipt(times); + + this.lastTimes = times; + } + + private List pickUp() { + Supplier> res; + + pickUpLock.lock(); + try { + + if (this.done.get()) { + if (this.stop.get()) { + log.warn("Invoking pickUp method after OrderKeeper has bean 
closing."); + } + res = this.queue.poll(); + if (res == null) { + res = () -> null; + } + } else { + res = this.queue.poll(HAVE_NEXT_TIMEOUT_SECONDS, TimeUnit.SECONDS); + if (res == null) { + if (this.done.get()) { + res = () -> null; + } else { + throw HgStoreClientException.of( + "Timeout, max time: " + HAVE_NEXT_TIMEOUT_SECONDS + + " seconds" + + ", isOver: " + this.done.get() + + ", isStop: " + this.stop.get() + + ", last-times: " + this.lastTimes + + ", over-times: " + this.timesOfOver); + } + } + + } + } catch (InterruptedException e) { + log.error( + "Failed to receive List from queue because of interruption of " + + "current thread [" + + Thread.currentThread().getName() + "]"); + + Thread.currentThread().interrupt(); + + throw HgStoreClientException.of( + "Failed to receive List from queue, cause by:", e); + } finally { + pickUpLock.unlock(); + } + + checkServerErr(); + return res.get(); + + } + + void done(int times) { + this.timesOfOver = times; + this.done.set(true); + this.queue.offer(() -> null); + } + + void shout(Throwable t) { + this.serverErr = t; + log.error("Failed to receive from sever", t); + this.queue.offer(() -> null); + } + + private void checkServerErr() { + if (this.serverErr != null) { + throw HgStoreClientException.of(this.serverErr); + } + } + } + + } + + /* iterator */ + private static class OrderConsumer implements KvCloseableIterator, HgPageSize { + + private final OrderBroker broker; + private final String consumerId; + private Iterator dataIterator; + private long tookCount = 0; + + OrderConsumer(OrderBroker broker) { + this.broker = broker; + consumerId = broker.brokerId; + } + + private Iterator getIterator() { + List list = this.broker.oneMore(); + + if (log.isDebugEnabled()) { + if (list != null && list.isEmpty()) { + log.debug("[ANALYSIS EMPTY] [{}] , tookCount: {}", consumerId, tookCount); + } + } + + if (list == null || list.isEmpty()) { + return null; + } else { + return list.iterator(); + } + } + + @Override + public 
void close() { + this.broker.finish(this.tookCount); + } + + @Override + public long getPageSize() { + return PAGE_SIZE; + } + + @Override + public boolean hasNext() { + + if (this.dataIterator == null) { + this.dataIterator = this.getIterator(); + } else { + if (this.dataIterator.hasNext()) { + return true; + } else { + this.dataIterator = this.getIterator(); + } + } + + if (this.dataIterator == null) { + if (log.isDebugEnabled()) { + log.debug("[ANALYSIS NULL -> FALSE] [{}] , tookCount: {}", consumerId, + tookCount); + } + return false; + } else { + if (log.isDebugEnabled()) { + if (!this.dataIterator.hasNext()) { + log.debug("[ANALYSIS hasNext -> FALSE] [{}] , tookCount: {}", consumerId, + tookCount); + } + } + return this.dataIterator.hasNext(); + } + + } + + @Override + public Kv next() { + if (this.dataIterator == null) { + if (!this.hasNext()) { + throw new NoSuchElementException(); + } + } + + if (log.isDebugEnabled()) { + tookCount++; + if (tookCount % 10000 == 0) { + log.debug("[ANALYSIS NEXT] [{}] , tookCount: {}", consumerId, tookCount); + } + } + return this.dataIterator.next(); + } + + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScannerMerger.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScannerMerger.java new file mode 100644 index 0000000000..4f666c973e --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchScannerMerger.java @@ -0,0 +1,334 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import org.apache.hugegraph.store.HgKvEntry; +import org.apache.hugegraph.store.HgKvIterator; +import org.apache.hugegraph.store.HgKvOrderedIterator; +import org.apache.hugegraph.store.HgPageSize; +import org.apache.hugegraph.store.client.util.PropertyUtil; + +import lombok.extern.slf4j.Slf4j; + +/** + * 批量查询结果归并,阻塞队列工作模式 + * 对请求任务的拆分,创建多个请求队列 + */ +@Slf4j +public class KvBatchScannerMerger implements KvCloseableIterator>, + HgPageSize { + + static int maxWaitCount = PropertyUtil.getInt("net.kv.scanner.wait.timeout", 60); + protected final BlockingQueue>> queue = + new LinkedBlockingQueue<>(); + private final KvBatchScanner.TaskSplitter taskSplitter; + private final List scanners = new CopyOnWriteArrayList<>(); + private Supplier> current = null; + + public KvBatchScannerMerger(KvBatchScanner.TaskSplitter splitter) { + this.taskSplitter = splitter; + splitter.setNotifier(this); + } + + public void startTask() { + taskSplitter.splitTask(); + } + + public void dataArrived(KvBatchScanner scanner, Supplier> supplier) + throws InterruptedException { + queue.put(supplier); + } + + @Override + public boolean hasNext() { + int waitTime = 0; + while (current == 
null) { + try { + // 队列有数据,还有活跃的查询器,任务未分配完 + if (queue.size() != 0 || scanners.size() > 0 || !taskSplitter.isFinished()) { + current = queue.poll(1, TimeUnit.SECONDS); //定期检查client是否被关闭了 + } else { + break; + } + if (current == null) { + // 超时重试 + sendTimeout(); + if (++waitTime > maxWaitCount) { + log.error( + "KvBatchScanner wait data timeout {}, closeables is {}, task is {}", + waitTime, scanners.size(), taskSplitter.isFinished()); + break; + } + } + } catch (InterruptedException e) { + log.error("hasNext interrupted {}", e); + throw new RuntimeException(e.getMessage(), e); + } + } + return current != null && current != KvBatchScanner.NO_DATA; + } + + @Override + public HgKvIterator next() { + HgKvIterator iterator = null; + if (current != null) { + iterator = current.get(); + } + current = null; + return iterator; + } + + @Override + public void close() { + taskSplitter.close(); + scanners.forEach(c -> c.close()); + } + + private void sendTimeout() { + scanners.forEach(v -> { + v.sendResponse(); + }); + } + + @Override + public long getPageSize() { + return 0; + } + + public void registerScanner(KvBatchScanner closeable) { + this.scanners.add(closeable); + } + + /** + * 返回值<0表示任务结束 + * + * @param closeable + * @return + */ + public int unregisterScanner(KvBatchScanner closeable) { + this.scanners.remove(closeable); + try { + taskSplitter.splitTask(); + } catch (Exception e) { + log.error("exception ", e); + } + return taskSplitter.isFinished() && this.scanners.size() == 0 ? 
+ -1 : this.scanners.size(); + } + + public int getScannerCount() { + return this.scanners.size(); + } + + /** + * 组装一个Scanner的多个有序迭代器为一个迭代器 + */ + static class ScannerDataQueue { + + private BlockingQueue>> queue; + private HgKvOrderedIterator iterator = null; + private int currentSN = 0; + private HgKvEntry entry; + + public ScannerDataQueue() { + queue = new LinkedBlockingQueue<>(); + } + + public int sn() { + return currentSN; + } + + public void add(Supplier> supplier) { + if (queue != null) { + queue.add(supplier); + } + } + + /** + * 迭代器是否有效,如果没有数据,等待数据到达 + * + * @return + */ + public boolean hasNext() { + while (entry == null && queue != null) { + try { + int waitTime = 0; + Supplier> current; + current = queue.poll(1, TimeUnit.SECONDS); //定期检查client是否被关闭了 + if (current == null) { + if (++waitTime > maxWaitCount) { + break; + } + } else if (current == KvBatchScanner.NO_DATA) { + queue = null; + break; + } else { + iterator = (HgKvOrderedIterator) current.get(); + if (iterator != null && iterator.hasNext()) { + moveNext(); + } else { + iterator = null; + } + } + } catch (InterruptedException e) { + log.error("hasNext interrupted {}", e); + throw new RuntimeException(e.getMessage(), e); + } + } + return entry != null; + } + + public HgKvEntry next() { + HgKvEntry current = entry; + moveNext(); + return current; + } + + private void moveNext() { + if (iterator.hasNext()) { + entry = iterator.next(); + currentSN = (int) iterator.getSequence(); + } else { + entry = null; + iterator = null; + } + } + } + + /** + * 对多个Scanner返回结果进行归并排序 + */ + static class SortedScannerMerger extends KvBatchScannerMerger { + + // 每一个流对应一个接收队列 + private final Map scannerQueues = + new ConcurrentHashMap<>(); + + public SortedScannerMerger(KvBatchScanner.TaskSplitter splitter) { + super(splitter); + queue.add(() -> { + // 对store返回结果进行归并排序 + return new HgKvIterator<>() { + private ScannerDataQueue iterator; + private int currentSN = 0; + private HgKvEntry entry; + + @Override + public 
byte[] key() { + return entry.key(); + } + + @Override + public byte[] value() { + return entry.value(); + } + + @Override + public void close() { + + } + + @Override + public byte[] position() { + return new byte[0]; + } + + @Override + public void seek(byte[] position) { + throw new RuntimeException("not implemented"); + } + + @Override + public boolean hasNext() { + // + if (iterator == null || !iterator.hasNext() || currentSN != iterator.sn()) { + iterator = selectIterator(); + } + + if (iterator != null) { + currentSN = iterator.sn(); + } + return iterator != null; + } + + @Override + public HgKvEntry next() { + entry = iterator.next(); + return entry; + } + }; + }); + } + + /** + * 从多个Scanner中挑选一个sn最小的迭代器 + * 如果Scanner没有数据,等待数据到达。 + * + * @return + */ + private ScannerDataQueue selectIterator() { + int sn = Integer.MAX_VALUE; + ScannerDataQueue current = null; + while (current == null && !scannerQueues.isEmpty()) { + Iterator itr = scannerQueues.keySet().iterator(); + while (itr.hasNext()) { + KvBatchScanner key = itr.next(); + ScannerDataQueue kvItr = scannerQueues.get(key); + if (!kvItr.hasNext()) { + scannerQueues.remove(key); + continue; + } + if (kvItr.sn() <= sn) { + sn = kvItr.sn(); + current = kvItr; + } + } + } + return current; + } + + @Override + public void registerScanner(KvBatchScanner scanner) { + super.registerScanner(scanner); + scannerQueues.putIfAbsent(scanner, new ScannerDataQueue()); + } + + @Override + public int unregisterScanner(KvBatchScanner scanner) { + dataArrived(scanner, KvBatchScanner.NO_DATA); + return super.unregisterScanner(scanner); + } + + @Override + public void dataArrived(KvBatchScanner scanner, + Supplier> supplier) { + scannerQueues.putIfAbsent(scanner, new ScannerDataQueue()); + scannerQueues.get(scanner).add(supplier); + } + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvBatchUtil.java 
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hugegraph.store.client.grpc;

import java.util.List;

import javax.annotation.concurrent.NotThreadSafe;

import org.apache.hugegraph.store.HgOwnerKey;
import org.apache.hugegraph.store.HgScanQuery;
import org.apache.hugegraph.store.client.HgStoreNodeSession;
import org.apache.hugegraph.store.client.util.HgStoreClientConst;
import org.apache.hugegraph.store.grpc.common.Header;
import org.apache.hugegraph.store.grpc.common.ScanMethod;
import org.apache.hugegraph.store.grpc.stream.ScanCondition;
import org.apache.hugegraph.store.grpc.stream.ScanQueryRequest;
import org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq;

import com.google.protobuf.ByteString;

import lombok.extern.slf4j.Slf4j;

/**
 * Helpers for translating an {@code HgScanQuery} into gRPC batch-scan requests.
 * created on 2022/04/23
 */
@Slf4j
@NotThreadSafe
class KvBatchUtil {

    static final byte[] EMPTY_POSITION = HgStoreClientConst.EMPTY_BYTES;

    /** Creates a batch-request builder pre-populated with the session's header. */
    static ScanStreamBatchReq.Builder getRequestBuilder(HgStoreNodeSession nodeSession) {
        return ScanStreamBatchReq.newBuilder().setHeader(getHeader(nodeSession));
    }

    /**
     * Builds a {@link ScanQueryRequest} from a scan query.
     *
     * @param scanQuery the high-level scan description
     * @param pageSize  page size to request from the store
     * @throws RuntimeException when the scan method is unsupported or the
     *                          query's key lists are invalid
     */
    static ScanQueryRequest createQueryReq(HgScanQuery scanQuery, long pageSize) {

        ScanQueryRequest.Builder qb = ScanQueryRequest.newBuilder();
        ScanCondition.Builder cb = ScanCondition.newBuilder();

        qb.setLimit(getLimit(scanQuery.getLimit()));
        qb.setPerKeyLimit(getLimit(scanQuery.getPerKeyLimit()));
        qb.setPerKeyMax(getLimit(scanQuery.getPerKeyMax()));

        switch (scanQuery.getScanMethod()) {
            case ALL:
                qb.setMethod(ScanMethod.ALL);
                break;
            case PREFIX:
                qb.setMethod(ScanMethod.PREFIX);
                addPrefixCondition(scanQuery, qb, cb);
                break;
            case RANGE:
                qb.setMethod(ScanMethod.RANGE);
                addRangeCondition(scanQuery, qb, cb);
                break;
            default:
                throw new RuntimeException("Unsupported ScanType: " + scanQuery.getScanMethod());
        }

        qb.setTable(scanQuery.getTable());
        qb.setPageSize(pageSize);
        qb.setQuery(toBs(scanQuery.getQuery()));
        qb.setScanType(scanQuery.getScanType());
        qb.setOrderType(scanQuery.getOrderType());
        qb.setSkipDegree(scanQuery.getSkipDegree());

        return qb.build();
    }

    // <= NO_LIMIT means "no limit"; map it to the widest value the proto carries.
    static long getLimit(long limit) {
        return limit <= HgStoreClientConst.NO_LIMIT ? Integer.MAX_VALUE : limit;
    }

    static Header getHeader(HgStoreNodeSession nodeSession) {
        return Header.newBuilder().setGraph(nodeSession.getGraphName()).build();
    }

    /** Adds one ScanCondition per prefix key; requires a non-empty prefix list. */
    static void addPrefixCondition(HgScanQuery scanQuery, ScanQueryRequest.Builder qb,
                                   ScanCondition.Builder cb) {
        List<HgOwnerKey> prefixList = scanQuery.getPrefixList();

        if (prefixList == null || prefixList.isEmpty()) {
            // BUGFIX: message grammar ("shouldn't to be" -> "shouldn't be")
            throw new RuntimeException(
                    "The prefix-list of ScanQuery shouldn't be invalid in ScanMethod.PREFIX " +
                    "mode.");
        }

        prefixList.forEach((e) -> {
            qb.addCondition(cb.clear()
                              .setPrefix(toBs(e.getKey()))
                              .setCode(e.getKeyCode())
                              .setSerialNo(e.getSerialNo())
                              .build()
            );
        });

    }

    /**
     * Adds one ScanCondition per (start, end) pair; both lists must be
     * non-empty and of equal length.
     */
    static void addRangeCondition(HgScanQuery scanQuery, ScanQueryRequest.Builder qb,
                                  ScanCondition.Builder cb) {
        List<HgOwnerKey> startList = scanQuery.getStartList();
        List<HgOwnerKey> endList = scanQuery.getEndList();

        if (startList == null || startList.isEmpty()) {
            // BUGFIX: message grammar ("shouldn't to be" -> "shouldn't be")
            throw new RuntimeException(
                    "The start-list of ScanQuery shouldn't be invalid in ScanMethod.RANGE mode" +
                    ".");
        }

        if (endList == null || endList.isEmpty()) {
            throw new RuntimeException(
                    "The end-list of ScanQuery shouldn't be invalid in ScanMethod.RANGE mode.");
        }

        if (startList.size() != endList.size()) {
            throw new RuntimeException("The size of start-list not equals end-list's.");
        }

        for (int i = 0, s = startList.size(); i < s; i++) {
            HgOwnerKey start = startList.get(i);
            HgOwnerKey end = endList.get(i);
            qb.addCondition(cb.clear().setCode(start.getKeyCode())
                              .setStart(toBs(start.getKey()))
                              .setEnd(toBs(end.getKey()))
                              .setSerialNo(start.getSerialNo())
                              .build()
            );
        }

    }

    // Kept for parity with the scanner classes; null-safe key normalization.
    static HgOwnerKey toOk(HgOwnerKey key) {
        return key == null ? HgStoreClientConst.EMPTY_OWNER_KEY : key;
    }

    // Null-safe byte[] -> ByteString conversion.
    static ByteString toBs(byte[] bytes) {
        return ByteString.copyFrom((bytes != null) ? bytes : HgStoreClientConst.EMPTY_BYTES);
    }

}
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import java.io.Closeable; +import java.util.Iterator; + +/** + * 2022/3/16 + */ +public interface KvCloseableIterator extends Iterator, Closeable { + + @Override + void close(); +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvListIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvListIterator.java new file mode 100644 index 0000000000..ef807c550e --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvListIterator.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Iterator; +import java.util.List; + +/** + * 2022/4/6 + */ +class KvListIterator implements KvCloseableIterator { + + private final Iterator iterator; + + KvListIterator(List list) { + this.iterator = list.iterator(); + } + + @Override + public void close() { + /*Nothing to do.*/ + } + + @Override + public boolean hasNext() { + return this.iterator.hasNext(); + } + + @Override + public T next() { + return this.iterator.next(); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvOneShotScanner.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvOneShotScanner.java new file mode 100644 index 0000000000..7375a05f07 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvOneShotScanner.java @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Iterator; +import java.util.List; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.store.HgKvStore; +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.HgPageSize; +import org.apache.hugegraph.store.HgSeekAble; +import org.apache.hugegraph.store.client.HgStoreNodeSession; +import org.apache.hugegraph.store.client.util.HgStoreClientConfig; +import org.apache.hugegraph.store.client.util.HgStoreClientConst; +import org.apache.hugegraph.store.client.util.HgStoreClientUtil; +import org.apache.hugegraph.store.grpc.common.Header; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.common.ScanMethod; +import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc.HgStoreStreamBlockingStub; +import org.apache.hugegraph.store.grpc.stream.ScanStreamReq; + +import com.google.protobuf.ByteString; + +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2021/12/1 + */ +@Slf4j +@NotThreadSafe +class KvOneShotScanner implements KvCloseableIterator, HgPageSize, HgSeekAble { + + private static final HgStoreClientConfig storeClientConfig = HgStoreClientConfig.of(); + private final HgStoreNodeSession session; + private final HgStoreStreamBlockingStub stub; + private final ScanStreamReq.Builder reqBuilder = ScanStreamReq.newBuilder(); + private final String table; + private final HgOwnerKey startKey; + private final HgOwnerKey endKey; + private final HgOwnerKey prefix; + private final ScanMethod scanMethod; + private final long limit; + private final int partition; + private final int scanType; + private final byte[] query; + private final int pageSize; + private ScanStreamReq req; + private Iterator iterator; + private List list = null; + private boolean in = true; + private byte[] nodePosition = HgStoreClientConst.EMPTY_BYTES; + + private KvOneShotScanner(ScanMethod scanMethod, HgStoreNodeSession 
session, + HgStoreStreamBlockingStub stub, + String table, HgOwnerKey prefix, HgOwnerKey startKey, + HgOwnerKey endKey, long limit, + int partition, int scanType, byte[] query) { + this.scanMethod = scanMethod; + this.session = session; + this.stub = stub; + this.table = table; + this.startKey = toOk(startKey); + this.endKey = toOk(endKey); + this.prefix = toOk(prefix); + this.partition = partition; + this.scanType = scanType; + this.query = query != null ? query : HgStoreClientConst.EMPTY_BYTES; + this.limit = limit <= HgStoreClientConst.NO_LIMIT ? Integer.MAX_VALUE : + limit; // <=0 means no limit + this.pageSize = storeClientConfig.getNetKvScannerPageSize(); + + } + + public static KvCloseableIterator scanAll(HgStoreNodeSession session, + HgStoreStreamBlockingStub stub, + String table, long limit, byte[] query) { + return new KvOneShotScanner(ScanMethod.ALL, session, stub, table, null, null, null, limit, + -1, HgKvStore.SCAN_ANY, + query); + } + + public static KvCloseableIterator scanPrefix(HgStoreNodeSession session, + HgStoreStreamBlockingStub stub, + String table, HgOwnerKey prefix, long limit, + byte[] query) { + return new KvOneShotScanner(ScanMethod.PREFIX, session, stub, table, prefix, null, null, + limit, + prefix.getKeyCode(), HgKvStore.SCAN_PREFIX_BEGIN, query); + } + + public static KvCloseableIterator scanRange(HgStoreNodeSession nodeSession, + HgStoreStreamBlockingStub stub, + String table, HgOwnerKey startKey, + HgOwnerKey endKey, long limit, + int scanType, byte[] query) { + return new KvOneShotScanner(ScanMethod.RANGE, nodeSession, stub, table, null, startKey, + endKey, limit, + startKey.getKeyCode(), scanType, query); + } + + static HgOwnerKey toOk(HgOwnerKey key) { + return key == null ? HgStoreClientConst.EMPTY_OWNER_KEY : key; + } + + static ByteString toBs(byte[] bytes) { + return ByteString.copyFrom((bytes != null) ? 
bytes : HgStoreClientConst.EMPTY_BYTES); + } + + private Header getHeader(HgStoreNodeSession nodeSession) { + return Header.newBuilder().setGraph(nodeSession.getGraphName()).build(); + } + + private void createReq() { + this.req = this.reqBuilder + .setHeader(this.getHeader(this.session)) + .setMethod(this.scanMethod) + .setTable(this.table) + .setStart(toBs(this.startKey.getKey())) + .setEnd(toBs(this.endKey.getKey())) + .setLimit(this.limit) + .setPrefix(toBs(this.prefix.getKey())) + .setCode(this.partition) + .setScanType(this.scanType) + .setQuery(toBs(this.query)) + .setPageSize(pageSize) + .setPosition(toBs(this.nodePosition)) + .build(); + } + + private void init() { + + if (this.iterator == null) { + this.createReq(); + this.list = this.stub.scanOneShot(this.req).getDataList(); + this.iterator = this.list.iterator(); + } + + } + + @Override + public boolean hasNext() { + if (!this.in) { + return false; + } + if (this.iterator == null) { + this.init(); + } + return this.iterator.hasNext(); + } + + @Override + public Kv next() { + if (this.iterator == null) { + this.init(); + } + return this.iterator.next(); + } + + @Override + public long getPageSize() { + return this.limit; + } + + @Override + public boolean isPageEmpty() { + return !this.iterator.hasNext(); + } + + @Override + public byte[] position() { + return HgStoreClientUtil.toBytes(this.session.getStoreNode().getNodeId().longValue()); + } + + @Override + public void seek(byte[] position) { + if (position == null || position.length < Long.BYTES) { + return; + } + byte[] nodeIdBytes = new byte[Long.BYTES]; + System.arraycopy(position, 0, nodeIdBytes, 0, Long.BYTES); + long nodeId = this.session.getStoreNode().getNodeId().longValue(); + long pId = HgStoreClientUtil.toLong(nodeIdBytes); + this.in = nodeId >= pId; + if (this.in && nodeId == pId) { + this.nodePosition = new byte[position.length - Long.BYTES]; + System.arraycopy(position, Long.BYTES, this.nodePosition, 0, this.nodePosition.length); + } else 
{ + this.nodePosition = HgStoreClientConst.EMPTY_BYTES; + } + } + + @Override + public void close() { + //TODO: implements + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvPageScanner.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvPageScanner.java new file mode 100644 index 0000000000..2879a50466 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/KvPageScanner.java @@ -0,0 +1,311 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicBoolean; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hugegraph.store.HgKvStore; +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.HgPageSize; +import org.apache.hugegraph.store.HgSeekAble; +import org.apache.hugegraph.store.client.HgStoreNodeSession; +import org.apache.hugegraph.store.client.util.HgBufferProxy; +import org.apache.hugegraph.store.client.util.HgStoreClientConfig; +import org.apache.hugegraph.store.client.util.HgStoreClientConst; +import org.apache.hugegraph.store.client.util.HgStoreClientUtil; +import org.apache.hugegraph.store.client.util.MetricX; +import org.apache.hugegraph.store.grpc.common.Header; +import org.apache.hugegraph.store.grpc.common.Kv; +import org.apache.hugegraph.store.grpc.common.ScanMethod; +import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc.HgStoreStreamStub; +import org.apache.hugegraph.store.grpc.stream.KvPageRes; +import org.apache.hugegraph.store.grpc.stream.ScanStreamReq; +import org.apache.hugegraph.store.grpc.stream.SelectParam; + +import com.google.protobuf.ByteString; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2021/10/21 + * + * @version 0.6.1 lynn.bond@hotamil.com on 2022/04/05 + */ +@Slf4j +@NotThreadSafe +class KvPageScanner implements KvCloseableIterator, HgPageSize, HgSeekAble { + + private static final HgStoreClientConfig clientConfig = HgStoreClientConfig.of(); + private static final int nextTimeout = clientConfig.getNetKvScannerHaveNextTimeout(); + private final HgStoreNodeSession session; + private final HgStoreStreamStub stub; + private final AtomicBoolean completed = new 
AtomicBoolean(false); + private final SelectParam.Builder selectBuilder = SelectParam.newBuilder(); + private final BlockingQueue reqQueue = new LinkedBlockingQueue<>(); + private int pageSize = clientConfig.getNetKvScannerPageSize(); + private HgBufferProxy> proxy; + private Iterator iterator; + private StreamObserver observer; + private ScanStreamReq.Builder reqBuilder = ScanStreamReq.newBuilder(); + private boolean in = true; + private byte[] nodePosition = HgStoreClientConst.EMPTY_BYTES; + + private KvPageScanner(ScanMethod scanMethod, HgStoreNodeSession session, HgStoreStreamStub stub, + String table, + HgOwnerKey prefix, HgOwnerKey startKey, HgOwnerKey endKey, long limit, + int partition, + int scanType, byte[] query) { + this.session = session; + this.stub = stub; + this.pageSize = clientConfig.getNetKvScannerPageSize(); + this.reqBuilder.setHeader(this.getHeader(this.session)) + .setMethod(scanMethod) + .setTable(table) + .setStart(toBs(toOk(startKey).getKey())) + .setEnd(toBs(toOk(endKey).getKey())) + .setLimit(limit <= HgStoreClientConst.NO_LIMIT ? Integer.MAX_VALUE : limit) + .setPrefix(toBs(toOk(prefix).getKey())) + .setCode(partition) + .setScanType(scanType) + .setQuery(toBs(query != null ? 
query : HgStoreClientConst.EMPTY_BYTES)) + .setPageSize(pageSize) + .setPosition(toBs(this.nodePosition)); + this.init(); + } + + public KvPageScanner(HgStoreNodeSession session, HgStoreStreamStub stub, + ScanStreamReq.Builder reqBuilder) { + this.session = session; + this.stub = stub; + reqBuilder.setPageSize(pageSize); + reqBuilder.setPosition(toBs(this.nodePosition)); + this.reqBuilder = reqBuilder; + this.init(); + } + + public static KvCloseableIterator scanAll(HgStoreNodeSession nodeSession, + HgStoreStreamStub stub, String table, + long limit, byte[] query) { + return new KvPageScanner(ScanMethod.ALL, nodeSession, stub, table, null, null, null, limit, + -1, HgKvStore.SCAN_ANY, query); + } + + public static KvCloseableIterator scanPrefix(HgStoreNodeSession nodeSession, + HgStoreStreamStub stub, + String table, HgOwnerKey prefix, long limit, + byte[] query) { + return new KvPageScanner(ScanMethod.PREFIX, nodeSession, stub, table, prefix, null, null, + limit, + prefix.getKeyCode(), HgKvStore.SCAN_PREFIX_BEGIN, query); + } + + public static KvCloseableIterator scanRange(HgStoreNodeSession nodeSession, + HgStoreStreamStub stub, + String table, HgOwnerKey startKey, + HgOwnerKey endKey, long limit, + int scanType, byte[] query) { + return new KvPageScanner(ScanMethod.RANGE, nodeSession, stub, table, null, startKey, endKey, + limit, + startKey.getKeyCode(), scanType, query); + } + + static HgOwnerKey toOk(HgOwnerKey key) { + return key == null ? HgStoreClientConst.EMPTY_OWNER_KEY : key; + } + + static ByteString toBs(byte[] bytes) { + return ByteString.copyFrom((bytes != null) ? 
bytes : HgStoreClientConst.EMPTY_BYTES); + } + + private ScanStreamReq createScanReq() { + return this.reqBuilder.setPosition(toBs(this.nodePosition)).build(); + } + + private ScanStreamReq createStopReq() { + return this.reqBuilder.setHeader(this.getHeader(this.session)).setCloseFlag(1).build(); + } + + private void init() { + this.proxy = HgBufferProxy.of(() -> this.serverScan()); + this.observer = this.stub.scan(new ServeObserverImpl()); + + } + + /*** Server Event End ***/ + + private void serverScan() { + if (this.completed.get()) { + this.proxy.close(); + return; + } + if (this.proxy.isClosed()) { + return; + } + this.send(this.createScanReq()); + } + + private void stopSever() { + this.send(this.createStopReq()); + } + + private void send(ScanStreamReq req) { + if (!this.completed.get()) { + try { + this.observer.onNext(req); + } catch (IllegalStateException | IllegalArgumentException e) { + + } catch (Exception e) { + throw e; + } + } + } + + private void clientError(String msg) { + this.observer.onError(GrpcUtil.toErr(msg)); + } + + /*** Iterator ***/ + @Override + public boolean hasNext() { + if (!this.in) { + return false; + } + // QUESTION: After `this.iterator.hasNext()` evaluates to false, + // no further attempts should make to reconstruct the iterator. 
+ if (this.iterator != null && this.iterator.hasNext()) { + return true; + } + long start = 0; + boolean debugEnabled = log.isDebugEnabled(); + if (debugEnabled) { + start = System.nanoTime(); + } + List data = this.proxy.receive(nextTimeout, (sec) -> { + String msg = "failed to receive data from net scanning, because of timeout [ " + sec + + " ] sec."; + log.error(msg); + this.clientError(msg); + throw new RuntimeException(msg); + }); + if (debugEnabled) { + MetricX.plusIteratorWait(System.nanoTime() - start); + } + if (data != null) { + this.iterator = data.iterator(); + } else { + this.iterator = Collections.emptyIterator(); + } + return this.iterator.hasNext(); + } + + @Override + public Kv next() { + if (this.iterator == null && !this.hasNext()) { + throw new NoSuchElementException(); + } + return this.iterator.next(); + } + + @Override + public long getPageSize() { + return this.pageSize; + } + + @Override + public boolean isPageEmpty() { + return !this.iterator.hasNext(); + } + + @Override + public byte[] position() { + return HgStoreClientUtil.toBytes(this.session.getStoreNode().getNodeId().longValue()); + } + + @Override + public void seek(byte[] position) { + if (position == null || position.length < Long.BYTES) { + return; + } + byte[] nodeIdBytes = new byte[Long.BYTES]; + System.arraycopy(position, 0, nodeIdBytes, 0, Long.BYTES); + long nodeId = this.session.getStoreNode().getNodeId().longValue(); + long pId = HgStoreClientUtil.toLong(nodeIdBytes); + this.in = nodeId >= pId; + if (this.in && nodeId == pId) { + this.nodePosition = new byte[position.length - Long.BYTES]; + System.arraycopy(position, Long.BYTES, this.nodePosition, 0, this.nodePosition.length); + } else { + this.nodePosition = HgStoreClientConst.EMPTY_BYTES; + } + } + + @Override + public void close() { + this.stopSever(); + } + + /*** commons ***/ + private Header getHeader(HgStoreNodeSession nodeSession) { + return Header.newBuilder().setGraph(nodeSession.getGraphName()).build(); + } + + 
/*** Server event Start ***/ + private class ServeObserverImpl implements StreamObserver { + + @Override + public void onNext(KvPageRes value) { + if (value.getOver()) { + completed.set(true); + observer.onCompleted(); + } + proxy.send(value.getDataList()); + if (completed.get()) { + proxy.close(); + } + } + + @Override + public void onError(Throwable t) { + completed.set(true); + try { + observer.onCompleted(); + } catch (Exception e) { + log.warn("failed to invoke requestObserver.onCompleted(), reason:", e.getMessage()); + } + proxy.close(); + proxy.setError(t); + log.error("failed to complete scan of session: " + session, t); + } + + @Override + public void onCompleted() { + completed.set(true); + proxy.close(); + } + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/NotifyingExecutor.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/NotifyingExecutor.java new file mode 100644 index 0000000000..491ad94b3d --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/NotifyingExecutor.java @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import org.apache.hugegraph.store.client.HgStoreNodeManager; +import org.apache.hugegraph.store.client.HgStoreNodeSession; +import org.apache.hugegraph.store.client.HgStoreNotice; +import org.apache.hugegraph.store.client.type.HgNodeStatus; +import org.apache.hugegraph.store.client.type.HgStoreClientException; +import org.apache.hugegraph.store.grpc.common.ResStatus; +import org.apache.hugegraph.store.grpc.session.FeedbackRes; +import org.apache.hugegraph.store.grpc.session.PartitionFaultResponse; +import org.apache.hugegraph.store.grpc.session.PartitionFaultType; +import org.apache.hugegraph.store.grpc.session.PartitionLeader; + +import com.google.protobuf.util.JsonFormat; + +import lombok.extern.slf4j.Slf4j; + +/** + * 2021/11/18 + * + * @version 0.3.0 on 2022/01/27 + */ +@Slf4j +final class NotifyingExecutor { + + private final String graphName; + private final HgStoreNodeManager nodeManager; + private final HgStoreNodeSession nodeSession; + + private Map> partitionFaultHandlers; + + NotifyingExecutor(String graphName, HgStoreNodeManager nodeManager, + HgStoreNodeSession nodeSession) { + this.graphName = graphName; + this.nodeManager = nodeManager; + this.nodeSession = nodeSession; + } + + private void initHandler() { + this.partitionFaultHandlers = new HashMap<>(); + + this.partitionFaultHandlers.put( + PartitionFaultType.PARTITION_FAULT_TYPE_NOT_LEADER, notifyPartitionLeaderConsumer() + ); + + } + + Optional invoke(Supplier supplier, Function okFunction) { + FeedbackRes res = null; + + try { + res = supplier.get(); + } catch (Throwable t) { + log.error("Failed to invoke: " + supplier.toString() + ", caused " + + "by:", t); + handleErr(t); + throw err(t); + } + + if 
(log.isDebugEnabled()) { + log.debug("gRPC [{}] status: {}" + , this.nodeSession.getStoreNode().getAddress(), res.getStatus().getCode()); + } + + Optional option = null; + + switch (res.getStatus().getCode()) { + case RES_CODE_OK: + option = Optional.of(okFunction.apply(res)); + break; + case RES_CODE_FAIL: + handleFail(res); + break; + case RES_CODE_NOT_EXIST: + break; + case RES_CODE_EXCESS: + normalFail(res); + break; + default: + log.error("gRPC [{}] status-msg: {}" + , nodeSession.getStoreNode().getAddress(), res.getStatus().getMsg()); + } + + if (option == null) { + option = Optional.empty(); + } + + return option; + } + + private void handleErr(Throwable t) { + try { + notifyErrConsumer(HgNodeStatus.NOT_WORK).accept(t); + } catch (Throwable tt) { + log.error("Failed to notify error to HgStoreNodeNotifier, cause:", tt); + } + } + + private void handleFail(FeedbackRes feedbackRes) { + Supplier exSup; + + if ( + (exSup = handlePartitionFault(feedbackRes)) != null + // add more fault-handler here. 
+ || (exSup = defaultExceptionSupplier(feedbackRes)) != null + ) { + throw exSup.get(); + } + + } + + private void normalFail(FeedbackRes res) { + ResStatus status = res.getStatus(); + HgStoreClientException ex; + try { + String msg = JsonFormat.printer().omittingInsignificantWhitespace() + .print(res); + ex = err(msg); + } catch (Exception e) { + ex = err(status.getCode() + ", " + status.getMsg()); + } + throw ex; + } + + private Supplier defaultExceptionSupplier(FeedbackRes feedbackRes) { + return () -> HgStoreClientException.of(err(feedbackRes.getStatus().getMsg())); + } + + private Supplier handlePartitionFault( + FeedbackRes feedbackRes) { + PartitionFaultResponse res = feedbackRes.getPartitionFaultResponse(); + if (res == null) { + return null; + } + if (this.partitionFaultHandlers == null) { + initHandler(); + } + Consumer consumer = + this.partitionFaultHandlers.get(res.getFaultType()); + if (consumer == null) { + consumer = notifyPartitionConsumer(); + } + String msg = res.toString(); + if (msg == null || msg.length() == 0) { + msg = feedbackRes.getStatus().getMsg(); + } + consumer.accept(res); + String finalMsg = msg; + return () -> HgStoreClientException.of( + err(res.getFaultType() + ", " + + finalMsg)); + } + + private HgStoreClientException err(String msg) { + return err(msg, null); + } + + private HgStoreClientException err(Throwable t) { + return err(t.getMessage(), t); + } + + private HgStoreClientException err(String reason, Throwable t) { + StringBuilder builder = new StringBuilder().append( + "{sessionInfo: {" + this.nodeSession.toString() + + "}, reason: "); + if (reason.startsWith("{")) { + builder.append(reason); + } else { + builder.append("\"").append(reason).append("\""); + } + String msg = builder.append("}").toString(); + if (t != null) { + return HgStoreClientException.of(msg, t); + } + return HgStoreClientException.of(msg); + } + + private Consumer notifyPartitionLeaderConsumer() { + return res -> { + log.info("partitions' leader have 
changed: [partitionId - leaderId] "); + nodeManager.notifying( + this.graphName, + HgStoreNotice.of(this.nodeSession.getStoreNode().getNodeId(), + HgNodeStatus.NOT_PARTITION_LEADER) + .setPartitionLeaders( + res.getPartitionLeadersList() + .stream() + .peek((e) -> { + log.info("[{} - {}]", e.getPartitionId(), + e.getLeaderId()); + } + ) + .collect( + Collectors.toMap( + PartitionLeader::getPartitionId, + PartitionLeader::getLeaderId + ) + ) + ) + ); + }; + } + + private Consumer notifyPartitionConsumer() { + return notifyPartitionConsumer(HgNodeStatus.PARTITION_COMMON_FAULT); + } + + private Consumer notifyPartitionConsumer(HgNodeStatus status) { + return res -> { + nodeManager.notifying( + this.graphName, + HgStoreNotice.of(this.nodeSession.getStoreNode().getNodeId(), status) + .setPartitionIds(res.getPartitionIdsList()) + ); + }; + } + + private Consumer notifyErrConsumer(HgNodeStatus status) { + return t -> { + nodeManager.notifying( + this.graphName, + HgStoreNotice.of(this.nodeSession.getStoreNode().getNodeId(), status, + t.getMessage()) + ); + }; + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/ScanUtil.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/ScanUtil.java new file mode 100644 index 0000000000..289e65f95b --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/ScanUtil.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.grpc; + +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.client.HgStoreNodeSession; +import org.apache.hugegraph.store.client.util.HgStoreClientConst; +import org.apache.hugegraph.store.grpc.common.Header; + +import com.google.protobuf.ByteString; + +public class ScanUtil { + + public static Header getHeader(HgStoreNodeSession nodeSession) { + return Header.newBuilder().setGraph(nodeSession.getGraphName()).build(); + } + + public static HgOwnerKey toOk(HgOwnerKey key) { + return key == null ? HgStoreClientConst.EMPTY_OWNER_KEY : key; + } + + public static ByteString toBs(byte[] bytes) { + return ByteString.copyFrom((bytes != null) ? bytes : HgStoreClientConst.EMPTY_BYTES); + } + + public static ByteString getHgOwnerKey(HgOwnerKey ownerKey) { + return toBs(toOk(ownerKey).getKey()); + } + + public static byte[] getQuery(byte[] query) { + return query != null ? query : HgStoreClientConst.EMPTY_BYTES; + } + + public static long getLimit(long limit) { + return limit <= HgStoreClientConst.NO_LIMIT ? 
Integer.MAX_VALUE : limit; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SeekAbleIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SeekAbleIterator.java new file mode 100644 index 0000000000..d8078910a3 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SeekAbleIterator.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Iterator; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import org.apache.hugegraph.store.HgSeekAble; +import org.apache.hugegraph.store.client.util.HgAssert; + +/** + * 2022/3/11 + */ +class SeekAbleIterator implements Iterator, HgSeekAble { + + private final Iterator iterator; + private final Consumer seeker; + private final Supplier positioner; + + private SeekAbleIterator(Iterator iterator, Supplier positioner, + Consumer seeker) { + this.iterator = iterator; + this.positioner = positioner; + this.seeker = seeker; + } + + public static SeekAbleIterator of(Iterator iterator, Supplier positioner, + Consumer seeker) { + HgAssert.isArgumentNotNull(iterator, "iterator"); + HgAssert.isArgumentNotNull(positioner, "positioner"); + HgAssert.isArgumentNotNull(seeker, "seeker"); + return new SeekAbleIterator(iterator, positioner, seeker); + } + + @Override + public byte[] position() { + return this.positioner.get(); + } + + @Override + public void seek(byte[] position) { + this.seeker.accept(position); + } + + @Override + public boolean hasNext() { + return this.iterator.hasNext(); + } + + @Override + public E next() { + return this.iterator.next(); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SwitchingExecutor.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SwitchingExecutor.java new file mode 100644 index 0000000000..4281481825 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/SwitchingExecutor.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.grpc; + +import java.util.Optional; +import java.util.function.Supplier; + +import javax.annotation.concurrent.ThreadSafe; + +/** + * 2021/12/1 + */ +@ThreadSafe +final class SwitchingExecutor { + + private SwitchingExecutor() { + } + + static SwitchingExecutor of() { + return new SwitchingExecutor(); + } + + Optional invoke(Supplier switcher, Supplier trueSupplier, + Supplier falseSupplier) { + Optional option = null; + + if (switcher.get()) { + option = Optional.of(trueSupplier.get()); + } else { + option = Optional.of(falseSupplier.get()); + } + if (option == null) { + option = Optional.empty(); + } + + return option; + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/type/HgNodeStatus.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/type/HgNodeStatus.java new file mode 100644 index 0000000000..374e240c70 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/type/HgNodeStatus.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
/**
 * Status of a Store-Node as observed by the client side.
 *
 * created on 2021/10/26
 */
public enum HgNodeStatus {

    UNKNOWN(0, "UNKNOWN"),
    NOT_EXIST(100, "NOT_EXIST"), // Failed to apply for an instance via node-id from NodeManager.
    NOT_ONLINE(105, "NOT_ONLINE"), // Failed to connect to Store-Node at the first time.
    NOT_WORK(110, "NOT_WORK"), // When a Store-Node to be not work anymore.

    PARTITION_COMMON_FAULT(200, "PARTITION_COMMON_FAULT"),
    NOT_PARTITION_LEADER(205,
                         "NOT_PARTITION_LEADER"); // When a Store-Node is not a specific
    // partition leader.

    // Numeric code of this node state.
    private final int status;
    // Human-readable name of this node state.
    private final String name;

    HgNodeStatus(int status, String name) {
        this.status = status;
        this.name = name;
    }

    /**
     * Numeric status code of this node state.
     * (Accessor added: the fields were previously stored but unreadable.)
     */
    public int getStatus() {
        return this.status;
    }

    /** Human-readable name of this node state. */
    public String getName() {
        return this.name;
    }
}
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.type; + +/** + * created on 2021/10/27 + */ +public class HgStoreClientException extends RuntimeException { + + public HgStoreClientException(String msg) { + super(msg); + } + + public HgStoreClientException(Throwable cause) { + super(cause); + } + + public HgStoreClientException(String message, Throwable cause) { + super(message, cause); + } + + public static HgStoreClientException of(String msg) { + return new HgStoreClientException(msg); + } + + public static HgStoreClientException of(String msg, Throwable cause) { + return new HgStoreClientException(msg, cause); + } + + public static HgStoreClientException of(Throwable cause) { + return new HgStoreClientException(cause); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/Base58.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/Base58.java new file mode 100644 index 0000000000..7fbe8c4e96 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/Base58.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
/**
 * Base58 codec using the Bitcoin alphabet; no checksum is appended.
 * Leading zero bytes round-trip as leading '1' characters.
 *
 * TODO: refer license later, 78% match, maybe refer to google? ensure it later
 */
public class Base58 {

    public static final char[] ALPHABET =
            "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz".toCharArray();
    // Maps an ASCII code point to its alphabet index, or -1 if not in the alphabet.
    private static final int[] INDEXES = new int[128];

    static {
        for (int i = 0; i < INDEXES.length; i++) {
            INDEXES[i] = -1;
        }
        for (int i = 0; i < ALPHABET.length; i++) {
            INDEXES[ALPHABET[i]] = i;
        }
    }

    /**
     * Encodes the given bytes in base58. No checksum is appended.
     * Leading zero bytes are preserved as leading '1' characters.
     */
    public static String encode(byte[] input) {
        if (input.length == 0) {
            return "";
        }
        // Work on a copy: divmod58 destroys its argument in place.
        input = copyOfRange(input, 0, input.length);
        // Count leading zeroes.
        int zeroCount = 0;
        while (zeroCount < input.length && input[zeroCount] == 0) {
            ++zeroCount;
        }
        // The actual encoding: repeated in-place division by 58.
        byte[] temp = new byte[input.length * 2];
        int j = temp.length;

        int startAt = zeroCount;
        while (startAt < input.length) {
            byte mod = divmod58(input, startAt);
            if (input[startAt] == 0) {
                ++startAt;
            }
            temp[--j] = (byte) ALPHABET[mod];
        }

        // Strip extra '1' if there are some after decoding.
        while (j < temp.length && temp[j] == ALPHABET[0]) {
            ++j;
        }
        // Add as many leading '1' as there were leading zeros.
        while (--zeroCount >= 0) {
            temp[--j] = (byte) ALPHABET[0];
        }

        byte[] output = copyOfRange(temp, j, temp.length);
        return new String(output, StandardCharsets.US_ASCII);
    }

    /**
     * Decodes a base58 string into its byte representation.
     *
     * @throws IllegalArgumentException on any character outside the alphabet
     */
    public static byte[] decode(String input) throws IllegalArgumentException {
        if (input.isEmpty()) {
            return new byte[0];
        }
        byte[] input58 = new byte[input.length()];
        // Transform the String to a base58 byte sequence
        for (int i = 0; i < input.length(); ++i) {
            char c = input.charAt(i);

            int digit58 = -1;
            // Fix: the former "c >= 0 &&" guard was always true (a Java char
            // is unsigned); only the upper bound matters before indexing.
            if (c < 128) {
                digit58 = INDEXES[c];
            }
            if (digit58 < 0) {
                throw new IllegalArgumentException("Illegal character " + c + " at " + i);
            }

            input58[i] = (byte) digit58;
        }
        // Count leading zeroes
        int zeroCount = 0;
        while (zeroCount < input58.length && input58[zeroCount] == 0) {
            ++zeroCount;
        }
        // The decoding: repeated in-place division by 256.
        byte[] temp = new byte[input.length()];
        int j = temp.length;

        int startAt = zeroCount;
        while (startAt < input58.length) {
            byte mod = divmod256(input58, startAt);
            if (input58[startAt] == 0) {
                ++startAt;
            }

            temp[--j] = mod;
        }
        // Do not add extra leading zeroes, move j to first non-null byte.
        while (j < temp.length && temp[j] == 0) {
            ++j;
        }

        return copyOfRange(temp, j - zeroCount, temp.length);
    }

    /** Decodes into a non-negative BigInteger (magnitude interpretation). */
    public static BigInteger decodeToBigInteger(String input) throws IllegalArgumentException {
        return new BigInteger(1, decode(input));
    }

    //
    // number -> number / 58, returns number % 58
    //
    private static byte divmod58(byte[] number, int startAt) {
        int remainder = 0;
        for (int i = startAt; i < number.length; i++) {
            int digit256 = (int) number[i] & 0xFF;
            int temp = remainder * 256 + digit256;

            number[i] = (byte) (temp / 58);

            remainder = temp % 58;
        }

        return (byte) remainder;
    }

    //
    // number -> number / 256, returns number % 256
    //
    private static byte divmod256(byte[] number58, int startAt) {
        int remainder = 0;
        for (int i = startAt; i < number58.length; i++) {
            int digit58 = (int) number58[i] & 0xFF;
            int temp = remainder * 58 + digit58;

            number58[i] = (byte) (temp / 256);

            remainder = temp % 256;
        }

        return (byte) remainder;
    }

    // Local equivalent of Arrays.copyOfRange, kept to avoid a new import.
    private static byte[] copyOfRange(byte[] source, int from, int to) {
        byte[] range = new byte[to - from];
        System.arraycopy(source, from, range, 0, range.length);

        return range;
    }

}
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.util; + +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public final class ExecutorPool { + + public static ThreadFactory newThreadFactory(String namePrefix) { + HgAssert.isArgumentNotNull(namePrefix, "namePrefix"); + return new DefaultThreadFactory(namePrefix); + } + + public static ThreadPoolExecutor createExecutor(String name, long keepAliveTime, + int coreThreads, int maxThreads) { + return new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, + new SynchronousQueue<>(), + newThreadFactory(name), + new ThreadPoolExecutor.CallerRunsPolicy() + ); + } + + public static class DefaultThreadFactory implements ThreadFactory { + + private final AtomicInteger threadNumber = new AtomicInteger(1); + private final String namePrefix; + + public DefaultThreadFactory(String threadNamePrefix) { + this.namePrefix = threadNamePrefix + "-"; + } + + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(null, r, namePrefix + threadNumber.getAndIncrement(), 0); + t.setDaemon(true); + t.setPriority(Thread.NORM_PRIORITY); + return t; + } + } +} diff --git 
a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgAssert.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgAssert.java new file mode 100644 index 0000000000..7da6a91b5a --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgAssert.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Argument-checking helpers: the {@code isTrue}/{@code isFalse}/
 * {@code isArgument*}/{@code isValid} family throws
 * IllegalArgumentException on violation, while {@code isInvalid},
 * {@code isContains} and {@code isNull} are plain predicates.
 *
 * NOTE(review): stripped generic parameters were reconstructed
 * (Supplier&lt;String&gt;, Collection&lt;?&gt;, Map&lt;?, ?&gt;) — confirm
 * against the repository original.
 */
public final class HgAssert {

    /**
     * Asserts the expression is true.
     *
     * @throws IllegalArgumentException with {@code message} when the
     *                                  expression is false, or when the
     *                                  message itself is null
     */
    public static void isTrue(boolean expression, String message) {
        if (message == null) {
            throw new IllegalArgumentException("message is null");
        }
        if (!expression) {
            throw new IllegalArgumentException(message);
        }
    }

    /** Lazy-message variant of {@link #isTrue(boolean, String)}. */
    public static void isTrue(boolean expression, Supplier<String> msg) {
        if (msg == null) {
            throw new IllegalArgumentException("message supplier is null");
        }
        if (!expression) {
            throw new IllegalArgumentException(msg.get());
        }
    }

    public static void isFalse(boolean expression, String message) {
        isTrue(!expression, message);
    }

    public static void isFalse(boolean expression, Supplier<String> msg) {
        isTrue(!expression, msg);
    }

    public static void isArgumentValid(byte[] bytes, String parameter) {
        isFalse(isInvalid(bytes), () -> "The argument is invalid: " + parameter);
    }

    public static void isArgumentValid(String str, String parameter) {
        isFalse(isInvalid(str), () -> "The argument is invalid: " + parameter);
    }

    public static void isArgumentValid(Collection<?> collection, String parameter) {
        isFalse(isInvalid(collection), () -> "The argument is invalid: " + parameter);
    }

    public static void isArgumentNotNull(Object obj, String parameter) {
        isTrue(obj != null, () -> "The argument is null: " + parameter);
    }

    /**
     * @deprecated misspelled name kept for source compatibility; use
     *             {@link #isValid(byte[], String)} instead.
     */
    @Deprecated
    public static void istValid(byte[] bytes, String msg) {
        isValid(bytes, msg);
    }

    /** Correctly spelled replacement for {@link #istValid(byte[], String)}. */
    public static void isValid(byte[] bytes, String msg) {
        isFalse(isInvalid(bytes), msg);
    }

    public static void isValid(String str, String msg) {
        isFalse(isInvalid(str), msg);
    }

    public static void isNotNull(Object obj, String msg) {
        isTrue(obj != null, msg);
    }

    /** True when {@code objs} contains an element equal to {@code obj}. */
    public static boolean isContains(Object[] objs, Object obj) {
        if (objs == null || objs.length == 0 || obj == null) {
            return false;
        }
        for (Object item : objs) {
            if (obj.equals(item)) {
                return true;
            }
        }
        return false;
    }

    /** True when the varargs array is empty/null or any element is null/blank. */
    public static boolean isInvalid(String... strs) {
        if (strs == null || strs.length == 0) {
            return true;
        }
        for (String item : strs) {
            // idiom: trim().isEmpty() instead of "".equals(item.trim())
            if (item == null || item.trim().isEmpty()) {
                return true;
            }
        }
        return false;
    }

    public static boolean isInvalid(byte[] bytes) {
        return bytes == null || bytes.length == 0;
    }

    public static boolean isInvalid(Map<?, ?> map) {
        return map == null || map.isEmpty();
    }

    public static boolean isInvalid(Collection<?> list) {
        return list == null || list.isEmpty();
    }

    /** Null-safe {@link Collection#contains}; null list or item yields false. */
    public static <T> boolean isContains(Collection<T> list, T item) {
        if (list == null || item == null) {
            return false;
        }
        return list.contains(item);
    }

    /** True when {@code objs} itself is null or any element is null. */
    public static boolean isNull(Object... objs) {
        if (objs == null) {
            return true;
        }
        for (Object item : objs) {
            if (item == null) {
                return true;
            }
        }
        return false;
    }
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.util; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import javax.annotation.CheckForNull; + +import org.apache.hugegraph.store.client.type.HgStoreClientException; + +import lombok.extern.slf4j.Slf4j; + +/** + * 2022/3/15 + * + * @version 0.1.0 + */ +@Slf4j +public final class HgBufferProxy { + + private final BlockingQueue> queue; + private final AtomicBoolean closed = new AtomicBoolean(false); + private final ReentrantLock lock = new ReentrantLock(); + private final Runnable task; + private Throwable err; + + private HgBufferProxy(Runnable task) { + this.task = task; + this.queue = new LinkedBlockingQueue<>(); + } + + public static HgBufferProxy of(Runnable task) { + HgAssert.isArgumentNotNull(task, "task"); + return new HgBufferProxy(task); + } + + public void send(T t) { + if (t == null) { + throw new IllegalArgumentException("the argument t is null"); + } + if (this.closed.get()) { + return; + } + this.lock.lock(); + try { + this.queue.offer(() -> t); + } finally { + lock.unlock(); + } + } + + private void apply() { + this.lock.lock(); + try { + if (!this.closed.get()) { + this.task.run(); + Thread.yield(); + } + } finally { + this.lock.unlock(); + } + } + + /** + * return an item from the chan + * + * @return null when the chan has been closed + * @throws RuntimeException + */ + @CheckForNull + public T receive(int time, Consumer callback) { + Supplier s; + if (this.closed.get()) { + s = this.queue.poll(); + this.checkErr(); + return s != null ? 
s.get() : null; + } + if (this.queue.size() <= 1) { + this.apply(); + } + lock.lock(); + try { + if (this.isClosed()) { + s = this.queue.poll(); + this.checkErr(); + return s != null ? s.get() : null; + } + } finally { + lock.unlock(); + } + try { + s = this.queue.poll(time, TimeUnit.SECONDS); + } catch (Throwable t) { + log.error("failed to receive a item from channel, cause by: ", t); + throw HgStoreClientException.of(t); + } + if (s == null) { + if (this.closed.get()) { + s = this.queue.poll(); + } else { + if (callback == null) { + throw new RuntimeException("timeout, max time: " + time + " seconds"); + } else { + callback.accept(time); + } + } + } + this.checkErr(); + return s != null ? s.get() : null; + } + + public boolean isClosed() { + return this.closed.get(); + } + + /** + * @throws RuntimeException when fail to close the chan + */ + public void close() { + if (this.closed.get()) { + return; + } + lock.lock(); + this.closed.set(true); + try { + this.queue.offer(() -> null); + } finally { + lock.unlock(); + } + } + + public void setError(Throwable streamErr) { + this.err = streamErr; + } + + private void checkErr() { + if (this.err != null) { + throw HgStoreClientException.of(this.err); + } + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConfig.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConfig.java new file mode 100644 index 0000000000..a121f1e4ab --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConfig.java @@ -0,0 +1,191 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.util; + +import java.util.PropertyResourceBundle; + +import lombok.extern.slf4j.Slf4j; + +/** + * 2021/11/29 + */ +@Slf4j +public final class HgStoreClientConfig { + + private static final int GRPC_DEFAULT_TIMEOUT_SECONDS = 100; + private static final int GRPC_DEFAULT_MAX_INBOUND_MESSAGE_SIZE = 1024 * 1024 * 1024; + private static final int GRPC_DEFAULT_MAX_OUTBOUND_MESSAGE_SIZE = 1024 * 1024 * 1024; + + private static final int NET_KV_SCANNER_PAGE_SIZE = 10_000; + private static final int NET_KV_SCANNER_HAVE_NEXT_TIMEOUT = 30 * 60; + private static final String fileName = "hg-store-client"; + private static PropertyResourceBundle prb = null; + private static HgStoreClientConfig defaultInstance; + private Integer grpcTimeoutSeconds = GRPC_DEFAULT_TIMEOUT_SECONDS; + private Integer grpcMaxInboundMessageSize = GRPC_DEFAULT_MAX_INBOUND_MESSAGE_SIZE; + private Integer grpcMaxOutboundMessageSize = GRPC_DEFAULT_MAX_OUTBOUND_MESSAGE_SIZE; + private Integer netKvScannerPageSize = NET_KV_SCANNER_PAGE_SIZE; + private Integer netKvScannerHaveNextTimeout = NET_KV_SCANNER_HAVE_NEXT_TIMEOUT; + + private HgStoreClientConfig() { + } + + public synchronized static HgStoreClientConfig of() { + + if (defaultInstance != null) { + return defaultInstance; + } + + defaultInstance = new HgStoreClientConfig(); + + overrideViaProperties(defaultInstance); + + return 
defaultInstance; + } + + private static void overrideViaProperties(HgStoreClientConfig config) { + try { + prb = (PropertyResourceBundle) PropertyResourceBundle.getBundle(fileName); + } catch (Throwable t) { + log.warn("Failed to load " + fileName + ".properties."); + log.info("Default configuration was activated."); + return; + } + PropertiesWrapper wrapper = new PropertiesWrapper(prb); + + log.info("grpc.timeout.seconds = " + + (config.grpcTimeoutSeconds = wrapper.getInt("grpc.timeout.seconds" + , config.grpcTimeoutSeconds)) + ); + log.info("grpc.max.inbound.message.size = " + + (config.grpcMaxInboundMessageSize = GRPC_DEFAULT_MAX_INBOUND_MESSAGE_SIZE) + ); + log.info("grpc.max.outbound.message.size = " + + (config.grpcMaxOutboundMessageSize = GRPC_DEFAULT_MAX_OUTBOUND_MESSAGE_SIZE) + ); + log.info("net.kv.scanner.page.size = " + + (config.netKvScannerPageSize = wrapper.getInt("net.kv.scanner.page.size" + , config.netKvScannerPageSize)) + ); + log.info("net.kv.scanner.have.next.timeout = {}", config.netKvScannerHaveNextTimeout); + } + + public Integer getGrpcTimeoutSeconds() { + return grpcTimeoutSeconds; + } + + public HgStoreClientConfig setGrpcTimeoutSeconds(Integer grpcTimeoutSeconds) { + this.grpcTimeoutSeconds = grpcTimeoutSeconds; + return this; + } + + public Integer getGrpcMaxInboundMessageSize() { + return grpcMaxInboundMessageSize; + } + + public HgStoreClientConfig setGrpcMaxInboundMessageSize(Integer grpcMaxInboundMessageSize) { + this.grpcMaxInboundMessageSize = grpcMaxInboundMessageSize; + return this; + } + + public Integer getGrpcMaxOutboundMessageSize() { + return grpcMaxOutboundMessageSize; + } + + public HgStoreClientConfig setGrpcMaxOutboundMessageSize(Integer grpcMaxOutboundMessageSize) { + this.grpcMaxOutboundMessageSize = grpcMaxOutboundMessageSize; + return this; + } + + public Integer getNetKvScannerPageSize() { + return netKvScannerPageSize; + } + + public HgStoreClientConfig setNetKvScannerPageSize(Integer netKvScannerPageSize) { + 
this.netKvScannerPageSize = netKvScannerPageSize; + return this; + } + + public Integer getNetKvScannerHaveNextTimeout() { + return netKvScannerHaveNextTimeout; + } + + public HgStoreClientConfig setNetKvScannerHaveNextTimeout(Integer netKvScannerHaveNextTimeout) { + this.netKvScannerHaveNextTimeout = netKvScannerHaveNextTimeout; + return this; + } + + private static class PropertiesWrapper { + + private final PropertyResourceBundle prb; + + PropertiesWrapper(PropertyResourceBundle prb) { + this.prb = prb; + } + + Integer getInt(String key, Integer defaultValue) { + + String buf = this.getStr(key); + if (buf == null || buf.isEmpty()) { + return defaultValue; + } + + Integer res = null; + try { + res = Integer.valueOf(buf); + } catch (Throwable t) { + log.error("Failed to parse a int value[ " + buf + " ] of the key[ " + key + " ].", + t); + } + + return res; + + } + + String getStr(String key, String defaultValue) { + String res = getStr(key); + + if (res == null && defaultValue != null) { + return defaultValue; + } + + return res; + } + + String getStr(String key) { + String value = null; + + if (!prb.containsKey(key)) { + return null; + } + + try { + value = prb.getString(key); + } catch (Exception e) { + log.warn("Failed to get value with key: [" + key + "]"); + return null; + } + + if (value != null) { + value = value.trim(); + } + + return value; + } + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConst.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConst.java new file mode 100644 index 0000000000..8f8543f387 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientConst.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.client.util; + +import java.util.Collections; +import java.util.List; + +import org.apache.hugegraph.store.HgKvStore; +import org.apache.hugegraph.store.HgOwnerKey; + +public final class HgStoreClientConst { + + public final static String DEFAULT_NODE_CLUSTER_ID = "default-node-cluster"; + + public final static String EMPTY_STRING = ""; + public final static String EMPTY_TABLE = ""; + public final static byte[] EMPTY_BYTES = new byte[0]; + public final static byte[] MAX_BYTES = new byte[]{(byte) 0b11111111}; + public final static List EMPTY_LIST = Collections.EMPTY_LIST; + + public final static byte[] ALL_PARTITION_OWNER = new byte[0]; + // means to dispatch to all partitions. 
+ public final static HgOwnerKey EMPTY_OWNER_KEY = HgOwnerKey.of(EMPTY_BYTES, EMPTY_BYTES); + public final static HgOwnerKey ALL_PARTITION_OWNER_KEY = + HgOwnerKey.of(ALL_PARTITION_OWNER, ALL_PARTITION_OWNER); + + //public final static int SCAN_GTE_BEGIN_LT_END = SCAN_GTE_BEGIN | SCAN_LT_END; + public final static int SCAN_TYPE_RANGE = HgKvStore.SCAN_GTE_BEGIN | HgKvStore.SCAN_LTE_END; + public final static int SCAN_TYPE_ANY = HgKvStore.SCAN_ANY; + public final static int NO_LIMIT = 0; + + public final static int TX_SESSIONS_MAP_CAPACITY = 32; + public static final int NODE_MAX_RETRYING_TIMES = 10; + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientUtil.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientUtil.java new file mode 100644 index 0000000000..5032d5aad6 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgStoreClientUtil.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.util; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +import org.apache.hugegraph.store.HgOwnerKey; +import org.apache.hugegraph.store.client.type.HgStoreClientException; + +import lombok.extern.slf4j.Slf4j; + +/** + * created on 2021/10/14 + */ +@Slf4j +public final class HgStoreClientUtil { + + public static HgOwnerKey toOwnerKey(byte[] key) { + return new HgOwnerKey(HgStoreClientConst.EMPTY_BYTES, key); + } + + public static HgOwnerKey toOwnerKey(String key) { + return new HgOwnerKey(HgStoreClientConst.EMPTY_BYTES, toBytes(key)); + } + + public static HgOwnerKey toAllNodeKey(String key) { + return new HgOwnerKey(HgStoreClientConst.ALL_PARTITION_OWNER, toBytes(key)); + } + + public static HgOwnerKey toOwnerKey(String owner, String key) { + return new HgOwnerKey(toBytes(owner), toBytes(key)); + } + + public static HgStoreClientException err(String msg) { + log.error(msg); + return HgStoreClientException.of(msg); + } + + public static boolean isValid(HgOwnerKey key) { + if (key == null) { + return false; + } + if (key.getKey() == null) { + return false; + } + return key.getKey().length != 0; + } + + public static String toStr(byte[] b) { + if (b == null) { + return ""; + } + if (b.length == 0) { + return ""; + } + return new String(b, StandardCharsets.UTF_8); + } + + public static String toByteStr(byte[] b) { + if (b == null) { + return ""; + } + if (b.length == 0) { + return ""; + } + return Arrays.toString(b); + } + + public static String toStr(HgOwnerKey ownerKey) { + if (ownerKey == null) { + return ""; + } + return "{ " + + "owner: " + Arrays.toString(ownerKey.getOwner()) + + ", key: " + toStr(ownerKey.getKey()) + + " }"; + } + + public static byte[] toBytes(String str) { + if (str == null) { + return null; + } + return str.getBytes(StandardCharsets.UTF_8); + } + + public static byte[] 
toBytes(long l) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.putLong(l); + return buffer.array(); + } + + public static byte[] toIntBytes(final int i) { + ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES); + buffer.putInt(i); + return buffer.array(); + } + + public static long toLong(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.put(bytes); + buffer.flip();//need flip + return buffer.getLong(); + } + + public static int toInt(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES); + buffer.put(bytes); + buffer.flip();//need flip + return buffer.getInt(); + } + + public static String getHostAddress() { + String res = null; + + try { + res = InetAddress.getLocalHost().getHostAddress(); + } catch (UnknownHostException e) { + e.printStackTrace(); + res = ""; + } + + return res; + } + + public static byte[] combine(byte[] first, byte[] second) { + if (first == null) { + first = HgStoreClientConst.EMPTY_BYTES; + } + + if (second == null) { + second = HgStoreClientConst.EMPTY_BYTES; + } + + byte[] result = new byte[first.length + second.length]; + System.arraycopy(first, 0, result, 0, first.length); + System.arraycopy(second, 0, result, first.length, second.length); + return result; + } + + public static void printCallStack(String txt, Throwable ex) { + StackTraceElement[] stackElements = ex.getStackTrace(); + StringBuilder sb = new StringBuilder(); + sb.append(txt).append(":\n"); + if (stackElements != null) { + for (int i = 0; i < stackElements.length; i++) { + sb.append(stackElements[i].getClassName()).append(" : ") + .append(stackElements[i].getMethodName()).append(" [ "); + sb.append(stackElements[i].getLineNumber()).append(" ]\n"); + + } + sb.append( + "--------------------------------------------------------------------------------------\n"); + } + log.error(sb.toString()); + } +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgUuid.java 
b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgUuid.java new file mode 100644 index 0000000000..fd83fef20a --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/HgUuid.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.util; + +import java.nio.ByteBuffer; +import java.util.UUID; + +public final class HgUuid { + + private static String encode(UUID uuid) { + ByteBuffer bb = ByteBuffer.wrap(new byte[16]); + bb.putLong(uuid.getMostSignificantBits()); + bb.putLong(uuid.getLeastSignificantBits()); + return Base58.encode(bb.array()); + } + + /** + * Get a UUID in Base58 FORM + * + * @return + */ + public static String newUUID() { + return encode(UUID.randomUUID()); + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/MetricX.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/MetricX.java new file mode 100644 index 0000000000..99d4df1125 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/MetricX.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.util; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * 2022/1/29 + */ +public class MetricX { + + // Total working time consumed by Iterators + public static AtomicLong iteratorSum = new AtomicLong(); + // Num of Iterators + public static AtomicLong iteratorCount = new AtomicLong(); + // Max working time consumed by Iterators + public static AtomicLong iteratorMax = new AtomicLong(); + public AtomicLong failureCount = new AtomicLong(); + // Combined to be used to record a task's time + private long start; + private long end; + + private MetricX(long start) { + this.start = start; + } + + public static MetricX ofStart() { + return new MetricX(System.currentTimeMillis()); + } + + public static void plusIteratorWait(long nanoSeconds) { + iteratorSum.addAndGet(nanoSeconds); + iteratorCount.getAndIncrement(); + if (iteratorMax.get() < nanoSeconds) { + iteratorMax.set(nanoSeconds); + } + } + + /** + * amount of waiting + * + * @return millisecond + */ + public static long getIteratorWait() { + return iteratorSum.get() / 1_000_000; + } + + /** + * average of waiting + * + * @return millisecond + */ + public static long getIteratorWaitAvg() { + if (iteratorCount.get() == 0) { + return -1; + } + return getIteratorWait() / iteratorCount.get(); + } + + /** + * maximum of waiting + * + * @return millisecond + */ + public static long getIteratorWaitMax() { + return iteratorMax.get() / 1_000_000; + } + + public static long getIteratorCount() { + return iteratorCount.get(); + } + + public long start() { + return this.start = System.currentTimeMillis(); + } + + public long end() { + return this.end = System.currentTimeMillis(); + } + + public long past() { + return this.end - this.start; + } + + public void countFail() { + this.failureCount.getAndIncrement(); + } + + public long getFailureCount() { + return this.failureCount.get(); + } + +} diff --git 
a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/PropertyUtil.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/PropertyUtil.java new file mode 100644 index 0000000000..c59eed2a7f --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/util/PropertyUtil.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.client.util; + +import java.security.AccessController; +import java.security.PrivilegedAction; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class PropertyUtil { + + private static final Logger LOG = LoggerFactory.getLogger(PropertyUtil.class); + + public static String get(String key) { + return get(key, null); + } + + public static String get(final String key, String def) { + if (key == null) { + throw new NullPointerException("key"); + } + if (key.isEmpty()) { + throw new IllegalArgumentException("key must not be empty."); + } + + String value = null; + try { + if (System.getSecurityManager() == null) { + value = System.getProperty(key); + } else { + value = AccessController.doPrivileged( + (PrivilegedAction) () -> System.getProperty(key)); + } + } catch (Exception e) { + LOG.error("exception {}", e); + } + + if (value == null) { + return def; + } + + return value; + } + + public static boolean getBoolean(String key, boolean def) { + String value = get(key, Boolean.toString(def)); + value = value.trim().toLowerCase(); + if (value.isEmpty()) { + return true; + } + + if ("true".equals(value) || "yes".equals(value) || "1".equals(value)) { + return true; + } + + if ("false".equals(value) || "no".equals(value) || "0".equals(value)) { + return false; + } + return def; + } + + public static int getInt(String key, int def) { + String value = get(key); + if (value == null) { + return def; + } + + value = value.trim().toLowerCase(); + try { + return Integer.parseInt(value); + } catch (Exception e) { + LOG.warn("exception ", e); + } + return def; + } + + public static Object setProperty(String key, String value) { + return System.getProperties().setProperty(key, value); + } + +} diff --git a/hugegraph-store/hg-store-client/src/main/resources/hg-store-client.properties b/hugegraph-store/hg-store-client/src/main/resources/hg-store-client.properties new file mode 100644 index 0000000000..aa967e8fb1 --- 
/dev/null +++ b/hugegraph-store/hg-store-client/src/main/resources/hg-store-client.properties @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +#grpc.timeout.seconds=10 +#grpc.max.inbound.message.size= +#grpc.max.outbound.message.size= +#net.kv.scanner.page.size = 2000 +#Unit:second +#net.kv.scanner.have.next.timeout=60 diff --git a/hugegraph-store/hg-store-client/src/main/resources/log4j2.xml b/hugegraph-store/hg-store-client/src/main/resources/log4j2.xml new file mode 100644 index 0000000000..6c64f58d90 --- /dev/null +++ b/hugegraph-store/hg-store-client/src/main/resources/log4j2.xml @@ -0,0 +1,102 @@ + + + + + + + + logs + hg-store-client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hugegraph-store/hg-store-common/pom.xml b/hugegraph-store/hg-store-common/pom.xml new file mode 100644 index 0000000000..7746c76155 --- /dev/null +++ b/hugegraph-store/hg-store-common/pom.xml @@ -0,0 +1,33 @@ + + + + + + 4.0.0 + + + org.apache.hugegraph + hugegraph-store + ${revision} + ../pom.xml + + + hg-store-common + diff --git 
a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/buffer/ByteBufferAllocator.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/buffer/ByteBufferAllocator.java new file mode 100644 index 0000000000..25a5f1814b --- /dev/null +++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/buffer/ByteBufferAllocator.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.buffer; + +import java.nio.ByteBuffer; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +public class ByteBufferAllocator { + + // size of each Buffer + final int capacity; + // max num of Buffers + final int maxCount; + final BlockingQueue freeQueue = new LinkedBlockingQueue<>(); + // current num of Buffers in queue + AtomicInteger totalCount; + + public ByteBufferAllocator(int cap, int count) { + this.capacity = cap; + this.maxCount = count; + this.totalCount = new AtomicInteger(0); + } + + public ByteBuffer get() throws InterruptedException { + ByteBuffer buffer = null; + while (buffer == null) { + if (freeQueue.size() > 0) { + buffer = freeQueue.poll(); + } else if (totalCount.get() < maxCount) { + buffer = ByteBuffer.allocate(capacity); + totalCount.incrementAndGet(); + } else { + buffer = freeQueue.poll(1, TimeUnit.SECONDS); + } + } + return buffer; + } + + public void release(ByteBuffer buffer) { + if (freeQueue.size() < maxCount) { + buffer.clear(); + freeQueue.add(buffer); + } + } +} diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/buffer/KVByteBuffer.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/buffer/KVByteBuffer.java new file mode 100644 index 0000000000..524a0f58fc --- /dev/null +++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/buffer/KVByteBuffer.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.buffer; + +import java.nio.ByteBuffer; + +public class KVByteBuffer { + + ByteBuffer buffer; + + public KVByteBuffer(int capacity) { + buffer = ByteBuffer.allocate(capacity); + } + + public KVByteBuffer(byte[] buffer) { + this.buffer = ByteBuffer.wrap(buffer); + } + + public KVByteBuffer(ByteBuffer buffer) { + this.buffer = buffer; + } + + public void clear() { + this.buffer.clear(); + } + + public KVByteBuffer flip() { + buffer.flip(); + return this; + } + + public ByteBuffer getBuffer() { + return buffer; + } + + public ByteBuffer copyBuffer() { + byte[] buf = new byte[buffer.position()]; + System.arraycopy(buffer.array(), 0, buf, 0, buffer.position()); + return ByteBuffer.wrap(buf); + } + + public void put(byte data) { + buffer.put(data); + } + + public void put(byte[] data) { + if (data != null) { + buffer.putInt(data.length); + buffer.put(data); + } + } + + public byte[] getBytes() { + int len = buffer.getInt(); + byte[] data = new byte[len]; + buffer.get(data); + return data; + } + + public byte get() { + return buffer.get(); + } + + public void putInt(int data) { + buffer.putInt(data); + } + + public int getInt() { + return buffer.getInt(); + } + + public byte[] array() { + return this.buffer.array(); + } + + public int position() { + return this.buffer.position(); + } + + public final boolean hasRemaining() { + return this.buffer.hasRemaining(); + } +} diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/term/Bits.java 
b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/term/Bits.java new file mode 100644 index 0000000000..2b78a22b8d --- /dev/null +++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/term/Bits.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.term; + +import java.nio.ByteBuffer; + +public class Bits { + + /** + * 大头字节序写入short + */ + public static void putShort(byte[] buf, int offSet, int x) { + buf[offSet] = (byte) (x >> 8); + buf[offSet + 1] = (byte) (x); + } + + public static void putInt(byte[] buf, int offSet, int x) { + buf[offSet] = (byte) (x >> 24); + buf[offSet + 1] = (byte) (x >> 16); + buf[offSet + 2] = (byte) (x >> 8); + buf[offSet + 3] = (byte) (x); + } + + /** + * 大头字节序读取short + */ + public static int getShort(byte[] buf, int offSet) { + int x = buf[offSet] & 0xff; + x = (x << 8) + (buf[offSet + 1] & 0xff); + return x; + } + + public static int getInt(byte[] buf, int offSet) { + int x = (buf[offSet] << 24) + + ((buf[offSet + 1] & 0xff) << 16) + + ((buf[offSet + 2] & 0xff) << 8) + + (buf[offSet + 3] & 0xff); + return x; + } + + public static void put(byte[] buf, int offSet, byte[] srcBuf) { + System.arraycopy(srcBuf, 0, buf, offSet, srcBuf.length); + } + + public static int toInt(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES); + buffer.put(bytes); + buffer.flip();//need flip + return buffer.getInt(); + } +} diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/term/HgPair.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/term/HgPair.java new file mode 100644 index 0000000000..84aa00b9af --- /dev/null +++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/term/HgPair.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.term; + +import java.io.Serializable; +import java.util.Objects; + +/** + * Copy from javafx.util:Pair + * TODO: refer license later, ?% match, maybe refer to avafx.util:Pair + *

+ * created on 2021/10/15 + */ +public class HgPair implements Serializable { + + /** + * Key of this Pair. + */ + private K key; + /** + * Value of this Pair. + */ + private V value; + + public HgPair() { + + } + + /** + * Creates a new pair + * + * @param key The key for this pair + * @param value The value to use for this pair + */ + public HgPair(K key, V value) { + this.key = key; + this.value = value; + } + + /** + * Gets the key for this pair. + * + * @return key for this pair + */ + public K getKey() { + return key; + } + + public void setKey(K key) { + this.key = key; + } + + /** + * Gets the value for this pair. + * + * @return value for this pair + */ + public V getValue() { + return value; + } + + public void setValue(V value) { + this.value = value; + } + + /** + *

String representation of this + * Pair.

+ * + *

The default name/value delimiter '=' is always used.

+ * + * @return String representation of this Pair + */ + @Override + public String toString() { + return key + "=" + value; + } + + /** + *

Generate a hash code for this Pair.

+ * + *

The hash code is calculated using both the name and + * the value of the Pair.

+ * + * @return hash code for this Pair + */ + @Override + public int hashCode() { + // name's hashCode is multiplied by an arbitrary prime number (13) + // in order to make sure there is a difference in the hashCode between + // these two parameters: + // name: a value: aa + // name: aa value: a + return key.hashCode() * 13 + (value == null ? 0 : value.hashCode()); + } + + /** + *

Test this Pair for equality with another + * Object.

+ * + *

If the Object to be tested is not a + * Pair or is null, then this method + * returns false.

+ * + *

Two Pairs are considered equal if and only if + * both the names and values are equal.

+ * + * @param o the Object to test for + * equality with this Pair + * @return true if the given Object is + * equal to this Pair else false + */ + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o instanceof HgPair) { + HgPair pair = (HgPair) o; + if (!Objects.equals(key, pair.key)) { + return false; + } + return Objects.equals(value, pair.value); + } + return false; + } +} diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/term/HgTriple.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/term/HgTriple.java new file mode 100644 index 0000000000..5206b24447 --- /dev/null +++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/term/HgTriple.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.term; + +import java.util.Objects; + +/** + * created on 2021/10/19 + */ +public class HgTriple { + + private final X x; + private final Y y; + private final Z z; + private int hash = -1; + + public HgTriple(X x, Y y, Z z) { + this.x = x; + this.y = y; + this.z = z; + } + + public X getX() { + return x; + } + + public Y getY() { + return y; + } + + public Z getZ() { + return z; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + HgTriple hgTriple = (HgTriple) o; + return Objects.equals(x, hgTriple.x) && Objects.equals(y, hgTriple.y) && + Objects.equals(z, hgTriple.z); + } + + @Override + public int hashCode() { + if (hash == -1) { + hash = Objects.hash(x, y, z); + } + return this.hash; + } + + @Override + public String toString() { + return "HgTriple{" + + "x=" + x + + ", y=" + y + + ", z=" + z + + '}'; + } +} diff --git a/hugegraph-store/hg-store-grpc/pom.xml b/hugegraph-store/hg-store-grpc/pom.xml new file mode 100644 index 0000000000..dac8a811d7 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/pom.xml @@ -0,0 +1,162 @@ + + + + + 4.0.0 + + + org.apache.hugegraph + hugegraph-store + ${revision} + ../pom.xml + + + hg-store-grpc + + + 11 + 11 + + 1.6.2 + 1.39.0 + 3.17.2 + 0.6.1 + + + + + + io.grpc + grpc-netty-shaded + ${grpc.version} + + + io.grpc + grpc-protobuf + ${grpc.version} + + + io.grpc + grpc-stub + ${grpc.version} + + + javax.annotation + javax.annotation-api + 1.3.2 + + + + + ${basedir}/src/main/java + + + src/main/resources + + + src/main/proto + + + + + kr.motd.maven + os-maven-plugin + ${os.plugin.version} + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + ${protobuf.plugin.version} + true + + + com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier} + + grpc-java + + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + + + 
${project.basedir}/src/main/proto + + + ${project.basedir}/src/main/java + + false + + + + + + generate-sources + + + compile + + compile-custom + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.3.0 + + + add-source + generate-sources + + add-source + + + + ${basedir}/src/main/java + ${basedir}/src/main/dev + + + + + + + maven-clean-plugin + 3.1.0 + + + + src/main/java + + + + + + + initialize + + clean + + + + + + + diff --git a/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/HgStoreStreamGrpc.java b/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/HgStoreStreamGrpc.java new file mode 100644 index 0000000000..bd7a2a2b33 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/HgStoreStreamGrpc.java @@ -0,0 +1,560 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.grpc.stream; + +import static io.grpc.MethodDescriptor.generateFullMethodName; + +/** + *
+ **
+ * In order to improve performance, reuse memory, and reduce gc recycling, the KvStream.writeTo method needs to be rewritten.
+ * 
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler (version 1.39.0)", + comments = "Source: store_stream.proto") +public final class HgStoreStreamGrpc { + + private HgStoreStreamGrpc() {} + + public static final String SERVICE_NAME = "HgStoreStream"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor getScanMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "Scan", + requestType = org.apache.hugegraph.store.grpc.stream.ScanStreamReq.class, + responseType = org.apache.hugegraph.store.grpc.stream.KvPageRes.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor getScanMethod() { + io.grpc.MethodDescriptor getScanMethod; + if ((getScanMethod = HgStoreStreamGrpc.getScanMethod) == null) { + synchronized (HgStoreStreamGrpc.class) { + if ((getScanMethod = HgStoreStreamGrpc.getScanMethod) == null) { + HgStoreStreamGrpc.getScanMethod = getScanMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "Scan")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + org.apache.hugegraph.store.grpc.stream.ScanStreamReq.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + org.apache.hugegraph.store.grpc.stream.KvPageRes.getDefaultInstance())) + .setSchemaDescriptor(new HgStoreStreamMethodDescriptorSupplier("Scan")) + .build(); + } + } + } + return getScanMethod; + } + + private static volatile io.grpc.MethodDescriptor getScanOneShotMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ScanOneShot", + requestType = org.apache.hugegraph.store.grpc.stream.ScanStreamReq.class, + responseType = org.apache.hugegraph.store.grpc.stream.KvPageRes.class, + methodType = 
io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getScanOneShotMethod() { + io.grpc.MethodDescriptor getScanOneShotMethod; + if ((getScanOneShotMethod = HgStoreStreamGrpc.getScanOneShotMethod) == null) { + synchronized (HgStoreStreamGrpc.class) { + if ((getScanOneShotMethod = HgStoreStreamGrpc.getScanOneShotMethod) == null) { + HgStoreStreamGrpc.getScanOneShotMethod = getScanOneShotMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ScanOneShot")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + org.apache.hugegraph.store.grpc.stream.ScanStreamReq.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + org.apache.hugegraph.store.grpc.stream.KvPageRes.getDefaultInstance())) + .setSchemaDescriptor(new HgStoreStreamMethodDescriptorSupplier("ScanOneShot")) + .build(); + } + } + } + return getScanOneShotMethod; + } + + private static volatile io.grpc.MethodDescriptor getScanBatchMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ScanBatch", + requestType = org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq.class, + responseType = org.apache.hugegraph.store.grpc.stream.KvPageRes.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor getScanBatchMethod() { + io.grpc.MethodDescriptor getScanBatchMethod; + if ((getScanBatchMethod = HgStoreStreamGrpc.getScanBatchMethod) == null) { + synchronized (HgStoreStreamGrpc.class) { + if ((getScanBatchMethod = HgStoreStreamGrpc.getScanBatchMethod) == null) { + HgStoreStreamGrpc.getScanBatchMethod = getScanBatchMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ScanBatch")) + 
.setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + org.apache.hugegraph.store.grpc.stream.KvPageRes.getDefaultInstance())) + .setSchemaDescriptor(new HgStoreStreamMethodDescriptorSupplier("ScanBatch")) + .build(); + } + } + } + return getScanBatchMethod; + } + + private static volatile io.grpc.MethodDescriptor getScanBatch2Method; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ScanBatch2", + requestType = org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq.class, + responseType = KvStream.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor getScanBatch2Method() { + io.grpc.MethodDescriptor getScanBatch2Method; + if ((getScanBatch2Method = HgStoreStreamGrpc.getScanBatch2Method) == null) { + synchronized (HgStoreStreamGrpc.class) { + if ((getScanBatch2Method = HgStoreStreamGrpc.getScanBatch2Method) == null) { + HgStoreStreamGrpc.getScanBatch2Method = getScanBatch2Method = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ScanBatch2")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + KvStream.getDefaultInstance())) + .setSchemaDescriptor(new HgStoreStreamMethodDescriptorSupplier("ScanBatch2")) + .build(); + } + } + } + return getScanBatch2Method; + } + + private static volatile io.grpc.MethodDescriptor getScanBatchOneShotMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ScanBatchOneShot", + requestType = 
org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq.class, + responseType = org.apache.hugegraph.store.grpc.stream.KvPageRes.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor getScanBatchOneShotMethod() { + io.grpc.MethodDescriptor getScanBatchOneShotMethod; + if ((getScanBatchOneShotMethod = HgStoreStreamGrpc.getScanBatchOneShotMethod) == null) { + synchronized (HgStoreStreamGrpc.class) { + if ((getScanBatchOneShotMethod = HgStoreStreamGrpc.getScanBatchOneShotMethod) == null) { + HgStoreStreamGrpc.getScanBatchOneShotMethod = getScanBatchOneShotMethod = + io.grpc.MethodDescriptor.newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ScanBatchOneShot")) + .setSampledToLocalTracing(true) + .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq.getDefaultInstance())) + .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( + org.apache.hugegraph.store.grpc.stream.KvPageRes.getDefaultInstance())) + .setSchemaDescriptor(new HgStoreStreamMethodDescriptorSupplier("ScanBatchOneShot")) + .build(); + } + } + } + return getScanBatchOneShotMethod; + } + + /** + * Creates a new async stub that supports all call types for the service + */ + public static HgStoreStreamStub newStub(io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public HgStoreStreamStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new HgStoreStreamStub(channel, callOptions); + } + }; + return HgStoreStreamStub.newStub(factory, channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static HgStoreStreamBlockingStub newBlockingStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory 
factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public HgStoreStreamBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new HgStoreStreamBlockingStub(channel, callOptions); + } + }; + return HgStoreStreamBlockingStub.newStub(factory, channel); + } + + /** + * Creates a new ListenableFuture-style stub that supports unary calls on the service + */ + public static HgStoreStreamFutureStub newFutureStub( + io.grpc.Channel channel) { + io.grpc.stub.AbstractStub.StubFactory factory = + new io.grpc.stub.AbstractStub.StubFactory() { + @java.lang.Override + public HgStoreStreamFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new HgStoreStreamFutureStub(channel, callOptions); + } + }; + return HgStoreStreamFutureStub.newStub(factory, channel); + } + + /** + *
+   **
+   * In order to improve performance, reuse memory, and reduce gc recycling, the KvStream.writeTo method needs to be rewritten.
+   * 
+ */ + public static abstract class HgStoreStreamImplBase implements io.grpc.BindableService { + + /** + */ + public io.grpc.stub.StreamObserver scan( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getScanMethod(), responseObserver); + } + + /** + */ + public void scanOneShot(org.apache.hugegraph.store.grpc.stream.ScanStreamReq request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getScanOneShotMethod(), responseObserver); + } + + /** + */ + public io.grpc.stub.StreamObserver scanBatch( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getScanBatchMethod(), responseObserver); + } + + /** + */ + public io.grpc.stub.StreamObserver scanBatch2( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getScanBatch2Method(), responseObserver); + } + + /** + */ + public void scanBatchOneShot(org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getScanBatchOneShotMethod(), responseObserver); + } + + @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getScanMethod(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + org.apache.hugegraph.store.grpc.stream.ScanStreamReq, + org.apache.hugegraph.store.grpc.stream.KvPageRes>( + this, METHODID_SCAN))) + .addMethod( + getScanOneShotMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + org.apache.hugegraph.store.grpc.stream.ScanStreamReq, + org.apache.hugegraph.store.grpc.stream.KvPageRes>( + this, METHODID_SCAN_ONE_SHOT))) + .addMethod( + getScanBatchMethod(), + 
io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq, + org.apache.hugegraph.store.grpc.stream.KvPageRes>( + this, METHODID_SCAN_BATCH))) + .addMethod( + getScanBatch2Method(), + io.grpc.stub.ServerCalls.asyncBidiStreamingCall( + new MethodHandlers< + org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq, + KvStream>( + this, METHODID_SCAN_BATCH2))) + .addMethod( + getScanBatchOneShotMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq, + org.apache.hugegraph.store.grpc.stream.KvPageRes>( + this, METHODID_SCAN_BATCH_ONE_SHOT))) + .build(); + } + } + + /** + *
+   **
+   * In order to improve performance, reuse memory, and reduce gc recycling, the KvStream.writeTo method needs to be rewritten.
+   * 
+ */ + public static final class HgStoreStreamStub extends io.grpc.stub.AbstractAsyncStub { + private HgStoreStreamStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected HgStoreStreamStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new HgStoreStreamStub(channel, callOptions); + } + + /** + */ + public io.grpc.stub.StreamObserver scan( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getScanMethod(), getCallOptions()), responseObserver); + } + + /** + */ + public void scanOneShot(org.apache.hugegraph.store.grpc.stream.ScanStreamReq request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getScanOneShotMethod(), getCallOptions()), request, responseObserver); + } + + /** + */ + public io.grpc.stub.StreamObserver scanBatch( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getScanBatchMethod(), getCallOptions()), responseObserver); + } + + /** + */ + public io.grpc.stub.StreamObserver scanBatch2( + io.grpc.stub.StreamObserver responseObserver) { + return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( + getChannel().newCall(getScanBatch2Method(), getCallOptions()), responseObserver); + } + + /** + */ + public void scanBatchOneShot(org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq request, + io.grpc.stub.StreamObserver responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getScanBatchOneShotMethod(), getCallOptions()), request, responseObserver); + } + } + + /** + *
+   **
+   * In order to improve performance, reuse memory, and reduce gc recycling, the KvStream.writeTo method needs to be rewritten.
+   * 
+ */ + public static final class HgStoreStreamBlockingStub extends io.grpc.stub.AbstractBlockingStub { + private HgStoreStreamBlockingStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected HgStoreStreamBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new HgStoreStreamBlockingStub(channel, callOptions); + } + + /** + */ + public org.apache.hugegraph.store.grpc.stream.KvPageRes scanOneShot(org.apache.hugegraph.store.grpc.stream.ScanStreamReq request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getScanOneShotMethod(), getCallOptions(), request); + } + + /** + */ + public org.apache.hugegraph.store.grpc.stream.KvPageRes scanBatchOneShot(org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getScanBatchOneShotMethod(), getCallOptions(), request); + } + } + + /** + *
+   **
+   * In order to improve performance, reuse memory, and reduce gc recycling, the KvStream.writeTo method needs to be rewritten.
+   * 
+ */ + public static final class HgStoreStreamFutureStub extends io.grpc.stub.AbstractFutureStub { + private HgStoreStreamFutureStub( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected HgStoreStreamFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new HgStoreStreamFutureStub(channel, callOptions); + } + + /** + */ + public com.google.common.util.concurrent.ListenableFuture scanOneShot( + org.apache.hugegraph.store.grpc.stream.ScanStreamReq request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getScanOneShotMethod(), getCallOptions()), request); + } + + /** + */ + public com.google.common.util.concurrent.ListenableFuture scanBatchOneShot( + org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getScanBatchOneShotMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_SCAN_ONE_SHOT = 0; + private static final int METHODID_SCAN_BATCH_ONE_SHOT = 1; + private static final int METHODID_SCAN = 2; + private static final int METHODID_SCAN_BATCH = 3; + private static final int METHODID_SCAN_BATCH2 = 4; + + private static final class MethodHandlers implements + io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final HgStoreStreamImplBase serviceImpl; + private final int methodId; + + MethodHandlers(HgStoreStreamImplBase serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_SCAN_ONE_SHOT: + 
serviceImpl.scanOneShot((org.apache.hugegraph.store.grpc.stream.ScanStreamReq) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + case METHODID_SCAN_BATCH_ONE_SHOT: + serviceImpl.scanBatchOneShot((org.apache.hugegraph.store.grpc.stream.ScanStreamBatchReq) request, + (io.grpc.stub.StreamObserver) responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_SCAN: + return (io.grpc.stub.StreamObserver) serviceImpl.scan( + (io.grpc.stub.StreamObserver) responseObserver); + case METHODID_SCAN_BATCH: + return (io.grpc.stub.StreamObserver) serviceImpl.scanBatch( + (io.grpc.stub.StreamObserver) responseObserver); + case METHODID_SCAN_BATCH2: + return (io.grpc.stub.StreamObserver) serviceImpl.scanBatch2( + (io.grpc.stub.StreamObserver) responseObserver); + default: + throw new AssertionError(); + } + } + } + + private static abstract class HgStoreStreamBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier { + HgStoreStreamBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return HgStoreStreamProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("HgStoreStream"); + } + } + + private static final class HgStoreStreamFileDescriptorSupplier + extends HgStoreStreamBaseDescriptorSupplier { + HgStoreStreamFileDescriptorSupplier() {} + } + + private static final class HgStoreStreamMethodDescriptorSupplier + extends HgStoreStreamBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final String methodName; + + 
HgStoreStreamMethodDescriptorSupplier(String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (HgStoreStreamGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new HgStoreStreamFileDescriptorSupplier()) + .addMethod(getScanMethod()) + .addMethod(getScanOneShotMethod()) + .addMethod(getScanBatchMethod()) + .addMethod(getScanBatch2Method()) + .addMethod(getScanBatchOneShotMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/HgStoreStreamProto.java b/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/HgStoreStreamProto.java new file mode 100644 index 0000000000..7fb363b407 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/HgStoreStreamProto.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: store_stream.proto + +package org.apache.hugegraph.store.grpc.stream; + +public final class HgStoreStreamProto { + private HgStoreStreamProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + static final com.google.protobuf.Descriptors.Descriptor + internal_static_KvStream_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_KvStream_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\022store_stream.proto\032\022store_common.proto" + + "\032\027store_stream_meta.proto\"I\n\010KvStream\022\016\n" + + "\006seq_no\030\001 \001(\005\022\014\n\004over\030\002 \001(\010\022\017\n\007version\030\004" + + " \001(\r\022\016\n\006stream\030\005 \001(\0142\205\002\n\rHgStoreStream\022(" + + "\n\004Scan\022\016.ScanStreamReq\032\n.KvPageRes\"\000(\0010\001" + + "\022+\n\013ScanOneShot\022\016.ScanStreamReq\032\n.KvPage" + + "Res\"\000\0222\n\tScanBatch\022\023.ScanStreamBatchReq\032" + + "\n.KvPageRes\"\000(\0010\001\0222\n\nScanBatch2\022\023.ScanSt" + + 
"reamBatchReq\032\t.KvStream\"\000(\0010\001\0225\n\020ScanBat" + + "chOneShot\022\023.ScanStreamBatchReq\032\n.KvPageR" + + "es\"\000B=\n%org.apache.hugegraph.store.grpc.s" + + "treamB\022HgStoreStreamProtoP\001b\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hugegraph.store.grpc.common.HgStoreCommonProto.getDescriptor(), + org.apache.hugegraph.store.grpc.stream.HgStoreStreamMetaProto.getDescriptor(), + }); + internal_static_KvStream_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_KvStream_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_KvStream_descriptor, + new java.lang.String[] { "SeqNo", "Over", "Version", "Stream", }); + org.apache.hugegraph.store.grpc.common.HgStoreCommonProto.getDescriptor(); + org.apache.hugegraph.store.grpc.stream.HgStoreStreamMetaProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/KvStream.java b/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/KvStream.java new file mode 100644 index 0000000000..00476ffeb1 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/KvStream.java @@ -0,0 +1,910 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: store_stream.proto + +package org.apache.hugegraph.store.grpc.stream; + +import java.nio.ByteBuffer; +import java.util.function.Consumer; + +/** + * In order to improve performance, reuse memory, and reduce gc recycling, the KvStream.writeTo method needs to be rewritten. + */ +public final class KvStream extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point + // (message_implements + // :KvStream) + KvStreamOrBuilder { + public static final int SEQ_NO_FIELD_NUMBER = 1; + public static final int OVER_FIELD_NUMBER = 2; + public static final int VERSION_FIELD_NUMBER = 4; + public static final int STREAM_FIELD_NUMBER = 5; + public static final int TYPE_FIELD_NUMBER = 6; + private static final long serialVersionUID = 0L; + // @@protoc_insertion_point(class_scope:KvStream) + private static final KvStream DEFAULT_INSTANCE; + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser<>() { + @java.lang.Override + public KvStream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new KvStream(input, extensionRegistry); + } + }; + + static { + DEFAULT_INSTANCE = new KvStream(); + } + + private int seqNo_; + private boolean over_; + private int version_; + private ByteBuffer stream_; + private Consumer complete_; + private int type_; + private byte 
memoizedIsInitialized = -1; + + // Use KvStream.newBuilder() to construct. + private KvStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private KvStream() { + stream_ = ByteBuffer.allocate(0); + } + private KvStream( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + + seqNo_ = input.readInt32(); + break; + } + case 16: { + + over_ = input.readBool(); + break; + } + case 32: { + + version_ = input.readUInt32(); + break; + } + case 42: { + + stream_ = input.readByteBuffer(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return HgStoreStreamProto.internal_static_KvStream_descriptor; + } + + public static KvStream parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static KvStream parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static KvStream parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static KvStream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static KvStream parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static KvStream parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static KvStream parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + + public static KvStream parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static KvStream parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + + public static KvStream parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + + public static KvStream parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + 
.parseWithIOException(PARSER, input); + } + + public static KvStream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(KvStream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + public static KvStream getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new KvStream(); + } + + @java.lang.Override + public com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return HgStoreStreamProto.internal_static_KvStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + KvStream.class, KvStream.Builder.class); + } + + /** + *
+     * query times.
+     * 
+ * + * int32 seq_no = 1; + * + * @return The seqNo. + */ + @java.lang.Override + public int getSeqNo() { + return seqNo_; + } + + /** + *
+     * true=no more data
+     * 
+ * + * bool over = 2; + * + * @return The over. + */ + @java.lang.Override + public boolean getOver() { + return over_; + } + + /** + * uint32 version = 4; + * + * @return The version. + */ + @java.lang.Override + public int getVersion() { + return version_; + } + + /** + * bytes stream = 5; + * + * @return The stream. + */ + @java.lang.Override + public ByteBuffer getStream() { + return stream_; + } + + /** + * .KvStreamType type = 6; + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * .KvStreamType type = 6; + * + * @return The type. + */ + @java.lang.Override + public org.apache.hugegraph.store.grpc.stream.KvStreamType getType() { + @SuppressWarnings("deprecation") + org.apache.hugegraph.store.grpc.stream.KvStreamType result = + org.apache.hugegraph.store.grpc.stream.KvStreamType.valueOf(type_); + return result == null ? org.apache.hugegraph.store.grpc.stream.KvStreamType.UNRECOGNIZED : + result; + } + + @java.lang.Override + public boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (seqNo_ != 0) { + output.writeInt32(1, seqNo_); + } + if (over_) { + output.writeBool(2, over_); + } + if (version_ != 0) { + output.writeUInt32(4, version_); + } + if (stream_.limit() > 0) { + output.writeByteArray(5, stream_.array(), 0, stream_.limit()); + } + if (type_ != + org.apache.hugegraph.store.grpc.stream.KvStreamType.STREAM_TYPE_NONE.getNumber()) { + output.writeEnum(6, type_); + } + unknownFields.writeTo(output); + if (complete_ != null) { + complete_.accept(this); + } + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; 
+ if (seqNo_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(1, seqNo_); + } + if (over_) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, over_); + } + if (version_ != 0) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(4, version_); + } + if (stream_.limit() > 0) { + size += com.google.protobuf.CodedOutputStream + .computeTagSize(5) + + com.google.protobuf.CodedOutputStream + .computeUInt32SizeNoTag(stream_.limit()) + + stream_.limit(); + } + if (type_ != + org.apache.hugegraph.store.grpc.stream.KvStreamType.STREAM_TYPE_NONE.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(6, type_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof KvStream)) { + return super.equals(obj); + } + KvStream other = (KvStream) obj; + + if (getSeqNo() + != other.getSeqNo()) { + return false; + } + if (getOver() + != other.getOver()) { + return false; + } + if (getVersion() + != other.getVersion()) { + return false; + } + if (!getStream() + .equals(other.getStream())) { + return false; + } + return unknownFields.equals(other.unknownFields); + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SEQ_NO_FIELD_NUMBER; + hash = (53 * hash) + getSeqNo(); + hash = (37 * hash) + OVER_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getOver()); + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); + hash = (37 * hash) + STREAM_FIELD_NUMBER; + hash = (53 * hash) + getStream().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + @java.lang.Override 
+ public Builder newBuilderForType() { + return newBuilder(); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public KvStream getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + /** + * Protobuf type {@code KvStream} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:KvStream) + KvStreamOrBuilder { + private int seqNo_; + private boolean over_; + private int version_; + private ByteBuffer stream_ = ByteBuffer.allocate(0); + private int type_ = 0; + private Consumer complete_; + + // Construct using org.apache.hugegraph.store.grpc.stream.KvStream.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + public static com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return HgStoreStreamProto.internal_static_KvStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return HgStoreStreamProto.internal_static_KvStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + KvStream.class, KvStream.Builder.class); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + seqNo_ = 0; + + over_ = false; + + version_ = 0; 
+ + stream_ = ByteBuffer.allocate(0); + + complete_ = null; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return HgStoreStreamProto.internal_static_KvStream_descriptor; + } + + @java.lang.Override + public KvStream getDefaultInstanceForType() { + return KvStream.getDefaultInstance(); + } + + @java.lang.Override + public KvStream build() { + KvStream result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public KvStream buildPartial() { + KvStream result = new KvStream(this); + result.seqNo_ = seqNo_; + result.over_ = over_; + result.version_ = version_; + result.stream_ = stream_; + result.complete_ = complete_; + onBuilt(); + // d��� + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof KvStream) { + return mergeFrom((KvStream) other); + } else { + 
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(KvStream other) { + if (other == KvStream.getDefaultInstance()) return this; + if (other.getSeqNo() != 0) { + setSeqNo(other.getSeqNo()); + } + if (other.getOver()) { + setOver(other.getOver()); + } + if (other.getVersion() != 0) { + setVersion(other.getVersion()); + } + if (other.getStream() != ByteBuffer.allocate(0)) { + setStream(other.getStream()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + KvStream parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (KvStream) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + /** + *
+         * query times.
+         * 
+ * + * int32 seq_no = 1; + * + * @return The seqNo. + */ + @java.lang.Override + public int getSeqNo() { + return seqNo_; + } + + /** + *
+         * query times.
+         * 
+ * + * int32 seq_no = 1; + * + * @param value The seqNo to set. + * @return This builder for chaining. + */ + public Builder setSeqNo(int value) { + + seqNo_ = value; + onChanged(); + return this; + } + + /** + *
+         * query times.
+         * 
+ * + * int32 seq_no = 1; + * + * @return This builder for chaining. + */ + public Builder clearSeqNo() { + + seqNo_ = 0; + onChanged(); + return this; + } + + /** + *
+         * true=no more data
+         * 
+ * + * bool over = 2; + * + * @return The over. + */ + @java.lang.Override + public boolean getOver() { + return over_; + } + + /** + *
+         * true=no more data
+         * 
+ * + * bool over = 2; + * + * @param value The over to set. + * @return This builder for chaining. + */ + public Builder setOver(boolean value) { + + over_ = value; + onChanged(); + return this; + } + + /** + *
+         * true=no more data
+         * 
+ * + * bool over = 2; + * + * @return This builder for chaining. + */ + public Builder clearOver() { + + over_ = false; + onChanged(); + return this; + } + + /** + * uint32 version = 4; + * + * @return The version. + */ + @java.lang.Override + public int getVersion() { + return version_; + } + + /** + * uint32 version = 4; + * + * @param value The version to set. + * @return This builder for chaining. + */ + public Builder setVersion(int value) { + + version_ = value; + onChanged(); + return this; + } + + /** + * uint32 version = 4; + * + * @return This builder for chaining. + */ + public Builder clearVersion() { + + version_ = 0; + onChanged(); + return this; + } + + /** + * bytes stream = 5; + * + * @return The stream. + */ + @java.lang.Override + public ByteBuffer getStream() { + return stream_; + } + + /** + * bytes stream = 5; + * + * @param value The stream to set. + * @return This builder for chaining. + */ + public Builder setStream(ByteBuffer value) { + if (value == null) { + throw new NullPointerException(); + } + + stream_ = value; + onChanged(); + return this; + } + + /** + * bytes stream = 5; + * + * @return This builder for chaining. + */ + public Builder clearStream() { + + stream_ = getDefaultInstance().getStream(); + onChanged(); + return this; + } + + /** + * .KvStreamType type = 6; + * + * @return The enum numeric value on the wire for type. + */ + @java.lang.Override + public int getTypeValue() { + return type_; + } + + /** + * .KvStreamType type = 6; + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + + type_ = value; + onChanged(); + return this; + } + + /** + * .KvStreamType type = 6; + * + * @return The type. 
+ */ + @java.lang.Override + public org.apache.hugegraph.store.grpc.stream.KvStreamType getType() { + @SuppressWarnings("deprecation") + org.apache.hugegraph.store.grpc.stream.KvStreamType result = + org.apache.hugegraph.store.grpc.stream.KvStreamType.valueOf(type_); + return result == null ? + org.apache.hugegraph.store.grpc.stream.KvStreamType.UNRECOGNIZED : result; + } + + /** + * .KvStreamType type = 6; + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType(org.apache.hugegraph.store.grpc.stream.KvStreamType value) { + if (value == null) { + throw new NullPointerException(); + } + + type_ = value.getNumber(); + onChanged(); + return this; + } + + /** + * .KvStreamType type = 6; + * + * @return This builder for chaining. + */ + public Builder clearType() { + + type_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + public void complete(Consumer consumer) { + this.complete_ = consumer; + } + + // @@protoc_insertion_point(builder_scope:KvStream) + } + +} diff --git a/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/KvStreamOrBuilder.java b/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/KvStreamOrBuilder.java new file mode 100644 index 0000000000..b848996432 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/KvStreamOrBuilder.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: store_stream.proto + +package org.apache.hugegraph.store.grpc.stream; + +import java.nio.ByteBuffer; + +public interface KvStreamOrBuilder extends + // @@protoc_insertion_point(interface_extends:KvStream) + com.google.protobuf.MessageOrBuilder { + + /** + *
+   *query times.
+   * 
+ * + * int32 seq_no = 1; + * @return The seqNo. + */ + int getSeqNo(); + + /** + *
+   *true=no more data
+   * 
+ * + * bool over = 2; + * @return The over. + */ + boolean getOver(); + + /** + * uint32 version = 4; + * @return The version. + */ + int getVersion(); + + /** + * bytes stream = 5; + * @return The stream. + */ + ByteBuffer getStream(); + /** + * .KvStreamType type = 6; + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + /** + * .KvStreamType type = 6; + * @return The type. + */ + org.apache.hugegraph.store.grpc.stream.KvStreamType getType(); +} diff --git a/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/store_stream.proto b/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/store_stream.proto new file mode 100644 index 0000000000..7ea840d9f7 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/src/main/dev/org/apache/hugegraph/store/grpc/stream/store_stream.proto @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +syntax = "proto3"; + +import "store_common.proto"; +import "store_stream_meta.proto"; +option java_multiple_files = true; +option java_package = "org.apache.hugegraph.store.grpc.stream"; +option java_outer_classname = "HgStoreStreamProto"; + +/** + In order to improve performance, reuse memory, and reduce gc recycling, the KvStream.writeTo method needs to be overwrite. + */ +service HgStoreStream { + + rpc Scan(stream ScanStreamReq) returns (stream KvPageRes) {} + rpc ScanOneShot(ScanStreamReq) returns (KvPageRes) {} + rpc ScanBatch(stream ScanStreamBatchReq) returns (stream KvPageRes) {} + rpc ScanBatch2(stream ScanStreamBatchReq) returns (stream KvStream) {} + rpc ScanBatchOneShot(ScanStreamBatchReq) returns (KvPageRes) {} +} + + +message KvStream { + int32 seq_no = 1; //query times. + bool over = 2; //true=no more data + uint32 version = 4; + bytes stream = 5; + KvStreamType type = 6; +} diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/graphpb.proto b/hugegraph-store/hg-store-grpc/src/main/proto/graphpb.proto new file mode 100644 index 0000000000..a245002f85 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/src/main/proto/graphpb.proto @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +syntax = "proto3"; +package graph_pb; + +option java_package = "org.apache.hugegraph.store.grpc"; + +service GraphStore { + rpc ScanPartition(stream ScanPartitionRequest) returns (stream ScanResponse){} +} + +message ScanPartitionRequest{ + enum ScanType{ + SCAN_UNKNOWN = 0; + SCAN_VERTEX = 1; + SCAN_EDGE = 2; + } + // 请求参数 + message Request{ + ScanType scan_type = 1; + string graph_name = 2; + uint32 partition_id = 3; + uint32 start_code = 4; + uint32 end_code = 5; + // 过滤条件 + string condition = 6; + string table = 7; + int64 limit = 8; + int32 boundary = 9; + bytes position = 10; + // 返回条件 + repeated int64 properties = 11; + } + + + message Reply{ + int32 seq_no = 1; + } + RequestHeader header = 1; + oneof request { + Request scan_request = 2; + // 每消费一个数据包,通知服务端一次,返回消息序号 + Reply reply_request = 4; + } +} + +message ScanResponse{ + ResponseHeader header = 1; + // 消息序号 + int32 seq_no = 2; + repeated Vertex vertex = 3; + repeated Edge edge = 4; +} + + +message Property{ + uint64 label = 1; + Variant value = 2; +} + +message Vertex{ + int64 label = 1; // 点类型 + Variant id = 2; // 点ID + repeated Property properties = 3; //点属性 +} + +message Edge{ + int64 label = 1; // 边类型 + int64 sourceLabel = 2; + int64 targetLabel = 3; + Variant source_id = 4; // 源点ID + Variant target_id = 5; // 目标点ID + + repeated Property properties = 6; //边属性 +} + +message Variant { + optional VariantType type = 1; + optional int32 value_int32 = 2; + optional int64 value_int64 = 3; + optional float value_float = 4; + optional double value_double = 5; + optional string value_string = 6; + optional bytes value_bytes = 7; + optional string value_datetime = 8; + optional bool value_boolean = 9; +} + +enum VariantType { + VT_UNKNOWN = 0; + VT_BOOLEAN = 1; + VT_INT = 2; + VT_LONG = 3; + VT_FLOAT = 4; + VT_DOUBLE = 7; + VT_STRING = 8; + VT_BYTES = 9; + VT_DATETIME = 10; +} + + + +message RequestHeader { + // 发送者 ID. 
+ uint64 sender_id = 2; +} + +message ResponseHeader { + uint64 sender_id = 1; + Error error = 2; +} + + +enum ErrorType { + OK = 0; + UNKNOWN = 1; +} + +message Error { + ErrorType type = 1; + string message = 2; +} diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/healthy.proto b/hugegraph-store/hg-store-grpc/src/main/proto/healthy.proto new file mode 100644 index 0000000000..ca6ba06bb6 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/src/main/proto/healthy.proto @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +syntax = "proto3"; + +option java_package = "org.apache.hugegraph.store.grpc"; + +import "google/protobuf/empty.proto"; + +service Healthy { + rpc IsOk(google.protobuf.Empty) returns (StringReply) {} +} + +message StringReply { + string message = 1; +} diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/store_common.proto b/hugegraph-store/hg-store-grpc/src/main/proto/store_common.proto new file mode 100644 index 0000000000..fc9934dec4 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/src/main/proto/store_common.proto @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "org.apache.hugegraph.store.grpc.common"; +option java_outer_classname = "HgStoreCommonProto"; + +message Header { + string graph = 1; +} +message Tkv { + string table = 1; + bytes key = 2; + bytes value = 3; + int32 code = 9; + +} + +message Tk { + string table = 1; + bytes key = 2; + int32 code = 9; +} + +message Tp { + string table = 1; + bytes prefix = 2; + int32 code = 9; +} + +message Tse { + string table = 1; + Key start = 2; + Key end = 3; +} + +message Key { + bytes key = 1; + int32 code = 9; +} + +message Kv { + bytes key = 1; + bytes value = 2; + int32 code = 9; +} + +message ResStatus { + ResCode code = 1; + string msg = 2; +} + +/*--- enum ---*/ +enum ResCode { + RES_CODE_OK = 0; + RES_CODE_FAIL = 1; + RES_CODE_NOT_EXIST = 2; + RES_CODE_EXCESS = 3; +} + +enum ScanMethod { + UNKNOWN_SCAN_TYPE = 0; + ALL = 1; + PREFIX = 2; + RANGE = 3; +} + +enum ScanOrderType{ + // 批量接口下,返回顺序的要求 + ORDER_NONE = 0; // 允许无序 + ORDER_WITHIN_VERTEX = 1; // 一个点内的边不会被打断,单不同点之间为无序 + ORDER_STRICT = 2; // 保证原始的输入点顺序 +} + +enum OpType { + OP_TYPE_UNKNOWN = 0; + OP_TYPE_PUT = 1; + OP_TYPE_DEL = 2; + OP_TYPE_DEL_SINGLE = 3; + OP_TYPE_DEL_PREFIX = 4; + OP_TYPE_DEL_RANGE = 5; + OP_TYPE_MERGE = 6; +} + +enum 
TableMethod{ + TABLE_METHOD_UNKNOWN = 0; + TABLE_METHOD_EXISTS = 1; + TABLE_METHOD_CREATE = 2; + TABLE_METHOD_DELETE = 3; + TABLE_METHOD_DROP = 4; + TABLE_METHOD_TRUNCATE = 5; +} + +enum GraphMethod{ + GRAPH_METHOD_UNKNOWN = 0; + GRAPH_METHOD_DELETE = 3; +} diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/store_session.proto b/hugegraph-store/hg-store-grpc/src/main/proto/store_session.proto new file mode 100644 index 0000000000..b659645a63 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/src/main/proto/store_session.proto @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "org.apache.hugegraph.store.grpc.session"; +option java_outer_classname = "HgStoreSessionProto"; + +import "store_common.proto"; +import "store_stream_meta.proto"; + +service HgStoreSession { + rpc Get2(GetReq) returns (FeedbackRes) {} + rpc BatchGet2(BatchGetReq) returns (FeedbackRes) {} + rpc Batch(BatchReq) returns (FeedbackRes){} + rpc Table(TableReq) returns (FeedbackRes){}; + rpc Graph(GraphReq) returns (FeedbackRes){}; + rpc Clean(CleanReq) returns (FeedbackRes) {} + rpc Count(ScanStreamReq) returns (Agg) {} +} + +message TableReq{ + Header header = 1; + TableMethod method = 2; + string table_name = 3; +} + +message GraphReq{ + Header header = 1; + GraphMethod method = 2; + string graph_name = 3; +} + +message BatchReq{ + Header header = 1; + string batch_id = 2; + oneof requests{ + BatchWriteReq write_req = 10; + BatchCommitReq commit_req = 11; + BatchRollbackReq rollback_req = 12; + } +} + +message BatchWriteReq{ + repeated BatchEntry entry = 1; +} + +message BatchCommitReq{} + +message BatchRollbackReq{} + +message BatchEntry{ + OpType op_type = 1; + int32 table = 2; + Key start_key = 3; + Key end_key = 4; + bytes value = 5; +} + +message BatchGetReq { + Header header = 1; + string table = 2; + repeated Key key = 3; + int32 partition = 9; +} + +message GetReq { + Header header = 1; + Tk tk = 2; +} + +message CleanReq{ + Header header = 1; + int32 partition = 2; +} + + +message FeedbackRes { + ResStatus status = 1; + + oneof responses{ + PartitionFaultResponse partition_fault_response = 10; + ValueResponse value_response = 11; + KeyValueResponse key_value_response = 12; + } + +} + +message ValueResponse { + bytes value = 1; +} + +message KeyValueResponse { + repeated Kv kv = 1; +} + +message PartitionFaultResponse{ + PartitionFaultType fault_type = 1; + repeated PartitionLeader partition_leaders = 2; + repeated int32 partition_ids = 3; +} + +message PartitionLeader { + 
int32 partitionId = 2; + int64 leaderId = 3; +} + +enum PartitionFaultType{ + PARTITION_FAULT_TYPE_UNKNOWN = 0; + // 当前不是Leader,返回Leader所在store + PARTITION_FAULT_TYPE_NOT_LEADER = 1; + // 等待Leader超时,可能raft group创建失败 + PARTITION_FAULT_TYPE_WAIT_LEADER_TIMEOUT = 2; + // 分区不属于本机 + PARTITION_FAULT_TYPE_NOT_LOCAL = 3; + +} +message Agg { + Header header = 1; + int64 count = 2; +} diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/store_state.proto b/hugegraph-store/hg-store-grpc/src/main/proto/store_state.proto new file mode 100644 index 0000000000..d2b0aa3613 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/src/main/proto/store_state.proto @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +syntax = "proto3"; + +import "google/protobuf/empty.proto"; +import "store_common.proto"; + +option java_multiple_files = true; +option java_package = "org.apache.hugegraph.store.grpc.state"; +option java_outer_classname = "HgStoreStateProto"; + +service HgStoreState { + + // Subscribe Store Node state publishing. + rpc SubState(SubStateReq) returns (stream NodeStateRes) {} + + // Unsubscribe Store Node state publishing. 
+ rpc UnsubState(SubStateReq) returns (google.protobuf.Empty){} + rpc getScanState(SubStateReq) returns (ScanState){} + +} + +message SubStateReq{ + string subId = 1; +} + +message NodeStateRes { + NodeStateType state = 1; +} + +message ScanState{ + string address = 1; + uint64 taskCount = 2; + uint64 completedTaskCount = 3; + uint32 activeCount = 4; + uint32 largestPoolSize = 5; + uint32 poolSize = 6; + int32 maximumPoolSize = 7; + int32 queueSize = 8; + int32 queueRemainingCapacity = 9; +} + +enum NodeStateType { + UNKNOWN_STATE_TYPE = 0; + STARTING = 10; + STANDBY = 20; + ONLINE = 30; + PAUSE = 40; + PENDING = 50; + STOPPING = 60; + HALTED = 70; + ERROR = 90; + ZOMBIE = 99; +} + +message QuotaRequest { + map limits = 1; +} diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/store_stream_meta.proto b/hugegraph-store/hg-store-grpc/src/main/proto/store_stream_meta.proto new file mode 100644 index 0000000000..7c2211cab7 --- /dev/null +++ b/hugegraph-store/hg-store-grpc/src/main/proto/store_stream_meta.proto @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +syntax = "proto3"; + +import "store_common.proto"; + +option java_multiple_files = true; +option java_package = "org.apache.hugegraph.store.grpc.stream"; +option java_outer_classname = "HgStoreStreamMetaProto"; + + +message ScanStreamBatchReq { + Header header = 1; + oneof query { + ScanQueryRequest query_request = 10; + ScanPagingRequest paging_request = 11; + ScanPauseRequest pause_request = 12; + ScanCancelRequest cancel_request = 13; + ScanReceiptRequest receipt_request = 14; + } + int64 logId = 15; +} + +message ScanQueryRequest { + ScanMethod method = 2; + string table = 3; + int64 limit = 4; + int64 pageSize = 5; + int32 scanType = 6; + bytes query = 7; + bytes position = 8; + repeated ScanCondition condition = 9; + int64 perKeyLimit = 10; + int64 skipDegree = 11; + ScanOrderType orderType = 12; + int64 perKeyMax = 13; +} + +message ScanPagingRequest { + int64 pageSize = 1; +} +message ScanPauseRequest {} +message ScanCancelRequest {} +message ScanReceiptRequest { + uint32 times = 1; +} + +message ScanCondition { + int32 code = 1; // owner key hashcode + bytes prefix = 2; // key prefix + bytes start = 3; // start key + bytes end = 4; // end key + int32 serialNo = 5; // serial no +} + +message ScanStreamReq { + Header header = 1; + ScanMethod method = 2; + string table = 3; + int32 code = 4; // partitionId + bytes prefix = 5; // key prefix + bytes start = 6; //start key + bytes end = 7; //end key + int64 limit = 8; + int32 scanType = 9; + bytes query = 10; + int32 pageSize = 11; + bytes position = 12; + uint32 closeFlag = 13; + SelectParam selects = 14; +} + +message SelectParam { + bytes filter = 1; + bool withNoProperties = 2; + repeated int32 properties = 3; +} + +message KvPageRes { + int32 times = 1; //query times. 
+ bool over = 2; //true=no more data + repeated Kv data = 3; + uint32 version = 4; + bytes stream = 5; +} + +enum KvStreamType { + STREAM_TYPE_NONE = 0; + STREAM_TYPE_KV = 1; + STREAM_TYPE_K = 2; + STREAM_TYPE_SKV = 3; + STREAM_TYPE_SK = 4; +} diff --git a/hugegraph-store/hg-store-test/pom.xml b/hugegraph-store/hg-store-test/pom.xml new file mode 100644 index 0000000000..bd66c99441 --- /dev/null +++ b/hugegraph-store/hg-store-test/pom.xml @@ -0,0 +1,339 @@ + + + + + + hugegraph-store + org.apache.hugegraph + ${revision} + ../pom.xml + + 4.0.0 + + hg-store-test + + + + 2.15.0 + 1.18.20 + + + + + jacoco + + false + + + + + org.jacoco + jacoco-maven-plugin + 0.8.4 + + + + default + verify + + report-aggregate + + + ${project.basedir}/../target/site/jacoco + + + + + + + + + + + + + org.apache.hugegraph + hg-store-grpc + ${revision} + + + org.apache.hugegraph + hg-store-common + ${revision} + + + org.projectlombok + lombok + ${lombok.version} + + + org.apache.logging.log4j + log4j-slf4j-impl + ${log4j2.version} + + + org.apache.hugegraph + hg-pd-client + ${revision} + + + io.grpc + * + + + + + + + + + org.apache.hugegraph + hg-pd-grpc + ${revision} + + + commons-io + commons-io + 2.7 + + + com.fasterxml.jackson.core + jackson-databind + 2.13.0 + + + + junit + junit + 4.13.2 + + + com.google.protobuf + protobuf-java-util + 3.17.2 + compile + + + org.springframework + spring-context-support + 5.3.20 + + + org.springframework + spring-test + 5.3.20 + test + + + com.fasterxml.jackson.core + jackson-core + 2.13.0 + + + com.fasterxml.jackson.core + jackson-annotations + 2.13.0 + + + org.springframework.boot + spring-boot-starter-test + 2.5.14 + + + org.springframework.boot + spring-boot-starter-logging + + + + + org.powermock + powermock-classloading-xstream + 2.0.0-RC.3 + + + org.powermock + powermock-module-junit4-rule + 2.0.0-RC.3 + + + org.powermock + powermock-api-support + 2.0.0-RC.3 + + + org.powermock + powermock-module-junit4 + 2.0.0-RC.3 + compile + + + 
org.powermock + powermock-api-mockito2 + 2.0.0-RC.3 + compile + + + org.apache.hugegraph + hg-store-client + + + com.alipay.sofa + jraft-core + 1.3.9 + + + org.rocksdb + rocksdbjni + + + + + + + + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20 + + + + + + + + + + + + + + + + + + + + + + + + + + + + store-common-test + + ${basedir}/src/main/java/ + + ${basedir}/target/classes/ + + + **/CommonSuiteTest.java + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + org.jacoco + jacoco-maven-plugin + 0.8.4 + + + pre-test + + + prepare-agent + + + + post-test + test + + report-aggregate + + + ${basedir}/target/site/jacoco + + + + + + **/grpc/**/* + **/jraft/**/* + **/*Iterator* + **/node/**/* + + + + + + + src/main/resources/ + true + + + + diff --git a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/BaseCommonTest.java b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/BaseCommonTest.java new file mode 100644 index 0000000000..0067b11ae7 --- /dev/null +++ b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/BaseCommonTest.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.common; + +import org.junit.After; +import org.junit.BeforeClass; + +public class BaseCommonTest { + + @BeforeClass + public static void beforeClass() throws Exception { + } + + @After + public void teardown() throws Exception { + // pass + } +} diff --git a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/BitsTest.java b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/BitsTest.java new file mode 100644 index 0000000000..afa1f2baba --- /dev/null +++ b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/BitsTest.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.common; + +import org.apache.hugegraph.store.term.Bits; +import org.junit.Assert; +// import org.junit.Test; + +public class BitsTest { + + // @Test + public void test() { + for (int i = 0; i < Integer.MAX_VALUE; i = i + 10) { + byte[] val = new byte[4]; + Bits.putInt(val, 0, i); + int n = Bits.getInt(val, 0); + Assert.assertEquals(i, n); + } + } +} diff --git a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/ByteBufferAllocatorTest.java b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/ByteBufferAllocatorTest.java new file mode 100644 index 0000000000..0faf3457c7 --- /dev/null +++ b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/ByteBufferAllocatorTest.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.common; + +import java.util.concurrent.CountDownLatch; + +import org.apache.hugegraph.store.buffer.ByteBufferAllocator; +import org.junit.Assert; +import org.junit.Test; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class ByteBufferAllocatorTest extends BaseCommonTest { + + @Test + public void getAndReleaseTest() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(2); + + ByteBufferAllocator allocator = new ByteBufferAllocator(1, 2); + + new Thread(() -> { + try { + var buffer1 = allocator.get(); + var buffer2 = allocator.get(); + Thread.sleep(2000); + Assert.assertEquals(buffer1.limit(), 1); + allocator.release(buffer1); + allocator.release(buffer2); + latch.countDown(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }).start(); + + new Thread(() -> { + try { + Thread.sleep(1000); + var buffer1 = allocator.get(); + var buffer2 = allocator.get(); + Assert.assertEquals(buffer1.limit(), 1); + allocator.release(buffer1); + allocator.release(buffer2); + latch.countDown(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }).start(); + + latch.await(); + } +} diff --git a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/CommonSuiteTest.java b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/CommonSuiteTest.java new file mode 100644 index 0000000000..092cc804fe --- /dev/null +++ b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/CommonSuiteTest.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.store.common; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +import lombok.extern.slf4j.Slf4j; + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + ByteBufferAllocatorTest.class, + KVByteBufferTest.class +}) + +@Slf4j +public class CommonSuiteTest { + +} diff --git a/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/KVByteBufferTest.java b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/KVByteBufferTest.java new file mode 100644 index 0000000000..38ddb1af3f --- /dev/null +++ b/hugegraph-store/hg-store-test/src/main/java/org/apache/hugegraph/store/common/KVByteBufferTest.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.store.common; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.nio.ByteBuffer; + +import org.apache.hugegraph.store.buffer.KVByteBuffer; +import org.junit.Test; + +public class KVByteBufferTest { + + @Test + public void testOps() { + KVByteBuffer buffer1 = new KVByteBuffer(10); + buffer1.put((byte) 10); + // just put a byte + assertEquals(1, buffer1.position()); + // 9 left + assertTrue(buffer1.hasRemaining()); + buffer1.clear(); + assertEquals(10, buffer1.get()); + + buffer1.clear(); + buffer1.putInt(10); + buffer1.clear(); + assertEquals(10, buffer1.getInt()); + + buffer1.flip(); + // just write to a int + assertEquals(4, buffer1.getBuffer().limit()); + + byte[] bytes = new byte[]{10, 20, 30}; + KVByteBuffer buffer2 = new KVByteBuffer(bytes); + assertArrayEquals(buffer2.array(), bytes); + + ByteBuffer bb = ByteBuffer.allocate(10); + KVByteBuffer buffer3 = new KVByteBuffer(bb); + buffer3.put(bytes); + buffer3.clear(); + assertArrayEquals(buffer3.getBytes(), bytes); + + // int (4) + byte(3) + assertEquals(7, buffer3.getBuffer().position()); + + ByteBuffer bb2 = buffer3.copyBuffer(); + assertEquals(7, bb2.capacity()); + } +} diff --git a/hugegraph-store/hg-store-test/src/main/resources/log4j2.xml b/hugegraph-store/hg-store-test/src/main/resources/log4j2.xml new file mode 100644 index 0000000000..0a3aae7b9d --- /dev/null +++ b/hugegraph-store/hg-store-test/src/main/resources/log4j2.xml @@ -0,0 +1,102 @@ + + + + + + + + logs + hg-store-test + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hugegraph-store/hg-store-test/src/main/resources/pd-server.yml b/hugegraph-store/hg-store-test/src/main/resources/pd-server.yml new file mode 100644 index 0000000000..5608dc9dd4 --- /dev/null +++ 
b/hugegraph-store/hg-store-test/src/main/resources/pd-server.yml @@ -0,0 +1,71 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +spring: + application: + name: hugegraph-pd + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +grpc: + port: 8686 + netty-server: + max-inbound-message-size: 100MB + +server: + port: 8620 + +pd: + + patrol-interval: 3000000 + data-path: tmp/8686 + # 最少节点数,少于该数字,集群停止入库 + initial-store-count: 1 + # 初始store列表,在列表内的store自动激活 + initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 + #initial-store-list: 127.0.0.1:8501 +raft: + address: 127.0.0.1:8610 + # raft集群 + peers-list: 127.0.0.1:8610 + # raft rpc读写超时时间,单位毫秒 + rpc-timeout: 10000 + # 快照生成时间间隔,单位秒 + snapshotInterval: 30000 + metrics: true +store: + # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒 + keepAlive-timeout: 300 + # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒 + max-down-time: 180000 +partition: + # 默认每个分区副本数 + default-shard-count: 3 + # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count + store-max-shard-count: 1 + +discovery: + #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 + heartbeat-try-count: 3 diff --git 
a/hugegraph-store/hg-store-test/src/main/resources/version.txt b/hugegraph-store/hg-store-test/src/main/resources/version.txt new file mode 100644 index 0000000000..b55f10804f --- /dev/null +++ b/hugegraph-store/hg-store-test/src/main/resources/version.txt @@ -0,0 +1 @@ +3.6.5 \ No newline at end of file diff --git a/hugegraph-store/pom.xml b/hugegraph-store/pom.xml new file mode 100644 index 0000000000..0bb8cd96e1 --- /dev/null +++ b/hugegraph-store/pom.xml @@ -0,0 +1,317 @@ + + + + + + 4.0.0 + + hugegraph-store + ${revision} + pom + + + org.apache.hugegraph + hugegraph + ${revision} + ../pom.xml + + + + hg-store-common + hg-store-grpc + hg-store-client + hg-store-test + + + + + + + + + + 1.5.0 + 11 + 11 + 2.15.0 + ${project.basedir}/.. + + + + + + + org.apache.hugegraph + hg-store-common + ${project.version} + + + org.apache.hugegraph + hg-store-grpc + ${project.version} + + + + + + + + org.apache.hugegraph + hg-store-client + ${project.version} + + + + + + + + + + + + + org.apache.logging.log4j + log4j-slf4j-impl + 2.15.0 + + + + + + + junit + junit + 4.13.2 + test + + + + + + + org.jacoco + jacoco-maven-plugin + 0.8.4 + + + + prepare-agent + + + + + + org.codehaus.mojo + flatten-maven-plugin + 1.2.7 + + true + resolveCiFriendliesOnly + + + + flatten + process-resources + + flatten + + + + flatten.clean + clean + + clean + + + + + + org.apache.maven.plugins + maven-clean-plugin + + + + ${project.basedir}/ + + *.tar + *.tar.gz + .flattened-pom.xml + + dist/** + + false + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + store-common-test + + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20 + + + store-common-test + + test + + test + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pom.xml b/pom.xml index ff448ffc29..1224e32a9b 100644 --- a/pom.xml +++ 
b/pom.xml @@ -92,8 +92,7 @@ hugegraph-server hugegraph-pd - - + hugegraph-store @@ -145,21 +144,23 @@ **/*.conf **/*.map **/*.properties - dist/**/* **/bin/hugegraph.service **/swagger-ui/**/* scripts/dev/reviewers - scripts/dev/reviewers **/*.md **/dependency-reduced-pom.xml **/logs/*.log **/META-INF/**/* - **/target/* style/* ChangeLog CONFIG.ini GROUPS OWNERS + + **/pd/grpc/**/*.java + **/store/grpc/**/*.java + **/target/** + dist/**/* .github/**/* .gitignore @@ -177,8 +178,11 @@ **/hbase-*/** **/apache-cassandra-*/** **/pid + + **/tmp/** **/src/main/java/org/apache/hugegraph/pd/grpc/** + **/src/main/java/org/apache/hugegraph/store/grpc/** true