prefixList);
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgOwnerKey.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgOwnerKey.java
new file mode 100644
index 0000000000..e9245b3a39
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgOwnerKey.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_BYTES;
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_OWNER_KEY;
+
+import java.io.Serializable;
+import java.util.Arrays;
+
+import org.apache.hugegraph.store.client.util.HgStoreClientUtil;
+
+/**
+ * created on 2021/10/14
+ *
+ * @version 1.3.0 add canceled assert
+ */
+public class HgOwnerKey implements Serializable {
+
+    private final byte[] owner; // TODO: consider removing this field; it seems to be unused
+    private int keyCode = 0; // TODO: is this the right place for this field?
+ private byte[] key;
+ // Sequence number, used for batch queries to ensure the order of returned results
+ private int serialNo;
+
+ /**
+ * @param owner
+ * @param key
+     * @see HgOwnerKey#of(byte[], byte[])
+ */
+ @Deprecated
+ public HgOwnerKey(byte[] owner, byte[] key) {
+ if (owner == null) {
+ owner = EMPTY_BYTES;
+ }
+ if (key == null) {
+ key = EMPTY_BYTES;
+ }
+ this.owner = owner;
+ this.key = key;
+ }
+
+ public HgOwnerKey(int code, byte[] key) {
+ if (key == null) {
+ key = EMPTY_BYTES;
+ }
+ this.owner = EMPTY_BYTES;
+ this.key = key;
+ this.keyCode = code;
+ }
+
+ public static HgOwnerKey emptyOf() {
+ return EMPTY_OWNER_KEY;
+ }
+
+ public static HgOwnerKey newEmpty() {
+ return HgOwnerKey.of(EMPTY_BYTES, EMPTY_BYTES);
+ }
+
+ public static HgOwnerKey ownerOf(byte[] owner) {
+ return new HgOwnerKey(owner, EMPTY_BYTES);
+ }
+
+ public static HgOwnerKey codeOf(int code) {
+ return HgOwnerKey.of(EMPTY_BYTES, EMPTY_BYTES).setKeyCode(code);
+ }
+
+ public static HgOwnerKey of(byte[] owner, byte[] key) {
+ return new HgOwnerKey(owner, key);
+ }
+
+ public static HgOwnerKey of(int keyCode, byte[] key) {
+ return new HgOwnerKey(keyCode, key);
+ }
+
+ public byte[] getOwner() {
+ return owner;
+ }
+
+ public byte[] getKey() {
+ return key;
+ }
+
+ public int getKeyCode() {
+ return keyCode;
+ }
+
+ public HgOwnerKey setKeyCode(int keyCode) {
+ this.keyCode = keyCode;
+ return this;
+ }
+
+ public HgOwnerKey codeToKey(int keyCode) {
+ this.keyCode = keyCode;
+ this.key = HgStoreClientUtil.toIntBytes(keyCode);
+ return this;
+ }
+
+ public int getSerialNo() {
+ return this.serialNo;
+ }
+
+ public HgOwnerKey setSerialNo(int serialNo) {
+ this.serialNo = serialNo;
+ return this;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ HgOwnerKey that = (HgOwnerKey) o;
+ return Arrays.equals(owner, that.owner) && Arrays.equals(key, that.key);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = Arrays.hashCode(owner);
+ result = 31 * result + Arrays.hashCode(key);
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ return "HgOwnerKey{" +
+ "owner=" + Arrays.toString(owner) +
+ ", key=" + Arrays.toString(key) +
+ ", code=" + keyCode +
+ '}';
+ }
+
+}
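
A minimal usage sketch for the class above; the owner/key byte values are illustrative only:

```java
import java.nio.charset.StandardCharsets;

import org.apache.hugegraph.store.HgOwnerKey;

public class HgOwnerKeyExample {

    public static void main(String[] args) {
        byte[] owner = "vertex:1".getBytes(StandardCharsets.UTF_8); // illustrative owner bytes
        byte[] key = "prop:name".getBytes(StandardCharsets.UTF_8);  // illustrative key bytes

        // Prefer the static factories over the deprecated constructor.
        HgOwnerKey ownerKey = HgOwnerKey.of(owner, key).setSerialNo(0);

        // A key addressed only by its hash code (owner bytes stay empty).
        HgOwnerKey codeKey = HgOwnerKey.codeOf(42);

        System.out.println(ownerKey);
        System.out.println(codeKey);
    }
}
```
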
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPageSize.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPageSize.java
new file mode 100644
index 0000000000..38163d568f
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPageSize.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+/**
+ * Returns the number of records returned by one query in a pageable query.
+ *
+ * created on 2021/10/24
+ */
+public interface HgPageSize {
+
+ long getPageSize();
+
+ default boolean isPageEmpty() {
+ return false;
+ }
+}
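
A hedged sketch of what an implementation might look like; `FixedPage` is a hypothetical class, not part of this patch:

```java
import org.apache.hugegraph.store.HgPageSize;

// Hypothetical page descriptor that reports a fixed page size.
public class FixedPage implements HgPageSize {

    private final long pageSize;

    public FixedPage(long pageSize) {
        this.pageSize = pageSize;
    }

    @Override
    public long getPageSize() {
        return this.pageSize;
    }

    @Override
    public boolean isPageEmpty() {
        // Treat a zero-sized page as empty; the default implementation always returns false.
        return this.pageSize == 0;
    }
}
```
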
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPrivate.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPrivate.java
new file mode 100644
index 0000000000..80cdb77471
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgPrivate.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+public final class HgPrivate {
+
+ private static final HgPrivate INSTANCE = new HgPrivate();
+
+ private HgPrivate() {
+ }
+
+ static HgPrivate of() {
+ return INSTANCE;
+ }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgScanQuery.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgScanQuery.java
new file mode 100644
index 0000000000..cc64ba945b
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgScanQuery.java
@@ -0,0 +1,331 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hugegraph.store.client.util.HgAssert;
+import org.apache.hugegraph.store.grpc.common.ScanOrderType;
+
+/**
+ * 2022/3/4
+ *
+ * @version 0.5.0
+ */
+public interface HgScanQuery {
+
+ static HgScanQuery tableOf(String table) {
+ return ScanBuilder.tableOf(table).build();
+ }
+
+    static HgScanQuery rangeOf(String table, List<HgOwnerKey> startList,
+                               List<HgOwnerKey> endList) {
+ return ScanBuilder.rangeOf(table, startList, endList).build();
+ }
+
+    static HgScanQuery prefixOf(String table, List<HgOwnerKey> prefixList) {
+ return ScanBuilder.prefixOf(table, prefixList).build();
+ }
+
+    static HgScanQuery prefixOf(String table, List<HgOwnerKey> prefixList,
+ ScanOrderType orderType) {
+ return ScanBuilder.prefixOf(table, prefixList).setOrderType(orderType).build();
+ }
+
+    static HgScanQuery prefixIteratorOf(String table, Iterator<HgOwnerKey> prefixItr) {
+ return ScanBuilder.prefixIteratorOf(table, prefixItr).build();
+ }
+
+    static HgScanQuery prefixIteratorOf(String table, Iterator<HgOwnerKey> prefixItr,
+ ScanOrderType orderType) {
+ return ScanBuilder.prefixIteratorOf(table, prefixItr).setOrderType(orderType).build();
+ }
+
+ String getTable();
+
+ HgScanQuery.ScanMethod getScanMethod();
+
+    List<HgOwnerKey> getPrefixList();
+
+    Iterator<HgOwnerKey> getPrefixItr();
+
+    List<HgOwnerKey> getStartList();
+
+    List<HgOwnerKey> getEndList();
+
+ long getLimit();
+
+ long getPerKeyLimit();
+
+ long getPerKeyMax();
+
+ long getSkipDegree();
+
+ int getScanType();
+
+ ScanOrderType getOrderType();
+
+ boolean isOnlyKey();
+
+ byte[] getQuery();
+
+ ScanBuilder builder();
+
+ enum ScanMethod {
+ ALL,
+ PREFIX,
+ RANGE
+ }
+
+ enum SortType {
+ UNSORTED,
+ SORT_BY_EDGE,
+ SORT_BY_VERTEX
+ }
+
+ class ScanBuilder {
+
+ private final String table;
+        private final HgScanQuery.ScanMethod scanMethod;
+ private long limit = Integer.MAX_VALUE;
+ private long perKeyLimit = Integer.MAX_VALUE;
+ private long perKeyMax = Integer.MAX_VALUE;
+ private int scanType;
+ private ScanOrderType orderType;
+
+ private long skipDegree;
+
+ private boolean onlyKey;
+ private byte[] query;
+        private List<HgOwnerKey> prefixList;
+        private List<HgOwnerKey> startList;
+        private List<HgOwnerKey> endList;
+        private Iterator<HgOwnerKey> prefixItr;
+
+        ScanBuilder(HgScanQuery.ScanMethod scanMethod, String table) {
+            this.table = table;
+            this.scanMethod = scanMethod;
+ this.orderType = ScanOrderType.ORDER_NONE;
+ }
+
+        public static ScanBuilder rangeOf(String table, List<HgOwnerKey> startList,
+                                          List<HgOwnerKey> endList) {
+ HgAssert.isArgumentValid(table, "table");
+ HgAssert.isArgumentValid(startList, "startList");
+ HgAssert.isArgumentValid(endList, "endList");
+            HgAssert.isTrue(startList.size() == endList.size()
+                    , "The size of startList is not equal to endList's.");
+
+ ScanBuilder res = new ScanBuilder(HgScanQuery.ScanMethod.RANGE, table);
+ res.startList = startList;
+ res.endList = endList;
+ res.scanType = HgKvStore.SCAN_GTE_BEGIN | HgKvStore.SCAN_LTE_END;
+ return res;
+ }
+
+        public static ScanBuilder prefixOf(String table, List<HgOwnerKey> prefixList) {
+ HgAssert.isArgumentValid(table, "table");
+ HgAssert.isArgumentValid(prefixList, "prefixList");
+
+ ScanBuilder res = new ScanBuilder(HgScanQuery.ScanMethod.PREFIX, table);
+ res.prefixList = prefixList;
+ return res;
+
+ }
+
+        public static ScanBuilder prefixIteratorOf(String table,
+                                                   Iterator<HgOwnerKey> prefixItr) {
+ HgAssert.isArgumentValid(table, "table");
+
+ ScanBuilder res = new ScanBuilder(HgScanQuery.ScanMethod.PREFIX, table);
+ res.prefixItr = prefixItr;
+ return res;
+
+ }
+
+ public static ScanBuilder tableOf(String table) {
+ HgAssert.isArgumentValid(table, "table");
+
+ return new ScanBuilder(HgScanQuery.ScanMethod.ALL, table);
+ }
+
+ public ScanBuilder setLimit(long limit) {
+ this.limit = limit;
+ return this;
+ }
+
+ public ScanBuilder setPerKeyLimit(long limit) {
+ this.perKeyLimit = limit;
+ return this;
+ }
+
+ public ScanBuilder setPerKeyMax(long max) {
+ this.perKeyMax = max;
+ return this;
+ }
+
+ public ScanBuilder setScanType(int scanType) {
+ this.scanType = scanType;
+ return this;
+ }
+
+ public ScanBuilder setOrderType(ScanOrderType orderType) {
+ this.orderType = orderType;
+ return this;
+ }
+
+ public ScanBuilder setQuery(byte[] query) {
+ this.query = query;
+ return this;
+ }
+
+ public ScanBuilder setSkipDegree(long skipDegree) {
+ this.skipDegree = skipDegree;
+ return this;
+ }
+
+ public ScanBuilder setOnlyKey(boolean onlyKey) {
+ this.onlyKey = onlyKey;
+ return this;
+ }
+
+ public HgScanQuery build() {
+ return this.new BatchScanQuery();
+ }
+
+ private class BatchScanQuery implements HgScanQuery {
+
+ @Override
+ public String getTable() {
+ return table;
+ }
+
+ @Override
+ public HgScanQuery.ScanMethod getScanMethod() {
+                return scanMethod;
+ }
+
+ @Override
+            public List<HgOwnerKey> getPrefixList() {
+                if (prefixList == null) {
+                    return Collections.emptyList();
+                } else {
+                    return Collections.unmodifiableList(prefixList);
+                }
+            }
+
+ @Override
+            public Iterator<HgOwnerKey> getPrefixItr() {
+ return prefixItr;
+ }
+
+ @Override
+            public List<HgOwnerKey> getStartList() {
+                if (startList == null) {
+                    return Collections.emptyList();
+                } else {
+                    return Collections.unmodifiableList(startList);
+                }
+            }
+
+ @Override
+            public List<HgOwnerKey> getEndList() {
+                if (endList == null) {
+                    return Collections.emptyList();
+                } else {
+                    return Collections.unmodifiableList(endList);
+                }
+            }
+
+ @Override
+ public long getLimit() {
+ return limit;
+ }
+
+ @Override
+ public long getPerKeyLimit() {
+ return perKeyLimit;
+ }
+
+ @Override
+ public long getPerKeyMax() {
+ return perKeyMax;
+ }
+
+ @Override
+ public long getSkipDegree() {
+ return skipDegree;
+ }
+
+ @Override
+ public int getScanType() {
+ return scanType;
+ }
+
+ @Override
+ public ScanOrderType getOrderType() {
+ return orderType;
+ }
+
+ @Override
+ public boolean isOnlyKey() {
+ return onlyKey;
+ }
+
+ @Override
+ public byte[] getQuery() {
+ return query;
+ }
+
+ @Override
+ public ScanBuilder builder() {
+ return ScanBuilder.this;
+ }
+
+ @Override
+ public String toString() {
+                final StringBuilder sb = new StringBuilder("HgScanQuery{");
+ sb.append("table='").append(getTable()).append('\'');
+ sb.append(", scanMethod=").append(getScanMethod());
+ sb.append(", prefixList=").append(getPrefixList());
+ sb.append(", startList=").append(getStartList());
+ sb.append(", endList=").append(getEndList());
+ sb.append(", limit=").append(getLimit());
+ sb.append(", perKeyLimit=").append(getPerKeyLimit());
+ sb.append(", perKeyMax=").append(getPerKeyMax());
+ sb.append(", skipDegree=").append(getSkipDegree());
+ sb.append(", scanType=").append(getScanType());
+ sb.append(", orderType=").append(getOrderType());
+ sb.append(", onlyKey=").append(isOnlyKey());
+ sb.append(", query=");
+ if (query == null) {
+ sb.append("null");
+ } else {
+ sb.append('[');
+ for (int i = 0; i < query.length; ++i) {
+ sb.append(i == 0 ? "" : ", ").append(query[i]);
+ }
+ sb.append(']');
+ }
+ sb.append('}');
+ return sb.toString();
+ }
+ }
+
+ }
+}
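
The interface above is typically consumed through its static factories or the nested `ScanBuilder`. A minimal sketch, assuming the scan keys are `HgOwnerKey` instances; the table name and key bytes are illustrative:

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

import org.apache.hugegraph.store.HgOwnerKey;
import org.apache.hugegraph.store.HgScanQuery;

public class HgScanQueryExample {

    public static void main(String[] args) {
        // Full-table scan, capped at 1000 records, returning keys only.
        HgScanQuery tableScan = HgScanQuery.ScanBuilder.tableOf("g+oe")
                                                       .setLimit(1000)
                                                       .setOnlyKey(true)
                                                       .build();

        // Prefix scan over two owner keys.
        List<HgOwnerKey> prefixes = Arrays.asList(
                HgOwnerKey.of("v1".getBytes(StandardCharsets.UTF_8),
                              "e".getBytes(StandardCharsets.UTF_8)),
                HgOwnerKey.of("v2".getBytes(StandardCharsets.UTF_8),
                              "e".getBytes(StandardCharsets.UTF_8)));
        HgScanQuery prefixScan = HgScanQuery.prefixOf("g+oe", prefixes);

        System.out.println(tableScan);
        System.out.println(prefixScan);
    }
}
```
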
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSeekAble.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSeekAble.java
new file mode 100644
index 0000000000..fe6a580a1c
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSeekAble.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+/**
+ * created on 2022/03/11
+ */
+public interface HgSeekAble {
+
+ byte[] position();
+
+ void seek(byte[] position);
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionManager.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionManager.java
new file mode 100644
index 0000000000..37c2184c80
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionManager.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.client.HgStoreSessionProvider;
+
+/**
+ * Maintains HgStoreSession instances for HgStore clusters.
+ */
+
+@ThreadSafe
+public final class HgSessionManager {
+
+    // TODO: allow holding more than one HgSessionManager when connecting to multiple clusters
+ private final static HgSessionManager INSTANCE = new HgSessionManager();
+ private final HgSessionProvider sessionProvider;
+
+ private HgSessionManager() {
+ // TODO: constructed by SPI
+ this.sessionProvider = new HgStoreSessionProvider();
+ }
+
+ public static HgSessionManager getInstance() {
+ return INSTANCE;
+ }
+
+ /**
+     * Retrieves or creates an HgStoreSession.
+ *
+ * @param graphName
+ * @return
+ */
+ public HgStoreSession openSession(String graphName) {
+ return this.sessionProvider.createSession(graphName);
+ }
+
+}
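
A brief usage sketch; the graph name is illustrative and a reachable store cluster is assumed at runtime:

```java
import org.apache.hugegraph.store.HgSessionManager;
import org.apache.hugegraph.store.HgStoreSession;

public class HgSessionManagerExample {

    public static void main(String[] args) {
        // The manager is a singleton; sessions are created per graph name.
        HgStoreSession session = HgSessionManager.getInstance().openSession("hugegraph");
        System.out.println(session);
    }
}
```
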
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionProvider.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionProvider.java
new file mode 100644
index 0000000000..7049c27b01
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgSessionProvider.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/**
+ * created on 2021/10/12
+ */
+@ThreadSafe
+public interface HgSessionProvider {
+
+ HgStoreSession createSession(String graphName);
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreClient.java
new file mode 100644
index 0000000000..0f8ebb929f
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreClient.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.pd.client.PDClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.store.client.HgStoreNodeManager;
+import org.apache.hugegraph.store.client.HgStoreNodePartitionerImpl;
+import org.apache.hugegraph.store.client.HgStoreSessionProvider;
+
+/**
+ * Maintains HgStoreSession instances for HgStore clusters.
+ */
+
+@ThreadSafe
+public final class HgStoreClient {
+
+    // TODO: allow holding more than one HgSessionManager when connecting to multiple clusters
+ private final HgSessionProvider sessionProvider;
+ private PDClient pdClient;
+
+ public HgStoreClient() {
+ this.sessionProvider = new HgStoreSessionProvider();
+ }
+
+ public HgStoreClient(PDConfig config) {
+ this.sessionProvider = new HgStoreSessionProvider();
+ pdClient = PDClient.create(config);
+ setPdClient(pdClient);
+ }
+
+ public HgStoreClient(PDClient pdClient) {
+ this.sessionProvider = new HgStoreSessionProvider();
+ setPdClient(pdClient);
+ }
+
+ public static HgStoreClient create(PDConfig config) {
+ return new HgStoreClient(config);
+ }
+
+ public static HgStoreClient create(PDClient pdClient) {
+ return new HgStoreClient(pdClient);
+ }
+
+ public static HgStoreClient create() {
+ return new HgStoreClient();
+ }
+
+ public void setPDConfig(PDConfig config) {
+ pdClient = PDClient.create(config);
+ setPdClient(pdClient);
+ }
+
+ /**
+     * Retrieves or creates an HgStoreSession.
+ *
+ * @param graphName
+ * @return
+ */
+ public HgStoreSession openSession(String graphName) {
+ return this.sessionProvider.createSession(graphName);
+ }
+
+ public PDClient getPdClient() {
+ return pdClient;
+ }
+
+ public void setPdClient(PDClient client) {
+ this.pdClient = client;
+ HgStoreNodeManager nodeManager =
+ HgStoreNodeManager.getInstance();
+
+ HgStoreNodePartitionerImpl p = new HgStoreNodePartitionerImpl(pdClient, nodeManager);
+ nodeManager.setNodeProvider(p);
+ nodeManager.setNodePartitioner(p);
+ nodeManager.setNodeNotifier(p);
+ }
+
+}
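
A minimal bootstrap sketch. The PD address and the `PDConfig.of(...)` factory are assumptions about the PD client API, not confirmed by this patch:

```java
import org.apache.hugegraph.pd.client.PDConfig;
import org.apache.hugegraph.store.HgStoreClient;
import org.apache.hugegraph.store.HgStoreSession;

public class HgStoreClientExample {

    public static void main(String[] args) {
        // Assumed PD address; PDConfig.of(...) is expected to point at the PD service.
        PDConfig pdConfig = PDConfig.of("127.0.0.1:8686");

        // Creating the client wires the PD-based node partitioner into HgStoreNodeManager.
        HgStoreClient client = HgStoreClient.create(pdConfig);
        HgStoreSession session = client.openSession("hugegraph");
        System.out.println(session);
    }
}
```
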
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreSession.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreSession.java
new file mode 100644
index 0000000000..2e595e1ba1
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgStoreSession.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+import org.apache.hugegraph.store.client.type.HgStoreClientException;
+
+public interface HgStoreSession extends HgKvStore {
+
+ void beginTx();
+
+ /**
+     * @throws IllegalStateException when the tx hasn't been started.
+     * @throws HgStoreClientException when the commit fails.
+ */
+ void commit();
+
+ /**
+     * @throws IllegalStateException when the tx hasn't been started.
+     * @throws HgStoreClientException when the rollback fails.
+ */
+ void rollback();
+
+ boolean isTx();
+}
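
A sketch of the transactional contract above. The `put(table, ownerKey, value)` call is assumed to be declared by `HgKvStore`, which is not part of this diff; the table name, PD address and byte values are illustrative:

```java
import java.nio.charset.StandardCharsets;

import org.apache.hugegraph.pd.client.PDConfig;
import org.apache.hugegraph.store.HgOwnerKey;
import org.apache.hugegraph.store.HgStoreClient;
import org.apache.hugegraph.store.HgStoreSession;

public class HgStoreSessionTxExample {

    public static void main(String[] args) {
        HgStoreSession session = HgStoreClient.create(PDConfig.of("127.0.0.1:8686"))
                                              .openSession("hugegraph");
        session.beginTx();
        try {
            // put(...) is assumed from HgKvStore (not shown in this patch).
            session.put("g+v",
                        HgOwnerKey.of("owner".getBytes(StandardCharsets.UTF_8),
                                      "key".getBytes(StandardCharsets.UTF_8)),
                        "value".getBytes(StandardCharsets.UTF_8));
            session.commit();
        } catch (RuntimeException e) {
            // Roll back only while the tx is still open.
            if (session.isTx()) {
                session.rollback();
            }
            throw e;
        }
    }
}
```
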
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTkvEntry.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTkvEntry.java
new file mode 100644
index 0000000000..8e08ab656e
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTkvEntry.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+public interface HgTkvEntry {
+
+ String table();
+
+ byte[] key();
+
+ byte[] value();
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTokvEntry.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTokvEntry.java
new file mode 100644
index 0000000000..57ca4d4a91
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/HgTokvEntry.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store;
+
+public interface HgTokvEntry {
+
+ String table();
+
+ HgOwnerKey ownerKey();
+
+ byte[] value();
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartition.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartition.java
new file mode 100644
index 0000000000..6fa354edec
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartition.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Objects;
+
+/**
+ * Immutable Object Pattern
+ *
+ * created on 2021/10/26
+ */
+public final class HgNodePartition {
+
+ private final Long nodeId;
+    // The hash code of the current key
+ private final Integer keyCode;
+
+    // The start/end key-code range of the partition
+ private final Integer startKey;
+ private final Integer endKey;
+ private int hash = -1;
+
+ HgNodePartition(Long nodeId, Integer keyCode) {
+ this.nodeId = nodeId;
+ this.keyCode = keyCode;
+ this.startKey = this.endKey = keyCode;
+ }
+
+ HgNodePartition(Long nodeId, Integer keyCode, Integer startKey, Integer endKey) {
+ this.nodeId = nodeId;
+ this.keyCode = keyCode;
+ this.startKey = startKey;
+ this.endKey = endKey;
+ }
+
+ public static HgNodePartition of(Long nodeId, Integer keyCode) {
+ return new HgNodePartition(nodeId, keyCode);
+ }
+
+ public static HgNodePartition of(Long nodeId, Integer keyCode, Integer startKey,
+ Integer endKey) {
+ return new HgNodePartition(nodeId, keyCode, startKey, endKey);
+ }
+
+ public Long getNodeId() {
+ return nodeId;
+ }
+
+ public Integer getKeyCode() {
+ return keyCode;
+ }
+
+ public Integer getStartKey() {
+ return startKey;
+ }
+
+ public Integer getEndKey() {
+ return endKey;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ HgNodePartition that = (HgNodePartition) o;
+ return Objects.equals(nodeId, that.nodeId) && Objects.equals(keyCode, that.keyCode);
+ }
+
+ @Override
+ public int hashCode() {
+ if (this.hash == -1) {
+ this.hash = Objects.hash(nodeId, keyCode);
+ }
+ return this.hash;
+ }
+
+ @Override
+ public String toString() {
+ return "HgNodePartition{" +
+ "nodeId=" + nodeId +
+ ", partitionId=" + keyCode +
+ '}';
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartitionerBuilder.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartitionerBuilder.java
new file mode 100644
index 0000000000..4bb0705b74
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgNodePartitionerBuilder.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import static org.apache.hugegraph.store.client.util.HgAssert.isFalse;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+/**
+ * created on 2021/10/26
+ *
+ * @version 1.0.0
+ */
+@NotThreadSafe
+public final class HgNodePartitionerBuilder {
+
+    private Set<HgNodePartition> partitions = null;
+
+ static HgNodePartitionerBuilder resetAndGet() {
+ return new HgNodePartitionerBuilder();
+ }
+
+ /**
+ * @param nodeId
+ * @param keyCode
+ * @return
+     * @see HgNodePartitionerBuilder#setPartitions(Set)
+ */
+ @Deprecated
+ public HgNodePartitionerBuilder add(Long nodeId, Integer keyCode) {
+ isFalse(nodeId == null, "The argument is invalid: nodeId");
+ isFalse(keyCode == null, "The argument is invalid: keyCode");
+
+ if (this.partitions == null) {
+ this.partitions = new HashSet<>(16, 1);
+ }
+
+ this.partitions.add(HgNodePartition.of(nodeId, keyCode));
+ return this;
+ }
+
+    Collection<HgNodePartition> getPartitions() {
+ return this.partitions;
+ }
+
+    public void setPartitions(Set<HgNodePartition> partitions) {
+ isFalse(partitions == null, "The argument is invalid: partitions");
+ this.partitions = partitions;
+ }
+
+}
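
A sketch of the non-deprecated path, filling the builder via `setPartitions`; the node ID and key-code range are illustrative:

```java
import java.util.HashSet;
import java.util.Set;

import org.apache.hugegraph.store.client.HgNodePartition;
import org.apache.hugegraph.store.client.HgNodePartitionerBuilder;

public class PartitionsFiller {

    // Fills the builder with one partition covering an illustrative key-code range.
    public static void fill(HgNodePartitionerBuilder builder, long nodeId) {
        Set<HgNodePartition> partitions = new HashSet<>();
        partitions.add(HgNodePartition.of(nodeId, 0, 0, 65535));
        builder.setPartitions(partitions);
    }
}
```
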
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgPrivate.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgPrivate.java
new file mode 100644
index 0000000000..ee73485469
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgPrivate.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+/**
+ * created on 2021/10/26
+ */
+public class HgPrivate {
+
+ private final static HgPrivate instance = new HgPrivate();
+
+ private HgPrivate() {
+
+ }
+
+ static HgPrivate getInstance() {
+ return instance;
+ }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNode.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNode.java
new file mode 100644
index 0000000000..31438c0a53
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNode.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import org.apache.hugegraph.store.HgStoreSession;
+
+/**
+ * created on 2021/10/11
+ *
+ * @version 0.2.0
+ */
+public interface HgStoreNode {
+
+ /**
+     * Returns whether the node is healthy (online) or not.
+ *
+ * @return
+ */
+ default boolean isHealthy() {
+ return true;
+ }
+
+ /**
+ * Return the unique ID of store-node.
+ *
+ * @return
+ */
+ Long getNodeId();
+
+ /**
+     * Returns the node address as host and port concatenated: "host:port".
+ *
+ * @return
+ */
+ String getAddress();
+
+ /**
+     * Returns a new HgStoreSession instance, which is not thread-safe.
+     * Returns null when the node is not in charge of the graph passed as the argument.
+ *
+ * @return
+ */
+ HgStoreSession openSession(String graphName);
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeBuilder.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeBuilder.java
new file mode 100644
index 0000000000..c35b5e9343
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeBuilder.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+/**
+ * created on 2021/10/11
+ */
+public interface HgStoreNodeBuilder {
+
+ HgStoreNodeBuilder setNodeId(Long nodeId);
+
+ HgStoreNodeBuilder setAddress(String address);
+
+ /**
+     * Builds an HgStoreNode instance.
+ *
+ * @return
+ */
+ HgStoreNode build();
+
+}
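
A usage sketch: the builder is obtained from `HgStoreNodeManager` (defined later in this patch) and backed by `GrpcStoreNodeBuilder`; the node ID and address are illustrative:

```java
import org.apache.hugegraph.store.client.HgStoreNode;
import org.apache.hugegraph.store.client.HgStoreNodeManager;

public class HgStoreNodeBuilderExample {

    public static void main(String[] args) {
        HgStoreNode node = HgStoreNodeManager.getInstance()
                                             .getNodeBuilder()
                                             .setNodeId(1001L)             // illustrative node ID
                                             .setAddress("127.0.0.1:8500") // illustrative address
                                             .build();
        System.out.println(node.getNodeId() + " @ " + node.getAddress());
    }
}
```
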
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeCandidates.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeCandidates.java
new file mode 100644
index 0000000000..d8735cdc6e
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeCandidates.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.List;
+
+/**
+ * created on 2021/10/12
+ */
+public final class HgStoreNodeCandidates {
+
+    List<HgStoreNode> nodeList;
+
+    HgStoreNodeCandidates(List<HgStoreNode> nodeList) {
+ this.nodeList = nodeList;
+ }
+
+ public int size() {
+ return this.nodeList.size();
+ }
+
+ public HgStoreNode getNode(int index) {
+ return this.nodeList.get(index);
+ }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeManager.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeManager.java
new file mode 100644
index 0000000000..84709f19a9
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeManager.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.client.grpc.GrpcStoreNodeBuilder;
+import org.apache.hugegraph.store.client.type.HgNodeStatus;
+import org.apache.hugegraph.store.client.type.HgStoreClientException;
+import org.apache.hugegraph.store.client.util.HgAssert;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * // TODO: Mapping to Store-Node-Cluster, one to one.
+ *
+ * created on 2021/10/11
+ *
+ * @version 0.2.0
+ */
+@ThreadSafe
+@Slf4j
+public final class HgStoreNodeManager {
+
+    private final static Set<String> CLUSTER_ID_SET = new HashSet<>();
+    private final static HgStoreNodeManager instance = new HgStoreNodeManager();
+
+    private final String clusterId;
+    private final Map<String, HgStoreNode> addressMap = new ConcurrentHashMap<>();
+    private final Map<Long, HgStoreNode> nodeIdMap = new ConcurrentHashMap<>();
+    private final Map<String, List<HgStoreNode>> graphNodesMap = new ConcurrentHashMap<>();
+
+ private HgStoreNodeProvider nodeProvider;
+ private HgStoreNodePartitioner nodePartitioner;
+ private HgStoreNodeNotifier nodeNotifier;
+
+ private HgStoreNodeManager() {
+ this.clusterId = HgStoreClientConst.DEFAULT_NODE_CLUSTER_ID;
+ }
+
+ private HgStoreNodeManager(String clusterId) {
+ synchronized (CLUSTER_ID_SET) {
+ if (CLUSTER_ID_SET.contains(clusterId)) {
+ throw new RuntimeException("The cluster [" + clusterId + "] has been existing.");
+ }
+ CLUSTER_ID_SET.add(clusterId);
+ this.clusterId = clusterId;
+ }
+ }
+
+ public static HgStoreNodeManager getInstance() {
+ return instance;
+ }
+
+ /**
+ * Return the HgStoreNodeBuilder
+ *
+ * @return
+ */
+ public HgStoreNodeBuilder getNodeBuilder() {
+ // TODO: Constructed by a provider that retrieved by SPI
+ return new GrpcStoreNodeBuilder(this, HgPrivate.getInstance());
+ }
+
+ /**
+     * Returns the HgStoreNode instance whose ID matches the argument.
+     *
+     * @param nodeId
+     * @return null when no instance matches the argument or the argument is invalid.
+ */
+ public HgStoreNode getStoreNode(Long nodeId) {
+ if (nodeId == null) {
+ return null;
+ }
+ return this.nodeIdMap.get(nodeId);
+ }
+
+ /**
+     * Applies for an HgStoreNode instance by graph name and node ID.
+     * CAUTION:
+     * It won't work if the user hasn't set an HgStoreNodeProvider via the setNodeProvider method.
+ *
+ * @param graphName
+ * @param nodeId
+ * @return
+ */
+ HgStoreNode applyNode(String graphName, Long nodeId) {
+ HgStoreNode node = this.nodeIdMap.get(nodeId);
+
+ if (node != null) {
+ return node;
+ }
+
+ if (this.nodeProvider == null) {
+ return null;
+ }
+
+ node = this.nodeProvider.apply(graphName, nodeId);
+
+ if (node == null) {
+            log.warn("Failed to apply a HgStoreNode instance from the nodeProvider [ "
+ + this.nodeProvider.getClass().getName() + " ].");
+ notifying(graphName, nodeId, HgNodeStatus.NOT_EXIST);
+ return null;
+ }
+
+ this.addNode(graphName, node);
+
+ return node;
+ }
+
+ private void notifying(String graphName, Long nodeId, HgNodeStatus status) {
+ if (this.nodeNotifier != null) {
+ try {
+ this.nodeNotifier.notice(graphName, HgStoreNotice.of(nodeId, status));
+ } catch (Throwable t) {
+ log.error("Failed to invoke " + this.nodeNotifier.getClass().getSimpleName() +
+ ":notice(" + nodeId + "," + status + ")", t);
+ }
+ }
+ }
+
+ /**
+ * @param graphName
+ * @param notice
+ * @return null: when there is no HgStoreNodeNotifier in the nodeManager;
+ * @throws HgStoreClientException
+ */
+ public Integer notifying(String graphName, HgStoreNotice notice) {
+
+ if (this.nodeNotifier != null) {
+
+            // Lock the notifier itself; locking the current thread would not serialize callers.
+            synchronized (this.nodeNotifier) {
+ try {
+ return this.nodeNotifier.notice(graphName, notice);
+ } catch (Throwable t) {
+ String msg =
+ "Failed to invoke " + this.nodeNotifier.getClass().getSimpleName() +
+ ", notice: [ " + notice + " ]";
+ log.error(msg, t);
+ throw new HgStoreClientException(msg);
+ }
+ }
+
+ }
+
+ return null;
+ }
+
+ /**
+     * Returns the HgStoreNode instances in charge of the graph passed as the argument.
+     *
+     * @param graphName
+     * @return null when nothing matches the argument or the argument is invalid.
+     */
+    public List<HgStoreNode> getStoreNodes(String graphName) {
+ if (HgAssert.isInvalid(graphName)) {
+ return null;
+ }
+
+ return this.graphNodesMap.get(graphName);
+ }
+
+ /**
+     * Adds a new store node. Returns the argument if no node with the same host+port exists;
+     * otherwise returns the HgStoreNode instance that was added earlier.
+ *
+ * @param storeNode
+ * @return
+ * @throws IllegalArgumentException when any argument is invalid.
+ */
+ public HgStoreNode addNode(HgStoreNode storeNode) {
+ HgAssert.isFalse(storeNode == null, "the argument: storeNode is null.");
+
+ Long nodeId = storeNode.getNodeId();
+
+ HgStoreNode node = null;
+
+ synchronized (this.nodeIdMap) {
+            node = this.addressMap.get(storeNode.getAddress());
+ if (node == null) {
+ node = storeNode;
+ this.nodeIdMap.put(nodeId, node);
+ this.addressMap.put(storeNode.getAddress(), node);
+ }
+ }
+
+ return node;
+ }
+
+ /**
+ * @param graphName
+ * @param storeNode
+ * @return
+ * @throws IllegalArgumentException when any argument is invalid.
+ */
+ public HgStoreNode addNode(String graphName, HgStoreNode storeNode) {
+ HgAssert.isFalse(HgAssert.isInvalid(graphName), "the argument is invalid: graphName");
+ HgStoreNode node = this.addNode(storeNode);
+
+        List<HgStoreNode> nodes = null;
+
+ synchronized (this.graphNodesMap) {
+ nodes = this.graphNodesMap.get(graphName);
+ if (nodes == null) {
+ nodes = new ArrayList<>();
+ this.graphNodesMap.put(graphName, nodes);
+ }
+ nodes.add(node);
+ }
+
+ return node;
+ }
+
+ public HgStoreNodePartitioner getNodePartitioner() {
+ return nodePartitioner;
+ }
+
+ public HgStoreNodeManager setNodePartitioner(HgStoreNodePartitioner nodePartitioner) {
+ HgAssert.isFalse(nodePartitioner == null, "the argument is invalid: nodePartitioner");
+ this.nodePartitioner = nodePartitioner;
+ return this;
+ }
+
+ public HgStoreNodeNotifier getNodeNotifier() {
+ return nodeNotifier;
+ }
+
+ public HgStoreNodeManager setNodeNotifier(HgStoreNodeNotifier nodeNotifier) {
+ HgAssert.isFalse(nodeNotifier == null, "the argument is invalid: nodeNotifier");
+ this.nodeNotifier = nodeNotifier;
+ return this;
+ }
+
+ public HgStoreNodeManager setNodeProvider(HgStoreNodeProvider nodeProvider) {
+ this.nodeProvider = nodeProvider;
+ return this;
+ }
+
+}
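
A wiring sketch that mirrors `HgStoreClient.setPdClient` earlier in this patch: one `HgStoreNodePartitionerImpl` instance is registered as provider, partitioner and notifier. The PD address and the `PDConfig.of(...)` factory are assumptions:

```java
import org.apache.hugegraph.pd.client.PDClient;
import org.apache.hugegraph.pd.client.PDConfig;
import org.apache.hugegraph.store.client.HgStoreNodeManager;
import org.apache.hugegraph.store.client.HgStoreNodePartitionerImpl;

public class NodeManagerWiringExample {

    public static void main(String[] args) {
        HgStoreNodeManager nodeManager = HgStoreNodeManager.getInstance();
        PDClient pdClient = PDClient.create(PDConfig.of("127.0.0.1:8686"));

        // One object fills all three roles: node provider, partitioner and notifier.
        HgStoreNodePartitionerImpl p = new HgStoreNodePartitionerImpl(pdClient, nodeManager);
        nodeManager.setNodeProvider(p)
                   .setNodePartitioner(p)
                   .setNodeNotifier(p);
    }
}
```
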
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeNotifier.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeNotifier.java
new file mode 100644
index 0000000000..0319d6c4de
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeNotifier.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+/**
+ * created on 2021/10/12
+ *
+ * @version 1.0.0
+ */
+public interface HgStoreNodeNotifier {
+
+ /**
+     * Invoked by the NodeManager when an exception or issue has happened.
+     *
+     * @param graphName
+     * @param storeNotice
+     * @return always return 0, regardless of the outcome.
+ */
+ int notice(String graphName, HgStoreNotice storeNotice);
+
+}
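
A minimal implementation sketch honoring the "always return 0" contract; `LoggingNotifier` is hypothetical:

```java
import org.apache.hugegraph.store.client.HgStoreNodeNotifier;
import org.apache.hugegraph.store.client.HgStoreNotice;

// Hypothetical notifier that only logs notices to stdout.
public class LoggingNotifier implements HgStoreNodeNotifier {

    @Override
    public int notice(String graphName, HgStoreNotice storeNotice) {
        System.out.println("graph=" + graphName + ", notice=" + storeNotice);
        return 0; // the contract asks for 0 regardless of the outcome
    }
}
```
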
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitioner.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitioner.java
new file mode 100644
index 0000000000..d540f68aa7
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitioner.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+
+/**
+ * created on 2021/10/12
+ *
+ * @version 1.0.0
+ */
+public interface HgStoreNodePartitioner {
+
+ /**
+     * The partition algorithm implementation, specialized by the user.
+     *
+     * @param builder   The HgNodePartitionerBuilder instance. It is supposed to be invoked
+     *                  directly by the user,
+     *                  e.g. builder.setPartitions(partitions);
+ * @param graphName
+ * @param startKey
+ * @param endKey
+ * @return status:
+ *
+ * - 0: The partitioner is OK.
+     * - 10: The partitioner did not work.
+ *
+ */
+ int partition(HgNodePartitionerBuilder builder, String graphName, byte[] startKey,
+ byte[] endKey);
+
+ /**
+ * @param builder
+ * @param graphName
+ * @param startCode hash code
+ * @param endCode hash code
+ * @return
+ */
+ default int partition(HgNodePartitionerBuilder builder, String graphName, int startCode,
+ int endCode) {
+ return this.partition(builder, graphName
+ , HgStoreClientConst.ALL_PARTITION_OWNER
+ , HgStoreClientConst.ALL_PARTITION_OWNER);
+ }
+
+ default int partition(HgNodePartitionerBuilder builder, String graphName, int partitionId) {
+ return this.partition(builder, graphName
+ , HgStoreClientConst.ALL_PARTITION_OWNER
+ , HgStoreClientConst.ALL_PARTITION_OWNER);
+ }
+}
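
A sketch of a trivial partitioner that routes every key range to a single fixed node; real deployments use the PD-backed `HgStoreNodePartitionerImpl` that follows in this patch:

```java
import java.util.Collections;

import org.apache.hugegraph.store.client.HgNodePartition;
import org.apache.hugegraph.store.client.HgNodePartitionerBuilder;
import org.apache.hugegraph.store.client.HgStoreNodePartitioner;

// Hypothetical partitioner: every graph and key range maps to one fixed store node.
public class SingleNodePartitioner implements HgStoreNodePartitioner {

    private final long nodeId;

    public SingleNodePartitioner(long nodeId) {
        this.nodeId = nodeId;
    }

    @Override
    public int partition(HgNodePartitionerBuilder builder, String graphName,
                         byte[] startKey, byte[] endKey) {
        builder.setPartitions(Collections.singleton(HgNodePartition.of(this.nodeId, -1)));
        return 0; // 0 means the partitioner worked
    }
}
```
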
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitionerImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitionerImpl.java
new file mode 100644
index 0000000000..dba939ec86
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodePartitionerImpl.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.hugegraph.pd.client.PDClient;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionUtils;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.store.client.type.HgNodeStatus;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class HgStoreNodePartitionerImpl implements HgStoreNodePartitioner,
+ HgStoreNodeProvider,
+ HgStoreNodeNotifier {
+
+ private PDClient pdClient;
+ private HgStoreNodeManager nodeManager;
+
+ protected HgStoreNodePartitionerImpl() {
+ }
+
+ public HgStoreNodePartitionerImpl(PDClient pdClient, HgStoreNodeManager nodeManager) {
+ this.pdClient = pdClient;
+ this.nodeManager = nodeManager;
+ }
+
+ /**
+     * Query the partition information; the result is returned via HgNodePartitionerBuilder.
+ */
+ @Override
+ public int partition(HgNodePartitionerBuilder builder, String graphName,
+ byte[] startKey, byte[] endKey) {
+ try {
+            HashSet<HgNodePartition> partitions = null;
+ if (HgStoreClientConst.ALL_PARTITION_OWNER == startKey) {
+                List<Metapb.Store> stores = pdClient.getActiveStores(graphName);
+ partitions = new HashSet<>(stores.size());
+ for (Metapb.Store store : stores) {
+ partitions.add(HgNodePartition.of(store.getId(), -1));
+ }
+
+ } else if (endKey == HgStoreClientConst.EMPTY_BYTES
+ || startKey == endKey || Arrays.equals(startKey, endKey)) {
+                KVPair<Metapb.Partition, Metapb.Shard> partShard =
+ pdClient.getPartition(graphName, startKey);
+ Metapb.Shard leader = partShard.getValue();
+ partitions = new HashSet<>();
+ partitions.add(HgNodePartition.of(leader.getStoreId(),
+ pdClient.keyToCode(graphName, startKey)));
+ } else {
+                log.warn("startOwnerKey is not equal to endOwnerKey, " +
+                         "which is meaningless; this is an error!");
+                List<Metapb.Store> stores = pdClient.getActiveStores(graphName);
+ partitions = new HashSet<>(stores.size());
+ for (Metapb.Store store : stores) {
+ partitions.add(HgNodePartition.of(store.getId(), -1));
+ }
+ }
+ builder.setPartitions(partitions);
+ } catch (PDException e) {
+ log.error("An error occurred while getting partition information :{}", e.getMessage());
+ throw new RuntimeException(e.getMessage(), e);
+ }
+ return 0;
+ }
+
+ @Override
+ public int partition(HgNodePartitionerBuilder builder, String graphName,
+ int startKey, int endKey) {
+ try {
+            HashSet<HgNodePartition> partitions = new HashSet<>();
+ Metapb.Partition partition = null;
+ while ((partition == null || partition.getEndKey() < endKey)
+ && startKey < PartitionUtils.MAX_VALUE) {
+                KVPair<Metapb.Partition, Metapb.Shard> partShard =
+ pdClient.getPartitionByCode(graphName, startKey);
+ if (partShard != null) {
+ partition = partShard.getKey();
+ Metapb.Shard leader = partShard.getValue();
+ partitions.add(HgNodePartition.of(leader.getStoreId(), startKey,
+ (int) partition.getStartKey(),
+ (int) partition.getEndKey()));
+ startKey = (int) partition.getEndKey();
+ } else {
+ break;
+ }
+ }
+ builder.setPartitions(partitions);
+ } catch (PDException e) {
+ log.error("An error occurred while getting partition information :{}", e.getMessage());
+ throw new RuntimeException(e.getMessage(), e);
+ }
+ return 0;
+ }
+
+ @Override
+ public int partition(HgNodePartitionerBuilder builder, String graphName,
+ int partitionId) {
+ try {
+            HashSet<HgNodePartition> partitions = new HashSet<>();
+ Metapb.Partition partition = null;
+
+            KVPair<Metapb.Partition, Metapb.Shard> partShard =
+ pdClient.getPartitionById(graphName, partitionId);
+ if (partShard != null) {
+ partition = partShard.getKey();
+ Metapb.Shard leader = partShard.getValue();
+ partitions.add(
+ HgNodePartition.of(leader.getStoreId(), (int) partition.getStartKey()));
+ }
+ builder.setPartitions(partitions);
+ } catch (PDException e) {
+ log.error("An error occurred while getting partition information :{}", e.getMessage());
+ throw new RuntimeException(e.getMessage(), e);
+ }
+ return 0;
+ }
+
+ /**
+ * Query the HgStore node information.
+ *
+ * @return the store node
+ */
+ @Override
+ public HgStoreNode apply(String graphName, Long nodeId) {
+ try {
+ Metapb.Store store = pdClient.getStore(nodeId);
+ return nodeManager.getNodeBuilder().setNodeId(store.getId())
+ .setAddress(store.getAddress()).build();
+ } catch (PDException e) {
+ throw new RuntimeException(e.getMessage(), e);
+ }
+ }
+
+ /**
+ * Notify the client to update its partition cache.
+ */
+ @Override
+ public int notice(String graphName, HgStoreNotice storeNotice) {
+ log.warn(storeNotice.toString());
+ if (storeNotice.getPartitionLeaders() != null) {
+ storeNotice.getPartitionLeaders().forEach((partId, leader) -> {
+ pdClient.updatePartitionLeader(graphName, partId, leader);
+ log.warn("updatePartitionLeader:{}-{}-{}",
+ graphName, partId, leader);
+ });
+ }
+ if (storeNotice.getPartitionIds() != null) {
+ storeNotice.getPartitionIds().forEach(partId -> {
+ pdClient.invalidPartitionCache(graphName, partId);
+ });
+ }
+ if (!storeNotice.getNodeStatus().equals(
+ HgNodeStatus.PARTITION_COMMON_FAULT)
+ && !storeNotice.getNodeStatus().equals(
+ HgNodeStatus.NOT_PARTITION_LEADER)) {
+ pdClient.invalidPartitionCache();
+ log.warn("invalidPartitionCache:{} ", storeNotice.getNodeStatus());
+ }
+ return 0;
+ }
+
+ public Metapb.Graph delGraph(String graphName) {
+ try {
+ return pdClient.delGraph(graphName);
+ } catch (PDException e) {
+ log.error("delGraph {} exception, {}", graphName, e.getMessage());
+ }
+ return null;
+ }
+
+ public void setNodeManager(HgStoreNodeManager nodeManager) {
+ this.nodeManager = nodeManager;
+ }
+}
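For orientation, here is a minimal wiring sketch showing how a client could plug this class into the node manager so that NodeTxSessionProxy can resolve partitions and nodes. The PDConfig/PDClient factory calls and the HgStoreNodeManager setter names are assumptions drawn from the surrounding hugegraph-pd and store-client modules, not definitions made by this patch.

    // Hypothetical wiring; PDClient.createInstance(PDConfig.of(...)) and the
    // setNodePartitioner/setNodeProvider/setNodeNotifier setters are assumed here.
    PDClient pdClient = PDClient.createInstance(PDConfig.of("127.0.0.1:8686"));
    HgStoreNodeManager nodeManager = HgStoreNodeManager.getInstance();
    HgStoreNodePartitionerImpl partitioner =
            new HgStoreNodePartitionerImpl(pdClient, nodeManager);
    nodeManager.setNodePartitioner(partitioner); // partition(...) fills HgNodePartitionerBuilder
    nodeManager.setNodeProvider(partitioner);    // apply(graphName, nodeId) builds an HgStoreNode
    nodeManager.setNodeNotifier(partitioner);    // notice(...) refreshes the PD partition cache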
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeProvider.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeProvider.java
new file mode 100644
index 0000000000..2d0a7b5ed5
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeProvider.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+/**
+ * created on 2021/10/27
+ */
+public interface HgStoreNodeProvider {
+
+ /**
+ * Apply for a new HgStoreNode instance.
+ *
+ * @param graphName the name of the graph
+ * @param nodeId the id of the store node
+ * @return a HgStoreNode instance
+ */
+ HgStoreNode apply(String graphName, Long nodeId);
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeSession.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeSession.java
new file mode 100644
index 0000000000..17387eebc1
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNodeSession.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import org.apache.hugegraph.store.HgStoreSession;
+
+/**
+ * created on 2021/10/11
+ *
+ * @version 0.1.0
+ */
+public interface HgStoreNodeSession extends HgStoreSession {
+
+ /**
+ * Return the name of the graph.
+ *
+ * @return the graph name
+ */
+ String getGraphName();
+
+ /**
+ * Return an instance of HgStoreNode, which provides the connection to the Store-Node machine.
+ *
+ * @return the store node of this session
+ */
+ HgStoreNode getStoreNode();
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNotice.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNotice.java
new file mode 100644
index 0000000000..083cb8d381
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreNotice.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.List;
+import java.util.Map;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.client.type.HgNodeStatus;
+import org.apache.hugegraph.store.client.util.HgAssert;
+
+/**
+ * 2021/11/16
+ */
+@NotThreadSafe
+public class HgStoreNotice {
+
+ private final Long nodeId;
+ private final HgNodeStatus nodeStatus;
+ private final String message;
+ private Map<Integer, Long> partitionLeaders;
+ private List<Integer> partitionIds;
+
+ private HgStoreNotice(Long nodeId, HgNodeStatus nodeStatus, String message) {
+ this.nodeId = nodeId;
+ this.nodeStatus = nodeStatus;
+ this.message = message;
+ }
+
+ public static HgStoreNotice of(Long nodeId, HgNodeStatus nodeStatus) {
+ HgAssert.isArgumentNotNull(nodeId, "nodeId");
+ HgAssert.isArgumentNotNull(nodeStatus, "nodeStatus");
+ return new HgStoreNotice(nodeId, nodeStatus, "");
+ }
+
+ public static HgStoreNotice of(Long nodeId, HgNodeStatus nodeStatus, String message) {
+ HgAssert.isArgumentNotNull(nodeId, "nodeId");
+ HgAssert.isArgumentNotNull(nodeStatus, "nodeStatus");
+ HgAssert.isArgumentNotNull(message, "message");
+
+ return new HgStoreNotice(nodeId, nodeStatus, message);
+ }
+
+ public Long getNodeId() {
+ return nodeId;
+ }
+
+ public HgNodeStatus getNodeStatus() {
+ return nodeStatus;
+ }
+
+ public String getMessage() {
+ return message;
+ }
+
+ public Map<Integer, Long> getPartitionLeaders() {
+ return partitionLeaders;
+ }
+
+ public HgStoreNotice setPartitionLeaders(Map<Integer, Long> partitionLeaders) {
+ this.partitionLeaders = partitionLeaders;
+ return this;
+ }
+
+ public List<Integer> getPartitionIds() {
+ return partitionIds;
+ }
+
+ public HgStoreNotice setPartitionIds(List<Integer> partitionIds) {
+ this.partitionIds = partitionIds;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ return "HgStoreNotice{" +
+ "nodeId=" + nodeId +
+ ", nodeStatus=" + nodeStatus +
+ ", message='" + message + '\'' +
+ ", partitionLeaders=" + partitionLeaders +
+ ", partitionIds=" + partitionIds +
+ '}';
+ }
+}
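As a quick usage illustration of the factory methods above, a notice for a node that has lost partition leadership could be built like this. The partition id 12 and the store ids are made-up values, and the Integer/Long map types follow the fields as reconstructed above; the notice is then handed to an HgStoreNodeNotifier such as HgStoreNodePartitionerImpl.notice(...).

    // Illustrative only: report that node 1001 is no longer the leader of partition 12
    // and that store 1002 now holds the leadership.
    HgStoreNotice notice = HgStoreNotice
            .of(1001L, HgNodeStatus.NOT_PARTITION_LEADER, "leader changed")
            .setPartitionLeaders(java.util.Collections.singletonMap(12, 1002L))
            .setPartitionIds(java.util.Collections.singletonList(12));
    partitioner.notice("hugegraph", notice); // updates the leader cache, invalidates partition 12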
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreService.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreService.java
new file mode 100644
index 0000000000..c0e2be6b59
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreService.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+public class HgStoreService {
+
+ private static final HgStoreService instance = new HgStoreService();
+
+ private HgStoreService() {
+ }
+
+ static HgStoreService of() {
+ return instance;
+ }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreSessionProvider.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreSessionProvider.java
new file mode 100644
index 0000000000..37fa51cb4a
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgStoreSessionProvider.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.HgSessionProvider;
+import org.apache.hugegraph.store.HgStoreSession;
+
+/**
+ * created on 2021/10/12
+ */
+@ThreadSafe
+public class HgStoreSessionProvider implements HgSessionProvider {
+
+ private final MultiNodeSessionFactory sessionFactory = MultiNodeSessionFactory.getInstance();
+
+ @Override
+ public HgStoreSession createSession(String graphName) {
+ return this.sessionFactory.createStoreSession(graphName);
+ }
+}
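To place this provider in the overall call path: createSession(...) delegates to MultiNodeSessionFactory, which (further down in this patch) returns a NodeTxSessionProxy, so a basic transactional write looks roughly like the sketch below. The graph name, table name and key bytes are placeholders, and HgOwnerKey.of(...) is assumed from the HgOwnerKey class added earlier in this patch.

    // Rough usage sketch, assuming the node manager was wired up beforehand
    // (see the note after HgStoreNodePartitionerImpl).
    HgStoreSession session = new HgStoreSessionProvider().createSession("hugegraph");
    byte[] owner = "v1".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    byte[] key = "v1-key".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    session.beginTx();                              // buffered by NodeTxExecutor
    session.put("g+v", HgOwnerKey.of(owner, key),
                "value".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    session.commit();                               // fans out to each store node and commits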
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTkvEntryImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTkvEntryImpl.java
new file mode 100644
index 0000000000..ab0c7fdce9
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTkvEntryImpl.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Arrays;
+import java.util.Objects;
+
+import org.apache.hugegraph.store.HgTkvEntry;
+
+/**
+ * created on 2021/10/14
+ */
+class HgTkvEntryImpl implements HgTkvEntry {
+
+ private final String table;
+ private final byte[] key;
+ private final byte[] value;
+
+ HgTkvEntryImpl(String table, byte[] key, byte[] value) {
+ this.table = table;
+ this.key = key;
+ this.value = value;
+ }
+
+ @Override
+ public String table() {
+ return this.table;
+ }
+
+ @Override
+ public byte[] key() {
+ return this.key;
+ }
+
+ @Override
+ public byte[] value() {
+ return this.value;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ HgTkvEntryImpl that = (HgTkvEntryImpl) o;
+ return Objects.equals(table, that.table) && Arrays.equals(key, that.key) &&
+ Arrays.equals(value, that.value);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = Objects.hash(table);
+ result = 31 * result + Arrays.hashCode(key);
+ result = 31 * result + Arrays.hashCode(value);
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ return "HgTkvEntryImpl{" +
+ "table='" + table + '\'' +
+ ", key=" + Arrays.toString(key) +
+ ", value=" + Arrays.toString(value) +
+ '}';
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTokvEntryImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTokvEntryImpl.java
new file mode 100644
index 0000000000..932864a55b
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/HgTokvEntryImpl.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Arrays;
+import java.util.Objects;
+
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgTokvEntry;
+
+/**
+ * created on 2021/10/14
+ */
+class HgTokvEntryImpl implements HgTokvEntry {
+
+ private final String table;
+ private final HgOwnerKey ownerKey;
+ private final byte[] value;
+
+ HgTokvEntryImpl(String table, HgOwnerKey ownerKey, byte[] value) {
+ this.table = table;
+ this.ownerKey = ownerKey;
+ this.value = value;
+ }
+
+ @Override
+ public String table() {
+ return this.table;
+ }
+
+ @Override
+ public HgOwnerKey ownerKey() {
+ return this.ownerKey;
+ }
+
+ @Override
+ public byte[] value() {
+ return this.value;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ HgTokvEntryImpl that = (HgTokvEntryImpl) o;
+ return Objects.equals(table, that.table) && Objects.equals(ownerKey, that.ownerKey) &&
+ Arrays.equals(value, that.value);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = Objects.hash(table, ownerKey);
+ result = 31 * result + Arrays.hashCode(value);
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ return "HgTokvEntryImpl{" +
+ "table='" + table + '\'' +
+ ", okv=" + ownerKey +
+ ", value=" + Arrays.toString(value) +
+ '}';
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/MultiNodeSessionFactory.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/MultiNodeSessionFactory.java
new file mode 100644
index 0000000000..ff7cde0db8
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/MultiNodeSessionFactory.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.HgStoreSession;
+
+/**
+ * created on 2021/10/12
+ */
+@ThreadSafe
+public final class MultiNodeSessionFactory {
+
+ // TODO multi-instance ?
+ private final static MultiNodeSessionFactory INSTANCE = new MultiNodeSessionFactory();
+ // TODO multi-instance ?
+ private final HgStoreNodeManager nodeManager = HgStoreNodeManager.getInstance();
+ // TODO: to be a chain assigned to each graph
+ //private HgStoreNodeDispatcher storeNodeDispatcher;
+
+ private MultiNodeSessionFactory() {
+ }
+
+ static MultiNodeSessionFactory getInstance() {
+ return INSTANCE;
+ }
+
+ HgStoreSession createStoreSession(String graphName) {
+ return buildProxy(graphName);
+ }
+
+ private HgStoreSession buildProxy(String graphName) {
+ //return new MultiNodeSessionProxy(graphName, nodeManager, storeNodeDispatcher);
+ //return new NodePartitionSessionProxy(graphName,nodeManager);
+ //return new NodeRetrySessionProxy(graphName,nodeManager);
+ return new NodeTxSessionProxy(graphName, nodeManager);
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTkv.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTkv.java
new file mode 100644
index 0000000000..e78ced4c10
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTkv.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Objects;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgStoreSession;
+
+/**
+ * created on 2021/10/26
+ */
+@ThreadSafe
+class NodeTkv {
+
+ private final HgNodePartition nodePartition;
+ private final String table;
+ private final HgOwnerKey key;
+ private final HgOwnerKey endKey;
+ private HgStoreSession session;
+
+ NodeTkv(HgNodePartition nodePartition, String table, HgOwnerKey key) {
+ this.nodePartition = nodePartition;
+ this.table = table;
+ this.key = key;
+ this.endKey = key;
+ this.key.setKeyCode(this.nodePartition.getKeyCode());
+ }
+
+ NodeTkv(HgNodePartition nodePartition, String table, HgOwnerKey key, int keyCode) {
+ this.nodePartition = nodePartition;
+ this.table = table;
+ this.key = key;
+ this.endKey = key;
+
+ this.key.setKeyCode(keyCode);
+ }
+
+ NodeTkv(HgNodePartition nodePartition, String table, HgOwnerKey startKey,
+ HgOwnerKey endKey) {
+ this.nodePartition = nodePartition;
+ this.table = table;
+ this.key = startKey;
+ this.endKey = endKey;
+ this.key.setKeyCode(nodePartition.getStartKey());
+ this.endKey.setKeyCode(nodePartition.getEndKey());
+ }
+
+ public Long getNodeId() {
+ return this.nodePartition.getNodeId();
+ }
+
+ public String getTable() {
+ return table;
+ }
+
+ public HgOwnerKey getKey() {
+ return key;
+ }
+
+ public HgOwnerKey getEndKey() {
+ return endKey;
+ }
+
+ public NodeTkv setKeyCode(int code) {
+ this.key.setKeyCode(code);
+ return this;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ NodeTkv nptKv = (NodeTkv) o;
+ return Objects.equals(nodePartition, nptKv.nodePartition) &&
+ Objects.equals(table, nptKv.table)
+ && Objects.equals(key, nptKv.key)
+ && Objects.equals(endKey, nptKv.endKey);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = Objects.hash(nodePartition, table, key, endKey);
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ return "NptKv{" +
+ "nodePartition=" + nodePartition +
+ ", table='" + table + '\'' +
+ ", key=" + key +
+ ", endKey=" + endKey +
+ '}';
+ }
+
+ public HgStoreSession getSession() {
+ return session;
+ }
+
+ public void setSession(HgStoreSession session) {
+ this.session = session;
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxExecutor.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxExecutor.java
new file mode 100644
index 0000000000..01eea1af79
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxExecutor.java
@@ -0,0 +1,431 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_LIST;
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.NODE_MAX_RETRYING_TIMES;
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.TX_SESSIONS_MAP_CAPACITY;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collector;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgStoreSession;
+import org.apache.hugegraph.store.client.type.HgStoreClientException;
+import org.apache.hugegraph.store.client.util.HgAssert;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.term.HgPair;
+import org.apache.hugegraph.store.term.HgTriple;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * 2021/11/18
+ */
+@Slf4j
+@NotThreadSafe
+final class NodeTxExecutor {
+
+ private static final String maxTryMsg =
+ "the number of retries reached the upper limit : " + NODE_MAX_RETRYING_TIMES +
+ ",caused by:";
+ private static final String msg =
+ "Not all tx-data delivered to real-node-session successfully.";
+
+ static {
+ System.setProperty("java.util.concurrent.ForkJoinPool.common.parallelism",
+ String.valueOf(Runtime.getRuntime().availableProcessors() * 2));
+ }
+
+ private final String graphName;
+ NodeTxSessionProxy proxy;
+ Collector<NodeTkv, ?, Map<Long, List<HgOwnerKey>>> collector = Collectors.groupingBy(
+ nkv -> nkv.getNodeId(), Collectors.mapping(NodeTkv::getKey, Collectors.toList()));
+ private Map<Long, HgStoreSession> sessions = new HashMap<>(TX_SESSIONS_MAP_CAPACITY, 1);
+ private boolean isTx;
+ private List<HgPair<HgTriple<String, HgOwnerKey, Object>,
+ Function<NodeTkv, Boolean>>> entries = new LinkedList<>();
+
+ private NodeTxExecutor(String graphName, NodeTxSessionProxy proxy) {
+ this.graphName = graphName;
+ this.proxy = proxy;
+ }
+
+ static NodeTxExecutor graphOf(String graphName, NodeTxSessionProxy proxy) {
+ return new NodeTxExecutor(graphName, proxy);
+ }
+
+ public boolean isTx() {
+ return isTx;
+ }
+
+ void setTx(boolean tx) {
+ isTx = tx;
+ }
+
+ void commitTx() {
+ if (!this.isTx) {
+ throw new IllegalStateException("It's not in tx state");
+ }
+
+ this.doCommit();
+ }
+
+ void rollbackTx() {
+ if (!this.isTx) {
+ return;
+ }
+ try {
+ this.sessions.values().stream().filter(HgStoreSession::isTx)
+ .forEach(HgStoreSession::rollback);
+ } catch (Throwable t) {
+ throw t;
+ } finally {
+ this.isTx = false;
+ this.sessions.clear();
+ }
+ }
+
+ void doCommit() {
+ try {
+ this.retryingInvoke(() -> {
+ if (this.entries.isEmpty()) {
+ return true;
+ }
+ AtomicBoolean allSuccess = new AtomicBoolean(true);
+ for (HgPair<HgTriple<String, HgOwnerKey, Object>, Function<NodeTkv, Boolean>> e :
+ this.entries) {
+ if (!doAction(e.getKey(), e.getValue())) {
+ allSuccess.set(false);
+ }
+ }
+ if (!allSuccess.get()) {
+ throw HgStoreClientException.of(msg);
+ }
+ AtomicReference<Throwable> throwable = new AtomicReference<>();
+ Collection<HgStoreSession> sessions = this.sessions.values();
+ sessions.parallelStream().forEach(e -> {
+ if (e.isTx()) {
+ try {
+ e.commit();
+ } catch (Throwable t) {
+ throwable.compareAndSet(null, t);
+ allSuccess.set(false);
+ }
+ }
+ });
+ if (!allSuccess.get()) {
+ if (isTx) {
+ try {
+ sessions.stream().forEach(HgStoreSession::rollback);
+ } catch (Exception e) {
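+ // Ignore rollback failures here; the original commit error is rethrown below.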
+
+ }
+ }
+ Throwable cause = throwable.get();
+ if (cause.getCause() != null) {
+ cause = cause.getCause();
+ }
+ if (cause instanceof HgStoreClientException) {
+ throw (HgStoreClientException) cause;
+ }
+ throw HgStoreClientException.of(cause);
+ }
+ return true;
+ });
+
+ } catch (Throwable t) {
+ throw t;
+ } finally {
+ this.isTx = false;
+ this.entries = new LinkedList<>();
+ this.sessions = new HashMap<>(TX_SESSIONS_MAP_CAPACITY, 1);
+ }
+ }
+
+ // private Function,
+ // List>> nodeStreamWrapper = nodeParams -> {
+ // if (nodeParams.getZ() == null) {
+ // return this.proxy.getNode(nodeParams.getX(),
+ // nodeParams.getY());
+ // } else {
+ // if (nodeParams.getZ() instanceof HgOwnerKey) {
+ // return this.proxy.getNode(nodeParams.getX(),
+ // nodeParams.getY(),
+ // (HgOwnerKey) nodeParams.getZ());
+ // } if ( nodeParams.getZ() instanceof Integer ){
+ // return this.proxy.doPartition(nodeParams.getX(), (Integer) nodeParams.getZ())
+ // .stream()
+ // .map(e -> new NodeTkv(e, nodeParams.getX(), nodeParams.getY(),
+ // nodeParams.getY()
+ // .getKeyCode()))
+ // .map(
+ // e -> new HgPair<>(this.proxy.getStoreNode(e.getNodeId
+ // ()), e)
+ // );
+ // }else {
+ // HgAssert.isTrue(nodeParams.getZ() instanceof byte[],
+ // "Illegal parameter to get node id");
+ // throw new NotImplementedException();
+ // }
+ // }
+ // };
+
+ // private Function,
+ // List>> nodeStreamWrapper = nodeParams -> {
+ // if (nodeParams.getZ() == null) {
+ // return this.proxy.getNode(nodeParams.getX(), nodeParams.getY());
+ // } else {
+ // if (nodeParams.getZ() instanceof HgOwnerKey) {
+ // return this.proxy.getNode(nodeParams.getX(), nodeParams.getY(),
+ // (HgOwnerKey) nodeParams.getZ());
+ // }
+ // if (nodeParams.getZ() instanceof Integer) {
+ // Collection nodePartitions = this.proxy.doPartition(nodeParams
+ // .getX(),
+ // (Integer)
+ // nodeParams
+ // .getZ());
+ // ArrayList> hgPairs = new ArrayList<>
+ // (nodePartitions.size());
+ // for (HgNodePartition nodePartition : nodePartitions) {
+ // NodeTkv nodeTkv = new NodeTkv(nodePartition, nodeParams.getX(), nodeParams
+ // .getY(),
+ // nodeParams.getY().getKeyCode());
+ // hgPairs.add(new HgPair<>(this.proxy.getStoreNode(nodeTkv.getNodeId()),
+ // nodeTkv));
+ //
+ // }
+ // return hgPairs;
+ // } else {
+ // HgAssert.isTrue(nodeParams.getZ() instanceof byte[], "Illegal parameter to get
+ // node id");
+ // throw new RuntimeException("not implemented");
+ // }
+ // }
+ // };
+
+ private boolean doAction(HgTriple<String, HgOwnerKey, Object> nodeParams,
+ Function<NodeTkv, Boolean> action) {
+ if (nodeParams.getZ() == null) {
+ return this.proxy.doAction(nodeParams.getX(), nodeParams.getY(), nodeParams.getY(),
+ action);
+ } else {
+ if (nodeParams.getZ() instanceof HgOwnerKey) {
+ boolean result = this.proxy.doAction(nodeParams.getX(), nodeParams.getY(),
+ (HgOwnerKey) nodeParams.getZ(), action);
+ return result;
+ }
+ if (nodeParams.getZ() instanceof Integer) {
+ return this.proxy.doAction(nodeParams.getX(), nodeParams.getY(),
+ (Integer) nodeParams.getZ(), action);
+ } else {
+ HgAssert.isTrue(nodeParams.getZ() instanceof byte[],
+ "Illegal parameter to get node id");
+ throw new RuntimeException("not implemented");
+ }
+ }
+ }
+
+ boolean prepareTx(HgTriple<String, HgOwnerKey, Object> nodeParams,
+ Function<NodeTkv, Boolean> sessionMapper) {
+ if (this.isTx) {
+ return this.entries.add(new HgPair<>(nodeParams, sessionMapper));
+ } else {
+ return this.isAllTrue(nodeParams, sessionMapper);
+ }
+ }
+
+ public HgStoreSession openNodeSession(HgStoreNode node) {
+ HgStoreSession res = this.sessions.get(node.getNodeId());
+ if (res == null) {
+ this.sessions.put(node.getNodeId(), (res = node.openSession(this.graphName)));
+ }
+ if (this.isTx) {
+ res.beginTx();
+ }
+
+ return res;
+ }
+
+ <R> R limitOne(
+ Supplier<Stream<HgPair<HgStoreNode, NodeTkv>>> nodeStreamSupplier,
+ Function<SessionData<NodeTkv>, R> sessionMapper, R emptyObj) {
+
+ Optional<R> res = retryingInvoke(
+ () -> nodeStreamSupplier.get()
+ .parallel()
+ .map(
+ pair -> new SessionData<>(
+ openNodeSession(pair.getKey()),
+ pair.getValue())
+ ).map(sessionMapper)
+ .filter(
+ r -> isValid(r)
+ )
+ .findAny()
+ .orElseGet(() -> emptyObj)
+ );
+ return res.orElse(emptyObj);
+ }
+
+ <R> List<R> toList(Function<Long, HgStoreNode> nodeFunction
+ , List<HgOwnerKey> keyList
+ , Function<HgOwnerKey, Stream<NodeTkv>> flatMapper
+ , Function<SessionData<List<HgOwnerKey>>, List<R>> sessionMapper) {
+ Optional<List<R>> res = retryingInvoke(
+ () -> keyList.stream()
+ .flatMap(flatMapper)
+ .collect(collector)
+ .entrySet()
+ .stream()
+ .map(
+ e -> new SessionData<>
+ (
+ openNodeSession(
+ nodeFunction.apply(e.getKey())),
+ e.getValue()
+ )
+ )
+ .parallel()
+ .map(sessionMapper)
+ .flatMap(
+ e -> e.stream()
+ )
+ //.distinct()
+ .collect(Collectors.toList())
+ );
+
+ return res.orElse(EMPTY_LIST);
+ }
+
+ private boolean isAllTrue(HgTriple<String, HgOwnerKey, Object> nodeParams,
+ Function<NodeTkv, Boolean> action) {
+ Optional<Boolean> res = retryingInvoke(() -> doAction(nodeParams, action));
+ return res.orElse(false);
+ }
+
+ boolean isAllTrue(Supplier<Stream<HgPair<HgStoreNode, NodeTkv>>> dataSource,
+ Function<SessionData<NodeTkv>, Boolean> action) {
+ Optional<Boolean> res = retryingInvoke(
+ () -> dataSource.get()
+ .parallel()
+ .map(
+ pair -> new SessionData<>(
+ openNodeSession(pair.getKey()),
+ pair.getValue())
+ ).map(action)
+ .allMatch(Boolean::booleanValue)
+ );
+
+ return res.orElse(false);
+ }
+
+ boolean ifAnyTrue(Supplier<Stream<HgPair<HgStoreNode, NodeTkv>>> nodeStreamSupplier
+ , Function<SessionData<NodeTkv>, Boolean> sessionMapper) {
+
+ Optional<Boolean> res = retryingInvoke(
+ () -> nodeStreamSupplier.get()
+ .parallel()
+ .map(
+ pair -> new SessionData<>(
+ openNodeSession(pair.getKey()),
+ pair.getValue())
+ )
+ .map(sessionMapper)
+ .anyMatch(Boolean::booleanValue)
+ );
+
+ return res.orElse(false);
+ }
+
+ <T> Optional<T> retryingInvoke(Supplier<T> supplier) {
+ return IntStream.rangeClosed(0, NODE_MAX_RETRYING_TIMES).boxed()
+ .map(
+ i -> {
+ T buffer = null;
+ try {
+ buffer = supplier.get();
+ } catch (Throwable t) {
+ if (i + 1 <= NODE_MAX_RETRYING_TIMES) {
+ try {
+ int sleepTime;
+ // For the first three attempts, retry once per second
+ if (i < 3) {
+ sleepTime = 1;
+ } else {
+ // After that, increase the interval with each attempt
+ sleepTime = i - 1;
+ }
+ log.info("Waiting {} seconds " +
+ "for the next try.",
+ sleepTime);
+ Thread.sleep(sleepTime * 1000L);
+ } catch (InterruptedException e) {
+ log.error("Failed to sleep", e);
+ }
+ } else {
+ log.error(maxTryMsg, t);
+ throw HgStoreClientException.of(
+ t.getMessage(), t);
+ }
+ }
+ return buffer;
+ }
+ )
+ .filter(e -> e != null)
+ .findFirst();
+
+ }
+
+ private boolean isValid(Object obj) {
+ if (obj == null) {
+ return false;
+ }
+
+ if (HgStoreClientConst.EMPTY_BYTES.equals(obj)) {
+ return false;
+ }
+
+ return !EMPTY_LIST.equals(obj);
+ }
+
+ class SessionData<T> {
+
+ HgStoreSession session;
+ T data;
+
+ SessionData(HgStoreSession session, T data) {
+ this.session = session;
+ this.data = data;
+ }
+
+ }
+}
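The retry helper above sleeps one second after each of the first three failed attempts and then (i - 1) seconds from the fourth attempt on, up to NODE_MAX_RETRYING_TIMES. A small standalone sketch of that backoff schedule, using a placeholder value of 10 for the retry limit:

    // Mirrors the sleepTime computation in retryingInvoke(): 1s for attempts 0..2,
    // then (i - 1) seconds for later attempts.
    int maxRetries = 10; // placeholder for NODE_MAX_RETRYING_TIMES
    for (int i = 0; i < maxRetries; i++) {
        int sleepTime = (i < 3) ? 1 : i - 1;
        System.out.println("attempt " + i + " failed -> sleep " + sleepTime + "s");
    }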
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxSessionProxy.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxSessionProxy.java
new file mode 100644
index 0000000000..066f96893d
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/NodeTxSessionProxy.java
@@ -0,0 +1,887 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import static java.util.stream.Collectors.groupingBy;
+import static org.apache.hugegraph.store.client.util.HgAssert.isArgumentNotNull;
+import static org.apache.hugegraph.store.client.util.HgAssert.isArgumentValid;
+import static org.apache.hugegraph.store.client.util.HgAssert.isFalse;
+import static org.apache.hugegraph.store.client.util.HgStoreClientConst.EMPTY_STRING;
+import static org.apache.hugegraph.store.client.util.HgStoreClientUtil.err;
+import static org.apache.hugegraph.store.client.util.HgStoreClientUtil.toStr;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvOrderedIterator;
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgScanQuery;
+import org.apache.hugegraph.store.HgStoreSession;
+import org.apache.hugegraph.store.client.grpc.KvBatchScanner;
+import org.apache.hugegraph.store.client.grpc.KvCloseableIterator;
+import org.apache.hugegraph.store.client.util.HgAssert;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.client.util.HgStoreClientUtil;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq.Builder;
+import org.apache.hugegraph.store.term.HgPair;
+import org.apache.hugegraph.store.term.HgTriple;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2022/01/19
+ *
+ * @version 0.6.0 added batch scan on 2022/03/03
+ */
+@Slf4j
+@NotThreadSafe
+class NodeTxSessionProxy implements HgStoreSession {
+
+ private final HgStoreNodeManager nodeManager;
+ private final HgStoreNodePartitioner nodePartitioner;
+ private final String graphName;
+ private final NodeTxExecutor txExecutor;
+
+ NodeTxSessionProxy(String graphName, HgStoreNodeManager nodeManager) {
+ this.nodeManager = nodeManager;
+ this.graphName = graphName;
+ this.nodePartitioner = this.nodeManager.getNodePartitioner();
+ this.txExecutor = NodeTxExecutor.graphOf(this.graphName, this);
+
+ isFalse(this.nodePartitioner == null,
+ "Failed to retrieve the node-partitioner from node-manager.");
+ }
+
+ @Override
+ public void beginTx() {
+ this.txExecutor.setTx(true);
+ }
+
+ @Override
+ public void commit() {
+ this.txExecutor.commitTx();
+ }
+
+ @Override
+ public void rollback() {
+ this.txExecutor.rollbackTx();
+ }
+
+ @Override
+ public boolean isTx() {
+ return this.txExecutor.isTx();
+ }
+
+ @Override
+ public boolean put(String table, HgOwnerKey ownerKey, byte[] value) {
+ // isArgumentValid(table, "table");
+ // isArgumentNotNull(ownerKey, "ownerKey");
+ // log.info("put -> graph: {}, table: {}, key: {}, value: {}",
+ // graphName, table, ownerKey, toByteStr(value));
+ // return this.txExecutor.prepareTx(
+ // () -> getNodeStream(table, ownerKey),
+ // e -> e.session.put(table, e.data.getKey(), value)
+ // );
+ return this.txExecutor.prepareTx(new HgTriple(table, ownerKey, null),
+ e -> e.getSession().put(table,
+ e.getKey(),
+ value));
+ }
+
+ @Override
+ public boolean directPut(String table, int partitionId, HgOwnerKey ownerKey, byte[] value) {
+ isArgumentValid(table, "table");
+ isArgumentNotNull(ownerKey, "ownerKey");
+
+ return this.txExecutor.prepareTx(
+ new HgTriple(table, ownerKey, partitionId),
+ e -> e.getSession().put(table, e.getKey(), value)
+ );
+ }
+
+ @Override
+ public boolean delete(String table, HgOwnerKey ownerKey) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(ownerKey == null, "The argument is invalid: ownerKey");
+
+ if (log.isDebugEnabled()) {
+ log.debug("delete -> graph: {}, table: {}, key: {}"
+ , graphName, table, toStr(ownerKey));
+ }
+
+ return this.txExecutor
+ .prepareTx(
+ new HgTriple(table, ownerKey, null),
+ e -> e.getSession().delete(table, e.getKey())
+ );
+ }
+
+ @Override
+ public boolean deleteSingle(String table, HgOwnerKey ownerKey) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(ownerKey == null, "The argument is invalid: ownerKey");
+
+ if (log.isDebugEnabled()) {
+ log.debug("deleteSingle -> graph: {}, table: {}, key: {}"
+ , graphName, table, toStr(ownerKey));
+ }
+
+ return this.txExecutor
+ .prepareTx(
+ new HgTriple(table, ownerKey, null),
+ e -> e.getSession().deleteSingle(table, e.getKey())
+ );
+ }
+
+ @Override
+ public boolean deletePrefix(String table, HgOwnerKey prefix) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(prefix == null, "The argument is invalid: prefix");
+
+ if (log.isDebugEnabled()) {
+ log.debug("deletePrefix -> graph: {}, table: {}, prefix: {}"
+ , graphName, table, toStr(prefix));
+ }
+
+ return this.txExecutor
+ .prepareTx(
+ new HgTriple(table, prefix, null),
+ e -> e.getSession().deletePrefix(table, e.getKey())
+ );
+ }
+
+ @Override
+ public boolean deleteRange(String table, HgOwnerKey start, HgOwnerKey end) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(start == null, "The argument is invalid: start");
+ HgAssert.isFalse(end == null, "The argument is invalid: end");
+
+ if (log.isDebugEnabled()) {
+ log.debug("deleteRange -> graph: {}, table: {}, start: {}, end: {}"
+ , graphName, table, toStr(start), toStr(end));
+ }
+
+ return this.txExecutor
+ .prepareTx(
+ new HgTriple(table, start, end),
+ e -> e.getSession().deleteRange(table, e.getKey(), e.getEndKey())
+ );
+ }
+
+ @Override
+ public boolean merge(String table, HgOwnerKey key, byte[] value) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(key == null, "The argument is invalid: key");
+ HgAssert.isFalse(value == null, "The argument is invalid: value");
+
+ if (log.isDebugEnabled()) {
+ log.debug("merge -> graph: {}, table: {}, key: {}, value: {}"
+ , graphName, table, toStr(key), toStr(value));
+ }
+
+ return this.txExecutor
+ .prepareTx(
+ new HgTriple(table, key, value),
+ e -> e.getSession().merge(table, e.getKey(), value)
+ );
+ }
+
+ /*--- tx end ---*/
+
+ @Override
+ public byte[] get(String table, HgOwnerKey ownerKey) {
+ isArgumentValid(table, "table");
+ isArgumentNotNull(ownerKey, "ownerKey");
+
+ return this.txExecutor
+ .limitOne(
+ () -> this.getNodeStream(table, ownerKey),
+ e -> e.session.get(table, e.data.getKey()), HgStoreClientConst.EMPTY_BYTES
+ );
+ }
+
+ @Override
+ public boolean clean(int partId) {
+ Collection<HgNodePartition> nodes = this.doPartition("", partId);
+ return nodes.parallelStream()
+ .map(
+ e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+ .clean(partId)
+ ).findFirst().get();
+ }
+
+ @Override
+ @Deprecated
+ public List batchGetOwner(String table, List keyList) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(HgAssert.isInvalid(keyList), "The argument is invalid: keyList");
+
+ return this.txExecutor
+ .toList(
+ (l) -> this.getStoreNode(l),
+ keyList,
+ key -> this.toNodeTkvList(table, key, key).stream(),
+ e -> e.session.batchGetOwner(table, e.data)
+ );
+ }
+
+ @Override
+ public HgKvIterator batchPrefix(String table, List keyList) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(HgAssert.isInvalid(keyList), "The argument is invalid: keyList");
+ return this.toHgKvIteratorProxy(
+ this.txExecutor
+ .toList(
+ (l) -> this.getStoreNode(l),
+ keyList,
+ key -> this.toNodeTkvList(table, key, key).stream(),
+ e -> Collections.singletonList(e.session.batchPrefix(table, e.data))
+ )
+ , Long.MAX_VALUE);
+ }
+
+ @Override
+ public boolean truncate() {
+ return this.txExecutor
+ .isAllTrue(
+ () -> this.getNodeStream(EMPTY_STRING),
+ e -> e.session.truncate()
+ );
+ }
+
+ @Override
+ public boolean existsTable(String table) {
+ return this.txExecutor
+ .ifAnyTrue(
+ () -> this.getNodeStream(EMPTY_STRING),
+ e -> e.session.existsTable(table)
+ );
+ }
+
+ @Override
+ public boolean createTable(String table) {
+ return this.txExecutor
+ .isAllTrue(
+ () -> this.getNodeStream(EMPTY_STRING),
+ e -> e.session.createTable(table)
+ );
+ }
+
+ @Override
+ public boolean deleteTable(String table) {
+ return this.txExecutor
+ .isAllTrue(
+ () -> this.getNodeStream(EMPTY_STRING),
+ e -> e.session.deleteTable(table)
+ );
+ }
+
+ @Override
+ public boolean dropTable(String table) {
+ return this.txExecutor
+ .isAllTrue(
+ () -> this.getNodeStream(table),
+ e -> e.session.dropTable(table)
+ );
+ }
+
+ @Override
+ public boolean deleteGraph(String graph) {
+ return this.txExecutor
+ .isAllTrue(
+ () -> this.getNodeStream(EMPTY_STRING),
+ e -> e.session.deleteGraph(graph)
+ );
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table) {
+ return scanIterator(table, 0);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, byte[] query) {
+ return scanIterator(table, 0, query);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, long limit) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+
+ return this.toHgKvIteratorProxy(
+ this.toNodeTkvList(table)
+ .parallelStream()
+ .map(
+ e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+ .scanIterator(e.getTable(), limit)
+ )
+ .collect(Collectors.toList())
+ , limit);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, long limit, byte[] query) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+
+ return this.toHgKvIteratorProxy(
+ this.toNodeTkvList(table)
+ .parallelStream()
+ .map(
+ e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+ .scanIterator(e.getTable(), e.getKey(), limit, query)
+ )
+ .collect(Collectors.toList())
+ , limit);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix) {
+ return scanIterator(table, keyPrefix, 0);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix, long limit) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(keyPrefix == null, "The argument is invalid: keyPrefix");
+
+ return this.toHgKvIteratorProxy(
+ this.toNodeTkvList(table, keyPrefix)
+ .parallelStream()
+ .map(
+ e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+ .scanIterator(e.getTable(), e.getKey(), limit)
+ )
+ .collect(Collectors.toList())
+ , limit);
+
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix, long limit,
+ byte[] query) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(keyPrefix == null, "The argument is invalid: keyPrefix");
+
+ return this.toHgKvIteratorProxy(
+ this.toNodeTkvList(table, keyPrefix)
+ .parallelStream()
+ .map(
+ e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+ .scanIterator(e.getTable(), e.getKey(), limit, query)
+ )
+ .collect(Collectors.toList())
+ , limit);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey startKey,
+ HgOwnerKey endKey) {
+ return this.scanIterator(table, startKey, endKey, 0, null);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey startKey,
+ HgOwnerKey endKey, long limit) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(startKey == null, "The argument is invalid: startKey");
+ HgAssert.isFalse(endKey == null, "The argument is invalid: endKey");
+
+ return this.toHgKvIteratorProxy(
+ this.toNodeTkvList(table, startKey, endKey)
+ .parallelStream()
+ .map(
+ e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+ .scanIterator(e.getTable(), e.getKey(), e.getEndKey(), limit)
+ )
+ .collect(Collectors.toList())
+ , limit);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey
+ , long limit, byte[] query) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(startKey == null, "The argument is invalid: startKey");
+ HgAssert.isFalse(endKey == null, "The argument is invalid: endKey");
+
+ return this.toHgKvIteratorProxy(
+ this.toNodeTkvList(table, startKey, endKey)
+ .parallelStream()
+ .map(
+ e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+ .scanIterator(e.getTable(), e.getKey(), e.getEndKey(), limit,
+ query)
+ )
+ .collect(Collectors.toList())
+ , limit);
+
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey startKey, HgOwnerKey endKey
+ , long limit, int scanType, byte[] query) {
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ HgAssert.isFalse(startKey == null, "The argument is invalid: startKey");
+ HgAssert.isFalse(endKey == null, "The argument is invalid: endKey");
+
+ return this.toHgKvIteratorProxy(
+ this.toNodeTkvList(table, startKey, endKey)
+ .parallelStream()
+ .map(
+ e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+ .scanIterator(e.getTable(), e.getKey(), e.getEndKey(), limit,
+ scanType, query)
+ )
+ .collect(Collectors.toList())
+ , limit);
+
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, int codeFrom, int codeTo,
+ int scanType, byte[] query) {
+ if (log.isDebugEnabled()) {
+ log.debug("graph: {}, table: {}, codeFrom: {}, codeTo: {}, scanType: {}, query: {}"
+ , graphName, table, codeFrom, codeTo, scanType, HgStoreClientUtil.toStr(query));
+ }
+
+ HgAssert.isFalse(HgAssert.isInvalid(table), "The argument is invalid: table");
+ return this.toHgKvIteratorProxy(
+ this.toNodeTkvList(table, codeFrom, codeTo)
+ .parallelStream()
+ .map(
+ e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+ .scanIterator(e.getTable()
+ , e.getKey().getKeyCode()
+ , e.getEndKey().getKeyCode(),
+ scanType, query)
+ )
+ .collect(Collectors.toList())
+ , 0);
+
+ }
+
+ @Override
+ public HgKvIterator scanIterator(Builder scanReqBuilder) {
+ List<NodeTkv> nodeTKvs = this.toNodeTkvList(scanReqBuilder);
+ Function<NodeTkv, HgKvIterator<HgKvEntry>> hgKvIteratorFunction = e -> {
+ HgStoreSession session = this.getStoreNode(e.getNodeId())
+ .openSession(this.graphName);
+ return session.scanIterator(scanReqBuilder);
+ };
+ List iterators = nodeTKvs.parallelStream()
+ .map(hgKvIteratorFunction)
+ .collect(Collectors.toList());
+ return this.toHgKvIteratorProxy(iterators, scanReqBuilder.getLimit());
+ }
+
+ @Override
+ public long count(String table) {
+ return this.toNodeTkvList(table)
+ .parallelStream()
+ .map(
+ e -> this.getStoreNode(e.getNodeId()).openSession(this.graphName)
+ .count(e.getTable())
+ )
+ .collect(Collectors.summingLong(l -> l));
+ }
+
+ @Override
+ public List<HgKvIterator<HgKvEntry>> scanBatch(HgScanQuery scanQuery) {
+ HgAssert.isArgumentNotNull(scanQuery, "scanQuery");
+
+ return this.toTkvMapFunc(scanQuery.getScanMethod())
+ .apply(scanQuery)
+ .entrySet()
+ .stream()
+ .map(e ->
+ this.getStoreNode(e.getKey())
+ .openSession(this.graphName)
+ .scanBatch(toScanQueryFunc(scanQuery.getScanMethod())
+ .apply(scanQuery.getTable(), e.getValue())
+ .setQuery(scanQuery.getQuery())
+ .setLimit(scanQuery.getLimit())
+ .setPerKeyLimit(scanQuery.getPerKeyLimit())
+ .setPerKeyMax((scanQuery.getPerKeyMax()))
+ .setScanType(scanQuery.getScanType())
+ .build()
+ )
+ )
+ //.peek(e->log.info("{}",e))
+ .flatMap(List::stream)
+ .collect(Collectors.toList());
+
+ }
+
+ @Override
+ public KvCloseableIterator<HgKvIterator<HgKvEntry>> scanBatch2(HgScanQuery scanQuery) {
+ return scanBatch3(scanQuery, null);
+ }
+
+ @Override
+ public KvCloseableIterator<HgKvIterator<HgKvEntry>> scanBatch3(HgScanQuery scanQuery,
+ KvCloseableIterator iterator) {
+ KvCloseableIterator notifierWrap = KvBatchScanner.ofMerger(scanQuery, (query, notifier) -> {
+ Map<Long, List<NodeTkv>> nodeTkvs = this.toTkvMapFunc(scanQuery.getScanMethod())
+ .apply(query);
+
+ nodeTkvs.forEach((storeId, tkvs) -> {
+ this.getStoreNode(storeId)
+ .openSession(this.graphName)
+ .scanBatch3(toScanQueryFunc(scanQuery.getScanMethod())
+ .apply(scanQuery.getTable(), tkvs)
+ .setQuery(scanQuery.getQuery())
+ .setLimit(scanQuery.getLimit())
+ .setSkipDegree(scanQuery.getSkipDegree())
+ .setPerKeyLimit(scanQuery.getPerKeyLimit())
+ .setPerKeyMax((scanQuery.getPerKeyMax()))
+ .setScanType(scanQuery.getScanType())
+ .setOrderType(scanQuery.getOrderType())
+ .build(), notifier
+ );
+ });
+ return true;
+ });
+ return notifierWrap;
+ }
+
+ private Function<HgScanQuery, Map<Long, List<NodeTkv>>> toTkvMapFunc(
+ HgScanQuery.ScanMethod scanMethod) {
+ switch (scanMethod) {
+ case RANGE:
+ return scanQuery -> {
+ List<HgOwnerKey> starts = scanQuery.getStartList();
+ List<HgOwnerKey> ends = scanQuery.getEndList();
+ int size = starts.size();
+ return IntStream.range(0, size)
+ .boxed()
+ .map(i -> this.toNodeTkvList(scanQuery.getTable(),
+ starts.get(i), ends.get(i)))
+ .flatMap(List::stream)
+ .collect(groupingBy(NodeTkv::getNodeId));
+ };
+ case PREFIX:
+ return scanQuery ->
+ scanQuery.getPrefixList()
+ .stream()
+ .map(keyPrefix -> this.toNodeTkvList(scanQuery.getTable(),
+ keyPrefix))
+ .flatMap(List::stream)
+ .collect(groupingBy(NodeTkv::getNodeId));
+
+ default:
+ return scanQuery -> this.toNodeTkvList(scanQuery.getTable())
+ .stream()
+ .collect(groupingBy(NodeTkv::getNodeId));
+ }
+ }
+
+ private BiFunction<String, List<NodeTkv>, HgScanQuery.ScanBuilder> toScanQueryFunc(
+ HgScanQuery.ScanMethod scanMethod) {
+ switch (scanMethod) {
+ case RANGE:
+ return (table, tkvList) -> {
+ List<HgOwnerKey> startList = new LinkedList<>();
+ List<HgOwnerKey> endList = new LinkedList<>();
+
+ tkvList.stream().forEach(e -> {
+ startList.add(e.getKey());
+ endList.add(e.getEndKey());
+ });
+
+ return HgScanQuery.ScanBuilder.rangeOf(table, startList, endList);
+ };
+ case PREFIX:
+ return (table, tkvList) ->
+ HgScanQuery.ScanBuilder.prefixOf(table,
+ tkvList.stream()
+ .map(e -> e.getKey())
+ .collect(Collectors.toList())
+ );
+ default:
+ return (table, tkvList) -> HgScanQuery.ScanBuilder.tableOf(table);
+ }
+
+ }
+
+ /*-- common --*/
+ private HgKvIterator toHgKvIteratorProxy(List iteratorList, long limit) {
+ boolean isAllOrderedLimiter = iteratorList.stream()
+ .allMatch(
+ e -> e instanceof HgKvOrderedIterator);
+
+ HgKvIterator iterator;
+ if (isAllOrderedLimiter) {
+ iterator = new SequencedIterator(iteratorList.stream()
+ .map(e -> (HgKvOrderedIterator) e)
+ .collect(Collectors.toList()), limit);
+ } else {
+ iterator = new TopWorkIteratorProxy(iteratorList, limit);
+ }
+
+ return iterator;
+ }
+
+ HgStoreNode getStoreNode(Long nodeId) {
+ HgStoreNode res = this.nodeManager.applyNode(this.graphName, nodeId);
+
+ if (res == null) {
+ throw err("Failed to apply for an instance of HgStoreNode from node-manager.");
+ }
+
+ return res;
+ }
+
+ public boolean doAction(String table, HgOwnerKey startKey, HgOwnerKey endKey,
+ Function<NodeTkv, Boolean> action) {
+ Collection<HgNodePartition> partitions =
+ doPartition(table, startKey.getOwner(), endKey.getOwner());
+ for (HgNodePartition partition : partitions) {
+ HgStoreNode storeNode = this.getStoreNode(partition.getNodeId());
+ HgStoreSession session = this.txExecutor.openNodeSession(storeNode);
+ NodeTkv data = new NodeTkv(partition, table, startKey, endKey);
+ data.setSession(session);
+ if (!action.apply(data)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public boolean doAction(String table, HgOwnerKey startKey, Integer code,
+ Function<NodeTkv, Boolean> action) {
+ Collection<HgNodePartition> partitions = this.doPartition(table, code);
+ for (HgNodePartition partition : partitions) {
+ HgStoreNode storeNode = this.getStoreNode(partition.getNodeId());
+ HgStoreSession session = this.txExecutor.openNodeSession(storeNode);
+ NodeTkv data = new NodeTkv(partition, table, startKey, code);
+ data.setSession(session);
+ if (!action.apply(data)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private List<NodeTkv> toNodeTkvList(Builder scanReqBuilder) {
+ // TODO: get the owner from the scan request builder
+ String table = scanReqBuilder.getTable();
+ HgOwnerKey ownerKey = HgStoreClientConst.ALL_PARTITION_OWNER_KEY;
+ byte[] allOwner = ownerKey.getOwner();
+ Collection<HgNodePartition> partitions = doPartition(table,
+ allOwner,
+ allOwner);
+ List<NodeTkv> nodeTkvs = new ArrayList<>(partitions.size());
+ for (HgNodePartition partition : partitions) {
+ nodeTkvs.add(new NodeTkv(partition, table, ownerKey, ownerKey));
+ }
+ return nodeTkvs;
+ }
+
+ private List<NodeTkv> toNodeTkvList(String table) {
+ Collection<HgNodePartition> partitions = doPartition(table,
+ HgStoreClientConst.ALL_PARTITION_OWNER_KEY.getOwner(),
+ HgStoreClientConst.ALL_PARTITION_OWNER_KEY.getOwner());
+ ArrayList<NodeTkv> nodeTkvs = new ArrayList<>(partitions.size());
+ for (HgNodePartition partition : partitions) {
+ nodeTkvs.add(new NodeTkv(partition, table, HgStoreClientConst.ALL_PARTITION_OWNER_KEY,
+ HgStoreClientConst.ALL_PARTITION_OWNER_KEY));
+ }
+ return nodeTkvs;
+ }
+
+ private List<NodeTkv> toNodeTkvList(String table, HgOwnerKey ownerKey) {
+ Collection<HgNodePartition> partitions =
+ doPartition(table, ownerKey.getOwner(), ownerKey.getOwner());
+ ArrayList<NodeTkv> nodeTkvs = new ArrayList<>(partitions.size());
+ for (HgNodePartition partition : partitions) {
+ nodeTkvs.add(new NodeTkv(partition, table, ownerKey, ownerKey));
+ }
+
+ return nodeTkvs;
+ }
+
+ private List<NodeTkv> toNodeTkvList(String table, HgOwnerKey startKey, HgOwnerKey endKey) {
+ Collection<HgNodePartition> partitions =
+ doPartition(table, startKey.getOwner(), endKey.getOwner());
+ ArrayList<NodeTkv> nodeTkvs = new ArrayList<>(partitions.size());
+ for (HgNodePartition partition : partitions) {
+ nodeTkvs.add(new NodeTkv(partition, table, startKey, endKey));
+ }
+ return nodeTkvs;
+ }
+
+ private List<NodeTkv> toNodeTkvList(String table, int startCode, int endCode) {
+ Collection<HgNodePartition> partitions = this.doPartition(table, startCode, endCode);
+ ArrayList<NodeTkv> nodeTkvs = new ArrayList<>(partitions.size());
+ for (HgNodePartition partition : partitions) {
+ nodeTkvs.add(
+ new NodeTkv(partition, table, HgOwnerKey.codeOf(startCode),
+ HgOwnerKey.codeOf(endCode)));
+ }
+ return nodeTkvs;
+ }
+
+ /**
+ * @return not null
+ */
+ private Collection<HgNodePartition> doPartition(String table, byte[] startKey, byte[] endKey) {
+ HgNodePartitionerBuilder partitionerBuilder = HgNodePartitionerBuilder.resetAndGet();
+
+ int status = this.nodePartitioner.partition(partitionerBuilder, this.graphName, startKey,
+ endKey);
+
+ if (status != 0) {
+ throw err("The node-partitioner is not work.");
+ }
+
+ Collection<HgNodePartition> partitions = partitionerBuilder.getPartitions();
+
+ if (partitions.isEmpty()) {
+ throw err("Failed to get the collection of HgNodePartition from node-partitioner.");
+ }
+
+ return partitions;
+ }
+
+ /**
+ * @return not null
+ */
+ private Collection<HgNodePartition> doPartition(String table, int startCode, int endCode) {
+ HgNodePartitionerBuilder partitionerBuilder = HgNodePartitionerBuilder.resetAndGet();
+ int status = this.nodePartitioner.partition(partitionerBuilder, this.graphName, startCode,
+ endCode);
+
+ if (status != 0) {
+ throw err("The node-partitioner is not work.");
+ }
+
+ Collection<HgNodePartition> partitions = partitionerBuilder.getPartitions();
+
+ if (partitions.isEmpty()) {
+ throw err("Failed to get the collection of HgNodePartition from node-partitioner.");
+ }
+
+ return partitions;
+ }
+
+ Collection<HgNodePartition> doPartition(String table, int partitionId) {
+ HgNodePartitionerBuilder partitionerBuilder = HgNodePartitionerBuilder.resetAndGet();
+ int status =
+ this.nodePartitioner.partition(partitionerBuilder, this.graphName, partitionId);
+
+ if (status != 0) {
+ throw err("The node-partitioner is not work.");
+ }
+
+ Collection<HgNodePartition> partitions = partitionerBuilder.getPartitions();
+
+ if (partitions.isEmpty()) {
+ throw err("Failed to get the collection of HgNodePartition from node-partitioner.");
+ }
+
+ return partitions;
+ }
+
+ private Stream<HgPair<HgStoreNode, NodeTkv>> getNodeStream(String table) {
+ return this.toNodeTkvList(table)
+ .stream()
+ .map(
+ e -> new HgPair<>(this.getStoreNode(e.getNodeId()), e)
+ );
+ }
+
+ Stream<HgPair<HgStoreNode, NodeTkv>> getNodeStream(String table,
+ HgOwnerKey ownerKey) {
+ return this.toNodeTkvList(table, ownerKey)
+ .stream()
+ .map(
+ e -> new HgPair<>(this.getStoreNode(e.getNodeId()), e)
+ );
+ }
+
+ Stream<HgPair<HgStoreNode, NodeTkv>> getNodeStream(String table, HgOwnerKey startKey,
+ HgOwnerKey endKey) {
+ return this.toNodeTkvList(table, startKey, endKey)
+ .stream()
+ .map(
+ e -> new HgPair<>(this.getStoreNode(e.getNodeId()), e)
+ );
+
+ }
+
+ // private List> getNode(String table) {
+ // List nodeTkvList = this.toNodeTkvList(table);
+ // return nodeTkv2Node(nodeTkvList);
+ // }
+
+ List<HgPair<HgStoreNode, NodeTkv>> getNode(String table, HgOwnerKey ownerKey) {
+ List<NodeTkv> nodeTkvList = this.toNodeTkvList(table, ownerKey);
+ return nodeTkv2Node(nodeTkvList);
+ }
+
+ List<HgPair<HgStoreNode, NodeTkv>> getNode(String table, HgOwnerKey startKey,
+ HgOwnerKey endKey) {
+ List<NodeTkv> nodeTkvList = this.toNodeTkvList(table, startKey, endKey);
+ return nodeTkv2Node(nodeTkvList);
+
+ }
+ //
+ //boolean doAction(String table, HgOwnerKey startKey, HgOwnerKey endKey,
+ // Function action) {
+ // return this.doAction(table, startKey, endKey, action);
+ //
+ //}
+
+ // List> getNode(String table, Integer endKey) {
+ // .stream()
+ // .map(e -> new NodeTkv(e, nodeParams.getX(), nodeParams.getY(), nodeParams.getY
+ // ().getKeyCode()))
+ // .map(
+ // e -> new HgPair<>(this.proxy.getStoreNode(e.getNodeId()), e)
+ // );
+ // Collection nodePartitions = this.doPartition(table, endKey);
+ // for (HgNodePartition nodePartition: nodePartitions) {
+ //
+ // }
+ // return nodeTkv2Node(nodeTkvList);
+ //
+ // }
+
+ private List<HgPair<HgStoreNode, NodeTkv>> nodeTkv2Node(Collection<NodeTkv> nodeTkvList) {
+ ArrayList<HgPair<HgStoreNode, NodeTkv>> hgPairs = new ArrayList<>(nodeTkvList.size());
+ for (NodeTkv nodeTkv : nodeTkvList) {
+ hgPairs.add(new HgPair<>(this.getStoreNode(nodeTkv.getNodeId()), nodeTkv));
+ }
+ return hgPairs;
+ }
+
+}
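
A minimal caller-side sketch of the doAction() fan-out above (illustrative only, not part of the patch; NodeTkv#getSession() is assumed here as the counterpart of the setSession() call made inside doAction(), and the table name is a placeholder):

    // Shape of an action handed to doAction(): it runs once per owning partition,
    // and returning false aborts the fan-out early.
    Function<NodeTkv, Boolean> action = nodeTkv -> {
        HgStoreSession session = nodeTkv.getSession(); // assumed getter
        return session.existsTable("g+v");             // placeholder table name
    };
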
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/SequencedIterator.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/SequencedIterator.java
new file mode 100644
index 0000000000..aca7bb70b3
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/SequencedIterator.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Queue;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvOrderedIterator;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * An ordered proxy iterator: it switches to the next underlying iterator only when the current one is exhausted.
+ *
+ * created on 2022/03/10
+ *
+ * @version 0.1.0
+ */
+@Slf4j
+public class SequencedIterator implements HgKvIterator {
+
+ private static final byte[] EMPTY_BYTES = new byte[0];
+ private final Queue<HgKvOrderedIterator> queue;
+ private final long limit;
+ private HgKvOrderedIterator iterator;
+ private HgKvEntry entry;
+ private int count;
+ private byte[] position = EMPTY_BYTES;
+ private byte[] position4Seeking = EMPTY_BYTES;
+
+ SequencedIterator(List<HgKvOrderedIterator> iterators, long limit) {
+ Collections.sort(iterators);
+ this.queue = new LinkedList<>(iterators);
+ this.limit = limit <= 0 ? Integer.MAX_VALUE : limit;
+ }
+
+ private HgKvOrderedIterator getIterator() {
+ if (this.queue.isEmpty()) {
+ return null;
+ }
+ HgKvOrderedIterator buf;
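+ // Seek each polled iterator to the position reached so far before checking
+ // whether it still has entries.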
+ while ((buf = this.queue.poll()) != null) {
+ buf.seek(this.position4Seeking);
+ if (buf.hasNext()) {
+ break;
+ }
+ }
+ return buf;
+ }
+
+ private void closeIterators() {
+ if (this.queue.isEmpty()) {
+ return;
+ }
+ HgKvOrderedIterator buf;
+ while ((buf = this.queue.poll()) != null) {
+ buf.close();
+ }
+
+ }
+
+ @Override
+ public byte[] key() {
+ if (this.entry != null) {
+ return this.entry.key();
+ }
+ return null;
+ }
+
+ @Override
+ public byte[] value() {
+ if (this.entry != null) {
+ return this.entry.value();
+ }
+ return null;
+ }
+
+ @Override
+ public byte[] position() {
+ return this.position;
+ }
+
+ @Override
+ public void seek(byte[] pos) {
+ if (pos != null) {
+ this.position4Seeking = pos;
+ }
+ }
+
+ @Override
+ public boolean hasNext() {
+ if (this.count >= this.limit) {
+ return false;
+ }
+ if (this.iterator == null) {
+ this.iterator = this.getIterator();
+ } else if (!this.iterator.hasNext()) {
+ this.iterator.close();
+ this.iterator = this.getIterator();
+ }
+ return this.iterator != null;
+ }
+
+ @Override
+ public Object next() {
+ if (this.iterator == null) {
+ hasNext();
+ }
+ if (this.iterator == null) {
+ throw new NoSuchElementException();
+ }
+ this.entry = this.iterator.next();
+ this.position = this.iterator.position();
+ if (!this.iterator.hasNext()) {
+ this.iterator.close();
+ this.iterator = null;
+ }
+ this.count++;
+ return this.entry;
+ }
+
+ @Override
+ public void close() {
+ if (this.iterator != null) {
+ this.iterator.close();
+ }
+ this.closeIterators();
+ }
+}
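
A minimal usage sketch for SequencedIterator (illustrative only, not part of the patch; it assumes placement in the org.apache.hugegraph.store.client package, since the constructor is package-private):

    // Merge per-node ordered iterators into one client-side iterator, capped at
    // `limit` entries; the constructor sorts the list in place by getSequence(),
    // so a defensive copy is handed over.
    static HgKvIterator mergeOrdered(List<HgKvOrderedIterator> perNode, long limit) {
        return new SequencedIterator(new ArrayList<>(perNode), limit);
    }
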
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/ShiftWorkIteratorProxy.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/ShiftWorkIteratorProxy.java
new file mode 100644
index 0000000000..474b042bf2
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/ShiftWorkIteratorProxy.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Queue;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvPagingIterator;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2021/10/24
+ *
+ * @version 0.1.1
+ */
+@Slf4j
+public class ShiftWorkIteratorProxy implements HgKvIterator {
+
+ private static final byte[] EMPTY_BYTES = new byte[0];
+ private final int limit;
+ private HgKvPagingIterator iterator;
+ private Queue<HgKvPagingIterator> queue = new LinkedList<>();
+ private HgKvEntry entry;
+ private int count;
+ private int shiftCount;
+
+ ShiftWorkIteratorProxy(List<HgKvPagingIterator> iterators, int limit) {
+ this.queue = new LinkedList<>(iterators);
+ this.limit = limit <= 0 ? Integer.MAX_VALUE : limit;
+ }
+
+ private HgKvPagingIterator getIterator() {
+ if (this.queue.isEmpty()) {
+ return null;
+ }
+
+ HgKvPagingIterator buf = null;
+
+ while ((buf = this.queue.poll()) != null) {
+ if (buf.hasNext()) {
+ break;
+ }
+ }
+
+ if (buf == null) {
+ return null;
+ }
+
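+ // Put the polled iterator back at the tail of the queue so the nodes are
+ // visited in rotation.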
+ this.queue.add(buf);
+
+ return buf;
+ }
+
+ private void closeIterators() {
+ if (this.queue.isEmpty()) {
+ return;
+ }
+
+ HgKvPagingIterator buf;
+
+ while ((buf = this.queue.poll()) != null) {
+ buf.close();
+ }
+
+ }
+
+ private void setIterator() {
+
+ // if (++this.shiftCount >= this.iterator.getPageSize() / 2) {
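+ // Hand control to the next iterator once a full page has been consumed from
+ // the current one.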
+ if (++this.shiftCount >= this.iterator.getPageSize()) {
+ this.iterator = null;
+ this.shiftCount = 0;
+ }
+
+ }
+
+ private void doNext() {
+
+ }
+
+ @Override
+ public byte[] key() {
+ if (this.entry != null) {
+ return this.entry.key();
+ }
+ return null;
+ }
+
+ @Override
+ public byte[] value() {
+ if (this.entry != null) {
+ return this.entry.value();
+ }
+ return null;
+ }
+
+ @Override
+ public byte[] position() {
+ return this.iterator != null ? this.iterator.position() : EMPTY_BYTES;
+ }
+
+ @Override
+ public void seek(byte[] position) {
+ if (this.iterator != null) {
+ this.iterator.seek(position);
+ }
+ }
+
+ @Override
+ public boolean hasNext() {
+ if (this.count >= this.limit) {
+ return false;
+ }
+ if (this.iterator == null
+ || !this.iterator.hasNext()) {
+ this.iterator = this.getIterator();
+ }
+ return this.iterator != null;
+ }
+
+ @Override
+ public Object next() {
+ if (this.iterator == null) {
+ hasNext();
+ }
+ if (this.iterator == null) {
+ throw new NoSuchElementException();
+ }
+ this.entry = this.iterator.next();
+ this.setIterator();
+ this.count++;
+ //log.info("next - > {}",this.entry);
+ return this.entry;
+ }
+
+ @Override
+ public void close() {
+ if (this.iterator != null) {
+ this.iterator.close();
+ }
+ this.closeIterators();
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/TopWorkIteratorProxy.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/TopWorkIteratorProxy.java
new file mode 100644
index 0000000000..21a37ae3df
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/TopWorkIteratorProxy.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Queue;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+
+/**
+ * created on 2021/10/21
+ *
+ * @version 0.1.0
+ */
+class TopWorkIteratorProxy implements HgKvIterator {
+
+ private static final byte[] EMPTY_BYTES = new byte[0];
+ private final Queue<HgKvIterator> queue;
+ private final long limit;
+ private HgKvIterator iterator;
+ private HgKvEntry entry;
+ // result count
+ private int count;
+
+ TopWorkIteratorProxy(List<HgKvIterator> iterators, long limit) {
+ this.queue = new LinkedList<>(iterators);
+ this.limit = limit <= 0 ? Integer.MAX_VALUE : limit;
+ }
+
+ private HgKvIterator getIterator() {
+ if (this.queue.isEmpty()) {
+ return null;
+ }
+
+ HgKvIterator buf = null;
+
+ while ((buf = this.queue.poll()) != null) {
+ if (buf.hasNext()) {
+ break;
+ }
+ }
+
+ if (buf == null) {
+ return null;
+ }
+
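+ // Re-queue the polled iterator; together with setIterator() this rotates
+ // across nodes entry by entry.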
+ this.queue.add(buf);
+
+ return buf;
+ }
+
+ private void closeIterators() {
+ if (this.queue.isEmpty()) {
+ return;
+ }
+
+ HgKvIterator buf;
+
+ while ((buf = this.queue.poll()) != null) {
+ buf.close();
+ }
+
+ }
+
+ private void setIterator() {
+ this.iterator = null;
+ }
+
+ @Override
+ public byte[] key() {
+ if (this.entry != null) {
+ return this.entry.key();
+ }
+ return null;
+ }
+
+ @Override
+ public byte[] value() {
+ if (this.entry != null) {
+ return this.entry.value();
+ }
+ return null;
+ }
+
+ @Override
+ public byte[] position() {
+ return this.iterator != null ? this.iterator.position() : EMPTY_BYTES;
+ }
+
+ @Override
+ public void seek(byte[] position) {
+ if (this.iterator != null) {
+ this.iterator.seek(position);
+ }
+ }
+
+ @Override
+ public boolean hasNext() {
+ if (this.count >= this.limit) {
+ return false;
+ }
+ if (this.iterator == null) {
+ this.iterator = this.getIterator();
+ }
+ return this.iterator != null;
+
+ }
+
+ @Override
+ public Object next() {
+ if (this.iterator == null) {
+ hasNext();
+ }
+ if (this.iterator == null) {
+ throw new NoSuchElementException();
+ }
+ this.entry = this.iterator.next();
+ this.setIterator();
+ this.count++;
+ return this.entry;
+ }
+
+ @Override
+ public void close() {
+ if (this.iterator != null) {
+ this.iterator.close();
+ }
+ this.closeIterators();
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/AbstractGrpcClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/AbstractGrpcClient.java
new file mode 100644
index 0000000000..20aa54b39a
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/AbstractGrpcClient.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.IntStream;
+
+import org.apache.hugegraph.store.client.util.ExecutorPool;
+import org.apache.hugegraph.store.client.util.HgStoreClientConfig;
+import org.apache.hugegraph.store.term.HgPair;
+
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import io.grpc.stub.AbstractAsyncStub;
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+
+public abstract class AbstractGrpcClient {
+
+ private static Map<String, ManagedChannel[]> channels = new ConcurrentHashMap<>();
+ private static int n = 5;
+ private static int concurrency = 1 << n;
+ private static AtomicLong counter = new AtomicLong(0);
+ private static long limit = Long.MAX_VALUE >> 1;
+ private static HgStoreClientConfig config = HgStoreClientConfig.of();
+ private Map<String, HgPair<ManagedChannel, AbstractBlockingStub>[]> blockingStubs =
+ new ConcurrentHashMap<>();
+ private Map<String, HgPair<ManagedChannel, AbstractAsyncStub>[]> asyncStubs =
+ new ConcurrentHashMap<>();
+ private ThreadPoolExecutor executor;
+
+ {
+ executor = ExecutorPool.createExecutor("common", 60, concurrency, concurrency);
+ }
+
+ public AbstractGrpcClient() {
+
+ }
+
+ public ManagedChannel[] getChannels(String target) {
+ ManagedChannel[] tc;
+ if ((tc = channels.get(target)) == null) {
+ synchronized (channels) {
+ if ((tc = channels.get(target)) == null) {
+ try {
+ ManagedChannel[] value = new ManagedChannel[concurrency];
+ CountDownLatch latch = new CountDownLatch(concurrency);
+ for (int i = 0; i < concurrency; i++) {
+ int fi = i;
+ executor.execute(() -> {
+ try {
+ value[fi] = getManagedChannel(target);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ } finally {
+ latch.countDown();
+ }
+ });
+ }
+ latch.await();
+ channels.put(target, tc = value);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ }
+ return tc;
+ }
+
+ public abstract AbstractBlockingStub getBlockingStub(ManagedChannel channel);
+
+ public AbstractBlockingStub getBlockingStub(String target) {
+ ManagedChannel[] channels = getChannels(target);
+ HgPair<ManagedChannel, AbstractBlockingStub>[] pairs = blockingStubs.get(target);
+ long l = counter.getAndIncrement();
+ if (l >= limit) {
+ counter.set(0);
+ }
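+ // concurrency is a power of two (1 << n), so masking with (concurrency - 1)
+ // is a cheap modulo.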
+ int index = (int) (l & (concurrency - 1));
+ if (pairs == null) {
+ synchronized (blockingStubs) {
+ pairs = blockingStubs.get(target);
+ if (pairs == null) {
+ HgPair<ManagedChannel, AbstractBlockingStub>[] value = new HgPair[concurrency];
+ IntStream.range(0, concurrency).forEach(i -> {
+ ManagedChannel channel = channels[index];
+ AbstractBlockingStub stub = getBlockingStub(channel);
+ value[i] = new HgPair<>(channel, stub);
+ // log.info("create channel for {}",target);
+ });
+ blockingStubs.put(target, value);
+ AbstractBlockingStub stub = value[index].getValue();
+ return (AbstractBlockingStub) setBlockingStubOption(stub);
+ }
+ }
+ }
+ return (AbstractBlockingStub) setBlockingStubOption(pairs[index].getValue());
+ }
+
+ private AbstractStub setBlockingStubOption(AbstractBlockingStub stub) {
+ return stub.withDeadlineAfter(config.getGrpcTimeoutSeconds(), TimeUnit.SECONDS)
+ .withMaxInboundMessageSize(
+ config.getGrpcMaxInboundMessageSize())
+ .withMaxOutboundMessageSize(
+ config.getGrpcMaxOutboundMessageSize());
+ }
+
+ public AbstractAsyncStub getAsyncStub(ManagedChannel channel) {
+ return null;
+ }
+
+ public AbstractAsyncStub getAsyncStub(String target) {
+ ManagedChannel[] channels = getChannels(target);
+ HgPair<ManagedChannel, AbstractAsyncStub>[] pairs = asyncStubs.get(target);
+ long l = counter.getAndIncrement();
+ if (l >= limit) {
+ counter.set(0);
+ }
+ int index = (int) (l & (concurrency - 1));
+ if (pairs == null) {
+ synchronized (asyncStubs) {
+ pairs = asyncStubs.get(target);
+ if (pairs == null) {
+ HgPair<ManagedChannel, AbstractAsyncStub>[] value = new HgPair[concurrency];
+ IntStream.range(0, concurrency).parallel().forEach(i -> {
+ ManagedChannel channel = channels[index];
+ AbstractAsyncStub stub = getAsyncStub(channel);
+ // stub.withMaxInboundMessageSize(config.getGrpcMaxInboundMessageSize())
+ // .withMaxOutboundMessageSize(config.getGrpcMaxOutboundMessageSize());
+ value[i] = new HgPair<>(channel, stub);
+ // log.info("create channel for {}",target);
+ });
+ asyncStubs.put(target, value);
+ AbstractAsyncStub stub =
+ (AbstractAsyncStub) setStubOption(value[index].getValue());
+ return stub;
+ }
+ }
+ }
+ return (AbstractAsyncStub) setStubOption(pairs[index].getValue());
+
+ }
+
+ private AbstractStub setStubOption(AbstractStub value) {
+ return value.withMaxInboundMessageSize(
+ config.getGrpcMaxInboundMessageSize())
+ .withMaxOutboundMessageSize(
+ config.getGrpcMaxOutboundMessageSize());
+ }
+
+ private ManagedChannel getManagedChannel(String target) {
+ return ManagedChannelBuilder.forTarget(target).usePlaintext().build();
+ }
+
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvEntryImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvEntryImpl.java
new file mode 100644
index 0000000000..0cc4b303c7
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvEntryImpl.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Arrays;
+
+import org.apache.hugegraph.store.HgKvEntry;
+
+class GrpcKvEntryImpl implements HgKvEntry {
+
+ private final byte[] key;
+ private final byte[] value;
+ private final int code;
+
+ GrpcKvEntryImpl(byte[] k, byte[] v, int code) {
+ this.key = k;
+ this.value = v;
+ this.code = code;
+ }
+
+ @Override
+ public int code() {
+ return code;
+ }
+
+ @Override
+ public byte[] key() {
+ return key;
+ }
+
+ @Override
+ public byte[] value() {
+ return value;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ GrpcKvEntryImpl hgKvEntry = (GrpcKvEntryImpl) o;
+ return Arrays.equals(key, hgKvEntry.key) && Arrays.equals(value, hgKvEntry.value);
+ }
+
+ @Override
+ public int hashCode() {
+ int result = Arrays.hashCode(key);
+ result = 31 * result + Arrays.hashCode(value);
+ return result;
+ }
+
+ @Override
+ public String toString() {
+ return "HgKvEntryImpl{" +
+ "key=" + Arrays.toString(key) +
+ ", value=" + Arrays.toString(value) +
+ ", code=" + code +
+ '}';
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvIteratorImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvIteratorImpl.java
new file mode 100644
index 0000000000..c9825a60b3
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcKvIteratorImpl.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.List;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvOrderedIterator;
+import org.apache.hugegraph.store.HgKvPagingIterator;
+import org.apache.hugegraph.store.HgPageSize;
+import org.apache.hugegraph.store.HgSeekAble;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.client.util.HgStoreClientUtil;
+import org.apache.hugegraph.store.grpc.common.Kv;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2021/10/20
+ *
+ * @version 0.2.1
+ */
+@Slf4j
+class GrpcKvIteratorImpl implements HgKvPagingIterator, HgKvOrderedIterator {
+
+ private final byte[] emptyBytes = HgStoreClientConst.EMPTY_BYTES;
+ private final KvCloseableIterator<Kv> iterator;
+ private final HgPageSize pageLimiter;
+ private final HgStoreNodeSession session;
+ private HgKvEntry element;
+
+ private GrpcKvIteratorImpl(HgStoreNodeSession session, KvCloseableIterator<Kv> iterator,
+ HgPageSize pageLimiter) {
+ this.iterator = iterator;
+ this.pageLimiter = pageLimiter;
+ this.session = session;
+ }
+
+ public static HgKvIterator of(HgStoreNodeSession nodeSession,
+ KvCloseableIterator<Kv> iterator) {
+ if (iterator instanceof HgPageSize) {
+ return of(nodeSession, iterator, (HgPageSize) iterator);
+ }
+ return new GrpcKvIteratorImpl(nodeSession, iterator, () -> 1);
+ }
+
+ public static HgKvIterator of(HgStoreNodeSession nodeSession,
+ KvCloseableIterator<Kv> iterator,
+ HgPageSize pageLimiter) {
+ return new GrpcKvIteratorImpl(nodeSession, iterator, pageLimiter);
+ }
+
+ public static HgKvIterator of(HgStoreNodeSession nodeSession, List<Kv> kvList) {
+ int pageSize = kvList.size();
+ return new GrpcKvIteratorImpl(nodeSession, new KvListIterator(kvList), () -> pageSize);
+ }
+
+ @Override
+ public boolean hasNext() {
+ // if (log.isDebugEnabled()) {
+ // if (!this.iterator.hasNext() && !nodeSession.getGraphName().endsWith("/s")) {
+ // log.debug("[ANALYSIS GrpcKv hasNext-> FALSE] ");
+ // }
+ // }
+ return this.iterator.hasNext();
+ }
+
+ @Override
+ public HgKvEntry next() {
+ Kv kv = this.iterator.next();
+ this.element = new GrpcKvEntryImpl(kv.getKey().toByteArray(), kv.getValue().toByteArray(),
+ kv.getCode());
+ return this.element;
+ }
+
+ @Override
+ public byte[] key() {
+ if (this.element == null) {
+ return null;
+ }
+ return this.element.key();
+ }
+
+ @Override
+ public byte[] value() {
+ if (this.element == null) {
+ return null;
+ }
+ return this.element.value();
+ }
+
+ @Override
+ public byte[] position() {
+ if (this.element == null) {
+ return emptyBytes;
+ }
+ byte[] key = this.element.key();
+ if (key == null) {
+ return emptyBytes;
+ }
+ if (!(this.iterator instanceof HgSeekAble)) {
+ return emptyBytes;
+ }
+ byte[] upstream = ((HgSeekAble) this.iterator).position();
+ byte[] code = HgStoreClientUtil.toIntBytes(this.element.code());
+ byte[] result = new byte[upstream.length + Integer.BYTES + key.length];
+ System.arraycopy(upstream, 0, result, 0, upstream.length);
+ System.arraycopy(code, 0, result, upstream.length, Integer.BYTES);
+ System.arraycopy(key, 0, result, upstream.length + Integer.BYTES, key.length);
+ return result;
+ }
+
+ @Override
+ public void seek(byte[] position) {
+ if (this.iterator instanceof HgSeekAble) {
+ ((HgSeekAble) this.iterator).seek(position);
+ }
+ }
+
+ @Override
+ public long getPageSize() {
+ return pageLimiter.getPageSize();
+ }
+
+ @Override
+ public boolean isPageEmpty() {
+ return !iterator.hasNext();
+ }
+
+ @Override
+ public int compareTo(HgKvOrderedIterator o) {
+ return Long.compare(this.getSequence(), o.getSequence());
+ }
+
+ @Override
+ public long getSequence() {
+ return this.session.getStoreNode().getNodeId().longValue();
+ }
+
+ @Override
+ public void close() {
+ this.iterator.close();
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcNodeHealthyClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcNodeHealthyClient.java
new file mode 100644
index 0000000000..5f6647094d
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcNodeHealthyClient.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.grpc.HealthyGrpc;
+import org.apache.hugegraph.store.grpc.HealthyOuterClass;
+
+import com.google.protobuf.Empty;
+
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+
+/**
+ *
+ */
+@ThreadSafe
+public final class GrpcNodeHealthyClient {
+
+ private final static Map<String, ManagedChannel> CHANNEL_MAP = new ConcurrentHashMap<>();
+ private final static Map<String, HealthyGrpc.HealthyBlockingStub> STUB_MAP =
+ new ConcurrentHashMap<>();
+
+ // TODO: Forbid constructing out of the package.
+ public GrpcNodeHealthyClient() {
+
+ }
+
+ private ManagedChannel getChannel(String target) {
+ ManagedChannel channel = CHANNEL_MAP.get(target);
+ if (channel == null) {
+ channel = ManagedChannelBuilder.forTarget(target).usePlaintext().build();
+ CHANNEL_MAP.put(target, channel);
+ }
+ return channel;
+ }
+
+ private HealthyGrpc.HealthyBlockingStub getStub(String target) {
+ HealthyGrpc.HealthyBlockingStub stub = STUB_MAP.get(target);
+ if (stub == null) {
+ stub = HealthyGrpc.newBlockingStub(getChannel(target));
+ STUB_MAP.put(target, stub);
+ }
+ return stub;
+ }
+
+
+/* boolean isHealthy(GrpcStoreNodeImpl node) {
+ String target = node.getAddress();
+
+ HealthyOuterClass.StringReply response = getStub(target).isOk(Empty.newBuilder().build());
+ String res = response.getMessage();
+
+ if ("ok".equals(res)) {
+ return true;
+ } else {
+ System.out.printf("gRPC-res-msg: %s%n", res);
+ return false;
+ }
+ }*/
+
+ public boolean isHealthy() {
+ String target = "localhost:9080";
+ ManagedChannel channel = ManagedChannelBuilder.forTarget(target).usePlaintext().build();
+ HealthyGrpc.HealthyBlockingStub stub = HealthyGrpc.newBlockingStub(channel);
+ HealthyOuterClass.StringReply response = stub.isOk(Empty.newBuilder().build());
+
+ String res = response.getMessage();
+ System.out.printf("gRPC response message:%s%n", res);
+
+ return "ok".equals(res);
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeBuilder.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeBuilder.java
new file mode 100644
index 0000000000..eb215a4f7e
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeBuilder.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hugegraph.store.client.HgPrivate;
+import org.apache.hugegraph.store.client.HgStoreNode;
+import org.apache.hugegraph.store.client.HgStoreNodeBuilder;
+import org.apache.hugegraph.store.client.HgStoreNodeManager;
+import org.apache.hugegraph.store.client.util.HgAssert;
+
+/**
+ * created on 2021/10/12
+ */
+public class GrpcStoreNodeBuilder implements HgStoreNodeBuilder {
+
+ private static final GrpcStoreSessionClient sessionClient = new GrpcStoreSessionClient();
+ private static final GrpcStoreStreamClient streamClient = new GrpcStoreStreamClient();
+ private static final AtomicLong ids = new AtomicLong(0);
+ private final HgStoreNodeManager nodeManager;
+ private Long nodeId;
+ private String address;
+
+ public GrpcStoreNodeBuilder(HgStoreNodeManager nodeManager, HgPrivate hgPrivate) {
+ HgAssert.isArgumentNotNull(hgPrivate, "hgPrivate");
+ HgAssert.isArgumentNotNull(nodeManager, "nodeManager");
+ this.nodeManager = nodeManager;
+ }
+
+ @Override
+ public GrpcStoreNodeBuilder setAddress(String address) {
+ HgAssert.isFalse(HgAssert.isInvalid(address), "The argument is invalid: address.");
+ this.address = address;
+ return this;
+ }
+
+ @Override
+ public GrpcStoreNodeBuilder setNodeId(Long nodeId) {
+ HgAssert.isFalse(nodeId == null, "The argument is invalid: nodeId.");
+ this.nodeId = nodeId;
+ return this;
+ }
+
+ @Override
+ public HgStoreNode build() {
+ // TODO: delete
+ if (this.nodeId == null) {
+ this.nodeId = ids.addAndGet(-1L);
+ }
+
+ HgAssert.isFalse(this.nodeId == null, "nodeId can't be null");
+ HgAssert.isFalse(this.address == null, "address can't be null");
+
+ GrpcStoreNodeImpl node =
+ new GrpcStoreNodeImpl(this.nodeManager, sessionClient, streamClient);
+ node.setNodeId(this.nodeId);
+ node.setAddress(this.address);
+
+ return node;
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeImpl.java
new file mode 100644
index 0000000000..4ca468ba6c
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeImpl.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Objects;
+
+import org.apache.hugegraph.store.HgStoreSession;
+import org.apache.hugegraph.store.client.HgStoreNode;
+import org.apache.hugegraph.store.client.HgStoreNodeManager;
+
+/**
+ * created on 2021/10/11
+ */
+class GrpcStoreNodeImpl implements HgStoreNode {
+
+ private final GrpcStoreSessionClient sessionClient;
+ private final GrpcStoreStreamClient streamClient;
+ private final HgStoreNodeManager nodeManager;
+ private String address;
+ private Long nodeId;
+
+ GrpcStoreNodeImpl(HgStoreNodeManager nodeManager, GrpcStoreSessionClient sessionClient,
+ GrpcStoreStreamClient streamClient) {
+ this.nodeManager = nodeManager;
+ this.sessionClient = sessionClient;
+ this.streamClient = streamClient;
+ }
+
+ @Override
+ public Long getNodeId() {
+ return this.nodeId;
+ }
+
+ GrpcStoreNodeImpl setNodeId(Long nodeId) {
+ this.nodeId = nodeId;
+ return this;
+ }
+
+ @Override
+ public String getAddress() {
+ return this.address;
+ }
+
+ GrpcStoreNodeImpl setAddress(String address) {
+ this.address = address;
+ return this;
+ }
+
+ @Override
+ public HgStoreSession openSession(String graphName) {
+ // HgAssert.isFalse(HgAssert.isInvalid(graphName), "the argument: graphName is invalid.");
+ // return new GrpcStoreNodeSessionImpl2(this, graphName,this.nodeManager, this
+ // .sessionClient, this
+ // .streamClient);
+ return new GrpcStoreNodeSessionImpl(this, graphName, this.nodeManager, this.sessionClient,
+ this.streamClient);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ GrpcStoreNodeImpl that = (GrpcStoreNodeImpl) o;
+ return Objects.equals(address, that.address) && Objects.equals(nodeId, that.nodeId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(address, nodeId);
+ }
+
+ @Override
+ public String toString() {
+ return "storeNode: {" +
+ "address: \"" + address + "\"" +
+ ", nodeId: " + nodeId +
+ "}";
+ }
+}
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeSessionImpl.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeSessionImpl.java
new file mode 100644
index 0000000000..77c8a45537
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreNodeSessionImpl.java
@@ -0,0 +1,545 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hugegraph.store.HgKvEntry;
+import org.apache.hugegraph.store.HgKvIterator;
+import org.apache.hugegraph.store.HgKvStore;
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.HgScanQuery;
+import org.apache.hugegraph.store.client.HgStoreNode;
+import org.apache.hugegraph.store.client.HgStoreNodeManager;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.client.util.HgAssert;
+import org.apache.hugegraph.store.client.util.HgStoreClientConfig;
+import org.apache.hugegraph.store.client.util.HgStoreClientConst;
+import org.apache.hugegraph.store.client.util.HgStoreClientUtil;
+import org.apache.hugegraph.store.client.util.HgUuid;
+import org.apache.hugegraph.store.grpc.common.GraphMethod;
+import org.apache.hugegraph.store.grpc.common.Key;
+import org.apache.hugegraph.store.grpc.common.OpType;
+import org.apache.hugegraph.store.grpc.common.TableMethod;
+import org.apache.hugegraph.store.grpc.session.BatchEntry;
+import org.apache.hugegraph.store.grpc.stream.HgStoreStreamGrpc.HgStoreStreamStub;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.UnsafeByteOperations;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2022/01/19
+ *
+ * @version 0.6.0 added batch get on 2022/04/06
+ */
+@Slf4j
+@NotThreadSafe
+class GrpcStoreNodeSessionImpl implements HgStoreNodeSession {
+
+ private static final HgStoreClientConfig hgStoreClientConfig = HgStoreClientConfig.of();
+ private static final ConcurrentHashMap<String, Integer> tables = new ConcurrentHashMap<>() {{
+ put("unknown", 0);
+ put("g+v", 1);
+ put("g+oe", 2);
+ put("g+ie", 3);
+ put("g+index", 4);
+ put("g+task", 5);
+ put("g+olap", 6);
+ put("g+server", 7);
+ }};
+ private final HgStoreNode storeNode;
+ private final String graphName;
+ private final GrpcStoreSessionClient storeSessionClient;
+ private final GrpcStoreStreamClient storeStreamClient;
+ private final HgStoreNodeManager nodeManager;
+ private final NotifyingExecutor notifier;
+ private final SwitchingExecutor switcher;
+ private final BatchEntry.Builder batchEntryBuilder = BatchEntry.newBuilder();
+ private final Key.Builder builder = Key.newBuilder();
+ private boolean isAutoCommit = true;
+ private String batchId;
+ private LinkedList<BatchEntry> batchEntries = new LinkedList<>();
+
+ GrpcStoreNodeSessionImpl(HgStoreNode storeNode, String graphName,
+ HgStoreNodeManager nodeManager,
+ GrpcStoreSessionClient sessionClient,
+ GrpcStoreStreamClient streamClient) {
+ HgAssert.isFalse(HgAssert.isInvalid(graphName), "the argument: graphName is invalid.");
+ HgAssert.isFalse(nodeManager == null, "the argument: nodeManager is null.");
+ HgAssert.isFalse(storeNode == null, "the argument: storeNode is null.");
+ HgAssert.isFalse(sessionClient == null, "the argument: sessionClient is null.");
+ HgAssert.isFalse(streamClient == null, "the argument: streamClient is null.");
+
+ this.graphName = graphName;
+ this.storeNode = storeNode;
+ this.storeSessionClient = sessionClient;
+ this.storeStreamClient = streamClient;
+ this.nodeManager = nodeManager;
+
+ this.notifier = new NotifyingExecutor(this.graphName, this.nodeManager, this);
+ this.switcher = SwitchingExecutor.of();
+ }
+
+ @Override
+ public String getGraphName() {
+ return graphName;
+ }
+
+ @Override
+ public HgStoreNode getStoreNode() {
+ return storeNode;
+ }
+
+ public Key toKey(HgOwnerKey ownerKey) {
+ if (ownerKey == null) {
+ return null;
+ }
+ return builder
+ .setKey(UnsafeByteOperations.unsafeWrap(ownerKey.getKey()))
+ .setCode(ownerKey.getKeyCode())
+ .build();
+ }
+
+ @Override
+ public void beginTx() {
+ this.isAutoCommit = false;
+ }
+
+ @Override
+ public void commit() {
+ try {
+ if (this.isAutoCommit) {
+ throw new IllegalStateException("It's not in tx state");
+ }
+ if (this.batchEntries.isEmpty()) {
+ this.resetTx();
+ return;
+ }
+ if (!this.doCommit(this.batchEntries)) {
+ throw new Exception("Failed to invoke doCommit");
+ }
+ } catch (Throwable t) {
+ throw new RuntimeException(t);
+ } finally {
+ this.resetTx();
+ }
+
+ }
+
+ @Override
+ public void rollback() {
+ if (this.isAutoCommit) {
+ throw new IllegalStateException("It's not in tx state");
+ }
+ this.resetTx();
+ }
+
+ @Override
+ public boolean isTx() {
+ return !this.isAutoCommit;
+ }
+
+ private void resetTx() {
+ this.isAutoCommit = true;
+ this.batchId = null;
+ this.batchEntries = new LinkedList<>();
+ }
+
+ //TODO: not support distributed tx yet.
+ private String getBatchId() {
+ if (this.isAutoCommit) {
+ this.batchId = HgUuid.newUUID();
+ } else {
+ if (this.batchId == null) {
+ this.batchId = HgUuid.newUUID();
+ }
+ }
+ return this.batchId;
+ }
+
+ @Override
+ public boolean put(String table, HgOwnerKey ownerKey, byte[] value) {
+ return this.prepareBatchEntry(OpType.OP_TYPE_PUT, table, ownerKey, null, value);
+ }
+
+ @Override
+ public boolean directPut(String table, int partitionId, HgOwnerKey key, byte[] value) {
+ return false;
+ }
+
+ @Override
+ public boolean delete(String table, HgOwnerKey ownerKey) {
+ return this.prepareBatchEntry(OpType.OP_TYPE_DEL, table, ownerKey, null, null);
+ }
+
+ @Override
+ public boolean deleteSingle(String table, HgOwnerKey ownerKey) {
+ return this.prepareBatchEntry(OpType.OP_TYPE_DEL_SINGLE, table, ownerKey, null, null);
+ }
+
+ @Override
+ public boolean deletePrefix(String table, HgOwnerKey prefix) {
+ return this.prepareBatchEntry(OpType.OP_TYPE_DEL_PREFIX, table, prefix, null, null);
+ }
+
+ @Override
+ public boolean deleteRange(String table, HgOwnerKey start, HgOwnerKey end) {
+ return this.prepareBatchEntry(OpType.OP_TYPE_DEL_RANGE, table, start, end, null);
+ }
+
+ @Override
+ public boolean merge(String table, HgOwnerKey key, byte[] value) {
+ return this.prepareBatchEntry(OpType.OP_TYPE_MERGE, table, key, null, value);
+ }
+
+ private boolean prepareBatchEntry(OpType opType, String table
+ , HgOwnerKey startKey, HgOwnerKey endKey, byte[] value) {
+ this.batchEntryBuilder.clear().setOpType(opType);
+ this.batchEntryBuilder.setTable(tables.get(table));
+ if (startKey != null) {
+ this.batchEntryBuilder.setStartKey(toKey(startKey));
+ }
+ if (endKey != null) {
+ this.batchEntryBuilder.setEndKey(toKey(endKey));
+ }
+ if (value != null) {
+ this.batchEntryBuilder.setValue(ByteString.copyFrom(value));
+ }
+ if (this.isAutoCommit) {
+ return this.doCommit(Collections.singletonList(this.batchEntryBuilder.build()));
+ } else {
+ return this.batchEntries.add(this.batchEntryBuilder.build());
+ }
+
+ }
+
+ private boolean doCommit(List<BatchEntry> entries) {
+ return this.notifier.invoke(
+ () -> this.storeSessionClient.doBatch(this, this.getBatchId(), entries),
+ e -> true
+ ).orElse(false);
+ }
+
+ @Override
+ public byte[] get(String table, HgOwnerKey ownerKey) {
+ return this.notifier.invoke(
+ () -> this.storeSessionClient.doGet(this, table, ownerKey)
+ ,
+ e -> e.getValueResponse().getValue().toByteArray()
+ ).orElse(HgStoreClientConst.EMPTY_BYTES);
+ }
+
+ @Override
+ public boolean clean(int partId) {
+ return this.notifier.invoke(
+ () -> this.storeSessionClient.doClean(this, partId)
+ ,
+ e -> true
+
+ ).orElse(false);
+ }
+
+ @Override
+ public List<HgKvEntry> batchGetOwner(String table, List<HgOwnerKey> keyList) {
+ return this.notifier.invoke(
+ () -> this.storeSessionClient.doBatchGet(this, table, keyList),
+ e -> e.getKeyValueResponse().getKvList()
+ .stream()
+ .map(kv -> (HgKvEntry) new GrpcKvEntryImpl(kv.getKey().toByteArray()
+ , kv.getValue().toByteArray(), kv.getCode())
+ )
+ .collect(Collectors.toList()))
+ .orElse((List) HgStoreClientConst.EMPTY_LIST);
+ }
+
+ @Override
+ public HgKvIterator batchPrefix(String table, List<HgOwnerKey> keyList) {
+ return GrpcKvIteratorImpl.of(this,
+ this.storeStreamClient.doBatchScanOneShot(this,
+ HgScanQuery.prefixOf(
+ table,
+ keyList))
+ );
+ }
+
+ @Override
+ public boolean existsTable(String table) {
+ return this.notifier.invoke(
+ () -> this.storeSessionClient.doTable(this, table,
+ TableMethod.TABLE_METHOD_EXISTS),
+ e -> true)
+ .orElse(false);
+ }
+
+ @Override
+ public boolean createTable(String table) {
+ return this.notifier.invoke(
+ () -> this.storeSessionClient.doTable(this, table,
+ TableMethod.TABLE_METHOD_CREATE),
+ e -> true)
+ .orElse(false);
+ }
+
+ @Override
+ public boolean deleteTable(String table) {
+ return this.notifier.invoke(
+ () -> this.storeSessionClient.doTable(this, table,
+ TableMethod.TABLE_METHOD_DELETE),
+ e -> true)
+ .orElse(false);
+ }
+
+ @Override
+ public boolean dropTable(String table) {
+ return this.notifier.invoke(
+ () -> this.storeSessionClient.doTable(this, table,
+ TableMethod.TABLE_METHOD_DROP),
+ e -> true)
+ .orElse(false);
+ }
+
+ @Override
+ public boolean deleteGraph(String graph) {
+ return this.notifier.invoke(
+ () -> this.storeSessionClient.doGraph(this, graph,
+ GraphMethod.GRAPH_METHOD_DELETE),
+ e -> true)
+ .orElse(false);
+ }
+
+ @Override
+ public boolean truncate() {
+ return this.notifier.invoke(
+ () -> this.storeSessionClient.doTable(this,
+ HgStoreClientConst.EMPTY_TABLE
+ , TableMethod.TABLE_METHOD_TRUNCATE),
+ e -> true)
+ .orElse(false);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table) {
+ return GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScan(this, table, 0));
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, long limit) {
+ return this.switcher.invoke(getSwitcherSupplier(limit)
+ , () -> GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScan
+ (this, table,
+ limit))
+ , () -> GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScanOneShot
+ (this, table,
+ limit))
+ ).get();
+ }
+
+ @Override
+ public HgKvIterator scanIterator(ScanStreamReq.Builder builder) {
+ HgStoreStreamStub stub = getStub();
+ KvPageScanner scanner = new KvPageScanner(this,
+ stub,
+ builder);
+ return GrpcKvIteratorImpl.of(this, scanner);
+ }
+
+ @Override
+ public long count(String table) {
+ return this.storeSessionClient.count(this, table);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, byte[] query) {
+ return GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScan(this, table, 0, query));
+ }
+
+ private HgStoreStreamStub getStub() {
+ return this.storeStreamClient.getStub(this);
+ }
+
+ // @Override
+ // public HgKvIterator scanIterator(ScanStreamReq scanReq) {
+ // KvPageScanner6 scanner = new KvPageScanner6(this,
+ // getStub(),
+ // scanReq.toBuilder());
+ // return GrpcKvIteratorImpl.of(this, scanner);
+ // }
+
+ @Override
+ public HgKvIterator scanIterator(String table, long limit, byte[] query) {
+ return this.switcher.invoke(getSwitcherSupplier(limit)
+ , () -> GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScan
+ (this, table,
+ limit, query))
+ , () -> GrpcKvIteratorImpl.of(this, this.storeStreamClient.doScanOneShot
+ (this, table,
+ limit, query))
+ ).get();
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix) {
+ return GrpcKvIteratorImpl.of(this,
+ this.storeStreamClient.doScan(this, table, keyPrefix, 0));
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix, long limit) {
+ return this.switcher.invoke(getSwitcherSupplier(limit),
+ () -> GrpcKvIteratorImpl.of(this,
+ this.storeStreamClient.doScan(this,
+ table,
+ keyPrefix,
+ limit)),
+ () -> GrpcKvIteratorImpl.of(this,
+ this.storeStreamClient.doScanOneShot(
+ this,
+ table,
+ keyPrefix,
+ limit)))
+ .get();
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey keyPrefix, long limit,
+ byte[] query) {
+ return this.switcher.invoke(getSwitcherSupplier(limit),
+ () -> GrpcKvIteratorImpl.of(this,
+ this.storeStreamClient.doScan(
+ this,
+ table,
+ keyPrefix,
+ limit,
+ query)),
+ () -> GrpcKvIteratorImpl.of(this,
+ this.storeStreamClient.doScanOneShot(
+ this,
+ table,
+ keyPrefix,
+ limit,
+ query)))
+ .get();
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey startKey,
+ HgOwnerKey endKey) {
+ return scanIterator(table, startKey, endKey, 0, HgKvStore.SCAN_ANY, null);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey startKey,
+ HgOwnerKey endKey, long limit) {
+ return scanIterator(table, startKey, endKey, limit,
+ HgStoreClientUtil.isValid(endKey) ? HgStoreClientConst.SCAN_TYPE_RANGE :
+ HgStoreClientConst.SCAN_TYPE_ANY, null);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey startKey,
+ HgOwnerKey endKey, long limit, byte[] query) {
+ return scanIterator(table, startKey, endKey, limit,
+ HgStoreClientUtil.isValid(endKey) ? HgStoreClientConst.SCAN_TYPE_RANGE :
+ HgStoreClientConst.SCAN_TYPE_ANY, query);
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, HgOwnerKey startKey,
+ HgOwnerKey endKey,
+ long limit, int scanType, byte[] query) {
+
+ return this.switcher.invoke(getSwitcherSupplier(limit),
+ () -> GrpcKvIteratorImpl.of(this,
+ this.storeStreamClient.doScan(
+ this,
+ table,
+ startKey,
+ endKey,
+ limit,
+ scanType,
+ query)),
+ () -> GrpcKvIteratorImpl.of(this,
+ this.storeStreamClient.doScanOneShot(
+ this,
+ table,
+ startKey,
+ endKey,
+ limit,
+ scanType,
+ query)))
+ .get();
+
+ }
+
+ @Override
+ public HgKvIterator scanIterator(String table, int codeFrom, int codeTo,
+ int scanType, byte[] query) {
+ //TODO: Should be changed when start using hashcode as partitionId.
+ if (log.isDebugEnabled()) {
+ log.debug("scanIterator-scanType: {}", scanType);
+ }
+ return GrpcKvIteratorImpl.of(this,
+ this.storeStreamClient.doScan(this, table
+ , HgOwnerKey.newEmpty().codeToKey(codeFrom)
+ , HgOwnerKey.newEmpty().codeToKey(codeTo)
+ , HgStoreClientConst.NO_LIMIT
+ , HgKvStore.SCAN_PREFIX_BEGIN |
+ HgKvStore.SCAN_HASHCODE | scanType
+ , query
+ )
+ );
+ }
+
+ @Override
+ public List<HgKvIterator<HgKvEntry>> scanBatch(HgScanQuery scanQuery) {
+ return Collections.singletonList(GrpcKvIteratorImpl.of(this,
+ this.storeStreamClient.doBatchScan(
+ this, scanQuery)
+ ));
+ }
+
+ @Override
+ public KvCloseableIterator<HgKvIterator<HgKvEntry>> scanBatch2(HgScanQuery scanQuery) {
+ throw new RuntimeException("not implemented");
+ }
+
+ @Override
+ public KvCloseableIterator<HgKvIterator<HgKvEntry>> scanBatch3(HgScanQuery scanQuery,
+ KvCloseableIterator iterator) {
+ return this.storeStreamClient.doBatchScan3(this, scanQuery, iterator);
+ }
+
+ private Supplier<Boolean> getSwitcherSupplier(long limit) {
+ return () -> limit <= 0 || limit > hgStoreClientConfig.getNetKvScannerPageSize();
+ }
+
+ @Override
+ public String toString() {
+ return "storeNodeSession: {" + storeNode + ", graphName: \"" + graphName + "\"}";
+ }
+}
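
A usage sketch for the session above (illustrative only, not part of the patch; storeNode, ownerBytes, keyBytes and valueBytes are placeholders, and "hugegraph/g" / "g+v" stand in for a real graph and table name):

    HgStoreSession session = storeNode.openSession("hugegraph/g");
    session.beginTx();
    try {
        session.put("g+v", HgOwnerKey.of(ownerBytes, keyBytes), valueBytes);
        session.commit();
    } catch (Exception e) {
        if (session.isTx()) {   // commit() resets the tx state even on failure
            session.rollback();
        }
        throw e;
    }
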
diff --git a/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreSessionClient.java b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreSessionClient.java
new file mode 100644
index 0000000000..794a7c1286
--- /dev/null
+++ b/hugegraph-store/hg-store-client/src/main/java/org/apache/hugegraph/store/client/grpc/GrpcStoreSessionClient.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.client.grpc;
+
+import static org.apache.hugegraph.store.client.grpc.KvBatchUtil.getHeader;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.hugegraph.store.HgOwnerKey;
+import org.apache.hugegraph.store.client.HgStoreNodeSession;
+import org.apache.hugegraph.store.grpc.common.GraphMethod;
+import org.apache.hugegraph.store.grpc.common.ScanMethod;
+import org.apache.hugegraph.store.grpc.common.TableMethod;
+import org.apache.hugegraph.store.grpc.session.Agg;
+import org.apache.hugegraph.store.grpc.session.BatchEntry;
+import org.apache.hugegraph.store.grpc.session.BatchGetReq;
+import org.apache.hugegraph.store.grpc.session.BatchReq;
+import org.apache.hugegraph.store.grpc.session.BatchWriteReq;
+import org.apache.hugegraph.store.grpc.session.CleanReq;
+import org.apache.hugegraph.store.grpc.session.FeedbackRes;
+import org.apache.hugegraph.store.grpc.session.GetReq;
+import org.apache.hugegraph.store.grpc.session.GraphReq;
+import org.apache.hugegraph.store.grpc.session.HgStoreSessionGrpc;
+import org.apache.hugegraph.store.grpc.session.HgStoreSessionGrpc.HgStoreSessionBlockingStub;
+import org.apache.hugegraph.store.grpc.session.TableReq;
+import org.apache.hugegraph.store.grpc.stream.ScanStreamReq;
+
+import io.grpc.Deadline;
+import io.grpc.ManagedChannel;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * created on 2021/11/18
+ *
+ * @version 0.5.0
+ */
+@Slf4j
+@ThreadSafe
+class GrpcStoreSessionClient extends AbstractGrpcClient {
+
+ @Override
+ public HgStoreSessionBlockingStub getBlockingStub(ManagedChannel channel) {
+ HgStoreSessionBlockingStub stub;
+ stub = HgStoreSessionGrpc.newBlockingStub(channel);
+ return stub;
+ }
+
+ private HgStoreSessionBlockingStub getBlockingStub(HgStoreNodeSession nodeSession) {
+ HgStoreSessionBlockingStub stub =
+ (HgStoreSessionBlockingStub) getBlockingStub(
+ nodeSession.getStoreNode().getAddress());
+ return stub;
+ }
+
+ FeedbackRes doGet(HgStoreNodeSession nodeSession, String table, HgOwnerKey ownerKey) {
+ if (log.isDebugEnabled()) {
+ log.debug("doGet: {}-{}-{}-{}", nodeSession, table, ownerKey, GetReq.newBuilder()
+ .setHeader(
+ GrpcUtil.getHeader(
+ nodeSession))
+ .setTk(GrpcUtil.toTk(
+ table,
+ ownerKey))
+ .build());
+ }
+ return this.getBlockingStub(nodeSession)
+ .get2(GetReq.newBuilder()
+ .setHeader(GrpcUtil.getHeader(nodeSession))
+ .setTk(GrpcUtil.toTk(table, ownerKey))
+ .build()
+ );
+ }
+
+ FeedbackRes doClean(HgStoreNodeSession nodeSession, int partId) {
+ return this.getBlockingStub(nodeSession)
+ .clean(CleanReq.newBuilder()
+ .setHeader(GrpcUtil.getHeader(nodeSession))
+ .setPartition(partId)
+ .build()
+ );
+ }
+
+ FeedbackRes doBatchGet(HgStoreNodeSession nodeSession, String table, List