refact(cassandra): update pom & ApiVersion & fix style
TODO: we also need to check the 0.69 in pom
imbajin committed Nov 23, 2022
1 parent 83fa9d5 commit 64e7357
Showing 13 changed files with 100 additions and 173 deletions.
1 change: 1 addition & 0 deletions hugegraph-api/pom.xml
@@ -185,6 +185,7 @@
</addDefaultSpecificationEntries>
</manifest>
<manifestEntries>
<!-- TODO: update it -->
<Implementation-Version>0.69.0.0</Implementation-Version>
</manifestEntries>
</archive>
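As an aside, the Implementation-Version entry touched above is what a runtime lookup such as the Version.of(ApiVersion.class, "0.69") call in the ApiVersion change below typically reads when running from a packaged JAR; a minimal sketch (not part of this diff, standard java.lang API):

// Reads Implementation-Version from the JAR manifest; returns null when
// running from exploded classes in an IDE, hence the literal fallback
String version = ApiVersion.class.getPackage().getImplementationVersion();
if (version == null) {
    version = "0.69"; // mirrors Version.of(ApiVersion.class, "0.69")
}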
@@ -24,38 +24,35 @@

public final class ApiVersion {

/**
/*
* API Version change log
*
* <p>
* version 0.2:
* [0.2] HugeGraph-527: First add the version to the hugegraph module
* [0.3] HugeGraph-525: Add versions check of components and api
* [0.4] HugeGraph-162: Add schema builder to separate client and
* inner interface.
* [0.4] HugeGraph-162: Add schema builder to separate client and inner interface.
* [0.5] HugeGraph-498: Support three kind of id strategy
*
* <p>
* version 0.3:
*
* [0.6] HugeGraph-614: Add update api of VL/EL to support append and
* eliminate action
* <p>
* [0.6] HugeGraph-614: Add update api of VL/EL to support append and eliminate action
* [0.7] HugeGraph-245: Add nullable-props for vertex label and edge label
* [0.8] HugeGraph-396: Continue to improve variables implementation
* [0.9] HugeGraph-894: Add vertex/edge update api to add property and
* remove property
* [0.9] HugeGraph-894: Add vertex/edge update api to add property and remove property
* [0.10] HugeGraph-919: Add condition query for vertex/edge list API
*
* <p>
* version 0.4:
* [0.11] HugeGraph-938: Remove useless indexnames field in VL/EL API
* [0.11] HugeGraph-938: Remove useless index-names field in VL/EL API
* [0.12] HugeGraph-589: Add schema id for all schema element
* [0.13] HugeGraph-956: Support customize string/number id strategy
*
* <p>
* version 0.5:
* [0.14] HugeGraph-1085: Add enable_label_index to VL/EL
* [0.15] HugeGraph-1105: Support paging for large amounts of records
* [0.16] HugeGraph-944: Support rest shortest path, k-out, k-neighbor
* [0.17] HugeGraph-944: Support rest shortest path, k-out, k-neighbor
* [0.18] HugeGraph-81: Change argument "checkVertex" to "check_vertex"
*
* <p>
* version 0.6:
* [0.19] HugeGraph-1195: Support eliminate userdata on schema
* [0.20] HugeGraph-1210: Add paths api to find paths between two nodes
@@ -67,35 +64,35 @@ public final class ApiVersion {
* [0.26] HugeGraph-1273: Add some monitoring counters to integrate with
* gremlin's monitoring framework
* [0.27] HugeGraph-889: Use asynchronous mechanism to do schema deletion
*
* <p>
* version 0.8:
* [0.28] Issue-153: Add task-cancel API
* [0.29] Issue-39: Add rays and rings RESTful API
* [0.30] Issue-32: Change index create API to return indexLabel and task id
* [0.31] Issue-182: Support restore graph in restoring and merging mode
*
* <p>
* version 0.9:
* [0.32] Issue-250: Keep depth and degree consistent for traverser api
* [0.33] Issue-305: Implement customized paths and crosspoints RESTful API
* [0.34] Issue-307: Let VertexAPI use simplified property serializer
* [0.35] Issue-287: Support pagination when do index query
* [0.36] Issue-360: Support paging for scan api
* [0.37] Issue-391: Add skip_super_node for shortest path
* [0.38] Issue-274: Add personalrank and neighborrank RESTful API
*
* [0.38] Issue-274: Add personal-rank and neighbor-rank RESTful API
* <p>
* version 0.10:
* [0.39] Issue-522: Add profile RESTful API
* [0.40] Issue-523: Add source_in_ring args for rings RESTful API
* [0.41] Issue-493: Support batch updating properties by multiple strategy
* [0.42] Issue-176: Let gremlin error response consistent with RESTful's
* [0.43] Issue-270 & 398: support shard-index and vertex + sortkey prefix,
* [0.43] Issue-270 & 398: support shard-index and vertex + sort-key prefix,
* and split range to rangeInt, rangeFloat, rangeLong and rangeDouble
* [0.44] Issue-633: Support unique index
* [0.45] Issue-673: Add 'OVERRIDE' update strategy
* [0.46] Issue-618 & 694: Support UUID id type
* [0.47] Issue-691: Support aggregate property
* [0.48] Issue-686: Support get schema by names
*
* <p>
* version 0.11:
* [0.49] Issue-670: Support fusiform similarity API
* [0.50] Issue-746: Support userdata for index label
@@ -121,11 +118,15 @@
* [0.69] Issue-1748: Support Cypher query RESTful API
*/

// The second parameter of Version.of() is for IDE running without JAR
/**
* The second parameter of Version.of() is for IDE running without JAR
* TODO: what shall we set for this version? (consider the basic compatibility)
*/
public static final Version VERSION = Version.of(ApiVersion.class, "0.69");

public static void check() {
// Check version of hugegraph-core. Firstly do check from version 0.3
// TODO: what shall we set for this version? (consider the basic compatibility)
VersionUtil.check(CoreVersion.VERSION, "0.13", "0.14", CoreVersion.NAME);
}
}
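For reference, the VersionUtil.check call above enforces a half-open range: it passes while hugegraph-core reports a version in [0.13, 0.14). A plain-Java sketch of that comparison (an editorial illustration, not the actual VersionUtil implementation from hugegraph-common):

// Half-open version range check: begin inclusive, end exclusive,
// components compared numerically left to right
static boolean inRange(String version, String begin, String end) {
    return compare(version, begin) >= 0 && compare(version, end) < 0;
}

static int compare(String a, String b) {
    String[] x = a.split("\\.");
    String[] y = b.split("\\.");
    for (int i = 0; i < Math.max(x.length, y.length); i++) {
        int xi = i < x.length ? Integer.parseInt(x[i]) : 0;
        int yi = i < y.length ? Integer.parseInt(y[i]) : 0;
        if (xi != yi) {
            return Integer.compare(xi, yi);
        }
    }
    return 0;
}

// inRange("0.13.2", "0.13", "0.14") -> true
// inRange("0.14.0", "0.13", "0.14") -> false, failing the check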
@@ -28,8 +28,7 @@
public class Clauses {

public static boolean needAllowFiltering(Clause clause) {
return ContainsKeyClause.class.isInstance(clause) ||
ContainsClause.class.isInstance(clause);
return clause instanceof ContainsKeyClause || clause instanceof ContainsClause;
}

public static Clause and(Clause left, Clause right) {
@@ -42,9 +41,9 @@ public static Clause in(String name, List<?> values) {

static class BinClause extends Clause {

private Clause left;
private String op;
private Clause right;
private final Clause left;
private final String op;
private final Clause right;

public BinClause(Clause left, String op, Clause right) {
this.left = left;
@@ -64,11 +63,7 @@ Object firstValue() {

@Override
boolean containsBindMarker() {
if (Utils.containsBindMarker(this.left) ||
Utils.containsBindMarker(this.right)) {
return true;
}
return false;
return Utils.containsBindMarker(this.left) || Utils.containsBindMarker(this.right);
}

@Override
@@ -89,7 +84,7 @@ void appendTo(StringBuilder sb, List<Object> variables,
static class AndClause extends BinClause {

public AndClause(Clause left, Clause right) {
super(left, "AND", right);
super(left, "AND", right);
}
}
}
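The instanceof rewrites in needAllowFiltering and containsBindMarker are behavior-preserving: for a type named at compile time, x instanceof T and T.class.isInstance(x) agree, and both are null-safe. A tiny standalone illustration (generic types, not the Clause hierarchy):

public class InstanceOfDemo {
    public static void main(String[] args) {
        Object s = "text";
        Object n = null;
        System.out.println(s instanceof String);         // true
        System.out.println(String.class.isInstance(s));  // true
        // Both forms return false for null instead of throwing
        System.out.println(n instanceof String);         // false
        System.out.println(String.class.isInstance(n));  // false
    }
}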
@@ -28,6 +28,7 @@
import org.apache.hugegraph.backend.store.BackendEntry;
import org.apache.hugegraph.backend.store.BackendEntryIterator;
import org.apache.hugegraph.util.E;

import com.datastax.driver.core.ExecutionInfo;
import com.datastax.driver.core.PagingState;
import com.datastax.driver.core.ResultSet;
@@ -39,7 +40,7 @@ public class CassandraEntryIterator extends BackendEntryIterator {
private final Iterator<Row> rows;
private final BiFunction<BackendEntry, Row, BackendEntry> merger;

private int fetchdPageSize;
private int fetchedPageSize;
private long expected;
private BackendEntry next;

@@ -50,7 +51,7 @@ public CassandraEntryIterator(ResultSet results, Query query,
this.rows = results.iterator();
this.merger = merger;

this.fetchdPageSize = results.getAvailableWithoutFetching();
this.fetchedPageSize = results.getAvailableWithoutFetching();
this.next = null;

if (query.paging()) {
@@ -60,20 +61,20 @@ public CassandraEntryIterator(ResultSet results, Query query,
this.expected = PageState.fromString(query.page()).offset();
this.skipPageOffset(query.page());
// Check the number of available rows
E.checkState(this.fetchdPageSize <= query.limit(),
E.checkState(this.fetchedPageSize <= query.limit(),
"Unexpected fetched page size: %s",
this.fetchdPageSize);
this.fetchedPageSize);
if (results.isFullyFetched()) {
/*
* All results fetched
* NOTE: it may be enough or not enough for the entire page
*/
this.expected = this.fetchdPageSize;
this.expected = this.fetchedPageSize;
} else {
/*
* Not fully fetched, that's fetchdPageSize == query.limit(),
* Not fully fetched, that's fetchedPageSize == query.limit(),
*
* NOTE: but there may be fetchdPageSize < query.limit(), means
* NOTE: but there may be fetchedPageSize < query.limit(), means
* not fetched the entire page (ScyllaDB may go here #1340),
* try to fetch next page later until got the expected count.
* Can simulate by: `select.setFetchSize(total - 1)`
@@ -104,10 +105,10 @@ protected final boolean fetch() {
this.expected--;
Row row = this.rows.next();
if (this.query.paging()) {
// Update fetchdPageSize if auto fetch the next page
// Update fetchedPageSize if auto fetch the next page
if (this.expected > 0L && this.availableLocal() == 0) {
if (this.rows.hasNext()) {
this.fetchdPageSize = this.availableLocal();
this.fetchedPageSize = this.availableLocal();
}
}
}
@@ -167,7 +168,7 @@ protected PageState pageState() {
ExecutionInfo previous = infos.get(infos.size() - 2);
PagingState page = previous.getPagingState();
position = page.toBytes();
offset = this.fetchdPageSize - extra;
offset = this.fetchedPageSize - extra;
} else {
PagingState page = this.results.getExecutionInfo().getPagingState();
if (page == null || this.expected > 0L) {
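The bookkeeping above leans on the DataStax 3.x driver's fetch-ahead model. A condensed sketch of the driver calls involved (not part of this diff; assumes a live Session, and ks.t is a placeholder table):

import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

static byte[] pagingPosition(Session session) {
    SimpleStatement stmt = new SimpleStatement("SELECT * FROM ks.t");
    stmt.setFetchSize(100);                  // rows fetched per page
    ResultSet rs = session.execute(stmt);
    // Rows of the current page already held in memory, the same number
    // CassandraEntryIterator captures as fetchedPageSize
    int availableLocal = rs.getAvailableWithoutFetching();
    if (!rs.isFullyFetched()) {
        // Serializable cursor for resuming from the next page, the same
        // mechanism pageState() encodes via PagingState.toBytes()
        return rs.getExecutionInfo().getPagingState().toBytes();
    }
    return null; // last page, nothing to resume from
}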
@@ -24,7 +24,6 @@
import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -34,9 +33,6 @@
import org.apache.cassandra.metrics.CassandraMetricsRegistry.JmxTimerMBean;
import org.apache.cassandra.tools.NodeProbe;
import org.apache.cassandra.tools.nodetool.Compact;
import org.apache.tinkerpop.gremlin.util.NumberHelper;
import org.slf4j.Logger;

import org.apache.hugegraph.backend.store.BackendMetrics;
import org.apache.hugegraph.backend.store.BackendStoreProvider;
import org.apache.hugegraph.backend.store.BackendTable;
@@ -48,6 +44,9 @@
import org.apache.hugegraph.util.InsertionOrderUtil;
import org.apache.hugegraph.util.Log;
import org.apache.hugegraph.util.UnitUtil;
import org.apache.tinkerpop.gremlin.util.NumberHelper;
import org.slf4j.Logger;

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Host;
import com.google.common.collect.ImmutableList;
@@ -94,7 +93,7 @@ protected String keyspace() {
}

protected List<String> tables() {
return Collections.unmodifiableList(this.tables);
return this.tables;
}

protected Map<String, Object> getMetricsByHost(String host) {
@@ -139,7 +139,7 @@ public final boolean clusterConnected() {

/**
* The Session class is a wrapper of driver Session
* Expect every thread hold a its own session(wrapper)
* Expect every thread hold its own session(wrapper)
*/
public final class Session extends AbstractBackendSession {

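The comment fixed above describes a one-wrapper-per-thread design. A hedged sketch of that pattern (illustrative names only, not the actual CassandraSessionPool internals):

public class SessionPoolSketch {

    // Each thread lazily creates and keeps its own wrapper, so per-session
    // state (e.g. a pending batch) is never shared between threads
    private final ThreadLocal<Wrapper> sessions =
                  ThreadLocal.withInitial(Wrapper::new);

    public Wrapper session() {
        return this.sessions.get();
    }

    public static class Wrapper {
        // would hold a reference to the shared driver session
        // plus any per-thread mutable state
    }
}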
@@ -58,7 +58,8 @@
* CassandraShard is used for cassandra scanning operations.
* Each shard represents a range of tokens for a node.
* Reading data from a given shard does not cross multiple nodes.
* Refer to AbstractColumnFamilyInputFormat from https://github.com/2013Commons/hive-cassandra/
* Refer to AbstractColumnFamilyInputFormat from:
* <a href="https://github.com/2013Commons/hive-cassandra/">...</a>
*/
public class CassandraShard {

@@ -89,10 +90,8 @@ public CassandraShard(CassandraSessionPool.Session session,
*/
public List<Shard> getSplits(long splitPartitions, long splitSize) {
// Canonical ranges, split into pieces, fetch the splits in parallel
ExecutorService executor = new ThreadPoolExecutor(
0, 128, 60L,
TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>());
ExecutorService executor = new ThreadPoolExecutor(0, 128, 60L, TimeUnit.SECONDS,
new LinkedBlockingQueue<>());

List<Shard> splits = new ArrayList<>();
try {
@@ -140,10 +139,8 @@ public List<Shard> getSplits(long splitPartitions, long splitSize) {
public List<Shard> getSplits(String start, String end,
int splitPartitions, int splitSize) {

ExecutorService executor = new ThreadPoolExecutor(
0, 128, 60L,
TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>());
ExecutorService executor = new ThreadPoolExecutor(0, 128, 60L, TimeUnit.SECONDS,
new LinkedBlockingQueue<>());

List<Shard> splits = new ArrayList<>();
try {
@@ -249,8 +246,7 @@ private static Map<TokenRange, Long> describeSplits(
/*
* If we have no data on this split or the size estimate is 0,
* return the full split i.e., do not sub-split
* Assume smallest granularity of partition count available from
* CASSANDRA-7688.
* Assume the smallest granularity of partition count available from CASSANDRA-7688.
*/
if (splitCount == 0) {
return ImmutableMap.of(tokenRange, (long) 128);
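One aside on the executor configuration this refactor keeps: with corePoolSize 0 and an unbounded LinkedBlockingQueue, a ThreadPoolExecutor only grows past its core size when the queue rejects a task, which an unbounded queue never does, so at most one worker thread ends up draining the queue. If fetching splits with up to 128 concurrent threads is the intent, a SynchronousQueue (the cached-thread-pool pattern) gives that behavior; a sketch under that assumption:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Each submit either reuses an idle worker or starts a new thread,
// up to 128 running concurrently; idle threads are reclaimed after 60s
ExecutorService executor = new ThreadPoolExecutor(0, 128, 60L, TimeUnit.SECONDS,
                                                  new SynchronousQueue<>());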