diff --git a/.travis.yml b/.travis.yml
index 4f7be19fc7..b6dd5ac5ca 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,6 +5,9 @@ jdk:
 
 sudo: required
 
+addons:
+  postgresql: "9.5"
+
 cache:
   directories:
     - $HOME/.m2
@@ -80,6 +83,7 @@ env:
     - BACKEND=mysql
     - BACKEND=hbase
     - BACKEND=rocksdb
+    - BACKEND=postgresql
   global:
     - RELEASE_BRANCH=^release-.*$
     - RELEASE_TAG=^v[0-9]\..*$
diff --git a/hugegraph-dist/pom.xml b/hugegraph-dist/pom.xml
index 7f697cdba8..33063ec6aa 100644
--- a/hugegraph-dist/pom.xml
+++ b/hugegraph-dist/pom.xml
@@ -59,6 +59,11 @@
             <artifactId>hugegraph-hbase</artifactId>
             <version>${project.version}</version>
         </dependency>
+        <dependency>
+            <groupId>com.baidu.hugegraph</groupId>
+            <artifactId>hugegraph-postgresql</artifactId>
+            <version>${project.version}</version>
+        </dependency>
         <dependency>
             <groupId>io.airlift</groupId>
             <artifactId>airline</artifactId>
diff --git a/hugegraph-dist/src/assembly/static/conf/hugegraph.properties b/hugegraph-dist/src/assembly/static/conf/hugegraph.properties
index fe0729034c..2973ffd51d 100644
--- a/hugegraph-dist/src/assembly/static/conf/hugegraph.properties
+++ b/hugegraph-dist/src/assembly/static/conf/hugegraph.properties
@@ -33,6 +33,7 @@ cassandra.password=
 
 # mysql backend config
+#jdbc.driver=com.mysql.jdbc.Driver
 #jdbc.url=jdbc:mysql://127.0.0.1:3306
 #jdbc.username=root
 #jdbc.password=
diff --git a/hugegraph-dist/src/assembly/travis/install-backend.sh b/hugegraph-dist/src/assembly/travis/install-backend.sh
index 7349b07649..6fdd97ce9f 100755
--- a/hugegraph-dist/src/assembly/travis/install-backend.sh
+++ b/hugegraph-dist/src/assembly/travis/install-backend.sh
@@ -10,4 +10,6 @@ elif [ "$BACKEND" == "hbase" ]; then
     $TRAVIS_DIR/install-hbase.sh
 elif [ "$BACKEND" == "mysql" ]; then
     $TRAVIS_DIR/install-mysql.sh
+elif [ "$BACKEND" == "postgresql" ]; then
+    $TRAVIS_DIR/install-postgresql.sh
 fi
diff --git a/hugegraph-dist/src/assembly/travis/install-postgresql.sh b/hugegraph-dist/src/assembly/travis/install-postgresql.sh
new file mode 100755
index 0000000000..191cff1d6d
--- /dev/null
+++ b/hugegraph-dist/src/assembly/travis/install-postgresql.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -ev
+
+TRAVIS_DIR=`dirname $0`
+CONF=$TRAVIS_DIR/../../../../hugegraph-test/src/main/resources/hugegraph.properties
+
+POSTGRESQL_DRIVER=org.postgresql.Driver
+POSTGRESQL_URL=jdbc:postgresql://localhost:5432/
+POSTGRESQL_USERNAME=postgres
+
+# Set PostgreSQL configurations
+sed -i "s/jdbc.driver=.*/jdbc.driver=$POSTGRESQL_DRIVER/" $CONF
+sed -i "s?jdbc.url=.*?jdbc.url=$POSTGRESQL_URL?" $CONF
+sed -i "s/jdbc.username=.*/jdbc.username=$POSTGRESQL_USERNAME/" $CONF
+
+sudo service postgresql restart
diff --git a/hugegraph-dist/src/assembly/travis/start-server.sh b/hugegraph-dist/src/assembly/travis/start-server.sh
index 873151d722..6bf0fd8cfe 100755
--- a/hugegraph-dist/src/assembly/travis/start-server.sh
+++ b/hugegraph-dist/src/assembly/travis/start-server.sh
@@ -7,15 +7,29 @@ BASE_DIR=hugegraph-$VERSION
 BIN=$BASE_DIR/bin
 CONF=$BASE_DIR/conf/hugegraph.properties
 
+# PostgreSQL configurations
+POSTGRESQL_DRIVER=org.postgresql.Driver
+POSTGRESQL_URL=jdbc:postgresql://localhost:5432/
+POSTGRESQL_USERNAME=postgres
+
 declare -A backend_serializer_map=(["memory"]="text" ["cassandra"]="cassandra" \
                                    ["scylladb"]="scylladb" ["mysql"]="mysql" \
-                                   ["hbase"]="hbase" ["rocksdb"]="binary")
+                                   ["hbase"]="hbase" ["rocksdb"]="binary" \
+                                   ["postgresql"]="postgresql")
 
 SERIALIZER=${backend_serializer_map[$BACKEND]}
 
+# Set backend and serializer
 sed -i "s/backend=.*/backend=$BACKEND/" $CONF
 sed -i "s/serializer=.*/serializer=$SERIALIZER/" $CONF
 
+# Set PostgreSQL configurations if needed
+if [ "$BACKEND" == "postgresql" ]; then
+    sed -i "s/#jdbc.driver=.*/jdbc.driver=$POSTGRESQL_DRIVER/" $CONF
+    sed -i "s?#jdbc.url=.*?jdbc.url=$POSTGRESQL_URL?" $CONF
+    sed -i "s/#jdbc.username=.*/jdbc.username=$POSTGRESQL_USERNAME/" $CONF
+fi
+
 # Append schema.sync_deletion=true to config file
 echo "schema.sync_deletion=true" >> $CONF
diff --git a/hugegraph-dist/src/main/java/com/baidu/hugegraph/dist/RegisterUtil.java b/hugegraph-dist/src/main/java/com/baidu/hugegraph/dist/RegisterUtil.java
index a93746b829..7d19f1638a 100644
--- a/hugegraph-dist/src/main/java/com/baidu/hugegraph/dist/RegisterUtil.java
+++ b/hugegraph-dist/src/main/java/com/baidu/hugegraph/dist/RegisterUtil.java
@@ -89,6 +89,9 @@ private static void registerBackend(String backend) {
             case "palo":
                 registerPalo();
                 break;
+            case "postgresql":
+                registerPostgresql();
+                break;
             default:
                 throw new HugeException("Unsupported backend type '%s'", backend);
         }
@@ -165,6 +168,18 @@ public static void registerPalo() {
                 "com.baidu.hugegraph.backend.store.palo.PaloStoreProvider");
     }
 
+    public static void registerPostgresql() {
+        // Register config
+        OptionSpace.register("postgresql",
+                "com.baidu.hugegraph.backend.store.postgresql.PostgresqlOptions");
+        // Register serializer
+        SerializerFactory.register("postgresql",
+                "com.baidu.hugegraph.backend.store.postgresql.PostgresqlSerializer");
+        // Register backend
+        BackendProviderFactory.register("postgresql",
+                "com.baidu.hugegraph.backend.store.postgresql.PostgresqlStoreProvider");
+    }
+
     public static void registerServer() {
         OptionSpace.register("server", "com.baidu.hugegraph.config.ServerOptions");
     }
diff --git a/hugegraph-dist/src/main/resources/backend.properties b/hugegraph-dist/src/main/resources/backend.properties
index 98c446a84d..45f95d2f1c 100644
--- a/hugegraph-dist/src/main/resources/backend.properties
+++ b/hugegraph-dist/src/main/resources/backend.properties
@@ -1 +1 @@
-backends=[cassandra, scylladb, rocksdb, mysql, palo, hbase]
+backends=[cassandra, scylladb, rocksdb, mysql, palo, hbase, postgresql]
diff --git a/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlSessions.java b/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlSessions.java
index 6749596efa..4f29a9c979 100644
--- a/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlSessions.java
+++ 
b/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlSessions.java @@ -147,8 +147,11 @@ public void createDatabase() { try (Connection conn = this.openWithoutDB(0)) { conn.createStatement().execute(sql); } catch (SQLException e) { - throw new BackendException("Failed to create database '%s'", - this.database); + if (!e.getMessage().endsWith("already exists")) { + throw new BackendException("Failed to create database '%s'", e, + this.database); + } + // Ignore exception if database already exists } } diff --git a/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlStore.java b/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlStore.java index da6e62867c..1e8fc05d96 100644 --- a/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlStore.java +++ b/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlStore.java @@ -57,7 +57,7 @@ public abstract class MysqlStore extends AbstractBackendStore { private final Map tables; - private MysqlSessions sessions; + protected MysqlSessions sessions; public MysqlStore(final BackendStoreProvider provider, final String database, final String store) { @@ -114,7 +114,8 @@ public synchronized void open(HugeConfig config) { try { this.sessions.open(config); } catch (Exception e) { - if (!e.getMessage().startsWith("Unknown database")) { + if (!e.getMessage().startsWith("Unknown database") && + !e.getMessage().endsWith("does not exist")) { throw new BackendException("Failed connect with mysql, " + "please ensure it's ok", e); } diff --git a/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlTable.java b/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlTable.java index 716e71d3a2..f675bc9c4d 100644 --- a/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlTable.java +++ b/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlTable.java @@ -521,7 +521,7 @@ protected void wrapOffset(StringBuilder select, Query query) { select.append(";"); } - private static Object serializeValue(Object value) { + protected static Object serializeValue(Object value) { if (value instanceof Id) { value = ((Id) value).asObject(); } diff --git a/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlTables.java b/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlTables.java index c929bd4c4a..594aae3803 100644 --- a/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlTables.java +++ b/hugegraph-mysql/src/main/java/com/baidu/hugegraph/backend/store/mysql/MysqlTables.java @@ -106,8 +106,8 @@ public void increaseCounter(Session session, HugeType type, long increment) { String update = String.format( "INSERT INTO %s VALUES ('%s', %s) " + - "ON DUPLICATE KEY UPDATE " + - "ID = ID + 1;", TABLE, type.name(), increment); + "ON DUPLICATE KEY UPDATE ID = ID + %s;", + TABLE, type.name(), increment, increment); try { session.execute(update); } catch (SQLException e) { diff --git a/hugegraph-postgresql/pom.xml b/hugegraph-postgresql/pom.xml new file mode 100644 index 0000000000..fa3181582f --- /dev/null +++ b/hugegraph-postgresql/pom.xml @@ -0,0 +1,31 @@ + + + + hugegraph + com.baidu.hugegraph + 0.9.2 + + 4.0.0 + + hugegraph-postgresql + + + + com.baidu.hugegraph + hugegraph-core + ${project.version} + + + com.baidu.hugegraph + hugegraph-mysql + ${project.version} + + + org.postgresql + postgresql + 42.1.4 + + + diff --git 
a/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlFeatures.java b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlFeatures.java new file mode 100644 index 0000000000..8f033288a8 --- /dev/null +++ b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlFeatures.java @@ -0,0 +1,24 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package com.baidu.hugegraph.backend.store.postgresql; + +import com.baidu.hugegraph.backend.store.mysql.MysqlFeatures; + +public class PostgresqlFeatures extends MysqlFeatures {} diff --git a/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlOptions.java b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlOptions.java new file mode 100644 index 0000000000..bd68cf4889 --- /dev/null +++ b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlOptions.java @@ -0,0 +1,82 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */
+
+package com.baidu.hugegraph.backend.store.postgresql;
+
+import com.baidu.hugegraph.config.ConfigOption;
+import com.baidu.hugegraph.config.OptionHolder;
+
+import static com.baidu.hugegraph.config.OptionChecker.*;
+
+public class PostgresqlOptions extends OptionHolder {
+
+    private PostgresqlOptions() {
+        super();
+    }
+
+    private static volatile PostgresqlOptions instance;
+
+    public static synchronized PostgresqlOptions instance() {
+        if (instance == null) {
+            instance = new PostgresqlOptions();
+            instance.registerOptions();
+        }
+        return instance;
+    }
+
+    public static final ConfigOption<String> JDBC_DRIVER =
+            new ConfigOption<>(
+                    "jdbc.driver",
+                    "The JDBC driver class to connect database.",
+                    disallowEmpty(),
+                    "org.postgresql.Driver"
+            );
+
+    public static final ConfigOption<String> JDBC_URL =
+            new ConfigOption<>(
+                    "jdbc.url",
+                    "The url of database in JDBC format.",
+                    disallowEmpty(),
+                    "jdbc:postgresql://127.0.0.1:5432"
+            );
+
+    public static final ConfigOption<String> SSL_MODE =
+            new ConfigOption<>(
+                    "jdbc.ssl_mode",
+                    "The SSL mode of connections with database.",
+                    disallowEmpty(),
+                    "disable"
+            );
+
+    public static final ConfigOption<String> JDBC_USERNAME =
+            new ConfigOption<>(
+                    "jdbc.username",
+                    "The username to login database.",
+                    disallowEmpty(),
+                    "root"
+            );
+
+    public static final ConfigOption<String> JDBC_PASSWORD =
+            new ConfigOption<>(
+                    "jdbc.password",
+                    "The password corresponding to jdbc.username.",
+                    null,
+                    ""
+            );
+}
diff --git a/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlSerializer.java b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlSerializer.java
new file mode 100644
index 0000000000..adb465743a
--- /dev/null
+++ b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlSerializer.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.baidu.hugegraph.backend.store.postgresql;
+
+import com.baidu.hugegraph.backend.store.mysql.MysqlSerializer;
+
+public class PostgresqlSerializer extends MysqlSerializer {}
diff --git a/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlSessions.java b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlSessions.java
new file mode 100644
index 0000000000..204e889ea3
--- /dev/null
+++ b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlSessions.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package com.baidu.hugegraph.backend.store.postgresql; + +import com.baidu.hugegraph.backend.store.mysql.MysqlSessions; +import com.baidu.hugegraph.config.HugeConfig; + +public class PostgresqlSessions extends MysqlSessions { + + public PostgresqlSessions(HugeConfig config, String database, String store) { + super(config, database, store); + } + + @Override + protected String buildCreateDatabase(String database) { + return String.format("CREATE DATABASE %s ENCODING='UTF-8' " + + "TEMPLATE=template0 LC_COLLATE='C' LC_CTYPE='C';", database); + } +} diff --git a/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlStore.java b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlStore.java new file mode 100644 index 0000000000..165f6d64eb --- /dev/null +++ b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlStore.java @@ -0,0 +1,40 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package com.baidu.hugegraph.backend.store.postgresql; + +import com.baidu.hugegraph.backend.store.BackendFeatures; +import com.baidu.hugegraph.backend.store.BackendStoreProvider; +import com.baidu.hugegraph.backend.store.mysql.MysqlStore; +import com.baidu.hugegraph.config.HugeConfig; + +public abstract class PostgresqlStore extends MysqlStore { + + private static final BackendFeatures FEATURES = new PostgresqlFeatures(); + + public PostgresqlStore(BackendStoreProvider provider, + String database, String name) { + super(provider, database, name); + } + + @Override + protected PostgresqlSessions openSessionPool(HugeConfig config) { + return new PostgresqlSessions(config, this.database(), this.store()); + } +} diff --git a/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlStoreProvider.java b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlStoreProvider.java new file mode 100644 index 0000000000..52521518f6 --- /dev/null +++ b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlStoreProvider.java @@ -0,0 +1,139 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package com.baidu.hugegraph.backend.store.postgresql; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import com.baidu.hugegraph.backend.id.Id; +import com.baidu.hugegraph.backend.store.BackendStore; +import com.baidu.hugegraph.backend.store.BackendStoreProvider; +import com.baidu.hugegraph.backend.store.mysql.MysqlSessions; +import com.baidu.hugegraph.backend.store.mysql.MysqlStoreProvider; +import com.baidu.hugegraph.backend.store.mysql.MysqlTable; +import com.baidu.hugegraph.type.HugeType; +import com.baidu.hugegraph.type.define.Directions; + +public class PostgresqlStoreProvider extends MysqlStoreProvider { + + @Override + protected BackendStore newSchemaStore(String store) { + return new PostgresqlSchemaStore(this, this.database(), store); + } + + @Override + protected BackendStore newGraphStore(String store) { + return new PostgresqlGraphStore(this, this.database(), store); + } + + @Override + public String type() { + return "postgresql"; + } + + @Override + public String version() { + return "1.0"; + } + + public static class PostgresqlSchemaStore extends PostgresqlStore { + + private final PostgresqlTables.Counters counters; + + public PostgresqlSchemaStore(BackendStoreProvider provider, + String database, String store) { + super(provider, database, store); + + this.counters = new PostgresqlTables.Counters(); + + registerTableManager(HugeType.VERTEX_LABEL, + new PostgresqlTables.VertexLabel()); + registerTableManager(HugeType.EDGE_LABEL, + new PostgresqlTables.EdgeLabel()); + registerTableManager(HugeType.PROPERTY_KEY, + new PostgresqlTables.PropertyKey()); + registerTableManager(HugeType.INDEX_LABEL, + new PostgresqlTables.IndexLabel()); + } + + @Override + protected Collection tables() { + List tables = new ArrayList<>(super.tables()); + tables.add(this.counters); + return tables; + } + + @Override + public void increaseCounter(HugeType type, long increment) { + this.checkSessionConnected(); + MysqlSessions.Session session = super.sessions.session(); + this.counters.increaseCounter(session, type, increment); + } + + @Override + public long getCounter(HugeType type) { + this.checkSessionConnected(); + MysqlSessions.Session session = super.sessions.session(); + return this.counters.getCounter(session, type); + } + } + + public static class PostgresqlGraphStore extends PostgresqlStore { + + public PostgresqlGraphStore(BackendStoreProvider provider, + String database, String store) { + super(provider, database, store); + + registerTableManager(HugeType.VERTEX, + new PostgresqlTables.Vertex(store)); + registerTableManager(HugeType.EDGE_OUT, + new PostgresqlTables.Edge(store, + Directions.OUT)); + registerTableManager(HugeType.EDGE_IN, + new PostgresqlTables.Edge(store, + Directions.IN)); + registerTableManager(HugeType.SECONDARY_INDEX, + new PostgresqlTables.SecondaryIndex(store)); + registerTableManager(HugeType.RANGE_INDEX, + new PostgresqlTables.RangeIndex(store)); + registerTableManager(HugeType.SEARCH_INDEX, + new PostgresqlTables.SearchIndex(store)); + } + + @Override + public Id nextId(HugeType type) { + throw new UnsupportedOperationException( + "PostgresqlGraphStore.nextId()"); + } + + @Override + public void increaseCounter(HugeType type, long increment) { + throw new UnsupportedOperationException( + "PostgresqlGraphStore.increaseCounter()"); + } + + @Override + public long getCounter(HugeType type) { + throw new UnsupportedOperationException( + "PostgresqlGraphStore.getCounter()"); + } + } +} diff --git 
a/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlTable.java b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlTable.java new file mode 100644 index 0000000000..c222ca3766 --- /dev/null +++ b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlTable.java @@ -0,0 +1,341 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package com.baidu.hugegraph.backend.store.postgresql; + +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.postgresql.core.Utils; +import org.slf4j.Logger; + +import com.baidu.hugegraph.HugeException; +import com.baidu.hugegraph.backend.BackendException; +import com.baidu.hugegraph.backend.id.Id; +import com.baidu.hugegraph.backend.query.Condition; +import com.baidu.hugegraph.backend.query.Query; +import com.baidu.hugegraph.backend.store.TableDefine; +import com.baidu.hugegraph.backend.store.mysql.MysqlBackendEntry; +import com.baidu.hugegraph.backend.store.mysql.MysqlEntryIterator; +import com.baidu.hugegraph.backend.store.mysql.MysqlSessions.Session; +import com.baidu.hugegraph.backend.store.mysql.MysqlTable; +import com.baidu.hugegraph.backend.store.mysql.WhereBuilder; +import com.baidu.hugegraph.type.define.HugeKeys; +import com.baidu.hugegraph.util.Log; + +public abstract class PostgresqlTable extends MysqlTable { + + private static final Logger LOG = Log.logger(PostgresqlStore.class); + + private String insertTemplate; + + public PostgresqlTable(String table) { + super(table); + } + + @Override + protected void createTable(Session session, TableDefine tableDefine) { + StringBuilder sql = new StringBuilder(); + sql.append("CREATE TABLE IF NOT EXISTS "); + sql.append(this.table()).append(" ("); + // Add columns + for (Map.Entry entry : + tableDefine.columns().entrySet()) { + sql.append(formatKey(entry.getKey())); + sql.append(" "); + sql.append(entry.getValue()); + sql.append(", "); + } + // Specified primary keys + sql.append(" PRIMARY KEY ("); + int i = 0; + int size = tableDefine.keys().size(); + for (HugeKeys key : tableDefine.keys()) { + sql.append(formatKey(key)); + if (++i != size) { + sql.append(", "); + } + } + sql.append("))"); + + LOG.debug("Create table: {}", sql); + try { + session.execute(sql.toString()); + } catch (SQLException e) { + throw new BackendException("Failed to create table with '%s'", + e, sql); + } + } + + @Override + protected void dropTable(Session session) { + LOG.debug("Drop table: {}", this.table()); + String sql = String.format("DROP TABLE IF EXISTS %s CASCADE;", + this.table()); + try { + session.execute(sql); + } catch 
(SQLException e) { + throw new BackendException("Failed to drop table with '%s'", + e, sql); + } + } + + @Override + protected void truncateTable(Session session) { + LOG.debug("Truncate table: {}", this.table()); + String sql = String.format("TRUNCATE TABLE %s CASCADE;", this.table()); + try { + session.execute(sql); + } catch (SQLException e) { + throw new BackendException("Failed to truncate table with '%s'", + e, sql); + } + } + + /** + * Insert an entire row + */ + @Override + public void insert(Session session, MysqlBackendEntry.Row entry) { + String template = this.buildInsertTemplate(entry); + + PreparedStatement insertStmt; + try { + // Create or get insert prepare statement + insertStmt = session.prepareStatement(template); + int i = 1; + int size = entry.columns().size(); + for (Object object : entry.columns().values()) { + if (object.equals("\u0000")) { + object = ""; + } + insertStmt.setObject(i, object); + insertStmt.setObject(size + i++, object); + } + } catch (SQLException e) { + throw new BackendException("Failed to prepare statement '%s'" + + "for entry: %s", template, entry); + } + session.add(insertStmt); + } + + @Override + public void delete(Session session, MysqlBackendEntry.Row entry) { + List idNames = this.idColumnName(); + String template = this.buildDeleteTemplate(idNames); + + PreparedStatement deleteStmt; + try { + deleteStmt = session.prepareStatement(template); + if (entry.columns().isEmpty()) { + // Delete just by id + List idValues = this.idColumnValue(entry); + assert idNames.size() == idValues.size(); + + for (int i = 0, n = idNames.size(); i < n; i++) { + deleteStmt.setObject(i + 1, idValues.get(i)); + } + } else { + // Delete just by column keys(must be id columns) + for (int i = 0, n = idNames.size(); i < n; i++) { + HugeKeys key = idNames.get(i); + Object value = entry.column(key); + if (value != null && value.equals("\u0000")) { + value = "\'\'"; + } + + deleteStmt.setObject(i + 1, value); + } + } + } catch (SQLException e) { + throw new BackendException("Failed to prepare statement '%s'" + + "with entry columns %s", + template, entry.columns().values()); + } + session.add(deleteStmt); + } + + protected String buildInsertTemplate(MysqlBackendEntry.Row entry) { + if (this.insertTemplate != null) { + return this.insertTemplate; + } + + StringBuilder insert = new StringBuilder(); + insert.append("INSERT INTO ").append(this.table()).append(" ("); + + int i = 0; + int size = entry.columns().size(); + for (HugeKeys key : entry.columns().keySet()) { + insert.append(formatKey(key)); + if (++i != size) { + insert.append(", "); + } + } + insert.append(") VALUES ("); + + for (i = 0; i < size; i++) { + insert.append("?"); + if (i != size - 1) { + insert.append(", "); + } + } + insert.append(")"); + + i = 0; + size = this.tableDefine().keys().size(); + insert.append(" ON CONFLICT ("); + for (HugeKeys key : this.tableDefine().keys()) { + insert.append(formatKey(key)); + if (++i != size) { + insert.append(", "); + } + } + insert.append(")"); + + i = 0; + size = entry.columns().keySet().size(); + insert.append(" DO UPDATE SET "); + for (HugeKeys key : entry.columns().keySet()) { + insert.append(formatKey(key)).append(" = ?"); + if (++i != size) { + insert.append(", "); + } + } + + this.insertTemplate = insert.toString(); + return this.insertTemplate; + } + + protected void wrapPage(StringBuilder select, Query query) { + String page = query.page(); + // It's the first time if page is empty + if (!page.isEmpty()) { + MysqlEntryIterator.PageState + pageState = 
MysqlEntryIterator.PageState.fromString(page); + Map columns = pageState.columns(); + + List idColumnNames = this.idColumnName(); + List values = new ArrayList<>(idColumnNames.size()); + for (HugeKeys key : idColumnNames) { + values.add(columns.get(key)); + } + + // Need add `where` to `select` when query is IdQuery + boolean startWithWhere = query.conditions().isEmpty(); + WhereBuilder where = new WhereBuilder(startWithWhere); + where.gte(formatKeys(idColumnNames), values); + if (!startWithWhere) { + select.append(" AND"); + } + select.append(where.build()); + } + + int i = 0; + int size = this.tableDefine().keys().size(); + + // Set order-by to keep results order consistence for result + select.append(" ORDER BY "); + for (HugeKeys hugeKey : this.tableDefine().keys()) { + String key = formatKey(hugeKey); + select.append(key).append(" "); + select.append("ASC "); + if (++i != size) { + select.append(", "); + } + } + + assert query.limit() != Query.NO_LIMIT; + // Fetch `limit + 1` records for judging whether reached the last page + select.append(" limit "); + select.append(query.limit() + 1); + select.append(";"); + } + + @Override + protected StringBuilder relation2Sql(Condition.Relation relation) { + String key = relation.serialKey().toString(); + Object value = relation.serialValue(); + + value = serializeValue(value); + + StringBuilder sql = new StringBuilder(32); + sql.append(key); + switch (relation.relation()) { + case EQ: + sql.append(" = ").append(value); + break; + case NEQ: + sql.append(" != ").append(value); + break; + case GT: + sql.append(" > ").append(value); + break; + case GTE: + sql.append(" >= ").append(value); + break; + case LT: + sql.append(" < ").append(value); + break; + case LTE: + sql.append(" <= ").append(value); + break; + case IN: + sql.append(" IN ("); + List values = (List) value; + for (int i = 0, n = values.size(); i < n; i++) { + sql.append(serializeValue(values.get(i))); + if (i != n - 1) { + sql.append(", "); + } + } + sql.append(")"); + break; + case CONTAINS: + case CONTAINS_KEY: + case SCAN: + default: + throw new AssertionError("Unsupported relation: " + relation); + } + return sql; + } + + protected static Object serializeValue(Object value) { + if (value instanceof Id) { + value = ((Id) value).asObject(); + } + if (value instanceof String) { + if (value == "\u0000") { + return "\'\'"; + } + StringBuilder builder = new StringBuilder(32); + builder.append('\''); + try { + Utils.escapeLiteral(builder, (String) value, false); + } catch (SQLException e) { + throw new HugeException("Failed to escape '%s'", e, value); + } + builder.append('\''); + value = builder.toString(); + } + return value; + } +} diff --git a/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlTables.java b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlTables.java new file mode 100644 index 0000000000..794c3e9182 --- /dev/null +++ b/hugegraph-postgresql/src/main/java/com/baidu/hugegraph/backend/store/postgresql/PostgresqlTables.java @@ -0,0 +1,422 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package com.baidu.hugegraph.backend.store.postgresql; + +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import com.baidu.hugegraph.backend.BackendException; +import com.baidu.hugegraph.backend.id.EdgeId; +import com.baidu.hugegraph.backend.id.Id; +import com.baidu.hugegraph.backend.id.IdGenerator; +import com.baidu.hugegraph.backend.id.IdUtil; +import com.baidu.hugegraph.backend.id.SplicingIdGenerator; +import com.baidu.hugegraph.backend.store.BackendEntry; +import com.baidu.hugegraph.backend.store.TableDefine; +import com.baidu.hugegraph.backend.store.mysql.MysqlBackendEntry; +import com.baidu.hugegraph.backend.store.mysql.MysqlSessions.Session; +import com.baidu.hugegraph.backend.store.mysql.MysqlTables; +import com.baidu.hugegraph.type.HugeType; +import com.baidu.hugegraph.type.define.Directions; +import com.baidu.hugegraph.type.define.HugeKeys; +import com.baidu.hugegraph.util.E; + +public class PostgresqlTables { + + private static final String NOT_NULL = "NOT NULL"; + private static final String DEFAULT_EMPTY = "DEFAULT ''"; + + private static final String DATATYPE_PK = "INT"; + private static final String DATATYPE_SL = "INT"; // VL/EL + private static final String DATATYPE_IL = "INT"; + + private static final String INT = "INT"; + private static final String FLOAT = "FLOAT"; + private static final String VARCHAR = "VARCHAR(255)"; + private static final String TEXT = "VARCHAR(65533)"; + private static final String BOOL = "BOOL"; + + public static class PostgresqlTableTemplate extends PostgresqlTable { + + protected TableDefine define; + + public PostgresqlTableTemplate(String table) { + super(table); + } + + @Override + public TableDefine tableDefine() { + return this.define; + } + } + + public static class Counters extends PostgresqlTableTemplate { + + public static final String TABLE = "counters"; + + public Counters() { + super(TABLE); + + this.define = new TableDefine(); + this.define.column(HugeKeys.SCHEMA_TYPE, VARCHAR); + this.define.column(HugeKeys.ID, INT); + this.define.keys(HugeKeys.SCHEMA_TYPE); + } + + public long getCounter(Session session, HugeType type) { + String schemaCol = formatKey(HugeKeys.SCHEMA_TYPE); + String idCol = formatKey(HugeKeys.ID); + + String select = String.format("SELECT ID FROM %s WHERE %s = '%s';", + TABLE, schemaCol, type.name()); + try { + ResultSet resultSet = session.select(select); + if (resultSet.next()) { + return resultSet.getLong(idCol); + } else { + return 0L; + } + } catch (SQLException e) { + throw new BackendException( + "Failed to get id from counters with type '%s'", + e, type); + } + } + + public void increaseCounter(Session session, HugeType type, + long increment) { + String update = String.format("INSERT INTO %s (%s, %s) VALUES " + + "('%s', %s) ON CONFLICT (%s) " + + "DO UPDATE SET ID = %s.ID + %s;", + TABLE, + formatKey(HugeKeys.SCHEMA_TYPE), + formatKey(HugeKeys.ID), + type.name(), increment, + formatKey(HugeKeys.SCHEMA_TYPE), + TABLE, increment); + try { + 
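+                // Upsert: insert the counter row, or add `increment` to the existing row on conflict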
session.execute(update); + } catch (SQLException e) { + throw new BackendException( + "Failed to update counters with type '%s'", e, type); + } + } + } + + public static class VertexLabel extends PostgresqlTableTemplate { + + public static final String TABLE = "vertex_labels"; + + public VertexLabel() { + super(TABLE); + this.define = new TableDefine(); + this.define.column(HugeKeys.ID, DATATYPE_SL, NOT_NULL); + this.define.column(HugeKeys.NAME, VARCHAR, NOT_NULL); + this.define.column(HugeKeys.ID_STRATEGY, INT, NOT_NULL); + this.define.column(HugeKeys.PRIMARY_KEYS, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.PROPERTIES, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.NULLABLE_KEYS, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.INDEX_LABELS, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.ENABLE_LABEL_INDEX, BOOL, NOT_NULL); + this.define.column(HugeKeys.USER_DATA, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.STATUS, INT, NOT_NULL); + // Unique keys/hash keys + this.define.keys(HugeKeys.ID); + } + } + + public static class EdgeLabel extends PostgresqlTableTemplate { + + public static final String TABLE = "edge_labels"; + + public EdgeLabel() { + super(TABLE); + this.define = new TableDefine(); + this.define.column(HugeKeys.ID, DATATYPE_SL, NOT_NULL); + this.define.column(HugeKeys.NAME, VARCHAR, NOT_NULL); + this.define.column(HugeKeys.FREQUENCY, INT, NOT_NULL); + this.define.column(HugeKeys.SOURCE_LABEL, INT, NOT_NULL); + this.define.column(HugeKeys.TARGET_LABEL, INT, NOT_NULL); + this.define.column(HugeKeys.SORT_KEYS, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.PROPERTIES, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.NULLABLE_KEYS, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.INDEX_LABELS, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.ENABLE_LABEL_INDEX, BOOL, NOT_NULL); + this.define.column(HugeKeys.USER_DATA, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.STATUS, INT, NOT_NULL); + // Unique keys/hash keys + this.define.keys(HugeKeys.ID); + } + } + + public static class PropertyKey extends PostgresqlTableTemplate { + + public static final String TABLE = "property_keys"; + + public PropertyKey() { + super(TABLE); + this.define = new TableDefine(); + this.define.column(HugeKeys.ID, DATATYPE_PK, NOT_NULL); + this.define.column(HugeKeys.NAME, VARCHAR, NOT_NULL); + this.define.column(HugeKeys.DATA_TYPE, INT, NOT_NULL); + this.define.column(HugeKeys.CARDINALITY, INT, NOT_NULL); + this.define.column(HugeKeys.PROPERTIES, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.USER_DATA, VARCHAR, DEFAULT_EMPTY); + this.define.column(HugeKeys.STATUS, INT, NOT_NULL); + // Unique keys/hash keys + this.define.keys(HugeKeys.ID); + } + } + + public static class IndexLabel extends PostgresqlTableTemplate { + + public static final String TABLE = "index_labels"; + + public IndexLabel() { + super(TABLE); + this.define = new TableDefine(); + this.define.column(HugeKeys.ID, DATATYPE_IL, NOT_NULL); + this.define.column(HugeKeys.NAME, VARCHAR, NOT_NULL); + this.define.column(HugeKeys.BASE_TYPE, INT, NOT_NULL); + this.define.column(HugeKeys.BASE_VALUE, INT, NOT_NULL); + this.define.column(HugeKeys.INDEX_TYPE, INT, NOT_NULL); + this.define.column(HugeKeys.FIELDS, VARCHAR, NOT_NULL); + this.define.column(HugeKeys.STATUS, INT, NOT_NULL); + // Unique keys/hash keys + this.define.keys(HugeKeys.ID); + } + } + + public static class Vertex extends PostgresqlTableTemplate { + + public static final String TABLE = "vertices"; + + 
public Vertex(String store) { + super(joinTableName(store, TABLE)); + + this.define = new TableDefine(); + this.define.column(HugeKeys.ID, VARCHAR, NOT_NULL); + this.define.column(HugeKeys.LABEL, INT, NOT_NULL); + this.define.column(HugeKeys.PROPERTIES, TEXT, DEFAULT_EMPTY); + // Unique keys/hash keys + this.define.keys(HugeKeys.ID); + } + } + + public static class Edge extends PostgresqlTableTemplate { + + private final Directions direction; + private final String delByLabelTemplate; + + public Edge(String store, Directions direction) { + super(joinTableName(store, MysqlTables.Edge.table(direction))); + + this.direction = direction; + this.delByLabelTemplate = String.format( + "DELETE FROM %s WHERE %s = ?;", + this.table(), formatKey(HugeKeys.LABEL)); + + this.define = new TableDefine(); + this.define.column(HugeKeys.OWNER_VERTEX, VARCHAR, NOT_NULL); + this.define.column(HugeKeys.DIRECTION, INT, NOT_NULL); + this.define.column(HugeKeys.LABEL, INT, NOT_NULL); + this.define.column(HugeKeys.SORT_VALUES, VARCHAR, NOT_NULL, + DEFAULT_EMPTY); + this.define.column(HugeKeys.OTHER_VERTEX, VARCHAR, NOT_NULL); + this.define.column(HugeKeys.PROPERTIES, TEXT, DEFAULT_EMPTY); + // Unique keys/hash keys + this.define.keys(HugeKeys.OWNER_VERTEX, HugeKeys.DIRECTION, + HugeKeys.LABEL, HugeKeys.SORT_VALUES, + HugeKeys.OTHER_VERTEX); + } + + @Override + protected List idColumnValue(Id id) { + EdgeId edgeId; + if (!(id instanceof EdgeId)) { + String[] idParts = EdgeId.split(id); + if (idParts.length == 1) { + // Delete edge by label + return Arrays.asList((Object[]) idParts); + } + id = IdUtil.readString(id.asString()); + edgeId = EdgeId.parse(id.asString()); + } else { + edgeId = (EdgeId) id; + } + + E.checkState(edgeId.direction() == this.direction, + "Can't query %s edges from %s edges table", + edgeId.direction(), this.direction); + + List list = new ArrayList<>(5); + list.add(IdUtil.writeString(edgeId.ownerVertexId())); + list.add(edgeId.direction().code()); + list.add(edgeId.edgeLabelId().asLong()); + list.add(edgeId.sortValues()); + list.add(IdUtil.writeString(edgeId.otherVertexId())); + return list; + } + + @Override + public void delete(Session session, MysqlBackendEntry.Row entry) { + // Let super class do delete if not deleting edge by label + List idParts = this.idColumnValue(entry.id()); + if (idParts.size() > 1 || entry.columns().size() > 0) { + super.delete(session, entry); + return; + } + + // The only element is label + this.deleteEdgesByLabel(session, entry.id()); + } + + private void deleteEdgesByLabel(Session session, Id label) { + PreparedStatement deleteStmt; + try { + // Create or get delete prepare statement + deleteStmt = session.prepareStatement(this.delByLabelTemplate); + // Delete edges + deleteStmt.setObject(1, label.asLong()); + } catch (SQLException e) { + throw new BackendException("Failed to prepare statement '%s'", + this.delByLabelTemplate); + } + session.add(deleteStmt); + } + + @Override + protected BackendEntry mergeEntries(BackendEntry e1, BackendEntry e2) { + // Merge edges into vertex + // TODO: merge rows before calling row2Entry() + + MysqlBackendEntry current = (MysqlBackendEntry) e1; + MysqlBackendEntry next = (MysqlBackendEntry) e2; + + E.checkState(current == null || current.type().isVertex(), + "The current entry must be null or VERTEX"); + E.checkState(next != null && next.type().isEdge(), + "The next entry must be EDGE"); + + if (current != null) { + Id nextVertexId = IdGenerator.of( + next.column(HugeKeys.OWNER_VERTEX)); + if 
(current.id().equals(nextVertexId)) { + current.subRow(next.row()); + return current; + } + } + + return this.wrapByVertex(next); + } + + private MysqlBackendEntry wrapByVertex(MysqlBackendEntry edge) { + assert edge.type().isEdge(); + String ownerVertex = edge.column(HugeKeys.OWNER_VERTEX); + E.checkState(ownerVertex != null, "Invalid backend entry"); + Id vertexId = IdGenerator.of(ownerVertex); + MysqlBackendEntry vertex = new MysqlBackendEntry(HugeType.VERTEX, + vertexId); + + vertex.column(HugeKeys.ID, ownerVertex); + vertex.column(HugeKeys.PROPERTIES, ""); + + vertex.subRow(edge.row()); + return vertex; + } + } + + public abstract static class Index extends PostgresqlTableTemplate { + + public Index(String table) { + super(table); + } + + protected abstract String entryId(MysqlBackendEntry entry); + } + + public static class SecondaryIndex extends Index { + + public static final String TABLE = "secondary_indexes"; + + public SecondaryIndex(String store) { + this(store, TABLE); + } + + protected SecondaryIndex(String store, String table) { + super(joinTableName(store, table)); + + this.define = new TableDefine(); + this.define.column(HugeKeys.FIELD_VALUES, VARCHAR, NOT_NULL); + this.define.column(HugeKeys.INDEX_LABEL_ID, INT, NOT_NULL); + this.define.column(HugeKeys.ELEMENT_IDS, VARCHAR, NOT_NULL); + // Unique keys/hash keys + this.define.keys(HugeKeys.FIELD_VALUES, + HugeKeys.INDEX_LABEL_ID, + HugeKeys.ELEMENT_IDS); + } + + @Override + protected final String entryId(MysqlBackendEntry entry) { + String fieldValues = entry.column(HugeKeys.FIELD_VALUES); + Long labelId = entry.column(HugeKeys.INDEX_LABEL_ID); + return SplicingIdGenerator.concat(fieldValues, labelId.toString()); + } + } + + public static class SearchIndex extends SecondaryIndex { + + public static final String TABLE = "search_indexes"; + + public SearchIndex(String store) { + super(store, TABLE); + } + } + + public static class RangeIndex extends Index { + + public static final String TABLE = "range_indexes"; + + public RangeIndex(String store) { + super(joinTableName(store, TABLE)); + + this.define = new TableDefine(); + this.define.column(HugeKeys.INDEX_LABEL_ID, INT, NOT_NULL); + this.define.column(HugeKeys.FIELD_VALUES, FLOAT, NOT_NULL); + this.define.column(HugeKeys.ELEMENT_IDS, VARCHAR, NOT_NULL); + // Unique keys/hash keys + this.define.keys(HugeKeys.INDEX_LABEL_ID, + HugeKeys.FIELD_VALUES, + HugeKeys.ELEMENT_IDS); + } + + @Override + protected final String entryId(MysqlBackendEntry entry) { + Double fieldValue = entry.column(HugeKeys.FIELD_VALUES); + Long labelId = entry.column(HugeKeys.INDEX_LABEL_ID); + return SplicingIdGenerator.concat(labelId.toString(), + fieldValue.toString()); + } + } +} diff --git a/hugegraph-test/pom.xml b/hugegraph-test/pom.xml index 8d7addb7f9..4af37a9973 100644 --- a/hugegraph-test/pom.xml +++ b/hugegraph-test/pom.xml @@ -47,6 +47,11 @@ hugegraph-mysql ${project.version} + + com.baidu.hugegraph + hugegraph-postgresql + ${project.version} + com.baidu.hugegraph hugegraph-dist @@ -293,5 +298,15 @@ hbase + + postgresql + + false + + + postgresql + postgresql + + diff --git a/hugegraph-test/src/main/java/com/baidu/hugegraph/api/MetricsApiTest.java b/hugegraph-test/src/main/java/com/baidu/hugegraph/api/MetricsApiTest.java index bff59662df..7175785e85 100644 --- a/hugegraph-test/src/main/java/com/baidu/hugegraph/api/MetricsApiTest.java +++ b/hugegraph-test/src/main/java/com/baidu/hugegraph/api/MetricsApiTest.java @@ -68,6 +68,7 @@ public void testMetricsBackend() { case "memory": case 
"mysql": case "hbase": + case "postgresql": String except = (String) assertMapContains(graph, "exception"); Assert.assertTrue(except, except.contains(notSupport)); break; diff --git a/hugegraph-test/src/main/java/com/baidu/hugegraph/api/TaskApiTest.java b/hugegraph-test/src/main/java/com/baidu/hugegraph/api/TaskApiTest.java index 040c22c0de..75663669ff 100644 --- a/hugegraph-test/src/main/java/com/baidu/hugegraph/api/TaskApiTest.java +++ b/hugegraph-test/src/main/java/com/baidu/hugegraph/api/TaskApiTest.java @@ -56,7 +56,7 @@ public void prepareSchema() { public void testList() { int taskId = this.rebuild(); - Response r = client().get(path); + Response r = client().get(path, ImmutableMap.of("limit", -1)); String content = assertResponseStatus(200, r); List> tasks = assertJsonContains(content, "tasks"); assertArrayContains(tasks, "id", taskId); diff --git a/hugegraph-test/src/main/java/com/baidu/hugegraph/core/EdgeCoreTest.java b/hugegraph-test/src/main/java/com/baidu/hugegraph/core/EdgeCoreTest.java index 21bb51ccc3..f855ad10a8 100644 --- a/hugegraph-test/src/main/java/com/baidu/hugegraph/core/EdgeCoreTest.java +++ b/hugegraph-test/src/main/java/com/baidu/hugegraph/core/EdgeCoreTest.java @@ -2394,7 +2394,7 @@ public void testQueryEdgeByPropertyWithEmptyString() { Assert.assertEquals("", edge.value("tool")); edge = graph.traversal().E().has("tool", "").has("place", "park") - .has("reason", "jeer").next(); + .has("reason", "jeer").next(); Assert.assertEquals(1, (int) edge.value("id")); } @@ -2578,6 +2578,7 @@ public void testQueryEdgeByPage() { Assert.assertEquals(90, edges.size()); } + @SuppressWarnings("unchecked") @Test public void testQueryEdgeByPageResultsMatched() { Assume.assumeTrue("Not support paging", @@ -2593,17 +2594,19 @@ public void testQueryEdgeByPageResultsMatched() { String page = PageState.PAGE_NONE; int size = 20; + Set pageAll = new HashSet<>(); for (int i = 0; i < 100 / size; i++) { iter = graph.traversal().E() .has("~page", page).limit(size); - List vertexes = IteratorUtils.asList(iter); - Assert.assertEquals(size, vertexes.size()); + List edges = IteratorUtils.asList(iter); + Assert.assertEquals(size, edges.size()); - List expected = all.subList(i * size, (i + 1) * size); - Assert.assertEquals(expected, vertexes); + pageAll.addAll(edges); page = TraversalUtil.page(iter); } + Assert.assertEquals(100, pageAll.size()); + Assert.assertTrue(all.containsAll(pageAll)); Assert.assertNull(page); } diff --git a/hugegraph-test/src/main/java/com/baidu/hugegraph/core/VertexCoreTest.java b/hugegraph-test/src/main/java/com/baidu/hugegraph/core/VertexCoreTest.java index 98c1822bb5..8dbf46d3ff 100644 --- a/hugegraph-test/src/main/java/com/baidu/hugegraph/core/VertexCoreTest.java +++ b/hugegraph-test/src/main/java/com/baidu/hugegraph/core/VertexCoreTest.java @@ -3265,6 +3265,7 @@ public void testQueryByPage() { Assert.assertEquals(90, vertexes.size()); } + @SuppressWarnings("unchecked") @Test public void testQueryByPageResultsMatched() { Assume.assumeTrue("Not support paging", @@ -3280,17 +3281,19 @@ public void testQueryByPageResultsMatched() { String page = PageState.PAGE_NONE; int size = 20; + Set pageAll = new HashSet<>(); for (int i = 0; i < 100 / size; i++) { iter = graph.traversal().V() .has("~page", page).limit(size); - List vertexes = IteratorUtils.asList(iter); + List vertexes = IteratorUtils.asList(iter); Assert.assertEquals(size, vertexes.size()); - List expected = all.subList(i * size, (i + 1) * size); - Assert.assertEquals(expected, vertexes); + pageAll.addAll(vertexes); 
page = TraversalUtil.page(iter); } + Assert.assertEquals(100, pageAll.size()); + Assert.assertTrue(all.containsAll(pageAll)); Assert.assertNull(page); } diff --git a/hugegraph-test/src/main/resources/hugegraph.properties b/hugegraph-test/src/main/resources/hugegraph.properties index 479584a48c..9dbc8ecc97 100644 --- a/hugegraph-test/src/main/resources/hugegraph.properties +++ b/hugegraph-test/src/main/resources/hugegraph.properties @@ -37,6 +37,7 @@ jdbc.username=root jdbc.password= jdbc.reconnect_max_times=3 jdbc.reconnect_interval=3 +jdbc.sslmode=disable # palo backend config palo.host=localhost diff --git a/pom.xml b/pom.xml index 796be2e61d..b954ee2614 100644 --- a/pom.xml +++ b/pom.xml @@ -124,6 +124,7 @@ hugegraph-mysql hugegraph-palo hugegraph-hbase + hugegraph-postgresql
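Note: both the row upsert built in PostgresqlTable.buildInsertTemplate() and the schema-id counter in PostgresqlTables.Counters.increaseCounter() rely on PostgreSQL's INSERT ... ON CONFLICT ... DO UPDATE. The standalone sketch below exercises that statement shape through plain JDBC; it assumes a local PostgreSQL reachable with the same settings the Travis script configures (localhost:5432, user postgres, empty password), and the counters_demo table name is made up purely for illustration.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class UpsertSketch {

        public static void main(String[] args) throws Exception {
            // Connection settings mirror install-postgresql.sh / start-server.sh
            String url = "jdbc:postgresql://localhost:5432/";
            try (Connection conn = DriverManager.getConnection(url, "postgres", "")) {
                try (Statement stmt = conn.createStatement()) {
                    stmt.execute("CREATE TABLE IF NOT EXISTS counters_demo (" +
                                 "SCHEMA_TYPE VARCHAR(255), ID INT, " +
                                 "PRIMARY KEY (SCHEMA_TYPE))");
                }
                // Same upsert shape as Counters.increaseCounter(): insert or add to the existing row
                String upsert = "INSERT INTO counters_demo (SCHEMA_TYPE, ID) VALUES (?, ?) " +
                                "ON CONFLICT (SCHEMA_TYPE) " +
                                "DO UPDATE SET ID = counters_demo.ID + ?";
                try (PreparedStatement ps = conn.prepareStatement(upsert)) {
                    for (int i = 0; i < 2; i++) {
                        ps.setString(1, "VERTEX_LABEL");
                        ps.setInt(2, 1);
                        ps.setInt(3, 1);
                        ps.executeUpdate();
                    }
                }
                try (Statement stmt = conn.createStatement();
                     ResultSet rs = stmt.executeQuery(
                             "SELECT ID FROM counters_demo " +
                             "WHERE SCHEMA_TYPE = 'VERTEX_LABEL'")) {
                    rs.next();
                    System.out.println("counter = " + rs.getLong("ID")); // prints 2 on a fresh table
                }
            }
        }
    }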